[{"file_date_updated":"2019-01-08T14:03:53Z","language":[{"iso":"eng"}],"ddc":["000"],"keyword":["parallel machine scheduling with setup times","parallel branch-and-price algorithm","high performance computing","master/worker parallelization"],"user_id":"61579","department":[{"_id":"277"}],"_id":"6512","file":[{"file_size":4153528,"file_id":"6513","access_level":"open_access","file_name":"cor-parallel-bp-for-upmsp.pdf","date_updated":"2019-01-08T14:03:53Z","creator":"hsiemes","date_created":"2019-01-08T14:03:53Z","relation":"main_file","content_type":"application/pdf"}],"status":"public","abstract":[{"text":"Scheduling problems are essential for decision making in many academic disciplines, including operations management, computer science, and information systems. Since many scheduling problems are NP-hard in the strong sense, there is only limited research on exact algorithms and how their efficiency scales when implemented on parallel computing architectures. We address this gap by (1) adapting an exact branch-and-price algorithm to a parallel machine scheduling problem on unrelated machines with sequence- and machine-dependent setup times, (2) parallelizing the adapted algorithm by implementing a distributed-memory parallelization with a master/worker approach, and (3) conducting extensive computational experiments using up to 960 MPI processes on a modern high performance computing cluster. With our experiments, we show that the efficiency of our parallelization approach can lead to superlinear speedup but can vary substantially between instances. We further show that the wall time of serial execution can be substantially reduced through our parallelization, in some cases from 94 hours to less than six minutes when our algorithm is executed on 960 processes.","lang":"eng"}],"type":"journal_article","publication":"Computers & Operations Research","title":"Using High Performance Computing for Unrelated Parallel Machine Scheduling with Sequence-Dependent Setup Times: Development and Computational Evaluation of a Parallel Branch-and-Price Algorithm","author":[{"first_name":"Gerhard","last_name":"Rauchecker","full_name":"Rauchecker, Gerhard"},{"id":"72850","full_name":"Schryen, Guido","last_name":"Schryen","first_name":"Guido"}],"date_created":"2019-01-08T13:50:44Z","oa":"1","date_updated":"2022-01-06T07:03:08Z","publisher":"Elsevier","citation":{"chicago":"Rauchecker, Gerhard, and Guido Schryen. “Using High Performance Computing for Unrelated Parallel Machine Scheduling with Sequence-Dependent Setup Times: Development and Computational Evaluation of a Parallel Branch-and-Price Algorithm.” <i>Computers &#38; Operations Research</i>, no. 104 (2019): 338–57.","ieee":"G. Rauchecker and G. Schryen, “Using High Performance Computing for Unrelated Parallel Machine Scheduling with Sequence-Dependent Setup Times: Development and Computational Evaluation of a Parallel Branch-and-Price Algorithm,” <i>Computers &#38; Operations Research</i>, no. 104, pp. 338–357, 2019.","ama":"Rauchecker G, Schryen G. Using High Performance Computing for Unrelated Parallel Machine Scheduling with Sequence-Dependent Setup Times: Development and Computational Evaluation of a Parallel Branch-and-Price Algorithm. <i>Computers &#38; Operations Research</i>. 2019;(104):338-357.","mla":"Rauchecker, Gerhard, and Guido Schryen. “Using High Performance Computing for Unrelated Parallel Machine Scheduling with Sequence-Dependent Setup Times: Development and Computational Evaluation of a Parallel Branch-and-Price Algorithm.” <i>Computers &#38; Operations Research</i>, no. 104, Elsevier, 2019, pp. 338–57.","short":"G. Rauchecker, G. Schryen, Computers &#38; Operations Research (2019) 338–357.","bibtex":"@article{Rauchecker_Schryen_2019, title={Using High Performance Computing for Unrelated Parallel Machine Scheduling with Sequence-Dependent Setup Times: Development and Computational Evaluation of a Parallel Branch-and-Price Algorithm}, number={104}, journal={Computers &#38; Operations Research}, publisher={Elsevier}, author={Rauchecker, Gerhard and Schryen, Guido}, year={2019}, pages={338–357} }","apa":"Rauchecker, G., &#38; Schryen, G. (2019). Using High Performance Computing for Unrelated Parallel Machine Scheduling with Sequence-Dependent Setup Times: Development and Computational Evaluation of a Parallel Branch-and-Price Algorithm. <i>Computers &#38; Operations Research</i>, (104), 338–357."},"page":"338-357","year":"2019","issue":"104","has_accepted_license":"1"},
{"year":"2015","page":"1-13","citation":{"bibtex":"@inproceedings{Rauchecker_Schryen_2015, title={High-Performance Computing for Scheduling Decision Support: A Parallel Depth-First Search Heuristic}, booktitle={Australasian Conference on Information Systems}, author={Rauchecker, Gerhard and Schryen, Guido}, year={2015}, pages={1–13} }","mla":"Rauchecker, Gerhard, and Guido Schryen. “High-Performance Computing for Scheduling Decision Support: A Parallel Depth-First Search Heuristic.” <i>Australasian Conference on Information Systems</i>, 2015, pp. 1–13.","short":"G. Rauchecker, G. Schryen, in: Australasian Conference on Information Systems, 2015, pp. 1–13.","apa":"Rauchecker, G., &#38; Schryen, G. (2015). High-Performance Computing for Scheduling Decision Support: A Parallel Depth-First Search Heuristic. In <i>Australasian Conference on Information Systems</i> (pp. 1–13).","ieee":"G. Rauchecker and G. Schryen, “High-Performance Computing for Scheduling Decision Support: A Parallel Depth-First Search Heuristic,” in <i>Australasian Conference on Information Systems</i>, 2015, pp. 1–13.","chicago":"Rauchecker, Gerhard, and Guido Schryen. “High-Performance Computing for Scheduling Decision Support: A Parallel Depth-First Search Heuristic.” In <i>Australasian Conference on Information Systems</i>, 1–13, 2015.","ama":"Rauchecker G, Schryen G. High-Performance Computing for Scheduling Decision Support: A Parallel Depth-First Search Heuristic. In: <i>Australasian Conference on Information Systems</i>. ; 2015:1-13."},"has_accepted_license":"1","title":"High-Performance Computing for Scheduling Decision Support: A Parallel Depth-First Search Heuristic","date_updated":"2022-01-06T07:02:30Z","oa":"1","date_created":"2018-11-14T15:39:50Z","author":[{"first_name":"Gerhard","full_name":"Rauchecker, Gerhard","last_name":"Rauchecker"},{"first_name":"Guido","last_name":"Schryen","id":"72850","full_name":"Schryen, Guido"}],"abstract":[{"lang":"eng","text":"Many academic disciplines - including information systems, computer science, and operations management - face scheduling problems as important decision making tasks. Since many scheduling problems are NP-hard in the strong sense, there is a need for developing solution heuristics. For scheduling problems with setup times on unrelated parallel machines, there is limited research on solution methods and to the best of our knowledge, parallel computer architectures have not yet been taken advantage of. We address this gap by proposing and implementing a new solution heuristic and by testing different parallelization strategies. In our computational experiments, we show that our heuristic calculates near-optimal solutions even for large instances and that computing time can be reduced substantially by our parallelization approach."}],"status":"public","file":[{"relation":"main_file","content_type":"application/pdf","file_size":6771871,"file_id":"6031","access_level":"open_access","file_name":"ACIS_2015_paper_7.pdf","date_updated":"2018-12-13T15:08:28Z","date_created":"2018-12-07T11:40:18Z","creator":"hsiemes"}],"publication":"Australasian Conference on Information Systems","type":"conference","keyword":["scheduling","decision support","heuristic","high performance computing","parallel algorithms"],"ddc":["000"],"file_date_updated":"2018-12-13T15:08:28Z","extern":"1","language":[{"iso":"eng"}],"_id":"5678","department":[{"_id":"277"}],"user_id":"61579"},
{"type":"conference","status":"public","_id":"1998","user_id":"15274","series_title":"Lecture Notes in Computer Science","department":[{"_id":"27"}],"publication_status":"published","place":"Berlin / Heidelberg","citation":{"ama":"Hovestadt M, Kao O, Keller A, Streit A. Scheduling in HPC Resource Management Systems: Queuing vs. Planning. In: <i>Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)</i>. Vol 2862. Lecture Notes in Computer Science. Berlin / Heidelberg; 2003:1-20. doi:<a href=\"https://doi.org/10.1007/10968987_1\">10.1007/10968987_1</a>","chicago":"Hovestadt, Matthias, Odej Kao, Axel Keller, and Achim Streit. “Scheduling in HPC Resource Management Systems: Queuing vs. Planning.” In <i>Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)</i>, 2862:1–20. Lecture Notes in Computer Science. Berlin / Heidelberg, 2003. <a href=\"https://doi.org/10.1007/10968987_1\">https://doi.org/10.1007/10968987_1</a>.","ieee":"M. Hovestadt, O. Kao, A. Keller, and A. Streit, “Scheduling in HPC Resource Management Systems: Queuing vs. Planning,” in <i>Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)</i>, 2003, vol. 2862, pp. 1–20.","bibtex":"@inproceedings{Hovestadt_Kao_Keller_Streit_2003, place={Berlin / Heidelberg}, series={Lecture Notes in Computer Science}, title={Scheduling in HPC Resource Management Systems: Queuing vs. Planning}, volume={2862}, DOI={<a href=\"https://doi.org/10.1007/10968987_1\">10.1007/10968987_1</a>}, booktitle={Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)}, author={Hovestadt, Matthias and Kao, Odej and Keller, Axel and Streit, Achim}, year={2003}, pages={1–20}, collection={Lecture Notes in Computer Science} }","mla":"Hovestadt, Matthias, et al. “Scheduling in HPC Resource Management Systems: Queuing vs. Planning.” <i>Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)</i>, vol. 2862, 2003, pp. 1–20, doi:<a href=\"https://doi.org/10.1007/10968987_1\">10.1007/10968987_1</a>.","short":"M. Hovestadt, O. Kao, A. Keller, A. Streit, in: Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP), Berlin / Heidelberg, 2003, pp. 1–20.","apa":"Hovestadt, M., Kao, O., Keller, A., &#38; Streit, A. (2003). Scheduling in HPC Resource Management Systems: Queuing vs. Planning. In <i>Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)</i> (Vol. 2862, pp. 1–20). Berlin / Heidelberg. <a href=\"https://doi.org/10.1007/10968987_1\">https://doi.org/10.1007/10968987_1</a>"},"intvolume":"2862","page":"1-20","date_updated":"2022-01-06T06:54:17Z","author":[{"first_name":"Matthias","last_name":"Hovestadt","full_name":"Hovestadt, Matthias"},{"first_name":"Odej","full_name":"Kao, Odej","last_name":"Kao"},{"last_name":"Keller","full_name":"Keller, Axel","id":"15274","first_name":"Axel"},{"first_name":"Achim","full_name":"Streit, Achim","last_name":"Streit"}],"volume":2862,"doi":"10.1007/10968987_1","publication":"Proc. Workshop on Job Scheduling Strategies for Parallel Processing (JSSPP)","abstract":[{"text":"Nearly all existing HPC systems are operated by resource management systems based on the queuing approach. With the increasing acceptance of grid middleware like Globus, new requirements for the underlying local resource management systems arise. Features like advanced reservation or quality of service are needed to implement high level functions like co-allocation. However it is difficult to realize these features with a resource management system based on the queuing concept since it considers only the present resource usage.\r\n\r\nIn this paper we present an approach which closes this gap. By assigning start times to each resource request, a complete schedule is planned. Advanced reservations are now easily possible. Based on this planning approach functions like diffuse requests, automatic duration extension, or service level agreements are described. We think they are useful to increase the usability, acceptance and performance of HPC machines. In the second part of this paper we present a planning based resource management system which already covers some of the mentioned features.","lang":"eng"}],"keyword":["High Performance Computing","Service Level Agreement","Grid Resource","Resource Management System","Advance Reservation"],"language":[{"iso":"eng"}],"year":"2003","date_created":"2018-03-29T11:37:24Z","title":"Scheduling in HPC Resource Management Systems: Queuing vs. Planning"},
{"title":"TKDM – A Reconfigurable Co-processor in a PC's Memory Slot","doi":"10.1109/FPT.2003.1275755","date_updated":"2022-01-06T06:56:09Z","publisher":"IEEE Computer Society","date_created":"2018-04-17T15:03:34Z","author":[{"first_name":"Christian","full_name":"Plessl, Christian","id":"16153","last_name":"Plessl","orcid":"0000-0001-5728-9982"},{"first_name":"Marco","last_name":"Platzner","full_name":"Platzner, Marco","id":"398"}],"year":"2003","page":"252-259","citation":{"ieee":"C. Plessl and M. Platzner, “TKDM – A Reconfigurable Co-processor in a PC’s Memory Slot,” in <i>Proc. Int. Conf. on Field Programmable Technology (ICFPT)</i>, 2003, pp. 252–259.","chicago":"Plessl, Christian, and Marco Platzner. “TKDM – A Reconfigurable Co-Processor in a PC’s Memory Slot.” In <i>Proc. Int. Conf. on Field Programmable Technology (ICFPT)</i>, 252–59. IEEE Computer Society, 2003. <a href=\"https://doi.org/10.1109/FPT.2003.1275755\">https://doi.org/10.1109/FPT.2003.1275755</a>.","ama":"Plessl C, Platzner M. TKDM – A Reconfigurable Co-processor in a PC’s Memory Slot. In: <i>Proc. Int. Conf. on Field Programmable Technology (ICFPT)</i>. IEEE Computer Society; 2003:252-259. doi:<a href=\"https://doi.org/10.1109/FPT.2003.1275755\">10.1109/FPT.2003.1275755</a>","apa":"Plessl, C., &#38; Platzner, M. (2003). TKDM – A Reconfigurable Co-processor in a PC’s Memory Slot. In <i>Proc. Int. Conf. on Field Programmable Technology (ICFPT)</i> (pp. 252–259). IEEE Computer Society. <a href=\"https://doi.org/10.1109/FPT.2003.1275755\">https://doi.org/10.1109/FPT.2003.1275755</a>","mla":"Plessl, Christian, and Marco Platzner. “TKDM – A Reconfigurable Co-Processor in a PC’s Memory Slot.” <i>Proc. Int. Conf. on Field Programmable Technology (ICFPT)</i>, IEEE Computer Society, 2003, pp. 252–59, doi:<a href=\"https://doi.org/10.1109/FPT.2003.1275755\">10.1109/FPT.2003.1275755</a>.","bibtex":"@inproceedings{Plessl_Platzner_2003, title={TKDM – A Reconfigurable Co-processor in a PC’s Memory Slot}, DOI={<a href=\"https://doi.org/10.1109/FPT.2003.1275755\">10.1109/FPT.2003.1275755</a>}, booktitle={Proc. Int. Conf. on Field Programmable Technology (ICFPT)}, publisher={IEEE Computer Society}, author={Plessl, Christian and Platzner, Marco}, year={2003}, pages={252–259} }","short":"C. Plessl, M. Platzner, in: Proc. Int. Conf. on Field Programmable Technology (ICFPT), IEEE Computer Society, 2003, pp. 252–259."},"keyword":["coprocessor","DIMM","memory bus","FPGA","high performance computing"],"_id":"2418","department":[{"_id":"518"},{"_id":"78"}],"user_id":"24135","abstract":[{"text":" This paper presents TKDM, a PC-based high-performance reconfigurable computing environment. The TKDM hardware consists of an FPGA module that uses the DIMM (dual inline memory module) bus for high-bandwidth and low-latency communication with the host CPU. The system's firmware is integrated with the Linux host operating system and offers functions for data communication and FPGA reconfiguration. The intended use of TKDM is that of a dynamically reconfigurable co-processor for data streaming applications. The system's firmware can be customized for specific application domains to facilitate simple and easy-to-use programming interfaces. ","lang":"eng"}],"status":"public","publication":"Proc. Int. Conf. on Field Programmable Technology (ICFPT)","type":"conference"}]
