[{"publication_identifier":{"issn":["0920-8542","1573-0484"]},"publication_status":"published","citation":{"short":"A. Rodríguez, A. Navarro, R. Asenjo, F. Corbera, R. Gran, D. Suárez, J. Nunez-Yanez, The Journal of Supercomputing (2019).","bibtex":"@article{Rodríguez_Navarro_Asenjo_Corbera_Gran_Suárez_Nunez-Yanez_2019, title={Parallel multiprocessing and scheduling on the heterogeneous Xeon+FPGA platform}, DOI={<a href=\"https://doi.org/10.1007/s11227-019-02935-1\">10.1007/s11227-019-02935-1</a>}, journal={The Journal of Supercomputing}, author={Rodríguez, Andrés and Navarro, Angeles and Asenjo, Rafael and Corbera, Francisco and Gran, Rubén and Suárez, Darío and Nunez-Yanez, Jose}, year={2019} }","mla":"Rodríguez, Andrés, et al. “Parallel Multiprocessing and Scheduling on the Heterogeneous Xeon+FPGA Platform.” <i>The Journal of Supercomputing</i>, 2019, doi:<a href=\"https://doi.org/10.1007/s11227-019-02935-1\">10.1007/s11227-019-02935-1</a>.","apa":"Rodríguez, A., Navarro, A., Asenjo, R., Corbera, F., Gran, R., Suárez, D., &#38; Nunez-Yanez, J. (2019). Parallel multiprocessing and scheduling on the heterogeneous Xeon+FPGA platform. <i>The Journal of Supercomputing</i>. <a href=\"https://doi.org/10.1007/s11227-019-02935-1\">https://doi.org/10.1007/s11227-019-02935-1</a>","ieee":"A. Rodríguez <i>et al.</i>, “Parallel multiprocessing and scheduling on the heterogeneous Xeon+FPGA platform,” <i>The Journal of Supercomputing</i>, 2019.","chicago":"Rodríguez, Andrés, Angeles Navarro, Rafael Asenjo, Francisco Corbera, Rubén Gran, Darío Suárez, and Jose Nunez-Yanez. “Parallel Multiprocessing and Scheduling on the Heterogeneous Xeon+FPGA Platform.” <i>The Journal of Supercomputing</i>, 2019. <a href=\"https://doi.org/10.1007/s11227-019-02935-1\">https://doi.org/10.1007/s11227-019-02935-1</a>.","ama":"Rodríguez A, Navarro A, Asenjo R, et al. Parallel multiprocessing and scheduling on the heterogeneous Xeon+FPGA platform. <i>The Journal of Supercomputing</i>. 2019. doi:<a href=\"https://doi.org/10.1007/s11227-019-02935-1\">10.1007/s11227-019-02935-1</a>"},"year":"2019","author":[{"first_name":"Andrés","last_name":"Rodríguez","full_name":"Rodríguez, Andrés"},{"last_name":"Navarro","full_name":"Navarro, Angeles","first_name":"Angeles"},{"first_name":"Rafael","last_name":"Asenjo","full_name":"Asenjo, Rafael"},{"full_name":"Corbera, Francisco","last_name":"Corbera","first_name":"Francisco"},{"first_name":"Rubén","last_name":"Gran","full_name":"Gran, Rubén"},{"first_name":"Darío","last_name":"Suárez","full_name":"Suárez, Darío"},{"first_name":"Jose","full_name":"Nunez-Yanez, Jose","last_name":"Nunez-Yanez"}],"date_created":"2020-04-06T12:09:25Z","date_updated":"2022-01-06T06:52:50Z","doi":"10.1007/s11227-019-02935-1","title":"Parallel multiprocessing and scheduling on the heterogeneous Xeon+FPGA platform","publication":"The Journal of Supercomputing","type":"journal_article","status":"public","abstract":[{"lang":"eng","text":"Heterogeneous computing that exploits simultaneous co-processing with different device types has been shown to be effective at both increasing performance and reducing energy consumption. In this paper, we extend a scheduling framework encapsulated in a high-level C++ template and previously developed for heterogeneous chips comprising CPU and GPU cores, to new high-performance platforms for the data center, which include a cache coherent FPGA fabric and many-core CPU resources. Our goal is to evaluate the suitability of our framework with these new FPGA-based platforms, identifying performance benefits and limitations. We target the state-of-the-art HARP processor that includes 14 high-end Xeon classes tightly coupled to a FPGA device located in the same package. We select eight benchmarks from the high-performance computing domain that have been ported and optimized for this heterogeneous platform. The results show that a dynamic and adaptive scheduler that exploits simultaneous processing among the devices can improve performance up to a factor of 8 × compared to the best alternative solutions that only use the CPU cores or the FPGA fabric. Moreover, our proposal achieves up to 15% and 37% of improvement compared to the best heterogeneous solutions found with a dynamic and static schedulers, respectively."}],"user_id":"61189","_id":"16423","language":[{"iso":"eng"}],"keyword":["pc2-harp-ressources"]},{"date_updated":"2022-01-06T06:56:10Z","author":[{"first_name":"Christian","id":"16153","full_name":"Plessl, Christian","orcid":"0000-0001-5728-9982","last_name":"Plessl"},{"last_name":"Platzner","full_name":"Platzner, Marco","id":"398","first_name":"Marco"}],"volume":26,"doi":"10.1023/a:1024443416592","publication_identifier":{"issn":["0920-8542"]},"citation":{"apa":"Plessl, C., &#38; Platzner, M. (2003). Instance-Specific Accelerators for Minimum Covering. <i>Journal of Supercomputing</i>, <i>26</i>(2), 109–129. <a href=\"https://doi.org/10.1023/a:1024443416592\">https://doi.org/10.1023/a:1024443416592</a>","short":"C. Plessl, M. Platzner, Journal of Supercomputing 26 (2003) 109–129.","mla":"Plessl, Christian, and Marco Platzner. “Instance-Specific Accelerators for Minimum Covering.” <i>Journal of Supercomputing</i>, vol. 26, no. 2, Kluwer Academic Publishers, 2003, pp. 109–29, doi:<a href=\"https://doi.org/10.1023/a:1024443416592\">10.1023/a:1024443416592</a>.","bibtex":"@article{Plessl_Platzner_2003, title={Instance-Specific Accelerators for Minimum Covering}, volume={26}, DOI={<a href=\"https://doi.org/10.1023/a:1024443416592\">10.1023/a:1024443416592</a>}, number={2}, journal={Journal of Supercomputing}, publisher={Kluwer Academic Publishers}, author={Plessl, Christian and Platzner, Marco}, year={2003}, pages={109–129} }","chicago":"Plessl, Christian, and Marco Platzner. “Instance-Specific Accelerators for Minimum Covering.” <i>Journal of Supercomputing</i> 26, no. 2 (2003): 109–29. <a href=\"https://doi.org/10.1023/a:1024443416592\">https://doi.org/10.1023/a:1024443416592</a>.","ieee":"C. Plessl and M. Platzner, “Instance-Specific Accelerators for Minimum Covering,” <i>Journal of Supercomputing</i>, vol. 26, no. 2, pp. 109–129, 2003.","ama":"Plessl C, Platzner M. Instance-Specific Accelerators for Minimum Covering. <i>Journal of Supercomputing</i>. 2003;26(2):109-129. doi:<a href=\"https://doi.org/10.1023/a:1024443416592\">10.1023/a:1024443416592</a>"},"page":"109-129","intvolume":"        26","_id":"2420","user_id":"398","department":[{"_id":"518"},{"_id":"78"}],"extern":"1","type":"journal_article","status":"public","publisher":"Kluwer Academic Publishers","date_created":"2018-04-17T15:10:00Z","title":"Instance-Specific Accelerators for Minimum Covering","issue":"2","year":"2003","keyword":["reconfigurable computing","instance-specific acceleration","minimum covering"],"language":[{"iso":"eng"}],"publication":"Journal of Supercomputing","abstract":[{"lang":"eng","text":" This paper presents the acceleration of minimum-cost covering problems by instance-specific hardware. First, we formulate the minimum-cost covering problem and discuss a branch \\& bound algorithm to solve it. Then we describe instance-specific hardware architectures that implement branch \\& bound in 3-valued logic and use reduction techniques similar to those found in software solvers. We further present prototypical accelerator implementations and a corresponding design tool flow. Our experiments reveal significant raw speedups up to five orders of magnitude for a set of smaller unate covering problems. Provided that hardware compilation times can be reduced, we conclude that instance-specific acceleration of hard minimum-cost covering problems will lead to substantial overall speedups. "}]}]
