MPress: Democratizing Billion-Scale Model Training on Multi-GPU Servers via Memory-Saving Inter-Operator Parallelism.
Resource URI: https://dblp.l3s.de/d2r/resource/publications/conf/hpca/ZhouWYLBYX23
Property                         Value
dcterms:bibliographicCitation    <http://dblp.uni-trier.de/rec/bibtex/conf/hpca/ZhouWYLBYX23>
dc:creator                       <https://dblp.l3s.de/d2r/resource/authors/Cheng_Li_0001>
dc:creator                       <https://dblp.l3s.de/d2r/resource/authors/Feng_Yan_0001>
dc:creator                       <https://dblp.l3s.de/d2r/resource/authors/Haiquan_Wang>
dc:creator                       <https://dblp.l3s.de/d2r/resource/authors/Quan_Zhou>
dc:creator                       <https://dblp.l3s.de/d2r/resource/authors/Xiaoyan_Yu>
dc:creator                       <https://dblp.l3s.de/d2r/resource/authors/Yinlong_Xu>
dc:creator                       <https://dblp.l3s.de/d2r/resource/authors/Youhui_Bai>
foaf:homepage                    <http://dx.doi.org/doi.org%2F10.1109%2FHPCA56546.2023.10071077>
foaf:homepage                    <https://doi.org/10.1109/HPCA56546.2023.10071077>
dc:identifier                    DBLP conf/hpca/ZhouWYLBYX23 (xsd:string)
dc:identifier                    DOI doi.org%2F10.1109%2FHPCA56546.2023.10071077 (xsd:string)
dcterms:issued                   2023 (xsd:gYear)
rdfs:label                       MPress: Democratizing Billion-Scale Model Training on Multi-GPU Servers via Memory-Saving Inter-Operator Parallelism. (xsd:string)
foaf:maker                       <https://dblp.l3s.de/d2r/resource/authors/Cheng_Li_0001>
foaf:maker                       <https://dblp.l3s.de/d2r/resource/authors/Feng_Yan_0001>
foaf:maker                       <https://dblp.l3s.de/d2r/resource/authors/Haiquan_Wang>
foaf:maker                       <https://dblp.l3s.de/d2r/resource/authors/Quan_Zhou>
foaf:maker                       <https://dblp.l3s.de/d2r/resource/authors/Xiaoyan_Yu>
foaf:maker                       <https://dblp.l3s.de/d2r/resource/authors/Yinlong_Xu>
foaf:maker                       <https://dblp.l3s.de/d2r/resource/authors/Youhui_Bai>
swrc:pages                       556-569 (xsd:string)
dcterms:partOf                   <https://dblp.l3s.de/d2r/resource/publications/conf/hpca/2023>
owl:sameAs                       <http://bibsonomy.org/uri/bibtexkey/conf/hpca/ZhouWYLBYX23/dblp>
owl:sameAs                       <http://dblp.rkbexplorer.com/id/conf/hpca/ZhouWYLBYX23>
rdfs:seeAlso                     <http://dblp.uni-trier.de/db/conf/hpca/hpca2023.html#ZhouWYLBYX23>
rdfs:seeAlso                     <https://doi.org/10.1109/HPCA56546.2023.10071077>
swrc:series                      <https://dblp.l3s.de/d2r/resource/conferences/hpca>
dc:title                         MPress: Democratizing Billion-Scale Model Training on Multi-GPU Servers via Memory-Saving Inter-Operator Parallelism. (xsd:string)
dc:type                          <http://purl.org/dc/dcmitype/Text>
rdf:type                         swrc:InProceedings
rdf:type                         foaf:Document
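
The record above is RDF published by a D2R server, so it can be consumed programmatically rather than scraped. Below is a minimal sketch, assuming the dblp.l3s.de endpoint is still online and answers HTTP content negotiation at the resource URI with a machine-readable serialization (e.g. RDF/XML); it loads the record with rdflib and prints the dc:title and dc:creator triples shown in the table.

```python
# Minimal sketch: fetch this publication's RDF record from the D2R server
# and print a few of its properties. Assumes the dblp.l3s.de endpoint is
# reachable and serves RDF via content negotiation at the resource URI.
from rdflib import Graph, URIRef
from rdflib.namespace import DC  # Dublin Core elements 1.1 (dc:)

RESOURCE = URIRef(
    "https://dblp.l3s.de/d2r/resource/publications/conf/hpca/ZhouWYLBYX23"
)

g = Graph()
g.parse(RESOURCE)  # rdflib negotiates an RDF serialization over HTTP

# dc:title -- the paper title as a string literal
for title in g.objects(RESOURCE, DC.title):
    print("title:", title)

# dc:creator -- one triple per author resource URI
for creator in g.objects(RESOURCE, DC.creator):
    print("creator:", creator)
```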