Throughput Maximization of Delay-Aware DNN Inference in Edge Computing by Exploring DNN Model Partitioning and Inference Parallelism.
Resource URI: https://dblp.l3s.de/d2r/resource/publications/journals/tmc/LiLLXJG23
Home
|
Example Publications
Property
Value
dcterms:
bibliographicCitation
<
http://dblp.uni-trier.de/rec/bibtex/journals/tmc/LiLLXJG23
>
dc:
creator
<
https://dblp.l3s.de/d2r/resource/authors/Jing_Li_0093
>
dc:
creator
<
https://dblp.l3s.de/d2r/resource/authors/Song_Guo_0001
>
dc:
creator
<
https://dblp.l3s.de/d2r/resource/authors/Weifa_Liang
>
dc:
creator
<
https://dblp.l3s.de/d2r/resource/authors/Xiaohua_Jia
>
dc:
creator
<
https://dblp.l3s.de/d2r/resource/authors/Yuchen_Li_0003
>
dc:
creator
<
https://dblp.l3s.de/d2r/resource/authors/Zichuan_Xu
>
foaf:
homepage
<
http://dx.doi.org/10.1109/TMC.2021.3125949
>
foaf:
homepage
<
https://doi.org/10.1109/TMC.2021.3125949
>
dc:
identifier
DBLP journals/tmc/LiLLXJG23
(xsd:string)
dc:
identifier
DOI 10.1109/TMC.2021.3125949
(xsd:string)
dcterms:
issued
2023
(xsd:gYear)
swrc:
journal
<
https://dblp.l3s.de/d2r/resource/journals/tmc
>
rdfs:
label
Throughput Maximization of Delay-Aware DNN Inference in Edge Computing by Exploring DNN Model Partitioning and Inference Parallelism.
(xsd:string)
foaf:
maker
<
https://dblp.l3s.de/d2r/resource/authors/Jing_Li_0093
>
foaf:
maker
<
https://dblp.l3s.de/d2r/resource/authors/Song_Guo_0001
>
foaf:
maker
<
https://dblp.l3s.de/d2r/resource/authors/Weifa_Liang
>
foaf:
maker
<
https://dblp.l3s.de/d2r/resource/authors/Xiaohua_Jia
>
foaf:
maker
<
https://dblp.l3s.de/d2r/resource/authors/Yuchen_Li_0003
>
foaf:
maker
<
https://dblp.l3s.de/d2r/resource/authors/Zichuan_Xu
>
swrc:
month
May
(xsd:string)
swrc:
number
5
(xsd:string)
swrc:
pages
3017-3030
(xsd:string)
owl:
sameAs
<
http://bibsonomy.org/uri/bibtexkey/journals/tmc/LiLLXJG23/dblp
>
owl:
sameAs
<
http://dblp.rkbexplorer.com/id/journals/tmc/LiLLXJG23
>
rdfs:
seeAlso
<
http://dblp.uni-trier.de/db/journals/tmc/tmc22.html#LiLLXJG23
>
rdfs:
seeAlso
<
https://doi.org/10.1109/TMC.2021.3125949
>
dc:
title
Throughput Maximization of Delay-Aware DNN Inference in Edge Computing by Exploring DNN Model Partitioning and Inference Parallelism.
(xsd:string)
dc:
type
<
http://purl.org/dc/dcmitype/Text
>
rdf:
type
swrc:Article
rdf:
type
foaf:Document
swrc:
volume
22
(xsd:string)