Update and add index
@@ -1,4 +1,4 @@

# Awesome - Most Cited Deep Learning Papers

[![Awesome](https://cdn.rawgit.com/sindresorhus/awesome/d7305f38d29fed78fa85652e3a63e154dd8e8829/media/badge.svg)](https://github.com/sindresorhus/awesome)
@@ -6,25 +6,24 @@

A curated list of the most cited deep learning papers (2012-2016)

We believe that there exist classic deep learning papers which are worth reading regardless of their application domain. Rather than providing an overwhelming amount of papers, we would like to provide a curated list of awesome deep learning papers that are considered must-reads in certain research domains.

## Background

Before this list, there were other *awesome deep learning lists*, for example, [Deep Vision](https://github.com/kjw0612/awesome-deep-vision) and [Awesome Recurrent Neural Networks](https://github.com/kjw0612/awesome-rnn). Also, after this list came out, another awesome list for deep learning beginners, called [Deep Learning Papers Reading Roadmap](https://github.com/songrotek/Deep-Learning-Papers-Reading-Roadmap), was created and has been loved by many deep learning researchers.

Although the *Roadmap List* includes lots of important deep learning papers, it feels overwhelming for me to read them all. As I mentioned in the introduction, I believe that seminal works can give us lessons regardless of their application domain. Thus, I would like to introduce **top 100 deep learning papers** here as a good starting point for overviewing deep learning research.

To get news about newly released papers every day, follow my [twitter](https://twitter.com/TerryUm_ML) or [facebook page](https://www.facebook.com/terryum.io/)!

## Awesome list criteria

1. A list of **top 100 deep learning papers** published from 2012 to 2016 is suggested.
2. If a paper is added to the list, another paper (usually from the *More Papers from 2016* section) should be removed to keep the list at top 100 papers. (Thus, removing papers is as important a contribution as adding papers.)
3. Papers that are important, but failed to be included in the list, will be listed in the *More than Top 100* section.
4. Please refer to the *New Papers* and *Old Papers* sections for papers published in the last six months or before 2012.
@@ -37,18 +36,17 @@

- **2012** : +800 citations
- **~2012** : *Old Papers* (by discussion)

Please note that we prefer seminal deep learning papers that can be applied to various research areas rather than application papers. For that reason, some papers that meet the criteria may not be accepted while others can be. It depends on the impact of the paper, its applicability to other research, the scarcity of the research domain, and so on.

**We need your contributions!**

If you have any suggestions (missing papers, new papers, key researchers or typos), please feel free to edit and send a pull request.
(Please read the [contributing guide](https://github.com/terryum/awesome-deep-learning-papers/blob/master/Contributing.md) for further instructions, though just letting me know the titles of papers can also be a big contribution to us.)

(Update) You can download all top-100 papers with [this](https://github.com/terryum/awesome-deep-learning-papers/blob/master/fetch_papers.py) and collect all authors' names with [this](https://github.com/terryum/awesome-deep-learning-papers/blob/master/get_authors.py). Also, a [bib file](https://github.com/terryum/awesome-deep-learning-papers/blob/master/top100papers.bib) for all top-100 papers is available. Thanks, doodhwala, [Sven](https://github.com/sunshinemyson) and [grepinsight](https://github.com/grepinsight)!
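
If you only want the PDFs locally and don't want to dig into the repository scripts, a rough equivalent is easy to sketch. The snippet below is a minimal sketch, not `fetch_papers.py` itself; the `README.md` path and the `[pdf](url)` / `[[pdf]](url)` link style are assumptions:

```python
"""Minimal sketch (not the repository's fetch_papers.py): download every
PDF linked from the README. Assumes a local README.md with links written
as [pdf](url) or [[pdf]](url)."""
import os
import re
import urllib.request

README = "README.md"   # assumed path to your local checkout
OUT_DIR = "papers"

os.makedirs(OUT_DIR, exist_ok=True)
text = open(README, encoding="utf-8").read()

# Every URL that follows a "[pdf]"-style link marker.
urls = re.findall(r"\[\[?pdf\]?\]\((https?://[^)]+)\)", text, flags=re.IGNORECASE)

for i, url in enumerate(urls, 1):
    fname = os.path.basename(url.split("?")[0]) or f"paper_{i:03d}.pdf"
    path = os.path.join(OUT_DIR, f"{i:03d}_{fname}")
    try:
        urllib.request.urlretrieve(url, path)   # simple sequential download
        print(f"saved {url} -> {path}")
    except OSError as err:                      # some hosts reject scripted requests
        print(f"skipped {url}: {err}")
```

Some hosts refuse plain `urllib` requests; for those, retrying with a browser-like User-Agent header usually helps.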

+ Can anyone contribute the code for obtaining the statistics of the authors of Top-100 papers?
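
As one possible starting point for that request, here is a minimal sketch that counts how many top-100 papers each author appears on. It assumes the repository's `top100papers.bib` with standard BibTeX `author = {A and B and ...}` fields:

```python
"""Minimal sketch for the requested author statistics. Assumes the
repository's top100papers.bib with standard BibTeX author fields."""
import re
from collections import Counter

with open("top100papers.bib", encoding="utf-8") as f:   # assumed path
    bib = f.read()

counts = Counter()
# Pull every author field; names are separated by " and " in BibTeX.
# (Nested braces inside names are not handled; good enough for a quick count.)
for field in re.findall(r"author\s*=\s*\{([^}]*)\}", bib, flags=re.IGNORECASE):
    for author in re.split(r"\s+and\s+", field.replace("\n", " ")):
        counts[author.strip()] += 1

for author, n in counts.most_common(20):
    print(f"{n:3d}  {author}")
```

Merging name variants (e.g. initials vs. full first names) is the part that still needs a proper contributed script.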
@@ -93,8 +91,7 @@

## Optimization / Training Techniques

- **Training very deep networks** (2015), R. Srivastava et al. [pdf](http://papers.nips.cc/paper/5850-training-very-deep-networks.pdf)
- **Batch normalization: Accelerating deep network training by reducing internal covariate shift** (2015), S. Ioffe and C. Szegedy [pdf](http://arxiv.org/pdf/1502.03167)
- **Delving deep into rectifiers: Surpassing human-level performance on imagenet classification** (2015), K. He et al. [pdf](http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)
- **Dropout: A simple way to prevent neural networks from overfitting** (2014), N. Srivastava et al. [pdf](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
- **Adam: A method for stochastic optimization** (2014), D. Kingma and J. Ba [pdf](http://arxiv.org/pdf/1412.6980)
- **Improving neural networks by preventing co-adaptation of feature detectors** (2012), G. Hinton et al. [pdf](http://arxiv.org/pdf/1207.0580.pdf)
@@ -113,8 +110,7 @@

## Convolutional Neural Network Models

- **Rethinking the inception architecture for computer vision** (2016), C. Szegedy et al. [pdf](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Szegedy_Rethinking_the_Inception_CVPR_2016_paper.pdf)
- **Inception-v4, inception-resnet and the impact of residual connections on learning** (2016), C. Szegedy et al. [pdf](http://arxiv.org/pdf/1602.07261)
- **Identity Mappings in Deep Residual Networks** (2016), K. He et al. [pdf](https://arxiv.org/pdf/1603.05027v2.pdf)
- **Deep residual learning for image recognition** (2016), K. He et al. [pdf](http://arxiv.org/pdf/1512.03385)
@@ -125,21 +121,16 @@

- **OverFeat: Integrated recognition, localization and detection using convolutional networks** (2013), P. Sermanet et al. [pdf](http://arxiv.org/pdf/1312.6229)
- **Maxout networks** (2013), I. Goodfellow et al. [pdf](http://arxiv.org/pdf/1302.4389v4)
- **Network in network** (2013), M. Lin et al. [pdf](http://arxiv.org/pdf/1312.4400)
- **ImageNet classification with deep convolutional neural networks** (2012), A. Krizhevsky et al. [pdf](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf)

## Image: Segmentation / Object Detection

- **You only look once: Unified, real-time object detection** (2016), J. Redmon et al. [pdf](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Redmon_You_Only_Look_CVPR_2016_paper.pdf)
- **Fully convolutional networks for semantic segmentation** (2015), J. Long et al. [pdf](http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Long_Fully_Convolutional_Networks_2015_CVPR_paper.pdf)
- **Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks** (2015), S. Ren et al. [pdf](http://papers.nips.cc/paper/5638-faster-r-cnn-towards-real-time-object-detection-with-region-proposal-networks.pdf)
- **Fast R-CNN** (2015), R. Girshick [pdf](http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Girshick_Fast_R-CNN_ICCV_2015_paper.pdf)
- **Rich feature hierarchies for accurate object detection and semantic segmentation** (2014), R. Girshick et al. [pdf](http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Girshick_Rich_Feature_Hierarchies_2014_CVPR_paper.pdf)
- **Spatial pyramid pooling in deep convolutional networks for visual recognition** (2014), K. He et al. [pdf](http://arxiv.org/pdf/1406.4729)
- **Semantic image segmentation with deep convolutional nets and fully connected CRFs**, L. Chen et al. [pdf](https://arxiv.org/pdf/1412.7062)
- **Learning hierarchical features for scene labeling** (2013), C. Farabet et al. [pdf](https://hal-enpc.archives-ouvertes.fr/docs/00/74/20/77/PDF/farabet-pami-13.pdf)
@@ -149,18 +140,15 @@

## Image / Video / Etc

- **Image Super-Resolution Using Deep Convolutional Networks** (2016), C. Dong et al. [pdf](https://arxiv.org/pdf/1501.00092v3.pdf)
- **A neural algorithm of artistic style** (2015), L. Gatys et al. [pdf](https://arxiv.org/pdf/1508.06576)
- **Deep visual-semantic alignments for generating image descriptions** (2015), A. Karpathy and L. Fei-Fei [pdf](http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Karpathy_Deep_Visual-Semantic_Alignments_2015_CVPR_paper.pdf)
- **Show, attend and tell: Neural image caption generation with visual attention** (2015), K. Xu et al. [pdf](http://arxiv.org/pdf/1502.03044)
- **Show and tell: A neural image caption generator** (2015), O. Vinyals et al. [pdf](http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Vinyals_Show_and_Tell_2015_CVPR_paper.pdf)
- **Long-term recurrent convolutional networks for visual recognition and description** (2015), J. Donahue et al. [pdf](http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Donahue_Long-Term_Recurrent_Convolutional_2015_CVPR_paper.pdf)
- **VQA: Visual question answering** (2015), S. Antol et al. [pdf](http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Antol_VQA_Visual_Question_ICCV_2015_paper.pdf)
- **DeepFace: Closing the gap to human-level performance in face verification** (2014), Y. Taigman et al. [pdf](http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Taigman_DeepFace_Closing_the_2014_CVPR_paper.pdf)
- **Large-scale video classification with convolutional neural networks** (2014), A. Karpathy et al. [pdf](http://vision.stanford.edu/pdf/karpathy14.pdf)
- **Two-stream convolutional networks for action recognition in videos** (2014), K. Simonyan et al. [pdf](http://papers.nips.cc/paper/5353-two-stream-convolutional-networks-for-action-recognition-in-videos.pdf)
- **3D convolutional neural networks for human action recognition** (2013), S. Ji et al. [pdf](http://machinelearning.wustl.edu/mlpapers/paper_files/icml2010_JiXYY10.pdf)
@@ -172,8 +160,7 @@

- **Exploring the limits of language modeling** (2016), R. Jozefowicz et al. [pdf](http://arxiv.org/pdf/1602.02410)
- **Teaching machines to read and comprehend** (2015), K. Hermann et al. [pdf](http://papers.nips.cc/paper/5945-teaching-machines-to-read-and-comprehend.pdf)
- **Effective approaches to attention-based neural machine translation** (2015), M. Luong et al. [pdf](https://arxiv.org/pdf/1508.04025)
- **Conditional random fields as recurrent neural networks** (2015), S. Zheng and S. Jayasumana. [pdf](http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zheng_Conditional_Random_Fields_ICCV_2015_paper.pdf)
- **Memory networks** (2014), J. Weston et al. [pdf](https://arxiv.org/pdf/1410.3916)
- **Neural turing machines** (2014), A. Graves et al. [pdf](https://arxiv.org/pdf/1410.5401)
- **Neural machine translation by jointly learning to align and translate** (2014), D. Bahdanau et al. [pdf](http://arxiv.org/pdf/1409.0473)
@@ -183,11 +170,9 @@

- **Convolutional neural networks for sentence classification** (2014), Y. Kim [pdf](http://arxiv.org/pdf/1408.5882)
- **Glove: Global vectors for word representation** (2014), J. Pennington et al. [pdf](http://anthology.aclweb.org/D/D14/D14-1162.pdf)
- **Distributed representations of sentences and documents** (2014), Q. Le and T. Mikolov [pdf](http://arxiv.org/pdf/1405.4053)
- **Distributed representations of words and phrases and their compositionality** (2013), T. Mikolov et al. [pdf](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf)
- **Efficient estimation of word representations in vector space** (2013), T. Mikolov et al. [pdf](http://arxiv.org/pdf/1301.3781)
- **Recursive deep models for semantic compositionality over a sentiment treebank** (2013), R. Socher et al. [pdf](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.383.1327&rep=rep1&type=pdf)
- **Generating sequences with recurrent neural networks** (2013), A. Graves. [pdf](https://arxiv.org/pdf/1308.0850)
@@ -196,10 +181,8 @@

- **End-to-end attention-based large vocabulary speech recognition** (2016), D. Bahdanau et al. [pdf](https://arxiv.org/pdf/1508.04395)
- **Deep speech 2: End-to-end speech recognition in English and Mandarin** (2015), D. Amodei et al. [pdf](https://arxiv.org/pdf/1512.02595)
- **Speech recognition with deep recurrent neural networks** (2013), A. Graves [pdf](http://arxiv.org/pdf/1303.5778.pdf)
- **Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups** (2012), G. Hinton et al. [pdf](http://www.cs.toronto.edu/~asamir/papers/SPM_DNN_12.pdf)
- **Context-dependent pre-trained deep neural networks for large-vocabulary speech recognition** (2012), G. Dahl et al. [pdf](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.337.7548&rep=rep1&type=pdf)
- **Acoustic modeling using deep belief networks** (2012), A. Mohamed et al. [pdf](http://www.cs.toronto.edu/~asamir/papers/speechDBN_jrnl.pdf)
@@ -230,8 +213,7 @@

- **Eie: Efficient inference engine on compressed deep neural network** (2016), S. Han et al. [pdf](http://arxiv.org/pdf/1602.01528)
- **Binarized neural networks: Training deep neural networks with weights and activations constrained to +1 or -1** (2016), M. Courbariaux et al. [pdf](https://arxiv.org/pdf/1602.02830)
- **Dynamic memory networks for visual and textual question answering** (2016), C. Xiong et al. [pdf](http://www.jmlr.org/proceedings/papers/v48/xiong16.pdf)
- **Stacked attention networks for image question answering** (2016), Z. Yang et al. [pdf](http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Yang_Stacked_Attention_Networks_CVPR_2016_paper.pdf)
- **Hybrid computing using a neural network with dynamic external memory** (2016), A. Graves et al. [pdf](https://www.gwern.net/docs/2016-graves.pdf)
- **Google's neural machine translation system: Bridging the gap between human and machine translation** (2016), Y. Wu et al. [pdf](https://arxiv.org/pdf/1609.08144)
@@ -264,20 +246,15 @@

- Deep sparse rectifier neural networks (2011), X. Glorot et al. [pdf](http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2011_GlorotBB11.pdf)
- Natural language processing (almost) from scratch (2011), R. Collobert et al. [pdf](http://arxiv.org/pdf/1103.0398)
- Recurrent neural network based language model (2010), T. Mikolov et al. [pdf](http://www.fit.vutbr.cz/research/groups/speech/servite/2010/rnnlm_mikolov.pdf)
- Stacked denoising autoencoders: Learning useful representations in a deep network with a local denoising criterion (2010), P. Vincent et al. [pdf](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.297.3484&rep=rep1&type=pdf)
- Learning mid-level features for recognition (2010), Y. Boureau [pdf](http://ece.duke.edu/~lcarin/boureau-cvpr-10.pdf)
- A practical guide to training restricted boltzmann machines (2010), G. Hinton [pdf](http://www.csri.utoronto.ca/~hinton/absps/guideTR.pdf)
- Understanding the difficulty of training deep feedforward neural networks (2010), X. Glorot and Y. Bengio [pdf](http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_GlorotB10.pdf)
- Why does unsupervised pre-training help deep learning (2010), D. Erhan et al. [pdf](http://machinelearning.wustl.edu/mlpapers/paper_files/AISTATS2010_ErhanCBV10.pdf)
- Learning deep architectures for AI (2009), Y. Bengio. [pdf](http://sanghv.com/download/soft/machine%20learning,%20artificial%20intelligence,%20mathematics%20ebooks/ML/learning%20deep%20architectures%20for%20AI%20(2009).pdf)
- Convolutional deep belief networks for scalable unsupervised learning of hierarchical representations (2009), H. Lee et al. [pdf](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.149.802&rep=rep1&type=pdf)
- Greedy layer-wise training of deep networks (2007), Y. Bengio et al. [pdf](http://machinelearning.wustl.edu/mlpapers/paper_files/NIPS2006_739.pdf)
- Reducing the dimensionality of data with neural networks (2006), G. Hinton and R. Salakhutdinov. [pdf](http://homes.mpimf-heidelberg.mpg.de/~mhelmsta/pdf/2006%20Hinton%20Salakhudtkinov%20Science.pdf)
- A fast learning algorithm for deep belief nets (2006), G. Hinton et al. [pdf](http://nuyoo.utm.mx/~jjf/rna/A8%20A%20fast%20learning%20algorithm%20for%20deep%20belief%20nets.pdf)
- Gradient-based learning applied to document recognition (1998), Y. LeCun et al. [pdf](http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf)
- Long short-term memory (1997), S. Hochreiter and J. Schmidhuber. [pdf](http://www.mitpressjournals.org/doi/pdfplus/10.1162/neco.1997.9.8.1735)
@@ -347,29 +324,23 @@
- Achieving open vocabulary neural machine translation with hybrid word-character models (2016), M. Luong and C. Manning. pdf (https://arxiv.org/pdf/1604.00788)
- Very Deep Convolutional Networks for Natural Language Processing (2016), A. Conneau et al. pdf (https://arxiv.org/pdf/1606.01781)
- Bag of tricks for efficient text classification (2016), A. Joulin et al. pdf (https://arxiv.org/pdf/1607.01759)
- Efficient piecewise training of deep structured models for semantic segmentation (2016), G. Lin et al. pdf (http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Lin_Efficient_Piecewise_Training_CVPR_2016_paper.pdf)
- Learning to compose neural networks for question answering (2016), J. Andreas et al. pdf (https://arxiv.org/pdf/1601.01705)
- Perceptual losses for real-time style transfer and super-resolution (2016), J. Johnson et al. pdf (https://arxiv.org/pdf/1603.08155)
- Reading text in the wild with convolutional neural networks (2016), M. Jaderberg et al. pdf (http://arxiv.org/pdf/1412.1842)
- What makes for effective detection proposals? (2016), J. Hosang et al. pdf (https://arxiv.org/pdf/1502.05082)
- Inside-outside net: Detecting objects in context with skip pooling and recurrent neural networks (2016), S. Bell et al. pdf (http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Bell_Inside-Outside_Net_Detecting_CVPR_2016_paper.pdf)
- Instance-aware semantic segmentation via multi-task network cascades (2016), J. Dai et al. pdf (http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Dai_Instance-Aware_Semantic_Segmentation_CVPR_2016_paper.pdf)
- Conditional image generation with pixelcnn decoders (2016), A. van den Oord et al. pdf (http://papers.nips.cc/paper/6527-tree-structured-reinforcement-learning-for-sequential-object-localization.pdf)
- Deep networks with stochastic depth (2016), G. Huang et al. pdf (https://arxiv.org/pdf/1603.09382)
- Consistency and Fluctuations For Stochastic Gradient Langevin Dynamics (2016), Yee Whye Teh et al. pdf (http://www.jmlr.org/papers/volume17/teh16a/teh16a.pdf)

(2015)

- Ask your neurons: A neural-based approach to answering questions about images (2015), M. Malinowski et al. pdf (http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Malinowski_Ask_Your_Neurons_ICCV_2015_paper.pdf)
- Exploring models and data for image question answering (2015), M. Ren et al. pdf (http://papers.nips.cc/paper/5640-stochastic-variational-inference-for-hidden-markov-models.pdf)
- Are you talking to a machine? dataset and methods for multilingual image question (2015), H. Gao et al. pdf (http://papers.nips.cc/paper/5641-are-you-talking-to-a-machine-dataset-and-methods-for-multilingual-image-question.pdf)
- Mind's eye: A recurrent visual representation for image caption generation (2015), X. Chen and C. Zitnick. pdf (http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Chen_Minds_Eye_A_2015_CVPR_paper.pdf)
- From captions to visual concepts and back (2015), H. Fang et al. pdf (http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Fang_From_Captions_to_2015_CVPR_paper.pdf)
- Towards AI-complete question answering: A set of prerequisite toy tasks (2015), J. Weston et al. pdf (http://arxiv.org/pdf/1502.05698)
- Ask me anything: Dynamic memory networks for natural language processing (2015), A. Kumar et al. pdf (http://arxiv.org/pdf/1506.07285)
@@ -381,15 +352,12 @@
- Trust Region Policy Optimization (2015), J. Schulman et al. pdf (http://www.jmlr.org/proceedings/papers/v37/schulman15.pdf)
- Beyond short snippets: Deep networks for video classification (2015) pdf (http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Ng_Beyond_Short_Snippets_2015_CVPR_paper.pdf)
- Learning Deconvolution Network for Semantic Segmentation (2015), H. Noh et al. pdf (https://arxiv.org/pdf/1505.04366v1)
- Learning spatiotemporal features with 3d convolutional networks (2015), D. Tran et al. pdf (http://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Tran_Learning_Spatiotemporal_Features_ICCV_2015_paper.pdf)
- Understanding neural networks through deep visualization (2015), J. Yosinski et al. pdf (https://arxiv.org/pdf/1506.06579)
- An Empirical Exploration of Recurrent Network Architectures (2015), R. Jozefowicz et al. pdf (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
- Deep generative image models using a laplacian pyramid of adversarial networks (2015), E. Denton et al. pdf (http://papers.nips.cc/paper/5773-deep-generative-image-models-using-a-laplacian-pyramid-of-adversarial-networks.pdf)
- Gated Feedback Recurrent Neural Networks (2015), J. Chung et al. pdf (http://www.jmlr.org/proceedings/papers/v37/chung15.pdf)
- Fast and accurate deep network learning by exponential linear units (ELUs) (2015), D. Clevert et al. pdf (https://arxiv.org/pdf/1511.07289.pdf)
- Pointer networks (2015), O. Vinyals et al. pdf (http://papers.nips.cc/paper/5866-pointer-networks.pdf)
- Visualizing and Understanding Recurrent Networks (2015), A. Karpathy et al. pdf (https://arxiv.org/pdf/1506.02078)
- Attention-based models for speech recognition (2015), J. Chorowski et al. pdf (http://papers.nips.cc/paper/5847-attention-based-models-for-speech-recognition.pdf)
@@ -403,8 +371,7 @@

(~2014)

- DeepPose: Human pose estimation via deep neural networks (2014), A. Toshev and C. Szegedy. pdf (http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Toshev_DeepPose_Human_Pose_2014_CVPR_paper.pdf)
- Learning a Deep Convolutional Network for Image Super-Resolution (2014), C. Dong et al. pdf (https://www.researchgate.net/profile/Chen_Change_Loy/publication/264552416_Lecture_Notes_in_Computer_Science/links/53e583e50cf25d674e9c280e.pdf)
- Recurrent models of visual attention (2014), V. Mnih et al. pdf (http://arxiv.org/pdf/1406.6247.pdf)
@@ -414,8 +381,7 @@
- Recurrent neural network regularization (2014), W. Zaremba et al. pdf (http://arxiv.org/pdf/1409.2329)
- Intriguing properties of neural networks (2014), C. Szegedy et al. pdf (https://arxiv.org/pdf/1312.6199.pdf)
- Towards end-to-end speech recognition with recurrent neural networks (2014), A. Graves and N. Jaitly. pdf (http://www.jmlr.org/proceedings/papers/v32/graves14.pdf)
- Scalable object detection using deep neural networks (2014), D. Erhan et al. pdf (http://www.cv-foundation.org/openaccess/content_cvpr_2014/papers/Erhan_Scalable_Object_Detection_2014_CVPR_paper.pdf)
- On the importance of initialization and momentum in deep learning (2013), I. Sutskever et al. pdf (http://machinelearning.wustl.edu/mlpapers/paper_files/icml2013_sutskever13.pdf)
- Regularization of neural networks using dropconnect (2013), L. Wan et al. pdf (http://machinelearning.wustl.edu/mlpapers/paper_files/icml2013_wan13.pdf)
- Learning Hierarchical Features for Scene Labeling (2013), C. Farabet et al. pdf (https://hal-enpc.archives-ouvertes.fr/docs/00/74/20/77/PDF/farabet-pami-13.pdf)
@@ -427,8 +393,7 @@

Acknowledgement

Thank you for all your contributions. Please make sure to read the contributing guide (https://github.com/terryum/awesome-deep-learning-papers/blob/master/Contributing.md) before you make a pull request.

License

!CC0 (http://mirrors.creativecommons.org/presskit/buttons/88x31/svg/cc-zero.svg) (https://creativecommons.org/publicdomain/zero/1.0/)