@inproceedings{oai:hiroshima-cu.repo.nii.ac.jp:00001836,
  author    = {積際, 早紀 and 黒澤, 義明 and 目良, 和也 and 竹澤, 寿幸 and TSUMUGIWA, Saki and KUROSAWA, Yoshiaki and MERA, Kazuya and TAKEZAWA, Toshiyuki},
  yomi      = {ツムギワ, サキ and クロサワ, ヨシアキ and メラ, カズヤ and タケザワ, トシユキ},
  title     = {深層学習を用いたアパレルアイテム平置き画像から着装状態への変換},
  booktitle = {人工知能学会全国大会論文集},
  volume    = {33},
  pages     = {1--4},
  year      = {2019},
  publisher = {人工知能学会},
  abstract  = {This paper deals with image-to-image translation of apparel items. Such images are difficult to translate because the items are photographed in various configurations: laid flat, placed on a mannequin, and so on. We investigate and improve on the previous work known as 'pix2pix', which is based on deep neural networks, in particular the deep convolutional generative adversarial network (DCGAN), and propose a new two-stage procedure. Experiments showed that our proposed method was superior to the previous work when evaluated with the structural similarity (SSIM) index. Moreover, visual inspection confirmed that it reproduces item details (zippers, buttons) and patterns (dots). This is important because a generated image that, for example, lacks the buttons would be completely different from the original item image.},
  note      = {FY2019 (33rd annual conference): June 4--7, 2019, Niigata City, Niigata Prefecture (Toki Messe Niigata Convention Center), paper 3Rin2-21. Part of this research was supported by the Center of Innovation (COI) Program of the Japan Science and Technology Agency (JST) and by Hiroshima City University Specified Research Funds (Advanced Academic Research Fund, H27--29; FY H30 KAKENHI acquisition support fund).}
}
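
The abstract above reports evaluation with the structural similarity (SSIM) index. As a minimal illustrative sketch (not taken from the paper), comparing a generated "worn" image against its ground-truth photograph could be done with scikit-image as follows; the file names are hypothetical placeholders.

from skimage.io import imread
from skimage.color import rgb2gray
from skimage.metrics import structural_similarity

# Hypothetical file names; both images are assumed to have identical dimensions.
generated = rgb2gray(imread("generated_worn_item.png"))     # model output
reference = rgb2gray(imread("ground_truth_worn_item.png"))  # real worn-state photo

# rgb2gray yields floats in [0, 1], hence data_range=1.0.
# Higher SSIM means the generated image is structurally closer to the reference.
score = structural_similarity(generated, reference, data_range=1.0)
print(f"SSIM: {score:.4f}")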