@InProceedings{neffOAGM2017,
author = {Neff, Thomas and Payer, Christian and Stern, Darko and Urschler, Martin},
title = {Generative Adversarial Network based Synthesis for Supervised Medical Image Segmentation},
booktitle = {Proceedings of the OAGM\&ARW Joint Workshop},
year = {2017},
month = may,
pages = {140--145},
doi = {10.3217/978-3-85125-524-9-30},
url = {http://castor.tugraz.at/doku/OAGM-ARWWorkshop2017/oagm-arw-17_paper_30.pdf},
best_paper_award = {true},
}
@InProceedings{payerPoseTrack,
author = {Payer, Christian and Neff, Thomas and Bischof, Horst and Urschler, Martin and Stern, Darko},
title = {Simultaneous Multi-Person Detection and Single-Person Pose Estimation With a Single Heatmap Regression Network},
booktitle = {ICCV 2017 PoseTrack Challenge: Human Pose Estimation and Tracking in the Wild},
address = {Venice, Italy},
day = {23},
year = {2017},
month = oct,
url = {https://posetrack.net/workshops/iccv2017/pdfs/ICG.pdf}
}
@InProceedings{neffOAGM2018,
title = {Generative Adversarial Networks to Synthetically Augment Data for Deep Learning based Image Segmentation},
author = {Neff, Thomas and Payer, Christian and Stern, Darko and Urschler, Martin},
year = {2018},
month = may,
doi = {10.3217/978-3-85125-603-1-07},
pages = {22--29},
editor = {Welk, Martin and Roth, Peter M. and Urschler, Martin},
booktitle = {Proceedings of the OAGM Workshop 2018},
publisher = {Verlag der Technischen Universität Graz},
url = {https://diglib.tugraz.at/download.php?id=5b3619809d758&location=browse}
}
@InProceedings{payerMICCAI2018,
title = "Instance segmentation and tracking with cosine embeddings and recurrent hourglass networks",
abstract = "Different to semantic segmentation, instance segmentation assigns unique labels to each individual instance of the same class. In this work, we propose a novel recurrent fully convolutional network architecture for tracking such instance segmentations over time. The network architecture incorporates convolutional gated recurrent units (ConvGRU) into a stacked hourglass network to utilize temporal video information. Furthermore, we train the network with a novel embedding loss based on cosine similarities, such that the network predicts unique embeddings for every instance throughout videos. Afterwards, these embeddings are clustered among subsequent video frames to create the final tracked instance segmentations. We evaluate the recurrent hourglass network by segmenting left ventricles in MR videos of the heart, where it outperforms a network that does not incorporate video information. Furthermore, we show applicability of the cosine embedding loss for segmenting leaf instances on still images of plants. Finally, we evaluate the framework for instance segmentation and tracking on six datasets of the ISBI celltracking challenge, where it shows state-of-the-art performance.",
keywords = "Cell, Embeddings, Instances, Recurrent, Segmentation, Tracking, Video",
author = {Payer, Christian and Štern, Darko and Neff, Thomas and Bischof, Horst and Urschler, Martin},
year = "2018",
month = "9",
doi = "10.1007/978-3-030-00934-2_1",
isbn = "9783030009335",
series = "Lecture Notes in Computer Science",
publisher = "Springer Verlag Heidelberg",
pages = "3--11",
booktitle = "Medical Image Computing and Computer Assisted Intervention – MICCAI 2018 - 21st International Conference, 2018, Proceedings",
url = {https://link.springer.com/chapter/10.1007/978-3-030-00934-2_1}
}
@inproceedings{ismar2018_demo_19,
author = "Mueller, Joerg H. and Voglreiter, Philip and Dokter, Mark and Neff, Thomas and Makar, Mina and Steinberger, Markus and Schmalstieg, Dieter",
title = "Shading Atlas Streaming Demonstration",
year = "2018",
booktitle = "Adjunct Proceedings of the IEEE International Symposium for Mixed and Augmented Reality 2018"
}
@article{mueller2018,
author = {Mueller, Joerg H. and Voglreiter, Philip and Dokter, Mark and Neff, Thomas and Makar, Mina and Steinberger, Markus and Schmalstieg, Dieter},
title = {Shading Atlas Streaming},
journal = {ACM Transactions on Graphics},
issue_date = {November 2018},
volume = {37},
number = {6},
month = nov,
year = {2018},
articleno = {199},
numpages = {16},
doi = {10.1145/3272127.3275087},
url = {https://dl.acm.org/doi/10.1145/3272127.3275087},
publisher = {ACM},
address = {New York, NY, USA},
}
@inproceedings{Mueller:2019:SAS:3305367.3327981,
author = {Mueller, Joerg H. and Neff, Thomas and Voglreiter, Philip and Makar, Mina and Steinberger, Markus and Schmalstieg, Dieter},
title = {Shading Atlas Streaming Demonstration},
booktitle = {ACM SIGGRAPH 2019 Emerging Technologies},
series = {SIGGRAPH '19},
year = {2019},
isbn = {978-1-4503-6308-2},
location = {Los Angeles, California},
pages = {22:1--22:2},
articleno = {22},
numpages = {2},
url = {http://doi.acm.org/10.1145/3305367.3327981},
doi = {10.1145/3305367.3327981},
acmid = {3327981},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {object-space shading, shading, streaming, texture atlas, virtual reality},
}
@article{10.1145/3446790,
author = {Mueller, Joerg H. and Neff, Thomas and Voglreiter, Philip and Steinberger, Markus and Schmalstieg, Dieter},
title = {Temporally Adaptive Shading Reuse for Real-Time Rendering and Virtual Reality},
year = {2021},
issue_date = {May 2021},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
volume = {40},
number = {2},
issn = {0730-0301},
url = {https://doi.org/10.1145/3446790},
doi = {10.1145/3446790},
abstract = {Temporal coherence has the potential to enable a huge reduction of shading costs in rendering. Existing techniques focus either only on spatial shading reuse or cannot adaptively choose temporal shading frequencies. We find that temporal shading reuse is possible for extended periods of time for a majority of samples, and we show under which circumstances users perceive temporal artifacts. Our analysis implies that we can approximate shading gradients to efficiently determine when and how long shading can be reused. Whereas visibility usually stays temporally coherent from frame to frame for more than 90%, we find that even in heavily animated game scenes with advanced shading, typically more than 50% of shading is also temporally coherent. To exploit this potential, we introduce a temporally adaptive shading framework and apply it to two real-time methods. Its application saves more than 57% of the shader invocations, reducing overall rendering times up to in virtual reality applications without a noticeable loss in visual quality. Overall, our work shows that there is significantly more potential for shading reuse than currently exploited.},
journal = {ACM Trans. Graph.},
month = apr,
articleno = {11},
numpages = {14},
keywords = {Shading, temporal shading reuse, temporal coherence, virtual reality, texture-space shading, shading difference}
}
@article{neff2021donerf,
journal = {Computer Graphics Forum},
title = {{DONeRF: Towards Real-Time Rendering of Compact Neural Radiance Fields using Depth Oracle Networks}},
author = {Neff, Thomas and Stadlbauer, Pascal and Parger, Mathias and Kurz, Andreas and Mueller, Joerg H. and Chaitanya, Chakravarty R. Alla and Kaplanyan, Anton S. and Steinberger, Markus},
year = {2021},
publisher = {The Eurographics Association and John Wiley \& Sons Ltd.},
issn = {1467-8659},
doi = {10.1111/cgf.14340},
url = {https://doi.org/10.1111/cgf.14340},
volume = {40},
number = {4},
}
@article{neff2022meshlets,
author = {Neff, Thomas and Mueller, Joerg H. and Steinberger, Markus and Schmalstieg, Dieter},
title = {Meshlets and How to Shade Them: A Study on Texture-Space Shading},
journal = {Computer Graphics Forum},
volume = {41},
number = {2},
pages = {277--287},
keywords = {rendering, texturing},
doi = {10.1111/cgf.14474},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.14474},
eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1111/cgf.14474},
year = {2022}
}
@inproceedings{kurz2022adanerf,
title = {AdaNeRF: Adaptive Sampling for Real-time Rendering of Neural Radiance Fields},
author = {Kurz, Andreas and Neff, Thomas and Lv, Zhaoyang and Zollh\"{o}fer, Michael and Steinberger, Markus},
booktitle = {Computer Vision -- ECCV 2022},
year = {2022},
publisher = {Springer Nature Switzerland},
address = {Cham},
pages = {254--270},
isbn = {978-3-031-19790-1},
url = {https://www.ecva.net/papers/eccv_2022/papers_ECCV/html/6513_ECCV_2022_paper.php},
}