diff --git a/_cite/.cache/cache.db b/_cite/.cache/cache.db
index 0c0ea88..87e6554 100644
Binary files a/_cite/.cache/cache.db and b/_cite/.cache/cache.db differ
diff --git a/_data/citations.yaml b/_data/citations.yaml
index cd10f7d..5d6bbe5 100644
--- a/_data/citations.yaml
+++ b/_data/citations.yaml
@@ -17,6 +17,26 @@
   image: images/publication_thumbnails/eval-checklist.png
   plugin: sources.py
   file: sources.yaml
+- id: arxiv:2503.20191
+  title: 'Maya: Optimizing Deep Learning Training Workloads using Emulated Virtual
+    Accelerators'
+  authors:
+    - Srihas Yarlagadda
+    - Amey Agrawal
+    - Elton Pinto
+    - Hakesh Darapaneni
+    - Mitali Meratwal
+    - Shivam Mittal
+    - Pranavi Bajjuri
+    - Srinivas Sridharan
+    - Alexey Tumanov
+  publisher: 21st European Conference on Computer Systems (EuroSys 2026), Edinburgh,
+    April 2026
+  date: '2025-03-27'
+  link: https://arxiv.org/abs/2503.20191
+  image: images/publication_thumbnails/maya.png
+  plugin: sources.py
+  file: sources.yaml
 - id: arxiv:2502.14051
   title: 'RocketKV: Accelerating Long-Context LLM Inference via Two-Stage KV Cache
     Compression'
diff --git a/_data/sources.yaml b/_data/sources.yaml
index 4df2ec9..4ce4aec 100644
--- a/_data/sources.yaml
+++ b/_data/sources.yaml
@@ -1,6 +1,7 @@
 - id: arxiv:2507.09019 # on evaluating llm inf..
   image: images/publication_thumbnails/eval-checklist.png
-- id: arxiv:2502.14051 # maya
+- id: arxiv:2503.20191 # maya
+  publisher: "21st European Conference on Computer Systems (EuroSys 2026), Edinburgh, April 2026"
   image: images/publication_thumbnails/maya.png
 - id: arxiv:2502.14051 # rocketkv
   image: images/publication_thumbnails/rocketkv.png
diff --git a/_members/chus-antonanzas.md b/_members/chus-antonanzas.md
index c7ba6b4..4a4e3a8 100644
--- a/_members/chus-antonanzas.md
+++ b/_members/chus-antonanzas.md
@@ -1,7 +1,7 @@
 ---
 name: Chus Antonanzas
 image: https://chus.space/static/me.jpg
-role: masters
+role: masters-alumni
 aliases:
   - Chus Antonanzas
   - Jesus M. Antonanzas
@@ -11,4 +11,4 @@ links:
   github: chus-chus
 ---
 
-⚡️ At the SAIL lab, I'm helping create infrastructure for the next generation of AI systems (visit my website for more!). ⚡️
\ No newline at end of file
+⚡️ At the SAIL lab, I'm helping create infrastructure for the next generation of AI systems (visit my website for more!). ⚡️
diff --git a/_members/monish-r.md b/_members/monish-r.md
deleted file mode 100644
index 03f1529..0000000
--- a/_members/monish-r.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-name: Monish R
-image: https://media.licdn.com/dms/image/D5603AQHQ2rKNFM7naQ/profile-displayphoto-shrink_400_400/0/1696646256304?e=1715817600&v=beta&t=KqrqXKr6MpCgLHHrMiRyC_ydcUgMdrQjRfOByFjHu78
-role: masters-alumni
-links:
-  home-page: https://www.linkedin.com/in/monish-r-897708117/
----
-
-My primary focus is Parallel Computing of training algorithms for Machine Learning and Neural Networks. I hope to one day drive new models for data mining that are optimized for existing and future hardware.
\ No newline at end of file
diff --git a/index.md b/index.md
index 5816694..651b7c2 100644
--- a/index.md
+++ b/index.md
@@ -5,6 +5,7 @@ The System for AI Lab (SAIL) at Georgia Tech, led by Prof. Alexey Tumanov, speci
 
 # Recent News
 
+- Our paper on GPU cluster emulation, [Maya](https://arxiv.org/pdf/2503.20191), has been accepted at EuroSys'26!
 - 🎉 Congratulations to Payman Behnam, Amey Agrawal, Alind Khare, and Dhruv Garg! Three papers accepted at ACM SIGOPS Operating Systems Review, July 2025.
 - We are looking for contributors for our new inference engine [Vajra](https://project-vajra.github.io/). ⚡️
 - Our paper on common anti-patterns in LLM inference system evaluations is now on [arXiv](https://arxiv.org/pdf/2507.09019).