almost done
@@ -1,2 +0,0 @@
.next/
node_modules/

.gitea/workflows/main.yaml (new file, 62 lines)
@@ -0,0 +1,62 @@
name: Next.js App CI

on:
  push:
    branches: [ "main" ]

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Login to Gitea Package Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ vars.LOCAL_REGISTRY }}
          username: ${{ vars.USER }}
          password: ${{ secrets.TOKEN }}

      # --- ADDED: Step to generate image assets before the build ---
      - name: Generate Static Image Assets
        run: |
          # Install dependencies required by the scripts
          sudo apt-get update && sudo apt-get install -y imagemagick webp

          # Make scripts executable
          chmod +x ./scripts/*.sh

          # Run the scripts to generate PNGs from PDFs and create WebP images
          echo "Generating PNGs from PDFs..."
          bash ./scripts/first_page_image.sh
          echo "Generating WebP images..."
          bash ./scripts/generate_webp.sh

      # --- REPLACED: The following steps are changed from Ruby/Jekyll to Node.js/Next.js ---
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20.x' # Use a current LTS version of Node.js
          cache: 'npm'

      - name: Install Node.js Dependencies
        run: npm ci # 'npm ci' is recommended in CI for faster, more reliable installs

      - name: Build Next.js App
        run: npm run build
      # --- END OF REPLACED STEPS ---

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # --- UNCHANGED: This step remains the same as requested ---
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64
          push: true
          tags: |
            ${{ vars.LOCAL_REGISTRY }}/${{ gitea.repository_owner }}/website:latest
            ${{ vars.LOCAL_REGISTRY }}/${{ gitea.repository_owner }}/website:${{ gitea.sha }}
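Note on the "Generate Static Image Assets" step above: it assumes two helper scripts committed under ./scripts/ whose contents are not part of this diff. As a rough illustration only, a minimal generate_webp.sh compatible with the packages installed in that step (imagemagick, webp) could look like the sketch below; the ./public/images path and the quality setting are assumptions, not taken from the repository.

#!/usr/bin/env bash
# Hypothetical sketch -- NOT the repository's actual ./scripts/generate_webp.sh.
# Converts PNG/JPEG assets to WebP using cwebp from the "webp" package.
set -euo pipefail

find ./public/images -type f \( -name '*.png' -o -name '*.jpg' -o -name '*.jpeg' \) -print0 |
while IFS= read -r -d '' src; do
  out="${src%.*}.webp"
  # Only regenerate when the source is newer than the existing WebP file.
  if [ ! -f "$out" ] || [ "$src" -nt "$out" ]; then
    cwebp -quiet -q 80 "$src" -o "$out"
  fi
done

first_page_image.sh would presumably render page 1 of each PDF to PNG in a similar loop (e.g., via ImageMagick), but that is equally speculative.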
@@ -1,44 +0,0 @@
name: Jekyll site CI

on:
  push:
    branches: [ "main" ]

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Login to Gitea Package Registry
        # (Test credentials early, before failing late in the process.)
        uses: docker/login-action@v3
        with:
          registry: ${{ vars.LOCAL_REGISTRY }}
          username: ${{ vars.USER }}
          password: ${{ secrets.TOKEN }}

      - name: Setup Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: '3.3'

      - name: Install GEM bundle
        run: bundle install

      - name: Build Website using Jekyll
        run: JEKYLL_ENV=production bundle exec jekyll build --trace --future

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64 # ,linux/arm64
          push: true
          tags: |
            ${{ vars.LOCAL_REGISTRY }}/${{ gitea.repository_owner }}/website:latest
            ${{ vars.LOCAL_REGISTRY }}/${{ gitea.repository_owner }}/website:${{ gitea.sha }}
@@ -1,11 +1,11 @@
 import { getPostBySlug, getPostSlugs } from '@/lib/mdx';
-import { CitationProvider } from '@/context/citation-context';
-import { ReferencesContainer } from '@/components/references-container';
+import { CitationProvider } from '@/components/context-citation';
+import { ReferencesContainer } from '@/components/container-references';
 import { notFound } from 'next/navigation';
 import Image from 'next/image';
-import { DATA } from '@/data/resume';
+import { DATA } from '@/app/resume';
 import { getPublicationsData } from '@/lib/publications';
-import { CustomMDX } from '@/components/custom-mdx';
+import { CustomMDX } from '@/components/mdx-custom';
 import Link from 'next/link';

 export async function generateStaticParams() {
@@ -13,8 +13,8 @@ export async function generateStaticParams() {
   return slugs.map((slug) => ({ slug }));
 }

-export async function generateMetadata({ params }: { params: { slug: string } }) {
-  const post = await getPostBySlug('blog', params.slug);
+export async function generateMetadata({ params: { slug } }: { params: { slug: string } }) {
+  const post = await getPostBySlug('blog', slug);
   if (!post) {
     return {};
   }
@@ -24,8 +24,8 @@ export async function generateMetadata({ params }: { params: { slug: string } })
   };
 }

-export default async function BlogPage({ params }: { params: { slug: string } }) {
-  const post = await getPostBySlug('blog', params.slug);
+export default async function BlogPage({ params: { slug } }: { params: { slug: string } }) {
+  const post = await getPostBySlug('blog', slug);
   const publications = getPublicationsData();

   if (!post) {
@@ -1,6 +1,6 @@
 import { getSortedPostsData } from "@/lib/posts";
 import { ProjectCard } from "@/components/project-card";
-import BlurFade from "@/components/magicui/blur-fade";
+import { BlurFade } from "@/components/magicui/blur-fade";

 const BLUR_FADE_DELAY = 0.04;

@@ -1,9 +1,11 @@
-import { DATA } from "@/data/resume";
+import { DATA } from "@/app/resume";
 import Image from "next/image";
 import Link from "next/link";
-import BlurFade from "@/components/magicui/blur-fade";

 import { Button } from "@/components/ui/button";
 import { CenteredImage } from "@/components/centered-image";
+import { BlurFade } from "@/components/magicui/blur-fade";
+import { TrackedLink } from "@/components/util-tracked-link";

 const BLUR_FADE_DELAY = 0.05;

@@ -37,20 +39,22 @@ export default function ConnectPage() {
         <BlurFade delay={BLUR_FADE_DELAY * 3}>
           <div className="grid grid-cols-2 sm:grid-cols-3 gap-2 w-full">
             {socialLinks.map(([name, social]) => (
-              <Link href={social.url} key={name} target="_blank">
+              <TrackedLink href={social.url} key={name} eventName={`${name}-social`} target="_blank">
                 <Button variant="outline" className="w-full">
                   <social.icon className="size-4 mr-2" />
                   {name}
                 </Button>
-              </Link>
+              </TrackedLink>
             ))}
           </div>
         </BlurFade>

         <div className="flex w-full flex-col items-center space-y-4 pb-8">
-          <BlurFade delay={BLUR_FADE_DELAY * 4} className="w-full px-8">
-            <hr className="w-full" />
-          </BlurFade>
+          <div className="w-full px-8">
+            <BlurFade delay={BLUR_FADE_DELAY * 4}>
+              <hr className="w-full" />
+            </BlurFade>
+          </div>

           <BlurFade delay={BLUR_FADE_DELAY * 5}>
             <a href="/images/qr.png" download="SteffenIllium-QRCode.png">
app/experience/[slug]/page.tsx (new file, 48 lines)
@@ -0,0 +1,48 @@
import { getPostBySlug, getPostSlugs } from '@/lib/mdx';
import { notFound } from 'next/navigation';
import { DATA } from '@/app/resume';
import { getPublicationsData } from '@/lib/publications';
import { Article } from '@/components/page-article';

export async function generateStaticParams() {
  const slugs = getPostSlugs('experience');
  return slugs.map((slug) => ({ slug }));
}

export async function generateMetadata({ params }: { params: { slug: string } }) {
  // FIX: Await params to get slug for Next.js 15
  const { slug } = await params;

  const post = await getPostBySlug('experience', slug);
  if (!post) { return {}; }
  return {
    title: post.frontmatter.title,
    description: post.frontmatter.teaser || DATA.description,
  };
}

export default async function ExperiencePage({ params }: { params: { slug: string } }) {
  // FIX: Await params to get slug for Next.js 15
  const { slug } = await params;

  const post = await getPostBySlug('experience', slug);
  const publications = getPublicationsData();

  if (!post) {
    notFound();
  }

  // --- Navigation Logic ---
  const allSlugs = getPostSlugs('experience');
  const currentIndex = allSlugs.findIndex((s) => s === slug);
  const prevSlug = currentIndex > 0 ? allSlugs[currentIndex - 1] : null;
  const nextSlug = currentIndex < allSlugs.length - 1 ? allSlugs[currentIndex + 1] : null;
  const prevPost = prevSlug ? await getPostBySlug('experience', prevSlug) : null;
  const nextPost = nextSlug ? await getPostBySlug('experience', nextSlug) : null;
  const navigation = {
    prev: prevPost ? { slug: prevSlug, title: prevPost.frontmatter.title } : null,
    next: nextPost ? { slug: nextSlug, title: nextPost.frontmatter.title } : null,
  };

  return <Article post={post} publications={publications} navigation={navigation} basePath="experience" />;
}
@@ -1,18 +1,18 @@
|
||||
// src/pages/teaching.tsx (or wherever your teaching page is)
|
||||
// src/pages/experience.tsx
|
||||
|
||||
import { getSortedPostsData } from "@/lib/posts";
|
||||
import { ProjectCard } from "@/components/project-card";
|
||||
import { ProjectListItem } from "@/components/project-list-item"; // Import the new component
|
||||
import BlurFade from "@/components/magicui/blur-fade";
|
||||
import { ExperienceCard } from "@/components/list-item"; // Import the new component
|
||||
import { BlurFade } from "@/components/magicui/blur-fade";
|
||||
|
||||
const BLUR_FADE_DELAY = 0.04;
|
||||
|
||||
export default function TeachingPage() {
|
||||
const posts = getSortedPostsData("teaching");
|
||||
export default function ExperiencePage() {
|
||||
const posts = getSortedPostsData("experience");
|
||||
|
||||
// Filter out posts that might not be suitable for a list item if needed,
|
||||
// or ensure your getSortedPostsData provides necessary fields for both.
|
||||
const teachingProjects = posts.filter((post) => post.title);
|
||||
const experiencePosts = posts.filter((post) => post.title);
|
||||
|
||||
return (
|
||||
<main className="flex flex-col min-h-[100dvh] space-y-10">
|
||||
@@ -20,22 +20,20 @@ export default function TeachingPage() {
|
||||
<div className="mx-auto w-full max-w-6xl space-y-8">
|
||||
<div className="space-y-2">
|
||||
<h1 className="text-3xl font-bold tracking-tighter sm:text-5xl xl:text-6xl/none">
|
||||
Teaching
|
||||
Experience
|
||||
</h1>
|
||||
<p className="text-muted-foreground">
|
||||
At LMU Munich, I mentored undergraduate and graduate students, contributed to large-scale lectures, and led practical seminars and courses. The following sections detail my teaching experience.<br/>
|
||||
<br/>
|
||||
For a detailed list please visit my <b><a href="https://www.mobile.ifi.lmu.de/team/steffen-illium/">LMU profile page</a></b>.
|
||||
My professional experience encompasses both hands-on systems engineering and academic instruction. I've worked at the intersection of machine learning and complex systems, with projects ranging from exploring emergent behavior in AI to managing cluster infrastructure. In my role at <b><a href="https://www.mobile.ifi.lmu.de/team/steffen-illium/">LMU Munich</a></b>, I further developed this experience by mentoring students, contributing to lectures, and leading practical seminars.<br/>
|
||||
</p>
|
||||
</div>
|
||||
<hr />
|
||||
<div className="flex flex-col space-y-4"> {/* Use flex-col for list items */}
|
||||
{teachingProjects.map((post, id) => (
|
||||
<div className="flex flex-col space-y-4">
|
||||
{experiencePosts.map((post, id) => (
|
||||
<BlurFade
|
||||
key={post.title + "-list"} // Use a different key to avoid collisions if both sections are rendered
|
||||
delay={BLUR_FADE_DELAY * 2 + id * 0.05} // You might want to adjust delay for list items
|
||||
key={post.title + "-list"}
|
||||
delay={BLUR_FADE_DELAY * 2 + id * 0.005}
|
||||
>
|
||||
<ProjectListItem
|
||||
<ExperienceCard
|
||||
href={post.href}
|
||||
key={post.title}
|
||||
title={post.title!}
|
||||
|
(Binary image changed: 1.1 KiB before, 1.1 KiB after.)

app/globals.css (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
/* file: app/globals.css */
|
||||
@import "tailwindcss";
|
||||
@import "tw-animate-css";
|
||||
/*
|
||||
The :root and .dark variables for shadcn/ui theming remain standard CSS.
|
||||
*/
|
||||
@layer base {
|
||||
:root {
|
||||
--background: 0 0% 100%;
|
||||
--foreground: 240 10% 3.9%;
|
||||
--card: 0 0% 100%;
|
||||
--card-foreground: 240 10% 3.9%;
|
||||
--popover: 0 0% 100%;
|
||||
--popover-foreground: 240 10% 3.9%;
|
||||
--primary: 240 5.9% 10%;
|
||||
--primary-foreground: 0 0% 98%;
|
||||
--secondary: 240 4.8% 95.9%;
|
||||
--secondary-foreground: 240 5.9% 10%;
|
||||
--muted: 240 4.8% 95.9%;
|
||||
--muted-foreground: 240 3.8% 46.1%;
|
||||
--accent: 240 4.8% 95.9%;
|
||||
--accent-foreground: 240 5.9% 10%;
|
||||
/* ADDED: A slightly darker shade for hover in light mode */
|
||||
--accent-hover: 240 5% 94%;
|
||||
--destructive: 0 84.2% 60.2%;
|
||||
--destructive-foreground: 0 0% 98%;
|
||||
--border: 240 5.9% 90%;
|
||||
--input: 240 5.9% 90%;
|
||||
--ring: 240 10% 3.9%;
|
||||
--radius: 0.5rem;
|
||||
--shadow-glow: 0 0 15px 0 hsl(var(--foreground) / 0.075);
|
||||
--shadow-glow-hover: 0 0 25px 0 hsl(var(--foreground) / 0.25);
|
||||
}
|
||||
.dark {
|
||||
--background: 240 10% 3.9%;
|
||||
--foreground: 0 0% 98%;
|
||||
--card: 240 10% 3.9%;
|
||||
--card-foreground: 0 0% 98%;
|
||||
--popover: 240 10% 3.9%;
|
||||
--popover-foreground: 0 0% 98%;
|
||||
--primary: 0 0% 98%;
|
||||
--primary-foreground: 240 5.9% 10%;
|
||||
--secondary: 240 3.7% 15.9%;
|
||||
--secondary-foreground: 0 0% 98%;
|
||||
--muted: 240 3.7% 15.9%;
|
||||
--muted-foreground: 240 5% 64.9%;
|
||||
--accent: 240 3.7% 15.9%;
|
||||
--accent-foreground: 0 0% 98%;
|
||||
--accent-hover: 240 3.7% 19.9%;
|
||||
--destructive: 0 62.8% 30.6%;
|
||||
--destructive-foreground: 0 0% 98%;
|
||||
--border: 240 3.7% 18%;
|
||||
--input: 240 3.7% 15.9%;
|
||||
--ring: 240 4.9% 83.9%;
|
||||
--radius: 0.5rem;
|
||||
--shadow-glow: 0 0 15px 0 hsl(var(--foreground) / 0.075);
|
||||
--shadow-glow-hover: 0 0 25px 0 hsl(var(--foreground) / 0.25);
|
||||
}
|
||||
* {
|
||||
border-color: theme(colors.border);
|
||||
}
|
||||
a {
|
||||
@apply cursor-pointer font-bold text-accent-foreground transition-colors duration-300 hover:underline;
|
||||
}
|
||||
};
|
||||
@theme {
|
||||
--dark-mode: "class";
|
||||
--container-center: true;
|
||||
--container-padding: 2rem;
|
||||
--screen-2xl: 1400px;
|
||||
--color-border: hsl(var(--border));
|
||||
--color-input: hsl(var(--input));
|
||||
--color-ring: hsl(var(--ring));
|
||||
--color-background: hsl(var(--background));
|
||||
--color-foreground: hsl(var(--foreground));
|
||||
--color-primary: hsl(var(--primary));
|
||||
--color-primary-foreground: hsl(var(--primary-foreground));
|
||||
--color-secondary: hsl(var(--secondary));
|
||||
--color-secondary-foreground: hsl(var(--secondary-foreground));
|
||||
--color-destructive: hsl(var(--destructive));
|
||||
--color-destructive-foreground: hsl(var(--destructive-foreground));
|
||||
--color-muted: hsl(var(--muted));
|
||||
--color-muted-foreground: hsl(var(--muted-foreground));
|
||||
--color-accent: hsl(var(--accent));
|
||||
--color-accent-foreground: hsl(var(--accent-foreground));
|
||||
/* ADDED: Make the new variable available to Tailwind */
|
||||
--color-accent-hover: hsl(var(--accent-hover));
|
||||
--color-popover: hsl(var(--popover));
|
||||
--color-popover-foreground: hsl(var(--popover-foreground));
|
||||
--color-card: hsl(var(--card));
|
||||
--color-card-foreground: hsl(var(--card-foreground));
|
||||
--border-radius-lg: var(--radius);
|
||||
--border-radius-md: calc(var(--radius) - 2px);
|
||||
--border-radius-sm: calc(var(--radius) - 4px);
|
||||
--animation-accordion-down: accordion-down 0.2s ease-out;
|
||||
--animation-accordion-up: accordion-up 0.2s ease-out;
|
||||
}
|
||||
@layer components {
|
||||
.cards {
|
||||
@apply no-underline transition-all duration-300 ease-out shadow-sm hover:bg-accent-hover hover:shadow-lg dark:shadow-[var(--shadow-glow)] dark:hover:shadow-[var(--shadow-glow-hover)];
|
||||
}
|
||||
}
|
||||
@keyframes accordion-down {
|
||||
from {
|
||||
height: 0;
|
||||
}
|
||||
to {
|
||||
height: var(--radix-accordion-content-height);
|
||||
}
|
||||
}
|
||||
@keyframes accordion-up {
|
||||
from {
|
||||
height: var(--radix-accordion-content-height);
|
||||
}
|
||||
to {
|
||||
height: 0;
|
||||
}
|
||||
}
|
||||
::view-transition-old(root), ::view-transition-new(root) {
|
||||
animation: none;
|
||||
mix-blend-mode: normal;
|
||||
}
|
||||
@@ -1,12 +1,12 @@
|
||||
import { Header } from "@/components/header";
|
||||
import { Header } from "@/components/container-header";
|
||||
import Navbar from "@/components/navbar";
|
||||
import { ThemeProvider } from "@/components/theme-provider";
|
||||
import { TooltipProvider } from "@/components/ui/tooltip";
|
||||
import { DATA } from "@/data/resume";
|
||||
import { DATA } from "@/app/resume";
|
||||
import { cn } from "@/lib/utils";
|
||||
import type { Metadata } from "next";
|
||||
import { Inter as FontSans } from "next/font/google";
|
||||
import "./globals.css";
|
||||
import { Providers } from "@/components/providers";
|
||||
import { Footer } from "@/components/footer";
|
||||
|
||||
const fontSans = FontSans({
|
||||
subsets: ["latin"],
|
||||
@@ -63,13 +63,12 @@ export default function RootLayout({
|
||||
)}
|
||||
>
|
||||
<script defer src="https://umami.steffenillium.de/script.js" data-website-id="170441c3-f9ca-4dea-9f44-ba0573b0f9e5"></script>
|
||||
<ThemeProvider attribute="class" defaultTheme="light">
|
||||
<TooltipProvider delayDuration={0}>
|
||||
<Header />
|
||||
{children}
|
||||
<Navbar />
|
||||
</TooltipProvider>
|
||||
</ThemeProvider>
|
||||
<Providers>
|
||||
<Header />
|
||||
{children}
|
||||
<Footer />
|
||||
<Navbar />
|
||||
</Providers>
|
||||
</body>
|
||||
</html>
|
||||
);
|
||||
@@ -1,16 +1,15 @@
|
||||
|
||||
import { getSortedPostsData } from "@/lib/posts";
|
||||
import BlurFade from "@/components/magicui/blur-fade";
|
||||
import BlurFadeText from "@/components/magicui/blur-fade-text";
|
||||
import { ProjectCard } from "@/components/project-card";
|
||||
import { ResumeCard } from "@/components/resume-card";
|
||||
import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar";
|
||||
import { Badge } from "@/components/ui/badge";
|
||||
import { DATA } from "@/data/resume";
|
||||
import { DATA } from "@/app/resume";
|
||||
import Markdown from "react-markdown";
|
||||
//import {ContactCard} from "@/components/contact-card";
|
||||
import {Button} from "@/components/ui/button";
|
||||
import Link from "next/link";
|
||||
import { TextAnimate } from "@/components/magicui/text-animate";
|
||||
import { BlurFade } from "@/components/magicui/blur-fade";
|
||||
import { TrackedLink } from "@/components/util-tracked-link";
|
||||
|
||||
const BLUR_FADE_DELAY = 0.04;
|
||||
|
||||
@@ -22,17 +21,12 @@ export default function Page() {
|
||||
<div className="mx-auto w-full max-w-2xl space-y-8">
|
||||
<div className="flex flex-col sm:flex-row items-center sm:items-start sm:justify-between gap-4">
|
||||
<div className="flex-col flex flex-1 space-y-1.5 items-center sm:items-start">
|
||||
<BlurFadeText
|
||||
delay={BLUR_FADE_DELAY}
|
||||
className="text-3xl font-bold tracking-tighter sm:text-5xl xl:text-6xl/none text-center sm:text-left"
|
||||
yOffset={8}
|
||||
text={`Hi, I'm ${DATA.name.split(" ")[0]}`}
|
||||
/>
|
||||
<BlurFadeText
|
||||
className="max-w-[400px] md:text-xl text-center sm:text-left"
|
||||
delay={BLUR_FADE_DELAY}
|
||||
text={"AI Consultant & Researcher with a PhD in Computer Science."}
|
||||
/>
|
||||
<TextAnimate delay={BLUR_FADE_DELAY} className="text-3xl font-bold tracking-tighter sm:text-5xl xl:text-6xl/none text-center sm:text-left">
|
||||
{`Hi, I'm ${DATA.name.split(" ")[0]}`}
|
||||
</TextAnimate>
|
||||
<TextAnimate className="max-w-[400px] md:text-xl text-center sm:text-left" delay={BLUR_FADE_DELAY}>
|
||||
AI Consultant & Researcher with a PhD in Computer Science.
|
||||
</TextAnimate>
|
||||
</div>
|
||||
<div className="flex flex-col items-center gap-y-2">
|
||||
<BlurFade delay={BLUR_FADE_DELAY}>
|
||||
@@ -42,9 +36,11 @@ export default function Page() {
|
||||
</Avatar>
|
||||
</BlurFade>
|
||||
<BlurFade delay={BLUR_FADE_DELAY * 2}>
|
||||
<Link href="/illium_cv_censored.pdf" target="_blank" className="w-full">
|
||||
<Button className="w-full">Download CV</Button>
|
||||
</Link>
|
||||
<TrackedLink href="/illium_cv_censored.pdf" eventName="cd-download" className="w-full" download>
|
||||
<Badge className="flex gap-2 px-3 py-1 text-sm shadow-sm transition-all duration-300 ease-out hover:shadow-lg dark:shadow-[var(--shadow-glow)] dark:hover:shadow-[var(--shadow-glow-hover)]">
|
||||
Download CV
|
||||
</Badge>
|
||||
</TrackedLink>
|
||||
</BlurFade>
|
||||
</div>
|
||||
</div>
|
||||
@@ -54,8 +50,8 @@ export default function Page() {
|
||||
<BlurFade delay={BLUR_FADE_DELAY * 3}>
|
||||
<h2 className="text-xl font-bold">About</h2>
|
||||
</BlurFade>
|
||||
<BlurFade delay={BLUR_FADE_DELAY * 4}>
|
||||
<Markdown className="prose max-w-full text-pretty font-sans text-sm text-muted-foreground dark:prose-invert">
|
||||
<BlurFade delay={BLUR_FADE_DELAY * 4} className="prose max-w-full text-pretty font-sans text-sm text-muted-foreground dark:prose-invert">
|
||||
<Markdown >
|
||||
{DATA.summary}
|
||||
</Markdown>
|
||||
</BlurFade>
|
||||
@@ -110,7 +106,9 @@ export default function Page() {
|
||||
<div className="flex flex-col gap-y-2">
|
||||
{Object.entries(DATA.skills).map(([category, skills], id) => (
|
||||
<div key={id}>
|
||||
<h3 className="text-lg font-semibold">{category}</h3>
|
||||
<BlurFade delay={BLUR_FADE_DELAY * 9.5}>
|
||||
<h3 className="text-lg font-semibold">{category}</h3>
|
||||
</BlurFade>
|
||||
<div className="flex flex-wrap gap-1">
|
||||
{skills.map((skill, id) => (
|
||||
<BlurFade key={skill} delay={BLUR_FADE_DELAY * 10 + id * 0.05}>
|
||||
@@ -182,17 +180,17 @@ export default function Page() {
|
||||
</p>
|
||||
</div>
|
||||
</BlurFade>
|
||||
<BlurFade delay={BLUR_FADE_DELAY * 18}>
|
||||
<BlurFade delay={BLUR_FADE_DELAY * 18}>
|
||||
<div className="flex flex-wrap gap-2 justify-center">
|
||||
{Object.entries(DATA.contact.social)
|
||||
.filter(([_, social]) => social.navbar)
|
||||
//.filter(([_, social]) => !social.pub)
|
||||
.map(([name, social]) => (
|
||||
<Link href={social.url} key={name} target="_blank">
|
||||
<Badge className="flex gap-2 px-3 py-1 text-sm">
|
||||
<TrackedLink href={social.url} key={name} className="group" eventName={`${name}-social`}>
|
||||
<Badge className="cards flex gap-2 px-3 py-1 text-sm">
|
||||
<social.icon className="size-4" />
|
||||
{name}
|
||||
</Badge>
|
||||
</Link>
|
||||
</TrackedLink>
|
||||
))}
|
||||
</div>
|
||||
</BlurFade>
|
||||
@@ -1,8 +1,9 @@
|
||||
import { getPostBySlug, getPostSlugs } from '@/lib/mdx';
|
||||
import { notFound } from 'next/navigation';
|
||||
import { DATA } from '@/data/resume';
|
||||
|
||||
import { getPublicationsData } from '@/lib/publications';
|
||||
import { Article } from '@/components/Article'; // <-- Use the new component
|
||||
import { Article } from '@/components/page-article';
|
||||
import { DATA } from '@/app/resume';
|
||||
|
||||
export async function generateStaticParams() {
|
||||
const slugs = getPostSlugs('projects');
|
||||
@@ -10,16 +11,22 @@ export async function generateStaticParams() {
|
||||
}
|
||||
|
||||
export async function generateMetadata({ params }: { params: { slug: string } }) {
|
||||
const post = await getPostBySlug('projects', params.slug);
|
||||
// FIX: Await params to get slug for Next.js 15
|
||||
const { slug } = await params;
|
||||
|
||||
const post = await getPostBySlug('projects', slug);
|
||||
if (!post) { return {}; }
|
||||
return {
|
||||
title: post.frontmatter.title,
|
||||
description: post.frontmatter.description || DATA.description,
|
||||
description: post.frontmatter.teaser || DATA.description,
|
||||
};
|
||||
}
|
||||
|
||||
export default async function ProjectPage({ params }: { params: { slug: string } }) {
|
||||
const post = await getPostBySlug('projects', params.slug);
|
||||
// FIX: Await params to get slug for Next.js 15
|
||||
const { slug } = await params;
|
||||
|
||||
const post = await getPostBySlug('projects', slug);
|
||||
const publications = getPublicationsData();
|
||||
|
||||
if (!post) {
|
||||
@@ -28,7 +35,7 @@ export default async function ProjectPage({ params }: { params: { slug: string }
|
||||
|
||||
// --- Navigation Logic ---
|
||||
const allSlugs = getPostSlugs('projects');
|
||||
const currentIndex = allSlugs.findIndex((s) => s === params.slug);
|
||||
const currentIndex = allSlugs.findIndex((s) => s === slug);
|
||||
const prevSlug = currentIndex > 0 ? allSlugs[currentIndex - 1] : null;
|
||||
const nextSlug = currentIndex < allSlugs.length - 1 ? allSlugs[currentIndex + 1] : null;
|
||||
const prevPost = prevSlug ? await getPostBySlug('projects', prevSlug) : null;
|
||||
@@ -1,6 +1,6 @@
|
||||
import { getSortedPostsData } from "@/lib/posts";
|
||||
import BlurFade from "@/components/magicui/blur-fade";
|
||||
import { ProjectListItem } from "@/components/project-list-item";
|
||||
import { BlurFade } from "@/components/magicui/blur-fade";
|
||||
import { ExperienceCard } from "@/components/list-item";
|
||||
|
||||
const BLUR_FADE_DELAY = 0.04;
|
||||
|
||||
@@ -12,12 +12,11 @@ export default function ProjectsPage() {
|
||||
<section id="projects">
|
||||
<div className="mx-auto w-full max-w-6xl space-y-8">
|
||||
<div className="space-y-2">
|
||||
<h1 className="text-3xl font-bold tracking-tighter sm:text-5xl xl:text-6xl/none">
|
||||
<h1 className="text-3xl font-bold tracking-tighter sm:text-5xl xl:text-6xl/none mt-12">
|
||||
Projects
|
||||
</h1>
|
||||
<p className="text-muted-foreground">
|
||||
I've worked on a variety of projects, from simple websites to
|
||||
complex web applications. Here are a few of my favorites.
|
||||
My work sits at the intersection of machine learning and systems engineering. I have experience in everything from exploring emergent behavior in AI agents to managing robust, automated infrastructure with Kubernetes. Here are some highlights.
|
||||
</p>
|
||||
</div>
|
||||
<hr />
|
||||
@@ -27,7 +26,7 @@ export default function ProjectsPage() {
|
||||
key={post.title + "-list"} // Use a different key to avoid collisions if both sections are rendered
|
||||
delay={BLUR_FADE_DELAY * 2 + id * 0.05} // You might want to adjust delay for list items
|
||||
>
|
||||
<ProjectListItem
|
||||
<ExperienceCard
|
||||
href={post.href}
|
||||
key={post.title}
|
||||
title={post.title!}
|
||||
@@ -1,10 +1,11 @@
|
||||
import { getPublicationsData } from "@/lib/publications";
|
||||
import { PublicationCard } from "@/components/publication-card";
|
||||
import { DATA } from "@/data/resume";
|
||||
import { DATA } from "@/app/resume";
|
||||
import Link from "next/link";
|
||||
import { Badge } from "@/components/ui/badge";
|
||||
import fs from "fs";
|
||||
import path from "path";
|
||||
import { TrackedLink } from "@/components/util-tracked-link";
|
||||
|
||||
export default function PublicationsPage() {
|
||||
const publicationsRaw = getPublicationsData();
|
||||
@@ -38,18 +39,20 @@ export default function PublicationsPage() {
|
||||
This section details my scientific publications, primarily focused on advancing machine learning and deep neural networks. For an updated list of my work, please refer to my research profiles linked below.
|
||||
</p>
|
||||
</div>
|
||||
<div className="flex flex-wrap gap-2">
|
||||
|
||||
<div className="flex flex-wrap gap-3 my-10 justify-center">
|
||||
{Object.entries(DATA.contact.social)
|
||||
.filter(([_, social]) => social.navbar)
|
||||
.filter(([_, social]) => social.pub)
|
||||
.map(([name, social]) => (
|
||||
<Link href={social.url} key={name} target="_blank">
|
||||
<TrackedLink href={social.url} key={name} eventName={`${name}-social`} target="_blank">
|
||||
<Badge className="flex gap-2 px-3 py-1 text-sm">
|
||||
<social.icon className="size-4" />
|
||||
{name}
|
||||
</Badge>
|
||||
</Link>
|
||||
</TrackedLink>
|
||||
))}
|
||||
</div>
|
||||
<hr/>
|
||||
<div className="space-y-8"> {/* Increased spacing between year sections */}
|
||||
{years.map((year) => (
|
||||
<div key={year} className="flex flex-col md:flex-row md:space-x-8">
|
||||
@@ -71,6 +74,7 @@ export default function PublicationsPage() {
|
||||
pdfUrl={pub.pdfUrl}
|
||||
bibtex={pub.bibtex}
|
||||
pdfAvailable={pub.pdfAvailable}
|
||||
className="cards"
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
@@ -1,44 +1,50 @@
|
||||
// file: app/research/[slug]/page.tsx
|
||||
|
||||
import { getPostBySlug, getPostSlugs } from '@/lib/mdx';
|
||||
import { notFound } from 'next/navigation';
|
||||
import { DATA } from '@/data/resume';
|
||||
import { getPublicationsData } from '@/lib/publications';
|
||||
import { Article } from '@/components/Article';
|
||||
import { Article } from '@/components/page-article';
|
||||
import { DATA } from '@/app/resume';
|
||||
|
||||
export default async function ResearchPage({ params }: { params: { slug: string } }) {
|
||||
// `getPostBySlug` is async, so `await` is correct
|
||||
const post = await getPostBySlug('research', params.slug);
|
||||
const publications = getPublicationsData();
|
||||
|
||||
if (!post) { notFound(); }
|
||||
|
||||
const allSlugs = getPostSlugs('research');
|
||||
const currentIndex = allSlugs.findIndex((s) => s === params.slug);
|
||||
const prevSlug = currentIndex > 0 ? allSlugs[currentIndex - 1] : null;
|
||||
const nextSlug = currentIndex < allSlugs.length - 1 ? allSlugs[currentIndex + 1] : null;
|
||||
|
||||
const prevPost = prevSlug ? await getPostBySlug('research', prevSlug) : null;
|
||||
const nextPost = nextSlug ? await getPostBySlug('research', nextSlug) : null;
|
||||
|
||||
const navigation = {
|
||||
prev: prevPost ? { slug: prevPost.slug, title: prevPost.frontmatter.title } : null,
|
||||
next: nextPost ? { slug: nextPost.slug, title: nextPost.frontmatter.title } : null,
|
||||
};
|
||||
|
||||
// The 'post' object now has the correct shape for the <Article> component
|
||||
return <Article post={post} publications={publications} navigation={navigation} basePath="research" />;
|
||||
}
|
||||
|
||||
// These functions remain unchanged
|
||||
export async function generateStaticParams() {
|
||||
const slugs = getPostSlugs('research');
|
||||
return slugs.map((slug) => ({ slug }));
|
||||
}
|
||||
|
||||
export async function generateMetadata({ params }: { params: { slug: string } }) {
|
||||
const post = await getPostBySlug('research', params.slug);
|
||||
if (!post) { return {}; }
|
||||
return {
|
||||
title: post.frontmatter.title,
|
||||
description: post.frontmatter.excerpt || DATA.description, // Use excerpt for description
|
||||
};
|
||||
const { slug } = await params;
|
||||
|
||||
const post = await getPostBySlug('research', slug);
|
||||
if (!post) { return {}; }
|
||||
return {
|
||||
title: post.frontmatter.title,
|
||||
description: post.frontmatter.teaser || DATA.description,
|
||||
};
|
||||
}
|
||||
|
||||
export default async function ResearchPage({ params }: { params: { slug: string } }) {
|
||||
const { slug } = await params;
|
||||
|
||||
const post = await getPostBySlug('research', slug);
|
||||
const publications = getPublicationsData();
|
||||
|
||||
if (!post) {
|
||||
notFound();
|
||||
}
|
||||
|
||||
// --- Navigation Logic ---
|
||||
const allSlugs = getPostSlugs('research');
|
||||
const currentIndex = allSlugs.findIndex((s) => s === slug);
|
||||
const prevSlug = currentIndex > 0 ? allSlugs[currentIndex - 1] : null;
|
||||
const nextSlug = currentIndex < allSlugs.length - 1 ? allSlugs[currentIndex + 1] : null;
|
||||
|
||||
const prevPost = prevSlug ? await getPostBySlug('research', prevSlug) : null;
|
||||
const nextPost = nextSlug ? await getPostBySlug('research', nextSlug) : null;
|
||||
|
||||
const navigation = {
|
||||
prev: prevPost ? { slug: prevSlug, title: prevPost.frontmatter.title } : null,
|
||||
next: nextPost ? { slug: nextSlug, title: nextPost.frontmatter.title } : null,
|
||||
};
|
||||
|
||||
return <Article post={post} publications={publications} navigation={navigation} basePath="research" />;
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
import { getSortedPostsData } from "@/lib/posts";
|
||||
import { ProjectCard } from "@/components/project-card";
|
||||
import BlurFade from "@/components/magicui/blur-fade";
|
||||
import { BlurFade } from "@/components/magicui/blur-fade";
|
||||
|
||||
const BLUR_FADE_DELAY = 0.04;
|
||||
|
||||
@@ -12,7 +12,7 @@ export default function ResearchPage() {
|
||||
<section id="research">
|
||||
<div className="mx-auto w-full max-w-6xl space-y-8">
|
||||
<div className="space-y-2">
|
||||
<h1 className="text-3xl font-bold tracking-tighter sm:text-5xl xl:text-6xl/none">
|
||||
<h1 className="text-3xl font-bold tracking-tighter sm:text-5xl xl:text-6xl/none mt-12">
|
||||
Research
|
||||
</h1>
|
||||
<p className="text-muted-foreground">
|
||||
@@ -24,13 +24,13 @@ export default function ResearchPage() {
|
||||
</p>
|
||||
</div>
|
||||
<hr />
|
||||
<div className="grid grid-cols-1 gap-3 sm:grid-cols-2 lg:grid-cols-3">
|
||||
<div className="grid grid-cols-1 gap-3 sm:grid-cols-2 lg:grid-cols-2">
|
||||
{posts
|
||||
.filter((post) => post.title)
|
||||
.map((post, id) => (
|
||||
<BlurFade
|
||||
key={post.title}
|
||||
delay={BLUR_FADE_DELAY * 2 + id * 0.05}
|
||||
delay={BLUR_FADE_DELAY * 2 + id * 0.005}
|
||||
>
|
||||
<ProjectCard
|
||||
href={post.href}
|
||||
@@ -1,4 +1,5 @@
|
||||
import { Icons } from "@/components/icons";
|
||||
import { url } from "inspector";
|
||||
import {
|
||||
HomeIcon,
|
||||
NotebookIcon,
|
||||
@@ -7,6 +8,9 @@ import {
|
||||
BookUserIcon,
|
||||
BriefcaseIcon,
|
||||
GraduationCapIcon,
|
||||
LayersIcon,
|
||||
GridIcon,
|
||||
ClipboardListIcon,
|
||||
} from "lucide-react";
|
||||
|
||||
export const DATA = {
|
||||
@@ -18,52 +22,54 @@ export const DATA = {
|
||||
description:
|
||||
"Machine Learning Researcher and Data Science Expert with a PhD in Computer Science. Brings 6+ years of experience developing and analyzing algorithms, models and foundational research, evidenced by numerous publications. Loves to offer expertise in AI/ML (PyTorch), data science, and problem-solving to drive impactful results.",
|
||||
summary:
|
||||
"My path reflects a deep-seated interest in transforming data into actionable insights, culminating in a **PhD in Computer Science**. During my doctoral studies, I focused on advancing *machine learning models* for sequential data and self-learning systems. My research frequently involved collaborations with industry partners on projects like leak detection in water networks and dysfunction detection in *multi-agent reinforcement learning*. This blend of theoretical research and practical application is the foundation of my work. Beyond research, I have actively engaged in teaching, mentoring over **20 Bachelor's and Master's theses**. I have also embraced leadership roles, organizing a major tech conference and heading the editorial team for a science magazine, broadening my experience in *project management* and communication.",
|
||||
"I am a **Machine Learning expert** who builds bridges between raw data and impactful results. With **7+ years** of **experience** and many **publications**, my focus is on developing models for **computer vision** and **sequential data**. My expertise is twofold: I love diving deep into theoretical research, but my true passion lies in translating complex concepts into **practical, scalable solutions**. This is why I am deeply invested in **DevOps** and automation, so that I can ensure **safe**, **reliable** and **efficient** operations in **production**.",
|
||||
avatarUrl: "/images/newshot_2.jpg",
|
||||
skills: {
|
||||
"Machine Learning": [
|
||||
"skills": {
|
||||
"Machine Learning & Research": [
|
||||
"Deep Learning",
|
||||
"Reinforcement Learning",
|
||||
"Multi-Agent RL",
|
||||
"Emergence",
|
||||
"Geoinformatics",
|
||||
"Data Augmentation",
|
||||
"Classification",
|
||||
"Segmentation",
|
||||
"Anomaly Detection",
|
||||
"OOD Detection",
|
||||
"Reinforcement Learning",
|
||||
"Multi-Agent RL",
|
||||
"Emergence",
|
||||
"Industrial Safety (AI)",
|
||||
"Industrial Safety (AI)"
|
||||
],
|
||||
"Data Science": [
|
||||
"Geoinformatics",
|
||||
"Programming, Languages & Libraries": [
|
||||
"Python",
|
||||
"PyTorch",
|
||||
"NumPy",
|
||||
"Pandas",
|
||||
"Scikit-learn",
|
||||
"FastAPI",
|
||||
"LaTeX",
|
||||
],
|
||||
"Software Engineering": [
|
||||
"Kotlin",
|
||||
"JavaScript",
|
||||
"Tailwinds",
|
||||
"SQL/NoSQL",
|
||||
"Shell Script",
|
||||
"HTML5/CSS3",
|
||||
"JavaScript",
|
||||
"SQL/NoSQL",
|
||||
"Linux",
|
||||
"Docker",
|
||||
"Kubernetes",
|
||||
"Git",
|
||||
"Nginx",
|
||||
"LaTeX"
|
||||
],
|
||||
"DevOps & Backend Engineering": [
|
||||
"Docker (Swarm)",
|
||||
"Kubernetes",
|
||||
"Nginx",
|
||||
"Traefik",
|
||||
"FastAPI",
|
||||
"Linux",
|
||||
"Git"
|
||||
]
|
||||
},
|
||||
navbar:
|
||||
[
|
||||
{ href: "/", icon: HomeIcon, label: "Home" },
|
||||
{ href: "/research", icon: FlaskConicalIcon, label: "Research" },
|
||||
{ href: "/projects", icon: BriefcaseIcon, label: "Projects" },
|
||||
{ href: "/teaching", icon: GraduationCapIcon, label: "Teaching" },
|
||||
/*{ href: "/projects", icon: BriefcaseIcon, label: "Projects" },*/
|
||||
{ href: "/experience", icon: ClipboardListIcon, label: "Experience" },
|
||||
{ href: "/publications", icon: PaperclipIcon, label: "Publications" },
|
||||
{ href: "/blog", icon: NotebookIcon, label: "Blog" },
|
||||
/*{ href: "/blog", icon: NotebookIcon, label: "Blog" },*/
|
||||
],
|
||||
contact:
|
||||
{
|
||||
@@ -76,58 +82,83 @@ export const DATA = {
|
||||
name: "Email",
|
||||
url: "mailto://steffen.illium@ifi.lmu.de",
|
||||
icon: Icons.email,
|
||||
navbar: true,
|
||||
pub: false,
|
||||
},
|
||||
LinkedIn:
|
||||
{
|
||||
name: "LinkedIn",
|
||||
url: "https://www.linkedin.com/in/steffen-illium/",
|
||||
icon: Icons.linkedin,
|
||||
navbar: true,
|
||||
pub: false,
|
||||
},
|
||||
GoogleScholar:
|
||||
{
|
||||
name: "Google Scholar",
|
||||
url: "https://scholar.google.de/citations?user=NODAd94AAAAJ&hl=en",
|
||||
icon: Icons.globe,
|
||||
navbar: true,
|
||||
pub: true,
|
||||
},
|
||||
arXiv:
|
||||
{
|
||||
name: "arXiv",
|
||||
url: "https://arxiv.org/a/illium_s_1",
|
||||
icon: Icons.globe,
|
||||
navbar: true,
|
||||
pub: true,
|
||||
},
|
||||
ORCiD:
|
||||
{
|
||||
name: "ORCiD",
|
||||
url: "https://orcid.org/0000-0003-0021-436X",
|
||||
icon: Icons.globe,
|
||||
pub: true,
|
||||
},
|
||||
SemanticScholar:
|
||||
{
|
||||
name: "SemanticScholar",
|
||||
url: "https://www.semanticscholar.org/author/Steffen-Illium/51893497",
|
||||
icon: Icons.globe,
|
||||
pub: true
|
||||
},
|
||||
ResearchGate:
|
||||
{
|
||||
name: "ResearchGate",
|
||||
url: "https://www.researchgate.net/profile/Steffen-Illium",
|
||||
icon: Icons.globe,
|
||||
navbar: true,
|
||||
pub: true,
|
||||
},
|
||||
Gitea:
|
||||
{
|
||||
name: "Gitea",
|
||||
url: "https://gitea.steffenillium.de",
|
||||
icon: Icons.git,
|
||||
navbar: true,
|
||||
pub: false,
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
work:
|
||||
[
|
||||
{
|
||||
company: "XITASO GmbH",
|
||||
href: "https://xitaso.com",
|
||||
location: "Augsburg, Germany",
|
||||
title: "Senior AI Consultant",
|
||||
logoUrl: "/images/projects/xitaso.png",
|
||||
start: "2024",
|
||||
end: "2024",
|
||||
description:
|
||||
"Provided expert technical consulting on AI/ML solutions. Pre-sales support. Engaged with clients to understand requirements. External talks and participation in industry expert groups.",
|
||||
},
|
||||
{
|
||||
company: "Mobile Distributed Systems Chair",
|
||||
href: "https://www.mobile.ifi.lmu.de",
|
||||
location: "Munich, Germany",
|
||||
title: "System Administrator, DevOps Engineer, Network Administrator",
|
||||
logoUrl: "/images/research/mvs.jpg",
|
||||
start: "2018",
|
||||
start: "2019",
|
||||
end: "2023",
|
||||
description:
|
||||
"Managed LMU chair IT: Kubernetes, CI/CD, automation (2018-2023).",
|
||||
"Co-Managed LMU chair IT: Kubernetes, CI/CD, automation, networking, web.",
|
||||
},
|
||||
{
|
||||
company: "LMU Munich",
|
||||
@@ -1,7 +1,9 @@
|
||||
import { getPostsByTag, getAllTags, getTagData } from "@/lib/posts";
|
||||
import { ProjectCard } from "@/components/project-card";
|
||||
import BlurFade from "@/components/magicui/blur-fade";
|
||||
import { BlurFade } from "@/components/magicui/blur-fade";
|
||||
import { notFound } from "next/navigation";
|
||||
import { Breadcrumbs } from "@/components/element-breadcrumbs";
|
||||
|
||||
|
||||
const BLUR_FADE_DELAY = 0.04;
|
||||
|
||||
@@ -11,16 +13,20 @@ export async function generateStaticParams() {
|
||||
}
|
||||
|
||||
export async function generateMetadata({ params }: { params: { tag: string } }) {
|
||||
const tagData = getTagData(params.tag);
|
||||
|
||||
const { tag } = await params;
|
||||
const tagData = getTagData(tag);
|
||||
return {
|
||||
title: params.tag,
|
||||
description: tagData?.definition || `Posts tagged with ${params.tag}`,
|
||||
title: tag,
|
||||
description: tagData?.definition || `Posts tagged with ${tag}`,
|
||||
};
|
||||
}
|
||||
|
||||
export default function TagPage({ params }: { params: { tag: string } }) {
|
||||
const posts = getPostsByTag(params.tag);
|
||||
const tagData = getTagData(params.tag);
|
||||
export default async function TagPage({ params }: { params: { tag: string } }) {
|
||||
const { tag } = await params;
|
||||
const posts = getPostsByTag(tag);
|
||||
const tagData = getTagData(tag);
|
||||
const basePath = "tags";
|
||||
|
||||
if (posts.length === 0) {
|
||||
return notFound();
|
||||
@@ -29,18 +35,23 @@ export default function TagPage({ params }: { params: { tag: string } }) {
|
||||
return (
|
||||
<main className="flex flex-col min-h-[100dvh] space-y-10">
|
||||
<section id="tag">
|
||||
<div className="mx-auto w-full max-w-4xl space-y-6">
|
||||
<Breadcrumbs
|
||||
basePath={basePath}
|
||||
baseLabel={basePath.charAt(0).toUpperCase() + basePath.slice(1)}
|
||||
/>
|
||||
</div>
|
||||
<div className="mx-auto w-full max-w-6xl space-y-8">
|
||||
<div className="space-y-2">
|
||||
<h1 className="text-3xl font-bold tracking-tighter sm:text-5xl xl:text-6xl/none">
|
||||
{params.tag}
|
||||
<h1 className="text-3xl font-bold tracking-tighter sm:text-5xl xl:text-6xl/none mt-8">
|
||||
{tag.charAt(0).toUpperCase() + tag.slice(1)}
|
||||
</h1>
|
||||
{tagData?.definition && (
|
||||
<p className="text-muted-foreground">{tagData.definition}</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<h2 className="text-2xl font-bold tracking-tighter sm:text-4xl">Posts</h2>
|
||||
<div className="grid grid-cols-1 gap-3 sm:grid-cols-2 lg:grid-cols-3">
|
||||
<hr />
|
||||
<div className="grid grid-cols-1 gap-3 sm:grid-cols-2 lg:grid-cols-2">
|
||||
{posts
|
||||
.filter((post) => post.title)
|
||||
.map((post, id) => (
|
||||
@@ -1,11 +1,11 @@
|
||||
import { getAllTags } from "@/lib/posts";
|
||||
import Link from "next/link";
|
||||
import BlurFade from "@/components/magicui/blur-fade";
|
||||
import { BlurFade } from "@/components/magicui/blur-fade";
|
||||
|
||||
const BLUR_FADE_DELAY = 0.04;
|
||||
|
||||
export default function TagsPage() {
|
||||
const tags = getAllTags();
|
||||
const tags = getAllTags(2);
|
||||
|
||||
return (
|
||||
<main className="flex flex-col min-h-[100dvh] space-y-10">
|
||||
@@ -24,7 +24,7 @@ export default function TagsPage() {
|
||||
{tags.map((tag, id) => (
|
||||
<BlurFade
|
||||
key={tag.name}
|
||||
delay={BLUR_FADE_DELAY * 2 + id * 0.05}
|
||||
delay={BLUR_FADE_DELAY * 2 + id * 0.011}
|
||||
>
|
||||
<Link
|
||||
href={`/tags/${tag.name}`}
|
||||
@@ -4,16 +4,18 @@
|
||||
"rsc": true,
|
||||
"tsx": true,
|
||||
"tailwind": {
|
||||
"config": "tailwind.config.ts",
|
||||
"css": "src/app/globals.css",
|
||||
"config": "",
|
||||
"css": "app/globals.css",
|
||||
"baseColor": "neutral",
|
||||
"cssVariables": true,
|
||||
"prefix": ""
|
||||
},
|
||||
"iconLibrary": "lucide",
|
||||
"aliases": {
|
||||
"components": "@/components",
|
||||
"utils": "@/lib/utils",
|
||||
"ui": "@/components/ui",
|
||||
"lib": "@/lib",
|
||||
"hooks": "@/hooks"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
"use client";
|
||||
|
||||
import React, { useEffect } from 'react';
|
||||
import { useCitations } from '@/context/citation-context';
|
||||
import { useCitations } from '@/components/context-citation';
|
||||
import Link from 'next/link';
|
||||
|
||||
interface CiteProps {
|
||||
@@ -1,6 +1,6 @@
|
||||
"use client";
|
||||
|
||||
import { DATA } from "@/data/resume";
|
||||
import { DATA } from "@/app/resume";
|
||||
import { Card, CardContent } from "@/components/ui/card";
|
||||
import {Button} from "@/components/ui/button";
|
||||
import Link from "next/link";
|
||||
@@ -1,7 +1,7 @@
|
||||
"use client";
|
||||
|
||||
import React, { useEffect, useState } from 'react';
|
||||
import { useCitations } from '@/context/citation-context';
|
||||
import { useCitations } from '@/components/context-citation';
|
||||
import { PublicationCard } from './publication-card';
|
||||
import { BookOpen } from 'lucide-react';
|
||||
|
||||
@@ -25,7 +25,6 @@ export function ReferencesContainer() {
|
||||
<BookOpen className="h-6 w-6" />
|
||||
References
|
||||
</h2>
|
||||
{/* --- FIX IS IN THE LINE BELOW: Added not-prose --- */}
|
||||
<div className="not-prose space-y-4">
|
||||
{sortedKeys.map(key => {
|
||||
const pub = getPublicationByKey(key);
|
||||
@@ -41,6 +40,7 @@ export function ReferencesContainer() {
|
||||
pdfUrl={pub.pdfUrl}
|
||||
bibtex={pub.bibtex}
|
||||
pdfAvailable={pub.pdfAvailable}
|
||||
className="cards"
|
||||
/>
|
||||
);
|
||||
})}
|
||||
9
components/footer.tsx
Normal file
@@ -0,0 +1,9 @@
"use client";

export function Footer() {
  return (
    <div className="h-10">

    </div>
  );
}
95
components/list-item.tsx
Normal file
@@ -0,0 +1,95 @@
|
||||
"use client";
|
||||
|
||||
import { Badge } from "@/components/ui/badge";
|
||||
import { cn } from "@/lib/utils";
|
||||
import Image from "next/image";
|
||||
import Link from "next/link";
|
||||
import Markdown from "react-markdown";
|
||||
import React from "react";
|
||||
import { ChevronRightIcon } from "lucide-react";
|
||||
import { Card, CardHeader } from "./ui/card";
|
||||
import { Avatar, AvatarFallback, AvatarImage } from "@radix-ui/react-avatar";
|
||||
|
||||
interface Props {
|
||||
title: string;
|
||||
href?: string;
|
||||
description: string;
|
||||
dates: string;
|
||||
tags: readonly string[];
|
||||
link?: string;
|
||||
image?: string;
|
||||
video?: string;
|
||||
links?: readonly {
|
||||
icon: React.ReactNode;
|
||||
type: string;
|
||||
href: string;
|
||||
}[];
|
||||
className?: string;
|
||||
}
|
||||
|
||||
export function ExperienceCard({
|
||||
title,
|
||||
href,
|
||||
description,
|
||||
dates,
|
||||
tags,
|
||||
link,
|
||||
image,
|
||||
video,
|
||||
links,
|
||||
className,
|
||||
}: Props) {
|
||||
return (
|
||||
<Link
|
||||
href={href || "#"}
|
||||
className="cards group block rounded-xl overflow-hidden font-normal no-underline cursor-pointer"
|
||||
>
|
||||
{/* 3. The Card component now has its conflicting shadow removed. */}
|
||||
<Card className="flex flex-row items-center p-4 shadow-none">
|
||||
<div className="flex-none flex-shrink-0 size-12">
|
||||
<Avatar className="h-full w-full rounded-md bg-muted-background">
|
||||
{image && (
|
||||
<AvatarImage
|
||||
src={image}
|
||||
alt={title}
|
||||
className="object-contain"
|
||||
/>
|
||||
)}
|
||||
<AvatarFallback>{title[0]}</AvatarFallback>
|
||||
</Avatar>
|
||||
</div>
|
||||
|
||||
<div className="flex-grow ml-4 items-center flex-col">
|
||||
<CardHeader className="p-0">
|
||||
<div className="flex items-center justify-between gap-x-2 text-base">
|
||||
<h3 className="inline-flex items-center justify-center gap-x-2 font-semibold leading-none text-xs sm:text-sm">
|
||||
{title}
|
||||
{links && links.length > 0 && (
|
||||
<span className="inline-flex gap-x-1">
|
||||
{links.map((link, idx) => (
|
||||
<Badge
|
||||
variant="secondary"
|
||||
className="align-middle text-xs"
|
||||
key={idx}
|
||||
>
|
||||
{link.type}
|
||||
</Badge>
|
||||
))}
|
||||
</span>
|
||||
)}
|
||||
<ChevronRightIcon className="size-4 shrink-0 translate-x-0 transform opacity-0 transition-all duration-300 ease-out group-hover:translate-x-1 group-hover:opacity-100" />
|
||||
</h3>
|
||||
<div className="text-xs sm:text-sm tabular-nums text-muted-foreground text-right whitespace-nowrap">
|
||||
{dates}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="prose max-w-full text-pretty font-sans text-sm text-muted-foreground dark:prose-invert pt-1">
|
||||
<Markdown>{description}</Markdown>
|
||||
</div>
|
||||
</CardHeader>
|
||||
</div>
|
||||
</Card>
|
||||
</Link>
|
||||
);
|
||||
}
|
||||
@@ -1,9 +1,18 @@
|
||||
"use client";
|
||||
|
||||
import { AnimatePresence, motion, useInView, Variants } from "framer-motion";
|
||||
import {
|
||||
AnimatePresence,
|
||||
motion,
|
||||
useInView,
|
||||
UseInViewOptions,
|
||||
Variants,
|
||||
MotionProps,
|
||||
} from "motion/react";
|
||||
import { useRef } from "react";
|
||||
|
||||
interface BlurFadeProps {
|
||||
type MarginType = UseInViewOptions["margin"];
|
||||
|
||||
interface BlurFadeProps extends MotionProps {
|
||||
children: React.ReactNode;
|
||||
className?: string;
|
||||
variant?: {
|
||||
@@ -12,32 +21,41 @@ interface BlurFadeProps {
|
||||
};
|
||||
duration?: number;
|
||||
delay?: number;
|
||||
yOffset?: number;
|
||||
offset?: number;
|
||||
direction?: "up" | "down" | "left" | "right";
|
||||
inView?: boolean;
|
||||
inViewMargin?: string;
|
||||
inViewMargin?: MarginType;
|
||||
blur?: string;
|
||||
}
|
||||
const BlurFade = ({
|
||||
|
||||
export function BlurFade({
|
||||
children,
|
||||
className,
|
||||
variant,
|
||||
duration = 0.4,
|
||||
delay = 0,
|
||||
yOffset = 6,
|
||||
offset = 6,
|
||||
direction = "down",
|
||||
inView = false,
|
||||
inViewMargin = "-50px" as const,
|
||||
inViewMargin = "-50px",
|
||||
blur = "6px",
|
||||
}: BlurFadeProps) => {
|
||||
...props
|
||||
}: BlurFadeProps) {
|
||||
const ref = useRef(null);
|
||||
const inViewResult = useInView(ref, {
|
||||
once: true,
|
||||
// @ts-ignore
|
||||
margin: inViewMargin // eslint-disable-next-line
|
||||
});
|
||||
const inViewResult = useInView(ref, { once: true, margin: inViewMargin });
|
||||
const isInView = !inView || inViewResult;
|
||||
const defaultVariants: Variants = {
|
||||
hidden: { y: yOffset, opacity: 0, filter: `blur(${blur})` },
|
||||
visible: { y: -yOffset, opacity: 1, filter: `blur(0px)` },
|
||||
hidden: {
|
||||
[direction === "left" || direction === "right" ? "x" : "y"]:
|
||||
direction === "right" || direction === "down" ? -offset : offset,
|
||||
opacity: 0,
|
||||
filter: `blur(${blur})`,
|
||||
},
|
||||
visible: {
|
||||
[direction === "left" || direction === "right" ? "x" : "y"]: 0,
|
||||
opacity: 1,
|
||||
filter: `blur(0px)`,
|
||||
},
|
||||
};
|
||||
const combinedVariants = variant || defaultVariants;
|
||||
return (
|
||||
@@ -54,11 +72,10 @@ const BlurFade = ({
|
||||
ease: "easeOut",
|
||||
}}
|
||||
className={className}
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
</motion.div>
|
||||
</AnimatePresence>
|
||||
);
|
||||
};
|
||||
|
||||
export default BlurFade;
|
||||
}
|
||||
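Note (not part of the commit): a minimal usage sketch for the reworked BlurFade above. The component is now a named export, `yOffset` became `offset`, and a `direction` prop was added; the wrapped content and prop values below are illustrative only.

import { BlurFade } from "@/components/magicui/blur-fade";

export function BlurFadeExample() {
  return (
    <BlurFade delay={0.04} direction="up" offset={8} inView>
      <p>Content that un-blurs and slides up as it enters the viewport.</p>
    </BlurFade>
  );
}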
153
components/magicui/dock.tsx
Normal file
@@ -0,0 +1,153 @@
|
||||
"use client";
|
||||
|
||||
import { cva, type VariantProps } from "class-variance-authority";
|
||||
import {
|
||||
motion,
|
||||
MotionValue,
|
||||
useMotionValue,
|
||||
useSpring,
|
||||
useTransform,
|
||||
} from "motion/react";
|
||||
import type { MotionProps } from "motion/react";
|
||||
import React, { PropsWithChildren, useRef } from "react";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
|
||||
export interface DockProps extends VariantProps<typeof dockVariants> {
|
||||
className?: string;
|
||||
iconSize?: number;
|
||||
iconMagnification?: number;
|
||||
disableMagnification?: boolean;
|
||||
iconDistance?: number;
|
||||
direction?: "top" | "middle" | "bottom";
|
||||
children: React.ReactNode;
|
||||
}
|
||||
|
||||
const DEFAULT_SIZE = 40;
|
||||
const DEFAULT_MAGNIFICATION = 60;
|
||||
const DEFAULT_DISTANCE = 140;
|
||||
const DEFAULT_DISABLEMAGNIFICATION = false;
|
||||
|
||||
const dockVariants = cva(
|
||||
"supports-backdrop-blur:bg-white/10 supports-backdrop-blur:dark:bg-black/10 mx-auto mt-8 flex h-[58px] w-max items-center justify-center gap-2 rounded-2xl border p-2 backdrop-blur-md",
|
||||
);
|
||||
|
||||
const Dock = React.forwardRef<HTMLDivElement, DockProps>(
|
||||
(
|
||||
{
|
||||
className,
|
||||
children,
|
||||
iconSize = DEFAULT_SIZE,
|
||||
iconMagnification = DEFAULT_MAGNIFICATION,
|
||||
disableMagnification = DEFAULT_DISABLEMAGNIFICATION,
|
||||
iconDistance = DEFAULT_DISTANCE,
|
||||
direction = "middle",
|
||||
...props
|
||||
},
|
||||
ref,
|
||||
) => {
|
||||
const mouseX = useMotionValue(Infinity);
|
||||
|
||||
const renderChildren = () => {
|
||||
return React.Children.map(children, (child) => {
|
||||
if (
|
||||
React.isValidElement<DockIconProps>(child) &&
|
||||
child.type === DockIcon
|
||||
) {
|
||||
return React.cloneElement(child, {
|
||||
...child.props,
|
||||
mouseX: mouseX,
|
||||
size: iconSize,
|
||||
magnification: iconMagnification,
|
||||
disableMagnification: disableMagnification,
|
||||
distance: iconDistance,
|
||||
});
|
||||
}
|
||||
return child;
|
||||
});
|
||||
};
|
||||
|
||||
return (
|
||||
<motion.div
|
||||
ref={ref}
|
||||
onMouseMove={(e) => mouseX.set(e.pageX)}
|
||||
onMouseLeave={() => mouseX.set(Infinity)}
|
||||
{...props}
|
||||
className={cn(dockVariants({ className }), {
|
||||
"items-start": direction === "top",
|
||||
"items-center": direction === "middle",
|
||||
"items-end": direction === "bottom",
|
||||
})}
|
||||
>
|
||||
{renderChildren()}
|
||||
</motion.div>
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
Dock.displayName = "Dock";
|
||||
|
||||
export interface DockIconProps
|
||||
extends Omit<MotionProps & React.HTMLAttributes<HTMLDivElement>, "children"> {
|
||||
size?: number;
|
||||
magnification?: number;
|
||||
disableMagnification?: boolean;
|
||||
distance?: number;
|
||||
mouseX?: MotionValue<number>;
|
||||
className?: string;
|
||||
children?: React.ReactNode;
|
||||
props?: PropsWithChildren;
|
||||
}
|
||||
|
||||
const DockIcon = ({
|
||||
size = DEFAULT_SIZE,
|
||||
magnification = DEFAULT_MAGNIFICATION,
|
||||
disableMagnification,
|
||||
distance = DEFAULT_DISTANCE,
|
||||
mouseX,
|
||||
className,
|
||||
children,
|
||||
...props
|
||||
}: DockIconProps) => {
|
||||
const ref = useRef<HTMLDivElement>(null);
|
||||
const padding = Math.max(6, size * 0.2);
|
||||
const defaultMouseX = useMotionValue(Infinity);
|
||||
|
||||
const distanceCalc = useTransform(mouseX ?? defaultMouseX, (val: number) => {
|
||||
const bounds = ref.current?.getBoundingClientRect() ?? { x: 0, width: 0 };
|
||||
return val - bounds.x - bounds.width / 2;
|
||||
});
|
||||
|
||||
const targetSize = disableMagnification ? size : magnification;
|
||||
|
||||
const sizeTransform = useTransform(
|
||||
distanceCalc,
|
||||
[-distance, 0, distance],
|
||||
[size, targetSize, size],
|
||||
);
|
||||
|
||||
const scaleSize = useSpring(sizeTransform, {
|
||||
mass: 0.1,
|
||||
stiffness: 150,
|
||||
damping: 12,
|
||||
});
|
||||
|
||||
return (
|
||||
<motion.div
|
||||
ref={ref}
|
||||
style={{ width: scaleSize, height: scaleSize, padding }}
|
||||
className={cn(
|
||||
"flex aspect-square cursor-pointer items-center justify-center rounded-full",
|
||||
disableMagnification && "transition-colors hover:bg-muted-foreground",
|
||||
className,
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
<div>{children}</div>
|
||||
</motion.div>
|
||||
);
|
||||
};
|
||||
|
||||
DockIcon.displayName = "DockIcon";
|
||||
|
||||
export { Dock, DockIcon, dockVariants };
|
||||
88
components/magicui/highlighter.tsx
Normal file
@@ -0,0 +1,88 @@
|
||||
"use client";
|
||||
|
||||
import { useEffect, useRef } from "react";
|
||||
import { useInView } from "motion/react";
|
||||
import { annotate } from "rough-notation";
|
||||
import type React from "react";
|
||||
|
||||
type AnnotationAction =
|
||||
| "highlight"
|
||||
| "underline"
|
||||
| "box"
|
||||
| "circle"
|
||||
| "strike-through"
|
||||
| "crossed-off"
|
||||
| "bracket";
|
||||
|
||||
interface HighlighterProps {
|
||||
children: React.ReactNode;
|
||||
action?: AnnotationAction;
|
||||
color?: string;
|
||||
strokeWidth?: number;
|
||||
animationDuration?: number;
|
||||
iterations?: number;
|
||||
padding?: number;
|
||||
multiline?: boolean;
|
||||
isView?: boolean;
|
||||
}
|
||||
|
||||
export function Highlighter({
|
||||
children,
|
||||
action = "highlight",
|
||||
color = "#ffd1dc",
|
||||
strokeWidth = 1.5,
|
||||
animationDuration = 600,
|
||||
iterations = 2,
|
||||
padding = 2,
|
||||
multiline = true,
|
||||
isView = false,
|
||||
}: HighlighterProps) {
|
||||
const elementRef = useRef<HTMLSpanElement>(null);
|
||||
const isInView = useInView(elementRef, {
|
||||
once: true,
|
||||
margin: "-10%",
|
||||
});
|
||||
|
||||
// If isView is false, always show. If isView is true, wait for inView
|
||||
const shouldShow = !isView || isInView;
|
||||
|
||||
useEffect(() => {
|
||||
if (!shouldShow) return;
|
||||
|
||||
const element = elementRef.current;
|
||||
if (!element) return;
|
||||
|
||||
const annotation = annotate(element, {
|
||||
type: action,
|
||||
color,
|
||||
strokeWidth,
|
||||
animationDuration,
|
||||
iterations,
|
||||
padding,
|
||||
multiline,
|
||||
});
|
||||
|
||||
annotation.show();
|
||||
|
||||
return () => {
|
||||
if (element) {
|
||||
annotate(element, { type: action }).remove();
|
||||
}
|
||||
};
|
||||
}, [
|
||||
shouldShow,
|
||||
action,
|
||||
color,
|
||||
strokeWidth,
|
||||
animationDuration,
|
||||
iterations,
|
||||
padding,
|
||||
multiline,
|
||||
]);
|
||||
|
||||
return (
|
||||
<span ref={elementRef} className="relative inline-block bg-transparent">
|
||||
{children}
|
||||
</span>
|
||||
);
|
||||
}
|
||||
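Note (not part of the commit): an illustrative usage sketch for the Highlighter component above; the copy being annotated is made up, and the props come from the HighlighterProps interface defined in the file.

import { Highlighter } from "@/components/magicui/highlighter";

export function HighlighterExample() {
  return (
    <p>
      The site now builds{" "}
      <Highlighter action="underline" color="#87cefa" isView>
        with Next.js instead of Jekyll
      </Highlighter>
      .
    </p>
  );
}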
72
components/magicui/shiny-button.tsx
Normal file
@@ -0,0 +1,72 @@
|
||||
"use client";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
import { motion, MotionProps, type AnimationProps } from "motion/react";
|
||||
import React from "react";
|
||||
|
||||
const animationProps = {
|
||||
initial: { "--x": "100%", scale: 0.8 },
|
||||
animate: { "--x": "-100%", scale: 1 },
|
||||
whileTap: { scale: 0.95 },
|
||||
transition: {
|
||||
repeat: Infinity,
|
||||
repeatType: "loop",
|
||||
repeatDelay: 1,
|
||||
type: "spring",
|
||||
stiffness: 20,
|
||||
damping: 15,
|
||||
mass: 2,
|
||||
scale: {
|
||||
type: "spring",
|
||||
stiffness: 200,
|
||||
damping: 5,
|
||||
mass: 0.5,
|
||||
},
|
||||
},
|
||||
} as AnimationProps;
|
||||
|
||||
interface ShinyButtonProps
|
||||
extends Omit<React.HTMLAttributes<HTMLElement>, keyof MotionProps>,
|
||||
MotionProps {
|
||||
children: React.ReactNode;
|
||||
className?: string;
|
||||
}
|
||||
|
||||
export const ShinyButton = React.forwardRef<
|
||||
HTMLButtonElement,
|
||||
ShinyButtonProps
|
||||
>(({ children, className, ...props }, ref) => {
|
||||
return (
|
||||
<motion.button
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"relative cursor-pointer rounded-lg px-6 py-2 font-medium backdrop-blur-xl border transition-shadow duration-300 ease-in-out hover:shadow dark:bg-[radial-gradient(circle_at_50%_0%,var(--primary)/10%_0%,transparent_60%)] dark:hover:shadow-[0_0_20px_var(--primary)/10%]",
|
||||
className,
|
||||
)}
|
||||
{...animationProps}
|
||||
{...props}
|
||||
>
|
||||
<span
|
||||
className="relative block size-full text-sm uppercase tracking-wide text-[rgb(0,0,0,65%)] dark:font-light dark:text-[rgb(255,255,255,90%)]"
|
||||
style={{
|
||||
maskImage:
|
||||
"linear-gradient(-75deg,var(--primary) calc(var(--x) + 20%),transparent calc(var(--x) + 30%),var(--primary) calc(var(--x) + 100%))",
|
||||
}}
|
||||
>
|
||||
{children}
|
||||
</span>
|
||||
<span
|
||||
style={{
|
||||
mask: "linear-gradient(rgb(0,0,0), rgb(0,0,0)) content-box exclude,linear-gradient(rgb(0,0,0), rgb(0,0,0))",
|
||||
WebkitMask:
|
||||
"linear-gradient(rgb(0,0,0), rgb(0,0,0)) content-box exclude,linear-gradient(rgb(0,0,0), rgb(0,0,0))",
|
||||
backgroundImage:
|
||||
"linear-gradient(-75deg,var(--primary)/10% calc(var(--x)+20%),var(--primary)/50% calc(var(--x)+25%),var(--primary)/10% calc(var(--x)+100%))",
|
||||
}}
|
||||
className="absolute inset-0 z-10 block rounded-[inherit] p-px"
|
||||
/>
|
||||
</motion.button>
|
||||
);
|
||||
});
|
||||
|
||||
ShinyButton.displayName = "ShinyButton";
|
||||
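Note (not part of the commit): a minimal usage sketch for the ShinyButton component above; the label and className are placeholders.

import { ShinyButton } from "@/components/magicui/shiny-button";

export function ShinyButtonExample() {
  return <ShinyButton className="mt-4">View resume</ShinyButton>;
}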
418
components/magicui/text-animate.tsx
Normal file
@@ -0,0 +1,418 @@
|
||||
"use client";
|
||||
|
||||
import { cn } from "@/lib/utils";
|
||||
import { AnimatePresence, motion, MotionProps, Variants } from "motion/react";
|
||||
import { ElementType, memo } from "react";
|
||||
|
||||
type AnimationType = "text" | "word" | "character" | "line";
|
||||
type AnimationVariant =
|
||||
| "fadeIn"
|
||||
| "blurIn"
|
||||
| "blurInUp"
|
||||
| "blurInDown"
|
||||
| "slideUp"
|
||||
| "slideDown"
|
||||
| "slideLeft"
|
||||
| "slideRight"
|
||||
| "scaleUp"
|
||||
| "scaleDown";
|
||||
|
||||
interface TextAnimateProps extends MotionProps {
|
||||
/**
|
||||
* The text content to animate
|
||||
*/
|
||||
children: string;
|
||||
/**
|
||||
* The class name to be applied to the component
|
||||
*/
|
||||
className?: string;
|
||||
/**
|
||||
* The class name to be applied to each segment
|
||||
*/
|
||||
segmentClassName?: string;
|
||||
/**
|
||||
* The delay before the animation starts
|
||||
*/
|
||||
delay?: number;
|
||||
/**
|
||||
* The duration of the animation
|
||||
*/
|
||||
duration?: number;
|
||||
/**
|
||||
* Custom motion variants for the animation
|
||||
*/
|
||||
variants?: Variants;
|
||||
/**
|
||||
* The element type to render
|
||||
*/
|
||||
as?: ElementType;
|
||||
/**
|
||||
* How to split the text ("text", "word", "character")
|
||||
*/
|
||||
by?: AnimationType;
|
||||
/**
|
||||
* Whether to start animation when component enters viewport
|
||||
*/
|
||||
startOnView?: boolean;
|
||||
/**
|
||||
* Whether to animate only once
|
||||
*/
|
||||
once?: boolean;
|
||||
/**
|
||||
* The animation preset to use
|
||||
*/
|
||||
animation?: AnimationVariant;
|
||||
/**
|
||||
* Whether to enable accessibility features (default: true)
|
||||
*/
|
||||
accessible?: boolean;
|
||||
}
|
||||
|
||||
const staggerTimings: Record<AnimationType, number> = {
|
||||
text: 0.06,
|
||||
word: 0.05,
|
||||
character: 0.03,
|
||||
line: 0.06,
|
||||
};
|
||||
|
||||
const defaultContainerVariants = {
|
||||
hidden: { opacity: 1 },
|
||||
show: {
|
||||
opacity: 1,
|
||||
transition: {
|
||||
delayChildren: 0,
|
||||
staggerChildren: 0.05,
|
||||
},
|
||||
},
|
||||
exit: {
|
||||
opacity: 0,
|
||||
transition: {
|
||||
staggerChildren: 0.05,
|
||||
staggerDirection: -1,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const defaultItemVariants: Variants = {
|
||||
hidden: { opacity: 0 },
|
||||
show: {
|
||||
opacity: 1,
|
||||
},
|
||||
exit: {
|
||||
opacity: 0,
|
||||
},
|
||||
};
|
||||
|
||||
const defaultItemAnimationVariants: Record<
|
||||
AnimationVariant,
|
||||
{ container: Variants; item: Variants }
|
||||
> = {
|
||||
fadeIn: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { opacity: 0, y: 20 },
|
||||
show: {
|
||||
opacity: 1,
|
||||
y: 0,
|
||||
transition: {
|
||||
duration: 0.3,
|
||||
},
|
||||
},
|
||||
exit: {
|
||||
opacity: 0,
|
||||
y: 20,
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
},
|
||||
blurIn: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { opacity: 0, filter: "blur(10px)" },
|
||||
show: {
|
||||
opacity: 1,
|
||||
filter: "blur(0px)",
|
||||
transition: {
|
||||
duration: 0.3,
|
||||
},
|
||||
},
|
||||
exit: {
|
||||
opacity: 0,
|
||||
filter: "blur(10px)",
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
},
|
||||
blurInUp: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { opacity: 0, filter: "blur(10px)", y: 20 },
|
||||
show: {
|
||||
opacity: 1,
|
||||
filter: "blur(0px)",
|
||||
y: 0,
|
||||
transition: {
|
||||
y: { duration: 0.3 },
|
||||
opacity: { duration: 0.4 },
|
||||
filter: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
exit: {
|
||||
opacity: 0,
|
||||
filter: "blur(10px)",
|
||||
y: 20,
|
||||
transition: {
|
||||
y: { duration: 0.3 },
|
||||
opacity: { duration: 0.4 },
|
||||
filter: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
blurInDown: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { opacity: 0, filter: "blur(10px)", y: -20 },
|
||||
show: {
|
||||
opacity: 1,
|
||||
filter: "blur(0px)",
|
||||
y: 0,
|
||||
transition: {
|
||||
y: { duration: 0.3 },
|
||||
opacity: { duration: 0.4 },
|
||||
filter: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
slideUp: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { y: 20, opacity: 0 },
|
||||
show: {
|
||||
y: 0,
|
||||
opacity: 1,
|
||||
transition: {
|
||||
duration: 0.3,
|
||||
},
|
||||
},
|
||||
exit: {
|
||||
y: -20,
|
||||
opacity: 0,
|
||||
transition: {
|
||||
duration: 0.3,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
slideDown: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { y: -20, opacity: 0 },
|
||||
show: {
|
||||
y: 0,
|
||||
opacity: 1,
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
exit: {
|
||||
y: 20,
|
||||
opacity: 0,
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
},
|
||||
slideLeft: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { x: 20, opacity: 0 },
|
||||
show: {
|
||||
x: 0,
|
||||
opacity: 1,
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
exit: {
|
||||
x: -20,
|
||||
opacity: 0,
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
},
|
||||
slideRight: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { x: -20, opacity: 0 },
|
||||
show: {
|
||||
x: 0,
|
||||
opacity: 1,
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
exit: {
|
||||
x: 20,
|
||||
opacity: 0,
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
},
|
||||
scaleUp: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { scale: 0.5, opacity: 0 },
|
||||
show: {
|
||||
scale: 1,
|
||||
opacity: 1,
|
||||
transition: {
|
||||
duration: 0.3,
|
||||
scale: {
|
||||
type: "spring",
|
||||
damping: 15,
|
||||
stiffness: 300,
|
||||
},
|
||||
},
|
||||
},
|
||||
exit: {
|
||||
scale: 0.5,
|
||||
opacity: 0,
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
},
|
||||
scaleDown: {
|
||||
container: defaultContainerVariants,
|
||||
item: {
|
||||
hidden: { scale: 1.5, opacity: 0 },
|
||||
show: {
|
||||
scale: 1,
|
||||
opacity: 1,
|
||||
transition: {
|
||||
duration: 0.3,
|
||||
scale: {
|
||||
type: "spring",
|
||||
damping: 15,
|
||||
stiffness: 300,
|
||||
},
|
||||
},
|
||||
},
|
||||
exit: {
|
||||
scale: 1.5,
|
||||
opacity: 0,
|
||||
transition: { duration: 0.3 },
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const TextAnimateBase = ({
|
||||
children,
|
||||
delay = 0,
|
||||
duration = 0.3,
|
||||
variants,
|
||||
className,
|
||||
segmentClassName,
|
||||
as: Component = "p",
|
||||
startOnView = true,
|
||||
once = false,
|
||||
by = "word",
|
||||
animation = "fadeIn",
|
||||
accessible = true,
|
||||
...props
|
||||
}: TextAnimateProps) => {
|
||||
const MotionComponent = motion.create(Component);
|
||||
|
||||
let segments: string[] = [];
|
||||
switch (by) {
|
||||
case "word":
|
||||
segments = children.split(/(\s+)/);
|
||||
break;
|
||||
case "character":
|
||||
segments = children.split("");
|
||||
break;
|
||||
case "line":
|
||||
segments = children.split("\n");
|
||||
break;
|
||||
case "text":
|
||||
default:
|
||||
segments = [children];
|
||||
break;
|
||||
}
|
||||
|
||||
const finalVariants = variants
|
||||
? {
|
||||
container: {
|
||||
hidden: { opacity: 0 },
|
||||
show: {
|
||||
opacity: 1,
|
||||
transition: {
|
||||
opacity: { duration: 0.01, delay },
|
||||
delayChildren: delay,
|
||||
staggerChildren: duration / segments.length,
|
||||
},
|
||||
},
|
||||
exit: {
|
||||
opacity: 0,
|
||||
transition: {
|
||||
staggerChildren: duration / segments.length,
|
||||
staggerDirection: -1,
|
||||
},
|
||||
},
|
||||
},
|
||||
item: variants,
|
||||
}
|
||||
: animation
|
||||
? {
|
||||
container: {
|
||||
...defaultItemAnimationVariants[animation].container,
|
||||
show: {
|
||||
...defaultItemAnimationVariants[animation].container.show,
|
||||
transition: {
|
||||
delayChildren: delay,
|
||||
staggerChildren: duration / segments.length,
|
||||
},
|
||||
},
|
||||
exit: {
|
||||
...defaultItemAnimationVariants[animation].container.exit,
|
||||
transition: {
|
||||
staggerChildren: duration / segments.length,
|
||||
staggerDirection: -1,
|
||||
},
|
||||
},
|
||||
},
|
||||
item: defaultItemAnimationVariants[animation].item,
|
||||
}
|
||||
: { container: defaultContainerVariants, item: defaultItemVariants };
|
||||
|
||||
return (
|
||||
<AnimatePresence mode="popLayout">
|
||||
<MotionComponent
|
||||
variants={finalVariants.container as Variants}
|
||||
initial="hidden"
|
||||
whileInView={startOnView ? "show" : undefined}
|
||||
animate={startOnView ? undefined : "show"}
|
||||
exit="exit"
|
||||
className={cn("whitespace-pre-wrap", className)}
|
||||
viewport={{ once }}
|
||||
aria-label={accessible ? children : undefined}
|
||||
{...props}
|
||||
>
|
||||
{accessible && <span className="sr-only">{children}</span>}
|
||||
{segments.map((segment, i) => (
|
||||
<motion.span
|
||||
key={`${by}-${segment}-${i}`}
|
||||
variants={finalVariants.item}
|
||||
custom={i * staggerTimings[by]}
|
||||
className={cn(
|
||||
by === "line" ? "block" : "inline-block whitespace-pre",
|
||||
by === "character" && "",
|
||||
segmentClassName,
|
||||
)}
|
||||
aria-hidden={accessible ? true : undefined}
|
||||
>
|
||||
{segment}
|
||||
</motion.span>
|
||||
))}
|
||||
</MotionComponent>
|
||||
</AnimatePresence>
|
||||
);
|
||||
};
|
||||
|
||||
// Export the memoized version
|
||||
export const TextAnimate = memo(TextAnimateBase);
|
||||
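Note (not part of the commit): a short usage sketch for the TextAnimate component above, using the `by` split modes and `animation` presets defined in the file; the heading text is a placeholder.

import { TextAnimate } from "@/components/magicui/text-animate";

export function TextAnimateExample() {
  return (
    <TextAnimate as="h1" by="word" animation="blurInUp" delay={0.1} once>
      Rendering a headline one word at a time
    </TextAnimate>
  );
}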
120
components/mdx.tsx
Normal file
@@ -0,0 +1,120 @@
|
||||
"use client";
|
||||
|
||||
import Image from "next/image";
|
||||
import Link from "next/link";
|
||||
import React, { Children, isValidElement } from "react";
|
||||
import { InfoBox } from "./infobox";
|
||||
import { Cite } from "./cite";
|
||||
import { FloatingImage } from "./floating-image";
|
||||
import { CenteredImage } from "./centered-image";
|
||||
import { FaGithub, FaPython, FaBook, FaFileAlt } from 'react-icons/fa';
|
||||
|
||||
|
||||
function CustomLink(props: any) {
|
||||
let href = props.href;
|
||||
|
||||
if (href.startsWith("/")) {
|
||||
return (
|
||||
<Link href={href} {...props}>
|
||||
{props.children}
|
||||
</Link>
|
||||
);
|
||||
}
|
||||
|
||||
if (href.startsWith("#")) {
|
||||
return <a {...props} />;
|
||||
}
|
||||
|
||||
return <a target="_blank" rel="noopener noreferrer" {...props} />;
|
||||
}
|
||||
|
||||
function RoundedImage(props: any) {
|
||||
return <Image alt={props.alt} className="rounded-lg" {...props} />;
|
||||
}
|
||||
|
||||
// Helper to extract plain text content from React children for slugification.
|
||||
function getPlainTextFromChildren(nodes: React.ReactNode): string {
|
||||
return Children.toArray(nodes)
|
||||
.map(node => {
|
||||
if (typeof node === 'string') {
|
||||
return node;
|
||||
}
|
||||
// Recursively extract text from valid React elements
|
||||
if (isValidElement(node) && node.props && node.props.children) {
|
||||
return getPlainTextFromChildren(node.props.children);
|
||||
}
|
||||
return ''; // Ignore other types of nodes (null, undefined, numbers etc.)
|
||||
})
|
||||
.join('');
|
||||
}
|
||||
|
||||
// Robust slugification function
|
||||
function slugify(str: string) {
|
||||
return str
|
||||
.toString()
|
||||
.toLowerCase()
|
||||
.trim() // Remove whitespace from both ends of a string
|
||||
.replace(/&/g, " and ") // Replace & with ' and ', using spaces to prevent immediate double hyphens
|
||||
.replace(/[^\w\s-]/g, "") // Remove all non-word characters except spaces and hyphens
|
||||
.replace(/[\s_-]+/g, "-") // Replace spaces, underscores, or multiple hyphens with a single hyphen
|
||||
.replace(/^-+|-+$/g, ""); // Remove any leading/trailing hyphens that might remain
|
||||
}
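// Not part of the commit: a quick sketch of what the slugify helper above produces,
// assuming the regex chain behaves exactly as written.
// slugify("Hello, World!")           -> "hello-world"
// slugify("Fast & Slow  Paths")      -> "fast-and-slow-paths"
// slugify("  --Already-Slugged--")   -> "already-slugged"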
|
||||
|
||||
function createHeading(level: number) {
|
||||
const Heading = ({ children, ...props }: { children?: React.ReactNode, [key: string]: any }) => {
|
||||
// Extract plain text from children for reliable slug generation
|
||||
const plainText = getPlainTextFromChildren(children);
|
||||
let slug = slugify(plainText);
|
||||
|
||||
// Process children to handle HTML entities for text nodes and ensure unique keys
|
||||
const processedChildren = Children.map(children, (child, index) => {
|
||||
if (typeof child === 'string') {
|
||||
// If it's a string, use dangerouslySetInnerHTML to prevent escaping of entities
|
||||
// Ensure a key is provided for children in a map
|
||||
return <span key={`text-content-${slug}-${index}`} dangerouslySetInnerHTML={{ __html: child }} />;
|
||||
}
|
||||
// If it's a React element, clone it to add a key if it doesn't have one
|
||||
if (isValidElement(child)) {
|
||||
return React.cloneElement(child, { key: child.key || `element-content-${slug}-${index}` });
|
||||
}
|
||||
return child; // Return other types as-is (numbers, null, undefined)
|
||||
});
|
||||
|
||||
return React.createElement(
|
||||
`h${level}`,
|
||||
{ id: slug, ...props },
|
||||
[
|
||||
React.createElement("a", {
|
||||
href: `#${slug}`,
|
||||
key: `link-${slug}`,
|
||||
className: "anchor",
|
||||
}), ...processedChildren
|
||||
]
|
||||
);
|
||||
};
|
||||
Heading.displayName = `Heading${level}`;
|
||||
return Heading;
|
||||
}
|
||||
|
||||
export const mdxComponents = {
|
||||
h1: createHeading(1),
|
||||
h2: createHeading(2),
|
||||
h3: createHeading(3),
|
||||
h4: createHeading(4),
|
||||
h5: createHeading(5),
|
||||
h6: createHeading(6),
|
||||
Image: RoundedImage,
|
||||
a: CustomLink,
|
||||
InfoBox,
|
||||
Cite,
|
||||
FloatingImage: FloatingImage,
|
||||
CenteredImage: CenteredImage,
|
||||
FaGithub: FaGithub,
|
||||
FaPython: FaPython,
|
||||
FaBook: FaBook,
|
||||
FaFileAlt: FaFileAlt,
|
||||
ul: (props: React.HTMLAttributes<HTMLUListElement>) => <ul className="list-disc pl-6" {...props} />,
|
||||
ol: (props: React.HTMLAttributes<HTMLOListElement>) => <ol className="list-decimal pl-6" {...props} />,
|
||||
li: (props: React.HTMLAttributes<HTMLLIElement>) => <li className="mb-2" {...props} />,
|
||||
p: (props: React.HTMLAttributes<HTMLParagraphElement>) => <p className="mb-4" {...props} />,
|
||||
};
|
||||
32
components/mode-toggle.tsx
Normal file
@@ -0,0 +1,32 @@
// components/mode-toggle.tsx

"use client";

import * as React from "react";
import { MoonIcon, SunIcon } from "@radix-ui/react-icons";
import { useTheme } from "next-themes";

import { Button } from "@/components/ui/button";

export const ModeToggle = React.forwardRef<
  HTMLButtonElement,
  React.ButtonHTMLAttributes<HTMLButtonElement>
>(({ className, ...props }, ref) => {
  const { setTheme, theme } = useTheme();

  return (
    <Button
      variant="ghost"
      size="icon"
      className={className}
      ref={ref}
      {...props}
      onClick={() => setTheme(theme === "light" ? "dark" : "light")}
    >
      <SunIcon className="h-[1.2rem] w-[1.2rem] rotate-0 scale-100 transition-all dark:rotate-90 dark:scale-0 text-foreground" />
      <MoonIcon className="absolute h-[1.2rem] w-[1.2rem] rotate-90 scale-0 transition-all dark:rotate-0 dark:scale-100 text-foreground" />
    </Button>
  );
});

ModeToggle.displayName = "ModeToggle";
57
components/navbar.tsx
Normal file
@@ -0,0 +1,57 @@
|
||||
// components/navbar.tsx
|
||||
|
||||
import { Dock, DockIcon } from "@/components/magicui/dock";
|
||||
import { ModeToggle } from "@/components/mode-toggle";
|
||||
import { buttonVariants } from "@/components/ui/button";
|
||||
import { Separator } from "@/components/ui/separator";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
TooltipProvider,
|
||||
} from "@/components/ui/tooltip";
|
||||
import { DATA } from "@/app/resume";
|
||||
import { cn } from "@/lib/utils";
|
||||
import Link from "next/link";
|
||||
|
||||
export default function Navbar() {
|
||||
return (
|
||||
<div className="pointer-events-auto fixed inset-x-0 bottom-0 z-30 mx-auto mb-4 flex origin-bottom">
|
||||
<div className="pointer-events-auto mx-auto max-w-max">
|
||||
<Dock direction="middle" className="bg-background backdrop-blur-none border">
|
||||
{DATA.navbar.map((item) => (
|
||||
<DockIcon key={item.href}>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<Link
|
||||
href={item.href}
|
||||
className={cn(
|
||||
buttonVariants({ variant: "ghost", size: "icon" }),
|
||||
"size-12",
|
||||
)}
|
||||
>
|
||||
<item.icon className="size-4" />
|
||||
</Link>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<p>{item.label}</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</DockIcon>
|
||||
))}
|
||||
<Separator orientation="vertical" className="h-full" />
|
||||
<DockIcon>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<ModeToggle />
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<p>Theme</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
</DockIcon>
|
||||
</Dock>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
@@ -1,9 +1,9 @@
|
||||
"use client";
|
||||
|
||||
import { CitationProvider } from '@/context/citation-context';
|
||||
import { ReferencesContainer } from '@/components/references-container';
|
||||
import { CustomMDX } from '@/components/custom-mdx';
|
||||
import { Breadcrumbs } from "./breadcrumbs";
|
||||
import { CitationProvider } from '@/components/context-citation';
|
||||
import { ReferencesContainer } from '@/components/container-references';
|
||||
import { CustomMDX } from '@/components/mdx-custom';
|
||||
import { Breadcrumbs } from "./element-breadcrumbs";
|
||||
import Link from 'next/link';
|
||||
import { Publication } from '@/lib/publications';
|
||||
import { ProjectNavigation } from './project-navigation';
|
||||
@@ -41,7 +41,6 @@ export function Article({ post, publications, navigation, basePath }: ArticlePro
|
||||
basePath={basePath}
|
||||
baseLabel={basePath.charAt(0).toUpperCase() + basePath.slice(1)}
|
||||
/>
|
||||
{/* MOVED: The date is now on the right side of the container */}
|
||||
{post.frontmatter.date && (
|
||||
<time className="text-sm text-muted-foreground" dateTime={post.frontmatter.date}>
|
||||
{new Date(post.frontmatter.date).toLocaleDateString("en-US", {
|
||||
@@ -70,7 +69,7 @@ export function Article({ post, publications, navigation, basePath }: ArticlePro
|
||||
alt={`${post.frontmatter.title} icon`}
|
||||
width={64}
|
||||
height={64}
|
||||
className="rounded-full"
|
||||
className="full"
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
@@ -78,7 +77,6 @@ export function Article({ post, publications, navigation, basePath }: ArticlePro
|
||||
<CustomMDX code={post.code} />
|
||||
</article>
|
||||
|
||||
{/* Tags, References, and Navigation now live outside the `prose` scope for better control */}
|
||||
{post.frontmatter.tags && (
|
||||
<div className="flex flex-wrap items-center gap-x-2 gap-y-1">
|
||||
<Tag className="size-4 text-muted-foreground" />
|
||||
113
components/project-card.tsx
Normal file
@@ -0,0 +1,113 @@
|
||||
"use client";
|
||||
|
||||
import { Badge } from "@/components/ui/badge";
|
||||
import {
|
||||
Card,
|
||||
CardContent,
|
||||
CardFooter,
|
||||
CardHeader,
|
||||
CardTitle,
|
||||
} from "@/components/ui/card";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { ChevronRightIcon } from "lucide-react";
|
||||
import Image from "next/image";
|
||||
import Link from "next/link";
|
||||
import Markdown from "react-markdown";
|
||||
|
||||
interface Props {
|
||||
title: string;
|
||||
href?: string;
|
||||
description: string;
|
||||
dates: string;
|
||||
tags: readonly string[];
|
||||
link?: string;
|
||||
image?: string;
|
||||
video?: string;
|
||||
links?: readonly {
|
||||
icon: React.ReactNode;
|
||||
type: string;
|
||||
href: string;
|
||||
}[];
|
||||
className?: string;
|
||||
}
|
||||
|
||||
export function ProjectCard({
|
||||
title,
|
||||
href,
|
||||
description,
|
||||
dates,
|
||||
tags,
|
||||
link,
|
||||
image,
|
||||
video,
|
||||
links,
|
||||
className,
|
||||
}: Props) {
|
||||
return (
|
||||
<Link
|
||||
href={href || "#"}
|
||||
className={cn("block cursor-pointer font-normal cards", className)}
|
||||
>
|
||||
<Card
|
||||
className={
|
||||
"group flex flex-col overflow-hidden h-75 gap-2 py-3 border"
|
||||
}
|
||||
>
|
||||
|
||||
{video && (
|
||||
<video
|
||||
src={video}
|
||||
autoPlay
|
||||
loop
|
||||
muted
|
||||
playsInline
|
||||
className="pointer-events-none mx-auto max-h-40 h-full object-cover object-top" // needed because random black line at bottom of video
|
||||
/>
|
||||
)}
|
||||
{image && (
|
||||
<Image
|
||||
src={image}
|
||||
alt={title}
|
||||
width={500}
|
||||
height={300}
|
||||
className="m-auto h-full max-h-40 min-h-40 max-w-60 overflow-hidden object-scale-down object-center place-content-center"
|
||||
/>
|
||||
)}
|
||||
<CardHeader className="px-4">
|
||||
<div className="space-y-1">
|
||||
<CardTitle className="mt-1 text-base flex items-center">
|
||||
<span className="truncate max-w-65">{title}</span>
|
||||
<ChevronRightIcon
|
||||
className="size-4 shrink-0 translate-x-0 transform opacity-0 transition-all duration-300 ease-out group-hover:translate-x-1 group-hover:opacity-100"
|
||||
/>
|
||||
</CardTitle>
|
||||
<time className="font-sans text-xs">{dates}</time>
|
||||
<div className="hidden font-sans text-xs underline print:visible">
|
||||
{link?.replace("https://", "").replace("www.", "").replace("/", "")}
|
||||
</div>
|
||||
<div className="prose max-w-full text-pretty font-sans text-xs text-muted-foreground dark:prose-invert">
|
||||
<Markdown>
|
||||
{description}
|
||||
</Markdown>
|
||||
</div>
|
||||
</div>
|
||||
</CardHeader>
|
||||
<CardContent className="mt-auto flex flex-col px-2" />
|
||||
<CardFooter className="px-0 pb-2">
|
||||
{links && links.length > 0 && (
|
||||
<div className="flex flex-row flex-wrap items-start gap-1">
|
||||
{links?.map((link, idx) => (
|
||||
<Link href={link?.href} key={idx} target="_blank">
|
||||
<Badge key={idx} className="flex gap-2 px-2 py-1 text-[10px]">
|
||||
{link.icon}
|
||||
{link.type}
|
||||
</Badge>
|
||||
</Link>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</CardFooter>
|
||||
</Card>
|
||||
</Link>
|
||||
);
|
||||
}
|
||||
20
components/providers.tsx
Normal file
@@ -0,0 +1,20 @@
// file: components/providers.tsx

"use client";

import { MotionConfig } from "motion/react";
import { ThemeProvider } from "@/components/theme-provider";
import { TooltipProvider } from "@/components/ui/tooltip";

export function Providers({ children }: { children: React.ReactNode }) {
  return (
    <ThemeProvider attribute="class" defaultTheme="light">
      {/* MotionConfig is the key fix for the animation issues */}
      <MotionConfig reducedMotion="user">
        <TooltipProvider delayDuration={0}>
          {children}
        </TooltipProvider>
      </MotionConfig>
    </ThemeProvider>
  );
}
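Note (not part of the commit): a minimal sketch of how the Providers wrapper above would typically be mounted in a Next.js app-router root layout. The file path, `suppressHydrationWarning`, and surrounding markup are assumptions, not taken from this repository.

// app/layout.tsx (hypothetical)
import { Providers } from "@/components/providers";

export default function RootLayout({ children }: { children: React.ReactNode }) {
  return (
    <html lang="en" suppressHydrationWarning>
      <body>
        <Providers>{children}</Providers>
      </body>
    </html>
  );
}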
226
components/publication-card.tsx
Normal file
@@ -0,0 +1,226 @@
|
||||
"use client";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { CardTitle } from "@/components/ui/card";
|
||||
import {
|
||||
Tooltip,
|
||||
TooltipContent,
|
||||
TooltipTrigger,
|
||||
} from "@/components/ui/tooltip";
|
||||
import { cn } from "@/lib/utils";
|
||||
import {
|
||||
BookOpen,
|
||||
Check,
|
||||
Copy,
|
||||
FileText,
|
||||
Paperclip,
|
||||
X,
|
||||
} from "lucide-react";
|
||||
import Image from "next/image";
|
||||
import { useEffect, useState } from "react";
|
||||
import { TrackedLink } from "./util-tracked-link";
|
||||
import { TrackedButton } from "./util-tracked-button";
|
||||
|
||||
interface Props {
|
||||
bibtexKey: string;
|
||||
title: string;
|
||||
authors: string[];
|
||||
journal: string;
|
||||
year: string;
|
||||
url?: string;
|
||||
pdfUrl?: string;
|
||||
bibtex: string;
|
||||
className?: string;
|
||||
pdfAvailable?: boolean;
|
||||
}
|
||||
|
||||
export function PublicationCard({
|
||||
bibtexKey,
|
||||
title,
|
||||
authors,
|
||||
journal,
|
||||
year,
|
||||
url,
|
||||
pdfUrl,
|
||||
bibtex,
|
||||
className,
|
||||
pdfAvailable = false,
|
||||
}: Props) {
|
||||
const [copyStatus, setCopyStatus] = useState<"idle" | "success" | "error">(
|
||||
"idle"
|
||||
);
|
||||
const [imageError, setImageError] = useState(false);
|
||||
const [downloadUrl, setDownloadUrl] = useState<string | null>(null);
|
||||
const [isClient, setIsClient] = useState(false);
|
||||
|
||||
useEffect(() => {
|
||||
setIsClient(true);
|
||||
}, []);
|
||||
|
||||
useEffect(() => {
|
||||
return () => {
|
||||
if (downloadUrl) {
|
||||
URL.revokeObjectURL(downloadUrl);
|
||||
}
|
||||
};
|
||||
}, [downloadUrl]);
|
||||
|
||||
const handleCopy = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
if (isClient && navigator.clipboard?.writeText) {
|
||||
navigator.clipboard
|
||||
.writeText(bibtex)
|
||||
.then(() => {
|
||||
setCopyStatus("success");
|
||||
setTimeout(() => setCopyStatus("idle"), 2000);
|
||||
})
|
||||
.catch((err) => {
|
||||
console.error("Failed to copy BibTeX:", err);
|
||||
setCopyStatus("error");
|
||||
setTimeout(() => setCopyStatus("idle"), 2000);
|
||||
});
|
||||
} else {
|
||||
setCopyStatus("error");
|
||||
setTimeout(() => setCopyStatus("idle"), 2000);
|
||||
}
|
||||
};
|
||||
|
||||
const handleDownload = (e: React.MouseEvent) => {
|
||||
e.stopPropagation();
|
||||
const blob = new Blob([bibtex], { type: "text/plain" });
|
||||
const url = URL.createObjectURL(blob);
|
||||
setDownloadUrl(url);
|
||||
const a = document.createElement("a");
|
||||
a.href = url;
|
||||
a.download = `${bibtexKey}.bib`;
|
||||
document.body.appendChild(a);
|
||||
a.click();
|
||||
document.body.removeChild(a);
|
||||
};
|
||||
|
||||
const handleCardClick = () => {
|
||||
if (url) {
|
||||
if (window.umami) {
|
||||
window.umami.track(`${bibtexKey}-card-click`);
|
||||
}
|
||||
window.open(url, "_blank", "noopener,noreferrer");
|
||||
}
|
||||
};
|
||||
|
||||
const imageUrl = `/publications/${bibtexKey}.png`;
|
||||
const formattedAuthors =
|
||||
authors.length > 3
|
||||
? `${authors.slice(0, 3).join(", ")}, et al.`
|
||||
: authors.join(", ");
|
||||
|
||||
const ImageContent = (
|
||||
<>
|
||||
{!imageError ? (
|
||||
<Image
|
||||
src={imageUrl}
|
||||
alt={`Preview for ${title}`}
|
||||
fill
|
||||
className="rounded-md object-cover object-top"
|
||||
onError={() => setImageError(true)}
|
||||
sizes="96px"
|
||||
loading="lazy"
|
||||
objectFit="cover"
|
||||
/>
|
||||
) : (
|
||||
<div className="flex h-full w-full items-center justify-center rounded-md bg-muted/20">
|
||||
<Paperclip className="h-6 w-6 text-muted-foreground" />
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
);
|
||||
|
||||
return (
|
||||
<div
|
||||
className={cn(
|
||||
"flex items-start space-x-4 rounded-lg border p-4 shadow-sm transition-all duration-300 ease-out",
|
||||
url && "cursor-pointer hover:shadow-lg",
|
||||
className
|
||||
)}
|
||||
id={bibtexKey}
|
||||
onClick={handleCardClick}
|
||||
>
|
||||
<div className="pointer-events-none relative h-24 w-24 flex-shrink-0">
|
||||
{ImageContent}
|
||||
</div>
|
||||
|
||||
<div className="pointer-events-none flex-grow space-y-1">
|
||||
<CardTitle className="text-base font-medium leading-snug">
|
||||
<span className={cn(url && "hover:underline")}>{title}</span>
|
||||
</CardTitle>
|
||||
<p className="text-sm text-muted-foreground">
|
||||
{formattedAuthors}. <em>{journal}</em>. {year}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="z-10 flex flex-shrink-0 flex-col space-y-1">
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<TrackedButton
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
className="h-7 w-7 cursor-pointer px-2"
|
||||
onClick={handleDownload}
|
||||
eventName={`${bibtexKey}-bibfile`}
|
||||
>
|
||||
<FileText className="h-4 w-4" />
|
||||
</TrackedButton>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<p>Download BibTeX</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<TrackedButton
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
className="relative h-7 w-7 cursor-pointer px-2"
|
||||
onClick={handleCopy}
|
||||
eventName={`${bibtexKey}-copycite`}
|
||||
>
|
||||
{copyStatus === "idle" && <Copy className="h-4 w-4" />}
|
||||
{copyStatus === "success" && (
|
||||
<Check className="h-4 w-4 text-green-500" />
|
||||
)}
|
||||
{copyStatus === "error" && <X className="h-4 w-4 text-red-500" />}
|
||||
</TrackedButton>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<p>Copy Citation</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
|
||||
{pdfUrl && pdfAvailable && (
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="sm"
|
||||
className="h-7 w-7 px-2"
|
||||
asChild
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
>
|
||||
<TrackedLink
|
||||
href={pdfUrl}
|
||||
target="_blank"
|
||||
eventName={`${bibtexKey}-pdf`}
|
||||
rel="noopener noreferrer"
|
||||
>
|
||||
<BookOpen className="h-4 w-4" />
|
||||
</TrackedLink>
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<p>Open PDF</p>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
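Note (not part of the commit): an illustrative render of the PublicationCard above with hypothetical publication data; in the site itself these props are filled in from getPublicationByKey inside the references container.

import { PublicationCard } from "@/components/publication-card";

export function PublicationCardExample() {
  return (
    <PublicationCard
      bibtexKey="doe2024"
      title="An Example Paper"
      authors={["J. Doe", "A. N. Other"]}
      journal="Journal of Examples"
      year="2024"
      bibtex="@article{doe2024, title={An Example Paper}, year={2024}}"
      pdfAvailable={false}
    />
  );
}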
@@ -4,7 +4,7 @@ import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar";
|
||||
import { Badge } from "@/components/ui/badge";
|
||||
import { Card, CardHeader } from "@/components/ui/card";
|
||||
import { cn } from "@/lib/utils";
|
||||
import { motion } from "framer-motion";
|
||||
import { motion } from "motion/react";
|
||||
import { ChevronRightIcon } from "lucide-react";
|
||||
import Link from "next/link";
|
||||
import React from "react";
|
||||
@@ -41,12 +41,12 @@ export const ResumeCard = ({
|
||||
return (
|
||||
<Link
|
||||
href={href || "#"}
|
||||
className="block cursor-pointer"
|
||||
className="font-normal no-underline block cursor-pointer"
|
||||
onClick={handleClick}
|
||||
>
|
||||
<Card className="flex">
|
||||
<Card className="flex flex-row items-center p-4 shadow-sm transition-all duration-300 ease-out hover:shadow-lg dark:shadow-[var(--shadow-glow)] dark:hover:shadow-[var(--shadow-glow-hover)]">
|
||||
<div className="flex-none">
|
||||
<Avatar className="border size-12 m-auto bg-muted-background dark:bg-foreground">
|
||||
<Avatar className="size-12 m-auto bg-muted-background shadow-sm dark:shadow-[var(--shadow-glow)]">
|
||||
<AvatarImage
|
||||
src={logoUrl}
|
||||
alt={altText}
|
||||
@@ -56,8 +56,8 @@ export const ResumeCard = ({
|
||||
</Avatar>
|
||||
</div>
|
||||
<div className="flex-grow ml-4 items-center flex-col group">
|
||||
<CardHeader>
|
||||
<div className="flex items-center justify-between gap-x-2 text-base">
|
||||
<CardHeader className="px-0">
|
||||
<div className="flex items-center justify-between gap-x-0 text-base">
|
||||
<h3 className="inline-flex items-center justify-center font-semibold leading-none text-xs sm:text-sm">
|
||||
{title}
|
||||
{badges && (
|
||||
53
components/ui/avatar.tsx
Normal file
@@ -0,0 +1,53 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
import * as AvatarPrimitive from "@radix-ui/react-avatar"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Avatar({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof AvatarPrimitive.Root>) {
|
||||
return (
|
||||
<AvatarPrimitive.Root
|
||||
data-slot="avatar"
|
||||
className={cn(
|
||||
"relative flex size-8 shrink-0 overflow-hidden rounded-full",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function AvatarImage({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof AvatarPrimitive.Image>) {
|
||||
return (
|
||||
<AvatarPrimitive.Image
|
||||
data-slot="avatar-image"
|
||||
className={cn("aspect-square size-full", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function AvatarFallback({
|
||||
className,
|
||||
...props
|
||||
}: React.ComponentProps<typeof AvatarPrimitive.Fallback>) {
|
||||
return (
|
||||
<AvatarPrimitive.Fallback
|
||||
data-slot="avatar-fallback"
|
||||
className={cn(
|
||||
"bg-muted flex size-full items-center justify-center rounded-full",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Avatar, AvatarImage, AvatarFallback }
|
||||
46
components/ui/badge.tsx
Normal file
@@ -0,0 +1,46 @@
|
||||
import * as React from "react"
|
||||
import { Slot } from "@radix-ui/react-slot"
|
||||
import { cva, type VariantProps } from "class-variance-authority"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
const badgeVariants = cva(
|
||||
"inline-flex items-center justify-center rounded-md border px-2 py-0.5 text-xs font-medium w-fit whitespace-nowrap shrink-0 [&>svg]:size-3 gap-1 [&>svg]:pointer-events-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive transition-[color,box-shadow] overflow-hidden",
|
||||
{
|
||||
variants: {
|
||||
variant: {
|
||||
default:
|
||||
"border-transparent bg-primary text-primary-foreground [a&]:hover:bg-primary/90",
|
||||
secondary:
|
||||
"border-transparent bg-secondary text-secondary-foreground [a&]:hover:bg-secondary/90",
|
||||
destructive:
|
||||
"border-transparent bg-destructive text-white [a&]:hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60",
|
||||
outline:
|
||||
"text-foreground [a&]:hover:bg-accent [a&]:hover:text-accent-foreground",
|
||||
},
|
||||
},
|
||||
defaultVariants: {
|
||||
variant: "default",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
function Badge({
|
||||
className,
|
||||
variant,
|
||||
asChild = false,
|
||||
...props
|
||||
}: React.ComponentProps<"span"> &
|
||||
VariantProps<typeof badgeVariants> & { asChild?: boolean }) {
|
||||
const Comp = asChild ? Slot : "span"
|
||||
|
||||
return (
|
||||
<Comp
|
||||
data-slot="badge"
|
||||
className={cn(badgeVariants({ variant }), className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Badge, badgeVariants }
|
||||
59
components/ui/button.tsx
Normal file
@@ -0,0 +1,59 @@
|
||||
import * as React from "react"
|
||||
import { Slot } from "@radix-ui/react-slot"
|
||||
import { cva, type VariantProps } from "class-variance-authority"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
const buttonVariants = cva(
|
||||
"inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg:not([class*='size-'])]:size-4 shrink-0 [&_svg]:shrink-0 outline-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",
|
||||
{
|
||||
variants: {
|
||||
variant: {
|
||||
default:
|
||||
"bg-primary text-primary-foreground shadow-xs hover:bg-primary/90",
|
||||
destructive:
|
||||
"bg-destructive text-white shadow-xs hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60",
|
||||
outline:
|
||||
"border bg-background shadow-xs hover:bg-accent hover:text-accent-foreground dark:bg-input/30 dark:border-input dark:hover:bg-input/50",
|
||||
secondary:
|
||||
"bg-secondary text-secondary-foreground shadow-xs hover:bg-secondary/80",
|
||||
ghost:
|
||||
"hover:bg-accent hover:text-accent-foreground dark:hover:bg-accent/50",
|
||||
link: "text-primary underline-offset-4 hover:underline",
|
||||
},
|
||||
size: {
|
||||
default: "h-9 px-4 py-2 has-[>svg]:px-3",
|
||||
sm: "h-8 rounded-md gap-1.5 px-3 has-[>svg]:px-2.5",
|
||||
lg: "h-10 rounded-md px-6 has-[>svg]:px-4",
|
||||
icon: "size-9",
|
||||
},
|
||||
},
|
||||
defaultVariants: {
|
||||
variant: "default",
|
||||
size: "default",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
function Button({
|
||||
className,
|
||||
variant,
|
||||
size,
|
||||
asChild = false,
|
||||
...props
|
||||
}: React.ComponentProps<"button"> &
|
||||
VariantProps<typeof buttonVariants> & {
|
||||
asChild?: boolean
|
||||
}) {
|
||||
const Comp = asChild ? Slot : "button"
|
||||
|
||||
return (
|
||||
<Comp
|
||||
data-slot="button"
|
||||
className={cn(buttonVariants({ variant, size, className }))}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Button, buttonVariants }
|
||||
92
components/ui/card.tsx
Normal file
@@ -0,0 +1,92 @@
|
||||
import * as React from "react"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Card({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card"
|
||||
className={cn(
|
||||
"bg-card text-card-foreground flex flex-col gap-6 rounded-xl border py-6 shadow-sm",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardHeader({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-header"
|
||||
className={cn(
|
||||
"@container/card-header grid auto-rows-min grid-rows-[auto_auto] items-start gap-1.5 px-6 has-data-[slot=card-action]:grid-cols-[1fr_auto] [.border-b]:pb-6",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardTitle({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-title"
|
||||
className={cn("leading-none font-semibold", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardDescription({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-description"
|
||||
className={cn("text-muted-foreground text-sm", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardAction({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-action"
|
||||
className={cn(
|
||||
"col-start-2 row-span-2 row-start-1 self-start justify-self-end",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardContent({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-content"
|
||||
className={cn("px-6", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function CardFooter({ className, ...props }: React.ComponentProps<"div">) {
|
||||
return (
|
||||
<div
|
||||
data-slot="card-footer"
|
||||
className={cn("flex items-center px-6 [.border-t]:pt-6", className)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export {
|
||||
Card,
|
||||
CardHeader,
|
||||
CardFooter,
|
||||
CardTitle,
|
||||
CardAction,
|
||||
CardDescription,
|
||||
CardContent,
|
||||
}
|
||||
28
components/ui/separator.tsx
Normal file
@@ -0,0 +1,28 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
import * as SeparatorPrimitive from "@radix-ui/react-separator"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function Separator({
|
||||
className,
|
||||
orientation = "horizontal",
|
||||
decorative = true,
|
||||
...props
|
||||
}: React.ComponentProps<typeof SeparatorPrimitive.Root>) {
|
||||
return (
|
||||
<SeparatorPrimitive.Root
|
||||
data-slot="separator"
|
||||
decorative={decorative}
|
||||
orientation={orientation}
|
||||
className={cn(
|
||||
"bg-border shrink-0 data-[orientation=horizontal]:h-px data-[orientation=horizontal]:w-full data-[orientation=vertical]:h-full data-[orientation=vertical]:w-px",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export { Separator }
|
||||
61
components/ui/tooltip.tsx
Normal file
61
components/ui/tooltip.tsx
Normal file
@@ -0,0 +1,61 @@
|
||||
"use client"
|
||||
|
||||
import * as React from "react"
|
||||
import * as TooltipPrimitive from "@radix-ui/react-tooltip"
|
||||
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
function TooltipProvider({
|
||||
delayDuration = 0,
|
||||
...props
|
||||
}: React.ComponentProps<typeof TooltipPrimitive.Provider>) {
|
||||
return (
|
||||
<TooltipPrimitive.Provider
|
||||
data-slot="tooltip-provider"
|
||||
delayDuration={delayDuration}
|
||||
{...props}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
function Tooltip({
|
||||
...props
|
||||
}: React.ComponentProps<typeof TooltipPrimitive.Root>) {
|
||||
return (
|
||||
<TooltipProvider>
|
||||
<TooltipPrimitive.Root data-slot="tooltip" {...props} />
|
||||
</TooltipProvider>
|
||||
)
|
||||
}
|
||||
|
||||
function TooltipTrigger({
|
||||
...props
|
||||
}: React.ComponentProps<typeof TooltipPrimitive.Trigger>) {
|
||||
return <TooltipPrimitive.Trigger data-slot="tooltip-trigger" {...props} />
|
||||
}
|
||||
|
||||
function TooltipContent({
|
||||
className,
|
||||
sideOffset = 0,
|
||||
children,
|
||||
...props
|
||||
}: React.ComponentProps<typeof TooltipPrimitive.Content>) {
|
||||
return (
|
||||
<TooltipPrimitive.Portal>
|
||||
<TooltipPrimitive.Content
|
||||
data-slot="tooltip-content"
|
||||
sideOffset={sideOffset}
|
||||
className={cn(
|
||||
"bg-primary text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 w-fit origin-(--radix-tooltip-content-transform-origin) rounded-md px-3 py-1.5 text-xs text-balance",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
>
|
||||
{children}
|
||||
<TooltipPrimitive.Arrow className="bg-primary fill-primary z-50 size-2.5 translate-y-[calc(-50%_-_2px)] rotate-45 rounded-[2px]" />
|
||||
</TooltipPrimitive.Content>
|
||||
</TooltipPrimitive.Portal>
|
||||
)
|
||||
}
|
||||
|
||||
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
|
||||
35
components/util-tracked-button.tsx
Normal file
35
components/util-tracked-button.tsx
Normal file
@@ -0,0 +1,35 @@
|
||||
// file: components/util-tracked-button.tsx
|
||||
"use client";
|
||||
|
||||
import { Button } from "@/components/ui/button";
|
||||
import React from "react";
|
||||
|
||||
interface TrackedButtonProps extends React.ComponentProps<typeof Button> {
|
||||
eventName: string;
|
||||
}
|
||||
|
||||
|
||||
export const TrackedButton = ({
|
||||
eventName,
|
||||
onClick,
|
||||
children,
|
||||
...rest
|
||||
}: TrackedButtonProps) => {
|
||||
const handleTrackedClick = (
|
||||
e: React.MouseEvent<HTMLButtonElement, MouseEvent>
|
||||
) => {
|
||||
if (window.umami) {
|
||||
window.umami.track(eventName);
|
||||
}
|
||||
|
||||
if (onClick) {
|
||||
onClick(e);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<Button onClick={handleTrackedClick} {...rest}>
|
||||
{children}
|
||||
</Button>
|
||||
);
|
||||
};
|
||||
47
components/util-tracked-link.tsx
Normal file
47
components/util-tracked-link.tsx
Normal file
@@ -0,0 +1,47 @@
|
||||
// file: components/util-tracked-link.tsx
|
||||
"use client"; // This component needs to be a client component to use onClick
|
||||
|
||||
import Link from "next/link";
|
||||
import React from "react";
|
||||
|
||||
// Define the props for our component
|
||||
interface TrackedLinkProps
|
||||
extends React.AnchorHTMLAttributes<HTMLAnchorElement> {
|
||||
href: string;
|
||||
eventName: string; // The specific event name for Umami
|
||||
}
|
||||
|
||||
export const TrackedLink = ({
|
||||
href,
|
||||
eventName,
|
||||
children,
|
||||
...rest
|
||||
}: TrackedLinkProps) => {
|
||||
const handleTrack = () => {
|
||||
//console.log("Tracking Umami event:", eventName);
|
||||
|
||||
// Check if the umami function exists on the window object
|
||||
if (window.umami) {
|
||||
window.umami.track(eventName);
|
||||
}
|
||||
};
|
||||
|
||||
// For external links, use a standard <a> tag
|
||||
const isExternal =
|
||||
href.startsWith("http") || href.startsWith("mailto:") || href.startsWith("tel:");
|
||||
|
||||
if (isExternal) {
|
||||
return (
|
||||
<a href={href} onClick={handleTrack} {...rest}>
|
||||
{children}
|
||||
</a>
|
||||
);
|
||||
}
|
||||
|
||||
// For internal links, use Next.js's Link component for SPA routing
|
||||
return (
|
||||
<Link href={href} onClick={handleTrack} {...rest}>
|
||||
{children}
|
||||
</Link>
|
||||
);
|
||||
};
|
||||
308
content/_bibliography.bib
Normal file
308
content/_bibliography.bib
Normal file
@@ -0,0 +1,308 @@
|
||||
---
|
||||
---
|
||||
References
|
||||
==========
|
||||
@inbook{3,
|
||||
title = {Improving Primate Sounds Classification Using Binary Presorting for Deep Learning},
|
||||
isbn = {9783031390593},
|
||||
issn = {1865-0937},
|
||||
url = {http://dx.doi.org/10.1007/978-3-031-39059-3_2},
|
||||
doi = {10.1007/978-3-031-39059-3_2},
|
||||
booktitle = {Deep Learning Theory and Applications},
|
||||
publisher = {Springer Nature Switzerland},
|
||||
author = {K\"{o}lle, Michael and Illium, Steffen and Zorn, Maximilian and N\"{u}\ss{}lein, Jonas and Suchostawski, Patrick and Linnhoff-Popien, Claudia},
|
||||
year = {2023},
|
||||
pages = {19–34}
|
||||
}
|
||||
@inproceedings{zorn23surprise,
|
||||
series = {ALIFE 2023},
|
||||
title = {Social Neural Network Soups with Surprise Minimization},
|
||||
url = {http://dx.doi.org/10.1162/isal_a_00671},
|
||||
doi = {10.1162/isal_a_00671},
|
||||
booktitle = {The 2023 Conference on Artificial Life},
|
||||
publisher = {MIT Press},
|
||||
author = {Zorn, Maximilian and Illium, Steffen and Phan, Thomy and Kaiser, Tanja Katharina and Linnhoff-Popien, Claudia and Gabor, Thomas},
|
||||
year = {2023},
|
||||
collection = {ALIFE 2023}
|
||||
}
|
||||
@inproceedings{feld2018trajectory,
|
||||
title = {Trajectory annotation using sequences of spatial perception},
|
||||
author = {Feld, Sebastian and Illium, Steffen and Sedlmeier, Andreas and Belzner, Lenz},
|
||||
booktitle = {Proceedings of the 26th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems},
|
||||
pages = {329--338},
|
||||
year = {2018},
|
||||
doi = {10.1145/3274895.3274968},
|
||||
url = {https://doi.org/10.1145/3274895.3274968}
|
||||
}
|
||||
@article{gabor2022self,
|
||||
title = {Self-Replication in Neural Networks},
|
||||
volume = {28},
|
||||
issn = {1530-9185},
|
||||
url = {http://dx.doi.org/10.1162/artl_a_00359},
|
||||
doi = {10.1162/artl_a_00359},
|
||||
number = {2},
|
||||
journal = {Artificial Life},
|
||||
publisher = {MIT Press - Journals},
|
||||
author = {Gabor, Thomas and Illium, Steffen and Zorn, Maximilian and Lenta, Cristian and Mattausch, Andy and Belzner, Lenz and Linnhoff-Popien, Claudia},
|
||||
year = {2022},
|
||||
pages = {205–223}
|
||||
}
|
||||
@inproceedings{gabor2019self,
|
||||
series = {ALIFE 2019},
|
||||
title = {Self-Replication in Neural Networks},
|
||||
url = {http://dx.doi.org/10.1162/isal_a_00197},
|
||||
doi = {10.1162/isal_a_00197},
|
||||
booktitle = {The 2019 Conference on Artificial Life},
|
||||
publisher = {MIT Press},
|
||||
author = {Gabor, Thomas and Illium, Steffen and Mattausch, Andy and Belzner, Lenz and Linnhoff-Popien, Claudia},
|
||||
year = {2019},
|
||||
pages = {424–431},
|
||||
collection = {ALIFE 2019}
|
||||
}
|
||||
@inproceedings{elsner2019deep,
|
||||
series = {interspeech\_2019},
|
||||
title = {Deep Neural Baselines for Computational Paralinguistics},
|
||||
url = {http://dx.doi.org/10.21437/interspeech.2019-2478},
|
||||
doi = {10.21437/interspeech.2019-2478},
|
||||
booktitle = {Interspeech 2019},
|
||||
publisher = {ISCA},
|
||||
author = {Elsner, Daniel and Langer, Stefan and Ritz, Fabian and Mueller, Robert and Illium, Steffen},
|
||||
year = {2019},
|
||||
month = sep,
|
||||
pages = {2388–2392},
|
||||
collection = {interspeech\_2019}
|
||||
}
|
||||
@inbook{muller2020soccer,
|
||||
title = {Soccer Team Vectors},
|
||||
isbn = {9783030438876},
|
||||
issn = {1865-0937},
|
||||
url = {http://dx.doi.org/10.1007/978-3-030-43887-6_19},
|
||||
doi = {10.1007/978-3-030-43887-6_19},
|
||||
booktitle = {Machine Learning and Knowledge Discovery in Databases},
|
||||
publisher = {Springer International Publishing},
|
||||
author = {M\"{u}ller, Robert and Langer, Stefan and Ritz, Fabian and Roch, Christoph and Illium, Steffen and Linnhoff-Popien, Claudia},
|
||||
year = {2020},
|
||||
pages = {247–257}
|
||||
}
|
||||
@inproceedings{friedrich2020hybrid,
|
||||
title = {A Hybrid Approach for Segmenting and Fitting Solid Primitives to 3D Point Clouds},
|
||||
url = {http://dx.doi.org/10.5220/0008870600380048},
|
||||
doi = {10.5220/0008870600380048},
|
||||
booktitle = {Proceedings of the 15th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
|
||||
publisher = {SCITEPRESS - Science and Technology Publications},
|
||||
author = {Friedrich, Markus and Illium, Steffen and Fayolle, Pierre-Alain and Linnhoff-Popien, Claudia},
|
||||
year = {2020},
|
||||
pages = {38–48}
|
||||
}
|
||||
@inbook{sedlmeier2020policy,
|
||||
title = {Policy Entropy for Out-of-Distribution Classification},
|
||||
isbn = {9783030616168},
|
||||
issn = {1611-3349},
|
||||
url = {http://dx.doi.org/10.1007/978-3-030-61616-8_34},
|
||||
doi = {10.1007/978-3-030-61616-8_34},
|
||||
booktitle = {Artificial Neural Networks and Machine Learning – ICANN 2020},
|
||||
publisher = {Springer International Publishing},
|
||||
author = {Sedlmeier, Andreas and M\"{u}ller, Robert and Illium, Steffen and Linnhoff-Popien, Claudia},
|
||||
year = {2020},
|
||||
pages = {420–431}
|
||||
}
|
||||
@inproceedings{muller2020acoustic,
|
||||
title = {Acoustic Anomaly Detection for Machine Sounds based on Image Transfer Learning},
|
||||
url = {http://dx.doi.org/10.5220/0010185800490056},
|
||||
doi = {10.5220/0010185800490056},
|
||||
booktitle = {Proceedings of the 13th International Conference on Agents and Artificial Intelligence},
|
||||
publisher = {SCITEPRESS - Science and Technology Publications},
|
||||
author = {M\"{u}ller, Robert and Ritz, Fabian and Illium, Steffen and Linnhoff-Popien, Claudia},
|
||||
year = {2021},
|
||||
pages = {49–56}
|
||||
}
|
||||
@article{illium2020meantime,
|
||||
title = {What to do in the Meantime: A Service Coverage Analysis for Parked Autonomous Vehicles},
|
||||
volume = {1},
|
||||
issn = {2700-8150},
|
||||
url = {http://dx.doi.org/10.5194/agile-giss-1-7-2020},
|
||||
doi = {10.5194/agile-giss-1-7-2020},
|
||||
journal = {AGILE: GIScience Series},
|
||||
publisher = {Copernicus GmbH},
|
||||
author = {Illium, Steffen and Friese, Philipp Andreas and M\"{u}ller, Robert and Feld, Sebastian},
|
||||
year = {2020},
|
||||
month = jul,
|
||||
pages = {1–15}
|
||||
}
|
||||
@inproceedings{illium2020surgical,
|
||||
series = {interspeech\_2020},
|
||||
title = {Surgical Mask Detection with Convolutional Neural Networks and Data Augmentations on Spectrograms},
|
||||
url = {http://dx.doi.org/10.21437/interspeech.2020-1692},
|
||||
doi = {10.21437/interspeech.2020-1692},
|
||||
booktitle = {Interspeech 2020},
|
||||
publisher = {ISCA},
|
||||
author = {Illium, Steffen and M\"{u}ller, Robert and Sedlmeier, Andreas and Linnhoff-Popien, Claudia},
|
||||
year = {2020},
|
||||
month = oct,
|
||||
pages = {2052–2056},
|
||||
collection = {interspeech\_2020}
|
||||
}
|
||||
@inproceedings{muller2020analysis,
|
||||
title = {Analysis of Feature Representations for Anomalous Sound Detection},
|
||||
url = {http://dx.doi.org/10.5220/0010226800970106},
|
||||
doi = {10.5220/0010226800970106},
|
||||
booktitle = {Proceedings of the 13th International Conference on Agents and Artificial Intelligence},
|
||||
publisher = {SCITEPRESS - Science and Technology Publications},
|
||||
author = {M\"{u}ller, Robert and Illium, Steffen and Ritz, Fabian and Schmid, Kyrill},
|
||||
year = {2021},
|
||||
pages = {97–106}
|
||||
}
|
||||
@inproceedings{muller2021acoustic,
|
||||
title = {Acoustic Leak Detection in Water Networks},
|
||||
url = {http://dx.doi.org/10.5220/0010295403060313},
|
||||
doi = {10.5220/0010295403060313},
|
||||
booktitle = {Proceedings of the 13th International Conference on Agents and Artificial Intelligence},
|
||||
publisher = {SCITEPRESS - Science and Technology Publications},
|
||||
author = {M\"{u}ller, Robert and Illium, Steffen and Ritz, Fabian and Schr\"{o}der, Tobias and Platschek, Christian and Ochs, J\"{o}rg and Linnhoff-Popien, Claudia},
|
||||
year = {2021},
|
||||
pages = {306–313}
|
||||
}
|
||||
@inproceedings{gabor2021goals,
|
||||
series = {ALIFE 2021},
|
||||
title = {Goals for Self-Replicating Neural Networks},
|
||||
url = {http://dx.doi.org/10.1162/isal_a_00439},
|
||||
doi = {10.1162/isal_a_00439},
|
||||
booktitle = {The 2021 Conference on Artificial Life},
|
||||
publisher = {MIT Press},
|
||||
author = {Gabor, Thomas and Illium, Steffen and Zorn, Maximilian and Linnhoff-Popien, Claudia},
|
||||
year = {2021},
|
||||
collection = {ALIFE 2021}
|
||||
}
|
||||
@inproceedings{illium2021visual,
|
||||
series = {interspeech\_2021},
|
||||
title = {Visual Transformers for Primates Classification and Covid Detection},
|
||||
url = {http://dx.doi.org/10.21437/interspeech.2021-273},
|
||||
doi = {10.21437/interspeech.2021-273},
|
||||
booktitle = {Interspeech 2021},
|
||||
publisher = {ISCA},
|
||||
author = {Illium, Steffen and M\"{u}ller, Robert and Sedlmeier, Andreas and Popien, Claudia-Linnhoff},
|
||||
year = {2021},
|
||||
month = aug,
|
||||
pages = {451–455},
|
||||
collection = {interspeech\_2021}
|
||||
}
|
||||
@inproceedings{muller2021primate,
|
||||
series = {interspeech\_2021},
|
||||
title = {A Deep and Recurrent Architecture for Primate Vocalization Classification},
|
||||
url = {http://dx.doi.org/10.21437/interspeech.2021-1274},
|
||||
doi = {10.21437/interspeech.2021-1274},
|
||||
booktitle = {Interspeech 2021},
|
||||
publisher = {ISCA},
|
||||
author = {M\"{u}ller, Robert and Illium, Steffen and Linnhoff-Popien, Claudia},
|
||||
year = {2021},
|
||||
month = aug,
|
||||
pages = {461–465},
|
||||
collection = {interspeech\_2021}
|
||||
}
|
||||
@inproceedings{muller2021deep,
|
||||
title = {Deep Recurrent Interpolation Networks for Anomalous Sound Detection},
|
||||
url = {http://dx.doi.org/10.1109/ijcnn52387.2021.9533560},
|
||||
doi = {10.1109/ijcnn52387.2021.9533560},
|
||||
booktitle = {2021 International Joint Conference on Neural Networks (IJCNN)},
|
||||
publisher = {IEEE},
|
||||
author = {Muller, Robert and Illium, Steffen and Linnhoff-Popien, Claudia},
|
||||
year = {2021},
|
||||
month = jul,
|
||||
pages = {1–7}
|
||||
}
|
||||
@inbook{friedrich2022csg,
|
||||
title = {CSG Tree Extraction from 3D Point Clouds and Meshes Using a Hybrid Approach},
|
||||
isbn = {9783030948931},
|
||||
issn = {1865-0937},
|
||||
url = {http://dx.doi.org/10.1007/978-3-030-94893-1_3},
|
||||
doi = {10.1007/978-3-030-94893-1_3},
|
||||
booktitle = {Computer Vision, Imaging and Computer Graphics Theory and Applications},
|
||||
publisher = {Springer International Publishing},
|
||||
author = {Friedrich, Markus and Illium, Steffen and Fayolle, Pierre-Alain and Linnhoff-Popien, Claudia},
|
||||
year = {2022},
|
||||
pages = {53–79}
|
||||
}
|
||||
@inproceedings{illium2022empirical,
|
||||
title = {Empirical Analysis of Limits for Memory Distance in Recurrent Neural Networks},
|
||||
url = {http://dx.doi.org/10.5220/0010818500003116},
|
||||
doi = {10.5220/0010818500003116},
|
||||
booktitle = {Proceedings of the 14th International Conference on Agents and Artificial Intelligence},
|
||||
publisher = {SCITEPRESS - Science and Technology Publications},
|
||||
author = {Illium, Steffen and Schillman, Thore and M\"{u}ller, Robert and Gabor, Thomas and Linnhoff-Popien, Claudia},
|
||||
year = {2022},
|
||||
pages = {308–315}
|
||||
}
|
||||
|
||||
@inproceedings{muller2022towards,
|
||||
title = {Towards Anomaly Detection in Reinforcement Learning},
|
||||
author = {M\"{u}ller, Robert and Illium, Steffen and Phan, Thomy and Haider, Tom and Linnhoff-Popien, Claudia},
|
||||
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
|
||||
pages = {1799--1803},
|
||||
year = {2022}
|
||||
}
|
||||
@inbook{nusslein2022case,
|
||||
title = {Case-Based Inverse Reinforcement Learning Using Temporal Coherence},
|
||||
isbn = {9783031149238},
|
||||
issn = {1611-3349},
|
||||
url = {http://dx.doi.org/10.1007/978-3-031-14923-8_20},
|
||||
doi = {10.1007/978-3-031-14923-8_20},
|
||||
booktitle = {Case-Based Reasoning Research and Development},
|
||||
publisher = {Springer International Publishing},
|
||||
author = {N\"{u}\ss{}lein, Jonas and Illium, Steffen and M\"{u}ller, Robert and Gabor, Thomas and Linnhoff-Popien, Claudia},
|
||||
year = {2022},
|
||||
pages = {304–317}
|
||||
}
|
||||
@inproceedings{illium2022constructing,
|
||||
title = {Constructing Organism Networks from Collaborative Self-Replicators},
|
||||
url = {http://dx.doi.org/10.1109/ssci51031.2022.10022216},
|
||||
doi = {10.1109/ssci51031.2022.10022216},
|
||||
booktitle = {2022 IEEE Symposium Series on Computational Intelligence (SSCI)},
|
||||
publisher = {IEEE},
|
||||
author = {Illium, Steffen and Zorn, Maximilian and Lenta, Cristian and Kolle, Michael and Linnhoff-Popien, Claudia and Gabor, Thomas},
|
||||
year = {2022},
|
||||
month = dec,
|
||||
pages = {1268–1275}
|
||||
}
|
||||
@inproceedings{illium2023voronoipatches,
|
||||
title = {VoronoiPatches: Evaluating a New Data Augmentation Method},
|
||||
url = {http://dx.doi.org/10.5220/0011670000003393},
|
||||
doi = {10.5220/0011670000003393},
|
||||
booktitle = {Proceedings of the 15th International Conference on Agents and Artificial Intelligence},
|
||||
publisher = {SCITEPRESS - Science and Technology Publications},
|
||||
author = {Illium, Steffen and Griffin, Gretchen and K\"{o}lle, Michael and Zorn, Maximilian and N\"{u}\ss{}lein, Jonas and Linnhoff-Popien, Claudia},
|
||||
year = {2023},
|
||||
pages = {350–357}
|
||||
}
|
||||
@inproceedings{kolle2023compression,
|
||||
title = {Compression of GPS Trajectories Using Autoencoders},
|
||||
url = {http://dx.doi.org/10.5220/0011782100003393},
|
||||
doi = {10.5220/0011782100003393},
|
||||
booktitle = {Proceedings of the 15th International Conference on Agents and Artificial Intelligence},
|
||||
publisher = {SCITEPRESS - Science and Technology Publications},
|
||||
author = {K\"{o}lle, Michael and Illium, Steffen and Hahn, Carsten and Schauer, Lorenz and Hutter, Johannes and Linnhoff-Popien, Claudia},
|
||||
year = {2023},
|
||||
pages = {829–836}
|
||||
}
|
||||
@inbook{altmann2024emergence,
|
||||
title = {Emergence in Multi-agent Systems: A Safety Perspective},
|
||||
isbn = {9783031751073},
|
||||
issn = {1611-3349},
|
||||
url = {http://dx.doi.org/10.1007/978-3-031-75107-3_7},
|
||||
doi = {10.1007/978-3-031-75107-3_7},
|
||||
booktitle = {Leveraging Applications of Formal Methods, Verification and Validation. Rigorous Engineering of Collective Adaptive Systems},
|
||||
publisher = {Springer Nature Switzerland},
|
||||
author = {Altmann, Philipp and Sch\"{o}nberger, Julian and Illium, Steffen and Zorn, Maximilian and Ritz, Fabian and Haider, Tom and Burton, Simon and Gabor, Thomas},
|
||||
year = {2024},
|
||||
month = oct,
|
||||
pages = {104–120}
|
||||
}
|
||||
@inproceedings{kolle2024aquarium,
|
||||
title = {Aquarium: A Comprehensive Framework for Exploring Predator-Prey Dynamics Through Multi-Agent Reinforcement Learning Algorithms},
|
||||
url = {http://dx.doi.org/10.5220/0012382300003636},
|
||||
doi = {10.5220/0012382300003636},
|
||||
booktitle = {Proceedings of the 16th International Conference on Agents and Artificial Intelligence},
|
||||
publisher = {SCITEPRESS - Science and Technology Publications},
|
||||
author = {K\"{o}lle, Michael and Erpelding, Yannick and Ritz, Fabian and Phan, Thomy and Illium, Steffen and Linnhoff-Popien, Claudia},
|
||||
year = {2024},
|
||||
pages = {59–70}
|
||||
}
|
||||
@@ -1,19 +1,17 @@
|
||||
---
|
||||
title: "InnoMi Project"
|
||||
tags: [projects]
|
||||
excerpt: "Early-stage mobile/distributed tech transfer between academia and industry (Bavaria)."
|
||||
tags: [mobile-internet, technology-transfer, bavaria, industry]
|
||||
excerpt: "Early-stage mobile/distributed tech transfer between academia and industry."
|
||||
teaser: /images/projects/innomi.png
|
||||
icon: /images/projects/innomi.png
|
||||
|
||||
---
|
||||
|
||||
The InnoMi research initiative served as a vital bridge between academic research and industrial application within Bavaria. Funded by the state government and operating under the umbrella of the Zentrum Digitalisierung.Bayern, the project provided crucial resources and a collaborative framework.
|
||||
|
||||
<InfoBox title="Project Details">
|
||||
- **Project:** [InnoMi - Innovations for the Mobile Internet](https://innomi.org)
|
||||
- **Title:** [InnoMi - Innovations for the Mobile Internet](https://innomi.org)
|
||||
- **Affiliation:** Zentrum Digitalisierung.Bayern (ZDB)
|
||||
- **Funding:** Bavarian Ministry of Economic Affairs, Regional Development and Energy (StMWi)
|
||||
- **Duration:** Supported the Chair for Mobile and Distributed Systems (2018-2023)
|
||||
- **Objective:** To strengthen the Bavarian economy by facilitating the early transfer of innovations from university research, specifically at the [Chair for Mobile and Distributed Systems at LMU Munich](https://www.mobile.ifi.lmu.de), to local industry partners.
|
||||
</InfoBox>
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "Computer Architecture TA"
|
||||
tags: [teaching, computer architecture, LMU, coordination]
|
||||
tags: [computer architecture, coordination]
|
||||
excerpt: "Served as a Teaching Assistant and Tutorial Coordinator for the LMU Computer Architecture course, managing tutors and curriculum for over 600 students."
|
||||
teaser: "/images/teaching/computer_gear.png"
|
||||
icon: "/images/teaching/computer_gear.png"
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "IoT Practical Exercise"
|
||||
tags: [teaching, iot, mqtt, python, influxdb, distributed-systems, practical-course]
|
||||
tags: [iot, mqtt, python, influxdb, distributed-systems, practical-course]
|
||||
excerpt: "Designed and taught an IoT practical exercise using MQTT and Python for approximately 200 students."
|
||||
teaser: "/images/teaching/server.png"
|
||||
icon: "/images/teaching/server.png"
|
||||
@@ -20,10 +20,10 @@ The goal was to provide hands-on experience with fundamental IoT communication p
|
||||
* **Communication Protocol:** Implementing a typical publish/subscribe system using the **[MQTT](https://mqtt.org/)** protocol.
|
||||
* **Data Persistence:** Storing simulated sensor data in an **[InfluxDB](https://www.influxdata.com/)** time-series database backend.
|
||||
* **Sensor Simulation:** Generating high-frequency data streams to mimic real-world sensor behavior.
|
||||
* **Implementation Language:** Requiring students to implement the entire pipeline from scratch using **[Python](https://www.python.org/)**. Foundational Python skills were covered in a [separate preparatory course](/teaching/python/).
|
||||
* **Implementation Language:** Requiring students to implement the entire pipeline from scratch using **[Python](https://www.python.org/)**. Foundational Python skills were covered in a [separate preparatory course](/experience/python/).
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/iot_inflex_pipeline.png" alt="Diagram showing simulated sensors publishing via MQTT to a broker, which is subscribed to by an InfluxDB logger" width={800} height={323} />
|
||||
<Image src="/images/teaching/iot_inflex_pipeline.webp" alt="Diagram showing simulated sensors publishing via MQTT to a broker, which is subscribed to by an InfluxDB logger" width={800} height={323} />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Conceptual pipeline for the MQTT-InfluxDB practical exercise.</figcaption>
|
||||
</div>
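
To make the pipeline described above more concrete, here is a minimal, illustrative sketch (not the original course code): one process simulates a sensor and publishes readings via MQTT, a second subscribes and persists them to InfluxDB. The library choices (`paho-mqtt` 1.x style client, `influxdb-client`) and all names (broker host, topic, bucket, token) are assumptions for illustration only.

```python
# Illustrative sketch of the MQTT -> InfluxDB pipeline; not the original course code.
# Assumed libraries: paho-mqtt (1.x-style Client; 2.x additionally needs mqtt.CallbackAPIVersion)
# and influxdb-client. Broker host, topic, bucket and token are placeholders.
import json
import random
import time

import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS

BROKER = "localhost"           # assumed MQTT broker host
TOPIC = "sensors/temperature"  # assumed topic layout


def simulate_sensor(n_readings: int = 10) -> None:
    """Publish simulated high-frequency sensor readings via MQTT."""
    for _ in range(n_readings):
        payload = json.dumps({"sensor_id": "sim-01", "value": 20 + random.random() * 5})
        publish.single(TOPIC, payload, hostname=BROKER)
        time.sleep(0.1)


def run_logger() -> None:
    """Subscribe to the sensor topic and write each message into InfluxDB."""
    influx = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
    write_api = influx.write_api(write_options=SYNCHRONOUS)

    def on_message(client, userdata, msg):
        data = json.loads(msg.payload)
        point = Point("temperature").tag("sensor", data["sensor_id"]).field("value", data["value"])
        write_api.write(bucket="iot", record=point)

    sub = mqtt.Client()
    sub.on_message = on_message
    sub.connect(BROKER, 1883)
    sub.subscribe(TOPIC)
    sub.loop_forever()
```
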
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "Python 101 Course"
|
||||
tags: [teaching, python, programming, introductory-course, curriculum-development]
|
||||
tags: [python, programming, introductory-course, curriculum-development]
|
||||
excerpt: "Co-developed/taught intensive introductory Python course for 200 students."
|
||||
teaser: /images/teaching/py.png
|
||||
icon: /images/teaching/py.png
|
||||
44
content/experience/2019-02-24-dw.mdx
Normal file
44
content/experience/2019-02-24-dw.mdx
Normal file
@@ -0,0 +1,44 @@
|
||||
---
|
||||
title: "DW Editorial Lead"
|
||||
tags: [editorial, content management, digital strategy, magazine, workflow optimization, website relaunch]
|
||||
excerpt: "Led online editorial team for DIGITALE WELT Magazin (2018-2023)."
|
||||
teaser: "/images/projects/dw.png"
|
||||
icon: "/images/projects/dw.png"
|
||||
---
|
||||
|
||||
During my doctoral studies and research tenure at LMU Munich, I led the online editorial team for *DIGITALE WELT Magazin*. This role, supported by the [InnoMi project](/projects/innomi/), involved managing the publication's digital presence and strategic direction, aiming to effectively bridge scientific research and industry perspectives on digitalization trends.
|
||||
|
||||
<InfoBox title="Project Details">
|
||||
- **Role**: Head of Online Editorial Team
|
||||
- **Publication**: [DIGITALE WELT Magazin (DW)](https://digitaleweltmagazin.de)
|
||||
- **Affiliation**: [LMU Munich](/projects/innomi/)
|
||||
- **Duration**: 2018 - 2023
|
||||
</InfoBox>
|
||||
|
||||
## Key Responsibilities & Achievements
|
||||
|
||||
**Digital Strategy & Content Management:** Oversaw all online content publication and social media channels, defining the editorial calendar and ensuring alignment with the magazine's goal of integrating academic and economic discourse.
|
||||
|
||||
**Process Optimization & Automation:** Developed and implemented streamlined workflows for content acquisition, editing, and publication. Introduced automation solutions that significantly reduced manual workload and improved efficiency.
|
||||
|
||||
**Portfolio Expansion:** Actively broadened the scope and variety of online content to better serve the target audience and reflect emerging digital trends.
|
||||
|
||||
**Website Relaunch Oversight:** Played a key role in managing a major website redesign project, focusing on user experience, modern aesthetics, and facilitating a transition towards a digital-first content strategy.
|
||||
|
||||
<CenteredImage
|
||||
src="/images/projects/dw_screenshot.png"
|
||||
alt="Screenshot of the DIGITALE WELT Magazin Website"
|
||||
width={550}
|
||||
height={310}
|
||||
caption="DIGITALE WELT Magazin Website Interface"
|
||||
/>
|
||||
|
||||
Prior to leading the online team, I contributed to the print editions of the magazine, specifically managing the "Wissen" (Knowledge) section. These earlier contributions are archived and accessible [online](https://digitaleweltmagazin.de/alle-magazine/).
|
||||
|
||||
<CenteredImage
|
||||
src="/images/projects/dw_magazin.png"
|
||||
alt="Cover collage of printed DIGITALE WELT Magazin issues"
|
||||
width={550}
|
||||
height={412}
|
||||
caption="Examples of DIGITALE WELT Print Magazine Covers"
|
||||
/>
|
||||
@@ -1,23 +1,25 @@
|
||||
---
|
||||
title: "ErLoWa Leak Detection"
|
||||
excerpt: "Deep learning detects acoustic water leaks with SWM."
|
||||
tags: [projects, acoustic, anomaly-detection, deep-learning, real-world-data, signal-processing]
|
||||
tags: [acoustic, anomaly-detection, deep-learning, real-world-data, signal-processing, water-management, sensors]
|
||||
teaser: "/images/projects/pipe_leak.png"
|
||||
icon: "/images/projects/pipe_leak.png"
|
||||
---
|
||||
|
||||
In collaboration with Munich's municipal utility provider, Stadtwerke München (SWM), this project explored the feasibility of using acoustic monitoring for early leak detection in water pipe infrastructure. The primary goal was to develop machine learning models capable of identifying leak-indicating sound patterns within a real-world operational environment.
|
||||
|
||||
|
||||
<InfoBox title="Project Details">
|
||||
- **Project**: ErLoWa (Erkennung von Leckagen in Wasserleitungsnetzen)
|
||||
- **Partner**: [Stadtwerke München (SWM)](https://www.swm.de/)
|
||||
- **Duration**: Late 2018 - Early 2020
|
||||
- **Duration**: 2018 - 2020
|
||||
- **Role**: Data Scientist, Machine Learning Expert
|
||||
- **Skills**: Acoustic Signal Processing, Deep Learning (CNNs), Anomaly Detection, Real-world Data Handling, Sensor Data Analysis
|
||||
</InfoBox>
|
||||
|
||||
|
||||
The objective was to investigate and develop methods for the automated detection and localization of leaks in urban water distribution networks using acoustic sensor data.
|
||||
|
||||
|
||||
## Methodology & Activities
|
||||
|
||||
- **Data Acquisition**: Sensor networks comprising contact microphones were deployed across sections of Munich's suburban water network to capture continuous acoustic data.
|
||||
@@ -25,11 +27,25 @@ The objective was to investigate and develop methods for the automated detection
|
||||
- **Model Development**: Various machine learning approaches were evaluated. Deep neural networks, particularly Convolutional Neural Networks (CNNs), were trained on the spectrogram data to classify segments as containing leak sounds or normal background noise (a simplified sketch follows this list).
|
||||
- **Analysis & Validation**: The performance of the models was assessed against ground truth data provided by SWM, identifying both the successes and challenges of applying these methods in a complex, noisy, real-world setting.
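
As a rough illustration of the approach outlined above, and not the project's actual preprocessing or models, the following sketch converts an audio segment into a log-mel spectrogram and feeds it to a small binary CNN classifier. The library choices (`librosa`, PyTorch) and all hyperparameters are assumptions.

```python
# Illustrative sketch only: log-mel spectrogram + small CNN for leak / no-leak classification.
# librosa and PyTorch are assumed here; the project's actual pipeline may differ.
import librosa
import numpy as np
import torch
import torch.nn as nn


def log_mel_spectrogram(path: str, sr: int = 16000, n_mels: int = 64) -> torch.Tensor:
    """Load an audio segment and convert it to a normalized log-mel spectrogram tensor."""
    y, sr = librosa.load(path, sr=sr)
    mel = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=n_mels)
    log_mel = librosa.power_to_db(mel, ref=np.max)
    log_mel = (log_mel - log_mel.mean()) / (log_mel.std() + 1e-8)
    return torch.from_numpy(log_mel).float().unsqueeze(0)  # shape: (1, n_mels, time)


class LeakClassifier(nn.Module):
    """Tiny CNN mapping a spectrogram to two logits: background noise vs. leak sound."""

    def __init__(self) -> None:
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(1),
        )
        self.classifier = nn.Linear(32, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.features(x)
        return self.classifier(torch.flatten(x, 1))


# Usage sketch (path is a placeholder):
# spec = log_mel_spectrogram("segment.wav").unsqueeze(0)  # add batch dimension
# logits = LeakClassifier()(spec)
```
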
|
||||
|
||||
|
||||
<div className="my-6 grid grid-cols-1 md:grid-cols-2 gap-4">
|
||||
<Image src="/images/projects/pipe_leak/1fe8265e-ff21-4e9c-8a2a-2ebcaec41728.jpeg" alt="Mel spectrogram of acoustic data" width={800} height={600} />
|
||||
<Image src="/images/projects/pipe_leak/8d2364f1-1b03-480d-9ed3-09d548f47383.jpeg" alt="Mel spectrogram of acoustic data" width={800} height={600} />
|
||||
<CenteredImage
|
||||
src="/images/projects/pipe_leak/1fe8265e-ff21-4e9c-8a2a-2ebcaec41728.jpeg"
|
||||
alt="A field technician using an acoustic sensor rod to detect leaks on a water pipe"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="Me representing a field technician using an acoustic leak detection device equipped with headphones to pinpoint potential water leaks."
|
||||
/>
|
||||
<CenteredImage
|
||||
src="/images/projects/pipe_leak/8d2364f1-1b03-480d-9ed3-09d548f47383.jpeg"
|
||||
alt="Team performing acoustic leak detection around a manhole cover"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="SWM personnel conducting on-site acoustic measurements around a manhole, illustrating data acquisition for the leak detection project."
|
||||
/>
|
||||
</div>
|
||||
|
||||
|
||||
## Key Findings & Outcomes
|
||||
|
||||
- The project demonstrated the potential of deep learning models applied to mel spectrograms for identifying relevant acoustic features indicative of water leaks.
|
||||
@@ -37,9 +53,23 @@ The objective was to investigate and develop methods for the automated detection
|
||||
- Significant insights were gained regarding the practical challenges of sensor deployment, data quality variability, and noise interference in real-world utility networks.
|
||||
- The research conducted within this project formed the basis for several scientific [publications](/publications) and a [paper writeup](/research/acoustic-leak-detection/) <Cite bibtexKey="muller2021acoustic" />.
|
||||
|
||||
|
||||
<div className="my-6 grid grid-cols-1 md:grid-cols-2 gap-4">
|
||||
<Image src="/images/projects/pipe_leak/5ea8f0ee-6c61-4835-944c-b77683d397ca.jpeg" alt="Mel spectrogram of acoustic data with potential leak signature" width={800} height={600} />
|
||||
<Image src="/images/projects/pipe_leak/cc01cc58-d3f6-4220-b4f4-c7ea26b3a116.jpeg" alt="Another example of a mel spectrogram" width={800} height={600} />
|
||||
<CenteredImage
|
||||
src="/images/projects/pipe_leak/5ea8f0ee-6c61-4835-944c-b77683d397ca.jpeg"
|
||||
alt="Workers performing actions on a pipe section"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="On-site 'arificial leak' creation in the proximity of deployed acoustic sensors."
|
||||
/>
|
||||
<CenteredImage
|
||||
src="/images/projects/pipe_leak/cc01cc58-d3f6-4220-b4f4-c7ea26b3a116.jpeg"
|
||||
alt="Project team next to an SWM utility vehicle after field work"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="Project team from SWM and LMU after a session of field data collection."
|
||||
/>
|
||||
</div>
|
||||
|
||||
|
||||
This applied research project provided valuable experience in handling real-world sensor data, adapting machine learning models for specific industrial challenges, and collaborating effectively with industry partners.
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "TIMS Seminar Supervision"
|
||||
tags: [teaching, supervision, mentoring, research, academic writing]
|
||||
tags: [supervision, mentoring, academic writing]
|
||||
excerpt: "Supervised student research, writing, and presentation skills in Mobile/Distributed Systems, ML, and Quantum Computing."
|
||||
teaser: "/images/teaching/thesis.png"
|
||||
icon: "/images/teaching/thesis.png"
|
||||
@@ -11,21 +11,28 @@ icon: "/images/teaching/thesis.png"
|
||||
- **Role**: Seminar Supervisor / Teaching Assistant
|
||||
- **Duration**: 2020 - 2023 (Multiple Semesters)
|
||||
- **Skills**: Research Mentoring, Scientific Writing Guidance, Presentation Coaching, Academic Assessment, Topic Curation
|
||||
- **Past Iterations**:
|
||||
- Summer '23: [TIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-sose23/)
|
||||
- Winter '22/23: [TIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-ws2223/)
|
||||
- Summer '22: [TIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-sose22/)
|
||||
- Winter '21/22: [TIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-ws2122-2/)
|
||||
- Summer '21: [TIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-sose21/)
|
||||
- Winter '20/21: [TIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-wise2021/)
|
||||
</InfoBox>
|
||||
|
||||
As part of my teaching responsibilities at the Chair for Mobile and Distributed Systems (LMU Munich), I regularly supervised the **"Trends in Mobile and Distributed Systems" (TIMS)** seminar series for both Bachelor and Master students.
|
||||
|
||||
This seminar is designed to introduce students to the process of scientific research and academic work. Each semester focused on specific cutting-edge topics within the chair's main research areas, primarily **Mobile and Distributed Systems**, with recent iterations emphasizing **Machine Learning** and **Quantum Computing**.
|
||||
|
||||
|
||||
|
||||
The core objectives involved guiding students through:
|
||||
|
||||
<InfoBox title="Seminar Details">
|
||||
### Summer Semesters (TIMS)
|
||||
- [Summer '23](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-sose23/)
|
||||
- [Summer '22](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-sose22/)
|
||||
- [Summer '21](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-sose21/)
|
||||
|
||||
### Winter Semesters (TIMS)
|
||||
- [Winter '22/23](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-ws2223/)
|
||||
- [Winter '21/22](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-ws2122-2/)
|
||||
- [Winter '20/21](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-trends-in-mobilen-und-verteilten-systemen-wise2021/)
|
||||
</InfoBox>
|
||||
|
||||
- **Topic Exploration:** Selecting and defining a research topic within the semester's theme.
|
||||
- **Literature Review:** Conducting thorough searches and critically analyzing relevant scientific papers.
|
||||
- **Scientific Writing:** Structuring and writing a formal academic seminar paper summarizing their findings.
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "VTIMS Advanced Seminar"
|
||||
tags: [teaching]
|
||||
tags: [machine learning, distributed systems, quantum computing, research mentoring, scientific writing, presentation coaching, critical analysis, academic assessment]
|
||||
excerpt: "Supervised Master's advanced research/analysis in Mobile/Distributed Systems, ML, QC."
|
||||
teaser: "/images/teaching/thesis_master.png"
|
||||
icon: "/images/teaching/thesis_master.png"
|
||||
@@ -25,12 +25,15 @@ Compared to the introductory seminar, VTIMS placed greater emphasis on:
|
||||
The structure involved guiding students through the research process, including topic selection, intensive literature review, rigorous academic writing, and polished presentation delivery. Dedicated support included sessions on advanced scientific methods, presentation refinement, and personalized coaching. Assessment criteria mirrored those of TIMS but with expectations adjusted for the Master's level, focusing on the depth of academic work, presentation quality, and insightful participation.
|
||||
|
||||
<InfoBox title="Past Seminar Iterations">
|
||||
- **SoSe 2023:** [VTIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-sose23/)
|
||||
- **WiSe 22/23:** [VTIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-ws2223/)
|
||||
- **SoSe 2022:** [VTIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-sose22/)
|
||||
- **WiSe 21/22:** [VTIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-ws2122/)
|
||||
- **SoSe 2021:** [VTIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-sose21/)
|
||||
- **WiSe 20/21:** [VTIMS](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-ws2021/)
|
||||
### Summer Semesters (VTIMS)
|
||||
- [SoSe 2023](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-sose23/)
|
||||
- [SoSe 2022](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-sose22/)
|
||||
- [SoSe 2021](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-sose21/)
|
||||
|
||||
### Winter Semesters (VTIMS)
|
||||
- [WiSe 22/23](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-ws2223/)
|
||||
- [WiSe 21/22](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-ws2122/)
|
||||
- [WiSe 20/21](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/seminar-vertiefte-themen-in-mobilen-und-verteilten-systemen-ws2021/)
|
||||
</InfoBox>
|
||||
|
||||
### Seminar Objectives (Master Level)
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "OpenMunich Conference Organization"
|
||||
tags: [projects, community-engagement, event-management]
|
||||
tags: [community-engagement, event-management, conference, open-source, industry, technology]
|
||||
excerpt: "Led the OpenMunich conference series (2018-19), connecting academia, industry, and students on open-source topics."
|
||||
teaser: "/images/projects/openmunich.png"
|
||||
icon: "/images/projects/openmunich.png"
|
||||
@@ -12,9 +12,7 @@ Organized by the LMU Chair for Mobile and Distributed Systems in collaboration w
|
||||
- **Role:** Lead Conference Organizer
|
||||
- **Event:** [OpenMunich Conference](https://openmunich.eu)
|
||||
- **Partners:** [Accenture](https://www.accenture.com/de-de), [Red Hat](https://www.redhat.com/en)
|
||||
- **Affiliation:** LMU Munich
|
||||
- **Duration:** 2018 - 2019
|
||||
- **Skills:** Event Management, Stakeholder Coordination, Project Planning, Website Management
|
||||
</InfoBox>
|
||||
|
||||
As Lead Organizer for the 2018 and 2019 editions, I was responsible for the end-to-end planning and execution of the events. The conference offered a platform to showcase university research—covering topics from Machine Learning to Quantum Computing—alongside practical insights and technology demonstrations from our industry partners.
|
||||
@@ -22,12 +20,19 @@ As Lead Organizer for the 2018 and 2019 editions, I was responsible for the end-
|
||||
## Key Responsibilities
|
||||
|
||||
- **Event Management:** Oversaw all logistical aspects of conference planning and execution, including venue coordination, scheduling, and technical infrastructure setup.
|
||||
|
||||
- **Stakeholder Coordination:** Served as the primary point of contact between the university, industry partners (Accenture, Red Hat), external speakers, and attendees.
|
||||
|
||||
- **Program Development Support:** Collaborated with partners on defining the conference agenda, ensuring a balanced mix of academic research and industry sessions.
|
||||
|
||||
- **Website & Communication:** Managed the official conference website (openmunich.eu, now offline), including content creation, structural design, updates, and promotions.
|
||||
|
||||
- **Sponsorship Liaison:** Coordinated with Accenture and Red Hat regarding their sponsorship contributions and participation requirements.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/images/projects/openmunich_website.png" alt="Screenshot of the OpenMunich conference website homepage" width={800} height={450} />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Screenshot of the former OpenMunich website.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
|
||||
src="/images/projects/openmunich_website.png"
|
||||
alt="Screenshot of the OpenMunich conference website homepage"
|
||||
width={800}
|
||||
height={450}
|
||||
caption="Screenshot of the former OpenMunich website."
|
||||
/>
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "Operating Systems TA"
|
||||
tags: [teaching, system programming, java, lmu munich]
|
||||
tags: [system programming, java, lmu munich]
|
||||
excerpt: "TA & Coordinator for the Operating Systems lecture, focusing on system programming concepts and concurrent programming in Java for over 350 students."
|
||||
teaser: /images/teaching/computer_os.png
|
||||
icon: /images/teaching/computer_os.png
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "iOS App Development"
|
||||
tags: [teaching, ios, swift, mobile-development, app-development, agile, teamwork]
|
||||
tags: [mobile-development, app-development, agile, teamwork]
|
||||
excerpt: "Supervised iOS Praktikum: student teams built Swift apps using agile."
|
||||
teaser: /images/teaching/ios.png
|
||||
icon: /images/teaching/ios.png
|
||||
64
content/experience/2020-05-01-FIKS.mdx
Normal file
64
content/experience/2020-05-01-FIKS.mdx
Normal file
@@ -0,0 +1,64 @@
|
||||
---
|
||||
title: "AI-Fusion Safety"
|
||||
tags: [MARL, reinforcement-learning, AI-safety, emergence, simulation, python, unity, software-engineering, industry-collaboration]
|
||||
excerpt: "Studied MARL emergence and safety, built simulations with Fraunhofer."
|
||||
teaser: /images/projects/robot.png
|
||||
icon: /images/projects/robot.png
|
||||
|
||||
---
|
||||
|
||||
In collaboration with Fraunhofer IKS, the AI-Fusion project addressed the critical challenge of understanding and ensuring safety in multi-agent reinforcement learning (MARL) systems.
|
||||
|
||||
Emergence, defined as the arising of complex, often unpredictable, system-level dynamics from local interactions between agents and their environment, was a central focus due to its implications for system safety and reliability.
|
||||
|
||||
The project's objective was to investigate the detection and mitigation of potentially unsafe emergent behaviors in complex systems composed of multiple interacting AI agents, particularly in scenarios involving heterogeneous agents (e.g., mixed-vendor autonomous systems).
|
||||
|
||||
<InfoBox title="Project Details">
|
||||
## Overview
|
||||
|
||||
**Project:** AI-Fusion<br/>
|
||||
**Partner:** [Fraunhofer IKS](https://www.iks.fraunhofer.de/)<br/>
|
||||
**Duration:** 2022 - 2023<br/>
|
||||
<CenteredImage src="/images/projects/full_domain.png" alt="Visual representation of the AI-Fusion project domain" width={400} height={200} caption="EDDYs Domain Overview" />
|
||||
|
||||
---
|
||||
|
||||
## Resources
|
||||
|
||||
<ul>
|
||||
<li><a href="https://github.com/illiumst/marl-factory-grid/" target="_blank" rel="noopener noreferrer"><FaGithub className="inline-block align-middle mr-2" /> GitHub Repo</a></li>
|
||||
<li><a href="https://pypi.org/project/Marl-Factory-Grid/" target="_blank" rel="noopener noreferrer"><FaPython className="inline-block align-middle mr-2" /> Install via PyPI</a></li>
|
||||
<li><a href="https://marl-factory-grid.readthedocs.io/en/latest/" target="_blank" rel="noopener noreferrer"><FaBook className="inline-block align-middle mr-2" /> ReadTheDocs</a></li>
|
||||
<li><FaFileAlt className="inline-block align-middle mr-2" /> <Cite bibtexKey="altmann2024emergence" /></li>
|
||||
</ul>
|
||||
</InfoBox>
|
||||
|
||||
To facilitate research into these phenomena, key contributions included the development of specialized simulation tools:
|
||||
|
||||
**1. High-Performance MARL Simulation Environment:**
|
||||
|
||||
* A flexible and efficient simulation environment was developed in Python, adhering to the [Gymnasium (formerly Gym) API specification](https://gymnasium.farama.org/main/) (a minimal interface sketch follows the feature list below).
|
||||
* **Purpose:** Designed specifically for training and evaluating reinforcement learning algorithms in multi-agent contexts prone to emergent behaviors.
|
||||
* **Features:**
|
||||
* **Modularity:** Supports diverse scenarios through configurable `modules` and `configurations`.
|
||||
* **Observation/Action Spaces:** Handles complex agent interactions, including per-agent observations and sequential/multi-agent action coordination.
|
||||
* **Performance:** Optimized for efficient simulation runs, enabling extensive experimentation.
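
For orientation, the snippet below shows what a minimal Gymnasium-compatible environment looks like in general. It is not the actual `marl-factory-grid` interface (see the linked repository and documentation for that); the class, observation/action spaces, and reward scheme are placeholders.

```python
# Generic sketch of a Gymnasium-compatible environment, not the marl-factory-grid API.
# It only illustrates the reset()/step() contract that such environments follow.
import gymnasium as gym
import numpy as np
from gymnasium import spaces


class ToyGridEnv(gym.Env):
    """Single agent on a 1-D line; reaching the last cell is a placeholder task."""

    def __init__(self, size: int = 5) -> None:
        super().__init__()
        self.size = size
        self.observation_space = spaces.Box(low=0, high=size - 1, shape=(1,), dtype=np.float32)
        self.action_space = spaces.Discrete(2)  # 0: move left, 1: move right
        self.pos = 0

    def reset(self, *, seed=None, options=None):
        super().reset(seed=seed)
        self.pos = 0
        return np.array([self.pos], dtype=np.float32), {}

    def step(self, action):
        self.pos = int(np.clip(self.pos + (1 if action == 1 else -1), 0, self.size - 1))
        terminated = self.pos == self.size - 1   # reached the goal cell
        reward = 1.0 if terminated else -0.01    # small step penalty
        return np.array([self.pos], dtype=np.float32), reward, terminated, False, {}


# Usage sketch:
# env = ToyGridEnv()
# obs, info = env.reset(seed=0)
# obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
```
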
|
||||
|
||||
**2. Unity-Based Demonstrator Unit:**
|
||||
|
||||
* A complementary visualization tool was created using the Unity engine.
|
||||
* **Purpose:** Allows for the replay, inspection, and detailed analysis of specific simulation scenarios and agent interactions.
|
||||
* **Utility:** Aids researchers in identifying and understanding the mechanisms behind observed emergent dynamics.
|
||||
* [View Demonstrator on GitHub](https://github.com/illiumst/F-IKS_demonstrator)
|
||||
|
||||
<CenteredImage
|
||||
src="/images/projects/rel_emergence.png"
|
||||
alt="Diagram illustrating the concept of emergence from interactions between agents and environment"
|
||||
width={800}
|
||||
height={300}
|
||||
caption="Conceptual relationship defining emergence in multi-agent systems."
|
||||
/>
|
||||
|
||||
This project involved close collaboration with industry-focused researchers, software development adhering to modern standards, and deep investigation into the theoretical underpinnings of emergence and safety in MARL systems.
|
||||
|
||||
The developed tools provide a valuable platform for continued research in this critical area.
|
||||
51
content/experience/2022-10-15-android.mdx
Normal file
51
content/experience/2022-10-15-android.mdx
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
title: "MSP Android Course"
|
||||
tags: [java, kotlin, mobile-development, app-development, agile, teamwork]
|
||||
excerpt: "Supervised MSP: teams built Android apps (Java/Kotlin) using agile."
|
||||
teaser: "/images/teaching/android.png"
|
||||
icon: "/images/teaching/android.png"
|
||||
|
||||
---
|
||||
|
||||
<InfoBox title="Course Details">
|
||||
- **Role**: Practical Course Supervisor / Teaching Assistant
|
||||
- **Duration**: 2018 - 2023 (Multiple Semesters)
|
||||
</InfoBox>
|
||||
|
||||
Over several semesters during my time at LMU Munich, I co-supervised the **"Praktikum Mobile und Verteilte Systeme" (MSP)**, often referred to as the Android development practical course. This intensive lab course provided students with hands-on experience in designing, developing, and testing native applications for the **Android** platform, primarily using **Java** and later **Kotlin**.
|
||||
|
||||
The course consistently followed a two-phase structure:
|
||||
|
||||
1. **Introductory Phase:** Focused on imparting fundamental concepts of Android development, relevant APIs, architectural patterns, and necessary tooling through lectures and guided practical exercises.
|
||||
2. **Project Phase:** Student teams collaborated on developing a complete Android application based on their own concepts or provided themes. My role involved providing continuous technical mentorship, architectural guidance, code review feedback, and support in project planning and agile execution to each team.
|
||||
|
||||
Emphasis was placed on applying software engineering best practices within the context of mobile application development.
|
||||
|
||||
<InfoBox title="Past Course Iterations">
|
||||
### Winter Semesters (MSP)
|
||||
- [WiSe 22/23](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/praktikum-mobile-und-verteilte-systeme-ws2223/)
|
||||
- [WiSe 21/22](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/praktikum-mobile-und-verteilte-systeme-ws2122/)
|
||||
- [WiSe 20/21](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/praktikum-mobile-und-verteilte-systeme-ws2021/)
|
||||
- [WiSe 19/20](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/praktikum-mobile-und-verteilte-systeme-ws1920/)
|
||||
- [WiSe 18/19](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/msp-ws1819/)
|
||||
|
||||
### Summer Semesters (MSP)
|
||||
- [SoSe 2022](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/praktikum-mobile-und-verteilte-systeme-sose22/)
|
||||
- [SoSe 2021](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/praktikum-mobile-und-verteilte-systeme-sose21/)
|
||||
- [SoSe 2020](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/praktikum-mobile-und-verteilte-systeme-sose20/)
|
||||
- [SoSe 2019](https://www.mobile.ifi.lmu.de/lehrveranstaltungen/msp-sose19/)
|
||||
</InfoBox>
|
||||
|
||||
## Key Learning Areas
|
||||
|
||||
Students gained practical experience in:
|
||||
- Native Android App Development<br/> (Java/Kotlin)
|
||||
- Activity/Fragment Lifecycle, UI Design<br/> (XML Layouts, Jetpack Compose later)
|
||||
- Client-Server Architecture & Networking<br/> (e.g., Retrofit, Volley)
|
||||
- Using Wireless Local Networks<br/> (WiFi / Bluetooth APIs)
|
||||
- Implementing Location Services<br/> (GPS / Fused Location Provider)
|
||||
- Background Processing and Services
|
||||
- Data Persistence<br/> (SharedPreferences, SQLite, Room)
|
||||
- Teamwork and Collaborative Software Development (Git)
|
||||
- Agile Methodologies and Project Management Tools
|
||||
|
||||
@@ -12,7 +12,6 @@ During my tenure at the LMU Chair for Mobile and Distributed Systems, alongside
|
||||
|
||||
<InfoBox title="Project Overview">
|
||||
- **Role**: System Administrator, DevOps Engineer, Network Administrator
|
||||
- **Affiliation**: Chair for Mobile and Distributed Systems, LMU Munich
|
||||
- **Duration**: 2018 - 2023
|
||||
- **Technologies**: Kubernetes (K3S), Ansible, Docker, CI/CD (GitLab CI, Argo CD), GitOps, Linux Server Administration, Traefik, WireGuard, ZFS, Longhorn, IaC
|
||||
</InfoBox>
|
||||
@@ -41,7 +40,7 @@ During my tenure at the LMU Chair for Mobile and Distributed Systems, alongside
|
||||
* Utilized the self-hosted **GitLab Container Registry** for storing ML environment images.
|
||||
|
||||
---
|
||||
|
||||
<br/>
|
||||
## Outcomes & Philosophy
|
||||
|
||||
This hands-on role provided deep practical experience in modern system administration, networking, Infrastructure as Code (IaC), and cloud-native technologies within an academic research setting. It fostered my preference for minimalist, reproducible, and microservice-oriented architectures. These principles and skills are actively applied in my personal projects, including the self-hosting and management of this website and various other containerized services.
|
||||
@@ -1,36 +0,0 @@
|
||||
---
|
||||
title: "DW Editorial Lead"
|
||||
tags: [projects]
|
||||
excerpt: "Led online editorial team for DIGITALE WELT Magazin (2018-2023)."
|
||||
teaser: "/images/projects/dw.png"
|
||||
icon: "/images/projects/dw.png"
|
||||
|
||||
---
|
||||
|
||||
During my doctoral studies and research tenure at LMU Munich, I led the online editorial team for *DIGITALE WELT Magazin*. This role, supported by the [InnoMi project](/projects/innomi/), involved managing the publication's digital presence and strategic direction, aiming to effectively bridge scientific research and industry perspectives on digitalization trends.
|
||||
|
||||
<InfoBox title="Project Details">
|
||||
- **Role**: Head of Online Editorial Team
|
||||
- **Publication**: [DIGITALE WELT Magazin (DW)](https://digitaleweltmagazin.de)
|
||||
- **Affiliation**: [LMU Munich](/projects/innomi/)
|
||||
- **Duration**: 2018 - 2023
|
||||
</InfoBox>
|
||||
|
||||
## Key Responsibilities & Achievements
|
||||
|
||||
* **Digital Strategy & Content Management:** Oversaw all online content publication and social media channels, defining the editorial calendar and ensuring alignment with the magazine's goal of integrating academic and economic discourse.
|
||||
* **Process Optimization & Automation:** Developed and implemented streamlined workflows for content acquisition, editing, and publication. Introduced automation solutions that significantly reduced manual workload and improved efficiency.
|
||||
* **Portfolio Expansion:** Actively broadened the scope and variety of online content to better serve the target audience and reflect emerging digital trends.
|
||||
* **Website Relaunch Oversight:** Played a key role in managing a major website redesign project, focusing on user experience, modern aesthetics, and facilitating a transition towards a digital-first content strategy.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/images/projects/dw_screenshot.png" alt="Screenshot of the DIGITALE WELT Magazin Website" width={550} height={310} />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">DIGITALE WELT Magazin Website Interface</figcaption>
|
||||
</div>
|
||||
|
||||
Prior to leading the online team, I contributed to the print editions of the magazine, specifically managing the "Wissen" (Knowledge) section. These earlier contributions are archived and accessible [online](https://digitaleweltmagazin.de/alle-magazine/).
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/images/projects/dw_magazin.png" alt="Cover collage of printed DIGITALE WELT Magazin issues" width={550} height={412} />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Examples of DIGITALE WELT Print Magazine Covers</figcaption>
|
||||
</div>
|
||||
@@ -1,57 +0,0 @@
|
||||
---
|
||||
title: "AI-Fusion Safety"
|
||||
tags: [MARL, reinforcement-learning, safety, emergence, simulation]
|
||||
excerpt: "Studied MARL emergence and safety, built simulations with Fraunhofer."
|
||||
teaser: /images/projects/robot.png
|
||||
icon: /images/projects/robot.png
|
||||
role: Researcher, Software Developer
|
||||
skills: Multi-Agent Reinforcement Learning (MARL), Emergence Analysis, AI Safety, Simulation Environment Design, Python, Gymnasium API, Software Engineering, Unity (Visualization), Industry Collaboration
|
||||
---
|
||||
|
||||
In collaboration with Fraunhofer IKS, the AI-Fusion project addressed the critical challenge of understanding and ensuring safety in multi-agent reinforcement learning (MARL) systems. Emergence, defined as the arising of complex, often unpredictable, system-level dynamics from local interactions between agents and their environment, was a central focus due to its implications for system safety and reliability. The project's objective was to investigate the detection and mitigation of potentially unsafe emergent behaviors in complex systems composed of multiple interacting AI agents, particularly in scenarios involving heterogeneous agents (e.g., mixed-vendor autonomous systems).
|
||||
|
||||
<InfoBox title="Project Details">
|
||||
{/* Section 2: Project Info */}
|
||||
<h4>Overview</h4>
|
||||
<p style={{ lineHeight: '1.5', margin: 0 }}>
|
||||
<strong>Project:</strong> AI-Fusion<br/>
|
||||
<strong>Partner:</strong> <a href="https://www.iks.fraunhofer.de/" target="_blank" rel="noopener noreferrer">Fraunhofer IKS</a><br/>
|
||||
<strong>Duration:</strong> 2022 - 2023
|
||||
</p>
|
||||
|
||||
<Image src="/images/projects/full_domain.png" alt="Overview of the AI-Fusion simulation domain" width="400" height="200" />
|
||||
<br />
|
||||
<hr />
|
||||
<br />
|
||||
{/* Section 1: Resources */}
|
||||
<h4 style={{marginTop: 0, marginRight: 0}}>Resources</h4>
|
||||
<ul style={{ listStyle: 'none', paddingLeft: '0' }}>
|
||||
<li><a href="https://github.com/illiumst/marl-factory-grid/" target="_blank" rel="noopener noreferrer"><i className="fab fa-fw fa-github" aria-hidden="true"></i> GitHub Repo</a></li>
|
||||
<li><a href="https://pypi.org/project/Marl-Factory-Grid/" target="_blank" rel="noopener noreferrer"><i className="fab fa-fw fa-python" aria-hidden="true"></i> Install via PyPI</a></li>
|
||||
<li><a href="https://marl-factory-grid.readthedocs.io/en/latest/" target="_blank" rel="noopener noreferrer"><i className="fas fa-fw fa-book" aria-hidden="true"></i> ReadTheDocs</a></li>
|
||||
<li><i className="fas fa-fw fa-file-alt" aria-hidden="true"></i> <Cite bibtexKey="altmann2024emergence" /></li>
|
||||
</ul>
|
||||
</InfoBox>
|
||||
|
||||
To facilitate research into these phenomena, key contributions included the development of specialized simulation tools:
|
||||
|
||||
**1. High-Performance MARL Simulation Environment:**
|
||||
* A flexible and efficient simulation environment was developed in Python, adhering to the [Gymnasium (formerly Gym) API specification](https://gymnasium.farama.org/main/); a minimal interaction sketch is shown after this list.
|
||||
* **Purpose:** Designed specifically for training and evaluating reinforcement learning algorithms in multi-agent contexts prone to emergent behaviors.
|
||||
* **Features:**
|
||||
* **Modularity:** Supports diverse scenarios through configurable `modules` and `configurations`.
|
||||
* **Observation/Action Spaces:** Handles complex agent interactions, including per-agent observations and sequential/multi-agent action coordination.
|
||||
* **Performance:** Optimized for efficient simulation runs, enabling extensive experimentation.
|
||||
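For illustration, the environment follows the standard Gymnasium interaction contract. The sketch below shows a generic random-action rollout under that API; the construction of the environment object itself is omitted, since the concrete configs and module names are specific to the `marl-factory-grid` package and its documentation.

```python
# Minimal sketch of the standard Gymnasium interaction loop such an environment exposes.
# How the environment is constructed (configs, modules) is package-specific and omitted here.
import gymnasium as gym


def rollout(env: gym.Env, max_steps: int = 100) -> float:
    """Run one episode with random actions and return the accumulated reward."""
    obs, info = env.reset(seed=0)
    total_reward = 0.0
    for _ in range(max_steps):
        action = env.action_space.sample()  # stand-in for a learned (multi-agent) policy
        obs, reward, terminated, truncated, info = env.step(action)
        total_reward += float(reward)
        if terminated or truncated:
            break
    return total_reward
```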
|
||||
**2. Unity-Based Demonstrator Unit:**
|
||||
* A complementary visualization tool was created using the Unity engine.
|
||||
* **Purpose:** Allows for the replay, inspection, and detailed analysis of specific simulation scenarios and agent interactions.
|
||||
* **Utility:** Aids researchers in identifying and understanding the mechanisms behind observed emergent dynamics.
|
||||
* [View Demonstrator on GitHub](https://github.com/illiumst/F-IKS_demonstrator)
|
||||
|
||||
<div className="text-center">
|
||||
<Image src="/images/projects/rel_emergence.png" alt="Diagram illustrating the concept of emergence from interactions between agents and environment" width="800" height="300"/>
|
||||
<figcaption>Conceptual relationship defining emergence in multi-agent systems.</figcaption>
|
||||
</div>
|
||||
|
||||
This project involved close collaboration with industry-focused researchers, software development adhering to modern standards, and deep investigation into the theoretical underpinnings of emergence and safety in MARL systems. The developed tools provide a valuable platform for continued research in this critical area.
|
||||
@@ -1,21 +1,25 @@
|
||||
---
|
||||
title: "Learned Trajectory Annotation"
|
||||
tags: [research, geoinformatics, machine-learning, unsupervised-learning, human-robot-interaction, autoencoder]
|
||||
tags: [geoinformatics, machine-learning, unsupervised-learning, human-robot-interaction, autoencoder, clustering, trajectory, perception, spatial-context, representation-learning]
|
||||
excerpt: "Unsupervised autoencoder learns spatial context from trajectory data for annotation."
|
||||
teaser: "/figures/0_trajectory_reconstruction_teaser.png"
|
||||
|
||||
---
|
||||
|
||||
<div className="my-6">
|
||||
<div className="flex flex-col sm:flex-row justify-center items-start gap-4">
|
||||
<Image src="/figures/0_trajectory_isovist.jpg" alt="Visualization of spatial perception field (e.g., isovist) from a point on a trajectory" width={800} height={600} />
|
||||
<Image src="/figures/0_trajectory_reconstruction.jpg" alt="Clustered or reconstructed trajectories based on learned spatial representations" width={800} height={600} />
|
||||
</div>
|
||||
<figcaption className="text-center text-sm text-muted-foreground mt-2">Learning spatial context representations (left) enables clustering and annotation of trajectories (right).</figcaption>
|
||||
</div>
|
||||
|
||||
This research addresses the challenge of enabling more intuitive human-robot interaction in shared spaces, particularly focusing on grounding verbal communication in spatial understanding. The work introduces a novel unsupervised learning methodology based on neural autoencoders.
|
||||
|
||||
<CenteredImage
|
||||
src="/figures/0_trajectory_isovist.jpg"
|
||||
alt="Visualization of spatial perception field (e.g., isovist) from a point on a trajectory"
|
||||
width={450}
|
||||
height={600}
|
||||
caption="Learning spatial context representations from a trajectory."
|
||||
/>
|
||||
The core contribution is a system that learns continuous, low-dimensional representations of spatial context directly from trajectory data, without requiring explicit environmental maps or predefined regions. By processing sequences of spatial perceptions (analogous to visibility fields or isovists) along a path, the autoencoder captures salient environmental features relevant to movement.
|
||||
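As a rough, hedged sketch of this idea (the isovist resolution, layer sizes, and training loop are placeholders rather than the paper's configuration), a per-step autoencoder over isovist vectors could look as follows:

```python
# Illustrative PyTorch autoencoder for per-step isovist vectors along a trajectory;
# dimensions are placeholders, not the configuration used in the paper.
import torch
import torch.nn as nn


class IsovistAutoencoder(nn.Module):
    def __init__(self, isovist_dim: int = 360, latent_dim: int = 16):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(isovist_dim, 128), nn.ReLU(),
            nn.Linear(128, latent_dim),
        )
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, 128), nn.ReLU(),
            nn.Linear(128, isovist_dim),
        )

    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        z = self.encoder(x)          # low-dimensional spatial-context code
        return self.decoder(z), z    # reconstruction + latent code for clustering
```

The latent codes produced along a trajectory can then be pooled and clustered (for example with k-means) to group paths that share similar spatial experiences.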
|
||||
<CenteredImage
|
||||
src="/figures/0_trajectory_reconstruction.jpg"
|
||||
alt="Clustered or reconstructed trajectories based on learned spatial representations"
|
||||
width={450}
|
||||
height={600}
|
||||
caption="Clustering and annotation of trajectories based on learned spatial representations."
|
||||
/>
|
||||
These learned latent representations facilitate the effective clustering of trajectories based on shared spatial experiences. The outcome is a set of semantically meaningful encodings and prototypical representations of movement patterns within an environment. This approach lays essential groundwork for developing robotic systems capable of understanding, interpreting, and potentially describing movement through space in human-comprehensible terms, representing a promising direction for future human-robot collaboration. <Cite bibtexKey="feld2018trajectory" />
|
||||
@@ -1,18 +1,26 @@
|
||||
---
|
||||
title: "Neural Self-Replication"
|
||||
tags: [research, neural-networks, artificial-life, complex-systems, self-organization]
|
||||
tags: [neural-networks, artificial-life, complex-systems, self-organization, machine-learning, evolution, replication]
|
||||
excerpt: "Neural networks replicating weights, inspired by biology and artificial life."
|
||||
teaser: "/figures/1_self_replication_pca_space.jpg"
|
||||
icon: "/figures/1_self_replication_robustness.jpg"
|
||||
---
|
||||
Drawing inspiration from the fundamental process of self-replication in biological systems, this research explores the potential for implementing analogous mechanisms within neural networks. The objective is to develop computational models capable of autonomously reproducing their own structure (specifically, their connection weights), potentially leading to the emergence of complex, adaptive behaviors <Cite bibtexKey="gabor2019self" />.
|
||||
|
||||
Drawing inspiration from the fundamental process of self-replication in biological systems, this research explores the potential for implementing analogous mechanisms within neural networks. The objective is to develop computational models capable of autonomously reproducing their own structure (specifically, their connection weights), potentially leading to the emergence of complex, adaptive behaviors.
|
||||
<FloatingImage
|
||||
src="/figures/1_self_replication_robustness.jpg"
|
||||
alt="Box plot showing the variation of 'Time to Vergence' and 'Time as Fixpoint' across different perturbation magnitudes, demonstrating robustness."
|
||||
width={600}
|
||||
height={400}
|
||||
float="right"
|
||||
caption="Robustness analysis illustrating the time to reach convergence and the duration spent as a fixpoint under varying degrees of perturbation."
|
||||
/>
|
||||
|
||||
The study investigates various neural network architectures and learning paradigms suitable for achieving self-replication. A key finding highlights the efficacy of leveraging backpropagation-like mechanisms, not for a typical supervised task, but for navigating the weight space in a manner conducive to replication. This approach facilitates the development of non-trivial self-replicating networks.
|
||||
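The following is a deliberately simplified, hypothetical sketch of that idea: a tiny network is nudged by an ordinary backpropagation step toward outputting the current value of each of its own weights, given a coordinate encoding of that weight. The encoding scheme and architecture here are illustrative and differ from the published formulation.

```python
# Conceptual sketch (not the paper's exact setup): one gradient step toward self-replication.
import torch
import torch.nn as nn
import torch.nn.functional as F

net = nn.Sequential(nn.Linear(3, 16), nn.Tanh(), nn.Linear(16, 1))
optimizer = torch.optim.SGD(net.parameters(), lr=1e-2)


def self_replication_step() -> float:
    inputs, targets = [], []
    for p_idx, p in enumerate(net.parameters()):
        flat = p.detach().flatten()
        for w_idx, w in enumerate(flat):
            # encode "which weight am I" as a small coordinate vector (illustrative encoding)
            inputs.append([float(p_idx), w_idx / len(flat), 0.0])
            targets.append([w.item()])
    x = torch.tensor(inputs)
    y = torch.tensor(targets)
    loss = F.mse_loss(net(x), y)   # reproduce the network's own current weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```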
|
||||
Furthermore, the research extends this concept by proposing an "artificial chemistry" environment. This framework involves populations of interacting neural networks, where self-replication dynamics can lead to emergent properties and complex ecosystem behaviors. This work offers a novel computational perspective on self-replication, providing tools and insights for exploring artificial life and the principles of self-organization in computational systems. For a detailed discussion, please refer to the publication by <Cite bibtexKey="gabor2019self" />.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/1_self_replication_pca_space.jpg" alt="PCA visualization showing clusters or trajectories of self-replicating networks in a latent space" width={800} height={600} />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Visualization of self-replicator populations evolving in a PCA-reduced weight space.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
|
||||
src="/figures/1_self_replication_pca_space.jpg"
|
||||
alt="PCA visualization showing clusters or trajectories of self-replicating networks in a latent space"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="Visualization of self-replicator populations evolving in a PCA-reduced weight space."
|
||||
/>
|
||||
Furthermore, the research extends this concept by proposing an "artificial chemistry" environment. This framework involves populations of interacting neural networks, where self-replication dynamics can lead to emergent properties and complex ecosystem behaviors. This work offers a novel computational perspective on self-replication, providing tools and insights for exploring artificial life and the principles of self-organization in computational systems.
|
||||
|
||||
@@ -1,16 +1,31 @@
|
||||
---
|
||||
title: "Deep Audio Baselines"
|
||||
tags: [research, deep-learning, audio-classification, paralinguistics, speech-analysis]
|
||||
tags: [
|
||||
deep-learning,
|
||||
audio-classification,
|
||||
paralinguistics,
|
||||
speech-analysis,
|
||||
interspeech-2019,
|
||||
compare-challenge,
|
||||
sleepiness-detection,
|
||||
end-to-end-learning,
|
||||
cnn
|
||||
]
|
||||
excerpt: "Deep learning audio baseline for Interspeech 2019 ComParE challenge."
|
||||
teaser: "/figures/3_deep_neural_baselines_teaser.jpg"
|
||||
icon: "/figures/3_deep_neural_baselines.jpg"
|
||||
|
||||
---
|
||||
|
||||
# Deep Audio Baselines
|
||||
|
||||
|
||||
This research, presented as part of the Interspeech 2019 Computational Paralinguistics Challenge (ComParE), specifically addresses the Sleepiness Sub-Challenge. We introduced a robust, end-to-end deep learning methodology designed to serve as a strong baseline for audio classification tasks within the paralinguistics domain.
|
||||
|
||||
<FloatingImage
|
||||
src="/figures/3_deep_neural_baselines.jpg"
|
||||
alt="Illustration for Deep Audio Baselines research"
|
||||
width={300}
|
||||
height={200}
|
||||
float="right"
|
||||
caption="Visual representation of the Deep Audio Baselines architecture"
|
||||
/>
|
||||
The core innovation lies in utilizing a deep neural network architecture (e.g., CNNs, potentially combined with recurrent layers) that directly processes raw or minimally processed audio data (such as spectrograms). This end-to-end approach bypasses the need for extensive, task-specific manual feature engineering, which is often a complex and time-consuming aspect of traditional audio analysis pipelines.
|
||||
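A minimal sketch of such an end-to-end pipeline is shown below; the sampling rate, mel settings, and layer sizes are illustrative and not the challenge baseline's exact configuration.

```python
# Minimal sketch of an end-to-end pipeline: waveform -> mel-spectrogram -> small CNN.
import torch
import torch.nn as nn
import torchaudio

mel = torchaudio.transforms.MelSpectrogram(sample_rate=16_000, n_mels=64)

classifier = nn.Sequential(
    nn.Conv2d(1, 16, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
    nn.Conv2d(16, 32, kernel_size=3, padding=1), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(),
    nn.Linear(32, 2),                               # e.g. "sleepy" vs. "not sleepy"
)

waveform = torch.randn(1, 16_000)                   # one second of (dummy) audio
logits = classifier(mel(waveform).unsqueeze(0))     # shape: (1, 2)
```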
|
||||
Our proposed baseline model achieved performance comparable to established state-of-the-art methods on the sleepiness detection task. Furthermore, the architecture was designed with adaptability in mind, demonstrating its potential applicability to a broader range of audio classification challenges beyond sleepiness detection. This work underscores the power of deep learning to automatically extract relevant features from audio signals for complex paralinguistic tasks. For further details, please consult the publication by <Cite bibtexKey="elsner2019deep" />.
|
||||
@@ -1,17 +1,19 @@
|
||||
---
|
||||
title: "Soccer Team Vectors"
|
||||
tags: [machine-learning, representation-learning, sports-analytics, similarity-search]
|
||||
tags: [machine-learning, representation-learning, sports-analytics, similarity-search, soccer, embeddings, team-performance, prediction]
|
||||
excerpt: "STEVE learns soccer team embeddings from match data for analysis."
|
||||
teaser: "/figures/2_steve_algo.jpg"
|
||||
|
||||
---
|
||||
|
||||
<div className="md:float-right md:w-1/2 lg:w-2/5 md:ml-6 mb-4">
|
||||
<Image src="/figures/2_steve_algo.jpg" alt="Diagram of the STEVE methodology showing data input, model training, and applications like similarity search and ranking." width={1024} height={768} />
|
||||
</div>
|
||||
|
||||
This research introduces **STEVE (Soccer Team Vectors)**, a novel methodology for learning meaningful, real-valued vector representations (embeddings) for professional soccer teams. The primary goal is to capture intrinsic team characteristics and relationships within a continuous vector space, such that teams with similar playing styles, strengths, or performance levels are positioned closely together.
|
||||
|
||||
<FloatingImage
|
||||
src="/figures/2_steve_algo.jpg"
|
||||
alt="Diagram of the STEVE methodology showing data input, model training, and applications like similarity search and ranking."
|
||||
width={1024}
|
||||
height={768}
|
||||
float="right"
|
||||
/>
|
||||
Leveraging widely available public data from soccer matches (e.g., results, possibly performance statistics), STEVE employs machine learning techniques to generate these low-dimensional team vectors.
|
||||
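As a hedged illustration of how such embeddings can be learned (the outcome head, dimensions, and training target below are assumptions, not the exact STEVE objective):

```python
# Sketch: learn team vectors by predicting match outcomes from the two teams' embeddings.
import torch
import torch.nn as nn


class TeamEmbeddingModel(nn.Module):
    def __init__(self, n_teams: int, dim: int = 16):
        super().__init__()
        self.teams = nn.Embedding(n_teams, dim)
        self.head = nn.Linear(2 * dim, 3)   # home win / draw / away win

    def forward(self, home_id: torch.Tensor, away_id: torch.Tensor) -> torch.Tensor:
        pair = torch.cat([self.teams(home_id), self.teams(away_id)], dim=-1)
        return self.head(pair)


model = TeamEmbeddingModel(n_teams=500)
logits = model(torch.tensor([17]), torch.tensor([42]))   # hypothetical team ids
# After training on historical results, model.teams.weight holds one vector per team;
# nearest neighbours in this space correspond to teams the model considers similar.
```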
|
||||
The utility of these learned representations is demonstrated through several downstream applications:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "3D Primitive Segmentation"
|
||||
tags: [research, computer-vision, 3d-processing, point-clouds, segmentation, deep-learning, genetic-algorithms]
|
||||
tags: [computer-vision, 3d-processing, point-clouds, segmentation, deep-learning, genetic-algorithms]
|
||||
excerpt: "Hybrid method segments/fits primitives in large 3D point clouds."
|
||||
teaser: "/figures/4_point_cloud_segmentation_teaser.jpg"
|
||||
|
||||
|
||||
@@ -1,24 +1,16 @@
|
||||
---
|
||||
title: "PEOC OOD Detection"
|
||||
tags: [deep-reinforcement-learning, out-of-distribution-detection, safety, anomaly-detection]
|
||||
tags: [deep-reinforcement-learning, out-of-distribution-detection, safety, anomaly-detection, policy-entropy, machine-learning-safety]
|
||||
excerpt: "PEOC uses policy entropy for OOD detection in deep RL."
|
||||
teaser: "/figures/6_ood_pipeline.jpg"
|
||||
|
||||
---
|
||||
|
||||
<InfoBox title="Performance Comparison">
|
||||
<Image src="/figures/6_ood_performance.jpg" alt="Graph comparing PEOC performance against other OOD detection methods" width={800} height={600} />
|
||||
</InfoBox>
|
||||
|
||||
Ensuring the safety and reliability of deep reinforcement learning (RL) agents deployed in real-world environments necessitates the ability to detect when the agent encounters states significantly different from those seen during training (i.e., out-of-distribution or OOD states). This research introduces **PEOC (Policy Entropy-based OOD Classifier)**, a novel and computationally efficient method designed for this purpose.
|
||||
|
||||
<CenteredImage src="/figures/6_ood_pipeline.jpg" alt="Diagram showing the PEOC pipeline integrated with a deep RL agent" width={1000} height={550} caption="Conceptual pipeline of the PEOC method for OOD detection in deep RL." />
|
||||
The core idea behind PEOC is to leverage the entropy of the agent's learned policy as an intrinsic indicator of state familiarity. High policy entropy often correlates with uncertainty, suggesting the agent is in a less familiar or potentially OOD state. PEOC utilizes this readily available metric as a scoring function to distinguish between in-distribution and out-of-distribution inputs.
|
||||
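In a hedged sketch, the scoring function amounts to computing the entropy of the policy's action distribution and comparing it against a threshold calibrated on in-distribution states (the calibration choice below is an assumption, not a prescription from the paper):

```python
# Sketch of entropy-based OOD scoring for a categorical policy.
import torch


def policy_entropy(action_logits: torch.Tensor) -> torch.Tensor:
    """Shannon entropy of the categorical policy defined by `action_logits`."""
    log_probs = torch.log_softmax(action_logits, dim=-1)
    return -(log_probs.exp() * log_probs).sum(dim=-1)


def is_ood(action_logits: torch.Tensor, threshold: float) -> torch.Tensor:
    # `threshold` would typically be a high percentile of the entropy distribution
    # observed on in-distribution (training) states.
    return policy_entropy(action_logits) > threshold
```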
|
||||
PEOC's effectiveness was rigorously evaluated within procedurally generated environments, which allow for controlled introduction of novel states. Its performance was benchmarked against several state-of-the-art one-class classification methods adapted for the RL context. The results demonstrate that PEOC achieves competitive performance in identifying OOD states while being simple to implement and integrate into existing deep RL frameworks.
|
||||
|
||||
Furthermore, this work contributes a structured benchmarking process specifically designed for evaluating OOD classification methods within the context of reinforcement learning, providing a valuable framework for assessing the reliability of such safety-critical components. For a detailed methodology and evaluation, please refer to the publication by <Cite bibtexKey="sedlmeier2020policy" />.
|
||||
<CenteredImage src="/figures/6_ood_performance.jpg" alt="Graph comparing PEOC performance against other OOD detection methods" width={800} height={600} caption="Performance Comparison" />
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/6_ood_pipeline.jpg" alt="Diagram showing the PEOC pipeline integrated with a deep RL agent" width={1000} height={550} />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Conceptual pipeline of the PEOC method for OOD detection in deep RL.</figcaption>
|
||||
</div>
|
||||
Furthermore, this work contributes a structured benchmarking process specifically designed for evaluating OOD classification methods within the context of reinforcement learning, providing a valuable framework for assessing the reliability of such safety-critical components. For a detailed methodology and evaluation, please refer to the publication by <Cite bibtexKey="sedlmeier2020policy" />.
|
||||
|
||||
@@ -1,19 +1,23 @@
|
||||
---
|
||||
title: "AV Meantime Coverage"
|
||||
tags: [research, autonomous-vehicles, shared-mobility, transportation-systems, urban-computing, geoinformatics]
|
||||
tags: [autonomous-vehicles, shared-mobility, transportation-systems, urban-computing, geoinformatics, mobility-as-a-service, resource-optimization]
|
||||
excerpt: "Analyzing service coverage of parked AVs during downtime ('meantime')."
|
||||
teaser: "/figures/5_meantime_coverage.jpg"
|
||||
---
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/5_meantime_coverage.jpg" alt="Map visualization showing estimated service coverage areas from parked autonomous vehicles" width={800} height={600} className="w-4/5 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Visualization of estimated service coverage achievable by utilizing parked autonomous vehicles.</figcaption>
|
||||
</div>
|
||||
|
||||
|
||||
This research investigates a potential transitional model towards future transportation systems, focusing on **privately owned shared autonomous vehicles (SAVs)**. The central idea, termed "What to do in the Meantime," explores the feasibility of leveraging these vehicles for ride-sharing services during the significant portions of the day when they are typically parked and idle (e.g., while the owner is at work).
|
||||
|
||||
To assess the potential impact and viability of such a model, we developed and applied **two distinct reachability analysis methods**. These methods estimate the geographic area that could be effectively served by SAVs originating from their parking locations within given time constraints.
|
||||
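As a simplified illustration of what a reachability computation can look like (this is not the paper's method; the graph structure, edge attribute name, and time budget are placeholders), one can run a time-bounded shortest-path search from a vehicle's parking location on a road graph:

```python
# Illustrative reachability sketch: nodes reachable within a travel-time budget.
import networkx as nx


def reachable_nodes(road_graph: nx.DiGraph, start: int, budget_s: float) -> set[int]:
    """Nodes reachable from `start` within `budget_s` seconds of driving time."""
    times = nx.single_source_dijkstra_path_length(
        road_graph, start, cutoff=budget_s, weight="travel_time"
    )
    return set(times)


# Example: a toy three-node network with per-edge travel times in seconds.
g = nx.DiGraph()
g.add_edge(0, 1, travel_time=120.0)
g.add_edge(1, 2, travel_time=300.0)
print(reachable_nodes(g, start=0, budget_s=200.0))   # {0, 1}
```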
|
||||
<CenteredImage
|
||||
src="/figures/5_meantime_coverage.jpg"
|
||||
alt="Map visualization showing estimated service coverage areas from parked autonomous vehicles"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="Visualization of estimated service coverage achievable by utilizing parked autonomous vehicles."
|
||||
maxWidth="75%"
|
||||
/>
|
||||
The analysis was conducted using a real-world dataset representing mobility patterns and parking durations in the greater **Munich metropolitan area**. Key findings reveal the significant influence of spatio-temporal factors on potential service coverage:
|
||||
|
||||
* **Time Dependency:** Service potential fluctuates considerably throughout the day, heavily impacted by rush hours, which affect travel times and vehicle availability.
|
||||
@@ -21,7 +25,11 @@ The analysis was conducted using a real-world dataset representing mobility patt
|
||||
|
||||
This study provides quantitative insights into the opportunities and limitations of utilizing the "meantime" of privately owned autonomous vehicles, contributing to the understanding of how future shared mobility systems might evolve. <Cite bibtexKey="illium2020meantime" />
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/5_meantime_availability.jpg" alt="Graph or map showing the temporal or spatial availability of parked vehicles" width={800} height={600} className="w-4/5 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Analysis of spatio-temporal availability patterns of potentially shareable parked vehicles.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
|
||||
src="/figures/5_meantime_availability.jpg"
|
||||
alt="Graph or map showing the temporal or spatial availability of parked vehicles"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="Analysis of spatio-temporal availability patterns of potentially shareable parked vehicles."
|
||||
maxWidth="75%"
|
||||
/>
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "Surgical-Mask Detection"
|
||||
tags: [research, audio-classification, deep-learning, data-augmentation, computer-vision, paralinguistics]
|
||||
tags: [audio-classification, deep-learning, data-augmentation, computer-vision, paralinguistics, speech-processing, audio-analysis, signal-processing, spectrograms]
|
||||
excerpt: "CNN mask detection in speech using augmented spectrograms."
|
||||
teaser: "/figures/7_mask_models.jpg"
|
||||
|
||||
@@ -10,14 +10,22 @@ This study investigates the efficacy of various **data augmentation techniques**
|
||||
|
||||
We systematically evaluated the impact of data augmentation when training **Convolutional Neural Networks (CNNs)** for this binary classification task. The input to the networks consisted of mel-spectrograms derived from voice samples. The effectiveness of augmentation strategies (such as frequency masking, time masking, or combined approaches like SpecAugment) was assessed across **four different CNN architectures**.
|
||||
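For illustration, spectrogram-level masking of this kind can be expressed in a few lines; the mask sizes below are placeholders and the study's exact augmentation policy may differ.

```python
# Sketch of SpecAugment-style masking applied to a mel-spectrogram tensor.
import torch
import torchaudio.transforms as T

freq_mask = T.FrequencyMasking(freq_mask_param=12)   # mask up to 12 mel bins
time_mask = T.TimeMasking(time_mask_param=20)        # mask up to 20 time frames


def augment(mel_spec: torch.Tensor) -> torch.Tensor:
    """Apply one random frequency mask and one random time mask to a (..., mels, time) tensor."""
    return time_mask(freq_mask(mel_spec))


augmented = augment(torch.randn(1, 64, 256))          # dummy mel-spectrogram batch
```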
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/7_mask_mels.jpg" alt="Examples of mel-spectrograms of speech with and without a surgical mask" width={800} height={400} className="w-4/5 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Mel-spectrogram representations of speech signals used as input for CNNs.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
|
||||
src="/figures/7_mask_mels.jpg"
|
||||
alt="Examples of mel-spectrograms of speech with and without a surgical mask"
|
||||
width={800}
|
||||
height={400}
|
||||
caption="Mel-spectrogram representations of speech signals used as input for CNNs."
|
||||
maxWidth="75%"
|
||||
/>
|
||||
|
||||
The core finding of this research is that applying appropriate data augmentation directly to the spectrogram inputs significantly enhances the performance and generalization capabilities of the CNN models for surgical mask detection. The augmented models demonstrated improved accuracy, robustness, and notably **surpassed many established benchmark results** from the relevant ComParE (Computational Paralinguistics Challenge) tasks. This highlights the importance of data augmentation as a crucial component in building effective deep learning models for audio classification, particularly when dealing with limited or variable datasets. For a detailed description of the methods and results, please refer to <Cite bibtexKey="illium2020surgical" />.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/7_mask_models.jpg" alt="Diagrams illustrating the different CNN architectures tested" width={800} height={600} className="w-full mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Overview of the different Convolutional Neural Network architectures evaluated.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
|
||||
src="/figures/7_mask_models.jpg"
|
||||
alt="Diagrams illustrating the different CNN architectures tested"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="Overview of the different Convolutional Neural Network architectures evaluated."
|
||||
maxWidth="100%"
|
||||
/>
|
||||
@@ -1,15 +1,12 @@
|
||||
---
|
||||
title: "Anomalous Sound Features"
|
||||
tags: [research, anomaly-detection, audio-classification, deep-learning, transfer-learning, feature-extraction]
|
||||
tags: [anomaly-detection, audio-classification, deep-learning, transfer-learning, feature-extraction, machine-learning, predictive-maintenance, dcase, sound-analysis, unsupervised-learning]
|
||||
excerpt: "Pretrained networks extract features for anomalous industrial sound detection."
|
||||
teaser: "/figures/8_anomalous_sound_teaser.jpg"
|
||||
|
||||
---
|
||||
|
||||
Detecting anomalous sounds, particularly in industrial settings, is crucial for predictive maintenance and safety. This often involves unsupervised or semi-supervised approaches where models learn a representation of 'normal' sounds. This research explores the effectiveness of leveraging **transfer learning** for this task by using **pretrained deep neural networks** as fixed feature extractors.
|
||||
<div className="float-right w-1/5 ml-6 mb-4">
|
||||
<Image src="/figures/8_anomalous_sound_features.jpg" alt="Diagram showing features extracted by different pretrained networks visualized in a latent space" width={800} height={600} />
|
||||
</div>
|
||||
|
||||
The core methodology involves:
|
||||
|
||||
1. Taking pretrained networks trained on large datasets from various domains.
|
||||
@@ -17,6 +14,15 @@ The core methodology involves:
|
||||
3. Modeling the distribution of features extracted from 'normal' sounds using a **Gaussian Mixture Model (GMM)**.
|
||||
4. Identifying anomalous sounds as those whose extracted features have low likelihood under the learned normality model (see the sketch after this list).
|
||||
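Steps 3 and 4 can be sketched with off-the-shelf tooling as follows; the feature arrays are stand-ins for whatever the pretrained networks produce, and the number of mixture components is an assumption:

```python
# Minimal sketch: fit a GMM on features of normal sounds, score new clips by log-likelihood.
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
normal_features = rng.normal(size=(500, 128))     # placeholder for pretrained-network features

gmm = GaussianMixture(n_components=8, covariance_type="full", random_state=0)
gmm.fit(normal_features)

test_features = rng.normal(size=(10, 128))
anomaly_scores = -gmm.score_samples(test_features)   # low likelihood -> high anomaly score
```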
|
||||
<FloatingImage
|
||||
src="/figures/8_anomalous_sound_features.jpg"
|
||||
alt="Diagram showing features extracted by different pretrained networks visualized in a latent space"
|
||||
width={800}
|
||||
height={600}
|
||||
float="right"
|
||||
caption="Features extracted by different pretrained networks visualized in a latent space for anomalous sound detection."
|
||||
/>
|
||||
|
||||
A key aspect of this study was comparing feature extractors pretrained on distinctly different domains:
|
||||
|
||||
- **Images** (e.g., models trained on ImageNet)
|
||||
@@ -25,4 +31,6 @@ A key aspect of this study was comparing feature extractors pretrained on distin
|
||||
|
||||
These were evaluated alongside a baseline autoencoder trained directly on the target machine sound data.
|
||||
|
||||
Surprisingly, the results indicated that features derived from networks pretrained on **music data** often yielded the best performance in detecting anomalous industrial sounds, frequently surpassing features from environmental sound models and the autoencoder baseline. This counter-intuitive finding suggests that direct domain similarity between the pretraining dataset and the target application data is not necessarily the primary factor determining the utility of transferred features for anomaly detection. <Cite bibtexKey="muller2020analysis" />
|
||||
Surprisingly, the results indicated that features derived from networks pretrained on **music data** often yielded the best performance in detecting anomalous industrial sounds, frequently surpassing features from environmental sound models and the autoencoder baseline. This counter-intuitive finding suggests that direct domain similarity between the pretraining dataset and the target application data is not necessarily the primary factor determining the utility of transferred features for anomaly detection.
|
||||
|
||||
<Cite bibtexKey="muller2020analysis" />
|
||||
@@ -1,12 +1,16 @@
|
||||
---
|
||||
title: "Sound Anomaly Transfer"
|
||||
tags: [research, anomaly-detection, audio-classification, deep-learning, transfer-learning, feature-extraction, computer-vision]
|
||||
tags: [anomaly-detection, audio-classification, deep-learning, transfer-learning, feature-extraction, computer-vision, industrial-monitoring, machine-learning]
|
||||
excerpt: "Image nets detect acoustic anomalies in machinery via spectrograms."
|
||||
teaser: "/figures/9_image_transfer_sound_teaser.jpg"
|
||||
icon: "/figures/9_image_transfer_sound_workflow.jpg"
|
||||
---
|
||||
|
||||
This study investigates an effective approach for **acoustic anomaly detection** in industrial machinery, focusing on identifying malfunctions through sound analysis. The core methodology leverages **transfer learning** by repurposing deep neural networks originally trained for large-scale **image classification** (e.g., on ImageNet) as powerful feature extractors for audio data represented as **mel-spectrograms**.
|
||||
<FloatingImage src="/figures/9_image_transfer_sound_workflow.jpg" alt="Workflow of sound anomaly detection using image transfer learning" width={800} height={400} float="right" caption="Overall workflow for acoustic anomaly detection using transfer learning from image classification models." />
|
||||
|
||||
This study investigates an effective approach for **acoustic anomaly detection** in industrial machinery, focusing on identifying malfunctions through sound analysis.
|
||||
|
||||
The core methodology leverages **transfer learning** by repurposing deep neural networks originally trained for large-scale **image classification** (e.g., on ImageNet) as powerful feature extractors for audio data represented as **mel-spectrograms**.
|
||||
|
||||
The process involves:
|
||||
1. Converting audio signals from machinery into mel-spectrogram images.
|
||||
@@ -20,9 +24,6 @@ Key findings from the experiments, conducted across different machine types and
|
||||
* Features extracted using **ResNet architectures consistently yielded superior anomaly detection performance** compared to those from AlexNet and SqueezeNet.
|
||||
* **GMMs and OC-SVMs proved highly effective** as anomaly detection classifiers when applied to these transferred features (a minimal sketch of this pipeline follows below).
|
||||
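A hedged sketch of this transfer-learning pipeline: an ImageNet-pretrained ResNet with its classification head removed yields features for spectrogram "images", which are then scored by a one-class classifier. Preprocessing details and hyperparameters are placeholders, and the weight-enum import assumes a recent torchvision version.

```python
# Sketch: image-pretrained ResNet as a fixed feature extractor + one-class SVM detector.
import torch
import torch.nn as nn
from torchvision.models import resnet18, ResNet18_Weights
from sklearn.svm import OneClassSVM

backbone = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1)
backbone.fc = nn.Identity()                 # keep the 512-d penultimate features
backbone.eval()

with torch.no_grad():
    spectrogram_images = torch.randn(32, 3, 224, 224)   # dummy normal-machine spectrograms
    features = backbone(spectrogram_images).numpy()

detector = OneClassSVM(kernel="rbf", nu=0.05).fit(features)
scores = detector.decision_function(features)            # lower scores -> more anomalous
```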
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/9_image_transfer_sound_mels.jpg" alt="Examples of mel-spectrograms from normal and anomalous machine sounds" width={1024} height={400} className="w-5/6 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Mel-spectrogram examples of normal vs. anomalous machine sounds.</figcaption>
|
||||
</div>
|
||||
<CenteredImage src="/figures/9_image_transfer_sound_mels.jpg" alt="Examples of mel-spectrograms from normal and anomalous machine sounds" width={1024} height={400} caption="Mel-spectrogram examples illustrating the difference between normal and anomalous machine sounds." maxWidth="75%" />
|
||||
|
||||
This work demonstrates the surprising effectiveness of transferring knowledge from the visual domain to the acoustic domain for anomaly detection, offering a robust and readily implementable method for monitoring industrial equipment. <Cite bibtexKey="muller2020acoustic" />
|
||||
@@ -1,13 +1,19 @@
|
||||
---
|
||||
title: "Acoustic Leak Detection"
|
||||
tags: [anomaly-detection, audio-processing, deep-learning, signal-processing, real-world-application]
|
||||
tags: [anomaly-detection, audio-processing, deep-learning, signal-processing, real-world-application, water-networks, infrastructure-monitoring]
|
||||
excerpt: "Anomaly detection models for acoustic leak detection in water networks."
|
||||
teaser: "/figures/10_water_networks_teaser.jpg"
|
||||
icon: "/figures/10_water_networks_approach.jpg"
|
||||
---
|
||||
|
||||
Detecting leaks in vast municipal water distribution networks is critical for resource conservation and infrastructure maintenance. This study introduces and evaluates an **anomaly detection approach for acoustic leak identification**, specifically designed with **energy efficiency** and **ease of deployment** as key considerations.
|
||||
|
||||
<FloatingImage
|
||||
src="/figures/10_water_networks_approach.jpg"
|
||||
alt="Diagram illustrating the anomaly detection approach for acoustic leak identification in water networks."
|
||||
width={400}
|
||||
height={300}
|
||||
float="right"
|
||||
caption="Illustration of the anomaly detection approach for acoustic leak identification in water networks."
|
||||
/>
|
||||
The methodology leverages acoustic recordings captured by microphones deployed directly on a section of a real-world **municipal water network**. Instead of requiring continuous monitoring, the proposed system mimics human inspection routines by performing **intermittent checks**, significantly reducing power consumption and data load.
|
||||
|
||||
Various **anomaly detection models**, ranging from traditional "shallow" methods (e.g., GMMs, OC-SVMs) to more complex **deep learning architectures** (e.g., autoencoders, potentially CNNs on spectrograms), were trained using data representing normal network operation. These models were then evaluated on their ability to distinguish anomalous sounds indicative of leaks.
|
||||
@@ -16,9 +22,13 @@ Key findings include:
|
||||
* Detecting leaks occurring acoustically **nearby** the sensor proved relatively straightforward for most evaluated models.
|
||||
* **Neural network-based methods demonstrated superior performance** in identifying leaks originating **further away** from the sensor, showcasing their ability to capture more subtle acoustic signatures amidst background noise.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/10_water_networks_mel.jpg" alt="Mel-spectrogram examples showing acoustic signatures of normal operation versus leak sounds" width={800} height={400} className="w-[90%] mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Mel-spectrogram visualizations comparing normal sounds and leak-related acoustic patterns.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
|
||||
src="/figures/10_water_networks_mel.jpg"
|
||||
alt="Mel-spectrogram examples showing acoustic signatures of normal operation versus leak sounds"
|
||||
width={800}
|
||||
height={400}
|
||||
caption="Mel-spectrogram visualizations comparing normal sounds and leak-related acoustic patterns."
|
||||
maxWidth="100%"
|
||||
/>
|
||||
|
||||
This research validates the feasibility of using anomaly detection for practical, energy-efficient acoustic leak monitoring in water networks, highlighting the advantages of deep learning techniques for detecting more challenging, distant leaks. <Cite bibtexKey="muller2021acoustic" />
|
||||
@@ -1,21 +1,16 @@
|
||||
---
|
||||
title: "Primate Vocalization Classification"
|
||||
tags: [research, deep-learning, audio-classification, bioacoustics, conservation-technology, recurrent-neural-networks]
|
||||
tags: [deep-learning, audio-classification, bioacoustics, conservation-technology, recurrent-neural-networks, machine-learning, wildlife-monitoring, pytorch, animal-conservation, bayesian-optimization]
|
||||
excerpt: "Deep BiLSTM classifies primate vocalizations for acoustic wildlife monitoring."
|
||||
teaser: /figures/11_recurrent_primate_workflow.jpg
|
||||
icon: /figures/11_recurrent_primate_workflow.jpg
|
||||
|
||||
---
|
||||
|
||||
# Primate Vocalization Classification
|
||||
|
||||
<InfoBox title="Project Details">
|
||||
- **Contribution**: Model architecture design, training, and evaluation.
|
||||
- **Tech**: PyTorch, Deep Learning, Bayesian Optimization
|
||||
- **Publication**: [ICPR 2020](https://ieeexplore.ieee.org/abstract/document/9412586)
|
||||
</InfoBox>
|
||||
Acoustic monitoring offers a powerful, non-invasive tool for wildlife conservation, enabling the study and tracking of animal populations through their vocalizations.
|
||||
|
||||
Acoustic monitoring offers a powerful, non-invasive tool for wildlife conservation, enabling the study and tracking of animal populations through their vocalizations. This research focuses on improving the automated classification of **primate vocalizations**, a challenging task due to call variability and environmental noise.
|
||||
This research focuses on improving the automated classification of **primate vocalizations**, a challenging task due to call variability and environmental noise.
|
||||
<FloatingImage src="/figures/11_recurrent_primate_workflow.jpg" alt="Workflow diagram for recurrent neural network primate vocalization classification" width={400} height={225} caption="Overall workflow of the deep recurrent neural network for primate vocalization classification." float="right" />
|
||||
|
||||
We propose a novel **deep, recurrent neural network architecture** specifically designed for this purpose. The core of the model utilizes **bidirectional Long Short-Term Memory (BiLSTM) networks**, which are adept at capturing temporal dependencies within the audio signals (represented, for example, as spectrograms or MFCCs).
|
||||
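A minimal sketch of such a bidirectional recurrent classifier is shown below; the feature dimension, hidden size, number of layers, and class count are placeholders rather than the published configuration.

```python
# Illustrative BiLSTM classifier over per-frame audio features (e.g., MFCC frames).
import torch
import torch.nn as nn


class BiLSTMClassifier(nn.Module):
    def __init__(self, n_features: int = 40, hidden: int = 128, n_classes: int = 5):
        super().__init__()
        self.lstm = nn.LSTM(n_features, hidden, num_layers=2,
                            batch_first=True, bidirectional=True)
        self.out = nn.Linear(2 * hidden, n_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, time, n_features); classify from the final time step's states
        states, _ = self.lstm(x)
        return self.out(states[:, -1])


logits = BiLSTMClassifier()(torch.randn(4, 200, 40))   # shape: (4, 5)
```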
|
||||
@@ -26,9 +21,13 @@ To further enhance classification performance, particularly in potentially imbal
|
||||
|
||||
Hyperparameter tuning, a critical step for optimizing deep learning models, was systematically performed using **Bayesian optimization**.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/11_recurrent_primate_results.jpg" alt="Graph showing classification accuracy for primate calls" width={800} height={450} className="w-[90%] mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Performance results demonstrating classification accuracy.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
|
||||
src="/figures/11_recurrent_primate_results.jpg"
|
||||
alt="Graph showing classification accuracy for primate calls"
|
||||
width={800}
|
||||
height={450}
|
||||
caption="Performance results demonstrating classification accuracy of the deep recurrent model on primate calls."
|
||||
maxWidth="100%"
|
||||
/>
|
||||
|
||||
The model's effectiveness was evaluated on a challenging real-world dataset comprising diverse primate calls recorded at an **African wildlife sanctuary**. The results demonstrate the capability of the proposed deep recurrent architecture for accurate primate vocalization classification, underscoring the potential of advanced deep learning techniques combined with automated acoustic monitoring for practical wildlife conservation efforts. <Cite bibtexKey="muller2021deep" />
|
||||
The model's effectiveness was evaluated on a challenging real-world dataset comprising diverse primate calls recorded at an **African wildlife sanctuary**. The results demonstrate the capability of the proposed deep recurrent architecture for accurate primate vocalization classification, underscoring the potential of advanced deep learning techniques combined with automated acoustic monitoring for practical wildlife conservation efforts. <Cite bibtexKey="muller2021deep" />
|
||||
|
||||
@@ -1,17 +1,23 @@
|
||||
---
|
||||
title: "Audio Vision Transformer"
|
||||
tags: [research, deep-learning, audio-classification, computer-vision, attention-mechanisms, transformers]
|
||||
tags: [deep-learning, audio-classification, computer-vision, attention-mechanisms, transformers, mel-spectrograms, ComParE-2021]
|
||||
excerpt: "Vision Transformer on spectrograms for audio classification, with data augmentation."
|
||||
teaser: /figures/12_vision_transformer_teaser.jpg
|
||||
|
||||
---
|
||||
|
||||
This research explores the application of the **Vision Transformer (ViT)** architecture, originally designed for image processing, to the domain of audio classification by operating on **mel-spectrogram representations**. The ViT's attention mechanisms offer a potentially powerful alternative to convolutional approaches for capturing relevant patterns in spectrogram data.
|
||||
This research explores the application of the **Vision Transformer (ViT)** architecture, originally designed for image processing, to the domain of audio classification by operating on **mel-spectrogram representations**.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/12_vision_transformer_models.jpg" alt="Diagram illustrating the Vision Transformer architecture adapted for mel-spectrogram input" width={800} height={600} className="w-4/5 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Adapting the Vision Transformer architecture for processing mel-spectrograms.</figcaption>
|
||||
</div>
|
||||
The ViT's attention mechanisms offer a potentially powerful alternative to convolutional approaches for capturing relevant patterns in spectrogram data.
|
||||
|
||||
<CenteredImage
|
||||
src="/figures/12_vision_transformer_models.jpg"
|
||||
alt="Diagram illustrating the Vision Transformer architecture adapted for mel-spectrogram input"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="Adapting the Vision Transformer architecture for processing mel-spectrograms."
|
||||
maxWidth="100%"
|
||||
/>
|
||||
|
||||
Key aspects of the methodology include:
|
||||
|
||||
@@ -20,6 +26,9 @@ Key aspects of the methodology include:
|
||||
* **Sample Weighting:** Utilizing sample weighting strategies to address potential class imbalances or focus on specific aspects of the dataset.
|
||||
* **Patching Strategy:** Introducing and evaluating an **overlapping vertical patching** method, potentially better suited for capturing temporal structures in spectrograms compared to standard non-overlapping patches (see the sketch after this list).
|
||||
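For illustration, overlapping vertical patches can be produced by sliding a full-height window along the time axis with a stride smaller than the window width; the sizes below are placeholders.

```python
# Sketch: slice a mel-spectrogram into overlapping full-height strips along the time axis.
import torch


def vertical_patches(mel: torch.Tensor, width: int = 16, stride: int = 8) -> torch.Tensor:
    """mel: (mels, time) -> (num_patches, mels * width); patches overlap when stride < width."""
    strips = mel.unfold(dimension=1, size=width, step=stride)   # (mels, num_patches, width)
    return strips.permute(1, 0, 2).reshape(strips.shape[1], -1)


tokens = vertical_patches(torch.randn(64, 256))   # (31, 1024) patch tokens for the transformer
```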
|
||||
The effectiveness of this "Mel-Vision Transformer" approach was demonstrated within the context of the **ComParE 2021 (Computational Paralinguistics Challenge)**. The proposed model achieved notable performance, **surpassing many established single-model baseline results** on the challenge tasks.
|
||||
The effectiveness of this "Mel-Vision Transformer" approach was demonstrated within the context of the **ComParE 2021 (Computational Paralinguistics Challenge)**.
|
||||
The proposed model achieved notable performance, **surpassing many established single-model baseline results** on the challenge tasks.
|
||||
|
||||
Furthermore, the study includes an analysis of different parameter configurations and architectural choices, providing insights into optimizing ViT models for audio processing tasks. This work showcases the adaptability and potential of transformer architectures, particularly ViT, for effectively tackling audio classification challenges. <Cite bibtexKey="illium2021visual" />
|
||||
Furthermore, the study includes an analysis of different parameter configurations and architectural choices, providing insights into optimizing ViT models for audio processing tasks.
|
||||
|
||||
This work showcases the adaptability and potential of transformer architectures, particularly ViT, for effectively tackling audio classification challenges. <Cite bibtexKey="illium2021visual" />
|
||||
@@ -1,20 +1,31 @@
|
||||
---
|
||||
title: "Tasked Self-Replication"
|
||||
tags: [research, artificial-life, complex-systems, neural-networks, self-organization, multi-task-learning]
|
||||
tags: [artificial-life, complex-systems, neural-networks, self-organization, multi-task-learning, self-replication, artificial-chemistry, evolution, computational-systems, guided-evolution, artificial-intelligence]
|
||||
excerpt: "Self-replicating networks perform tasks, exploring stabilization in artificial chemistry."
|
||||
teaser: "/figures/13_sr_teaser.jpg"
|
||||
|
||||
---
|
||||
|
||||
Building upon the concept of self-replicating neural networks, this research explores the integration of **auxiliary functional goals** alongside the primary objective of self-replication. The aim is to create networks that can not only reproduce their own weights but also perform useful computations or interact meaningfully with an environment simultaneously.
|
||||
Building upon the concept of self-replicating neural networks <Cite bibtexKey="gabor2019self" />, this research explores the integration of **auxiliary functional goals** alongside the primary objective of self-replication. The aim is to create networks that can not only reproduce their own weights but also perform useful computations or interact meaningfully with an environment simultaneously.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/13_sr_analysis.jpg" alt="Analysis graphs or visualizations related to dual-task self-replicating networks" width={800} height={600} className="w-4/5 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Analysis of networks balancing self-replication and auxiliary tasks.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
|
||||
src="/figures/13_sr_analysis.jpg"
|
||||
alt="Analysis graphs or visualizations related to dual-task self-replicating networks"
|
||||
width={800}
|
||||
height={600}
|
||||
caption="Analysis of networks balancing self-replication and auxiliary tasks."
|
||||
/>
|
||||
|
||||
The study introduces a methodology for **dual-task training**, utilizing distinct input/output vectors to manage both the replication process and the execution of a secondary task. A key finding is that the presence of an auxiliary task does not necessarily hinder self-replication; instead, it can sometimes **complement and even stabilize** the replication dynamics.
|
||||
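Conceptually, the training signal can be written as a weighted combination of a replication term and an auxiliary-task term; the sketch below uses mean-squared errors and an equal weighting purely as an assumption:

```python
# Sketch of a dual-task objective: self-replication loss plus auxiliary-task loss.
import torch
import torch.nn.functional as F


def dual_task_loss(self_output: torch.Tensor, own_weights: torch.Tensor,
                   task_output: torch.Tensor, task_target: torch.Tensor,
                   alpha: float = 0.5) -> torch.Tensor:
    replication = F.mse_loss(self_output, own_weights)   # reproduce the network's own weights
    auxiliary = F.mse_loss(task_output, task_target)     # solve the secondary task
    return alpha * replication + (1.0 - alpha) * auxiliary
```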
|
||||
<FloatingImage
|
||||
src="/figures/13_sr_recovering.png"
|
||||
alt="3D plot showing trajectories of network states over timesteps with starting and ending points"
|
||||
width={400}
|
||||
height={400}
|
||||
float="right"
|
||||
caption="Visualization of network state trajectories and recovery in a transformed 3D space over timesteps, illustrating dynamic evolution."
|
||||
/>
|
||||
Further investigations were conducted within the framework of an **"artificial chemistry" environment**, where populations of these dual-task networks interact:
|
||||
|
||||
* The impact of varying **action parameters** (related to the secondary task) on the collective learning or emergent behavior of the network population was examined.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "RNN Memory Limits"
|
||||
tags: [research, deep-learning, recurrent-neural-networks, sequence-modeling, theoretical-ml]
|
||||
tags: [deep-learning, recurrent-neural-networks, sequence-modeling, theoretical-ml, machine-learning, memory-systems]
|
||||
excerpt: "Investigated memory limits of RNNs in recalling uncorrelated sequences."
|
||||
teaser: "/figures/22_rnn_limits.png"
|
||||
|
||||
@@ -10,15 +10,21 @@ Recurrent Neural Networks (RNNs), including variants like Long Short-Term Memory
|
||||
|
||||
This research investigates the fundamental memory capacity of these architectures under challenging conditions: specifically, when processing sequences where data points are generated independently, possessing **no inherent temporal correlation**. In such scenarios, any recall of past inputs relies solely on the network's ability to explicitly memorize information through standard backpropagation training, rather than leveraging statistical patterns in the sequence.
|
||||
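A minimal sketch of such a recall probe is shown below: a GRU is trained to reproduce the input it saw a fixed number of steps earlier from an otherwise uncorrelated random sequence (sizes, delay, and training length are illustrative):

```python
# Sketch of the recall probe: predict the input seen `delay` steps ago from i.i.d. data.
import torch
import torch.nn as nn
import torch.nn.functional as F

delay, steps, batch = 5, 50, 32
gru = nn.GRU(input_size=1, hidden_size=64, batch_first=True)
readout = nn.Linear(64, 1)
opt = torch.optim.Adam(list(gru.parameters()) + list(readout.parameters()), lr=1e-3)

for _ in range(200):
    x = torch.randn(batch, steps, 1)          # uncorrelated sequence
    target = x[:, :-delay]                    # what was seen `delay` steps earlier
    hidden, _ = gru(x)
    prediction = readout(hidden[:, delay:])   # predictions aligned with the delayed targets
    loss = F.mse_loss(prediction, target)
    opt.zero_grad()
    loss.backward()
    opt.step()
```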
|
||||
<CenteredImage
|
||||
src="/figures/22_rnn_limits.png"
|
||||
alt="Clearly visible memory horizon, established by empirical analysis of an atomic experiment."
|
||||
width={800}
|
||||
height={450}
|
||||
caption="RNN Memory Horizon."
|
||||
maxWidth="100%"
|
||||
/>
|
||||
|
||||
Our empirical analysis demonstrates that while RNNs *can* learn to recall a limited number of past inputs even from uncorrelated sequences, this capability is significantly constrained:
|
||||
|
||||
* **Limited Recall Range:** The effective range over which vanilla RNNs, LSTMs, and GRUs can reliably reproduce past inputs from uncorrelated data is substantially shorter than the recall range achievable when even minimal temporal correlations are present.
|
||||
|
||||
* **Architectural Influence:** This limitation is influenced by both the specific RNN architecture (vanilla, LSTM, GRU) and the network size (number of hidden units).
|
||||
|
||||
* **Practical Bound:** The findings suggest a practical upper bound on the temporal memory achievable through standard training in these scenarios, which appears well below theoretical information storage limits.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/22_rnn_limits.png" alt="Clearly visible memory horizon, established by empirical analysis of an atomic experiment." width={800} height={450} className="w-full mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">RNN Memory Horizon.</figcaption>
|
||||
</div>
|
||||
|
||||
These results highlight an inherent constraint in the capacity of standard RNN architectures to identify and utilize long-range dependencies when processing sequences lacking temporal structure, providing insights into their limitations in specific types of sequence modeling tasks. <Cite bibtexKey="illium2022empirical" />
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "RL Anomaly Detection"
|
||||
tags: [research, reinforcement-learning, anomaly-detection, safety, lifelong-learning, generalization]
|
||||
tags: [reinforcement-learning, anomaly-detection, safety, lifelong-learning, generalization]
|
||||
excerpt: "Perspective on anomaly detection challenges and future in reinforcement learning."
|
||||
teaser: "/figures/14_ad_rl_teaser.jpg"
|
||||
|
||||
@@ -8,16 +8,21 @@ teaser: "/figures/14_ad_rl_teaser.jpg"
|
||||
|
||||
Anomaly Detection (AD) is crucial for the safe deployment of Reinforcement Learning (RL) agents, especially in safety-critical applications where encountering unexpected or out-of-distribution situations can lead to catastrophic failures. This work provides a perspective on the state and future directions of AD research specifically tailored for the complexities inherent in RL.
|
||||
|
||||
<FloatingImage
|
||||
src="/figures/14_ad_rl.jpg"
|
||||
alt="Mathematical formalism or diagram related to the block contextual MDP framework"
|
||||
width={400}
|
||||
height={300}
|
||||
float="right"
|
||||
caption="Formalizing non-stationary anomalies using the BCMDP framework."
|
||||
/>
|
||||
|
||||
The paper argues that current AD research within RL often relies on overly simplified scenarios that do not fully capture the challenges of sequential decision-making under uncertainty. It establishes important conceptual connections between AD and other critical areas of RL research:
|
||||
|
||||
* **Lifelong Reinforcement Learning:** AD is framed as a necessary component for agents that must continually adapt to changing environments and tasks. Detecting anomalies signals the need for adaptation or learning updates.
|
||||
|
||||
* **Generalization:** The ability to detect anomalies is closely related to an agent's generalization capabilities; anomalies often represent situations outside the agent's learned experience manifold.
|
||||
|
||||
The study highlights **non-stationarity** (i.e., changes in the environment dynamics or reward structure over time) as a particularly critical and under-explored challenge for AD in RL. To address this formally, the paper proposes utilizing the framework of **block contextual Markov decision processes (BCMDPs)** as a suitable model for defining and analyzing non-stationary anomalies.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/14_ad_rl.jpg" alt="Mathematical formalism or diagram related to the block contextual MDP framework" width={800} height={600} className="w-1/2 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Formalizing non-stationary anomalies using the BCMDP framework.</figcaption>
|
||||
</div>
|
||||
|
||||
Finally, it outlines practical requirements and desiderata for future research in this area, advocating for more rigorous evaluation protocols and benchmark environments to advance the development of robust and reliable AD methods for RL agents. <Cite bibtexKey="muller2022towards" />
|
||||
@@ -1,29 +1,27 @@
|
||||
---
|
||||
title: "Extended Self-Replication"
|
||||
tags: [artificial-life, complex-systems, neural-networks, self-organization, dynamical-systems]
|
||||
tags: [artificial-life, complex-systems, neural-networks, self-organization, dynamical-systems, self-replication, emergent-behavior, robustness, replication-fidelity]
|
||||
excerpt: "Journal extension: self-replication, noise robustness, emergence, dynamical system analysis."
|
||||
teaser: "/figures/15_sr_journal_teaser.jpg"
|
||||
|
||||
---
|
||||
|
||||
# Extended Self-Replication
|
||||
<FloatingImage src="/figures/15_sr_journal_teaser.jpg" alt="Scatter plot showing the relationship between relative parent distance and replication outcome or child distance" width={600} height={480} float="right" caption="An analysis of replication fidelity, relating the relative distance between parent networks to the resulting distance between parent and child networks." />
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/15_sr_journal_children.jpg" alt="Visualization showing the evolution or diversity of 'child' networks generated through self-replication" className="w-2/3 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Analyzing the lineage and diversity in populations of self-replicating networks.</figcaption>
|
||||
</div>
|
||||
This journal article <Cite bibtexKey="gabor2022self" /> provides an extended and more in-depth exploration of self-replicating neural networks <Cite bibtexKey="gabor2019self" />, building upon earlier foundational work ([Gabor et al., 2019](link-to-previous-paper-if-available)).
|
||||
|
||||
This journal article provides an extended and more in-depth exploration of self-replicating neural networks, building upon earlier foundational work ([Gabor et al., 2019](link-to-previous-paper-if-available)). The research further investigates the use of **backpropagation-like mechanisms** not for typical supervised learning, but as an effective means to enable **non-trivial self-replication** – where networks learn to reproduce their own connection weights.
|
||||
The research further investigates the use of **backpropagation-like mechanisms** not for typical supervised learning, but as an effective means to enable **non-trivial self-replication** – where networks learn to reproduce their own connection weights.
|
||||
|
||||
<CenteredImage src="/figures/15_sr_journal_children.jpg" alt="Visualization showing the evolution or diversity of 'child' networks generated through self-replication" width={600} height={300} caption="Analyzing the lineage and diversity in populations of self-replicating networks." />
|
||||
|
||||
Key extensions and analyses presented in this work include:
|
||||
|
||||
* **Robustness Analysis:** A systematic evaluation of the self-replicating networks' resilience and stability when subjected to various levels of **noise** during the replication process.
|
||||
|
||||
* **Artificial Chemistry Environments:** Further development and analysis of simulated environments where populations of self-replicating networks interact, leading to observable **emergent collective behaviors** and ecosystem dynamics.
|
||||
|
||||
* **Dynamical Systems Perspective:** A detailed theoretical analysis of the self-replication process viewed as a dynamical system. This includes identifying **fixpoint weight configurations** (networks that perfectly replicate themselves) and characterizing their **attractor basins** (the regions in weight space from which networks converge towards a specific fixpoint).
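One way to probe the dynamical-systems view above empirically (a sketch, assuming some replication operator `replicate(weights) -> child_weights` is available; the function name is illustrative) is to iterate self-application and check whether the weight vector settles:

```python
import numpy as np

def basin_probe(replicate, start_weights, steps=50, tol=1e-6):
    """Iterate self-application and report whether the trajectory settles on a fixpoint.

    `replicate` is assumed to map a flat weight vector to the child's weight vector.
    """
    w = np.asarray(start_weights, dtype=float)
    for t in range(steps):
        child = replicate(w)
        drift = np.linalg.norm(child - w)
        if drift < tol:
            return t, w          # converged: `w` lies (numerically) on a fixpoint
        w = child
    return None, w               # no convergence within budget

# Example with a contractive toy operator whose fixpoint is the zero vector:
steps, fix = basin_probe(lambda w: 0.5 * w, np.random.randn(8))
```

Starting points that converge to the same fixpoint belong to its attractor basin; sweeping `start_weights` over a grid gives a crude picture of those basins.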
<div className="my-6 text-center">
|
||||
<Image src="/figures/15_noise_levels.jpg" alt="Graph showing the impact of different noise levels on self-replication fidelity or population dynamics" className="w-2/3 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Investigating the influence of noise on the self-replication process.</figcaption>
|
||||
</div>
|
||||
<CenteredImage src="/figures/15_noise_levels.jpg" alt="Graph showing the impact of different noise levels on self-replication fidelity or population dynamics" width={600} height={450} caption="Investigating the influence of noise on the self-replication process." />
|
||||
|
||||
By delving deeper into the mechanisms, robustness, emergent properties, and underlying dynamics, this study significantly enhances the understanding of how self-replication can be achieved and analyzed within neural network models, contributing valuable insights to the fields of artificial life and complex systems. <Cite bibtexKey="gabor2022self" />
|
||||
|
||||
By delving deeper into the mechanisms, robustness, emergent properties, and underlying dynamics, this study significantly enhances the understanding of how self-replication can be achieved and analyzed within neural network models, contributing valuable insights to the fields of artificial life and complex systems.
|
||||
@@ -1,29 +1,40 @@
|
||||
---
|
||||
title: "Organism Network Emergence"
|
||||
tags: [artificial-life, complex-systems, neural-networks, self-organization, emergent-computation]
|
||||
tags: [artificial-life, complex-systems, neural-networks, self-organization, emergent-computation, artificial-intelligence, collective-intelligence, evolutionary-computation]
|
||||
excerpt: "Self-replicating networks collaborate forming higher-level Organism Networks with emergent functionalities."
|
||||
teaser: "/figures/16_on_teaser.jpg"
|
||||
|
||||
---
|
||||
|
||||
This research investigates the transition from simple self-replication to higher levels of organization by exploring how populations of basic, self-replicating neural network units can form **"Organism Networks" (ONs)** through **collaboration and emergent differentiation**. Moving beyond the replication of individual networks, the focus shifts to the collective dynamics and functional capabilities that arise when these units interact within a shared environment (akin to an "artificial chemistry").
|
||||
This research investigates the transition from simple self-replication <Cite bibtexKey="gabor2019self" /> to higher levels of organization by exploring how populations of basic, self-replicating neural network units can form **"Organism Networks" (ONs)** through **collaboration and emergent differentiation**. Moving beyond the replication of individual networks, the focus shifts to the collective dynamics and functional capabilities that arise when these units interact within a shared environment (akin to an "artificial chemistry").
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/16_on_architecture.jpg" alt="Diagram showing individual self-replicating units interacting to form a larger Organism Network structure" width={800} height={500} className="w-[65%] mx-auto" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Conceptual architecture of an Organism Network emerging from interacting self-replicators.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
  src="/figures/16_on_architecture.jpg"
  alt="Diagram showing individual self-replicating units interacting to form a larger Organism Network structure"
  width={800}
  height={500}
  maxWidth="75%"
  caption="Conceptual architecture of an Organism Network emerging from interacting self-replicators."
/>
|
||||
|
||||
The core hypothesis is that through local interactions and potentially shared environmental feedback, initially homogeneous populations of self-replicators can spontaneously develop specialized roles or structures, leading to a collective entity with capabilities exceeding those of individual units.
|
||||
|
||||
<div className="float-right w-[45%] ml-6 mb-4">
|
||||
<Image src="/figures/16_on_dropout.jpg" alt="Visualization potentially related to network robustness, differentiation, or communication channels." width={600} height={600} />
|
||||
</div>
|
||||
<FloatingImage
  src="/figures/16_on_dropout.jpg"
  alt="Visualization potentially related to network robustness, differentiation, or communication channels."
  width={600}
  height={600}
  float="right"
  caption="Illustration of network dynamics, possibly related to robustness or specialization facilitated by dropout mechanisms."
/>
|
||||
|
||||
Key aspects explored in this work include:
|
||||
|
||||
* **Mechanisms for Collaboration:** Investigating how communication or resource sharing between self-replicating units can be established and influence collective behavior.
|
||||
|
||||
* **Emergent Differentiation:** Analyzing scenarios where units within the population begin to specialize, adopting different internal states (weight configurations) or functions, analogous to cellular differentiation in biological organisms.
|
||||
|
||||
* **Formation of Structure:** Studying how interactions lead to stable spatial or functional structures within the population, forming the basis of the Organism Network.
|
||||
|
||||
* **Functional Advantages:** Assessing whether these emergent ONs exhibit novel collective functionalities or improved problem-solving capabilities compared to non-interacting populations. (The role of dropout, as suggested by the image, might relate to promoting robustness or specialization within this context).
|
||||
|
||||
This study bridges the gap between single-unit self-replication and the emergence of complex, multi-unit systems in artificial life research, offering insights into how collaborative dynamics can lead to higher-order computational structures. For more detailed insights, refer to <Cite bibtexKey="illium2022constructing" />.
|
||||
@@ -1,17 +1,13 @@
|
||||
---
|
||||
title: "Voronoi Data Augmentation"
|
||||
tags: [research, data-augmentation, computer-vision, deep-learning, convolutional-neural-networks]
|
||||
tags: [data-augmentation, computer-vision, deep-learning, convolutional-neural-networks, voronoi, machine-learning, image-processing]
|
||||
excerpt: "VoronoiPatches improves CNN robustness via non-linear recombination augmentation."
|
||||
teaser: "/figures/17_vp_teaser.jpg"
|
||||
|
||||
---
|
||||
|
||||
Data augmentation is essential for improving the performance and generalization of Convolutional Neural Networks (CNNs), especially when training data is limited. This research introduces **VoronoiPatches (VP)**, a novel data augmentation algorithm based on the principle of **non-linear recombination** of image information.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/17_vp_lion.jpg" alt="Example of an image augmented with VoronoiPatches, showing polygon patches blended onto a lion image" width={800} height={600} className="w-3/4 mx-auto" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Visual example of the VoronoiPatches augmentation applied to an image.</figcaption>
|
||||
</div>
|
||||
<CenteredImage src="/figures/17_vp_lion.jpg" alt="Example of an image augmented with VoronoiPatches, showing polygon patches blended onto a lion image" width={800} height={600} caption="A visual example of the VoronoiPatches augmentation applied to an image, demonstrating the non-linear recombination of image information." />
|
||||
|
||||
Unlike traditional methods that often apply uniform transformations or cutout regions, VP operates by:
|
||||
1. Generating a random layout of points within an image.
|
||||
@@ -28,10 +24,7 @@ This approach potentially allows for smoother transitions between augmented regi
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
<br/>
|
||||
Evaluations demonstrate that VoronoiPatches can effectively **reduce model variance and combat overfitting**. Comparative studies indicate that VP **outperforms several existing state-of-the-art data augmentation techniques** in improving the robustness and generalization performance of CNN models on unseen data across various benchmarks. <Cite bibtexKey="illium2023voronoipatches" />
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/17_vp_results.jpg" alt="Graphs showing performance comparison (e.g., accuracy, loss) of VoronoiPatches against other augmentation methods" width={800} height={450} className="w-11/12 mx-auto" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Comparative results illustrating the performance benefits of VoronoiPatches.</figcaption>
|
||||
</div>
|
||||
<CenteredImage src="/figures/17_vp_results.jpg" alt="Graphs showing performance comparison (e.g., accuracy, loss) of VoronoiPatches against other augmentation methods" width={800} height={450} caption="Comparative results illustrating the performance benefits of VoronoiPatches against other augmentation techniques on various benchmarks." />
@@ -1,29 +1,37 @@
|
||||
---
|
||||
title: "Autoencoder Trajectory Compression"
|
||||
tags: [research, deep-learning, recurrent-neural-networks, trajectory-analysis, data-compression, geoinformatics]
|
||||
tags: [deep-learning, recurrent-neural-networks, trajectory-analysis, data-compression, geoinformatics, autoencoders, LSTM, GPS]
|
||||
excerpt: "LSTM autoencoder better DP for trajectory compression (Fréchet/DTW)."
|
||||
teaser: /figures/23_trajectory_model.png
|
||||
|
||||
---
|
||||
|
||||
The proliferation of location-aware mobile devices generates vast amounts of GPS trajectory data, necessitating efficient storage solutions. While various compression techniques aim to reduce data volume, preserving essential spatio-temporal information remains crucial.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/23_trajectory_model.png" alt="Schematic diagram of the LSTM autoencoder model architecture used for trajectory compression" width={800} height={450} className="w-3/4 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Schematic of the LSTM Decoder Architecture.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
  src="/figures/23_trajectory_model.png"
  alt="Schematic diagram of the LSTM autoencoder model architecture used for trajectory compression"
  width={450}
  height={450}
  caption="Schematic of the LSTM Autoencoder Decoder Architecture."
/>
|
||||
|
||||
This paper introduces a novel approach for **compressing and reconstructing GPS trajectories** using a **Long Short-Term Memory (LSTM) autoencoder**. The autoencoder learns a compressed latent representation of the trajectory sequence, which can then be decoded to reconstruct the original path.
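A minimal sketch of such a sequence autoencoder (assuming PyTorch and fixed-length (lat, lon) sequences; the layer sizes are invented, not taken from the paper):

```python
import torch
import torch.nn as nn

class TrajectoryAutoencoder(nn.Module):
    """Encode a (batch, seq_len, 2) GPS trajectory into one latent vector, then decode it."""
    def __init__(self, latent_dim=16, hidden=64):
        super().__init__()
        self.encoder = nn.LSTM(2, hidden, batch_first=True)
        self.to_latent = nn.Linear(hidden, latent_dim)
        self.decoder = nn.LSTM(latent_dim, hidden, batch_first=True)
        self.to_coords = nn.Linear(hidden, 2)

    def forward(self, traj):
        _, (h, _) = self.encoder(traj)                        # h: (1, batch, hidden)
        z = self.to_latent(h[-1])                             # compressed representation
        z_seq = z.unsqueeze(1).expand(-1, traj.size(1), -1)   # repeat latent per time step
        out, _ = self.decoder(z_seq)
        return self.to_coords(out)                            # reconstructed (batch, seq_len, 2)

model = TrajectoryAutoencoder()
traj = torch.randn(8, 50, 2)                                  # dummy batch of 50-point tracks
loss = nn.functional.mse_loss(model(traj), traj)              # reconstruction objective
```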
<div className="float-right ml-6 mb-4 w-full sm:w-1/2">
|
||||
<Image src="/figures/23_trajectory_scores.png" alt="Graphs showing mean distance errors (e.g., Fréchet, DTW) for different compression ratios on the T-Drive dataset" width={1000} height={800} />
|
||||
</div>
|
||||
|
||||
Our method was evaluated on two distinct datasets: one from a gaming context and another real-world dataset (T-Drive). We assessed performance across a range of compression ratios and trajectory lengths, comparing it against the widely used traditional **Douglas-Peucker algorithm**.
|
||||
|
||||
<FloatingImage
  src="/figures/23_trajectory_scores.png"
  alt="Graphs showing mean distance errors (e.g., Fréchet, DTW) for different compression ratios on the T-Drive dataset"
  width={1000}
  height={800}
  float="right"
  caption="Comparison of mean distance errors for different compression ratios on the T-Drive dataset."
/>
|
||||
|
||||
**Key findings:**
|
||||
|
||||
* The LSTM autoencoder approach significantly **outperforms Douglas-Peucker** in terms of reconstruction accuracy, as measured by both **discrete Fréchet distance** and **Dynamic Time Warping (DTW)**.
|
||||
|
||||
* Unlike point-reduction techniques like Douglas-Peucker, our method performs a **lossy reconstruction at every point** along the trajectory. This offers potential advantages in maintaining temporal resolution and providing greater flexibility for downstream analysis.
|
||||
|
||||
Experimental results demonstrate the effectiveness and potential benefits of using deep learning, specifically LSTM autoencoders, for GPS trajectory compression, offering improved accuracy over conventional geometric algorithms. <Cite bibtexKey="kolle2023compression" />
|
||||
@@ -1,25 +1,24 @@
|
||||
---
|
||||
title: "Emergent Social Dynamics"
|
||||
tags: [artificial-life, complex-systems, neural-networks, self-organization, emergent-behavior, predictive-coding]
|
||||
tags: [artificial-life, complex-systems, neural-networks, self-organization, emergent-behavior, predictive-coding, artificial-chemistry, social-interaction]
|
||||
excerpt: "Artificial chemistry networks develop predictive models via surprise minimization."
|
||||
teaser: "/figures/18_surprised_soup_teaser.jpg"
|
||||
---
|
||||
|
||||
<InfoBox title="System Schematic">
|
||||
<Image src="/figures/18_surprised_soup_schematic.jpg" alt="Schematic diagram illustrating interacting neural network particles in the 'social soup'" width={600} height={450} />
|
||||
</InfoBox>
|
||||
This research extends the study of **artificial chemistry** systems populated by neural network "particles" <Cite bibtexKey="gabor2019self" />, focusing on the emergence of complex behaviors driven by **social interaction** rather than explicit programming. Building on systems where particles may exhibit self-replication, we introduce interactions based on principles of **predictive processing and surprise minimization** (akin to the Free Energy Principle).
|
||||
|
||||
This research extends the study of **artificial chemistry** systems populated by neural network "particles," focusing on the emergence of complex behaviors driven by **social interaction** rather than explicit programming. Building on systems where particles may exhibit self-replication, we introduce interactions based on principles of **predictive processing and surprise minimization** (akin to the Free Energy Principle).
|
||||
<FloatingImage src="/figures/18_surprised_soup_teaser.jpg" alt="A stylized depiction of particles interacting in a soup, representing the core concept of the system." width={1200} height={800} float="right" caption="A stylized depiction of particles interacting in a soup, representing the core concept of the system." />
|
||||
|
||||
Specifically, particles are equipped with mechanisms enabling them to **recognize and build predictive models of their peers' behavior**. The learning process is driven by the minimization of prediction error, or "surprise," incentivizing particles to accurately anticipate the actions or state changes of others within the "soup."
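In spirit (a toy sketch with invented linear dynamics, not the system used in the paper), each particle keeps a small model of a peer's next state and nudges it to reduce prediction error:

```python
import numpy as np

rng = np.random.default_rng(0)

class Particle:
    """Keeps a linear model of one peer's next state and minimizes its 'surprise'."""
    def __init__(self, dim=4, lr=0.05):
        self.W = rng.normal(scale=0.1, size=(dim, dim))
        self.lr = lr

    def observe(self, peer_state, peer_next_state):
        prediction = self.W @ peer_state
        error = prediction - peer_next_state               # prediction error = surprise
        self.W -= self.lr * np.outer(error, peer_state)    # gradient step on 0.5*||error||^2
        return float(error @ error)

# A peer following some unknown (here: fixed linear) dynamics; the observer's
# surprise shrinks as its internal model of the peer improves.
peer_dynamics = rng.normal(scale=0.3, size=(4, 4))
observer, state = Particle(), rng.normal(size=4)
for t in range(1000):
    if t % 50 == 0:
        state = rng.normal(size=4)          # re-seed so the peer keeps producing signal
    next_state = peer_dynamics @ state
    surprise = observer.observe(state, next_state)
    state = next_state
```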
Key observations from this setup include:
|
||||
|
||||
* The emergence of **stable behavioral patterns and population dynamics** purely from these local, predictive interactions. Notably, these emergent patterns often resemble the stability observed in systems where self-replication was an explicitly trained objective.
|
||||
|
||||
* The introduction of a unique **"catalyst" particle** designed to exert evolutionary pressure on the system, demonstrating how external influences or specialized agents can shape the collective dynamics.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/18_surprised_soup_trajec.jpg" alt="Trajectories or state space visualization of the particle population dynamics over time" width={1200} height={800} className="w-full mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Visualization of particle trajectories or population dynamics within the 'social soup'.</figcaption>
|
||||
<CenteredImage src="/figures/18_surprised_soup_trajec.jpg" alt="Trajectories or state space visualization of the particle population dynamics over time" width={1200} height={800} caption="Visualization of particle trajectories or population dynamics within the 'social soup'." />
|
||||
</div>
|
||||
|
||||
This study highlights how complex, seemingly goal-directed social behaviors and stable ecosystem structures can emerge from simple, local rules based on mutual prediction and surprise minimization among interacting agents, offering insights into the self-organization of complex adaptive systems. <Cite bibtexKey="zorn23surprise" />
|
||||
This study <Cite bibtexKey="zorn23surprise" /> highlights how complex, seemingly goal-directed social behaviors and stable ecosystem structures can emerge from simple, local rules based on mutual prediction and surprise minimization among interacting agents, offering insights into the self-organization of complex adaptive systems.
|
||||
@@ -1,30 +1,20 @@
|
||||
---
|
||||
title: "Primate Subsegment Sorting"
|
||||
tags: [bioacoustics, audio-classification, deep-learning, data-labeling, signal-processing]
|
||||
tags: [bioacoustics, audio-classification, deep-learning, data-labeling, signal-processing, primate-vocalizations, wildlife-monitoring, machine-learning, spectrograms, cnn]
|
||||
excerpt: "Binary subsegment presorting improves noisy primate sound classification."
|
||||
teaser: /figures/19_binary_primates_teaser.jpg
|
||||
|
||||
---
|
||||
|
||||
<div className="not-prose flex flex-col md:flex-row gap-8 items-start">
|
||||
<div className="flex-1 md:max-w-2xl">
|
||||
Automated acoustic classification plays a vital role in wildlife monitoring and bioacoustics research. This study introduces a sophisticated pre-processing and training strategy to significantly enhance the accuracy of multi-class audio classification, specifically targeting the identification of different primate species from field recordings.
|
||||
</div>
|
||||
<div className="md:w-96 flex-shrink-0">
|
||||
<div className="mt-4 text-right">
|
||||
<Image
|
||||
src="/figures/19_binary_primates_pipeline.jpg"
|
||||
alt="Visualization related to the thresholding or selection process for subsegment labeling"
|
||||
width={300}
|
||||
height={600}
|
||||
className="w-full h-auto rounded-md shadow-md"
|
||||
/>
|
||||
<figcaption className="text-sm text-muted-foreground mt-2 block text-center md:text-right">
|
||||
Thresholding or selection criteria for subsegment refinement.
|
||||
</figcaption>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<FloatingImage
  src="/figures/19_binary_primates_pipeline.jpg"
  alt="Visualization related to the thresholding or selection process for subsegment labeling"
  width={250}
  height={600}
  float="right"
  caption="Thresholding or selection criteria for subsegment refinement."
/>
|
||||
|
||||
Automated acoustic classification plays a vital role in wildlife monitoring and bioacoustics research. This study introduces a sophisticated pre-processing and training strategy to significantly enhance the accuracy of multi-class audio classification, specifically targeting the identification of different primate species from field recordings.
|
||||
|
||||
A key challenge in bioacoustics is dealing with datasets containing weak labels (where calls of interest occupy only a portion of a labeled segment), varying segment lengths, and poor signal-to-noise ratios (SNR). Our approach addresses this by:
|
||||
1. **Subsegment Analysis:** Processing audio recordings represented as **MEL spectrograms**.
|
||||
@@ -32,16 +22,24 @@ A key challenge in bioacoustics is dealing with datasets containing weak labels
|
||||
3. **CNN Training:** Training **Convolutional Neural Networks (CNNs)** on these refined, higher-quality subsegment inputs.
|
||||
4. **Data Augmentation:** Employing innovative **data augmentation techniques** suitable for spectrogram data to further improve model robustness.
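A rough sketch of the presorting idea behind steps 1–3 (the binary scorer, window sizes, and threshold below are placeholders, not the paper's configuration): slice each weakly labeled recording into short mel-spectrogram subsegments, score each with a binary call-vs-noise model, and keep only confident subsegments for multi-class training.

```python
import numpy as np

def presort_subsegments(mel_spec, binary_call_scorer, win=64, hop=32, keep_thresh=0.7):
    """Split a mel spectrogram (n_mels, n_frames) into windows and keep likely call segments.

    `binary_call_scorer(segment) -> probability of containing a call` is assumed to be a
    pretrained binary model; its name and the 0.7 threshold are illustrative.
    """
    n_frames = mel_spec.shape[1]
    kept = []
    for start in range(0, max(n_frames - win, 0) + 1, hop):
        segment = mel_spec[:, start:start + win]
        if binary_call_scorer(segment) >= keep_thresh:
            kept.append(segment)             # keep high-confidence call material only
    return kept

# The refined subsegments then inherit the recording's species label and feed the CNN.
```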
<div className="my-6 text-center">
|
||||
<Image src="/figures/19_binary_primates_thresholding.jpg" alt="Visualization related to the thresholding or selection process for subsegment labeling" width={800} height={600} className="w-3/4 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Thresholding or selection criteria for subsegment refinement.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
  src="/figures/19_binary_primates_thresholding.jpg"
  alt="Visualization related to the thresholding or selection process for subsegment labeling"
  width={800}
  height={600}
  maxWidth="75%"
  caption="Thresholding or selection criteria for subsegment refinement."
/>
|
||||
|
||||
The effectiveness of this methodology was evaluated on the challenging **ComParE 2021 Primate dataset**. The results demonstrate remarkable improvements in classification performance, achieving substantially higher accuracy and Unweighted Average Recall (UAR) scores compared to existing baseline methods.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/19_binary_primates_results.jpg" alt="Graphs or tables showing improved classification results (accuracy, UAR) compared to baselines" width={800} height={600} className="w-3/4 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Comparative performance results on the ComParE 2021 dataset.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
  src="/figures/19_binary_primates_results.jpg"
  alt="Graphs or tables showing improved classification results (accuracy, UAR) compared to baselines"
  width={800}
  height={600}
  maxWidth="75%"
  caption="Comparative performance results on the ComParE 2021 dataset."
/>
|
||||
|
||||
This work represents a significant advancement in handling difficult, real-world bioacoustic data, showcasing how careful data refinement prior to deep learning model training can dramatically enhance classification outcomes. <Cite bibtexKey="koelle23primate" />
|
||||
@@ -1,9 +1,8 @@
|
||||
---
|
||||
title: "Aquarium MARL Environment"
|
||||
tags: [MARL, simulation, emergence, complex-systems, environment]
|
||||
tags: [MARL, simulation, emergence, complex-systems, environment, predator-prey, reinforcement-learning, multi-agent]
|
||||
excerpt: "Aquarium: Open-source MARL environment for predator-prey studies."
|
||||
teaser: /figures/20_aquarium.png
|
||||
|
||||
---
|
||||
|
||||
<FloatingImage
|
||||
@@ -11,26 +10,35 @@ teaser: /figures/20_aquarium.png
|
||||
alt="The Multi-Agent Reinforcement Learning Cycle. Plot showing how Agent receive individual rewards from the environment."
|
||||
width={450}
|
||||
height={350}
|
||||
caption="The Multi-Agent Reinforcement Learning Cycle."
|
||||
float="right"
|
||||
caption="The Multi-Agent Reinforcement Learning Cycle: Agents receive individual rewards from the environment."
|
||||
/>
|
||||
The study of complex interactions using Multi-Agent Reinforcement Learning (MARL), particularly **predator-prey dynamics**, often requires specialized simulation environments. To streamline research and avoid redundant development efforts, we introduce **Aquarium**: a versatile, open-source MARL environment specifically designed for investigating predator-prey scenarios and related **emergent behaviors**.
|
||||
|
||||
The study of complex interactions using Multi-Agent Reinforcement Learning (MARL), particularly **predator-prey dynamics**, often requires specialized simulation environments.
|
||||
To streamline research and avoid redundant development efforts, we introduce **Aquarium**: a versatile, open-source MARL environment specifically designed for investigating predator-prey scenarios and related **emergent behaviors**.
|
||||
|
||||
Key Features of Aquarium:
|
||||
|
||||
* **Framework Integration:** Built upon and seamlessly integrates with the popular **PettingZoo API**, allowing researchers to readily apply existing MARL algorithm implementations (e.g., from Stable-Baselines3, RLlib).
|
||||
|
||||
* **Physics-Based Movement:** Simulates agent movement on a two-dimensional, continuous plane with edge-wrapping boundaries, incorporating basic physics for more realistic interactions.
|
||||
|
||||
* **High Customizability:** Offers extensive configuration options for:
|
||||
* **Agent-Environment Interactions:** Observation spaces, action spaces, and reward functions can be tailored to specific research questions.
|
||||
* **Environmental Parameters:** Key dynamics like agent speeds, prey reproduction rates, predator starvation mechanisms, sensor ranges, and more are fully adjustable.
|
||||
|
||||
* **Visualization & Recording:** Includes a resource-efficient visualizer and supports video recording of simulation runs, facilitating qualitative analysis and understanding of agent behaviors.
|
||||
|
||||
<CenteredImage
|
||||
src="/figures/20_observation_vector.png"
|
||||
alt="Diagram detailing the construction of the observation vector for an agent"
|
||||
width={450}
|
||||
height={350}
|
||||
maxWidth="75%"
|
||||
caption="Construction details of the agent observation vector."
|
||||
caption="Construction details of the agent observation vector, illustrating the information available to each agent."
|
||||
/>
|
||||
|
||||
Key Features of Aquarium:
|
||||
|
||||
* **Framework Integration:** Built upon and seamlessly integrates with the popular **PettingZoo API**, allowing researchers to readily apply existing MARL algorithm implementations (e.g., from Stable-Baselines3, RLlib).
|
||||
* **Physics-Based Movement:** Simulates agent movement on a two-dimensional, continuous plane with edge-wrapping boundaries, incorporating basic physics for more realistic interactions.
|
||||
* **High Customizability:** Offers extensive configuration options for:
|
||||
* **Agent-Environment Interactions:** Observation spaces, action spaces, and reward functions can be tailored to specific research questions.
|
||||
* **Environmental Parameters:** Key dynamics like agent speeds, prey reproduction rates, predator starvation mechanisms, sensor ranges, and more are fully adjustable.
|
||||
* **Visualization & Recording:** Includes a resource-efficient visualizer and supports video recording of simulation runs, facilitating qualitative analysis and understanding of agent behaviors.
|
||||
To demonstrate its capabilities, we conducted preliminary studies using **Proximal Policy Optimization (PPO)** to train multiple prey agents learning to evade a predator within Aquarium.
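Because Aquarium follows the PettingZoo API, interacting with it looks like any other PettingZoo environment. A hedged sketch (the `aquarium` module, entry-point name, and constructor options below are illustrative, assuming a parallel-API environment):

```python
# Hypothetical usage sketch; module, constructor, and option names are illustrative.
from aquarium import predator_prey_v0   # assumed PettingZoo-style entry point

env = predator_prey_v0.parallel_env(n_prey=8, n_predators=1, render_mode=None)
observations, infos = env.reset(seed=42)

while env.agents:
    # Random policies as placeholders; in the preliminary study these would be PPO
    # policies, optionally with parameter sharing across the prey agents.
    actions = {agent: env.action_space(agent).sample() for agent in env.agents}
    observations, rewards, terminations, truncations, infos = env.step(actions)
env.close()
```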
<CenteredImage
  src="/figures/20_capture_statistics.png"
@@ -38,7 +46,7 @@ Key Features of Aquarium:
|
||||
  width={450}
  height={350}
  maxWidth="75%"
  caption="Performance metrics (e.g., average captures/rewards) comparing training strategies."
  caption="Performance metrics (e.g., average captures/rewards) comparing different training strategies in Aquarium."
/>
|
||||
|
||||
To demonstrate its capabilities, we conducted preliminary studies using **Proximal Policy Optimization (PPO)** to train multiple prey agents learning to evade a predator within Aquarium. Consistent with findings in existing MARL literature, our results showed that training agents with **individual policies led to suboptimal performance**, whereas utilizing **parameter sharing** among prey agents significantly improved coordination, sample efficiency, and overall evasion success. <Cite bibtexKey="kolle2024aquarium" />
|
||||
Consistent with findings in existing MARL literature, our results showed that training agents with **individual policies led to suboptimal performance**, whereas utilizing **parameter sharing** among prey agents significantly improved coordination, sample efficiency, and overall evasion success. <Cite bibtexKey="kolle2024aquarium" />
|
||||
@@ -1,25 +1,39 @@
|
||||
---
|
||||
title: "MAS Emergence Safety"
|
||||
tags: [multi-agent-systems, MARL, safety, emergence, system-specification]
|
||||
tags: [multi-agent-systems, MARL, safety, emergence, system-specification, decentralized-AI, AI-safety, system-design]
|
||||
excerpt: "Formalized MAS emergence misalignment; proposed safety mitigation strategies."
|
||||
teaser: "/figures/21_coins_teaser.png"
|
||||
|
||||
---
|
||||
|
||||
<FloatingImage
  src="/figures/21_envs.png"
  alt="Grid of two gridworld scenarios in a three-dimensional space."
  width={600}
  height={600}
  caption="Visualization of the 3D representation for the two gridworld scenarios."
/>
|
||||
|
||||
Multi-Agent Systems (MAS), particularly those employing decentralized decision-making based on local information (common in MARL), can exhibit **emergent effects**. These phenomena, arising from complex interactions, range from minor behavioral quirks to potentially catastrophic system failures, posing significant **safety challenges**.
|
||||
|
||||
This research provides a framework for understanding and mitigating undesirable emergence from a **safety perspective**. We propose a formal definition: emergent effects arise from **misalignments between the *global inherent specification*** (the intended overall system goal or behavior) **and its *local approximation*** used by individual agents (e.g., distinct reward components, limited observations).
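Read operationally (a toy sketch of the definition with invented rewards, not anything from the paper), the misalignment can be pictured as the gap between the globally specified objective and what the agents' local reward components add up to:

```python
def global_spec(joint_state):
    """Intended global objective: coins actually delivered to the shared bank."""
    return joint_state["coins_banked"]

def local_approximations(joint_state):
    """Per-agent rewards actually optimized: each agent is credited for coins it holds."""
    return {agent: held for agent, held in joint_state["coins_held"].items()}

def misalignment(joint_state):
    """Gap between the global specification and the sum of its local approximations.

    A persistent positive gap rewards hoarding over banking -- one route to the
    emergent hoarding/blocking behaviors discussed in the gridworld scenarios.
    """
    return sum(local_approximations(joint_state).values()) - global_spec(joint_state)

state = {"coins_banked": 2, "coins_held": {"a1": 5, "a2": 3}}
print(misalignment(state))   # 6: local incentives credit hoarded coins the global goal does not
```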
<div className="my-6 text-center">
|
||||
<Image src="/figures/21_coins.png" alt="Visualization showing agents exhibiting emergent coin-collecting behavior" width={800} height={450} className="w-3/4 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Example of emergent behavior (e.g., coin hoarding) due to specification misalignment.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
  src="/figures/21_coins.png"
  alt="Visualization showing agents exhibiting emergent coin-collecting behavior"
  width={600}
  height={600}
  caption="Example of emergent behavior (e.g., coin hoarding) due to specification misalignment."
/>
|
||||
|
||||
Leveraging established concepts from system safety engineering, we analyze how such misalignments can lead to deviations from intended global behavior. To illustrate the practical implications, we examine two highly configurable gridworld scenarios. These demonstrate how inadequate or independently derived local specifications (rewards/observations) can predictably result in unintended emergent behaviors, such as resource hoarding or inefficient coordination.
|
||||
|
||||
<div className="my-6 text-center">
|
||||
<Image src="/figures/21_blocking.png" alt="Visualization showing agents exhibiting emergent blocking behavior" width={800} height={500} className="w-2/3 mx-auto rounded-lg" />
|
||||
<figcaption className="text-sm text-muted-foreground mt-2">Example of emergent behavior (e.g., mutual blocking) due to specification misalignment.</figcaption>
|
||||
</div>
|
||||
<CenteredImage
  src="/figures/21_blocking.png"
  alt="Visualization showing agents exhibiting emergent blocking behavior"
  width={450}
  height={600}
  caption="Example of emergent behavior (e.g., mutual blocking) due to specification misalignment."
/>
|
||||
|
||||
Recognizing that achieving a perfectly aligned global specification might be impractical in complex systems, we propose strategies focused on **adjusting the underlying local parameterizations** (e.g., reward shaping, observation design) to mitigate harmful emergence. By carefully tuning these local components, system alignment can be improved, reducing the risk of emergent failures and enhancing overall safety. <Cite bibtexKey="altmann2024emergence" />
|
||||
@@ -1,5 +1,128 @@
|
||||
{
|
||||
"MARL": {
|
||||
"definition": "Multi-Agent Reinforcement Learning (MARL) is a subfield of artificial intelligence that combines multi-agent systems and reinforcement learning. It is concerned with the study of how multiple agents can learn to interact with each other in a shared environment to achieve their goals."
|
||||
},
|
||||
"safety": {
|
||||
"definition": "In the context of AI, safety refers to the practice of ensuring systems operate without causing unintended harm, adverse effects, or behaving in undesirable ways."
|
||||
},
|
||||
"emergence": {
|
||||
"definition": "The arising of novel, coherent structures, patterns, and properties during the process of self-organization in complex systems, which are not present in the individual components."
|
||||
},
|
||||
"AI-safety": {
|
||||
"definition": "A field of research focused on ensuring that artificial intelligence systems are designed and operate in ways that are beneficial to humans and do not pose existential risks."
|
||||
},
|
||||
"simulation": {
|
||||
"definition": "The imitation of the operation of a real-world process or system over time, often involving computer models to study its behavior, test hypotheses, or train AI agents."
|
||||
},
|
||||
"complex-systems": {
|
||||
"definition": "A field of science that studies how a large number of components, often interacting locally, can give rise to collective behavior, structure, and information processing at a larger scale."
|
||||
},
|
||||
"reinforcement-learning": {
|
||||
"definition": "An area of machine learning where an agent learns to make decisions by performing actions in an environment to maximize a cumulative reward."
|
||||
},
|
||||
"bioacoustics": {
|
||||
"definition": "The study of the production, transmission, and reception of sound by animals, often involving computational methods for analysis and monitoring."
|
||||
},
|
||||
"audio-classification": {
|
||||
"definition": "The task of assigning a label or category to a given audio recording, such as identifying a specific animal call, speaker, or musical genre."
|
||||
},
|
||||
"deep-learning": {
|
||||
"definition": "A subfield of machine learning based on artificial neural networks with multiple layers (deep architectures) that learn representations of data."
|
||||
},
|
||||
"signal-processing": {
|
||||
"definition": "The analysis, interpretation, and manipulation of signals, such as sound, images, and sensor data, to extract meaningful information."
|
||||
},
|
||||
"wildlife-monitoring": {
|
||||
"definition": "The observation of animal populations and their environments to understand their status, trends, and threats, often aided by technology like acoustic sensors and AI."
|
||||
},
|
||||
"machine-learning": {
|
||||
"definition": "A field of artificial intelligence that uses statistical techniques to give computer systems the ability to 'learn' from data without being explicitly programmed."
|
||||
},
|
||||
"spectrograms": {
|
||||
"definition": "A visual representation of the spectrum of frequencies of a signal as it varies with time. Commonly used in audio analysis to turn sound into an image-like format for CNNs."
|
||||
},
|
||||
"cnn": {
|
||||
"definition": "Convolutional Neural Network (CNN) is a class of deep neural networks, most commonly applied to analyzing visual imagery, but also effective for other data types like audio spectrograms."
|
||||
},
|
||||
"artificial-life": {
|
||||
"definition": "A field of study wherein researchers examine systems related to life, its processes, and its evolution through the use of simulations and other computational methods."
|
||||
},
|
||||
"neural-networks": {
|
||||
"definition": "Computational models inspired by the structure and function of biological neural networks in animal brains, used for tasks like pattern recognition and prediction."
|
||||
},
|
||||
"self-organization": {
|
||||
"definition": "A process where some form of overall order or coordination arises out of the local interactions between the components of an initially disordered system."
|
||||
},
|
||||
"emergent-behavior": {
|
||||
"definition": "Complex, collective behaviors that arise from the simple interactions of individual agents or components in a system, without a central controller."
|
||||
},
|
||||
"artificial-chemistry": {
|
||||
"definition": "A computational modeling approach that studies the properties of chemical systems, often used to explore concepts like self-organization and the origin of life."
|
||||
},
|
||||
"recurrent-neural-networks": {
|
||||
"definition": "Recurrent Neural Networks (RNNs) are a class of neural networks well-suited for modeling sequential data, such as time series or natural language, due to their internal memory."
|
||||
},
|
||||
"geoinformatics": {
|
||||
"definition": "The science and technology dealing with the acquisition, storage, processing, analysis, and visualization of geospatial data."
|
||||
},
|
||||
"data-augmentation": {
|
||||
"definition": "A technique used to increase the size and diversity of a training dataset by creating modified copies of existing data or synthesizing new data from it."
|
||||
},
|
||||
"computer-vision": {
|
||||
"definition": "A field of artificial intelligence that trains computers to interpret and understand the visual world from digital images or videos."
|
||||
},
|
||||
"artificial-intelligence": {
|
||||
"definition": "A wide-ranging branch of computer science concerned with building smart machines capable of performing tasks that typically require human intelligence."
|
||||
},
|
||||
"java": {
|
||||
"definition": "A high-level, class-based, object-oriented programming language that is designed to have as few implementation dependencies as possible."
|
||||
},
|
||||
"mobile-development": {
|
||||
"definition": "The process of creating software applications that run on mobile devices, such as smartphones and tablets."
|
||||
},
|
||||
"app-development": {
|
||||
"definition": "The complete process of creating a software application, from conception and design to implementation, testing, and maintenance."
|
||||
},
|
||||
"agile": {
|
||||
"definition": "A project management and software development methodology that emphasizes iterative development, team collaboration, and customer feedback."
|
||||
},
|
||||
"teamwork": {
|
||||
"definition": "The collaborative effort of a group to achieve a common goal or to complete a task in the most effective and efficient way."
|
||||
},
|
||||
"self-replication": {
|
||||
"definition": "The process by which a system, be it biological or computational, can produce copies of itself."
|
||||
},
|
||||
"anomaly-detection": {
|
||||
"definition": "The identification of rare items, events, or observations which raise suspicions by differing significantly from the majority of the data."
|
||||
},
|
||||
"evolution": {
|
||||
"definition": "In a computational context, refers to evolutionary algorithms and computation, which use mechanisms inspired by biological evolution, such as selection and mutation, to find solutions to problems."
|
||||
},
|
||||
"AI": {
|
||||
"definition": "Artificial Intelligence. A branch of computer science for building smart machines capable of tasks that typically require human intelligence."
|
||||
},
|
||||
"transfer-learning": {
|
||||
"definition": "A machine learning method where a model developed for a task is reused as the starting point for a model on a second, related task."
|
||||
},
|
||||
"feature-extraction": {
|
||||
"definition": "The process of transforming raw data into numerical features that can be processed by machine learning algorithms, while preserving the information in the original data set."
|
||||
},
|
||||
"unsupervised-learning": {
|
||||
"definition": "A type of machine learning where the algorithm learns patterns from untagged or unlabeled data, without explicit instructions on what to look for."
|
||||
},
|
||||
"paralinguistics": {
|
||||
"definition": "The study of non-lexical vocal cues in speech, such as pitch, tone, and speaking rate, which can convey attitude and emotion."
|
||||
},
|
||||
"python": {
|
||||
"definition": "A high-level, interpreted, general-purpose programming language known for its simple syntax, making it popular for web development, data science, and AI."
|
||||
},
|
||||
"representation-learning": {
|
||||
"definition": "A set of techniques in machine learning that allows a system to automatically discover the representations needed for feature detection or classification from raw data."
|
||||
},
|
||||
"academia": {
|
||||
"definition": "The environment or community concerned with the pursuit of research, education, and scholarship."
|
||||
},
|
||||
"industry": {
|
||||
"definition": "The sector of an economy concerned with the commercial production and sale of goods and services, often focused on practical application and product development."
|
||||
}
|
||||
}
|
||||
}