<?xml version="1.0" encoding="UTF-8"?>
<!-- This sitemap was dynamically generated on April 3, 2026 at 12:29 PM by All in One SEO v4.9.5.1 - the original SEO plugin for WordPress. -->

<?xml-stylesheet type="text/xsl" href="https://anhnguyen.me/default-sitemap.xsl"?>

<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
	<channel>
		<title>Anh Totti Nguyen</title>
		<link><![CDATA[https://anhnguyen.me]]></link>
		<description><![CDATA[Anh Totti Nguyen]]></description>
		<lastBuildDate><![CDATA[Tue, 17 Feb 2026 14:51:51 +0000]]></lastBuildDate>
		<docs>https://validator.w3.org/feed/docs/rss2.html</docs>
		<atom:link href="https://anhnguyen.me/sitemap.rss" rel="self" type="application/rss+xml" />
		<ttl><![CDATA[60]]></ttl>

		<item>
			<guid><![CDATA[https://anhnguyen.me/work-with-me/]]></guid>
			<link><![CDATA[https://anhnguyen.me/work-with-me/]]></link>
			<title>Work with me</title>
			<pubDate><![CDATA[Tue, 17 Feb 2026 14:51:51 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/about/]]></guid>
			<link><![CDATA[https://anhnguyen.me/about/]]></link>
			<title>About me</title>
			<pubDate><![CDATA[Tue, 09 Dec 2025 03:39:21 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2024/vlms-are-blind/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2024/vlms-are-blind/]]></link>
			<title>Vision Language Models Are Blind</title>
			<pubDate><![CDATA[Thu, 22 Jan 2026 21:50:22 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2025/transformer-attention-bottlenecks/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2025/transformer-attention-bottlenecks/]]></link>
			<title>TAB: Transformer Attention Bottlenecks enable User Intervention and Debugging in Vision-Language Models</title>
			<pubDate><![CDATA[Thu, 22 Jan 2026 19:55:40 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2026/vlms-are-biased/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2026/vlms-are-biased/]]></link>
			<title>Vision Language Models are Biased</title>
			<pubDate><![CDATA[Mon, 26 Jan 2026 17:13:42 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2026/interactive-llm/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2026/interactive-llm/]]></link>
			<title>Improving Human Verification of LLM Reasoning through Interactive Explanation Interfaces</title>
			<pubDate><![CDATA[Fri, 23 Jan 2026 22:37:07 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/lab/]]></guid>
			<link><![CDATA[https://anhnguyen.me/lab/]]></link>
			<title>Lab</title>
			<pubDate><![CDATA[Fri, 23 Jan 2026 20:01:52 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2026/genai-vs-humans/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2026/genai-vs-humans/]]></link>
			<title>Understanding Generative AI Capabilities in Everyday Image Editing Tasks</title>
			<pubDate><![CDATA[Fri, 23 Jan 2026 04:48:18 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2015/understanding-neural-networks-through-deep-visualization/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2015/understanding-neural-networks-through-deep-visualization/]]></link>
			<title>Understanding Neural Networks Through Deep Visualization</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 06:01:45 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2015/3dtouch/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2015/3dtouch/]]></link>
			<title>3DTouch: A Wearable 3D Input Device for 3D Applications</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 06:00:59 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2015/innovation-engine/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2015/innovation-engine/]]></link>
			<title>Innovation Engine: Automated Creativity and Improving Stochastic Optimization via Deep Learning</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:58:07 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2016/mfv/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2016/mfv/]]></link>
			<title>Multifaceted Feature Visualization: Uncovering the Different Types of Features Learned By Each Neuron in Deep Neural Networks</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:55:49 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2016/synthesizing/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2016/synthesizing/]]></link>
			<title>Synthesizing the preferred inputs for neurons in neural networks via deep generator networks</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:54:26 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2017/ppgn/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2017/ppgn/]]></link>
			<title>Plug &#038; Play Generative Networks: Conditional Iterative Generation of Images in Latent Space</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:52:47 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2018/identifying-wild-animals/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2018/identifying-wild-animals/]]></link>
			<title>Automatically identifying, counting, and describing wild animals in camera-trap images with deep learning</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:50:44 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2018/vectordefense/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2018/vectordefense/]]></link>
			<title>VectorDefense: Vectorization as a Defense to Adversarial Examples</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:48:57 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2019/strike-with-a-pose/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2019/strike-with-a-pose/]]></link>
			<title>Strike (with) a Pose: Neural networks are easily fooled by strange poses of familiar objects</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:48:00 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2019/biggan-am/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2019/biggan-am/]]></link>
			<title>A cost-effective method for improving and re-purposing large, pre-trained GANs by fine-tuning their class-embeddings</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:46:52 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2020/counterfactual-generation/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2020/counterfactual-generation/]]></link>
			<title>Explaining image classifiers by removing input features using generative models</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:44:24 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2020/sam/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2020/sam/]]></link>
			<title>SAM: The Sensitivity of Attribution Methods to Hyperparameters</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:42:38 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2020/shape-simplicity-bias-cnn/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2020/shape-simplicity-bias-cnn/]]></link>
			<title>The shape and simplicity biases of adversarially robust ImageNet-trained CNNs</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:41:08 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/press/]]></guid>
			<link><![CDATA[https://anhnguyen.me/press/]]></link>
			<title>Press &#038; Exhibition</title>
			<pubDate><![CDATA[Mon, 09 Mar 2026 03:10:03 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2021/word-order/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2021/word-order/]]></link>
			<title>Out of Order: How important is the sequential order of words in a sentence in Natural Language Understanding tasks?</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:38:46 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2013/low-cost-augmented-reality-prototype-for-controlling-network-devices/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2013/low-cost-augmented-reality-prototype-for-controlling-network-devices/]]></link>
			<title>Low-cost Augmented Reality prototype for controlling network devices</title>
			<pubDate><![CDATA[Tue, 18 Nov 2014 17:19:44 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2021/feature-attribution-effectiveness/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2021/feature-attribution-effectiveness/]]></link>
			<title>The effectiveness of feature attribution methods and its correlation with automatic evaluation scores</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:37:20 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2021/baller2vec/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2021/baller2vec/]]></link>
			<title>baller2vec: A Multi-Entity Transformer For Multi-Agent Spatiotemporal Modeling</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:36:10 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2025/change-detection-correspondence/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2025/change-detection-correspondence/]]></link>
			<title>Improving zero-shot object-level change detection by incorporating visual correspondence</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:33:15 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2025/hot/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2025/hot/]]></link>
			<title>HoT: Highlighted Chain of Thought for Referencing Supporting Facts from Inputs</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:29:36 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2025/b-score/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2025/b-score/]]></link>
			<title>B-score: Detecting biases in large language models using response history</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:26:03 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2024/pcnn/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2024/pcnn/]]></link>
			<title>PCNN: Probable-Class Nearest-Neighbor Explanations Improve Fine-Grained Image Classification Accuracy for AIs and Humans</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:24:58 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2024/peeb/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2024/peeb/]]></link>
			<title>PEEB: Part-based Image Classifiers with an Explainable and Editable Language Bottleneck</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:20:34 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2024/glitchbench/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2024/glitchbench/]]></link>
			<title>GlitchBench: Can large multimodal models detect video game glitches?</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:19:25 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2024/face-id-vit/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2024/face-id-vit/]]></link>
			<title>Fast and Interpretable Face Identification for Out-Of-Distribution Data Using Vision Transformers</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:17:53 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2023/imagenet-hard/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2023/imagenet-hard/]]></link>
			<title>ImageNet-Hard: The Hardest Images Remaining from a Study of the Power of Zoom and Spatial Biases in Image Classification</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:16:43 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2022/gscorecam/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2022/gscorecam/]]></link>
			<title>gScoreCAM: What objects is CLIP looking at?</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:14:43 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2022/correspondence-explanation/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2022/correspondence-explanation/]]></link>
			<title>Visual correspondence-based explanations improve AI robustness and human-AI team accuracy</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:05:54 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2023/pic/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2023/pic/]]></link>
			<title>PiC: A Phrase-in-Context Dataset for Phrase Understanding and Semantic Search</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 05:02:43 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2022/explainable-robust-cnns/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2022/explainable-robust-cnns/]]></link>
			<title>How explainable are adversarially-robust CNNs?</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 04:59:53 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/k6-ai-club/]]></guid>
			<link><![CDATA[https://anhnguyen.me/k6-ai-club/]]></link>
			<title>K-6 AI club</title>
			<pubDate><![CDATA[Wed, 17 Apr 2024 23:15:19 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2022/deepface-emd/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2022/deepface-emd/]]></link>
			<title>DeepFace-EMD: Re-ranking Using Patch-wise Earth Mover&#8217;s Distance Improves Out-Of-Distribution Face Identification</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 04:53:10 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2013/low-cost-ar/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2013/low-cost-ar/]]></link>
			<title>Low-cost Augmented Reality prototype for controlling network devices</title>
			<pubDate><![CDATA[Sat, 31 May 2025 04:12:17 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2022/double-trouble/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2022/double-trouble/]]></link>
			<title>Double Trouble: How to not explain a text classifier’s decisions using counterfactuals synthesized by masked language models</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 04:50:59 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2021/inverting-robust-networks/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2021/inverting-robust-networks/]]></link>
			<title>Inverting Adversarially Robust Networks for Image Synthesis</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 04:47:03 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2025/plan-of-sqls/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2025/plan-of-sqls/]]></link>
			<title>Interpretable LLM-based Table Question Answering</title>
			<pubDate><![CDATA[Sat, 23 Aug 2025 04:42:49 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2014/3dtouch-a-modular-wearable-3d-input-device/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2014/3dtouch-a-modular-wearable-3d-input-device/]]></link>
			<title>A modular wearable 3D input device</title>
			<pubDate><![CDATA[Mon, 06 Mar 2017 17:25:19 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2010/extjs-vertical-tabpanel-example/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2010/extjs-vertical-tabpanel-example/]]></link>
			<title>ExtJS Vertical TabPanel example</title>
			<pubDate><![CDATA[Mon, 01 Dec 2014 05:27:30 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2015/fooling/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2015/fooling/]]></link>
			<title>Deep Neural Networks are Easily Fooled: High Confidence Predictions for Unrecognizable Images</title>
			<pubDate><![CDATA[Fri, 22 Aug 2025 20:24:10 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/courses/]]></guid>
			<link><![CDATA[https://anhnguyen.me/courses/]]></link>
			<title>Courses</title>
			<pubDate><![CDATA[Fri, 22 Aug 2025 04:15:56 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/2014/how-to-configure-a-waf-cxx-project-in-eclipse/]]></guid>
			<link><![CDATA[https://anhnguyen.me/2014/how-to-configure-a-waf-cxx-project-in-eclipse/]]></link>
			<title>How to configure a Waf C++ project in Eclipse</title>
			<pubDate><![CDATA[Wed, 30 Jul 2014 17:44:59 +0000]]></pubDate>
		</item>
					<item>
			<guid><![CDATA[https://anhnguyen.me/research/]]></guid>
			<link><![CDATA[https://anhnguyen.me/research/]]></link>
			<title>Research</title>
			<pubDate><![CDATA[Fri, 30 Jan 2026 22:54:28 +0000]]></pubDate>
		</item>
				</channel>
</rss>
