<?xml version="1.0" encoding="UTF-8"?>
<!-- This sitemap was dynamically generated on April 3, 2026 at 8:23 PM by All in One SEO v4.3.6.1 - the original SEO plugin for WordPress. -->

<?xml-stylesheet type="text/xsl" href="https://eng.bigai.ai/default.xsl"?>
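<!-- The xml-stylesheet processing instruction above points browsers at an XSL
     template (default.xsl) that, in AIOSEO's usual setup, renders the raw feed
     as a human-readable HTML page; feed readers and crawlers ignore it. -->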

<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
	<channel>
		<title>BIGAI: Beijing Institute for General Artificial Intelligence</title>
		<link><![CDATA[https://eng.bigai.ai]]></link>
		<description><![CDATA[Official Website for BIGAI]]></description>
		<lastBuildDate><![CDATA[Thu, 19 Mar 2026 09:55:15 +0000]]></lastBuildDate>
		<docs>https://validator.w3.org/feed/docs/rss2.html</docs>
		<atom:link href="https://eng.bigai.ai/sitemap.rss" rel="self" type="application/rss+xml" />
		<ttl><![CDATA[60]]></ttl>
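
		<!-- A minimal sketch of consuming this feed with the Python standard
		     library, assuming only the self URL declared above
		     (https://eng.bigai.ai/sitemap.rss); illustrative, not part of the
		     generated sitemap:

		     import urllib.request
		     import xml.etree.ElementTree as ET

		     ATOM = "{http://www.w3.org/2005/Atom}"  # namespace of <atom:link>

		     with urllib.request.urlopen("https://eng.bigai.ai/sitemap.rss") as resp:
		         root = ET.fromstring(resp.read())  # the <rss> root element

		     channel = root.find("channel")
		     print(channel.findtext("title"))
		     print(channel.find(ATOM + "link").get("href"))  # self URL

		     # Each <item> carries guid, link, title, and pubDate; CDATA sections
		     # are transparent to the parser, so findtext() returns plain text.
		     for item in channel.iter("item"):
		         print(item.findtext("pubDate"), item.findtext("title"))

		     Per <ttl> above, a polite consumer would cache the response and
		     refetch at most once every 60 minutes. -->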

		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/narrativeloom-enhancing-creative-storytelling-through-multi-persona-collaborative-improvisation/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/narrativeloom-enhancing-creative-storytelling-through-multi-persona-collaborative-improvisation/]]></link>
			<title>NarrativeLoom: Enhancing Creative Storytelling through Multi-Persona Collaborative Improvisation</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 09:55:15 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/scenecot-eliciting-chain-of-thought-reasoning-in-3d-scenes/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/scenecot-eliciting-chain-of-thought-reasoning-in-3d-scenes/]]></link>
			<title>SceneCOT: Eliciting Chain-of-Thought Reasoning in 3D Scenes</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 09:14:12 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/read-the-room-video-social-reasoning-with-mental-physical-causal-chains/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/read-the-room-video-social-reasoning-with-mental-physical-causal-chains/]]></link>
			<title>Read the Room: Video Social Reasoning with Mental-Physical Causal Chains</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 09:13:47 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/g4splat-geometry-guided-gaussian-splatting-with-generative-prior/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/g4splat-geometry-guided-gaussian-splatting-with-generative-prior/]]></link>
			<title>G4Splat: Geometry-Guided Gaussian Splatting with Generative Prior</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 09:04:14 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/linking-process-to-outcome-conditonal-reward-modeling-for-llm-reasoning/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/linking-process-to-outcome-conditonal-reward-modeling-for-llm-reasoning/]]></link>
			<title>Linking Process to Outcome: Conditional Reward Modeling for LLM Reasoning</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 09:02:25 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/aegis-automated-error-generation-and-identification-for-multi-agent-systems/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/aegis-automated-error-generation-and-identification-for-multi-agent-systems/]]></link>
			<title>Aegis: Automated Error Generation and Identification for Multi-Agent Systems</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 08:39:36 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/dexmove-learning-tactile-guided-non-prehensile-manipulation-with-dexterous-hands/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/dexmove-learning-tactile-guided-non-prehensile-manipulation-with-dexterous-hands/]]></link>
			<title>DexMove: Learning Tactile-Guided Non-Prehensile Manipulation with Dexterous Hands</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 08:35:59 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/towards-brigding-the-gap-between-large-scale-pretraining-and-efficient-finetuning-for-humanoid-control/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/towards-brigding-the-gap-between-large-scale-pretraining-and-efficient-finetuning-for-humanoid-control/]]></link>
			<title>Towards Bridging the Gap Between Large-scale Pretraining and Efficient Finetuning for Humanoid Control</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 08:33:24 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/stvg-r1-incentivizing-instance-level-reasoning-and-grounding-in-videos-via-reinforcement-learning/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/stvg-r1-incentivizing-instance-level-reasoning-and-grounding-in-videos-via-reinforcement-learning/]]></link>
			<title>STVG-R1: Incentivizing Instance-Level Reasoning and Grounding in Videos via Reinforcement Learning</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 08:03:02 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/mvrmulti-view-video-reward-shaping-for-reinforcement-learning/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/mvrmulti-view-video-reward-shaping-for-reinforcement-learning/]]></link>
			<title>MVR: Multi-view Video Reward Shaping for Reinforcement Learning</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 08:01:08 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/milr-improving-multimodal-image-generation-via-test-time-latent-reasoning/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/milr-improving-multimodal-image-generation-via-test-time-latent-reasoning/]]></link>
			<title>MILR: Improving Multimodal Image Generation via Test-Time Latent Reasoning</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 08:00:08 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/generating-objects-with-part-articulation-from-a-single-image/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/generating-objects-with-part-articulation-from-a-single-image/]]></link>
			<title>Generating Objects with Part-Articulation from a Single Image</title>
			<pubDate><![CDATA[Wed, 03 Dec 2025 05:51:48 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/when-large-multimodal-models-confront-evolving-knowledge-challenges-and-explorations/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/when-large-multimodal-models-confront-evolving-knowledge-challenges-and-explorations/]]></link>
			<title>When Large Multimodal Models Confront Evolving Knowledge: Challenges and Explorations</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:54:54 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/learning-what-matters-now-dynamic-preference-inference-under-contextual-shifts/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/learning-what-matters-now-dynamic-preference-inference-under-contextual-shifts/]]></link>
			<title>Learning What Matters Now: Dynamic Preference Inference under Contextual Shifts</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:52:39 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/rulereasoner-reinforced-rule-based-reasoning-via-domain-aware-dynamic-sampling/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/rulereasoner-reinforced-rule-based-reasoning-via-domain-aware-dynamic-sampling/]]></link>
			<title>RuleReasoner: Reinforced Rule-based Reasoning via Domain-aware Dynamic Sampling</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:49:40 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/policon-evaluating-llms-on-achieving-diverse-political-consensus-objectives/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/policon-evaluating-llms-on-achieving-diverse-political-consensus-objectives/]]></link>
			<title>PoliCon: Evaluating LLMs on Achieving Diverse Political Consensus Objectives</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:45:26 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/proposing-and-solving-olympiad-geometry-with-guided-tree-search/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/proposing-and-solving-olympiad-geometry-with-guided-tree-search/]]></link>
			<title>Proposing and solving olympiad geometry with guided tree search</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:42:16 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/3d-scene-change-modeling-with-consistent-multi-view-aggregation/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/3d-scene-change-modeling-with-consistent-multi-view-aggregation/]]></link>
			<title>3D Scene Change Modeling With Consistent Multi-View Aggregation</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:39:31 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/mind-the-gap-the-divergence-between-human-and-llm-generated-tasks/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/mind-the-gap-the-divergence-between-human-and-llm-generated-tasks/]]></link>
			<title>Mind the Gap: The Divergence Between Human and LLM-Generated Tasks</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:36:07 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/tongui-building-generalized-gui-agents-by-learning-from-multimodal-web-tutorials/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/tongui-building-generalized-gui-agents-by-learning-from-multimodal-web-tutorials/]]></link>
			<title>TongUI: Building Generalized GUI Agents by Learning from Multimodal Web Tutorials</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:33:20 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/reasoning-with-exploration-an-entropy-perspective/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/reasoning-with-exploration-an-entropy-perspective/]]></link>
			<title>Reasoning with Exploration: An Entropy Perspective</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:31:11 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/adapt-adaptive-decentralized-architecture-with-perception-aligned-training-for-structural-generalization-in-multi-agent-rl/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/adapt-adaptive-decentralized-architecture-with-perception-aligned-training-for-structural-generalization-in-multi-agent-rl/]]></link>
			<title>ADAPT: Adaptive Decentralized Architecture with Perception-aligned Training for Structural Generalization in Multi-Agent RL</title>
			<pubDate><![CDATA[Thu, 19 Mar 2026 07:28:11 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/from-objects-to-anywhere-a-holistic-benchmark-for-multi-level-visual-grounding-in-3d-scenes/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/from-objects-to-anywhere-a-holistic-benchmark-for-multi-level-visual-grounding-in-3d-scenes/]]></link>
			<title>From Objects to Anywhere: A Holistic Benchmark for Multi-level Visual Grounding in 3D Scenes</title>
			<pubDate><![CDATA[Tue, 02 Dec 2025 07:25:10 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/a-vr-based-robotic-teleoperation-system-with-haptic-feedback-and-adaptive-collision-avoidance/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/a-vr-based-robotic-teleoperation-system-with-haptic-feedback-and-adaptive-collision-avoidance/]]></link>
			<title>A VR-Based Robotic Teleoperation System With Haptic Feedback and Adaptive Collision Avoidance</title>
			<pubDate><![CDATA[Tue, 02 Dec 2025 07:24:35 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/]]></link>
			<title>Home</title>
			<pubDate><![CDATA[Thu, 15 Jun 2023 03:58:26 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/m3bench-benchmarking-whole-body-motion-generation-for-mobile-manipulation-in-3d-scenes/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/m3bench-benchmarking-whole-body-motion-generation-for-mobile-manipulation-in-3d-scenes/]]></link>
			<title>M3Bench: Benchmarking Whole-Body Motion Generation for Mobile Manipulation in 3D Scenes</title>
			<pubDate><![CDATA[Tue, 02 Dec 2025 07:21:52 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/integration-of-robot-and-scene-kinematics-for-sequential-mobile-manipulation-planning/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/integration-of-robot-and-scene-kinematics-for-sequential-mobile-manipulation-planning/]]></link>
			<title>Integration of Robot and Scene Kinematics for Sequential Mobile Manipulation Planning</title>
			<pubDate><![CDATA[Tue, 02 Dec 2025 07:17:35 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/pr2-a-physics-and-photo-realistic-humanoid-testbed-with-pilot-study-in-competition/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/pr2-a-physics-and-photo-realistic-humanoid-testbed-with-pilot-study-in-competition/]]></link>
			<title>PR2: A Physics- and Photo-realistic Humanoid Testbed with Pilot Study in Competition</title>
			<pubDate><![CDATA[Tue, 02 Dec 2025 07:12:58 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/adaptive-preference-optimization-with-uncertainty-aware-utility-anchor/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/adaptive-preference-optimization-with-uncertainty-aware-utility-anchor/]]></link>
			<title>Adaptive Preference Optimization with Uncertainty-aware Utility Anchor</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 08:58:47 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/reinforced-query-reasoners-for-reasoning-intensive-retrieval-tasks/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/reinforced-query-reasoners-for-reasoning-intensive-retrieval-tasks/]]></link>
			<title>Reinforced Query Reasoners for Reasoning-intensive Retrieval Tasks</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 08:57:32 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/understanding-and-leveraging-the-expert-specialization-of-context-faithfulness-in-mixture-of-experts-llms/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/understanding-and-leveraging-the-expert-specialization-of-context-faithfulness-in-mixture-of-experts-llms/]]></link>
			<title>Understanding and Leveraging the Expert Specialization of Context Faithfulness in Mixture-of-Experts LLMs</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 08:56:21 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/enhancing-llm-based-social-bot-via-an-adversarial-learning-framework/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/enhancing-llm-based-social-bot-via-an-adversarial-learning-framework/]]></link>
			<title>Enhancing LLM-Based Social Bot via an Adversarial Learning Framework</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 08:55:21 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/iterative-tool-usage-exploration-for-multimodal-agents-via-step-wise-preference-tuning/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/iterative-tool-usage-exploration-for-multimodal-agents-via-step-wise-preference-tuning/]]></link>
			<title>Iterative Tool Usage Exploration for Multimodal Agents via Step-wise Preference Tuning</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 03:12:12 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/all-in-one-3d-scene-synthesis-with-an-extensible-and-self-reflective-agent/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/all-in-one-3d-scene-synthesis-with-an-extensible-and-self-reflective-agent/]]></link>
			<title>All-in-one 3D Scene Synthesis with an Extensible and Self-Reflective Agent</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 03:11:09 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/taccel-scaling-up-vision-based-tactile-robotics-via-high-performance-gpu-simulation/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/taccel-scaling-up-vision-based-tactile-robotics-via-high-performance-gpu-simulation/]]></link>
			<title>Taccel: Scaling Up Vision-based Tactile Robotics via High-performance GPU Simulation</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 03:10:04 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/absolute-zero-reinforced-self-play-reasoning-with-zero-data/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/absolute-zero-reinforced-self-play-reasoning-with-zero-data/]]></link>
			<title>Absolute Zero: Reinforced Self-play Reasoning with Zero Data</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 03:09:10 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/world-models-should-prioritize-the-unification-of-physical-and-social-dynamics/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/world-models-should-prioritize-the-unification-of-physical-and-social-dynamics/]]></link>
			<title>World Models Should Prioritize the Unification of Physical and Social Dynamics</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 03:07:48 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/nep-autoregressive-lmage-editing-via-next-editingtoken-prediction/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/nep-autoregressive-lmage-editing-via-next-editingtoken-prediction/]]></link>
			<title>NEP: Autoregressive Image Editing via Next Editing Token Prediction</title>
			<pubDate><![CDATA[Wed, 26 Nov 2025 10:28:07 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/videollamb-long-context-video-understanding-with-recurrent-memory-bridges/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/videollamb-long-context-video-understanding-with-recurrent-memory-bridges/]]></link>
			<title>VideoLLaMB: Long-context Video Understanding with Recurrent Memory Bridges</title>
			<pubDate><![CDATA[Wed, 26 Nov 2025 09:51:51 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/taco-taming-diffusion-for-in-the-wild-video-amodal-completion/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/taco-taming-diffusion-for-in-the-wild-video-amodal-completion/]]></link>
			<title>TACO: Taming Diffusion for in-the-wild Video Amodal Completion</title>
			<pubDate><![CDATA[Wed, 26 Nov 2025 09:50:45 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/gwm-towards-scalable-gaussian-world-models-for-robotic-manipulation/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/gwm-towards-scalable-gaussian-world-models-for-robotic-manipulation/]]></link>
			<title>GWM: Towards Scalable Gaussian World Models for Robotic Manipulation</title>
			<pubDate><![CDATA[Wed, 26 Nov 2025 09:45:45 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/trace3d-consistent-segmentation-lifting-via-gaussian-instance-tracing/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/trace3d-consistent-segmentation-lifting-via-gaussian-instance-tracing/]]></link>
			<title>Trace3D: Consistent Segmentation Lifting via Gaussian Instance Tracing</title>
			<pubDate><![CDATA[Wed, 26 Nov 2025 09:44:14 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/embodied-videoagent-persistent-memory-from-egocentric-videos-and-embodied-sensors-enables-dynamic-scene-understanding/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/embodied-videoagent-persistent-memory-from-egocentric-videos-and-embodied-sensors-enables-dynamic-scene-understanding/]]></link>
			<title>Embodied VideoAgent: Persistent Memory from Egocentric Videos and Embodied Sensors Enables Dynamic Scene Understanding</title>
			<pubDate><![CDATA[Wed, 26 Nov 2025 09:42:46 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/move-to-understand-a-3d-scene-bridging-visual-grounding-and-exploration-for-efficient-and-versatile-embodied-navigation/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/move-to-understand-a-3d-scene-bridging-visual-grounding-and-exploration-for-efficient-and-versatile-embodied-navigation/]]></link>
			<title>Move to Understand a 3D Scene: Bridging Visual Grounding and Exploration for Efficient and Versatile Embodied Navigation</title>
			<pubDate><![CDATA[Wed, 26 Nov 2025 09:41:19 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/primhoi-compositional-human-object-interaction-via-reusable-primitives/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/primhoi-compositional-human-object-interaction-via-reusable-primitives/]]></link>
			<title>PrimHOI: Compositional Human-Object Interaction via Reusable Primitives</title>
			<pubDate><![CDATA[Wed, 26 Nov 2025 09:39:57 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/social-world-model-augmented-mechanism-design-policy-learning/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/social-world-model-augmented-mechanism-design-policy-learning/]]></link>
			<title>Social World Model-Augmented Mechanism Design Policy Learning</title>
			<pubDate><![CDATA[Thu, 27 Nov 2025 03:06:12 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/learning-unified-force-and-position-control-for-legged-loco-manipulation/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/learning-unified-force-and-position-control-for-legged-loco-manipulation/]]></link>
			<title>Learning Unified Force and Position Control for Legged Loco-Manipulation</title>
			<pubDate><![CDATA[Sun, 28 Sep 2025 09:48:59 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/clone-holistic-closed-loop-whole-body-teleoperation-for-long-horizon-humanoid-control/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/clone-holistic-closed-loop-whole-body-teleoperation-for-long-horizon-humanoid-control/]]></link>
			<title>CLONE: Holistic Closed-Loop Whole-Body Teleoperation for Long-Horizon Humanoid Control</title>
			<pubDate><![CDATA[Sun, 28 Sep 2025 09:47:40 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/controlvla-few-shot-object-centric-adaptation-for-pre-trained-vision-language-action-models/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/controlvla-few-shot-object-centric-adaptation-for-pre-trained-vision-language-action-models/]]></link>
			<title>ControlVLA: Few-shot Object-centric Adaptation for Pre-trained Vision-Language-Action Models</title>
			<pubDate><![CDATA[Sun, 28 Sep 2025 09:44:53 +0000]]></pubDate>
		</item>
		<item>
			<guid><![CDATA[https://eng.bigai.ai/paper/cross-robot-behavior-adaptation-through-intention-alignment/]]></guid>
			<link><![CDATA[https://eng.bigai.ai/paper/cross-robot-behavior-adaptation-through-intention-alignment/]]></link>
			<title>Cross-Robot Behavior Adaptation through Intention Alignment</title>
			<pubDate><![CDATA[Mon, 23 Mar 2026 07:26:21 +0000]]></pubDate>
		</item>
	</channel>
</rss>
