SurveyBench / ref_bench / Retrieval-Augmented Generation for Large Language Models_bench.json
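The entries that follow map arXiv identifiers to paper titles for this survey's reference benchmark. As a minimal sketch of how such a file might be loaded and inspected (the local path is an assumption based on the file name above, not part of the dataset):

import json

# Load the benchmark: a JSON object mapping arXiv ID -> {"arxivId": ..., "title": ...}.
# The filename below assumes a local copy of this file; adjust the path as needed.
path = "Retrieval-Augmented Generation for Large Language Models_bench.json"
with open(path, encoding="utf-8") as f:
    refs = json.load(f)

print(f"{len(refs)} reference papers")
# Show a few (arXiv ID, title) pairs to confirm the structure.
for arxiv_id, entry in list(refs.items())[:3]:
    print(arxiv_id, "->", entry["title"])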
{
"1810.04805": {
"arxivId": "1810.04805",
"title": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"
},
"2005.14165": {
"arxivId": "2005.14165",
"title": "Language Models are Few-Shot Learners"
},
"1910.10683": {
"arxivId": "1910.10683",
"title": "Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer"
},
"1908.10084": {
"arxivId": "1908.10084",
"title": "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks"
},
"1910.13461": {
"arxivId": "1910.13461",
"title": "BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension"
},
"2307.09288": {
"arxivId": "2307.09288",
"title": "Llama 2: Open Foundation and Fine-Tuned Chat Models"
},
"2204.02311": {
"arxivId": "2204.02311",
"title": "PaLM: Scaling Language Modeling with Pathways"
},
"2107.03374": {
"arxivId": "2107.03374",
"title": "Evaluating Large Language Models Trained on Code"
},
"2101.00190": {
"arxivId": "2101.00190",
"title": "Prefix-Tuning: Optimizing Continuous Prompts for Generation"
},
"2004.04906": {
"arxivId": "2004.04906",
"title": "Dense Passage Retrieval for Open-Domain Question Answering"
},
"1909.01066": {
"arxivId": "1909.01066",
"title": "Language Models as Knowledge Bases?"
},
"1704.00051": {
"arxivId": "1704.00051",
"title": "Reading Wikipedia to Answer Open-Domain Questions"
},
"2002.08909": {
"arxivId": "2002.08909",
"title": "REALM: Retrieval-Augmented Language Model Pre-Training"
},
"1902.07243": {
"arxivId": "1902.07243",
"title": "Graph Neural Networks for Social Recommendation"
},
"2210.03629": {
"arxivId": "2210.03629",
"title": "ReAct: Synergizing Reasoning and Acting in Language Models"
},
"2302.04761": {
"arxivId": "2302.04761",
"title": "Toolformer: Language Models Can Teach Themselves to Use Tools"
},
"2202.12837": {
"arxivId": "2202.12837",
"title": "Rethinking the Role of Demonstrations: What Makes In-Context Learning Work?"
},
"2101.06804": {
"arxivId": "2101.06804",
"title": "What Makes Good In-Context Examples for GPT-3?"
},
"2004.12832": {
"arxivId": "2004.12832",
"title": "ColBERT: Efficient and Effective Passage Search via Contextualized Late Interaction over BERT"
},
"2007.01282": {
"arxivId": "2007.01282",
"title": "Leveraging Passage Retrieval with Generative Models for Open Domain Question Answering"
},
"2312.10997": {
"arxivId": "2312.10997",
"title": "Retrieval-Augmented Generation for Large Language Models: A Survey"
},
"1904.02232": {
"arxivId": "1904.02232",
"title": "BERT Post-Training for Review Reading Comprehension and Aspect-based Sentiment Analysis"
},
"2208.03299": {
"arxivId": "2208.03299",
"title": "Few-shot Learning with Retrieval Augmented Language Models"
},
"2112.08633": {
"arxivId": "2112.08633",
"title": "Learning To Retrieve Prompts for In-Context Learning"
},
"1702.01932": {
"arxivId": "1702.01932",
"title": "A Knowledge-Grounded Neural Conversation Model"
},
"2207.05221": {
"arxivId": "2207.05221",
"title": "Language Models (Mostly) Know What They Know"
},
"2104.07567": {
"arxivId": "2104.07567",
"title": "Retrieval Augmentation Reduces Hallucination in Conversation"
},
"2301.12652": {
"arxivId": "2301.12652",
"title": "REPLUG: Retrieval-Augmented Black-Box Language Models"
},
"2211.17192": {
"arxivId": "2211.17192",
"title": "Fast Inference from Transformers via Speculative Decoding"
},
"2302.00083": {
"arxivId": "2302.00083",
"title": "In-Context Retrieval-Augmented Language Models"
},
"2310.11511": {
"arxivId": "2310.11511",
"title": "Self-RAG: Learning to Retrieve, Generate, and Critique through Self-Reflection"
},
"1612.04426": {
"arxivId": "1612.04426",
"title": "Improving Neural Language Models with a Continuous Cache"
},
"2106.01760": {
"arxivId": "2106.01760",
"title": "Template-Based Named Entity Recognition Using BART"
},
"2209.10063": {
"arxivId": "2209.10063",
"title": "Generate rather than Retrieve: Large Language Models are Strong Context Generators"
},
"2212.10509": {
"arxivId": "2212.10509",
"title": "Interleaving Retrieval with Chain-of-Thought Reasoning for Knowledge-Intensive Multi-Step Questions"
},
"2107.07566": {
"arxivId": "2107.07566",
"title": "Internet-Augmented Dialogue Generation"
},
"2302.01318": {
"arxivId": "2302.01318",
"title": "Accelerating Large Language Model Decoding with Speculative Sampling"
},
"2004.10645": {
"arxivId": "2004.10645",
"title": "AmbigQA: Answering Ambiguous Open-domain Questions"
},
"2110.07904": {
"arxivId": "2110.07904",
"title": "SPoT: Better Frozen Model Adaptation through Soft Prompt Transfer"
},
"2012.04584": {
"arxivId": "2012.04584",
"title": "Distilling Knowledge from Reader to Retriever for Question Answering"
},
"2306.13063": {
"arxivId": "2306.13063",
"title": "Can LLMs Express Their Uncertainty? An Empirical Evaluation of Confidence Elicitation in LLMs"
},
"2203.11147": {
"arxivId": "2203.11147",
"title": "Teaching language models to support answers with verified quotes"
},
"2107.07567": {
"arxivId": "2107.07567",
"title": "Beyond Goldfish Memory: Long-Term Open-Domain Conversation"
},
"2005.04611": {
"arxivId": "2005.04611",
"title": "How Context Affects Language Models' Factual Predictions"
},
"2212.14024": {
"arxivId": "2212.14024",
"title": "Demonstrate-Search-Predict: Composing retrieval and language models for knowledge-intensive NLP"
},
"2209.01975": {
"arxivId": "2209.01975",
"title": "Selective Annotation Makes Language Models Better Few-Shot Learners"
},
"2209.14610": {
"arxivId": "2209.14610",
"title": "Dynamic Prompt Learning via Policy Gradient for Semi-structured Mathematical Reasoning"
},
"2307.02046": {
"arxivId": "2307.02046",
"title": "Recommender Systems in the Era of Large Language Models (LLMs)"
},
"1202.6101": {
"arxivId": "1202.6101",
"title": "Maximum inner-product search using cone trees"
},
"2212.10496": {
"arxivId": "2212.10496",
"title": "Precise Zero-Shot Dense Retrieval without Relevance Labels"
},
"2107.06641": {
"arxivId": "2107.06641",
"title": "Trustworthy AI: A Computational Perspective"
},
"2203.08913": {
"arxivId": "2203.08913",
"title": "Memorizing Transformers"
},
"2212.02437": {
"arxivId": "2212.02437",
"title": "In-context Examples Selection for Machine Translation"
},
"2006.15020": {
"arxivId": "2006.15020",
"title": "Pre-training via Paraphrasing"
},
"1906.05807": {
"arxivId": "1906.05807",
"title": "Real-Time Open-Domain Question Answering with Dense-Sparse Phrase Index"
},
"2106.05346": {
"arxivId": "2106.05346",
"title": "End-to-End Training of Multi-Document Reader and Retriever for Open-Domain Question Answering"
},
"2004.07202": {
"arxivId": "2004.07202",
"title": "Entities as Experts: Sparse Memory Access with Entity Supervision"
},
"1911.02707": {
"arxivId": "1911.02707",
"title": "Grounded Conversation Generation as Guided Traverses in Commonsense Knowledge Graphs"
},
"2305.06983": {
"arxivId": "2305.06983",
"title": "Active Retrieval Augmented Generation"
},
"2108.11601": {
"arxivId": "2108.11601",
"title": "Retrieval Augmented Code Generation and Summarization"
},
"2211.05110": {
"arxivId": "2211.05110",
"title": "Large Language Models with Controllable Working Memory"
},
"2205.12674": {
"arxivId": "2205.12674",
"title": "Training Language Models with Memory Augmentation"
},
"2305.15294": {
"arxivId": "2305.15294",
"title": "Enhancing Retrieval-Augmented Large Language Models with Iterative Retrieval-Generation Synergy"
},
"2310.01558": {
"arxivId": "2310.01558",
"title": "Making Retrieval-Augmented Language Models Robust to Irrelevant Context"
},
"2203.05115": {
"arxivId": "2203.05115",
"title": "Internet-augmented language models through few-shot prompting for open-domain question answering"
},
"2301.13808": {
"arxivId": "2301.13808",
"title": "Large Language Models are Versatile Decomposers: Decomposing Evidence and Questions for Table-based Reasoning"
},
"2207.05987": {
"arxivId": "2207.05987",
"title": "DocPrompting: Generating Code by Retrieving the Docs"
},
"2204.02849": {
"arxivId": "2204.02849",
"title": "KNN-Diffusion: Image Generation via Large-Scale Retrieval"
},
"2212.10789": {
"arxivId": "2212.10789",
"title": "Multi-modal Molecule Structure-text Model for Text-based Retrieval and Editing"
},
"2102.02557": {
"arxivId": "2102.02557",
"title": "Adaptive Semiparametric Language Models"
},
"2109.04212": {
"arxivId": "2109.04212",
"title": "Efficient Nearest Neighbor Language Models"
},
"2304.01116": {
"arxivId": "2304.01116",
"title": "ReMoDiffuse: Retrieval-Augmented Motion Diffusion Model"
},
"2310.08319": {
"arxivId": "2310.08319",
"title": "Fine-Tuning LLaMA for Multi-Stage Text Retrieval"
},
"2402.19473": {
"arxivId": "2402.19473",
"title": "Retrieval-Augmented Generation for AI-Generated Content: A Survey"
},
"2305.04320": {
"arxivId": "2305.04320",
"title": "Unified Demonstration Retriever for In-Context Learning"
},
"2302.05698": {
"arxivId": "2302.05698",
"title": "Compositional Exemplars for In-context Learning"
},
"2108.05552": {
"arxivId": "2108.05552",
"title": "Graph Trend Filtering Networks for Recommendation"
},
"2310.01352": {
"arxivId": "2310.01352",
"title": "RA-DIT: Retrieval-Augmented Dual Instruction Tuning"
},
"2210.02627": {
"arxivId": "2210.02627",
"title": "Improving the Domain Adaptation of Retrieval Augmented Generation (RAG) Models for Open Domain Question Answering"
},
"2005.08147": {
"arxivId": "2005.08147",
"title": "Attacking Black-box Recommendations via Copying Cross-domain User Profiles"
},
"2212.05221": {
"arxivId": "2212.05221",
"title": "Reveal: Retrieval-Augmented Visual-Language Pre-Training with Multi-Source Multimodal Knowledge Memory"
},
"2209.14290": {
"arxivId": "2209.14290",
"title": "FiD-Light: Efficient and Effective Retrieval-Augmented Text Generation"
},
"2305.14002": {
"arxivId": "2305.14002",
"title": "Improving Language Models via Plug-and-Play Retrieval Feedback"
},
"2112.07708": {
"arxivId": "2112.07708",
"title": "Learning to Retrieve Passages without Supervision"
},
"2106.00957": {
"arxivId": "2106.00957",
"title": "RevCore: Review-Augmented Conversational Recommendation"
},
"2209.15323": {
"arxivId": "2209.15323",
"title": "Smallcap: Lightweight Image Captioning Prompted with Retrieval Augmentation"
},
"2207.06300": {
"arxivId": "2207.06300",
"title": "Re2G: Retrieve, Rerank, Generate"
},
"2305.02437": {
"arxivId": "2305.02437",
"title": "Lift Yourself Up: Retrieval-augmented Text Generation with Self Memory"
},
"2206.08082": {
"arxivId": "2206.08082",
"title": "Self-Generated In-Context Learning: Leveraging Auto-regressive Language Models as a Demonstration Generator"
},
"2210.17236": {
"arxivId": "2210.17236",
"title": "When Language Model Meets Private Library"
},
"2304.06762": {
"arxivId": "2304.06762",
"title": "Shall We Pretrain Autoregressive Language Models with Retrieval? A Comprehensive Study"
},
"2310.04027": {
"arxivId": "2310.04027",
"title": "Enhancing Financial Sentiment Analysis via Retrieval Augmented Large Language Models"
},
"2303.08518": {
"arxivId": "2303.08518",
"title": "UPRISE: Universal Prompt Retrieval for Improving Zero-Shot Evaluation"
},
"2212.01349": {
"arxivId": "2212.01349",
"title": "Nonparametric Masked Language Modeling"
},
"2310.15141": {
"arxivId": "2310.15141",
"title": "SpecTr: Fast Speculative Decoding via Optimal Transport"
},
"2207.13162": {
"arxivId": "2207.13162",
"title": "Retrieval-Augmented Transformer for Image Captioning"
},
"2207.10307": {
"arxivId": "2207.10307",
"title": "Knowledge-enhanced Black-box Attacks for Recommendations"
},
"2209.10117": {
"arxivId": "2209.10117",
"title": "A Comprehensive Survey on Trustworthy Recommender Systems"
},
"2304.14732": {
"arxivId": "2304.14732",
"title": "Search-in-the-Chain: Towards the Accurate, Credible and Traceable Content Generation for Complex Knowledge-intensive Tasks"
},
"2305.18846": {
"arxivId": "2305.18846",
"title": "Knowledge Graph-Augmented Language Models for Knowledge-Grounded Dialogue Generation"
},
"2210.13693": {
"arxivId": "2210.13693",
"title": "XRICL: Cross-lingual Retrieval-Augmented In-Context Learning for Cross-lingual Text-to-SQL Semantic Parsing"
},
"2401.01301": {
"arxivId": "2401.01301",
"title": "Large Legal Fictions: Profiling Legal Hallucinations in Large Language Models"
},
"2302.08266": {
"arxivId": "2302.08266",
"title": "Fairly Adaptive Negative Sampling for Recommendations"
},
"2305.16171": {
"arxivId": "2305.16171",
"title": "Multi-lingual and Multi-cultural Figurative Language Understanding"
},
"2310.14393": {
"arxivId": "2310.14393",
"title": "Merging Generated and Retrieved Knowledge for Open-Domain QA"
},
"2211.05165": {
"arxivId": "2211.05165",
"title": "Uni-Parser: Unified Semantic Parser for Question Answering on Knowledge Base and Database"
},
"2310.05002": {
"arxivId": "2310.05002",
"title": "Self-Knowledge Guided Retrieval Augmentation for Large Language Models"
},
"2402.16893": {
"arxivId": "2402.16893",
"title": "The Good and The Bad: Exploring Privacy Issues in Retrieval-Augmented Generation (RAG)"
},
"2309.10954": {
"arxivId": "2309.10954",
"title": "In-Context Learning for Text Classification with Many Labels"
},
"2402.08416": {
"arxivId": "2402.08416",
"title": "Pandora: Jailbreak GPTs by Retrieval Augmented Generation Poisoning"
},
"2210.12360": {
"arxivId": "2210.12360",
"title": "Prompt-Tuning Can Be Much Better Than Fine-Tuning on Cross-lingual Understanding With Multilingual Language Models"
},
"2307.06962": {
"arxivId": "2307.06962",
"title": "Copy is All You Need"
},
"2210.05758": {
"arxivId": "2210.05758",
"title": "Decoupled Context Processing for Context Augmented Language Modeling"
},
"2310.18347": {
"arxivId": "2310.18347",
"title": "PRCA: Fitting Black-Box Large Language Models for Retrieval Question Answering via Pluggable Reward-Driven Contextual Adapter"
},
"2305.05181": {
"arxivId": "2305.05181",
"title": "MoT: Memory-of-Thought Enables ChatGPT to Self-Improve"
},
"2305.19912": {
"arxivId": "2305.19912",
"title": "Structure-Aware Language Model Pretraining Improves Dense Retrieval on Structured Data"
},
"1901.01474": {
"arxivId": "1901.01474",
"title": "Bilinear Supervised Hashing Based on 2D Image Features"
},
"2402.13973": {
"arxivId": "2402.13973",
"title": "Linear-Time Graph Neural Networks for Scalable Recommendations"
},
"2312.11361": {
"arxivId": "2312.11361",
"title": "NoMIRACL: Knowing When You Don't Know for Robust Multilingual Retrieval-Augmented Generation"
},
"1706.03762": {
"arxivId": "1706.03762",
"title": "Attention is All you Need"
},
"2203.02155": {
"arxivId": "2203.02155",
"title": "Training language models to follow instructions with human feedback"
},
"2303.08774": {
"arxivId": "2303.08774",
"title": "GPT-4 Technical Report"
},
"1911.02116": {
"arxivId": "1911.02116",
"title": "Unsupervised Cross-lingual Representation Learning at Scale"
},
"2005.11401": {
"arxivId": "2005.11401",
"title": "Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks"
},
"1702.08734": {
"arxivId": "1702.08734",
"title": "Billion-Scale Similarity Search with GPUs"
},
"2205.01068": {
"arxivId": "2205.01068",
"title": "OPT: Open Pre-trained Transformer Language Models"
},
"2104.08821": {
"arxivId": "2104.08821",
"title": "SimCSE: Simple Contrastive Learning of Sentence Embeddings"
},
"2009.03300": {
"arxivId": "2009.03300",
"title": "Measuring Massive Multitask Language Understanding"
},
"1905.00537": {
"arxivId": "1905.00537",
"title": "SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems"
},
"1705.03551": {
"arxivId": "1705.03551",
"title": "TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension"
},
"1809.09600": {
"arxivId": "1809.09600",
"title": "HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering"
},
"2211.05100": {
"arxivId": "2211.05100",
"title": "BLOOM: A 176B-Parameter Open-Access Multilingual Language Model"
},
"2101.00027": {
"arxivId": "2101.00027",
"title": "The Pile: An 800GB Dataset of Diverse Text for Language Modeling"
},
"2202.03629": {
"arxivId": "2202.03629",
"title": "Survey of Hallucination in Natural Language Generation"
},
"1803.05355": {
"arxivId": "1803.05355",
"title": "FEVER: a Large-scale Dataset for Fact Extraction and VERification"
},
"1603.09320": {
"arxivId": "1603.09320",
"title": "Efficient and Robust Approximate Nearest Neighbor Search Using Hierarchical Navigable Small World Graphs"
},
"2103.10360": {
"arxivId": "2103.10360",
"title": "GLM: General Language Model Pretraining with Autoregressive Blank Infilling"
},
"2007.00808": {
"arxivId": "2007.00808",
"title": "Approximate Nearest Neighbor Negative Contrastive Learning for Dense Text Retrieval"
},
"2112.09332": {
"arxivId": "2112.09332",
"title": "WebGPT: Browser-assisted question-answering with human feedback"
},
"1811.01241": {
"arxivId": "1811.01241",
"title": "Wizard of Wikipedia: Knowledge-Powered Conversational agents"
},
"2112.04426": {
"arxivId": "2112.04426",
"title": "Improving language models by retrieving from trillions of tokens"
},
"1911.00172": {
"arxivId": "1911.00172",
"title": "Generalization through Memorization: Nearest Neighbor Language Models"
},
"2204.06745": {
"arxivId": "2204.06745",
"title": "GPT-NeoX-20B: An Open-Source Autoregressive Language Model"
},
"2306.01116": {
"arxivId": "2306.01116",
"title": "The RefinedWeb Dataset for Falcon LLM: Outperforming Curated Corpora with Web Data, and Web Data Only"
},
"1909.06146": {
"arxivId": "1909.06146",
"title": "PubMedQA: A Dataset for Biomedical Research Question Answering"
},
"2112.09118": {
"arxivId": "2112.09118",
"title": "Unsupervised Dense Information Retrieval with Contrastive Learning"
},
"2009.02252": {
"arxivId": "2009.02252",
"title": "KILT: a Benchmark for Knowledge Intensive Language Tasks"
},
"2304.03277": {
"arxivId": "2304.03277",
"title": "Instruction Tuning with GPT-4"
},
"2009.13081": {
"arxivId": "2009.13081",
"title": "What Disease does this Patient Have? A Large-scale Open Domain Question Answering Dataset from Medical Exams"
},
"2003.06713": {
"arxivId": "2003.06713",
"title": "Document Ranking with a Pretrained Sequence-to-Sequence Model"
},
"2212.03533": {
"arxivId": "2212.03533",
"title": "Text Embeddings by Weakly-Supervised Contrastive Pre-training"
},
"2112.07899": {
"arxivId": "2112.07899",
"title": "Large Dual Encoders Are Generalizable Retrievers"
},
"1910.14424": {
"arxivId": "1910.14424",
"title": "Multi-Stage Document Ranking with BERT"
},
"2205.05131": {
"arxivId": "2205.05131",
"title": "UL2: Unifying Language Learning Paradigms"
},
"2112.10668": {
"arxivId": "2112.10668",
"title": "Few-shot Learning with Multilingual Generative Language Models"
},
"2010.04389": {
"arxivId": "2010.04389",
"title": "A Survey of Knowledge-enhanced Text Generation"
},
"2202.06991": {
"arxivId": "2202.06991",
"title": "Transformer Memory as a Differentiable Search Index"
},
"2203.14371": {
"arxivId": "2203.14371",
"title": "MedMCQA : A Large-scale Multi-Subject Multi-Choice Dataset for Medical domain Question Answering"
},
"1804.05936": {
"arxivId": "1804.05936",
"title": "Learning a Deep Listwise Context Model for Ranking Refinement"
},
"2209.11755": {
"arxivId": "2209.11755",
"title": "Promptagator: Few-shot Dense Retrieval From 8 Examples"
},
"2308.07107": {
"arxivId": "2308.07107",
"title": "Large Language Models for Information Retrieval: A Survey"
},
"2305.18486": {
"arxivId": "2305.18486",
"title": "A Systematic Study and Comprehensive Evaluation of ChatGPT on Benchmark Datasets"
},
"2202.08904": {
"arxivId": "2202.08904",
"title": "SGPT: GPT Sentence Embeddings for Semantic Search"
},
"2202.01110": {
"arxivId": "2202.01110",
"title": "A Survey on Retrieval-Augmented Text Generation"
},
"2006.05009": {
"arxivId": "2006.05009",
"title": "Few-Shot Generative Conversational Query Rewriting"
},
"2209.14491": {
"arxivId": "2209.14491",
"title": "Re-Imagen: Retrieval-Augmented Text-to-Image Generator"
},
"2204.10628": {
"arxivId": "2204.10628",
"title": "Autoregressive Search Engines: Generating Substrings as Document Identifiers"
},
"1804.04526": {
"arxivId": "1804.04526",
"title": "EventKG: A Multilingual Event-Centric Temporal Knowledge Graph"
},
"2008.09093": {
"arxivId": "2008.09093",
"title": "PARADE: Passage Representation Aggregation forDocument Reranking"
},
"2310.04408": {
"arxivId": "2310.04408",
"title": "RECOMP: Improving Retrieval-Augmented LMs with Compression and Selective Augmentation"
},
"2312.15166": {
"arxivId": "2312.15166",
"title": "SOLAR 10.7B: Scaling Large Language Models with Simple yet Effective Depth Up-Scaling"
},
"2210.10634": {
"arxivId": "2210.10634",
"title": "RankT5: Fine-Tuning T5 for Text Ranking with Ranking Losses"
},
"2210.02928": {
"arxivId": "2210.02928",
"title": "MuRAG: Multimodal Retrieval-Augmented Generator for Open Question Answering over Images and Text"
},
"2205.12035": {
"arxivId": "2205.12035",
"title": "RetroMAE: Pre-Training Retrieval-oriented Language Models Via Masked Auto-Encoder"
},
"2302.07452": {
"arxivId": "2302.07452",
"title": "How to Train Your DRAGON: Diverse Augmentation Towards Generalizable Dense Retrieval"
},
"2402.13178": {
"arxivId": "2402.13178",
"title": "Benchmarking Retrieval-Augmented Generation for Medicine"
},
"2401.14887": {
"arxivId": "2401.14887",
"title": "The Power of Noise: Redefining Retrieval for RAG Systems"
},
"2310.06117": {
"arxivId": "2310.06117",
"title": "Take a Step Back: Evoking Reasoning via Abstraction in Large Language Models"
},
"2305.13269": {
"arxivId": "2305.13269",
"title": "Chain-of-Knowledge: Grounding Large Language Models via Dynamic Knowledge Adapting over Heterogeneous Sources"
},
"2202.00217": {
"arxivId": "2202.00217",
"title": "WebFormer: The Web-page Transformer for Structure Information Extraction"
},
"2311.09476": {
"arxivId": "2311.09476",
"title": "ARES: An Automated Evaluation Framework for Retrieval-Augmented Generation Systems"
},
"2310.05029": {
"arxivId": "2310.05029",
"title": "Walking Down the Memory Maze: Beyond Context Limit through Interactive Reading"
},
"2204.05511": {
"arxivId": "2204.05511",
"title": "GERE: Generative Evidence Retrieval for Fact Verification"
},
"2305.17331": {
"arxivId": "2305.17331",
"title": "Augmentation-Adapted Retriever Improves Generalization of Language Models as Generic Plug-In"
},
"2305.04757": {
"arxivId": "2305.04757",
"title": "Augmented Large Language Models with Parametric Knowledge Guiding"
},
"2304.10453": {
"arxivId": "2304.10453",
"title": "Phoenix: Democratizing ChatGPT across Languages"
},
"2302.04858": {
"arxivId": "2302.04858",
"title": "Re-ViLM: Retrieval-Augmented Visual Language Model for Zero and Few-Shot Image Captioning"
},
"2405.07437": {
"arxivId": "2405.07437",
"title": "Evaluation of Retrieval-Augmented Generation: A Survey"
},
"2308.11761": {
"arxivId": "2308.11761",
"title": "KnowledGPT: Enhancing Large Language Models with Retrieval and Storage Access on Knowledge Bases"
},
"2306.04504": {
"arxivId": "2306.04504",
"title": "Evaluation of ChatGPT on Biomedical Tasks: A Zero-Shot Comparison with Fine-Tuned Generative Transformers"
},
"2311.08147": {
"arxivId": "2311.08147",
"title": "RECALL: A Benchmark for LLMs Robustness against External Counterfactual Knowledge"
},
"2404.05970": {
"arxivId": "2404.05970",
"title": "Optimization Methods for Personalizing Large Language Models through Retrieval Augmentation"
},
"2309.08051": {
"arxivId": "2309.08051",
"title": "Retrieval-Augmented Text-to-Audio Generation"
},
"2403.05676": {
"arxivId": "2403.05676",
"title": "PipeRAG: Fast Retrieval-Augmented Generation via Algorithm-System Co-design"
},
"2310.13682": {
"arxivId": "2310.13682",
"title": "Optimizing Retrieval-augmented Reader Models via Token Elimination"
},
"2111.07267": {
"arxivId": "2111.07267",
"title": "Understanding Jargon: Combining Extraction and Generation for Definition Modeling"
},
"1609.02907": {
"arxivId": "1609.02907",
"title": "Semi-Supervised Classification with Graph Convolutional Networks"
},
"1907.11692": {
"arxivId": "1907.11692",
"title": "RoBERTa: A Robustly Optimized BERT Pretraining Approach"
},
"1710.10903": {
"arxivId": "1710.10903",
"title": "Graph Attention Networks"
},
"1706.02216": {
"arxivId": "1706.02216",
"title": "Inductive Representation Learning on Large Graphs"
},
"2104.08691": {
"arxivId": "2104.08691",
"title": "The Power of Scale for Parameter-Efficient Prompt Tuning"
},
"1707.01476": {
"arxivId": "1707.01476",
"title": "Convolutional 2D Knowledge Graph Embeddings"
},
"2305.14314": {
"arxivId": "2305.14314",
"title": "QLoRA: Efficient Finetuning of Quantized LLMs"
},
"1811.00937": {
"arxivId": "1811.00937",
"title": "CommonsenseQA: A Question Answering Challenge Targeting Commonsense Knowledge"
},
"1911.11641": {
"arxivId": "1911.11641",
"title": "PIQA: Reasoning about Physical Commonsense in Natural Language"
},
"1907.10903": {
"arxivId": "1907.10903",
"title": "DropEdge: Towards Deep Graph Convolutional Networks on Node Classification"
},
"1809.02789": {
"arxivId": "1809.02789",
"title": "Can a Suit of Armor Conduct Electricity? A New Dataset for Open Book Question Answering"
},
"2103.10385": {
"arxivId": "2103.10385",
"title": "GPT Understands, Too"
},
"2407.21783": {
"arxivId": "2407.21783",
"title": "The Llama 3 Herd of Models"
},
"1606.03126": {
"arxivId": "1606.03126",
"title": "Key-Value Memory Networks for Directly Reading Documents"
},
"2307.03172": {
"arxivId": "2307.03172",
"title": "Lost in the Middle: How Language Models Use Long Contexts"
},
"2007.08663": {
"arxivId": "2007.08663",
"title": "TUDataset: A collection of benchmark datasets for learning with graphs"
},
"1506.02075": {
"arxivId": "1506.02075",
"title": "Large-scale Simple Question Answering with Memory Networks"
},
"1803.06643": {
"arxivId": "1803.06643",
"title": "The Web as a Knowledge-Base for Answering Complex Questions"
},
"2104.06378": {
"arxivId": "2104.06378",
"title": "QA-GNN: Reasoning with Language Models and Knowledge Graphs for Question Answering"
},
"1711.05851": {
"arxivId": "1711.05851",
"title": "Go for a Walk and Arrive at the Answer: Reasoning Over Paths in Knowledge Bases using Reinforcement Learning"
},
"2306.08302": {
"arxivId": "2306.08302",
"title": "Unifying Large Language Models and Knowledge Graphs: A Roadmap"
},
"1909.02151": {
"arxivId": "1909.02151",
"title": "KagNet: Knowledge-Aware Graph Networks for Commonsense Reasoning"
},
"1709.04071": {
"arxivId": "1709.04071",
"title": "Variational Reasoning for Question Answering with Knowledge Graph"
},
"1809.00782": {
"arxivId": "1809.00782",
"title": "Open Domain Question Answering Using Early Fusion of Knowledge Bases and Text"
},
"2010.05953": {
"arxivId": "2010.05953",
"title": "COMET-ATOMIC 2020: On Symbolic and Neural Commonsense Knowledge Graphs"
},
"1904.09537": {
"arxivId": "1904.09537",
"title": "PullNet: Open Domain Question Answering with Iterative Retrieval on Knowledge Bases and Text"
},
"2311.05232": {
"arxivId": "2311.05232",
"title": "A Survey on Hallucination in Large Language Models: Principles, Taxonomy, Challenges, and Open Questions"
},
"2407.10671": {
"arxivId": "2407.10671",
"title": "Qwen2 Technical Report"
},
"1906.07348": {
"arxivId": "1906.07348",
"title": "Zero-Shot Entity Linking by Reading Entity Descriptions"
},
"2005.00646": {
"arxivId": "2005.00646",
"title": "Scalable Multi-Hop Relational Reasoning for Knowledge-Aware Question Answering"
},
"2305.09645": {
"arxivId": "2305.09645",
"title": "StructGPT: A General Framework for Large Language Model to Reason over Structured Data"
},
"2011.07743": {
"arxivId": "2011.07743",
"title": "Beyond I.I.D.: Three Levels of Generalization for Question Answering on Knowledge Bases"
},
"2101.03737": {
"arxivId": "2101.03737",
"title": "Improving Multi-hop Knowledge Base Question Answering by Learning Intermediate Supervision Signals"
},
"2105.11644": {
"arxivId": "2105.11644",
"title": "A Survey on Complex Knowledge Base Question Answering: Methods, Challenges and Solutions"
},
"2305.10037": {
"arxivId": "2305.10037",
"title": "Can Language Models Solve Graph Problems in Natural Language?"
},
"2308.07134": {
"arxivId": "2308.07134",
"title": "Language is All a Graph Needs"
},
"2310.01061": {
"arxivId": "2310.01061",
"title": "Reasoning on Graphs: Faithful and Interpretable Large Language Model Reasoning"
},
"2109.08678": {
"arxivId": "2109.08678",
"title": "RNG-KBQA: Generation Augmented Iterative Ranking for Knowledge Base Question Answering"
},
"2305.15066": {
"arxivId": "2305.15066",
"title": "GPT4Graph: Can Large Language Models Understand Graph Structured Data ? An Empirical Evaluation and Benchmarking"
},
"2404.16130": {
"arxivId": "2404.16130",
"title": "From Local to Global: A Graph RAG Approach to Query-Focused Summarization"
},
"2007.13069": {
"arxivId": "2007.13069",
"title": "A Survey on Complex Question Answering over Knowledge Base: Recent Advances and Challenges"
},
"2307.07697": {
"arxivId": "2307.07697",
"title": "Think-on-Graph: Deep and Responsible Reasoning of Large Language Model with Knowledge Graph"
},
"2202.13296": {
"arxivId": "2202.13296",
"title": "Subgraph Retrieval Enhanced Model for Multi-hop Knowledge Base Question Answering"
},
"2312.02783": {
"arxivId": "2312.02783",
"title": "Large Language Models on Graphs: A Comprehensive Survey"
},
"2311.10723": {
"arxivId": "2311.10723",
"title": "Large Language Models in Finance: A Survey"
},
"2109.01653": {
"arxivId": "2109.01653",
"title": "CREAK: A Dataset for Commonsense Reasoning over Entity Knowledge"
},
"2310.11829": {
"arxivId": "2310.11829",
"title": "Towards Graph Foundation Models: A Survey and Beyond"
},
"2108.06688": {
"arxivId": "2108.06688",
"title": "Complex Knowledge Base Question Answering: A Survey"
},
"2102.08942": {
"arxivId": "2102.08942",
"title": "A Survey on Locality Sensitive Hashing Algorithms and their Applications"
},
"2306.04136": {
"arxivId": "2306.04136",
"title": "Knowledge-Augmented Language Model Prompting for Zero-Shot Knowledge Graph Question Answering"
},
"2101.00376": {
"arxivId": "2101.00376",
"title": "RiddleSense: Reasoning about Riddle Questions Featuring Linguistic Creativity and Commonsense Knowledge"
},
"2310.01089": {
"arxivId": "2310.01089",
"title": "GraphText: Graph Reasoning in Text Space"
},
"2204.08109": {
"arxivId": "2204.08109",
"title": "ArcaneQA: Dynamic Program Induction and Contextualized Encoding for Knowledge Base Question Answering"
},
"2308.11730": {
"arxivId": "2308.11730",
"title": "Knowledge Graph Prompting for Multi-Document Question Answering"
},
"2210.01613": {
"arxivId": "2210.01613",
"title": "Mintaka: A Complex, Natural, and Multilingual Dataset for End-to-End Question Answering"
},
"2308.13259": {
"arxivId": "2308.13259",
"title": "Knowledge-Driven CoT: Exploring Faithful Reasoning in LLMs for Knowledge-intensive Question Answering"
},
"2212.00959": {
"arxivId": "2212.00959",
"title": "UniKGQA: Unified Retrieval and Reasoning for Solving Multi-hop Question Answering Over Knowledge Graph"
},
"2202.00120": {
"arxivId": "2202.00120",
"title": "QALD-9-plus: A Multilingual Dataset for Question Answering over DBpedia and Wikidata Translated by Native Speakers"
},
"2309.11206": {
"arxivId": "2309.11206",
"title": "Retrieve-Rewrite-Answer: A KG-to-Text Enhanced LLMs Framework for Knowledge Graph Question Answering"
},
"2309.03118": {
"arxivId": "2309.03118",
"title": "Knowledge Solver: Teaching LLMs to Search for Domain Knowledge from Knowledge Graphs"
},
"2305.06590": {
"arxivId": "2305.06590",
"title": "FactKG: Fact Verification via Reasoning on Knowledge Graphs"
},
"2403.18105": {
"arxivId": "2403.18105",
"title": "Large Language Models for Education: A Survey and Outlook"
},
"2402.08170": {
"arxivId": "2402.08170",
"title": "LLaGA: Large Language and Graph Assistant"
},
"2405.06211": {
"arxivId": "2405.06211",
"title": "A Survey on RAG Meeting LLMs: Towards Retrieval-Augmented Large Language Models"
},
"2202.06129": {
"arxivId": "2202.06129",
"title": "RETE: Retrieval-Enhanced Temporal Event Forecasting on Unified Query Product Evolutionary Graph"
},
"2402.11163": {
"arxivId": "2402.11163",
"title": "KG-Agent: An Efficient Autonomous Agent Framework for Complex Reasoning over Knowledge Graph"
},
"2402.07197": {
"arxivId": "2402.07197",
"title": "GraphTranslator: Aligning Graph Model to Large Language Model for Open-ended Tasks"
},
"2404.00579": {
"arxivId": "2404.00579",
"title": "A Review of Modern Recommender Systems Using Generative Models (Gen-RecSys)"
},
"2310.08975": {
"arxivId": "2310.08975",
"title": "ChatKBQA: A Generate-then-Retrieve Framework for Knowledge Base Question Answering with Fine-tuned Large Language Models"
},
"2404.07103": {
"arxivId": "2404.07103",
"title": "Graph Chain-of-Thought: Augmenting Large Language Models by Reasoning on Graphs"
},
"2305.18742": {
"arxivId": "2305.18742",
"title": "Graph Reasoning for Question Answering with Triplet Retrieval"
},
"2405.04819": {
"arxivId": "2405.04819",
"title": "DALK: Dynamic Co-Augmentation of LLMs and KG to answer Alzheimer's Disease Questions with Scientific Literature"
},
"2401.00426": {
"arxivId": "2401.00426",
"title": "keqing: knowledge-based question answering is a nature chain-of-thought mentor of LLM"
},
"2311.03758": {
"arxivId": "2311.03758",
"title": "Large Language Model based Long-tail Query Rewriting in Taobao Search"
},
"2404.17723": {
"arxivId": "2404.17723",
"title": "Retrieval-Augmented Generation with Knowledge Graphs for Customer Service Question Answering"
},
"2308.10173": {
"arxivId": "2308.10173",
"title": "FoodGPT: A Large Language Model in Food Testing Domain with Incremental Pre-training and Knowledge Graph Prompt"
},
"2305.12416": {
"arxivId": "2305.12416",
"title": "Direct Fact Retrieval from Knowledge Graphs without Entity Linking"
},
"2205.01841": {
"arxivId": "2205.01841",
"title": "Great Truths are Always Simple: A Rather Simple Knowledge Encoder for Enhancing the Commonsense Reasoning Capacity of Pre-Trained Models"
},
"2403.05881": {
"arxivId": "2403.05881",
"title": "KG-Rank: Enhancing Large Language Models for Medical QA with Knowledge Graphs and Ranking Techniques"
},
"2401.15569": {
"arxivId": "2401.15569",
"title": "Efficient Tuning and Inference for Large Language Models on Textual Graphs"
},
"2312.15883": {
"arxivId": "2312.15883",
"title": "HyKGE: A Hypothesis Knowledge Graph Enhanced Framework for Accurate and Reliable Medical LLMs Responses"
},
"2308.14436": {
"arxivId": "2308.14436",
"title": "Bridging the KB-Text Gap: Leveraging Structured Knowledge-aware Pre-training for KBQA"
},
"2303.12320": {
"arxivId": "2303.12320",
"title": "GrapeQA: GRaph Augmentation and Pruning to Enhance Question-Answering"
},
"2405.14831": {
"arxivId": "2405.14831",
"title": "HippoRAG: Neurobiologically Inspired Long-Term Memory for Large Language Models"
},
"2211.10991": {
"arxivId": "2211.10991",
"title": "Modeling Fine-grained Information via Knowledge-aware Hierarchical Graph for Zero-shot Entity Retrieval"
},
"2210.13650": {
"arxivId": "2210.13650",
"title": "ReaRev: Adaptive Reasoning for Question Answering over Knowledge Graphs"
},
"2404.00492": {
"arxivId": "2404.00492",
"title": "Multi-hop Question Answering under Temporal Knowledge Editing"
},
"1606.05250": {
"arxivId": "1606.05250",
"title": "SQuAD: 100,000+ Questions for Machine Comprehension of Text"
},
"2105.00691": {
"arxivId": "2105.00691",
"title": "Hybrid Intelligence"
},
"2309.07930": {
"arxivId": "2309.07930",
"title": "Generative AI"
},
"2201.11227": {
"arxivId": "2201.11227",
"title": "Synchromesh: Reliable code generation from pre-trained language models"
},
"1808.10025": {
"arxivId": "1808.10025",
"title": "Retrieval-Based Neural Code Generation"
},
"2401.05856": {
"arxivId": "2401.05856",
"title": "Seven Failure Points When Engineering a Retrieval Augmented Generation System"
},
"2108.13934": {
"arxivId": "2108.13934",
"title": "Robust Retrieval Augmented Generation for Zero-shot Slot Filling"
},
"1310.4546": {
"arxivId": "1310.4546",
"title": "Distributed Representations of Words and Phrases and their Compositionality"
},
"1301.3781": {
"arxivId": "1301.3781",
"title": "Efficient Estimation of Word Representations in Vector Space"
},
"1901.02860": {
"arxivId": "1901.02860",
"title": "Transformer-XL: Attentive Language Models beyond a Fixed-Length Context"
},
"2004.05150": {
"arxivId": "2004.05150",
"title": "Longformer: The Long-Document Transformer"
},
"2107.13586": {
"arxivId": "2107.13586",
"title": "Pre-train, Prompt, and Predict: A Systematic Survey of Prompting Methods in Natural Language Processing"
},
"2109.01652": {
"arxivId": "2109.01652",
"title": "Finetuned Language Models Are Zero-Shot Learners"
},
"2110.08207": {
"arxivId": "2110.08207",
"title": "Multitask Prompted Training Enables Zero-Shot Task Generalization"
},
"2205.14135": {
"arxivId": "2205.14135",
"title": "FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness"
},
"2003.08271": {
"arxivId": "2003.08271",
"title": "Pre-trained models for natural language processing: A survey"
},
"2111.00396": {
"arxivId": "2111.00396",
"title": "Efficiently Modeling Long Sequences with Structured State Spaces"
},
"2205.10625": {
"arxivId": "2205.10625",
"title": "Least-to-Most Prompting Enables Complex Reasoning in Large Language Models"
},
"2112.00861": {
"arxivId": "2112.00861",
"title": "A General Language Assistant as a Laboratory for Alignment"
},
"2112.00114": {
"arxivId": "2112.00114",
"title": "Show Your Work: Scratchpads for Intermediate Computation with Language Models"
},
"1808.08949": {
"arxivId": "1808.08949",
"title": "Dissecting Contextual Word Embeddings: Architecture and Representation"
},
"2104.05240": {
"arxivId": "2104.05240",
"title": "Factual Probing Is [MASK]: Learning vs. Learning to Recall"
},
"2306.15595": {
"arxivId": "2306.15595",
"title": "Extending Context Window of Large Language Models via Positional Interpolation"
},
"2309.01219": {
"arxivId": "2309.01219",
"title": "Siren's Song in the AI Ocean: A Survey on Hallucination in Large Language Models"
},
"2208.04933": {
"arxivId": "2208.04933",
"title": "Simplified State Space Layers for Sequence Modeling"
},
"2110.07178": {
"arxivId": "2110.07178",
"title": "Symbolic Knowledge Distillation: from General Language Models to Commonsense Models"
},
"2212.14052": {
"arxivId": "2212.14052",
"title": "Hungry Hungry Hippos: Towards Language Modeling with State Space Models"
},
"2302.10866": {
"arxivId": "2302.10866",
"title": "Hyena Hierarchy: Towards Larger Convolutional Language Models"
},
"2304.08467": {
"arxivId": "2304.08467",
"title": "Learning to Compress Prompts with Gist Tokens"
},
"2309.01431": {
"arxivId": "2309.01431",
"title": "Benchmarking Large Language Models in Retrieval-Augmented Generation"
},
"2110.07814": {
"arxivId": "2110.07814",
"title": "Meta-learning via Language Model In-context Tuning"
},
"2305.14788": {
"arxivId": "2305.14788",
"title": "Adapting Language Models to Compress Contexts"
},
"2303.15647": {
"arxivId": "2303.15647",
"title": "Scaling Down to Scale Up: A Guide to Parameter-Efficient Fine-Tuning"
},
"2310.06839": {
"arxivId": "2310.06839",
"title": "LongLLMLingua: Accelerating and Enhancing LLMs in Long Context Scenarios via Prompt Compression"
},
"2309.12307": {
"arxivId": "2309.12307",
"title": "LongLoRA: Efficient Fine-tuning of Long-Context Large Language Models"
},
"2207.06881": {
"arxivId": "2207.06881",
"title": "Recurrent Memory Transformer"
},
"2310.05736": {
"arxivId": "2310.05736",
"title": "LLMLingua: Compressing Prompts for Accelerated Inference of Large Language Models"
},
"2305.13304": {
"arxivId": "2305.13304",
"title": "RecurrentGPT: Interactive Generation of (Arbitrarily) Long Text"
},
"2307.06945": {
"arxivId": "2307.06945",
"title": "In-context Autoencoder for Context Compression in a Large Language Model"
},
"2210.03162": {
"arxivId": "2210.03162",
"title": "Prompt Compression and Contrastive Conditioning for Controllability and Toxicity Reduction in Language Models"
},
"2403.12968": {
"arxivId": "2403.12968",
"title": "LLMLingua-2: Data Distillation for Efficient and Faithful Task-Agnostic Prompt Compression"
},
"2209.15189": {
"arxivId": "2209.15189",
"title": "Learning by Distilling Context"
},
"2311.12351": {
"arxivId": "2311.12351",
"title": "Advancing Transformer Architecture in Long-Context Large Language Models: A Comprehensive Survey"
},
"2312.09571": {
"arxivId": "2312.09571",
"title": "Extending Context Window of Large Language Models via Semantic Compression"
},
"2103.00020": {
"arxivId": "2103.00020",
"title": "Learning Transferable Visual Models From Natural Language Supervision"
},
"2006.11239": {
"arxivId": "2006.11239",
"title": "Denoising Diffusion Probabilistic Models"
},
"2112.10752": {
"arxivId": "2112.10752",
"title": "High-Resolution Image Synthesis with Latent Diffusion Models"
},
"2302.13971": {
"arxivId": "2302.13971",
"title": "LLaMA: Open and Efficient Foundation Language Models"
},
"2106.09685": {
"arxivId": "2106.09685",
"title": "LoRA: Low-Rank Adaptation of Large Language Models"
},
"2201.11903": {
"arxivId": "2201.11903",
"title": "Chain of Thought Prompting Elicits Reasoning in Large Language Models"
},
"2105.05233": {
"arxivId": "2105.05233",
"title": "Diffusion Models Beat GANs on Image Synthesis"
},
"2204.06125": {
"arxivId": "2204.06125",
"title": "Hierarchical Text-Conditional Image Generation with CLIP Latents"
},
"1503.03585": {
"arxivId": "1503.03585",
"title": "Deep Unsupervised Learning using Nonequilibrium Thermodynamics"
},
"2010.02502": {
"arxivId": "2010.02502",
"title": "Denoising Diffusion Implicit Models"
},
"2011.13456": {
"arxivId": "2011.13456",
"title": "Score-Based Generative Modeling through Stochastic Differential Equations"
},
"2102.12092": {
"arxivId": "2102.12092",
"title": "Zero-Shot Text-to-Image Generation"
},
"2001.08361": {
"arxivId": "2001.08361",
"title": "Scaling Laws for Neural Language Models"
},
"1907.05600": {
"arxivId": "1907.05600",
"title": "Generative Modeling by Estimating Gradients of the Data Distribution"
},
"2102.09672": {
"arxivId": "2102.09672",
"title": "Improved Denoising Diffusion Probabilistic Models"
},
"1609.09430": {
"arxivId": "1609.09430",
"title": "CNN architectures for large-scale audio classification"
},
"2002.08155": {
"arxivId": "2002.08155",
"title": "CodeBERT: A Pre-Trained Model for Programming and Natural Languages"
},
"2101.03961": {
"arxivId": "2101.03961",
"title": "Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity"
},
"2012.07805": {
"arxivId": "2012.07805",
"title": "Extracting Training Data from Large Language Models"
},
"2308.12950": {
"arxivId": "2308.12950",
"title": "Code Llama: Open Foundation Models for Code"
},
"2109.00859": {
"arxivId": "2109.00859",
"title": "CodeT5: Identifier-aware Unified Pre-trained Encoder-Decoder Models for Code Understanding and Generation"
},
"2210.02303": {
"arxivId": "2210.02303",
"title": "Imagen Video: High Definition Video Generation with Diffusion Models"
},
"1901.04085": {
"arxivId": "1901.04085",
"title": "Passage Re-ranking with BERT"
},
"2106.15282": {
"arxivId": "2106.15282",
"title": "Cascaded Diffusion Models for High Fidelity Image Generation"
},
"2009.06732": {
"arxivId": "2009.06732",
"title": "Efficient Transformers: A Survey"
},
"2006.09011": {
"arxivId": "2006.09011",
"title": "Improved Techniques for Training Score-Based Generative Models"
},
"2009.08366": {
"arxivId": "2009.08366",
"title": "GraphCodeBERT: Pre-training Code Representations with Data Flow"
},
"2102.04664": {
"arxivId": "2102.04664",
"title": "CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation"
},
"2209.00796": {
"arxivId": "2209.00796",
"title": "Diffusion Models: A Comprehensive Survey of Methods and Applications"
},
"2104.00650": {
"arxivId": "2104.00650",
"title": "Frozen in Time: A Joint Video and Image Encoder for End-to-End Retrieval"
},
"2001.06937": {
"arxivId": "2001.06937",
"title": "A Review on Generative Adversarial Networks: Algorithms, Theory, and Applications"
},
"2107.03006": {
"arxivId": "2107.03006",
"title": "Structured Denoising Diffusion Models in Discrete State-Spaces"
},
"2103.06333": {
"arxivId": "2103.06333",
"title": "Unified Pre-training for Program Understanding and Generation"
},
"2205.14217": {
"arxivId": "2205.14217",
"title": "Diffusion-LM Improves Controllable Text Generation"
},
"2303.01469": {
"arxivId": "2303.01469",
"title": "Consistency Models"
},
"2305.16213": {
"arxivId": "2305.16213",
"title": "ProlificDreamer: High-Fidelity and Diverse Text-to-3D Generation with Variational Score Distillation"
},
"2010.08191": {
"arxivId": "2010.08191",
"title": "RocketQA: An Optimized Training Approach to Dense Passage Retrieval for Open-Domain Question Answering"
},
"2101.09258": {
"arxivId": "2101.09258",
"title": "Maximum Likelihood Training of Score-Based Diffusion Models"
},
"2203.17003": {
"arxivId": "2203.17003",
"title": "Equivariant Diffusion for Molecule Generation in 3D"
},
"2104.14951": {
"arxivId": "2104.14951",
"title": "SRDiff: Single Image Super-Resolution with Diffusion Probabilistic Models"
},
"2203.02923": {
"arxivId": "2203.02923",
"title": "GeoDiff: a Geometric Diffusion Model for Molecular Conformation Generation"
},
"2209.03003": {
"arxivId": "2209.03003",
"title": "Flow Straight and Fast: Learning to Generate and Transfer Data with Rectified Flow"
},
"2208.15001": {
"arxivId": "2208.15001",
"title": "MotionDiffuse: Text-Driven Human Motion Generation With Diffusion Model"
},
"2212.10511": {
"arxivId": "2212.10511",
"title": "When Not to Trust Language Models: Investigating Effectiveness of Parametric and Non-Parametric Memories"
},
"2109.05014": {
"arxivId": "2109.05014",
"title": "An Empirical Study of GPT-3 for Few-Shot Knowledge-Based VQA"
},
"2211.06687": {
"arxivId": "2211.06687",
"title": "Large-Scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation"
},
"2305.07922": {
"arxivId": "2305.07922",
"title": "CodeT5+: Open Code Large Language Models for Code Understanding and Generation"
},
"2012.00955": {
"arxivId": "2012.00955",
"title": "How Can We Know When Language Models Know? On the Calibration of Language Models for Question Answering"
},
"2309.07597": {
"arxivId": "2309.07597",
"title": "C-Pack: Packaged Resources To Advance General Chinese Embedding"
},
"2302.00923": {
"arxivId": "2302.00923",
"title": "Multimodal Chain-of-Thought Reasoning in Language Models"
},
"2010.00710": {
"arxivId": "2010.00710",
"title": "Nearest Neighbor Machine Translation"
},
"2211.08411": {
"arxivId": "2211.08411",
"title": "Large Language Models Struggle to Learn Long-Tail Knowledge"
},
"1809.06181": {
"arxivId": "1809.06181",
"title": "Dual Encoding for Zero-Example Video Retrieval"
},
"2104.08051": {
"arxivId": "2104.08051",
"title": "Optimizing Dense Retrieval Model Training with Hard Negatives"
},
"2210.08933": {
"arxivId": "2210.08933",
"title": "DiffuSeq: Sequence to Sequence Text Generation with Diffusion Models"
},
"2205.11495": {
"arxivId": "2205.11495",
"title": "Flexible Diffusion Modeling of Long Videos"
},
"2104.08253": {
"arxivId": "2104.08253",
"title": "Condenser: a Pre-training Architecture for Dense Retrieval"
},
"2301.12661": {
"arxivId": "2301.12661",
"title": "Make-An-Audio: Text-To-Audio Generation with Prompt-Enhanced Diffusion Models"
},
"2208.04202": {
"arxivId": "2208.04202",
"title": "Analog Bits: Generating Discrete Data using Diffusion Models with Self-Conditioning"
},
"2208.03188": {
"arxivId": "2208.03188",
"title": "BlenderBot 3: a deployed conversational agent that continually learns to responsibly engage"
},
"2206.01729": {
"arxivId": "2206.01729",
"title": "Torsional Diffusion for Molecular Conformer Generation"
},
"2203.09481": {
"arxivId": "2203.09481",
"title": "Diffusion Probabilistic Modeling for Video Generation"
},
"1904.11574": {
"arxivId": "1904.11574",
"title": "TVQA+: Spatio-Temporal Grounding for Video Question Answering"
},
"2012.12627": {
"arxivId": "2012.12627",
"title": "Bridging Textual and Tabular Data for Cross-Domain Text-to-SQL Semantic Parsing"
},
"2102.10407": {
"arxivId": "2102.10407",
"title": "VisualGPT: Data-efficient Adaptation of Pretrained Language Models for Image Captioning"
},
"1812.01194": {
"arxivId": "1812.01194",
"title": "A Retrieve-and-Edit Framework for Predicting Structured Outputs"
},
"2009.12677": {
"arxivId": "2009.12677",
"title": "KG-BART: Knowledge Graph-Augmented BART for Generative Commonsense Reasoning"
},
"2210.07128": {
"arxivId": "2210.07128",
"title": "Language Models of Code are Few-Shot Commonsense Learners"
},
"2203.13474": {
"arxivId": "2203.13474",
"title": "A Conversational Paradigm for Program Synthesis"
},
"2205.15019": {
"arxivId": "2205.15019",
"title": "Protein Structure and Sequence Generation with Equivariant Denoising Diffusion Probabilistic Models"
},
"2104.08762": {
"arxivId": "2104.08762",
"title": "Case-based Reasoning for Natural Language Queries over Knowledge Bases"
},
"2006.05405": {
"arxivId": "2006.05405",
"title": "Retrieval-Augmented Generation for Code Summarization via Hybrid GNN"
},
"2303.12570": {
"arxivId": "2303.12570",
"title": "RepoCoder: Repository-Level Code Completion Through Iterative Retrieval and Generation"
},
"2306.15626": {
"arxivId": "2306.15626",
"title": "LeanDojo: Theorem Proving with Retrieval-Augmented Language Models"
},
"2104.12836": {
"arxivId": "2104.12836",
"title": "Multimodal Contrastive Training for Visual Representation Learning"
},
"2109.05070": {
"arxivId": "2109.05070",
"title": "Instance-Conditioned GAN"
},
"2402.03216": {
"arxivId": "2402.03216",
"title": "BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation"
},
"2206.02743": {
"arxivId": "2206.02743",
"title": "A Neural Corpus Indexer for Document Retrieval"
},
"2203.07722": {
"arxivId": "2203.07722",
"title": "ReACC: A Retrieval-Augmented Code Completion Framework"
},
"2204.11824": {
"arxivId": "2204.11824",
"title": "Retrieval-Augmented Diffusion Models"
},
"1910.10419": {
"arxivId": "1910.10419",
"title": "Retrieve and Refine: Exemplar-Based Neural Comment Generation"
},
"2205.10747": {
"arxivId": "2205.10747",
"title": "Language Models with Image Descriptors are Strong Few-Shot Video-Language Learners"
},
"2302.05965": {
"arxivId": "2302.05965",
"title": "RESDSQL: Decoupling Schema Linking and Skeleton Parsing for Text-to-SQL"
},
"2110.03611": {
"arxivId": "2110.03611",
"title": "Adversarial Retriever-Ranker for dense text retrieval"
},
"2304.09667": {
"arxivId": "2304.09667",
"title": "GeneGPT: Augmenting Large Language Models with Domain Tools for Improved Access to Biomedical Information"
},
"2305.01625": {
"arxivId": "2305.01625",
"title": "Unlimiformer: Long-Range Transformers with Unlimited Length Input"
},
"2208.11640": {
"arxivId": "2208.11640",
"title": "Repair Is Nearly Generation: Multilingual Program Repair with LLMs"
},
"2303.07263": {
"arxivId": "2303.07263",
"title": "InferFix: End-to-End Program Repair with LLMs"
},
"2309.15217": {
"arxivId": "2309.15217",
"title": "RAGAs: Automated Evaluation of Retrieval Augmented Generation"
},
"2302.12246": {
"arxivId": "2302.12246",
"title": "Active Prompting with Chain-of-Thought for Large Language Models"
},
"2105.11269": {
"arxivId": "2105.11269",
"title": "Neural Machine Translation with Monolingual Translation Memory"
},
"2307.11019": {
"arxivId": "2307.11019",
"title": "Investigating the Factual Knowledge Boundary of Large Language Models with Retrieval Augmentation"
},
"2402.04333": {
"arxivId": "2402.04333",
"title": "LESS: Selecting Influential Data for Targeted Instruction Tuning"
},
"1809.05296": {
"arxivId": "1809.05296",
"title": "Skeleton-to-Response: Dialogue Generation Guided by Retrieval Memory"
},
"2110.04330": {
"arxivId": "2110.04330",
"title": "KG-FiD: Infusing Knowledge Graph in Fusion-in-Decoder for Open-Domain Question Answering"
},
"2012.14610": {
"arxivId": "2012.14610",
"title": "UniK-QA: Unified Representations of Structured and Unstructured Knowledge for Open-Domain Question Answering"
},
"2004.12744": {
"arxivId": "2004.12744",
"title": "Augmenting Transformers with KNN-Based Composite Memory for Dialog"
},
"2308.16137": {
"arxivId": "2308.16137",
"title": "LM-Infinite: Simple On-the-Fly Length Generalization for Large Language Models"
},
"2303.07678": {
"arxivId": "2303.07678",
"title": "Query2doc: Query Expansion with Large Language Models"
},
"2107.11976": {
"arxivId": "2107.11976",
"title": "One Question Answering Model for Many Languages with Cross-lingual Dense Passage Retrieval"
},
"2207.13038": {
"arxivId": "2207.13038",
"title": "Text-Guided Synthesis of Artistic Images with Retrieval-Augmented Diffusion Models"
},
"2101.00133": {
"arxivId": "2101.00133",
"title": "NeurIPS 2020 EfficientQA Competition: Systems, Analyses and Lessons Learned"
},
"2401.18059": {
"arxivId": "2401.18059",
"title": "RAPTOR: Recursive Abstractive Processing for Tree-Organized Retrieval"
},
"2302.06144": {
"arxivId": "2302.06144",
"title": "SkCoder: A Sketch-based Approach for Automatic Code Generation"
},
"2308.13775": {
"arxivId": "2308.13775",
"title": "EditSum: A Retrieve-and-Edit Framework for Source Code Summarization"
},
"2403.14403": {
"arxivId": "2403.14403",
"title": "Adaptive-RAG: Learning to Adapt Retrieval-Augmented Large Language Models through Question Complexity"
},
"2210.12925": {
"arxivId": "2210.12925",
"title": "TIARA: Multi-grained Retrieval for Robust Question Answering over Large Knowledge Base"
},
"2401.11708": {
"arxivId": "2401.11708",
"title": "Mastering Text-to-Image Diffusion: Recaptioning, Planning, and Generating with Multimodal LLMs"
},
"2304.06815": {
"arxivId": "2304.06815",
"title": "Automatic Semantic Augmentation of Language Model Prompts (for Code Summarization)"
},
"2303.10868": {
"arxivId": "2303.10868",
"title": "Retrieving Multimodal Information for Augmented Generation: A Survey"
},
"2303.06573": {
"arxivId": "2303.06573",
"title": "Large Language Models Know Your Contextual Search Intent: A Prompting Framework for Conversational Search"
},
"2212.10007": {
"arxivId": "2212.10007",
"title": "CoCoMIC: Code Completion by Jointly Modeling In-file and Cross-file Context"
},
"2210.00063": {
"arxivId": "2210.00063",
"title": "DecAF: Joint Decoding of Answers and Logical Forms for Question Answering over Knowledge Bases"
},
"2210.03809": {
"arxivId": "2210.03809",
"title": "Retrieval Augmented Visual Question Answering with Outside Knowledge"
},
"2007.08513": {
"arxivId": "2007.08513",
"title": "RetrieveGAN: Image Synthesis via Differentiable Patch Retrieval"
},
"2205.12253": {
"arxivId": "2205.12253",
"title": "Evaluating the Impact of Model Scale for Compositional Generalization in Semantic Parsing"
},
"2309.11325": {
"arxivId": "2309.11325",
"title": "DISC-LawLLM: Fine-tuning Large Language Models for Intelligent Legal Services"
},
"2212.01588": {
"arxivId": "2212.01588",
"title": "RHO ($\u03c1$): Reducing Hallucination in Open-domain Dialogues with Knowledge Grounding"
},
"2311.08377": {
"arxivId": "2311.08377",
"title": "Learning to Filter Context for Retrieval-Augmented Generation"
},
"2110.06176": {
"arxivId": "2110.06176",
"title": "MENTION MEMORY : INCORPORATING TEXTUAL KNOWLEDGE INTO TRANSFORMERS THROUGH ENTITY MENTION ATTENTION"
},
"2311.08252": {
"arxivId": "2311.08252",
"title": "REST: Retrieval-Based Speculative Decoding"
},
"2309.05767": {
"arxivId": "2309.05767",
"title": "Natural Language Supervision For General-Purpose Audio Representations"
},
"2012.07331": {
"arxivId": "2012.07331",
"title": "Audio Captioning using Pre-Trained Large-Scale Language Model Guided by Audio-based Similar Caption Retrieval"
},
"2307.06940": {
"arxivId": "2307.06940",
"title": "Animate-A-Story: Storytelling with Retrieval-Augmented Video Generation"
},
"2401.15884": {
"arxivId": "2401.15884",
"title": "Corrective Retrieval Augmented Generation"
},
"2203.10299": {
"arxivId": "2203.10299",
"title": "Neural Machine Translation with Phrase-Level Universal Visual Representations"
},
"2310.07554": {
"arxivId": "2310.07554",
"title": "Retrieve Anything To Augment Large Language Models"
},
"2309.06057": {
"arxivId": "2309.06057",
"title": "RAP-Gen: Retrieval-Augmented Patch Generation with CodeT5 for Automatic Program Repair"
},
"2204.11677": {
"arxivId": "2204.11677",
"title": "Conversational Question Answering on Heterogeneous Sources"
},
"2402.16347": {
"arxivId": "2402.16347",
"title": "CodeS: Towards Building Open-source Language Models for Text-to-SQL"
},
"2207.03637": {
"arxivId": "2207.03637",
"title": "OmniTab: Pretraining with Natural and Synthetic Data for Few-shot Table-based Question Answering"
},
"2401.15391": {
"arxivId": "2401.15391",
"title": "MultiHop-RAG: Benchmarking Retrieval-Augmented Generation for Multi-Hop Queries"
},
"2211.07067": {
"arxivId": "2211.07067",
"title": "Retrieval-Augmented Generative Question Answering for Event Argument Extraction"
},
"2108.02866": {
"arxivId": "2108.02866",
"title": "Dual Reader-Parser on Hybrid Textual and Tabular Evidence for Open Domain Question Answering"
},
"2401.07339": {
"arxivId": "2401.07339",
"title": "CodeAgent: Enhancing Code Generation with Tool-Integrated Agent Systems for Real-World Repo-level Coding Challenges"
},
"2203.02700": {
"arxivId": "2203.02700",
"title": "RACE: Retrieval-augmented Commit Message Generation"
},
"2106.06471": {
"arxivId": "2106.06471",
"title": "Writing by Memorizing: Hierarchical Retrieval-based Medical Report Generation"
},
"2311.16543": {
"arxivId": "2311.16543",
"title": "RTLFixer: Automatically Fixing RTL Syntax Errors with Large Language Models"
},
"2305.03653": {
"arxivId": "2305.03653",
"title": "Query Expansion by Prompting Large Language Models"
},
"2208.11126": {
"arxivId": "2208.11126",
"title": "Retrieval-based Controllable Molecule Generation"
},
"2105.13073": {
"arxivId": "2105.13073",
"title": "Maria: A Visual Experience Powered Conversational Agent"
},
"2307.07164": {
"arxivId": "2307.07164",
"title": "Learning to Retrieve In-Context Examples for Large Language Models"
},
"2303.00807": {
"arxivId": "2303.00807",
"title": "UDAPDR: Unsupervised Domain Adaptation via LLM Prompting and Distillation of Rerankers"
},
"2403.05313": {
"arxivId": "2403.05313",
"title": "RAT: Retrieval Augmented Thoughts Elicit Context-Aware Reasoning in Long-Horizon Generation"
},
"2402.10790": {
"arxivId": "2402.10790",
"title": "In Search of Needles in a 11M Haystack: Recurrent Memory Finds What LLMs Miss"
},
"2402.07630": {
"arxivId": "2402.07630",
"title": "G-Retriever: Retrieval-Augmented Generation for Textual Graph Understanding and Question Answering"
},
"2404.00610": {
"arxivId": "2404.00610",
"title": "RQ-RAG: Learning to Refine Queries for Retrieval Augmented Generation"
},
"2402.10828": {
"arxivId": "2402.10828",
"title": "RAG-Driver: Generalisable Driving Explanations with Retrieval-Augmented In-Context Learning in Multi-Modal Large Language Model"
},
"2210.02933": {
"arxivId": "2210.02933",
"title": "Grape: Knowledge Graph Enhanced Passage Reader for Open-domain Question Answering"
},
"2306.10998": {
"arxivId": "2306.10998",
"title": "RepoFusion: Training Code Models to Understand Your Repository"
},
"2311.06318": {
"arxivId": "2311.06318",
"title": "Knowledge-Augmented Large Language Models for Personalized Contextual Query Suggestion"
},
"2311.02962": {
"arxivId": "2311.02962",
"title": "Retrieval-Augmented Code Generation for Universal Information Extraction"
},
"2310.03184": {
"arxivId": "2310.03184",
"title": "Retrieval-augmented Generation to Improve Math Question-Answering: Trade-offs Between Groundedness and Human Preference"
},
"2302.08268": {
"arxivId": "2302.08268",
"title": "Retrieval-augmented Image Captioning"
},
"2303.17780": {
"arxivId": "2303.17780",
"title": "AceCoder: Utilizing Existing Code to Enhance Code Generation"
},
"2211.08380": {
"arxivId": "2211.08380",
"title": "Empowering Language Models with Knowledge Graph Reasoning for Open-Domain Question Answering"
},
"2206.13325": {
"arxivId": "2206.13325",
"title": "BashExplainer: Retrieval-Augmented Bash Code Comment Generation based on Fine-tuned CodeBERT"
},
"2104.07921": {
"arxivId": "2104.07921",
"title": "VGNMN: Video-grounded Neural Module Networks for Video-Grounded Dialogue Systems"
},
"2402.03181": {
"arxivId": "2402.03181",
"title": "C-RAG: Certified Generation Risks for Retrieval-Augmented Language Models"
},
"2401.02015": {
"arxivId": "2401.02015",
"title": "Improving Diffusion-Based Image Synthesis with Context Prediction"
},
"2310.14696": {
"arxivId": "2310.14696",
"title": "Tree of Clarifications: Answering Ambiguous Questions with Retrieval-Augmented Large Language Models"
},
"2403.10446": {
"arxivId": "2403.10446",
"title": "Enhancing LLM Factual Accuracy with RAG to Counter Hallucinations: A Case Study on Domain-Specific Queries in Private Knowledge-Bases"
},
"2402.11782": {
"arxivId": "2402.11782",
"title": "What Evidence Do Language Models Find Convincing?"
},
"2306.06156": {
"arxivId": "2306.06156",
"title": "PoET: A generative model of protein families as sequences-of-sequences"
},
"2310.15657": {
"arxivId": "2310.15657",
"title": "Testing the Limits: Unusual Text Inputs Generation for Mobile App Crash Detection with Large Language Model"
},
"2306.11732": {
"arxivId": "2306.11732",
"title": "Retrieving-to-Answer: Zero-Shot Video Question Answering with Frozen Large Language Models"
},
"2305.04032": {
"arxivId": "2305.04032",
"title": "ToolCoder: Teach Code Generation Models to use API search tools"
},
"2306.14722": {
"arxivId": "2306.14722",
"title": "FC-KBQA: A Fine-to-Coarse Composition Framework for Knowledge Base Question Answering"
},
"2302.05578": {
"arxivId": "2302.05578",
"title": "Characterizing Attribution and Fluency Tradeoffs for Retrieval-Augmented Large Language Models"
},
"2212.08632": {
"arxivId": "2212.08632",
"title": "Enhancing Multi-modal Multi-hop Question Answering via Structured Knowledge and Unified Retrieval-Generation"
},
"2202.13972": {
"arxivId": "2202.13972",
"title": "The impact of lexical and grammatical processing on generating code from natural language"
},
"2311.13534": {
"arxivId": "2311.13534",
"title": "LM-Cocktail: Resilient Tuning of Language Models via Model Merging"
},
"2212.09651": {
"arxivId": "2212.09651",
"title": "Cross-Lingual Retrieval Augmented Prompt for Low-Resource Languages"
},
"2205.10471": {
"arxivId": "2205.10471",
"title": "Retrieval-Augmented Multilingual Keyphrase Generation with Retriever-Generator Iterative Training"
},
"2203.16714": {
"arxivId": "2203.16714",
"title": "End-to-End Table Question Answering via Retrieval-Augmented Generation"
},
"2401.01701": {
"arxivId": "2401.01701",
"title": "De-Hallucinator: Iterative Grounding for LLM-Based Code Completion"
},
"2404.07220": {
"arxivId": "2404.07220",
"title": "Blended RAG: Improving RAG (Retriever-Augmented Generation) Accuracy with Semantic Search and Hybrid Query-Based Retrievers"
},
"2401.17043": {
"arxivId": "2401.17043",
"title": "CRUD-RAG: A Comprehensive Chinese Benchmark for Retrieval-Augmented Generation of Large Language Models"
},
"2310.20158": {
"arxivId": "2310.20158",
"title": "GAR-meets-RAG Paradigm for Zero-Shot Information Retrieval"
},
"2210.12338": {
"arxivId": "2210.12338",
"title": "Open-domain Question Answering via Chain of Reasoning over Heterogeneous Knowledge"
},
"2209.02071": {
"arxivId": "2209.02071",
"title": "CONCRETE: Improving Cross-lingual Fact-checking with Cross-lingual Retrieval"
},
"2401.07883": {
"arxivId": "2401.07883",
"title": "The Chronicles of RAG: The Retriever, the Chunk and the Generator"
},
"2310.06302": {
"arxivId": "2310.06302",
"title": "Selective Demonstrations for Cross-domain Text-to-SQL"
},
"2308.09313": {
"arxivId": "2308.09313",
"title": "Domain Adaptive Code Completion via Language Models and Decoupled Domain Databases"
},
"2305.18170": {
"arxivId": "2305.18170",
"title": "Leveraging Training Data in Few-Shot Prompting for Numerical Reasoning"
},
"2208.07022": {
"arxivId": "2208.07022",
"title": "Memory-Driven Text-to-Image Generation"
},
"2402.12908": {
"arxivId": "2402.12908",
"title": "RealCompo: Dynamic Equilibrium between Realism and Compositionality Improves Text-to-Image Diffusion Models"
},
"2402.16063": {
"arxivId": "2402.16063",
"title": "Citation-Enhanced Generation for LLM-based Chatbots"
},
"2402.12317": {
"arxivId": "2402.12317",
"title": "ARKS: Active Retrieval in Knowledge Soup for Code Generation"
},
"2401.13256": {
"arxivId": "2401.13256",
"title": "UniMS-RAG: A Unified Multi-source Retrieval-Augmented Generation for Personalized Dialogue Systems"
},
"2309.07372": {
"arxivId": "2309.07372",
"title": "Training Audio Captioning Models without Audio"
},
"2401.06800": {
"arxivId": "2401.06800",
"title": "Reinforcement Learning for Optimizing RAG for Domain Chatbots"
},
"2309.09836": {
"arxivId": "2309.09836",
"title": "Recap: Retrieval-Augmented Audio Captioning"
},
"1409.3215": {
"arxivId": "1409.3215",
"title": "Sequence to Sequence Learning with Neural Networks"
},
"1506.02626": {
"arxivId": "1506.02626",
"title": "Learning both Weights and Connections for Efficient Neural Network"
},
"2403.10131": {
"arxivId": "2403.10131",
"title": "RAFT: Adapting Language Model to Domain Specific RAG"
},
"2211.12561": {
"arxivId": "2211.12561",
"title": "Retrieval-Augmented Multimodal Language Modeling"
},
"2312.15503": {
"arxivId": "2312.15503",
"title": "Making Large Language Models A Better Foundation For Dense Retrieval"
},
"2308.14263": {
"arxivId": "2308.14263",
"title": "Cross-Modal Retrieval: A Systematic Review of Methods and Future Directions"
},
"1902.00751": {
"arxivId": "1902.00751",
"title": "Parameter-Efficient Transfer Learning for NLP"
},
"2201.12086": {
"arxivId": "2201.12086",
"title": "BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation"
},
"1611.09268": {
"arxivId": "1611.09268",
"title": "MS MARCO: A Human Generated MAchine Reading COmprehension Dataset"
},
"2203.11171": {
"arxivId": "2203.11171",
"title": "Self-Consistency Improves Chain of Thought Reasoning in Language Models"
},
"2305.10601": {
"arxivId": "2305.10601",
"title": "Tree of Thoughts: Deliberate Problem Solving with Large Language Models"
},
"1903.00161": {
"arxivId": "1903.00161",
"title": "DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs"
},
"2110.04366": {
"arxivId": "2110.04366",
"title": "Towards a Unified View of Parameter-Efficient Transfer Learning"
},
"2304.12244": {
"arxivId": "2304.12244",
"title": "WizardLM: Empowering Large Language Models to Follow Complex Instructions"
},
"2303.11366": {
"arxivId": "2303.11366",
"title": "Reflexion: language agents with verbal reinforcement learning"
},
"2211.01910": {
"arxivId": "2211.01910",
"title": "Large Language Models Are Human-Level Prompt Engineers"
},
"2308.11432": {
"arxivId": "2308.11432",
"title": "A Survey on Large Language Model based Autonomous Agents"
},
"2101.02235": {
"arxivId": "2101.02235",
"title": "Did Aristotle Use a Laptop? A Question Answering Benchmark with Implicit Reasoning Strategies"
},
"2301.13688": {
"arxivId": "2301.13688",
"title": "The Flan Collection: Designing Data and Methods for Effective Instruction Tuning"
},
"2304.07327": {
"arxivId": "2304.07327",
"title": "OpenAssistant Conversations - Democratizing Large Language Model Alignment"
},
"2210.03350": {
"arxivId": "2210.03350",
"title": "Measuring and Narrowing the Compositionality Gap in Language Models"
},
"2210.03493": {
"arxivId": "2210.03493",
"title": "Automatic Chain of Thought Prompting in Large Language Models"
},
"2302.00093": {
"arxivId": "2302.00093",
"title": "Large Language Models Can Be Easily Distracted by Irrelevant Context"
},
"2308.00352": {
"arxivId": "2308.00352",
"title": "MetaGPT: Meta Programming for Multi-Agent Collaborative Framework"
},
"2005.00181": {
"arxivId": "2005.00181",
"title": "Sparse, Dense, and Attentional Representations for Text Retrieval"
},
"2308.10792": {
"arxivId": "2308.10792",
"title": "Instruction Tuning for Large Language Models: A Survey"
},
"2104.06967": {
"arxivId": "2104.06967",
"title": "Efficiently Teaching an Effective Dense Retriever with Balanced Topic Aware Sampling"
},
"2303.08128": {
"arxivId": "2303.08128",
"title": "ViperGPT: Visual Inference via Python Execution for Reasoning"
},
"2306.04751": {
"arxivId": "2306.04751",
"title": "How Far Can Camels Go? Exploring the State of Instruction Tuning on Open Resources"
},
"2203.14465": {
"arxivId": "2203.14465",
"title": "STaR: Bootstrapping Reasoning With Reasoning"
},
"2205.12548": {
"arxivId": "2205.12548",
"title": "RLPrompt: Optimizing Discrete Text Prompts with Reinforcement Learning"
},
"2309.05653": {
"arxivId": "2309.05653",
"title": "MAmmoTH: Building Math Generalist Models through Hybrid Instruction Tuning"
},
"2106.04489": {
"arxivId": "2106.04489",
"title": "Parameter-efficient Multi-task Fine-tuning for Transformers via Shared Hypernetworks"
},
"2309.03409": {
"arxivId": "2309.03409",
"title": "Large Language Models as Optimizers"
},
"2011.01060": {
"arxivId": "2011.01060",
"title": "Constructing A Multi-hop QA Dataset for Comprehensive Evaluation of Reasoning Steps"
},
"2212.12017": {
"arxivId": "2212.12017",
"title": "OPT-IML: Scaling Language Model Instruction Meta Learning through the Lens of Generalization"
},
"2303.14070": {
"arxivId": "2303.14070",
"title": "ChatDoctor: A Medical Chat Model Fine-Tuned on a Large Language Model Meta-AI (LLaMA) Using Medical Domain Knowledge"
},
"2308.00692": {
"arxivId": "2308.00692",
"title": "LISA: Reasoning Segmentation via Large Language Model"
},
"2105.03011": {
"arxivId": "2105.03011",
"title": "A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers"
},
"2311.16452": {
"arxivId": "2311.16452",
"title": "Can Generalist Foundation Models Outcompete Special-Purpose Tuning? Case Study in Medicine"
},
"2305.03495": {
"arxivId": "2305.03495",
"title": "Automatic Prompt Optimization with \"Gradient Descent\" and Beam Search"
},
"2304.11015": {
"arxivId": "2304.11015",
"title": "DIN-SQL: Decomposed In-Context Learning of Text-to-SQL with Self-Correction"
},
"2309.12288": {
"arxivId": "2309.12288",
"title": "The Reversal Curse: LLMs trained on \"A is B\" fail to learn \"B is A\""
},
"2402.09353": {
"arxivId": "2402.09353",
"title": "DoRA: Weight-Decomposed Low-Rank Adaptation"
},
"2108.00573": {
"arxivId": "2108.00573",
"title": "\u266b MuSiQue: Multihop Questions via Single-hop Question Composition"
},
"2106.05707": {
"arxivId": "2106.05707",
"title": "FEVEROUS: Fact Extraction and VERification Over Unstructured and Structured information"
},
"2305.07001": {
"arxivId": "2305.07001",
"title": "Recommendation as Instruction Following: A Large Language Model Empowered Recommendation Approach"
},
"2306.06031": {
"arxivId": "2306.06031",
"title": "FinGPT: Open-Source Financial Large Language Models"
},
"2204.06092": {
"arxivId": "2204.06092",
"title": "ASQA: Factoid Questions Meet Long-Form Answers"
},
"2403.14608": {
"arxivId": "2403.14608",
"title": "Parameter-Efficient Fine-Tuning for Large Models: A Comprehensive Survey"
},
"2210.07558": {
"arxivId": "2210.07558",
"title": "DyLoRA: Parameter-Efficient Tuning of Pre-trained Models using Dynamic Search-Free Low-Rank Adaptation"
},
"2112.08608": {
"arxivId": "2112.08608",
"title": "QuALITY: Question Answering with Long Input Texts, Yes!"
},
"2302.12822": {
"arxivId": "2302.12822",
"title": "Automatic Prompt Augmentation and Selection with Chain-of-Thought from Labeled Data"
},
"2203.07281": {
"arxivId": "2203.07281",
"title": "GrIPS: Gradient-free, Edit-based Instruction Search for Prompting Large Language Models"
},
"2308.11462": {
"arxivId": "2308.11462",
"title": "LegalBench: A Collaboratively Built Benchmark for Measuring Legal Reasoning in Large Language Models"
},
"2311.10537": {
"arxivId": "2311.10537",
"title": "MedAgents: Large Language Models as Collaborators for Zero-shot Medical Reasoning"
},
"2301.11916": {
"arxivId": "2301.11916",
"title": "Large Language Models Are Latent Variable Models: Explaining and Finding Good Demonstrations for In-Context Learning"
},
"2404.05961": {
"arxivId": "2404.05961",
"title": "LLM2Vec: Large Language Models Are Secretly Powerful Text Encoders"
},
"2108.08513": {
"arxivId": "2108.08513",
"title": "Fast Passage Re-ranking with Contextualized Exact Term Matching and Efficient Passage Expansion"
},
"2402.00157": {
"arxivId": "2402.00157",
"title": "Large Language Models for Mathematical Reasoning: Progresses and Challenges"
},
"1610.10001": {
"arxivId": "1610.10001",
"title": "Off the Beaten Path: Let's Replace Term-Based Retrieval with k-NN Search"
},
"2306.08640": {
"arxivId": "2306.08640",
"title": "AssistGPT: A General Multi-modal Assistant that can Plan, Execute, Inspect, and Learn"
},
"2302.07027": {
"arxivId": "2302.07027",
"title": "AdapterSoup: Weight Averaging to Improve Generalization of Pretrained Language Models"
},
"2305.14283": {
"arxivId": "2305.14283",
"title": "Query Rewriting for Retrieval-Augmented Large Language Models"
},
"2405.05904": {
"arxivId": "2405.05904",
"title": "Does Fine-Tuning LLMs on New Knowledge Encourage Hallucinations?"
},
"2310.02374": {
"arxivId": "2310.02374",
"title": "Conversational Health Agents: A Personalized LLM-Powered Agent Framework"
},
"2404.11018": {
"arxivId": "2404.11018",
"title": "Many-Shot In-Context Learning"
},
"2303.10512": {
"arxivId": "2303.10512",
"title": "AdaLoRA: Adaptive Budget Allocation for Parameter-Efficient Fine-Tuning"
},
"2303.02913": {
"arxivId": "2303.02913",
"title": "OpenICL: An Open-Source Framework for In-context Learning"
},
"2304.04947": {
"arxivId": "2304.04947",
"title": "Conditional Adapters: Parameter-efficient Transfer Learning with Fast Inference"
},
"2405.02957": {
"arxivId": "2405.02957",
"title": "Agent Hospital: A Simulacrum of Hospital with Evolvable Medical Agents"
},
"2211.11890": {
"arxivId": "2211.11890",
"title": "TEMPERA: Test-Time Prompting via Reinforcement Learning"
},
"2310.07713": {
"arxivId": "2310.07713",
"title": "InstructRetro: Instruction Tuning post Retrieval-Augmented Pretraining"
},
"2303.08119": {
"arxivId": "2303.08119",
"title": "How Many Demonstrations Do You Need for In-context Learning?"
},
"2310.08184": {
"arxivId": "2310.08184",
"title": "Learn From Model Beyond Fine-Tuning: A Survey"
},
"2304.14979": {
"arxivId": "2304.14979",
"title": "MLCopilot: Unleashing the Power of Large Language Models in Solving Machine Learning Tasks"
},
"2311.11696": {
"arxivId": "2311.11696",
"title": "Sparse Low-rank Adaptation of Pre-trained Language Models"
},
"2305.09955": {
"arxivId": "2305.09955",
"title": "Knowledge Card: Filling LLMs' Knowledge Gaps with Plug-in Specialized Language Models"
},
"2212.08286": {
"arxivId": "2212.08286",
"title": "ALERT: Adapt Language Models to Reasoning Tasks"
},
"2401.08967": {
"arxivId": "2401.08967",
"title": "ReFT: Reasoning with Reinforced Fine-Tuning"
},
"2310.05149": {
"arxivId": "2310.05149",
"title": "Retrieval-Generation Synergy Augmented Large Language Models"
},
"2402.05403": {
"arxivId": "2402.05403",
"title": "In-Context Principle Learning from Mistakes"
},
"2312.06648": {
"arxivId": "2312.06648",
"title": "Dense X Retrieval: What Retrieval Granularity Should We Use?"
},
"2310.19698": {
"arxivId": "2310.19698",
"title": "When Do Prompting and Prefix-Tuning Work? A Theory of Capabilities and Limitations"
},
"2404.14851": {
"arxivId": "2404.14851",
"title": "From Matching to Generation: A Survey on Generative Information Retrieval"
},
"2310.05066": {
"arxivId": "2310.05066",
"title": "Guideline Learning for In-context Information Extraction"
},
"2406.11903": {
"arxivId": "2406.11903",
"title": "A Survey of Large Language Models for Financial Applications: Progress, Prospects and Challenges"
},
"2402.05131": {
"arxivId": "2402.05131",
"title": "Financial Report Chunking for Effective Retrieval Augmented Generation"
}
}