Generate In-Context Examples

How can we generate examples for our prompt?

Self-Generated In-Context Learning (SG-ICL)1 is a technique that uses an LLM to generate its own examples of the task at hand. These generated examples are then provided in the prompt as demonstrations, enabling in-context learning without hand-written examples.

We can implement SG-ICL using instructor as seen below.

import instructor
from pydantic import BaseModel
from openai import OpenAI
from typing import Literal

n = 4  # num examples to generate per class


class GeneratedReview(BaseModel):
    review: str
    sentiment: Literal["positive", "negative"]


class SentimentPrediction(BaseModel):
    sentiment: Literal["positive", "negative"]


client = instructor.from_openai(OpenAI())


def generate_sample(input_review, sentiment):
    """Ask the LLM to write a new review with the requested sentiment, similar to the input review."""
    return client.chat.completions.create(
        model="gpt-4o",
        response_model=GeneratedReview,
        messages=[
            {
                "role": "user",
                "content": f"""
                           Generate a '{sentiment}' review similar to: {input_review}
                           Generated review:
                           """,
            }
        ],
    )


def predict_sentiment(input_review, in_context_samples):
    """Classify the input review, using the generated reviews as in-context examples."""
    return client.chat.completions.create(
        model="gpt-4o",
        response_model=SentimentPrediction,
        messages=[
            {
                "role": "user",
                "content": "".join(
                    [
                        f"Review: {sample.review}\nSentiment: {sample.sentiment}\n\n"
                        for sample in in_context_samples
                    ]
                )
                + f"Review: {input_review}\nSentiment:",
            }
        ],
    ).sentiment


if __name__ == "__main__":
    input_review = (
        "This movie was a rollercoaster of emotions, keeping me engaged throughout."
    )

    # Generate in-context samples
    samples = [
        generate_sample(input_review, sentiment)
        for sentiment in ('positive', 'negative')
        for _ in range(n)
    ]
    for sample in samples:
        print(sample)
        """
        review='This film was an emotional journey, keeping me captivated from start to finish.' sentiment='positive'
        """
        """
        review='This film was an emotional journey, captivating me from start to finish.' sentiment='positive'
        """
        """
        review='This film captivated me from start to finish with its thrilling plot and emotional depth.' sentiment='positive'
        """
        """
        review='This movie was a breathtaking journey, capturing my attention from start to finish.' sentiment='positive'
        """
        """
        review='This movie was a chaotic mess of emotions, losing me at every turn.' sentiment='negative'
        """
        """
        review='This movie was a confusing mess, leaving me disengaged throughout.' sentiment='negative'
        """
        """
        review='This movie was a chore to sit through, leaving me bored most of the time.' sentiment='negative'
        """
        """
        review='This movie was a mishmash of confusing scenes, leaving me frustrated throughout.' sentiment='negative'
        """

    # Predict sentiment
    print(predict_sentiment(input_review, samples))
    #> positive
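
For reference, the prompt that predict_sentiment assembles is simply the generated review/sentiment pairs followed by the review we want to classify. The sketch below (using an illustrative variable name, few_shot_prompt, and reusing samples and input_review from the script above) prints that prompt so you can inspect the in-context format before it is sent to the model.

# Sketch: inspect the few-shot prompt that predict_sentiment builds.
# Assumes `samples` and `input_review` from the script above are in scope.
few_shot_prompt = (
    "".join(
        f"Review: {sample.review}\nSentiment: {sample.sentiment}\n\n"
        for sample in samples
    )
    + f"Review: {input_review}\nSentiment:"
)

print(few_shot_prompt)
"""
Review: This film was an emotional journey, keeping me captivated from start to finish.
Sentiment: positive

...

Review: This movie was a rollercoaster of emotions, keeping me engaged throughout.
Sentiment:
"""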

References

1: Self-Generated In-Context Learning: Leveraging Auto-regressive Language Models as a Demonstration Generator

*: The Prompt Report: A Systematic Survey of Prompting Techniques