"""Chain that just formats a prompt and calls an LLM."""
from __future__ import annotations

import warnings
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast

from langchain_core._api import deprecated
from langchain_core.callbacks import (
    AsyncCallbackManager,
    AsyncCallbackManagerForChainRun,
    CallbackManager,
    CallbackManagerForChainRun,
    Callbacks,
)
from langchain_core.language_models import BaseLanguageModel, LanguageModelInput
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers import BaseLLMOutputParser, StrOutputParser
from langchain_core.outputs import ChatGeneration, Generation, LLMResult
from langchain_core.prompt_values import PromptValue
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.runnables import (
    Runnable,
    RunnableBinding,
    RunnableBranch,
    RunnableWithFallbacks,
)
from langchain_core.runnables.configurable import DynamicRunnable
from langchain_core.utils.input import get_colored_text

from langchain.chains.base import Chain


@deprecated(
    since="0.1.17",
    alternative="RunnableSequence, e.g., `prompt | llm`",
    removal="1.0",
)
class LLMChain(Chain):
    """Chain to run queries against LLMs.

    This class is deprecated. See below for an example implementation using
    LangChain runnables:

        .. code-block:: python

            from langchain_core.output_parsers import StrOutputParser
            from langchain_core.prompts import PromptTemplate
            from langchain_openai import OpenAI

            prompt_template = "Tell me a {adjective} joke"
            prompt = PromptTemplate(
                input_variables=["adjective"], template=prompt_template
            )
            llm = OpenAI()
            chain = prompt | llm | StrOutputParser()

            chain.invoke("your adjective here")

    Example:
        .. code-block:: python

            from langchain.chains import LLMChain
            from langchain_community.llms import OpenAI
            from langchain_core.prompts import PromptTemplate
            prompt_template = "Tell me a {adjective} joke"
            prompt = PromptTemplate(
                input_variables=["adjective"], template=prompt_template
            )
            llm = LLMChain(llm=OpenAI(), prompt=prompt)
    """

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return True

    prompt: BasePromptTemplate
    llm: Union[
        Runnable[LanguageModelInput, str], Runnable[LanguageModelInput, BaseMessage]
    ]
    output_key: str = "text"  #: :meta private:
    output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
    return_final_only: bool = True
    llm_kwargs: dict = Field(default_factory=dict)

    class Config:
        arbitrary_types_allowed = True
        extra = "forbid"

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the prompt expects.

        :meta private:
        """
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.

        :meta private:
        """
        if self.return_final_only:
            return [self.output_key]
        else:
            return [self.output_key, "full_generation"]

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        response = self.generate([inputs], run_manager=run_manager)
        return self.create_outputs(response)[0]

    def generate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
        callbacks = run_manager.get_child() if run_manager else None
        if isinstance(self.llm, BaseLanguageModel):
            return self.llm.generate_prompt(
                prompts,
                stop,
                callbacks=callbacks,
                **self.llm_kwargs,
            )
        else:
            results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
                cast(List, prompts), {"callbacks": callbacks}
            )
            generations: List[List[Generation]] = []
            for res in results:
                if isinstance(res, BaseMessage):
                    generations.append([ChatGeneration(message=res)])
                else:
                    generations.append([Generation(text=res)])
            return LLMResult(generations=generations)

    async def agenerate(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)
        callbacks = run_manager.get_child() if run_manager else None
        if isinstance(self.llm, BaseLanguageModel):
            return await self.llm.agenerate_prompt(
                prompts,
                stop,
                callbacks=callbacks,
                **self.llm_kwargs,
            )
        else:
            results = await self.llm.bind(stop=stop, **self.llm_kwargs).abatch(
                cast(List, prompts), {"callbacks": callbacks}
            )
            generations: List[List[Generation]] = []
            for res in results:
                if isinstance(res, BaseMessage):
                    generations.append([ChatGeneration(message=res)])
                else:
                    generations.append([Generation(text=res)])
            return LLMResult(generations=generations)

    def prep_prompts(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if len(input_list) == 0:
            return [], stop
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if run_manager:
                run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop

    async def aprep_prompts(
        self,
        input_list: List[Dict[str, Any]],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if len(input_list) == 0:
            return [], stop
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if run_manager:
                await run_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop

    def apply(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        callback_manager = CallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        run_manager = callback_manager.on_chain_start(
            None,
            {"input_list": input_list},
        )
        try:
            response = self.generate(input_list, run_manager=run_manager)
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise e
        outputs = self.create_outputs(response)
        run_manager.on_chain_end({"outputs": outputs})
        return outputs

    async def aapply(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        callback_manager = AsyncCallbackManager.configure(
            callbacks, self.callbacks, self.verbose
        )
        run_manager = await callback_manager.on_chain_start(
            None,
            {"input_list": input_list},
        )
        try:
            response = await self.agenerate(input_list, run_manager=run_manager)
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise e
        outputs = self.create_outputs(response)
        await run_manager.on_chain_end({"outputs": outputs})
        return outputs

    @property
    def _run_output_key(self) -> str:
        return self.output_key

    def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:
        """Create outputs from response."""
        result = [
            # Get the text of the top generated string.
            {
                self.output_key: self.output_parser.parse_result(generation),
                "full_generation": generation,
            }
            for generation in llm_result.generations
        ]
        if self.return_final_only:
            result = [{self.output_key: r[self.output_key]} for r in result]
        return result

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        response = await self.agenerate([inputs], run_manager=run_manager)
        return self.create_outputs(response)[0]

    def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            callbacks: Callbacks to pass to LLMChain
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm.predict(adjective="funny")
        """
        return self(kwargs, callbacks=callbacks)[self.output_key]

    async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            callbacks: Callbacks to pass to LLMChain
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm.predict(adjective="funny")
        """
        return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]

    def predict_and_parse(
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, Any]]:
        """Call predict and then parse the results."""
        warnings.warn(
            "The predict_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = self.predict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    async def apredict_and_parse(
        self, callbacks: Callbacks = None, **kwargs: Any
    ) -> Union[str, List[str], Dict[str, str]]:
        """Call apredict and then parse the results."""
        warnings.warn(
            "The apredict_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = await self.apredict(callbacks=callbacks, **kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result

    def apply_and_parse(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        warnings.warn(
            "The apply_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = self.apply(input_list, callbacks=callbacks)
        return self._parse_generation(result)

    def _parse_generation(
        self, generation: List[Dict[str, str]]
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        if self.prompt.output_parser is not None:
            return [
                self.prompt.output_parser.parse(res[self.output_key])
                for res in generation
            ]
        else:
            return generation

    async def aapply_and_parse(
        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        warnings.warn(
            "The aapply_and_parse method is deprecated, "
            "instead pass an output parser directly to LLMChain."
        )
        result = await self.aapply(input_list, callbacks=callbacks)
        return self._parse_generation(result)

    @property
    def _chain_type(self) -> str:
        return "llm_chain"

    @classmethod
    def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:
        """Create LLMChain from LLM and template."""
        prompt_template = PromptTemplate.from_template(template)
        return cls(llm=llm, prompt=prompt_template)

    def _get_num_tokens(self, text: str) -> int:
        return _get_language_model(self.llm).get_num_tokens(text)


def _get_language_model(llm_like: Runnable) -> BaseLanguageModel:
    if isinstance(llm_like, BaseLanguageModel):
        return llm_like
    elif isinstance(llm_like, RunnableBinding):
        return _get_language_model(llm_like.bound)
    elif isinstance(llm_like, RunnableWithFallbacks):
        return _get_language_model(llm_like.runnable)
    elif isinstance(llm_like, (RunnableBranch, DynamicRunnable)):
        return _get_language_model(llm_like.default)
    else:
        raise ValueError(
            f"Unable to extract BaseLanguageModel from llm_like object of type "
            f"{type(llm_like)}"
        )