import logging
import os
from typing import Any, Dict, Iterator, List, Mapping, Optional, Union

from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import BaseLLM
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.pydantic_v1 import SecretStr
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init

logger = logging.getLogger(__name__)


@deprecated(
    since="0.0.18", removal="1.0", alternative_import="langchain_ibm.WatsonxLLM"
)
class WatsonxLLM(BaseLLM):
    """
    IBM watsonx.ai large language models.

    To use, you should have ``ibm_watsonx_ai`` python package installed,
    and the environment variable ``WATSONX_APIKEY`` set with your API key, or pass
    it as a named parameter to the constructor.


    Example:
        .. code-block:: python

            from ibm_watsonx_ai.metanames import GenTextParamsMetaNames
            parameters = {
                GenTextParamsMetaNames.DECODING_METHOD: "sample",
                GenTextParamsMetaNames.MAX_NEW_TOKENS: 100,
                GenTextParamsMetaNames.MIN_NEW_TOKENS: 1,
                GenTextParamsMetaNames.TEMPERATURE: 0.5,
                GenTextParamsMetaNames.TOP_K: 50,
                GenTextParamsMetaNames.TOP_P: 1,
            }

            from langchain_community.llms import WatsonxLLM
            watsonx_llm = WatsonxLLM(
                model_id="google/flan-ul2",
                url="https://us-south.ml.cloud.ibm.com",
                apikey="*****",
                project_id="*****",
                params=parameters,
            )
    """

    model_id: str = ""
    deployment_id: str = ""
    project_id: str = ""
    space_id: str = ""
    url: Optional[SecretStr] = None
    apikey: Optional[SecretStr] = None
    token: Optional[SecretStr] = None
    password: Optional[SecretStr] = None
    username: Optional[SecretStr] = None
    instance_id: Optional[SecretStr] = None
    version: Optional[SecretStr] = None
    params: Optional[dict] = None
    verify: Union[str, bool] = ""
    streaming: bool = False
    watsonx_model: Any
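
    # The SecretStr fields above map onto the WATSONX_* environment variables
    # listed in `lc_secrets`; `params` carries generation options such as the
    # GenTextParamsMetaNames values shown in the class docstring, and
    # `watsonx_model` holds the ibm_watsonx_ai ModelInference client that
    # `validate_environment` creates.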

    class Config:
        extra = "forbid"

    @classmethod
    def is_lc_serializable(cls) -> bool:
        return False

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {
            "url": "WATSONX_URL",
            "apikey": "WATSONX_APIKEY",
            "token": "WATSONX_TOKEN",
            "password": "WATSONX_PASSWORD",
            "username": "WATSONX_USERNAME",
            "instance_id": "WATSONX_INSTANCE_ID",
        }
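
    # Credential resolution below: the URL is always required; IBM Cloud URLs
    # (containing "cloud.ibm.com") expect an API key, while other deployments
    # accept a token, a password plus username, or an apikey plus username,
    # together with an instance_id.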

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that credentials and the python package exist in the environment."""
        values["url"] = convert_to_secret_str(
            get_from_dict_or_env(values, "url", "WATSONX_URL")
        )
        if "cloud.ibm.com" in values.get("url", "").get_secret_value():
            values["apikey"] = convert_to_secret_str(
                get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY")
            )
        else:
            if (
                not values["token"]
                and "WATSONX_TOKEN" not in os.environ
                and not values["password"]
                and "WATSONX_PASSWORD" not in os.environ
                and not values["apikey"]
                and "WATSONX_APIKEY" not in os.environ
            ):
                raise ValueError(
                    "Did not find 'token', 'password' or 'apikey', please add an"
                    " environment variable `WATSONX_TOKEN`, 'WATSONX_PASSWORD' or"
                    " 'WATSONX_APIKEY' which contains it, or pass 'token',"
                    " 'password' or 'apikey' as a named parameter."
                )
            elif values["token"] or "WATSONX_TOKEN" in os.environ:
                values["token"] = convert_to_secret_str(
                    get_from_dict_or_env(values, "token", "WATSONX_TOKEN")
                )
            elif values["password"] or "WATSONX_PASSWORD" in os.environ:
                values["password"] = convert_to_secret_str(
                    get_from_dict_or_env(values, "password", "WATSONX_PASSWORD")
                )
                values["username"] = convert_to_secret_str(
                    get_from_dict_or_env(values, "username", "WATSONX_USERNAME")
                )
            elif values["apikey"] or "WATSONX_APIKEY" in os.environ:
                values["apikey"] = convert_to_secret_str(
                    get_from_dict_or_env(values, "apikey", "WATSONX_APIKEY")
                )
                values["username"] = convert_to_secret_str(
                    get_from_dict_or_env(values, "username", "WATSONX_USERNAME")
                )
            if not values["instance_id"] or "WATSONX_INSTANCE_ID" not in os.environ:
                values["instance_id"] = convert_to_secret_str(
                    get_from_dict_or_env(values, "instance_id", "WATSONX_INSTANCE_ID")
                )

        try:
            from ibm_watsonx_ai.foundation_models import ModelInference

            credentials = {
                "url": values["url"].get_secret_value() if values["url"] else None,
                "apikey": values["apikey"].get_secret_value()
                if values["apikey"]
                else None,
                "token": values["token"].get_secret_value()
                if values["token"]
                else None,
                "password": values["password"].get_secret_value()
                if values["password"]
                else None,
                "username": values["username"].get_secret_value()
                if values["username"]
                else None,
                "instance_id": values["instance_id"].get_secret_value()
                if values["instance_id"]
                else None,
                "version": values["version"].get_secret_value()
                if values["version"]
                else None,
            }
            credentials_without_none_value = {
                key: value for key, value in credentials.items() if value is not None
            }
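            # Only credentials that were actually supplied are forwarded: entries
            # that resolved to None are dropped before the ModelInference client
            # is constructed below.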

            watsonx_model = ModelInference(
                model_id=values["model_id"],
                deployment_id=values["deployment_id"],
                credentials=credentials_without_none_value,
                params=values["params"],
                project_id=values["project_id"],
                space_id=values["space_id"],
                verify=values["verify"],
            )
            values["watsonx_model"] = watsonx_model

        except ImportError:
            raise ImportError(
                "Could not import ibm_watsonx_ai python package. "
                "Please install it with `pip install ibm_watsonx_ai`."
            )
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            "model_id": self.model_id,
            "deployment_id": self.deployment_id,
            "params": self.params,
            "project_id": self.project_id,
            "space_id": self.space_id,
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "IBM watsonx.ai"

    @staticmethod
    def _extract_token_usage(
        response: Optional[List[Dict[str, Any]]] = None,
    ) -> Dict[str, Any]:
        if response is None:
            return {"generated_token_count": 0, "input_token_count": 0}

        input_token_count = 0
        generated_token_count = 0

        def get_count_value(key: str, result: Dict[str, Any]) -> int:
            return result.get(key, 0) or 0

        for res in response:
            results = res.get("results")
            if results:
                input_token_count += get_count_value("input_token_count", results[0])
                generated_token_count += get_count_value(
                    "generated_token_count", results[0]
                )

        return {
            "generated_token_count": generated_token_count,
            "input_token_count": input_token_count,
        }

    def _get_chat_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
        params = self.params if self.params else {}
        if stop is not None:
            params["stop_sequences"] = stop
        return params

    def _create_llm_result(self, response: List[dict]) -> LLMResult:
        """Create the LLMResult from the choices and prompts."""
        generations = []
        for res in response:
            results = res.get("results")
            if results:
                finish_reason = results[0].get("stop_reason")
                gen = Generation(
                    text=results[0].get("generated_text"),
                    generation_info={"finish_reason": finish_reason},
                )
                generations.append([gen])
        final_token_usage = self._extract_token_usage(response)
        llm_output = {
            "token_usage": final_token_usage,
            "model_id": self.model_id,
            "deployment_id": self.deployment_id,
        }
        return LLMResult(generations=generations, llm_output=llm_output)

    def _stream_response_to_generation_chunk(
        self,
        stream_response: Dict[str, Any],
    ) -> GenerationChunk:
        """Convert a stream response to a generation chunk."""
        if not stream_response["results"]:
            return GenerationChunk(text="")
        return GenerationChunk(
            text=stream_response["results"][0]["generated_text"],
            generation_info=dict(
                finish_reason=stream_response["results"][0].get("stop_reason", None),
                llm_output={
                    "model_id": self.model_id,
                    "deployment_id": self.deployment_id,
                },
            ),
        )

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the IBM watsonx.ai inference endpoint.
        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.
            run_manager: Optional callback manager.
        Returns:
            The string generated by the model.
        Example:
            .. code-block:: python

                response = watsonx_llm.invoke("What is a molecule")
        """
        result = self._generate(
            prompts=[prompt], stop=stop, run_manager=run_manager, **kwargs
        )
        return result.generations[0][0].text

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        stream: Optional[bool] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call the IBM watsonx.ai inference endpoint which then generates the response.
        Args:
            prompts: List of strings (prompts) to pass into the model.
            stop: Optional list of stop words to use when generating.
            run_manager: Optional callback manager.
        Returns:
            The full LLMResult output.
        Example:
            .. code-block:: python

                response = watsonx_llm.generate(["What is a molecule"])
        """
        params = self._get_chat_params(stop=stop)
        should_stream = stream if stream is not None else self.streaming
        if should_stream:
            if len(prompts) > 1:
                raise ValueError(
                    f"WatsonxLLM currently only supports single prompt, got {prompts}"
                )
            generation = GenerationChunk(text="")
            stream_iter = self._stream(
                prompts[0], stop=stop, run_manager=run_manager, **kwargs
            )
            for chunk in stream_iter:
                if generation is None:
                    generation = chunk
                else:
                    generation += chunk
            assert generation is not None
            if isinstance(generation.generation_info, dict):
                llm_output = generation.generation_info.pop("llm_output")
                return LLMResult(generations=[[generation]], llm_output=llm_output)
            return LLMResult(generations=[[generation]])
        else:
            response = self.watsonx_model.generate(prompt=prompts, params=params)
            return self._create_llm_result(response)

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Call the IBM watsonx.ai inference endpoint which then streams the response.
        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.
            run_manager: Optional callback manager.
        Returns:
            The iterator which yields generation chunks.
        Example:
            .. code-block:: python

                response = watsonx_llm.stream("What is a molecule")
                for chunk in response:
                    print(chunk, end='')  # noqa: T201
        """
        params = self._get_chat_params(stop=stop)
        for stream_resp in self.watsonx_model.generate_text_stream(
            prompt=prompt, raw_response=True, params=params
        ):
            chunk = self._stream_response_to_generation_chunk(stream_resp)
            if run_manager:
                run_manager.on_llm_new_token(chunk.text, chunk=chunk)
            yield chunk
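

# Illustrative usage sketch (assumes valid watsonx.ai credentials and the
# ibm_watsonx_ai package; the model_id, url, apikey and project_id values are
# placeholders taken from the class docstring example):
#
#     llm = WatsonxLLM(
#         model_id="google/flan-ul2",
#         url="https://us-south.ml.cloud.ibm.com",
#         apikey="*****",
#         project_id="*****",
#     )
#     print(llm.invoke("What is a molecule"))
#     for chunk in llm.stream("What is a molecule"):
#         print(chunk, end="")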