import logging
from typing import Any, Dict, List, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM
from langchain_core.utils import get_from_dict_or_env, pre_init

from langchain_community.llms.utils import enforce_stop_tokens

logger = logging.getLogger(__name__)


class PredictionGuard(LLM):
    """Prediction Guard large language models.

    To use, you should have the ``predictionguard`` python package installed, and the
    environment variable ``PREDICTIONGUARD_TOKEN`` set with your access token, or pass
    it as a named parameter to the constructor. To use Prediction Guard's API along
    with OpenAI models, set the environment variable ``OPENAI_API_KEY`` with your
    OpenAI API key as well.

    Example:
        .. code-block:: python

            pgllm = PredictionGuard(model="MPT-7B-Instruct",
                                    token="my-access-token",
                                    output={
                                        "type": "boolean"
                                    })
    """

    client: Any  #: :meta private:

    model: Optional[str] = "MPT-7B-Instruct"
    """Model name to use."""

    output: Optional[Dict[str, Any]] = None
    """The output type or structure for controlling the LLM output."""

    max_tokens: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: float = 0.75
    """A non-negative float that tunes the degree of randomness in generation."""

    token: Optional[str] = None
    """Your Prediction Guard access token."""

    stop: Optional[List[str]] = None

    class Config:
        extra = "forbid"

    @pre_init
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the access token and python package exists in environment."""
        token = get_from_dict_or_env(values, "token", "PREDICTIONGUARD_TOKEN")
        try:
            import predictionguard as pg

            values["client"] = pg.Client(token=token)
        except ImportError:
            raise ImportError(
                "Could not import predictionguard python package. "
                "Please install it with `pip install predictionguard`."
            )
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the Prediction Guard API."""
        return {
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "predictionguard"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to Prediction Guard's model API.

        Args:
            prompt: The prompt to pass into the model.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = pgllm.invoke("Tell me a joke.")
        """
        import predictionguard as pg

        params = self._default_params
        # `stop` may come from either the constructor default or the call site,
        # but not both.
        if self.stop is not None and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")
        elif self.stop is not None:
            params["stop_sequences"] = self.stop
        else:
            params["stop_sequences"] = stop

        response = pg.Completion.create(
            model=self.model,
            prompt=prompt,
            output=self.output,
            temperature=params["temperature"],
            max_tokens=params["max_tokens"],
            **kwargs,
        )
        text = response["choices"][0]["text"]

        # Strip any stop sequences from the returned completion.
        if stop is not None or self.stop is not None:
            text = enforce_stop_tokens(text, params["stop_sequences"])

        return text
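
# Usage sketch (illustrative, not part of the library module): assumes the
# ``predictionguard`` package is installed and a valid access token is set in
# the PREDICTIONGUARD_TOKEN environment variable. The model name mirrors the
# class default; the prompt is a placeholder.
if __name__ == "__main__":
    pgllm = PredictionGuard(model="MPT-7B-Instruct", max_tokens=64)
    print(pgllm.invoke("Tell me a joke."))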