from typing import Any, Dict, List, Optional

from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator


class LlamaCppEmbeddings(BaseModel, Embeddings):
    """llama.cpp embedding models.

    To use, you should have the llama-cpp-python library installed, and provide the
    path to the Llama model as a named parameter to the constructor.
    Check out: https://github.com/abetlen/llama-cpp-python

    Example:
        .. code-block:: python

            from langchain_community.embeddings import LlamaCppEmbeddings
            llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
    """

    client: Any  #: :meta private:
    model_path: str

    # Model-loading parameters; these are passed through to llama_cpp.Llama.
    n_ctx: int = Field(512, alias="n_ctx")
    n_parts: int = Field(-1, alias="n_parts")
    seed: int = Field(-1, alias="seed")
    f16_kv: bool = Field(False, alias="f16_kv")
    logits_all: bool = Field(False, alias="logits_all")
    vocab_only: bool = Field(False, alias="vocab_only")
    use_mlock: bool = Field(False, alias="use_mlock")
    n_threads: Optional[int] = Field(None, alias="n_threads")
    n_batch: Optional[int] = Field(512, alias="n_batch")
    n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
    verbose: bool = Field(True, alias="verbose")
    device: Optional[str] = Field(None, alias="device")

    class Config:
        extra = "forbid"

    @root_validator(pre=False, skip_on_failure=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that llama-cpp-python library is installed."""
        model_path = values["model_path"]
        model_param_names = [
            "n_ctx",
            "n_parts",
            "seed",
            "f16_kv",
            "logits_all",
            "vocab_only",
            "use_mlock",
            "n_threads",
            "n_batch",
            "verbose",
            "device",
        ]
        model_params = {k: values[k] for k in model_param_names}
        # For backwards compatibility, only include n_gpu_layers if it was set.
        if values["n_gpu_layers"] is not None:
            model_params["n_gpu_layers"] = values["n_gpu_layers"]

        try:
            from llama_cpp import Llama

            values["client"] = Llama(model_path, embedding=True, **model_params)
        except ImportError:
            raise ImportError(
                "Could not import llama-cpp-python library. "
                "Please install the llama-cpp-python library to "
                "use this embedding model: pip install llama-cpp-python"
            )
        except Exception as e:
            raise ValueError(
                f"Could not load Llama model from path: {model_path}. "
                f"Received error {e}"
            )

        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents using the Llama model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = self.client.create_embedding(texts)
        return [list(map(float, e["embedding"])) for e in embeddings["data"]]

    def embed_query(self, text: str) -> List[float]:
        """Embed a query using the Llama model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(text)
        return list(map(float, embedding))
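

# A minimal usage sketch of the class above: it embeds two documents and a
# query and prints the number of document vectors plus the query-vector
# dimensionality. The model path is a placeholder (as in the class docstring);
# llama-cpp-python and a local llama.cpp-compatible model file are assumed.
if __name__ == "__main__":
    llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
    doc_vectors = llama.embed_documents(["first document", "second document"])
    query_vector = llama.embed_query("What is in the first document?")
    print(len(doc_vectors), len(query_vector))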