
"""This module implements a loader and dumper for the svmlight format

This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.

The first element of each line can be used to store a target variable to
predict.

This format is used as the default format for both svmlight and the
libsvm command line programs.
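
For illustration, a single line in this format has the general shape
``<target> [qid:<integer>] <feature>:<value> ... # comment`` (the concrete
values below are made up)::

    -1 qid:2 1:0.43 3:0.12 9284:0.2 # an optional comment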
    N)closing)Integral   )__version__)check_array)
HasMethodsInterval
StrOptionsvalidate_params   )_dump_svmlight_file_load_svmlight_fileleft)closedreadno_validationbooleanautof
n_featuresdtype
multilabel
zero_basedquery_idoffsetlengthT)prefer_skip_nested_validationFr   r   r   r   r   r   r   c                6    [        [        U /UUUUUUUS95      $ )ae  Load datasets in the svmlight / libsvm format into sparse CSR matrix.

This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.

The first element of each line can be used to store a target variable
to predict.

This format is used as the default format for both svmlight and the
libsvm command line programs.

Parsing a text based source can be expensive. When repeatedly
working on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.

In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
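
For example, these identifiers can be retrieved together with the data (the
file name below is only a placeholder)::

    X, y, qid = load_svmlight_file("ranking_data.svmlight", query_id=True)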

This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader

Parameters
----------
f : str, path-like, file-like or int
    (Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
    be uncompressed on the fly. If an integer is passed, it is assumed to
    be a file descriptor. A file-like or file descriptor will not be closed
    by this function. A file-like object must be opened in binary mode.

    .. versionchanged:: 1.2
       Path-like objects are now accepted.

n_features : int, default=None
    The number of features to use. If None, it will be inferred. This
    argument is useful to load several files that are subsets of a
    bigger sliced dataset: each subset might not have examples of
    every feature, hence the inferred shape might vary from one
    slice to another.
    n_features is only required if ``offset`` or ``length`` are passed a
    non-default value.

dtype : numpy data type, default=np.float64
    Data type of dataset to be loaded. This will be the data type of the
    output numpy arrays ``X`` and ``y``.

multilabel : bool, default=False
    Samples may have several labels each (see
    https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).

zero_based : bool or "auto", default="auto"
    Whether column indices in f are zero-based (True) or one-based
    (False). If column indices are one-based, they are transformed to
    zero-based to match Python/NumPy conventions.
    If set to "auto", a heuristic check is applied to determine this from
    the file contents. Both kinds of files occur "in the wild", but they
    are unfortunately not self-identifying. Using "auto" or True should
    always be safe when no ``offset`` or ``length`` is passed.
    If ``offset`` or ``length`` are passed, the "auto" mode falls back
    to ``zero_based=True`` to avoid having the heuristic check yield
    inconsistent results on different segments of the file.

query_id : bool, default=False
    If True, will return the query_id array for each file.

offset : int, default=0
    Ignore the offset first bytes by seeking forward, then
    discarding the following bytes up until the next new line
    character.

length : int, default=-1
    If strictly positive, stop reading any new line of data once the
    position in the file has reached the (offset + length) bytes threshold.

Returns
-------
X : scipy.sparse matrix of shape (n_samples, n_features)
    The data matrix.

y : ndarray of shape (n_samples,), or a list of tuples of length n_samples
    The target. It is a list of tuples when ``multilabel=True``, else an
    ndarray.

query_id : array of shape (n_samples,)
   The query_id for each sample. Only returned when query_id is set to
   True.

See Also
--------
load_svmlight_files : Similar function for loading multiple files in this
    format, enforcing the same number of features/columns on all of them.

Examples
--------
To use joblib.Memory to cache the svmlight file::

    from joblib import Memory
    from sklearn.datasets import load_svmlight_file
    mem = Memory("./mycache")

    @mem.cache
    def get_data():
        data = load_svmlight_file("mysvmlightfile")
        return data[0], data[1]

    X, y = get_data()
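
The ``offset`` and ``length`` parameters make it possible to read a single
file in several chunks, as long as ``n_features`` is fixed explicitly (the
file name and byte counts below are placeholders)::

    X_head, y_head = load_svmlight_file(
        "mysvmlightfile", n_features=30, offset=0, length=10_000
    )
    X_tail, y_tail = load_svmlight_file(
        "mysvmlightfile", n_features=30, offset=10_000
    )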
    """
    return tuple(
        load_svmlight_files(
            [f],
            n_features=n_features,
            dtype=dtype,
            multilabel=multilabel,
            zero_based=zero_based,
            query_id=query_id,
            offset=offset,
            length=length,
        )
    )


def _gen_open(f):
    # Open a path, path-like object or file descriptor in binary mode,
    # transparently decompressing ".gz" and ".bz2" files.
    if isinstance(f, int):  # file descriptor
        return open(f, "rb", closefd=False)
    elif isinstance(f, os.PathLike):
        f = os.fspath(f)
    elif not isinstance(f, str):
        raise TypeError("expected {str, int, path-like, file-like}, got %s" % type(f))

    _, ext = os.path.splitext(f)
    if ext == ".gz":
        import gzip

        return gzip.open(f, "rb")
    elif ext == ".bz2":
        from bz2 import BZ2File

        return BZ2File(f, "rb")
    else:
        return open(f, "rb")


def _open_and_load(f, dtype, multilabel, zero_based, query_id, offset=0, length=-1):
    if hasattr(f, "read"):
        actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file(
            f, dtype, multilabel, zero_based, query_id, offset, length
        )
    else:
        with closing(_gen_open(f)) as f:
            actual_dtype, data, ind, indptr, labels, query = _load_svmlight_file(
                f, dtype, multilabel, zero_based, query_id, offset, length
            )

    # convert from array.array, give data the right dtype
    if not multilabel:
        labels = np.frombuffer(labels, np.float64)
    data = np.frombuffer(data, actual_dtype)
    indices = np.frombuffer(ind, np.longlong)
    indptr = np.frombuffer(indptr, dtype=np.longlong)  # never empty
    query = np.frombuffer(query, np.int64)

    data = np.asarray(data, dtype=dtype)  # no-op for float{32,64}
    return data, indices, indptr, labels, query


@validate_params(
    {
        "files": [
            "array-like",
            str,
            os.PathLike,
            Interval(Integral, 0, None, closed="left"),
            HasMethods("read"),
        ],
        "n_features": [Interval(Integral, 1, None, closed="left"), None],
        "dtype": "no_validation",  # validation is delegated to numpy
        "multilabel": ["boolean"],
        "zero_based": ["boolean", StrOptions({"auto"})],
        "query_id": ["boolean"],
        "offset": [Interval(Integral, 0, None, closed="left")],
        "length": [Integral],
    },
    prefer_skip_nested_validation=True,
)
def load_svmlight_files(
    files,
    *,
    n_features=None,
    dtype=np.float64,
    multilabel=False,
    zero_based="auto",
    query_id=False,
    offset=0,
    length=-1,
):
    """Load dataset from multiple files in SVMlight format.

This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the sample vectors are constrained to all have the same number of
features.

In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.

Parameters
----------
files : array-like, dtype=str, path-like, file-like or int
    (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
    be uncompressed on the fly. If an integer is passed, it is assumed to
    be a file descriptor. File-likes and file descriptors will not be
    closed by this function. File-like objects must be opened in binary
    mode.

    .. versionchanged:: 1.2
       Path-like objects are now accepted.

n_features : int, default=None
    The number of features to use. If None, it will be inferred from the
    maximum column index occurring in any of the files.

    This can be set to a higher value than the actual number of features
    in any of the input files, but setting it to a lower value will cause
    an exception to be raised.

dtype : numpy data type, default=np.float64
    Data type of dataset to be loaded. This will be the data type of the
    output numpy arrays ``X`` and ``y``.

multilabel : bool, default=False
    Samples may have several labels each (see
    https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).

zero_based : bool or "auto", default="auto"
    Whether column indices in f are zero-based (True) or one-based
    (False). If column indices are one-based, they are transformed to
    zero-based to match Python/NumPy conventions.
    If set to "auto", a heuristic check is applied to determine this from
    the file contents. Both kinds of files occur "in the wild", but they
    are unfortunately not self-identifying. Using "auto" or True should
    always be safe when no offset or length is passed.
    If offset or length are passed, the "auto" mode falls back
    to zero_based=True to avoid having the heuristic check yield
    inconsistent results on different segments of the file.

query_id : bool, default=False
    If True, will return the query_id array for each file.

offset : int, default=0
    Ignore the offset first bytes by seeking forward, then
    discarding the following bytes up until the next new line
    character.

length : int, default=-1
    If strictly positive, stop reading any new line of data once the
    position in the file has reached the (offset + length) bytes threshold.

Returns
-------
[X1, y1, ..., Xn, yn] or [X1, y1, q1, ..., Xn, yn, qn]: list of arrays
    Each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
    If query_id is set to True, this will return instead (Xi, yi, qi)
    triplets.

See Also
--------
load_svmlight_file: Similar function for loading a single file in this
    format.

Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
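
For instance, loading a training and a test file in a single call keeps their
column spaces aligned (the file names below are placeholders)::

    X_train, y_train, X_test, y_test = load_svmlight_files(
        ["train.svm", "test.svm"]
    )
    assert X_train.shape[1] == X_test.shape[1]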

Examples
--------
To use joblib.Memory to cache the svmlight file::

    from joblib import Memory
    from sklearn.datasets import load_svmlight_file
    mem = Memory("./mycache")

    @mem.cache
    def get_data():
        data_train, target_train, data_test, target_test = load_svmlight_files(
            ["svmlight_file_train", "svmlight_file_test"]
        )
        return data_train, target_train, data_test, target_test

    X_train, y_train, X_test, y_test = get_data()
    """
    if (offset != 0 or length > 0) and zero_based == "auto":
        # disable the heuristic search to avoid getting inconsistent results
        # on different segments of the file
        zero_based = True

    if (offset != 0 or length > 0) and n_features is None:
        raise ValueError("n_features is required when offset or length is specified.")

    r = [
        _open_and_load(
            f,
            dtype,
            multilabel,
            bool(zero_based),
            bool(query_id),
            offset=offset,
            length=length,
        )
        for f in files
    ]

    if (
        zero_based is False
        or zero_based == "auto"
        and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r)
    ):
        for _, indices, _, _, _ in r:
            indices -= 1

    n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1

    if n_features is None:
        n_features = n_f
    elif n_features < n_f:
        raise ValueError(
            "n_features was set to {}, but input file contains {} features".format(
                n_features, n_f
            )
        )

    result = []
    for data, indices, indptr, y, query_values in r:
        shape = (indptr.shape[0] - 1, n_features)
        X = sp.csr_matrix((data, indices, indptr), shape)
        X.sort_indices()
        result += X, y
        if query_id:
            result.append(query_values)

    return result


def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
    if comment:
        f.write(
            (
                "# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__
            ).encode()
        )
        f.write(
            ("# Column indices are %s-based\n" % ["zero", "one"][one_based]).encode()
        )

        f.write(b"#\n")
        f.writelines(b"# %s\n" % line for line in comment.splitlines())

    X_is_sp = sp.issparse(X)
    y_is_sp = sp.issparse(y)
    if not multilabel and not y_is_sp:
        y = y[:, np.newaxis]

    _dump_svmlight_file(X, y, f, multilabel, one_based, query_id, X_is_sp, y_is_sp)


@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like", "sparse matrix"],
        "f": [str, HasMethods(["write"])],
        "zero_based": ["boolean"],
        "comment": [str, bytes, None],
        "query_id": ["array-like", None],
        "multilabel": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def dump_svmlight_file(
    X,
    y,
    f,
    *,
    zero_based=True,
    comment=None,
    query_id=None,
    multilabel=False,
):
    """Dump the dataset in svmlight / libsvm file format.

This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.

The first element of each line can be used to store a target variable
to predict.

Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
    Training vectors, where `n_samples` is the number of samples and
    `n_features` is the number of features.

y : {array-like, sparse matrix}, shape = (n_samples,) or (n_samples, n_labels)
    Target values. Class labels must be an
    integer or float, or array-like objects of integer or float for
    multilabel classifications.

f : str or file-like in binary mode
    If string, specifies the path that will contain the data.
    If file-like, data will be written to f. f should be opened in binary
    mode.

zero_based : bool, default=True
    Whether column indices should be written zero-based (True) or one-based
    (False).

comment : str or bytes, default=None
    Comment to insert at the top of the file. This should be either a
    Unicode string, which will be encoded as UTF-8, or an ASCII byte
    string.
    If a comment is given, then it will be preceded by one that identifies
    the file as having been dumped by scikit-learn. Note that not all
    tools grok comments in SVMlight files.
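
    For example, a short provenance note can be written at the top of the
    dumped file (the file name and wording below are arbitrary)::

        dump_svmlight_file(X, y, "data.svm", comment="exported by run 42")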

query_id : array-like of shape (n_samples,), default=None
    Array containing pairwise preference constraints (qid in svmlight
    format).

multilabel : bool, default=False
    Samples may have several labels each (see
    https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html).

    .. versionadded:: 0.17
       parameter `multilabel` to support multilabel datasets.

Examples
--------
>>> from sklearn.datasets import dump_svmlight_file, make_classification
>>> X, y = make_classification(random_state=0)
>>> output_file = "my_dataset.svmlight"
>>> dump_svmlight_file(X, y, output_file)  # doctest: +SKIP
    """
    if comment is not None:
        # Convert comments string to list of lines in UTF-8.
        # If a byte string is passed, then check whether it's ASCII;
        # if a user wants to get fancy, they'll have to decode themselves.
        if isinstance(comment, bytes):
            comment.decode("ascii")  # just for the exception
        else:
            comment = comment.encode("utf-8")
        if b"\0" in comment:
            raise ValueError("comment string contains NUL byte")

    yval = check_array(y, accept_sparse="csr", ensure_2d=False)
    if sp.issparse(yval):
        if yval.shape[1] != 1 and not multilabel:
            raise ValueError(
                "expected y of shape (n_samples, 1), got %r" % (yval.shape,)
            )
    else:
        if yval.ndim != 1 and not multilabel:
            raise ValueError("expected y of shape (n_samples,), got %r" % (yval.shape,))

    Xval = check_array(X, accept_sparse="csr")
    if Xval.shape[0] != yval.shape[0]:
        raise ValueError(
            "X.shape[0] and y.shape[0] should be the same, got %r and %r instead."
            % (Xval.shape[0], yval.shape[0])
        )

    # X and y may be CSR matrices with unsorted indices; sort them here,
    # but take care not to modify the user's input in place.
    if yval is y and hasattr(yval, "sorted_indices"):
        y = yval.sorted_indices()
    else:
        y = yval
        if hasattr(y, "sort_indices"):
            y.sort_indices()

    if Xval is X and hasattr(Xval, "sorted_indices"):
        X = Xval.sorted_indices()
    else:
        X = Xval
        if hasattr(X, "sort_indices"):
            X.sort_indices()

    if query_id is not None:
        query_id = np.asarray(query_id)
        if query_id.shape[0] != yval.shape[0]:
            raise ValueError(
                "expected query_id of shape (n_samples,), got %r" % (query_id.shape,)
            )

    one_based = not zero_based

    if hasattr(f, "write"):
        _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
    else:
        with open(f, "wb") as f:
            _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)