"""Better tokenizing for coverage.py."""

from __future__ import annotations

import ast
import io
import keyword
import re
import sys
import token
import tokenize

from typing import Iterable, List, Optional, Set, Tuple

from coverage import env
from coverage.types import TLineNo, TSourceTokenLines


TokenInfos = Iterable[tokenize.TokenInfo]

def _phys_tokens(toks: TokenInfos) -> TokenInfos:
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines.  This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens()

    """
    last_line: Optional[str] = None
    last_lineno = -1
    last_ttext = ""
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            if last_line and last_line.endswith("\\\n"):
                # We are at the beginning of a new line, and the last line
                # ended with a backslash.  We probably have to inject a
                # backslash token into the stream, unless the backslash is
                # already part of a string token's text.
                inject_backslash = True
                if last_ttext.endswith("\\"):
                    inject_backslash = False
                elif ttype == token.STRING:
                    if "\n" in ttext and ttext.split("\n", 1)[0][-1] == "\\":
                        # It's a multi-line string and the first line ends
                        # with a backslash, so we don't need to inject another.
                        inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    # Yield the token, with a fake token type.
                    yield tokenize.TokenInfo(
                        99999, "\\\n",
                        (slineno, ccol), (slineno, ccol + 2),
                        last_line,
                    )
            last_line = ltext
        if ttype not in (tokenize.NEWLINE, tokenize.NL):
            last_ttext = ttext
        yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext)
        last_lineno = elineno


 r,   c                   @  sF   e Zd ZdZdddZejdkrdddZejdkr!dddZdS dS )SoftKeywordFinderzCHelper for finding lines with soft keywords, like match/case lines.sourcestrr   Nonec                 C  s   t  | _| t| d S N)setsoft_key_linesvisitastparse)selfr.   r*   r*   r+   __init__R   s   zSoftKeywordFinder.__init__   
   node	ast.Matchc                 C  s8   | j |j |jD ]
}| j |jj q
| | dS z Invoked by ast.NodeVisitor.visitN)r3   addlinenocasespatterngeneric_visit)r7   r<   caser*   r*   r+   visit_MatchX   s   
zSoftKeywordFinder.visit_Match)r:      ast.TypeAliasc                 C  s   | j |j | | dS r>   )r3   r?   r@   rC   )r7   r<   r*   r*   r+   visit_TypeAlias`   s   z!SoftKeywordFinder.visit_TypeAliasN)r.   r/   r   r0   )r<   r=   r   r0   )r<   rG   r   r0   )	__name__
__module____qualname____doc__r8   sysversion_inforE   rH   r*   r*   r*   r+   r-   P   s    
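
# Illustrative example (an addition, not part of the original module): on
# Python 3.10+ the finder records the lines where a `match` statement and its
# `case` patterns begin, so soft keywords are colored only where they really
# act as keywords:
#
#     >>> sorted(SoftKeywordFinder("match x:\n    case 1:\n        pass\n").soft_key_lines)
#     [1, 2]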



r-   r.   r/   r
   c                 c  s   t jt jt jtjh}g }d}| ddd} t| }t	j
jr%t| j}t|D ]\}}\}}	\}
}}
d}td|D ]}|dkrL|V  g }d}d}nv|dkrSd}no||v rZd}nh|rm|	|krm|d	d
|	|  f d}tj|d dd }|t jkrt|rd}n2tjdkrt	j
jrt|rt|dkrd}nt|dkr|d d d	krd}nd}|r||v rd}|||f d}d}	q<|r|}q)|r|V  dS dS )a  Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing white space is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    r      z
r   Tz(
)Fr   ws xxNr:   keyr9   r   )r   INDENTDEDENTr   r   r   
expandtabsreplacegenerate_tokensr   
PYBEHAVIORsoft_keywordsr-   r3   r,   rer   appendtok_namegetlowerNAMEkeyword	iskeywordrM   rN   issoftkeywordr   )r.   	ws_tokenslinecoltokgenr3   r!   r"   sliner$   _r&   
mark_startpartmark_end	tok_classis_start_of_liner*   r*   r+   source_token_linesf   sZ   




ro   c                   @  s$   e Zd ZdZdddZdd	d
ZdS )CachedTokenizeraX  A one-element cache around tokenize.generate_tokens.

    When reporting, coverage.py tokenizes files twice, once to find the
    structure of the file, and once to syntax-color it.  Tokenizing is
    expensive, and easily cached.

    This is a one-element cache so that our twice-in-a-row tokenizing doesn't
    actually tokenize twice.

    r   r0   c                 C  s   d | _ g | _d S r1   )	last_textlast_tokens)r7   r*   r*   r+   r8      s   
zCachedTokenizer.__init__textr/   r   c                 C  sJ   || j kr"|| _ t|j}ztt|| _W | jS    d| _  | jS )z*A stand-in for `tokenize.generate_tokens`.N)rq   ioStringIOreadlinelistr   rX   rr   )r7   rs   rv   r*   r*   r+   rX      s   
zCachedTokenizer.generate_tokensN)r   r0   )rs   r/   r   r   )rI   rJ   rK   rL   r8   rX   r*   r*   r*   r+   rp      s    

rp   bytesc                 C  s   t | dj}t|d S )zDetermine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.

    Tr   )iter
splitlines__next__r   detect_encoding)r.   rv   r*   r*   r+   source_encoding   s   r}   )r   r   r   r   )r.   r/   r   r
   )r.   rx   r   r/   )rL   
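
# Illustrative usage (an added example, not part of the original module):
#
#     >>> source_encoding(b"# -*- coding: iso-8859-1 -*-\nx = 1\n")
#     'iso-8859-1'
#     >>> source_encoding(b"x = 1\n")
#     'utf-8'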