import torch
import transformer_engine as te
import transformer_engine_torch as tex
from torch.distributed._functional_collectives import AsyncCollectiveTensor
from torch.distributed._tensor.api import DTensor

from cosmos_predict2._src.reason1.utils import distributed


def get_local_tensor_if_DTensor(tensor: torch.Tensor | DTensor) -> torch.Tensor:
    """Return the local shard of a DTensor; plain tensors pass through unchanged."""
    if isinstance(tensor, DTensor):
        local = tensor.to_local()
        # to_local() can hand back an AsyncCollectiveTensor; wait() materializes the data.
        if isinstance(local, AsyncCollectiveTensor):
            return local.wait()
        return local
    return tensor


class FusedAdam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    Currently GPU-only. Requires Transformer Engine to be installed, e.g. via
    ``pip install transformer_engine[pytorch]``.

    This version of fused Adam implements 2 fusions.

      * Fusion of the Adam update's elementwise operations
      * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters
        into one or a few kernel launches.

    :class:`FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
    or ``torch.optim.Adam`` with ``adam_w_mode=False``::

        opt = FusedAdam(model.parameters(), lr = ....)
        ...
        opt.step()

    .. warning::
        A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``.
        These additional arguments are now deprecated and unnecessary.

    Adam was proposed in `Adam - A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED in FusedAdam!
        adam_w_mode (boolean, optional): whether to apply decoupled weight decay,
            also known as AdamW, instead of L2 regularization. (default: True)
        capturable (bool, optional): whether to use the version of the optimizer
            that can be used with CUDA Graphs. (default: False)
        master_weights (bool, optional): whether to maintain FP32 master weights
           in the optimizer with FP16 mixed precision training, currently can
           only be used with capturable set to True. (default: False)
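
    For CUDA Graphs, a capturable instance can be stepped inside a captured
    region. A minimal sketch (assuming the usual CUDA Graphs recipe of warm-up
    iterations on a side stream and static input/grad buffers)::

        opt = FusedAdam(model.parameters(), lr=1e-3, capturable=True)
        g = torch.cuda.CUDAGraph()
        with torch.cuda.graph(g):
            opt.step()
        g.replay()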

    .. _Adam - A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        bias_correction=True,
        betas=(0.9, 0.999),
        eps=1e-8,
        adam_w_mode=True,
        weight_decay=0.0,
        amsgrad=False,
        capturable=False,
        master_weights=False,
    ):
        if amsgrad:
            raise RuntimeError("FusedAdam does not support the AMSGrad variant.")
        if master_weights and not capturable:
            raise RuntimeError("Master weights is currently only supported with the capturable version.")
        # If the optimizer is capturable, the LR must live on the GPU as a tensor.
        lr = torch.tensor(lr, dtype=torch.float32) if capturable else lr
        defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
        super(FusedAdam, self).__init__(params, defaults)
        self.adam_w_mode = 1 if adam_w_mode else 0
        self.capturable = capturable
        self.master_weights = master_weights
        # Full-precision master weights are materialized lazily on the first step().
        self.param_groups_master = None

        if capturable:
            for idx, group in enumerate(self.param_groups):
                if len(group["params"]) == 0:
                    continue
                device = group["params"][0].device
                for item in ["lr"]:
                    if not isinstance(group[item], torch.Tensor):
                        group[item] = torch.tensor(group[item], dtype=torch.float32)
                    self.param_groups[idx][item] = group[item].to(device=device)
            self._step_supports_amp_scaling = True

        # Skip buffer
        self._dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device="cuda")
        self.multi_tensor_adam = tex.multi_tensor_adam
        self.multi_tensor_adam_capturable = tex.multi_tensor_adam_capturable
        self.multi_tensor_adam_capturable_master = tex.multi_tensor_adam_capturable_master

    def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

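        For example, with the standard ``torch.optim`` closure pattern (a
        sketch; ``model``, ``loss_fn``, ``inputs``, and ``target`` are
        placeholders)::

            def closure():
                opt.zero_grad()
                loss = loss_fn(model(inputs), target)
                loss.backward()
                return loss

            loss = opt.step(closure)
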
        The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
        """
        if any(p is not None for p in [grads, output_params, scale, grad_norms]):
            raise RuntimeError(
                "FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments."
            )
        loss = None
        if closure is not None:
            loss = closure()

        if self.param_groups_master is None:
            # Create full-precision master weights (placeholders when master_weights is off).
            self.param_groups_master = []
            for i, pg in enumerate(self.param_groups):
                param_list = pg["params"]
                self.param_groups_master.append(
                    {"params": [p.clone().detach().float() if self.master_weights else None for p in param_list]}
                )

        for group, group_master in zip(self.param_groups, self.param_groups_master):
            if len(group["params"]) == 0:
                continue
            device = group["params"][0].device
            bias_correction = 1 if "bias_correction" in group and group["bias_correction"] else 0
            beta1, beta2 = group["betas"]

            # Assume the same step across the group; per-parameter steps could be
            # supported by making this a tensor list passed into the kernel.
            if "step" in group:
                if self.capturable:
                    group["step"] = (
                        group["step"].to(device=device)
                        if isinstance(group["step"], torch.Tensor)
                        else torch.tensor(group["step"], dtype=torch.int32, device=device)
                    )
                    # Only advance the step when the overflow buffer is not set.
                    group["step"] += (self._dummy_overflow_buf != 1).to(torch.int)
                else:
                    group["step"] += 1
            else:
                group["step"] = 1 if not self.capturable else torch.tensor([1], dtype=torch.int32, device=device)

            if self.capturable:
                group["step"] = (
                    group["step"].to(device=device)
                    if isinstance(group["step"], torch.Tensor)
                    else torch.tensor(group["step"], dtype=torch.int32, device=device)
                )

            # Create lists for multi-tensor apply, bucketed by parameter dtype.
            g_16, p_16, m_16, v_16 = [], [], [], []
            g_bf, p_bf, m_bf, v_bf = [], [], [], []
            g_32, p_32, m_32, v_32 = [], [], [], []
            p_16_master = []
            p_32_master = []
            bf16_master = []

            for p, p_master in zip(group["params"], group_master["params"]):
                if p.grad is None:
                    continue
                if p.grad.data.is_sparse:
                    raise RuntimeError("FusedAdam does not support sparse gradients, please consider SparseAdam instead")

                state = self.state[p]
                # State initialization: FP32 exponential moving averages of the
                # gradient and of the squared gradient.
                if len(state) == 0:
                    state["exp_avg"] = torch.zeros_like(p.data).float()
                    state["exp_avg_sq"] = torch.zeros_like(p.data).float()

                if p.dtype == torch.float16:
                    g_l, p_l, m_l, v_l, master_l = g_16, p_16, m_16, v_16, p_16_master
                elif p.dtype == torch.bfloat16:
                    g_l, p_l, m_l, v_l, master_l = g_bf, p_bf, m_bf, v_bf, bf16_master
                elif p.dtype == torch.float32:
                    g_l, p_l, m_l, v_l, master_l = g_32, p_32, m_32, v_32, p_32_master
                else:
                    raise RuntimeError("FusedAdam only support fp16 and fp32.")

                if self.master_weights:
                    master_l.append(get_local_tensor_if_DTensor(p_master))
                g_l.append(get_local_tensor_if_DTensor(p.grad.data))
                p_l.append(get_local_tensor_if_DTensor(p.data))
                m_l.append(get_local_tensor_if_DTensor(state["exp_avg"]))
                v_l.append(get_local_tensor_if_DTensor(state["exp_avg_sq"]))

            buckets = (
                (g_16, p_16, m_16, v_16, p_16_master),
                (g_bf, p_bf, m_bf, v_bf, bf16_master),
                (g_32, p_32, m_32, v_32, p_32_master),
            )
            if self.capturable:
                # If the optimizer is capturable, the grad scaler works on the GPU
                # and the capturable multi-tensor kernels must be called.
                found_inf = (
                    grad_scaler._check_inf_per_device(self)[device]
                    if grad_scaler is not None
                    else torch.zeros(1, device=device)
                )
                self._dummy_overflow_buf.copy_(found_inf)

                # Get the unscale factor.
                scale, inv_scale = None, None
                if grad_scaler:
                    scale = grad_scaler._get_scale_async()
                    inv_scale = scale.double().reciprocal().float()
                else:
                    scale = torch.ones(1, device=device, dtype=torch.float32)
                    inv_scale = torch.ones(1, device=device, dtype=torch.float32)

                for g_l, p_l, m_l, v_l, master_l in buckets:
                    if len(g_l) > 0:
                        te.pytorch.optimizers.multi_tensor_applier(
                            self.multi_tensor_adam_capturable_master
                            if self.master_weights
                            else self.multi_tensor_adam_capturable,
                            self._dummy_overflow_buf,
                            [g_l, p_l, m_l, v_l, master_l] if self.master_weights else [g_l, p_l, m_l, v_l],
                            group["lr"],
                            beta1,
                            beta2,
                            group["eps"],
                            group["step"],
                            self.adam_w_mode,
                            bias_correction,
                            group["weight_decay"],
                            inv_scale,
                        )
            else:
                for g_l, p_l, m_l, v_l, _ in buckets:
                    if len(g_l) > 0:
                        te.pytorch.optimizers.multi_tensor_applier(
                            self.multi_tensor_adam,
                            self._dummy_overflow_buf,
                            [g_l, p_l, m_l, v_l],
                            group["lr"],
                            beta1,
                            beta2,
                            group["eps"],
                            group["step"],
                            self.adam_w_mode,
                            bias_correction,
                            group["weight_decay"],
                        )

        return loss

    def load_state_dict(self, state_dict):
        super().load_state_dict(state_dict)
        for group in self.param_groups:
            if self.capturable:
                # The capturable kernels expect the LR as a GPU tensor.
                group["lr"] = (
                    group["lr"].cuda()
                    if isinstance(group["lr"], torch.Tensor)
                    else torch.tensor(group["lr"], dtype=torch.float32).cuda()
                )
            if "step" in group:
                if self.capturable:
                    # Make every rank agree on the step after resuming from a checkpoint.
                    if distributed.get_rank() == 0:
                        step = (
                            group["step"].cuda()
                            if isinstance(group["step"], torch.Tensor)
                            else torch.tensor([group["step"]], dtype=torch.int32).cuda()
                        )
                    else:
                        # Placeholder; overwritten by the broadcast below.
                        step = torch.ones(1, dtype=torch.int32).cuda()
                    distributed.broadcast(step, 0)
                    group["step"] = step
                elif isinstance(group["step"], torch.Tensor):
                    group["step"] = group["step"].item()
            for p in group["params"]:
                state = self.state[p]
                if "exp_avg" in state:
                    state["exp_avg"] = state["exp_avg"].float()
                    state["exp_avg_sq"] = state["exp_avg_sq"].float()