import torch
import transformer_engine
import transformer_engine_torch as tex

from cosmos_predict2._src.imaginaire.utils import distributed, log


class FusedAdam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    Currently GPU-only.  Requires Apex to be installed via
    ``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.

    This version of fused Adam implements 2 fusions.

      * Fusion of the Adam update's elementwise operations
      * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters
        into one or a few kernel launches.
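
    Schematically, the multi-tensor path buckets tensors by dtype and hands each bucket to a
    single fused kernel launch. Below is a sketch of the call that ``step()`` makes internally
    (argument names are illustrative; ``tex`` is the ``transformer_engine_torch`` extension module)::

        multi_tensor_applier(
            tex.multi_tensor_adam,                   # fused elementwise Adam kernel
            overflow_buf,                            # int flag tensor; a non-zero value skips the update
            [grads, params, exp_avgs, exp_avg_sqs],  # one list per state, covering every tensor in the bucket
            lr, beta1, beta2, eps, step, adam_w_mode, bias_correction, weight_decay,
        )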

    :class:`FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
    or ``torch.optim.Adam`` with ``adam_w_mode=False``::

        opt = FusedAdam(model.parameters(), lr = ....)
        ...
        opt.step()

    .. warning::
        A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``.
        These additional arguments are now deprecated and unnecessary.

    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED in FusedAdam!
        adam_w_mode (boolean, optional): whether to use decoupled weight decay
            (also known as AdamW) instead of L2 regularization. (default: True)
        capturable (bool, optional): whether to use the version of the optimizer
            that can be used with CUDA Graphs. (default: False)
        master_weights (bool, optional): whether to maintain FP32 master weights
            in the optimizer with FP16 mixed precision training; currently this can
            only be used with capturable set to True. (default: False)
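
    A capturable configuration used together with ``torch.cuda.amp.GradScaler`` might look like
    the following (illustrative sketch; ``model``, ``loss_fn`` and ``data`` are placeholders)::

        opt = FusedAdam(model.parameters(), lr=1e-3, capturable=True)
        scaler = torch.cuda.amp.GradScaler()
        for inputs, targets in data:
            opt.zero_grad(set_to_none=True)
            with torch.autocast("cuda", dtype=torch.float16):
                loss = loss_fn(model(inputs), targets)
            scaler.scale(loss).backward()
            scaler.step(opt)    # with capturable=True the unscale and inf check happen in the fused kernel
            scaler.update()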

    .. _Adam - A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        bias_correction=True,
        betas=(0.9, 0.999),
        eps=1e-8,
        adam_w_mode=True,
        weight_decay=0.0,
        amsgrad=False,
        capturable=False,
        master_weights=False,
    ):
        if amsgrad:
            raise RuntimeError("FusedAdam does not support the AMSGrad variant.")
        if master_weights and not capturable:
            raise RuntimeError("Master weights is currently only supported with the capturable version.")
        log.warning(f"FusedAdam master_weights: {master_weights} capturable: {capturable}")

        # The capturable kernels read the learning rate from device memory, so keep it as a tensor.
        lr = torch.tensor(lr, dtype=torch.float32) if capturable else lr
        defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
        super(FusedAdam, self).__init__(params, defaults)

        self.adam_w_mode = 1 if adam_w_mode else 0
        self.capturable = capturable
        self.master_weights = master_weights
        # FP32 master copies of the parameters are created lazily on the first call to step().
        self.param_groups_master = None

        if capturable:
            # Move per-group hyperparameters that the kernels read onto the parameters' device.
            for idx, group in enumerate(self.param_groups):
                if len(group["params"]) == 0:
                    continue
                device = group["params"][0].device
                for item in ["lr"]:
                    if isinstance(group[item], float):
                        group[item] = torch.tensor(group[item], dtype=torch.float32)
                    self.param_groups[idx][item] = group[item].to(device=device)

        # Lets torch.cuda.amp.GradScaler delegate the unscale and inf check to this optimizer.
        self._step_supports_amp_scaling = True
        self._dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device="cuda")
        self.multi_tensor_adam = tex.multi_tensor_adam
        self.multi_tensor_adam_capturable = tex.multi_tensor_adam_capturable
        self.multi_tensor_adam_capturable_master = tex.multi_tensor_adam_capturable_master

    def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, grad_scaler=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
        """
        if any(p is not None for p in [grads, output_params, scale, grad_norms]):
            raise RuntimeError(
                "FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, "
                "and call step() with no arguments."
            )
        loss = None
        if closure is not None:
            loss = closure()

        if self.param_groups_master is None:
            # Create the master-weight groups lazily, mirroring the structure of param_groups.
            self.param_groups_master = []
            for pg in self.param_groups:
                param_list = pg["params"]
                self.param_groups_master.append(
                    {"params": [p.clone().detach().float() if self.master_weights else None for p in param_list]}
                )

        for group, group_master in zip(self.param_groups, self.param_groups_master):
            if len(group["params"]) == 0:
                continue
            device = group["params"][0].device
            bias_correction = 1 if "bias_correction" in group and group["bias_correction"] else 0
            beta1, beta2 = group["betas"]

            # In capturable mode the step count lives on the device and only advances when no overflow occurred.
            if "step" in group:
                if self.capturable:
                    if not isinstance(group["step"], torch.Tensor):
                        group["step"] = torch.tensor(group["step"], dtype=torch.int32, device=device)
                    group["step"] = group["step"].to(device=device)
                    group["step"] += (self._dummy_overflow_buf != 1).to(torch.int)
                else:
                    group["step"] += 1
            else:
                group["step"] = 1 if not self.capturable else torch.tensor([1], dtype=torch.int, device=device)

            if self.capturable:
                if not isinstance(group["lr"], torch.Tensor):
                    group["lr"] = torch.tensor(group["lr"], dtype=torch.float32, device=device)
                group["lr"] = group["lr"].to(device=device)

            # Bucket gradients, parameters, and optimizer state by parameter dtype for the fused kernels.
            g_16, p_16, m_16, v_16 = [], [], [], []
            g_bf, p_bf, m_bf, v_bf = [], [], [], []
            g_32, p_32, m_32, v_32 = [], [], [], []
            p_16_master, p_bf_master, p_32_master = [], [], []

            for p, p_master in zip(group["params"], group_master["params"]):
                if p.grad is None:
                    continue
                if p.grad.data.is_sparse:
                    raise RuntimeError("FusedAdam does not support sparse gradients, please consider SparseAdam instead")

                state = self.state[p]
                if len(state) == 0:
                    # Exponential moving averages of the gradient and its square, kept in FP32.
                    state["exp_avg"] = torch.zeros_like(p.data).float()
                    state["exp_avg_sq"] = torch.zeros_like(p.data).float()

                if p.dtype == torch.float16:
                    if self.master_weights:
                        p_16_master.append(p_master.data)
                    g_16.append(p.grad.data)
                    p_16.append(p.data)
                    m_16.append(state["exp_avg"])
                    v_16.append(state["exp_avg_sq"])
                elif p.dtype == torch.bfloat16:
                    if self.master_weights:
                        p_bf_master.append(p_master.data)
                    g_bf.append(p.grad)
                    p_bf.append(p)
                    m_bf.append(state["exp_avg"])
                    v_bf.append(state["exp_avg_sq"])
                elif p.dtype == torch.float32:
                    if self.master_weights:
                        p_32_master.append(p_master.data)
                    g_32.append(p.grad.data)
                    p_32.append(p.data)
                    m_32.append(state["exp_avg"])
                    v_32.append(state["exp_avg_sq"])
                else:
                    raise RuntimeError("FusedAdam only supports fp16, bf16 and fp32.")

            if self.capturable:
                # Check for gradient overflow: a non-zero flag makes the capturable kernels skip the update,
                # which keeps a captured CUDA graph valid even on inf/nan steps.
                found_inf = (
                    grad_scaler._check_inf_per_device(self)[device]
                    if grad_scaler is not None
                    else torch.zeros((1,), device=device)
                )
                self._dummy_overflow_buf.copy_(found_inf)

                # Gradient unscale factor, applied inside the fused kernel.
                scale, inv_scale = None, None
                if grad_scaler:
                    scale = grad_scaler._get_scale_async()
                    inv_scale = scale.double().reciprocal().float()
                else:
                    scale = torch.ones((1,), device=device)
                    inv_scale = torch.ones((1,), device=device)

                for g, p_l, m, v, p_m in (
                    (g_16, p_16, m_16, v_16, p_16_master),
                    (g_bf, p_bf, m_bf, v_bf, p_bf_master),
                    (g_32, p_32, m_32, v_32, p_32_master),
                ):
                    if len(g) == 0:
                        continue
                    if self.master_weights:
                        op = self.multi_tensor_adam_capturable_master
                        tensor_lists = [g, p_l, m, v, p_m]
                    else:
                        op = self.multi_tensor_adam_capturable
                        tensor_lists = [g, p_l, m, v]
                    transformer_engine.pytorch.optimizers.multi_tensor_applier(
                        op,
                        self._dummy_overflow_buf,
                        tensor_lists,
                        group["lr"],
                        beta1,
                        beta2,
                        group["eps"],
                        group["step"],
                        self.adam_w_mode,
                        bias_correction,
                        group["weight_decay"],
                        inv_scale,
                    )
            else:
                for g, p_l, m, v in ((g_16, p_16, m_16, v_16), (g_bf, p_bf, m_bf, v_bf), (g_32, p_32, m_32, v_32)):
                    if len(g) == 0:
                        continue
                    transformer_engine.pytorch.optimizers.multi_tensor_applier(
                        self.multi_tensor_adam,
                        self._dummy_overflow_buf,
                        [g, p_l, m, v],
                        group["lr"],
                        beta1,
                        beta2,
                        group["eps"],
                        group["step"],
                        self.adam_w_mode,
                        bias_correction,
                        group["weight_decay"],
                    )

        return loss

    def load_state_dict(self, state_dict):
        super().load_state_dict(state_dict)
        for group in self.param_groups:
            if self.capturable:
                # The capturable kernels expect the learning rate as a CUDA tensor.
                group["lr"] = (
                    group["lr"].cuda()
                    if isinstance(group["lr"], torch.Tensor)
                    else torch.tensor(group["lr"], dtype=torch.float32).cuda()
                )
            if "step" in group:
                if self.capturable:
                    # Broadcast the step counter from rank 0 so all ranks resume with the same on-device value.
                    if distributed.get_rank() == 0:
                        step = (
                            group["step"].cuda()
                            if isinstance(group["step"], torch.Tensor)
                            else torch.tensor([group["step"]], dtype=torch.int32).cuda()
                        )
                    else:
                        step = torch.zeros(1, dtype=torch.int32).cuda()
                    distributed.broadcast(step, 0)
                    group["step"] = step
                elif isinstance(group["step"], torch.Tensor):
                    group["step"] = group["step"].item()
            for p in group["params"]:
                state = self.state[p]
                if "exp_avg" in state:
                    # Optimizer state is kept in FP32 regardless of how it was serialized.
                    state["exp_avg"] = state["exp_avg"].float()
                    state["exp_avg_sq"] = state["exp_avg_sq"].float()