"""The Sample distribution class."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow_probability.python.internal.backend.numpy.compat import v2 as tf

from tensorflow_probability.python.distributions._numpy import distribution as distribution_lib
from tensorflow_probability.python.distributions._numpy import kullback_leibler
from tensorflow_probability.python.internal._numpy import assert_util
from tensorflow_probability.python.internal._numpy import dtype_util
from tensorflow_probability.python.internal._numpy import prefer_static
from tensorflow_probability.python.internal._numpy import tensor_util
from tensorflow_probability.python.internal._numpy import tensorshape_util


def _make_summary_statistic(attr):
  """Factory for implementing summary statistics, eg, mean, stddev, mode."""

  def _fn(self, **kwargs):
    """Implements summary statistic, eg, mean, stddev, mode."""
    sample_shape = prefer_static.reshape(self.sample_shape, shape=[-1])
    x = getattr(self.distribution, attr)(**kwargs)
    # Reshape the base statistic to
    # `batch_shape + ones_like(sample_shape) + base_event_shape` ...
    shape = prefer_static.concat([
        self.batch_shape_tensor(),
        prefer_static.ones(prefer_static.rank_from_shape(sample_shape),
                           dtype=sample_shape.dtype),
        self.distribution.event_shape_tensor(),
    ], axis=0)
    x = tf.reshape(x, shape=shape)
    # ... then broadcast it across the `sample_shape` dims.
    shape = prefer_static.concat([
        self.batch_shape_tensor(),
        sample_shape,
        self.distribution.event_shape_tensor(),
    ], axis=0)
    return tf.broadcast_to(x, shape)

  return _fn


class Sample(distribution_lib.Distribution):
  """Sample distribution via independent draws.

  This distribution is useful for reducing over a collection of independent,
  identical draws. It is otherwise identical to the input distribution.

  #### Mathematical Details

  The probability function is,

  ```none
  p(x) = prod{ p(x[i]) : i = 0, ..., (n - 1) }
  ```

  #### Examples

  ```python
  tfd = tfp.distributions

  # Example 1: Five scalar draws.

  s = tfd.Sample(
      tfd.Normal(loc=0, scale=1),
      sample_shape=5)
  x = s.sample()
  # ==> x.shape: [5]

  lp = s.log_prob(x)
  # ==> lp.shape: []
  #     Equivalently: tf.reduce_sum(s.distribution.log_prob(x), axis=0)
  #
  # `Sample.log_prob` computes the per-draw `log_prob`s under the base
  # distribution, then sums over the `Sample.sample_shape` dimensions. In the
  # above example the base `Normal` has scalar events, so only the
  # `sample_shape` dim `0` is summed out.

  # Example 2: `[5, 4]`-draws of a bivariate Normal.

  s = tfd.Sample(
      tfd.Independent(tfd.Normal(loc=tf.zeros([3, 2]), scale=1),
                      reinterpreted_batch_ndims=1),
      sample_shape=[5, 4])
  x = s.sample([6, 1])
  # ==> x.shape: [6, 1, 3, 5, 4, 2]

  lp = s.log_prob(x)
  # ==> lp.shape: [6, 1, 3]
  #
  # `s.log_prob` reduces over the (intrinsic) event dim, i.e., dim `5`, then
  # sums over `s.sample_shape` dims `[3, 4]`, corresponding to shape (slice)
  # `[5, 4]`.
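
  # Example 3: Summary statistics (an illustrative sketch, not from the
  # original examples). Statistics of the base distribution are broadcast
  # across `sample_shape`.

  s = tfd.Sample(
      tfd.Normal(loc=0., scale=1.),
      sample_shape=5)
  m = s.mean()
  # ==> m.shape: [5]   (the scalar base mean, broadcast over `sample_shape`)
  v = s.variance()
  # ==> v.shape: [5]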
  ```

  """

  def __init__(self,
               distribution,
               sample_shape=(),
               validate_args=False,
               name=None):
    """Construct the `Sample` distribution.

    Args:
      distribution: The base distribution instance to transform. Typically an
        instance of `Distribution`.
      sample_shape: `int` scalar or vector `Tensor` representing the shape of a
        single sample.
      validate_args: Python `bool`.  Whether to validate input with asserts.
        If `validate_args` is `False`, and the inputs are invalid,
        correct behavior is not guaranteed.
      name: The name for ops managed by the distribution.
        Default value: `None` (i.e., `'Sample' + distribution.name`).
    """
    parameters = dict(locals())
    with tf.name_scope(name or 'Sample' + distribution.name) as name:
      self._distribution = distribution
      self._sample_shape = tensor_util.convert_nonref_to_tensor(
          sample_shape, dtype_hint=tf.int32, name='sample_shape')
      super(Sample, self).__init__(
          dtype=self._distribution.dtype,
          reparameterization_type=self._distribution.reparameterization_type,
          validate_args=validate_args,
          allow_nan_stats=self._distribution.allow_nan_stats,
          parameters=parameters,
          name=name)

  @property
  def distribution(self):
    return self._distribution

  @property
  def sample_shape(self):
    return self._sample_shape

  def _batch_shape_tensor(self):
    return self.distribution.batch_shape_tensor()

  def _batch_shape(self):
    return self.distribution.batch_shape

  def _event_shape_tensor(self):
    return prefer_static.concat([
        prefer_static.reshape(self.sample_shape, shape=[-1]),
        self.distribution.event_shape_tensor(),
    ], axis=0)

  def _event_shape(self):
    s = tf.get_static_value(self.sample_shape)
    if tensorshape_util.rank(s) == 1:
      sample_shape = tf.TensorShape(s)
    else:
      sample_shape = tensorshape_util.constant_value_as_shape(
          self.sample_shape)
    if (tensorshape_util.rank(sample_shape) is None or
        tensorshape_util.rank(self.distribution.event_shape) is None):
      return tf.TensorShape(None)
    return tensorshape_util.concatenate(sample_shape,
                                        self.distribution.event_shape)

  def _sample_n(self, n, seed, **kwargs):
    sample_shape = prefer_static.reshape(self.sample_shape, shape=[-1])
    fake_sample_ndims = prefer_static.rank_from_shape(sample_shape)
    event_ndims = prefer_static.rank_from_shape(
        self.distribution.event_shape_tensor, self.distribution.event_shape)
    batch_ndims = prefer_static.rank_from_shape(
        self.distribution.batch_shape_tensor, self.distribution.batch_shape)
    # Draw with shape `[n] + sample_shape + batch_shape + event_shape`, then
    # permute to `[n] + batch_shape + sample_shape + event_shape`.
    perm = prefer_static.concat([
        [0],
        prefer_static.range(1 + fake_sample_ndims,
                            1 + fake_sample_ndims + batch_ndims,
                            dtype=tf.int32),
        prefer_static.range(1, 1 + fake_sample_ndims, dtype=tf.int32),
        prefer_static.range(1 + fake_sample_ndims + batch_ndims,
                            1 + fake_sample_ndims + batch_ndims + event_ndims,
                            dtype=tf.int32),
    ], axis=0)
    x = self.distribution.sample(
        prefer_static.concat([[n], sample_shape], axis=0),
        seed=seed,
        **kwargs)
    return tf.transpose(a=x, perm=perm)

  def _log_prob(self, x, **kwargs):
    batch_ndims = prefer_static.rank_from_shape(
        self.distribution.batch_shape_tensor, self.distribution.batch_shape)
    extra_sample_ndims = prefer_static.rank_from_shape(self.sample_shape)
    event_ndims = prefer_static.rank_from_shape(
        self.distribution.event_shape_tensor, self.distribution.event_shape)
    ndims = prefer_static.rank(x)
    # (1) Expand x's dims by left-padding its shape with ones when fewer than
    #     `sample_ndims` leading dims were provided.
    d = ndims - batch_ndims - extra_sample_ndims - event_ndims
    x = tf.reshape(
        x,
        shape=prefer_static.pad(
            prefer_static.shape(x),
            paddings=[[prefer_static.maximum(0, -d), 0]],
            constant_values=1))
    ndims = prefer_static.rank(x)
    sample_ndims = prefer_static.maximum(0, d)
    # (2) Transpose x's dims so the `sample_shape` dims sit just left of the
    #     event dims, where the base distribution expects "sample" dims.
    sample_dims = prefer_static.range(0, sample_ndims)
    batch_dims = prefer_static.range(sample_ndims, sample_ndims + batch_ndims)
    extra_sample_dims = prefer_static.range(
        sample_ndims + batch_ndims,
        sample_ndims + batch_ndims + extra_sample_ndims)
    event_dims = prefer_static.range(
        sample_ndims + batch_ndims + extra_sample_ndims, ndims)
    perm = prefer_static.concat(
        [sample_dims, extra_sample_dims, batch_dims, event_dims], axis=0)
    x = tf.transpose(a=x, perm=perm)
    # (3) Compute x's log_prob under the base distribution.
    lp = self.distribution.log_prob(x, **kwargs)
    # (4) Sum out the `sample_shape` dims.
    axis = prefer_static.range(sample_ndims, sample_ndims + extra_sample_ndims)
    return tf.reduce_sum(lp, axis=axis)

  def _entropy(self, **kwargs):
    # Entropy of `sample_shape` iid draws is the base entropy scaled by the
    # number of draws.
    h = self.distribution.entropy(**kwargs)
    n = prefer_static.reduce_prod(self.sample_shape)
    return tf.cast(n, h.dtype) * h

  _mean = _make_summary_statistic('mean')
  _stddev = _make_summary_statistic('stddev')
  _variance = _make_summary_statistic('variance')
  _mode = _make_summary_statistic('mode')

  def _default_event_space_bijector(self):
    return self.distribution._experimental_default_event_space_bijector()  # pylint: disable=protected-access

  def _parameter_control_dependencies(self, is_init):
    assertions = []

    sample_shape = None  # Memoize concretization.

    # Check valid shape.
    ndims_ = tensorshape_util.rank(self.sample_shape.shape)
    if is_init != (ndims_ is None):
      msg = 'Argument `sample_shape` must be either a scalar or a vector.'
      if ndims_ is not None:
        if ndims_ > 1:
          raise ValueError(msg)
      elif self.validate_args:
        if sample_shape is None:
          sample_shape = tf.convert_to_tensor(self.sample_shape)
        assertions.append(
            assert_util.assert_less(tf.rank(sample_shape), 2, message=msg))

    # Check valid dtype.
    if is_init:  # No xor check because `dtype` cannot change.
      dtype_ = self.sample_shape.dtype
      if dtype_ is None:
        if sample_shape is None:
          sample_shape = tf.convert_to_tensor(self.sample_shape)
        dtype_ = sample_shape.dtype
      if dtype_util.base_dtype(dtype_) not in {tf.int32, tf.int64}:
        raise TypeError(
            'Argument `sample_shape` must be integer type; saw {}.'.format(
                dtype_util.name(dtype_)))

    # Check valid "value".
    if is_init != tensor_util.is_ref(self.sample_shape):
      sample_shape_ = tf.get_static_value(self.sample_shape)
      msg = 'Argument `sample_shape` must have non-negative values.'
      if sample_shape_ is not None:
        if np.any(np.array(sample_shape_) < 0):
          raise ValueError('{} Saw: {}'.format(msg, sample_shape_))
      elif self.validate_args:
        if sample_shape is None:
          sample_shape = tf.convert_to_tensor(self.sample_shape)
        assertions.append(
            assert_util.assert_greater(sample_shape, -1, message=msg))

    return assertions


@kullback_leibler.RegisterKL(Sample, Sample)
def _kl_sample(a, b, name='kl_sample'):
  """Batched KL divergence `KL(a || b)` for Sample distributions.

  We can leverage the fact that:

  ```
  KL(Sample(a) || Sample(b)) = sum(KL(a || b))
  ```

  where the sum is over the `sample_shape` dims.
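
  For example (an illustrative sketch; assumes `tfd = tfp.distributions`):

  ```
  a = tfd.Sample(tfd.Normal(0., 1.), sample_shape=3)
  b = tfd.Sample(tfd.Normal(1., 1.), sample_shape=3)
  kl = tfd.kl_divergence(a, b)
  # ==> kl == 3 * tfd.kl_divergence(tfd.Normal(0., 1.), tfd.Normal(1., 1.))
  ```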

  Args:
    a: Instance of `Sample` distribution.
    b: Instance of `Sample` distribution.
    name: (optional) name to use for created ops.
      Default value: `"kl_sample"`.

  Returns:
    kldiv: Batchwise `KL(a || b)`.

  Raises:
    ValueError: If the `sample_shape` of `a` and `b` don't match.
  """
  assertions = []
  a_ss = tf.get_static_value(a.sample_shape)
  b_ss = tf.get_static_value(b.sample_shape)
  msg = '`a.sample_shape` must be identical to `b.sample_shape`.'
  if a_ss is not None and b_ss is not None:
    if not np.array_equal(a_ss, b_ss):
      raise ValueError(msg)
  elif a.validate_args or b.validate_args:
    assertions.append(
        assert_util.assert_equal(a.sample_shape, b.sample_shape, message=msg))
  with tf.control_dependencies(assertions):
    kl = kullback_leibler.kl_divergence(
        a.distribution, b.distribution, name=name)
    n = prefer_static.reduce_prod(a.sample_shape)
    return tf.cast(n, kl.dtype) * kl