# --- Template: scipy.optimize.minimize with SLSQP, constraints and bounds ---
# NOTE(review): skeleton/example code. The jacobian/hessian bodies are
# placeholders that return None, so running this as-is would fail.

def func(x, m, c):
    # Linear model m*x + c used as the objective.
    # NOTE(review): SLSQP calls the objective as func(x, *args) and expects a
    # scalar; m and c would have to be supplied via ``args`` -- confirm intent.
    return m*x + c

def jacobian(x, m, c):
    return  # first derivative (placeholder -- fill in)

def hessian(x, m, c):
    return  # second derivative (placeholder -- fill in)

constraints = (
    # equality constraint: fun(x) == 0
    dict(type='eq',
         fun=lambda x: x.sum() - 1.0,
         jac=lambda x: np.ones_like(x)  # optional, but recommended
         ),
    # inequality constraint: fun(x) >= 0
    dict(type='ineq',
         fun=lambda x: x.sum() + 1.0,
         jac=lambda x: np.ones_like(x)  # optional, but recommended
         ),
)

# One (min, max) pair per variable; None means unbounded on that side.
bounds = ((0, None), (0, None))

# NOTE(review): ``minimize`` takes the objective as ``fun=``; passing
# ``func=func`` raises TypeError -- verify and rename the keyword.
res = minimize(func=func,
               x0=[0, 0],  # initial guess
               method='SLSQP',
               jac=jacobian,
               # hess=hessian,  (second derivative; not used by SLSQP)
               bounds=bounds,
               constraints=constraints)
def bgd(fun, x0, jac, args=(), learning_rate=0.001, mass=0.9,
        startiter=0, maxiter=1000, callback=None, **kwargs):
    """``scipy.optimize.minimize`` compatible implementation of batch
    gradient descent with momentum.

    Adapted from ``autograd/misc/optimizers.py``.

    Parameters
    ----------
    fun : callable
        Objective function, called as ``fun(x, *args)``.
    x0 : ndarray
        Initial guess.
    jac : callable
        Gradient of ``fun``, called as ``jac(x, *args)``.
    args : tuple, optional
        Extra arguments forwarded to ``fun`` and ``jac``.
    learning_rate : float, optional
        Step size applied to the velocity.
    mass : float, optional
        Momentum coefficient in [0, 1).
    startiter, maxiter : int, optional
        First iteration index and number of iterations to run.
    callback : callable, optional
        Called as ``callback(x)`` each iteration; a truthy return value
        stops the optimization early.

    Returns
    -------
    scipy.optimize.OptimizeResult
    """
    if maxiter < 1:
        raise ValueError("maxiter must be a positive integer")
    x = x0
    velocity = np.zeros_like(x)
    for i in range(startiter, startiter + maxiter):
        g = jac(x, *args)  # fix: forward *args (previously silently ignored)
        if callback and callback(x):
            break
        # Classic momentum: decaying average of past gradients.
        velocity = mass * velocity - (1.0 - mass) * g
        x = x + learning_rate * velocity
    i += 1  # report the number of iterations actually started
    return OptimizeResult(x=x, fun=fun(x, *args), jac=g, nit=i, nfev=i,
                          success=True)
def rmsprop(fun, x0, jac, args=(), learning_rate=0.1, gamma=0.9, eps=1e-8,
            startiter=0, maxiter=1000, callback=None, **kwargs):
    """``scipy.optimize.minimize`` compatible implementation of root mean
    squared prop: See Adagrad paper for details.

    Adapted from ``autograd/misc/optimizers.py``.

    Parameters
    ----------
    fun : callable
        Objective function, called as ``fun(x, *args)``.
    x0 : ndarray
        Initial guess.
    jac : callable
        Gradient of ``fun``, called as ``jac(x, *args)``.
    args : tuple, optional
        Extra arguments forwarded to ``fun`` and ``jac``.
    learning_rate : float, optional
        Base step size.
    gamma : float, optional
        Decay factor of the squared-gradient moving average.
    eps : float, optional
        Fuzz factor avoiding division by zero.
    startiter, maxiter : int, optional
        First iteration index and number of iterations to run.
    callback : callable, optional
        Called as ``callback(x)`` each iteration; a truthy return value
        stops the optimization early.

    Returns
    -------
    scipy.optimize.OptimizeResult
    """
    if maxiter < 1:
        raise ValueError("maxiter must be a positive integer")
    x = x0
    avg_sq_grad = np.ones_like(x)
    for i in range(startiter, startiter + maxiter):
        g = jac(x, *args)  # fix: forward *args (previously silently ignored)
        if callback and callback(x):
            break
        # Exponential moving average of squared gradients.
        avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)
        # Per-coordinate step normalized by the RMS gradient.
        x = x - learning_rate * g / (np.sqrt(avg_sq_grad) + eps)
    i += 1  # report the number of iterations actually started
    return OptimizeResult(x=x, fun=fun(x, *args), jac=g, nit=i, nfev=i,
                          success=True)
def adam(fun, x0, jac, args=(), learning_rate=0.001, beta1=0.9, beta2=0.999,
         eps=1e-8, startiter=0, maxiter=1000, callback=None, **kwargs):
    """``scipy.optimize.minimize`` compatible implementation of ADAM -
    [http://arxiv.org/pdf/1412.6980.pdf].

    Adapted from ``autograd/misc/optimizers.py``.

    Parameters
    ----------
    fun : callable
        Objective function, called as ``fun(x, *args)``.
    x0 : ndarray
        Initial guess.
    jac : callable
        Gradient of ``fun``, called as ``jac(x, *args)``.
    args : tuple, optional
        Extra arguments forwarded to ``fun`` and ``jac``.
    learning_rate : float, optional
        Base step size.
    beta1, beta2 : float, optional
        Decay rates of the first/second moment estimates.
    eps : float, optional
        Fuzz factor avoiding division by zero.
    startiter, maxiter : int, optional
        First iteration index and number of iterations to run.
    callback : callable, optional
        Called as ``callback(x)`` each iteration; a truthy return value
        stops the optimization early.

    Returns
    -------
    scipy.optimize.OptimizeResult
    """
    if maxiter < 1:
        raise ValueError("maxiter must be a positive integer")
    x = x0
    m = np.zeros_like(x)
    v = np.zeros_like(x)
    for i in range(startiter, startiter + maxiter):
        g = jac(x, *args)  # fix: forward *args (previously silently ignored)
        if callback and callback(x):
            break
        m = (1 - beta1) * g + beta1 * m  # first moment estimate.
        v = (1 - beta2) * (g**2) + beta2 * v  # second moment estimate.
        mhat = m / (1 - beta1**(i + 1))  # bias correction.
        vhat = v / (1 - beta2**(i + 1))
        x = x - learning_rate * mhat / (np.sqrt(vhat) + eps)
    i += 1  # report the number of iterations actually started
    return OptimizeResult(x=x, fun=fun(x, *args), jac=g, nit=i, nfev=i,
                          success=True)
# 2D
from scipy.fft import fft2, fftfreq

# fix: the original line called an undefined ``_fft2`` -- use scipy's fft2.
# ``img`` is assumed to be a 2-D image array defined earlier in the file --
# TODO confirm.
img_FT = fft2(img)
# Frequency coordinates for each image axis.
fy = fftfreq(img.shape[0], d=10)  # suppose the spacing between pixels is 10mm
fx = fftfreq(img.shape[1], d=10)
p = 0.2  # probability of failure
n = 1_000
alpha = 0.95  # confidence level
sig = (1 - alpha) / 2  # tail mass on each side

# method 1: Slow but exact (Clopper-Pearson via binomtest)
k = int(p * n)  # observed number of failures proportional to p and n
ci = stats.binomtest(k, n, p).proportion_ci(confidence_level=alpha, method="exact")
low, high = ci.low, ci.high

# method 2: Fast but approximate (quantiles of the binomial itself)
dist = stats.binom(n, p)
low, high = dist.ppf(sig) / n, dist.ppf(1 - sig) / n
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt

# Parameters of the underlying normal distribution
mu, sigma = 0, 1  # mean and standard deviation

# Closed-form mode of a log-normal distribution: exp(mu - sigma^2)
theoretical_mode = np.exp(mu - sigma ** 2)

# Freeze the distribution (shape s = sigma, scale = exp(mu))
lognorm_dist = stats.lognorm(s=sigma, scale=np.exp(mu))

# Evaluate the PDF on a grid and locate its maximum numerically
x = np.linspace(mu, mu + sigma, 1_000)  # grid of x values for the PDF
y = lognorm_dist.pdf(x)  # PDF evaluated at each grid point
mode = x[y.argmax()]  # grid point where the PDF peaks

print(f"Numerical Mode of the log-normal distribution: {mode}")
print(f"Theoretical Mode of the log-normal distribution: {theoretical_mode}")
print(f"Diff: {np.abs(theoretical_mode-mode).round(4)}")

# Plot the PDF with both mode estimates marked
plt.plot(x, y, label='Log-Normal Distribution')
plt.axvline(mode, color='red', linestyle='--', label=f'Numerical Mode: {mode}')
plt.axvline(theoretical_mode, color='green', linestyle='--', label=f'Theoretical Mode: {theoretical_mode}')
plt.legend()
plt.show()
import numpy
import scipy
import scipy.optimize
import torch


def minim(obs, f, p0):
    """Fit function f to observations "obs" starting at p0.

    Runs SciPy's BFGS with the objective value and gradient both computed
    by PyTorch autograd. Returns the ``scipy.optimize.OptimizeResult``.
    """

    def fitfn(pars):
        # NB the requires_grad parameter specifying we want to
        # differentiate wrt to the parameters
        pars = torch.tensor(pars, requires_grad=True)
        y = f(pars)
        # Simple least-squares fitting
        res = ((obs - y) ** 2).sum()
        res.backward()
        # fix: ``Tensor.data`` is legacy and bypasses autograd bookkeeping;
        # ``detach()`` is the supported way to get a grad-free view.
        # Note that the gradient is taken from the "pars" variable.
        return res.detach().cpu().numpy(), pars.grad.detach().cpu().numpy()

    # jac=True: fitfn returns a (value, gradient) pair for BFGS.
    res = scipy.optimize.minimize(fitfn, p0, method="BFGS", jac=True)
    return res


# Points on which observations are made
X = torch.arange(100)


def exfn(p):
    """Example model: sin(p[0]*X) + cos(p[1]*X) evaluated on the grid X."""
    y = torch.sin(p[0] * X) + torch.cos(p[1] * X)
    return y


# Sample observations
yp = exfn(torch.tensor([0.3, 0]))

# Sample run
minim(yp, exfn, numpy.array([0.34, 0.01]))