From a80fdc9a380360eccde3109a1cdccd9b1e9bc08a Mon Sep 17 00:00:00 2001
From: Soh Yee Lee <51136409+SohYeeLee@users.noreply.github.com>
Date: Wed, 12 Jun 2019 17:46:05 +0200
Subject: [PATCH] Add files via upload

---
 3_MonteCarlo_Approx Integration.ipynb | 117 ++++++++++++++++++++++++++
 1 file changed, 117 insertions(+)
 create mode 100644 3_MonteCarlo_Approx Integration.ipynb

diff --git a/3_MonteCarlo_Approx Integration.ipynb b/3_MonteCarlo_Approx Integration.ipynb
new file mode 100644
index 0000000..eb00b04
--- /dev/null
+++ b/3_MonteCarlo_Approx Integration.ipynb
@@ -0,0 +1,117 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This exercise shows how to use Monte Carlo sampling to approximate the integrals $F = \\int_{0}^{1} \\sin(x)\\, dx$ and $F = \\int_{2}^{4} \\sin(x)\\, dx$. The code below evaluates the second one, either by sampling directly in $[2, 4]$ or by drawing samples in $[0, 1]$ and transforming them to $[2, 4]$."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "import chaospy as cp\n",
+ "from matplotlib.pyplot import *\n",
+ "\n",
+ "np.random.seed(11)\n",
+ "# declare function to integrate via Monte Carlo sampling\n",
+ "func = lambda x: np.sin(x)\n",
+ "\n",
+ "# declare vector with number of samples\n",
+ "N = [10, 100, 1000, 10000, 100000, 1000000, 10000000] #, 10000000, 100000000\n",
+ "\n",
+ "I_hat = np.zeros(len(N))\n",
+ "I = np.cos(2) - np.cos(4)  # exact value of the integral over [2, 4]\n",
+ "est_std_dev = np.zeros(len(N))\n",
+ "abs_err = np.zeros(len(N))\n",
+ "\n",
+ "log_dec = np.zeros(len(N))\n",
+ "sqrt_dec = np.zeros(len(N))\n",
+ "linear_dec = np.zeros(len(N))\n",
+ "quad_dec = np.zeros(len(N))\n",
+ "\n",
+ "# define new bounds\n",
+ "a = 2\n",
+ "b = 4\n",
+ "\n",
+ "zeroone_sampling = True\n",
+ "# for each N, perform Monte Carlo integration\n",
+ "for i, n in enumerate(N):\n",
+ "\n",
+ "    if zeroone_sampling:\n",
+ "        # draw uniform samples in [0, 1]\n",
+ "        distr_w = cp.Uniform(0, 1)\n",
+ "        samples_01 = distr_w.sample(size=n)\n",
+ "        # transform to [a, b]\n",
+ "        samples = a + (b-a)*samples_01\n",
+ "        # approximate the integral as the expectation of the underlying function w.r.t. the uniform distribution;\n",
+ "        # the samples in [a, b] are written in terms of the samples generated in [0, 1]\n",
+ "        I_hat[i] = (b-a)*np.mean(func(samples))\n",
+ "        # approximate the standard deviation of the estimator, (b-a)*std(f)/sqrt(n)\n",
+ "        est_std_dev[i] = (b-a)*np.std(func(samples), ddof=1)/np.sqrt(n)\n",
+ "    else:\n",
+ "        distr_w = cp.Uniform(2, 4)\n",
+ "        samples = distr_w.sample(size=n)\n",
+ "        I_hat[i] = (b-a)*np.mean(func(samples))\n",
+ "        est_std_dev[i] = (b-a)*np.std(func(samples), ddof=1)/np.sqrt(n)\n",
+ "\n",
+ "    abs_err[i] = np.abs(I_hat[i] - I)\n",
+ "\n",
+ "    if i == 0:\n",
+ "        log_dec[0] = est_std_dev[0]\n",
+ "        sqrt_dec[0] = est_std_dev[0]\n",
+ "        linear_dec[0] = est_std_dev[0]\n",
+ "        quad_dec[0] = est_std_dev[0]\n",
+ "    else:\n",
+ "        log_dec[i] = log_dec[i-1] / np.log(10)\n",
+ "        sqrt_dec[i] = sqrt_dec[i-1] / np.sqrt(10)\n",
+ "        linear_dec[i] = linear_dec[i-1] / 10\n",
+ "        quad_dec[i] = quad_dec[i-1] / (10**2)\n",
+ "\n",
+ "    print(I, I_hat[i], abs_err[i], est_std_dev[i])\n",
+ "\n",
+ "# plot results\n",
+ "loglog(N, est_std_dev, 'r', label='standard error decay')\n",
+ "loglog(N, abs_err, 'b', label=r'$\\epsilon$')\n",
+ "loglog(N, log_dec, 'k--', label=r'log')\n",
+ "loglog(N, sqrt_dec, 'k-*', label=r'sqrt')\n",
+ "#loglog(N, linear_dec, 'k-.', label=r'linear')\n",
+ "#loglog(N, quad_dec, 'k-', label=r'quadratic')\n",
+ "legend(loc='best')\n",
+ "show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
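For reference, here is a minimal numpy-only sketch of the estimator used in the notebook and of its standard error (a hypothetical standalone check, not part of the patch; it draws samples with numpy instead of chaospy):

import numpy as np

np.random.seed(11)
a, b, n = 2.0, 4.0, 100000
x = np.random.uniform(a, b, size=n)          # x_i ~ U(a, b)
fx = np.sin(x)
I_hat = (b - a) * fx.mean()                  # (b-a) * mean(sin(x_i)) approximates the integral
se = (b - a) * fx.std(ddof=1) / np.sqrt(n)   # standard error decays like 1/sqrt(n)
print(I_hat, np.cos(2) - np.cos(4), se)      # estimate, exact value, standard error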