
Commit

Website Update
Surya-29 committed Apr 1, 2024
1 parent 839d3de commit 3800abe
Showing 24 changed files with 474 additions and 527 deletions.
6 changes: 5 additions & 1 deletion .gitignore
@@ -1,5 +1,9 @@
env
pages/.obsidian/
# pages/temp/
not_so_eff.py
static/404_error.jpg
templates/new.html
gradients.txt
__pycache__

128 changes: 128 additions & 0 deletions _temp.py
@@ -0,0 +1,128 @@
import numpy as np

# XOR truth table: two binary inputs and one binary target
X1 = np.array([0, 0, 1, 1])
X2 = np.array([0, 1, 0, 1])
Y = np.array([0, 1, 1, 0])

np.random.seed(42)

# Weight initialization for a 2-2-1 network (two hidden units, one output unit)
W11 = np.random.randn()
W21 = np.random.randn()
B1 = np.random.randn()

W12 = np.random.randn()
W22 = np.random.randn()
B2 = np.random.randn()

W1_out = np.random.randn()
W2_out = np.random.randn()
B_out = np.random.randn()

learning_rate = 0.5


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def forward_propagation(X1, X2):
    # Hidden unit 1
    H1_input = (X1 * W11) + (X2 * W21) + B1
    H1_output = sigmoid(H1_input)

    # Hidden unit 2
    H2_input = (X1 * W12) + (X2 * W22) + B2
    H2_output = sigmoid(H2_input)

    # Output unit
    Y_pred_input = (H1_output * W1_out) + (H2_output * W2_out) + B_out
    Y_pred = sigmoid(Y_pred_input)

    return Y_pred, H1_output, H2_output


def error_func(Y, Y_pred):
    # Half sum-of-squares error, so its derivative w.r.t. Y_pred is (Y_pred - Y)
    return (1 / 2) * np.sum((Y - Y_pred) ** 2)


def back_propagation(X1, X2, Y_pred, Y, H1_output, H2_output):
    global W11, W21, B1, W12, W22, B2, W1_out, W2_out, B_out

    # Error signal at the output, using sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
    dY_pred = (Y_pred - Y) * Y_pred * (1 - Y_pred)

    # Output-layer gradients
    dW1_out = np.sum(H1_output * dY_pred)
    dW2_out = np.sum(H2_output * dY_pred)
    dB_out = np.sum(dY_pred)

    # Gradients for hidden unit 2
    dH2_output = dY_pred * W2_out
    dH2_input = dH2_output * H2_output * (1 - H2_output)
    dW12 = np.sum(X1 * dH2_input)
    dW22 = np.sum(X2 * dH2_input)
    dB2 = np.sum(dH2_input)

    # Gradients for hidden unit 1
    dH1_output = dY_pred * W1_out
    dH1_input = dH1_output * H1_output * (1 - H1_output)
    dW11 = np.sum(X1 * dH1_input)
    dW21 = np.sum(X2 * dH1_input)
    dB1 = np.sum(dH1_input)

    # Gradient-descent updates
    W11 -= learning_rate * dW11
    W21 -= learning_rate * dW21
    B1 -= learning_rate * dB1
    W12 -= learning_rate * dW12
    W22 -= learning_rate * dW22
    B2 -= learning_rate * dB2
    W1_out -= learning_rate * dW1_out
    W2_out -= learning_rate * dW2_out
    B_out -= learning_rate * dB_out


for epoch in range(2000):
    Y_pred, H1_output, H2_output = forward_propagation(X1, X2)
    if np.all(np.round(Y_pred) == Y):
        print("Weights are:\n")
        print("Hidden Layer 1: W11 =", W11, " W21 =", W21, " B1 =", B1)
        print("Hidden Layer 2: W12 =", W12, " W22 =", W22, " B2 =", B2)
        print("Output Layer: W1_out =", W1_out, " W2_out =", W2_out, " B_out =", B_out)
        break
    back_propagation(X1, X2, Y_pred, Y, H1_output, H2_output)

85 changes: 85 additions & 0 deletions build/blog/Temp blog.html
@@ -0,0 +1,85 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="stylesheet" href="../static/style.css" />
<link rel="stylesheet" href="../static/default.css" />
<link
href="https://fonts.googleapis.com/css?family=Fira Code"
rel="stylesheet"
/>
<link rel="preconnect" href="https://fonts.googleapis.com" />
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
<link
href="https://fonts.googleapis.com/css2?family=Space+Mono:ital,wght@0,400;0,700;1,400;1,700&display=swap"
rel="stylesheet"
/>
<link
href="https://fonts.googleapis.com/css2?family=Dosis:wght@300&display=swap"
rel="stylesheet"
/>
<link
href="https://fonts.googleapis.com/css2?family=Barlow:wght@200;400&display=swap"
rel="stylesheet"
/>
<link
href="https://fonts.googleapis.com/css2?family=Inconsolata:wght@300&display=swap"
rel="stylesheet"
/>
<link
href="https://fonts.googleapis.com/css2?family=Jost:ital,wght@0,100..900;1,100..900&display=swap"
rel="stylesheet"
/>
<link
href="https://iosevka-webfonts.github.io/iosevka/iosevka.css"
rel="stylesheet"
/>
<title></title>
</head>
<body>
<header>
<!-- <a href="/" class="logo">Surya Narayan</a> -->
<a href="/"
><img alt="icon_image" class="logo" src="/static/icon.svg" width="30"
/></a>
<nav class="nav_links">
<ul>
<li><a href="/blog">blog</a></li>
<li><a href="/about">about</a></li>
</ul>
</nav>
</header>


<p class="post_date">1 Apr, 2024</p>
<h1>Testing</h1>
<h2 class="subtitle">None</h2>
<h4 id="sample-title">Sample title</h4>

<blockquote>
<p>sample text</p>
</blockquote>

<ul>
<li>sample code block below</li>
</ul>

<div class="codehilite"><pre><span></span><code><span class="c1"># code </span>
<span class="n">Hello</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="mi">5</span><span class="p">)</span>
<span class="nb">print</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;Hello World </span><span class="si">{</span><span class="n">Hello</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">)</span>
</code></pre></div>

<footer>
<section class="footer">
<a href="/blog/feed.xml">
<img alt="rss feed" src="/static/rss.svg" width="30" />
</a>
<a href="https://github.com/Surya-29">
<img alt="git" src="/static/github.svg" width="35" />
</a>
</section>
</footer>
</body>
</html>
89 changes: 9 additions & 80 deletions build/blog/feed.xml
@@ -1,86 +1,15 @@
<?xml version='1.0' encoding='UTF-8'?>
<feed xmlns="http://www.w3.org/2005/Atom"><id>http://127.0.0.1:5000/</id><title>Itnaava</title><updated>2022-09-17T06:48:41.078155+00:00</updated><author><name>Surya</name><email>[email protected]</email></author><link href="http://127.0.0.1:5000/" rel="alternate"/><generator uri="https://lkiesow.github.io/python-feedgen" version="0.9.0">python-feedgen</generator><entry><id>nn_scratch</id><title>Neural Networks from scratch</title><updated>2022-09-17T06:48:41.160785+00:00</updated><link href="http://127.0.0.1:8000/blog/nn_scratch" rel="alternate"/><summary type="html">&lt;h2 id="introduction"&gt;Introduction&lt;/h2&gt;</summary></entry><entry><id>music_gen</id><title>Music Generation</title><updated>2022-09-17T06:48:41.078155+00:00</updated><link href="http://127.0.0.1:8000/blog/music_gen" rel="alternate"/><summary type="html">&lt;h4 id="project-title"&gt;Project Title&lt;/h4&gt;
<feed xmlns="http://www.w3.org/2005/Atom"><id>http://127.0.0.1:5000/</id><title>Surya Narayan</title><updated>2024-04-01T09:47:46.487438+00:00</updated><author><name>Surya</name><email>[email protected]</email></author><link href="http://127.0.0.1:5000/" rel="alternate"/><generator uri="https://lkiesow.github.io/python-feedgen" version="0.9.0">python-feedgen</generator><entry><id>Temp blog</id><title>Testing</title><updated>2024-04-01T09:47:46.487438+00:00</updated><link href="http://127.0.0.1:8000/blog/Temp blog" rel="alternate"/><summary type="html">&lt;h4 id="sample-title"&gt;Sample title&lt;/h4&gt;

&lt;p&gt;Composition of music by training a model on sheet music.&lt;/p&gt;

&lt;h4 id="project-members"&gt;Project Members&lt;/h4&gt;

&lt;ul&gt;
&lt;li&gt;Surya Narayan AI&amp;amp;DS B&lt;/li&gt;
&lt;/ul&gt;

&lt;h4 id="abstract"&gt;Abstract&lt;/h4&gt;

&lt;p&gt;Our goal is to compose music (more like a short piece of music) by training various deep learning models on a specific instrument's MIDI dataset. We will be looking into both RNN-based (principally LSTM networks) and NLP-based models as a music generation system, but our primary focus will be on the latter.&lt;/p&gt;

&lt;h4 id="introduction"&gt;Introduction&lt;/h4&gt;

&lt;p&gt;Music is the art of ordering tones or sounds in succession and in combination: a temporal relationship that produces a composition of notes with continuity and unity. Predicting the likely next few notes can be thought of as a time-series problem, due to the presence of long-term structural patterns in a music sequence. Because of its sequential nature, we can also treat this as an NLP problem.&lt;/p&gt;

&lt;p&gt;Techniques like Recurrent Neural Networks (RNNs), which incorporate dependencies across time, can be used. Long Short-Term Memory is one such RNN variant that is capable of capturing long-term temporal dependencies in a given music dataset, and it might be a great fit for generating music.&lt;/p&gt;

&lt;p&gt;The Transformer architecture looks really promising not only for NLP problems but also for music generation, since it is fast and has a long effective memory, so extracting long-term structural patterns wouldn't be a problem.&lt;/p&gt;

&lt;h4 id="preprocessing-of-musical-instrument-digital-interface-midi-files"&gt;&lt;strong&gt;Preprocessing of musical instrument digital interface (MIDI) Files&lt;/strong&gt;&lt;/h4&gt;

&lt;p&gt;Using the &lt;a href='https://magenta.tensorflow.org/datasets/'&gt;instrument dataset&lt;/a&gt; (i.e., represented as MIDI files) we have to extract the required features. Python libraries like music21, python-midi, etc. can be used to perform the necessary operations. MIDI files play an important role in extracting information about note sequences, note velocities, and the time component.&lt;/p&gt;
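(A minimal sketch of this extraction step, assuming music21 is installed and that example.mid is a hypothetical local MIDI file; not part of the original post:)

from music21 import converter, note, chord

score = converter.parse("example.mid")  # hypothetical local MIDI file
tokens = []
for element in score.flatten().notes:
    if isinstance(element, note.Note):
        tokens.append(str(element.pitch))                         # single note, e.g. "C4"
    elif isinstance(element, chord.Chord):
        tokens.append(".".join(str(p) for p in element.pitches))  # chord as joined pitches
print(tokens[:10])  # first few extracted note tokens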

&lt;h4 id="model-training"&gt;&lt;strong&gt;Model Training&lt;/strong&gt;&lt;/h4&gt;
&lt;blockquote&gt;
&lt;p&gt;sample text&lt;/p&gt;
&lt;/blockquote&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;&lt;u&gt;RNN based approach&lt;/u&gt;:&lt;/p&gt;

&lt;p&gt;Long Short-Term Memory (LSTM), a special RNN variant, will be used, since traditional RNN-based models are not able to retain information for long periods of time (a minimal sketch follows this list).&lt;/p&gt;

&lt;p&gt;Image from &lt;a href='https://towardsdatascience.com/neural-networks-for-music-generation-97c983b50204?gi=57ecd2161d78'&gt;article&lt;/a&gt; &lt;/p&gt;

&lt;p&gt;&lt;a href="https://arxiv.org/pdf/1909.09586.pdf"&gt;Link&lt;/a&gt;: a brief on LSTM architecture and function.&lt;/p&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;u&gt;Language models based approach&lt;/u&gt;&lt;/p&gt;

&lt;p&gt;GPT is an architecture based on Transformer decoders stacked together. The Transformer is a sequence model that leverages self-attention and has already shown impressive results on generation tasks involving long-range dependencies. &lt;/p&gt;

&lt;p&gt;It is essentially the vanilla Transformer model with its encoder block and cross-attention mechanism stripped away, so that it can perform more efficiently on unsupervised tasks. This makes it well suited for music representation.&lt;/p&gt;

&lt;p&gt;Source from &lt;a href='https://towardsdatascience.com/neural-networks-for-music-generation-97c983b50204?gi=57ecd2161d78'&gt;article&lt;/a&gt; &lt;/p&gt;

&lt;p&gt;Image from &lt;a href='https://towardsdatascience.com/creating-a-pop-music-generator-with-the-transformer-5867511b382a?gi=d1154441bcd7'&gt;article&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Apart from the GPT language &lt;code&gt;model&lt;/code&gt; we will also try to implement this approach with various other language models like BERT, GPT-2, etc. &lt;/p&gt;&lt;/li&gt;
&lt;li&gt;sample code block below&lt;/li&gt;
&lt;/ul&gt;
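(A minimal Keras sketch of the LSTM approach described in the list above, assuming TensorFlow is installed and that X_windows / y_next are hypothetical integer-encoded note arrays produced by the preprocessing step; not part of the original post:)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

vocab_size = 128  # e.g. MIDI pitch values 0-127
model = Sequential([
    Embedding(vocab_size, 64),                # map note ids to dense vectors
    LSTM(128),                                # captures long-term temporal structure
    Dense(vocab_size, activation="softmax"),  # distribution over the next note
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
# model.fit(X_windows, y_next, epochs=10)     # hypothetical training arrays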

&lt;div class="codehilite"&gt;&lt;pre&gt;&lt;span&gt;&lt;/span&gt;&lt;code&gt;&lt;span class="nd"&gt;@app&lt;/span&gt;&lt;span class="o"&gt;.&lt;/span&gt;&lt;span class="n"&gt;route&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="s2"&gt;&amp;quot;/blog&amp;quot;&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
&lt;span class="k"&gt;def&lt;/span&gt; &lt;span class="nf"&gt;blog_page&lt;/span&gt;&lt;span class="p"&gt;():&lt;/span&gt;
&lt;span class="k"&gt;global&lt;/span&gt; &lt;span class="n"&gt;d&lt;/span&gt;&lt;span class="p"&gt;;&lt;/span&gt;
&lt;span class="n"&gt;dir_lis&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;list_dir&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="s1"&gt;&amp;#39;pages/blog&amp;#39;&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
&lt;span class="n"&gt;d&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;{}&lt;/span&gt;
&lt;span class="k"&gt;for&lt;/span&gt; &lt;span class="n"&gt;i&lt;/span&gt; &lt;span class="ow"&gt;in&lt;/span&gt; &lt;span class="n"&gt;dir_lis&lt;/span&gt;&lt;span class="p"&gt;:&lt;/span&gt;
&lt;span class="n"&gt;temp&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt;&lt;span class="n"&gt;article_info&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="n"&gt;md_to_html&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="s2"&gt;&amp;quot;pages/blog/&amp;quot;&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="n"&gt;i&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
&lt;span class="n"&gt;article_info&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="s1"&gt;&amp;#39;url&amp;#39;&lt;/span&gt;&lt;span class="p"&gt;]&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="s2"&gt;&amp;quot;/&amp;quot;&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="n"&gt;article_info&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="s1"&gt;&amp;#39;slug&amp;#39;&lt;/span&gt;&lt;span class="p"&gt;]&lt;/span&gt;
&lt;span class="k"&gt;if&lt;/span&gt; &lt;span class="n"&gt;i&lt;/span&gt;&lt;span class="p"&gt;[:&lt;/span&gt;&lt;span class="o"&gt;-&lt;/span&gt;&lt;span class="mi"&gt;3&lt;/span&gt;&lt;span class="p"&gt;]&lt;/span&gt; &lt;span class="o"&gt;==&lt;/span&gt; &lt;span class="n"&gt;article_info&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="s1"&gt;&amp;#39;slug&amp;#39;&lt;/span&gt;&lt;span class="p"&gt;]:&lt;/span&gt;
&lt;span class="n"&gt;d&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="n"&gt;article_info&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="s1"&gt;&amp;#39;title&amp;#39;&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="p"&gt;[&lt;/span&gt;
&lt;span class="n"&gt;article_info&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="s1"&gt;&amp;#39;date&amp;#39;&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt;&lt;span class="o"&gt;+&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="n"&gt;article_info&lt;/span&gt;&lt;span class="p"&gt;[&lt;/span&gt;&lt;span class="s1"&gt;&amp;#39;url&amp;#39;&lt;/span&gt;&lt;span class="p"&gt;]]&lt;/span&gt;
&lt;span class="k"&gt;return&lt;/span&gt; &lt;span class="n"&gt;render_template&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="s1"&gt;&amp;#39;blog.html&amp;#39;&lt;/span&gt;&lt;span class="p"&gt;,&lt;/span&gt; &lt;span class="n"&gt;file_dict&lt;/span&gt;&lt;span class="o"&gt;=&lt;/span&gt;&lt;span class="n"&gt;d&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;

&lt;h4 id="references"&gt;References&lt;/h4&gt;

&lt;ul&gt;
&lt;li&gt;&lt;p&gt;&lt;strong&gt;Dataset&lt;/strong&gt; :&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;a href="https://magenta.tensorflow.org/datasets/groove"&gt;https://magenta.tensorflow.org/datasets/groove&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://magenta.tensorflow.org/datasets/maestro"&gt;https://magenta.tensorflow.org/datasets/maestro&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://magenta.tensorflow.org/datasets/nsynth"&gt;https://magenta.tensorflow.org/datasets/nsynth&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;&lt;/li&gt;
&lt;li&gt;&lt;p&gt;&lt;strong&gt;Articles and Research Papers :&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;a href="https://towardsdatascience.com/creating-a-pop-music-generator-with-the-transformer-5867511b382a?gi=d1154441bcd7"&gt;https://towardsdatascience.com/creating-a-pop-music-generator-with-the-transformer-5867511b382a?gi=d1154441bcd7&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;RNN Architecture : &lt;a href="https://karpathy.github.io/2015/05/21/rnn-effectiveness/"&gt;https://karpathy.github.io/2015/05/21/rnn-effectiveness/&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://arxiv.org/ftp/arxiv/papers/1908/1908.01080.pdf"&gt;https://arxiv.org/ftp/arxiv/papers/1908/1908.01080.pdf&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href="https://medium.com/artists-and-machine-intelligence/neural-nets-for-generating-music-f46dffac21c0"&gt;https://medium.com/artists-and-machine-intelligence/neural-nets-for-generating-music-f46dffac21c0&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;Detailed working on LSTM networks : &lt;a href="http://www.bioinf.jku.at/publications/older/2604.pdf"&gt;http://www.bioinf.jku.at/publications/older/2604.pdf&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;Transformer Architecture : &lt;a href="https://jalammar.github.io/illustrated-transformer/"&gt;https://jalammar.github.io/illustrated-transformer/&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;Research articles by magenta : &lt;a href="https://magenta.tensorflow.org/research/"&gt;https://magenta.tensorflow.org/research/&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;&lt;/li&gt;
&lt;/ul&gt;</summary></entry></feed>
&lt;div class="codehilite"&gt;&lt;pre&gt;&lt;span&gt;&lt;/span&gt;&lt;code&gt;&lt;span class="c1"&gt;# code &lt;/span&gt;
&lt;span class="n"&gt;Hello&lt;/span&gt; &lt;span class="o"&gt;=&lt;/span&gt; &lt;span class="nb"&gt;int&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="mi"&gt;5&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
&lt;span class="nb"&gt;print&lt;/span&gt;&lt;span class="p"&gt;(&lt;/span&gt;&lt;span class="sa"&gt;f&lt;/span&gt;&lt;span class="s2"&gt;&amp;quot;Hello World &lt;/span&gt;&lt;span class="si"&gt;{&lt;/span&gt;&lt;span class="n"&gt;Hello&lt;/span&gt;&lt;span class="si"&gt;}&lt;/span&gt;&lt;span class="s2"&gt;&amp;quot;&lt;/span&gt;&lt;span class="p"&gt;)&lt;/span&gt;
&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;</summary></entry></feed>
