{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 6.8 长短期记忆LSTM\n",
"## 6.8.2 读取数据集"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1.0.0 cpu\n"
]
}
],
"source": [
"import numpy as np\n",
"import torch\n",
"from torch import nn, optim\n",
"import torch.nn.functional as F\n",
"\n",
"import sys\n",
"sys.path.append(\"..\") \n",
"import d2lzh_pytorch as d2l\n",
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
"\n",
"(corpus_indices, char_to_idx, idx_to_char, vocab_size) = d2l.load_data_jay_lyrics()\n",
"\n",
"print(torch.__version__, device)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6.8.3 从零开始实现\n",
"### 6.8.3.1 初始化模型参数"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"will use cpu\n"
]
}
],
"source": [
"num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size\n",
"print('will use', device)\n",
"\n",
"def get_params():\n",
" def _one(shape):\n",
" ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)\n",
" return torch.nn.Parameter(ts, requires_grad=True)\n",
" def _three():\n",
" return (_one((num_inputs, num_hiddens)),\n",
" _one((num_hiddens, num_hiddens)),\n",
" torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32), requires_grad=True))\n",
" \n",
" W_xi, W_hi, b_i = _three() # 输入门参数\n",
" W_xf, W_hf, b_f = _three() # 遗忘门参数\n",
" W_xo, W_ho, b_o = _three() # 输出门参数\n",
" W_xc, W_hc, b_c = _three() # 候选记忆细胞参数\n",
" \n",
" # 输出层参数\n",
" W_hq = _one((num_hiddens, num_outputs))\n",
" b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True)\n",
" return nn.ParameterList([W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q])"
]
},
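{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (a sketch added here, not part of the original notebook), `get_params` should return 14 parameter tensors: three for each of the four gates plus the output-layer weight and bias."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"params = get_params()\n",
"print(len(params))       # expect 14: 4 gates * 3 tensors + W_hq + b_q\n",
"print(params[0].shape)   # W_xi: (vocab_size, num_hiddens)\n",
"print(params[-1].shape)  # b_q: (vocab_size,)"
]
},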
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6.8.4 定义模型"
]
},
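{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, the `lstm` function below implements the standard LSTM updates (this summary is added as an aid; $\\sigma$ is the sigmoid function and $\\odot$ denotes elementwise multiplication):\n",
"\n",
"$$\\begin{aligned}\n",
"\\boldsymbol{I}_t &= \\sigma(\\boldsymbol{X}_t \\boldsymbol{W}_{xi} + \\boldsymbol{H}_{t-1} \\boldsymbol{W}_{hi} + \\boldsymbol{b}_i),\\\\\n",
"\\boldsymbol{F}_t &= \\sigma(\\boldsymbol{X}_t \\boldsymbol{W}_{xf} + \\boldsymbol{H}_{t-1} \\boldsymbol{W}_{hf} + \\boldsymbol{b}_f),\\\\\n",
"\\boldsymbol{O}_t &= \\sigma(\\boldsymbol{X}_t \\boldsymbol{W}_{xo} + \\boldsymbol{H}_{t-1} \\boldsymbol{W}_{ho} + \\boldsymbol{b}_o),\\\\\n",
"\\tilde{\\boldsymbol{C}}_t &= \\tanh(\\boldsymbol{X}_t \\boldsymbol{W}_{xc} + \\boldsymbol{H}_{t-1} \\boldsymbol{W}_{hc} + \\boldsymbol{b}_c),\\\\\n",
"\\boldsymbol{C}_t &= \\boldsymbol{F}_t \\odot \\boldsymbol{C}_{t-1} + \\boldsymbol{I}_t \\odot \\tilde{\\boldsymbol{C}}_t,\\\\\n",
"\\boldsymbol{H}_t &= \\boldsymbol{O}_t \\odot \\tanh(\\boldsymbol{C}_t).\n",
"\\end{aligned}$$\n",
"\n",
"Both parts of the state, the hidden state $\\boldsymbol{H}$ and the memory cell $\\boldsymbol{C}$, are initialized to zeros:"
]
},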
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def init_lstm_state(batch_size, num_hiddens, device):\n",
" return (torch.zeros((batch_size, num_hiddens), device=device), \n",
" torch.zeros((batch_size, num_hiddens), device=device))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def lstm(inputs, state, params):\n",
" [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params\n",
" (H, C) = state\n",
" outputs = []\n",
" for X in inputs:\n",
" I = torch.sigmoid(torch.matmul(X, W_xi) + torch.matmul(H, W_hi) + b_i)\n",
" F = torch.sigmoid(torch.matmul(X, W_xf) + torch.matmul(H, W_hf) + b_f)\n",
" O = torch.sigmoid(torch.matmul(X, W_xo) + torch.matmul(H, W_ho) + b_o)\n",
" C_tilda = torch.tanh(torch.matmul(X, W_xc) + torch.matmul(H, W_hc) + b_c)\n",
" C = F * C + I * C_tilda\n",
" H = O * C.tanh()\n",
" Y = torch.matmul(H, W_hq) + b_q\n",
" outputs.append(Y)\n",
" return outputs, (H, C)"
]
},
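{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal shape check (a sketch, not in the original notebook): `inputs` is a list of `num_steps` tensors of shape `(batch_size, vocab_size)`, and `lstm` returns one output of shape `(batch_size, vocab_size)` per step together with the final `(H, C)` state."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical toy dimensions: 5 time steps, batch size 2 (values are arbitrary)\n",
"state = init_lstm_state(2, num_hiddens, device)\n",
"inputs = [torch.zeros(2, vocab_size, device=device) for _ in range(5)]\n",
"outputs, (H, C) = lstm(inputs, state, get_params())\n",
"print(len(outputs), outputs[0].shape, H.shape, C.shape)"
]
},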
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 6.8.4.1 训练模型并创作歌词"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"num_epochs, num_steps, batch_size, lr, clipping_theta = 160, 35, 32, 1e2, 1e-2\n",
"pred_period, pred_len, prefixes = 40, 50, ['分开', '不分开']"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 40, perplexity 211.416571, time 1.37 sec\n",
" - 分开 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我\n",
" - 不分开 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我 我不的我\n",
"epoch 80, perplexity 67.048346, time 1.35 sec\n",
" - 分开 我想你你 我不要再想 我不要这我 我不要这我 我不要这我 我不要这我 我不要这我 我不要这我 我不\n",
" - 不分开 我想你你想你 我不要这不样 我不要这我 我不要这我 我不要这我 我不要这我 我不要这我 我不要这我\n",
"epoch 120, perplexity 15.552743, time 1.36 sec\n",
" - 分开 我想带你的微笑 像这在 你想我 我想你 说你我 说你了 说给怎么么 有你在空 你在在空 在你的空 \n",
" - 不分开 我想要你已经堡 一样样 说你了 我想就这样着你 不知不觉 你已了离开活 后知后觉 我该了这生活 我\n",
"epoch 160, perplexity 4.274031, time 1.35 sec\n",
" - 分开 我想带你 你不一外在半空 我只能够远远著她 这些我 你想我难难头 一话看人对落我一望望我 我不那这\n",
" - 不分开 我想你这生堡 我知好烦 你不的节我 后知后觉 我该了这节奏 后知后觉 又过了一个秋 后知后觉 我该\n"
]
}
],
"source": [
"d2l.train_and_predict_rnn(lstm, get_params, init_lstm_state, num_hiddens,\n",
" vocab_size, device, corpus_indices, idx_to_char,\n",
" char_to_idx, False, num_epochs, num_steps, lr,\n",
" clipping_theta, batch_size, pred_period, pred_len,\n",
" prefixes)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6.8.5 简洁实现"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 40, perplexity 1.020401, time 1.54 sec\n",
" - 分开始想担 妈跟我 一定是我妈在 因为分手前那句抱歉 在感动 穿梭时间的画面的钟 从反方向开始移动 回到\n",
" - 不分开始想像 妈跟我 我将我的寂寞封闭 然后在这里 不限日期 然后将过去 慢慢温习 让我爱上你 那场悲剧 \n",
"epoch 80, perplexity 1.011164, time 1.34 sec\n",
" - 分开始想担 你的 从前的可爱女人 温柔的让我心疼的可爱女人 透明的让我感动的可爱女人 坏坏的让我疯狂的可\n",
" - 不分开 我满了 让我疯狂的可爱女人 漂亮的让我面红的可爱女人 温柔的让我心疼的可爱女人 透明的让我感动的可\n",
"epoch 120, perplexity 1.025348, time 1.39 sec\n",
" - 分开始共渡每一天 手牵手 一步两步三步四步望著天 看星星 一颗两颗三颗四颗 连成线背著背默默许下心愿 看\n",
" - 不分开 我不懂 说了没用 他的笑容 有何不同 在你心中 我不再受宠 我的天空 是雨是风 还是彩虹 你在操纵\n",
"epoch 160, perplexity 1.017492, time 1.42 sec\n",
" - 分开始乡相信命运 感谢地心引力 让我碰到你 漂亮的让我面红的可爱女人 温柔的让我心疼的可爱女人 透明的让\n",
" - 不分开 我不能再想 我不 我不 我不能 爱情走的太快就像龙卷风 不能承受我已无处可躲 我不要再想 我不要再\n"
]
}
],
"source": [
"lr = 1e-2 # 注意调整学习率\n",
"lstm_layer = nn.LSTM(input_size=vocab_size, hidden_size=num_hiddens)\n",
"model = d2l.RNNModel(lstm_layer, vocab_size)\n",
"d2l.train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,\n",
" corpus_indices, idx_to_char, char_to_idx,\n",
" num_epochs, num_steps, lr, clipping_theta,\n",
" batch_size, pred_period, pred_len, prefixes)"
]
},
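{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a small illustrative sketch (not from the original notebook): `nn.LSTM` consumes input of shape `(num_steps, batch_size, input_size)` and returns the per-step hidden states together with a final `(h, c)` pair, each of shape `(num_layers, batch_size, num_hiddens)` for this single-layer LSTM."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Pass random input through the trained lstm_layer and inspect the shapes\n",
"# (values are arbitrary; this does not retrain anything)\n",
"X = torch.rand(num_steps, batch_size, vocab_size, device=device)\n",
"output, (h, c) = lstm_layer(X)\n",
"print(output.shape, h.shape, c.shape)"
]
}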
],
"metadata": {
"kernelspec": {
"display_name": "Python [default]",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}