How can I run this on Python 3? #10

Open
selenewang opened this issue Oct 31, 2017 · 3 comments

@selenewang

RuntimeError Traceback (most recent call last)
in <module>()
----> 1 loss_full= train_early_stopping(64, X_train, y_train, X_test, y_test, word_attn, sent_attn, word_optmizer, sent_optimizer, criterion, 5000, 1000, 50)

in train_early_stopping(mini_batch_size, X_train, y_train, X_test, y_test, word_attn_model, sent_attn_model, word_attn_optimiser, sent_attn_optimiser, loss_criterion, num_epoch, print_val_loss_every, print_loss_every)
13 try:
14 tokens, labels = next(g)
---> 15 loss = train_data(tokens, labels, word_attn_model, sent_attn_model, word_attn_optimiser, sent_attn_optimiser, loss_criterion)
16 acc = test_accuracy_mini_batch(tokens, labels, word_attn_model, sent_attn_model)
17 accuracy_full.append(acc)

in train_data(mini_batch, targets, word_attn_model, sent_attn_model, word_optimizer, sent_optimizer, criterion)
12 else:
13 s = torch.cat((s,_s),0)
---> 14 y_pred, state_sent, _ = sent_attn_model(s, state_sent)
15 loss = criterion(y_pred, targets)
16 loss.backward()

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
222 for hook in self._forward_pre_hooks.values():
223 hook(self, input)
--> 224 result = self.forward(*input, **kwargs)
225 for hook in self._forward_hooks.values():
226 hook_result = hook(self, input, result)

in forward(self, word_attention_vectors, state_sent)
36 print(state_sent.size())
37
---> 38 output_sent, state_sent = self.sent_gru(word_attention_vectors, state_sent)
39 sent_squish = batch_matmul_bias(output_sent, self.weight_W_sent,self.bias_sent, nonlinearity='tanh')
40 sent_attn = batch_matmul(sent_squish, self.weight_proj_sent)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
222 for hook in self._forward_pre_hooks.values():
223 hook(self, input)
--> 224 result = self.forward(*input, **kwargs)
225 for hook in self._forward_hooks.values():
226 hook_result = hook(self, input, result)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\modules\rnn.py in forward(self, input, hx)
160 flat_weight=flat_weight
161 )
--> 162 output, hidden = func(input, self.all_weights, hx)
163 if is_packed:
164 output = PackedSequence(output, batch_sizes)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\_functions\rnn.py in forward(input, *fargs, **fkwargs)
349 else:
350 func = AutogradRNN(*args, **kwargs)
--> 351 return func(input, *fargs, **fkwargs)
352
353 return forward

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\_functions\rnn.py in forward(input, weight, hidden)
242 input = input.transpose(0, 1)
243
--> 244 nexth, output = func(input, hidden, weight)
245
246 if batch_first and batch_sizes is None:

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\_functions\rnn.py in forward(input, hidden, weight)
82 l = i * num_directions + j
83
---> 84 hy, output = inner(input, hidden[l], weight[l])
85 next_hidden.append(hy)
86 all_output.append(output)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\_functions\rnn.py in forward(input, hidden, weight)
111 steps = range(input.size(0) - 1, -1, -1) if reverse else range(input.size(0))
112 for i in steps:
--> 113 hidden = inner(input[i], hidden, *weight)
114 # hack to handle LSTM
115 output.append(hidden[0] if isinstance(hidden, tuple) else hidden)

C:\ProgramData\Anaconda3\lib\site-packages\torch\nn\_functions\rnn.py in GRUCell(input, hidden, w_ih, w_hh, b_ih, b_hh)
54 gi = F.linear(input, w_ih, b_ih)
55 gh = F.linear(hidden, w_hh, b_hh)
---> 56 i_r, i_i, i_n = gi.chunk(3, 1)
57 h_r, h_i, h_n = gh.chunk(3, 1)
58

C:\ProgramData\Anaconda3\lib\site-packages\torch\autograd\variable.py in chunk(self, num_chunks, dim)
745
746 def chunk(self, num_chunks, dim=0):
--> 747 return Chunk.apply(self, num_chunks, dim)
748
749 def squeeze(self, dim=None):

C:\ProgramData\Anaconda3\lib\site-packages\torch\autograd\_functions\tensor.py in forward(ctx, i, num_chunks, dim)
540 def forward(ctx, i, num_chunks, dim=0):
541 ctx.dim = dim
--> 542 result = i.chunk(num_chunks, dim)
543 ctx.mark_shared_storage(*((i, chunk) for chunk in result))
544 return result

C:\ProgramData\Anaconda3\lib\site-packages\torch\tensor.py in chunk(self, n_chunks, dim)
172 See :func:`torch.chunk`.
173 """
--> 174 return torch.chunk(self, n_chunks, dim)
175
176 def matmul(self, other):

C:\ProgramData\Anaconda3\lib\site-packages\torch\functional.py in chunk(tensor, chunks, dim)
42 if dim < 0:
43 dim += tensor.dim()
---> 44 split_size = (tensor.size(dim) + chunks - 1) // chunks
45 return split(tensor, split_size, dim)
46

RuntimeError: invalid argument 2: dimension 1 out of range of 1D tensor at d:\projects\pytorch\torch\lib\th\generic/THTensor.c:24


I got this RuntimeError when I tried to run it on Python 3. Could anyone help me with this?

Thanks a lot!
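
The failing frame is gi.chunk(3, 1) inside GRUCell: chunk is asked to split along dimension 1, but gi is 1-D, so the input that reached the sentence-level GRU had already lost its batch dimension. nn.GRU expects a 3-D (seq_len, batch, input_size) input, and the traceback suggests the s built with torch.cat in train_data arrives 2-D instead. Below is a minimal sketch of the failure and of one possible shape guard; ensure_3d is a hypothetical helper, not part of this repo, and the exact error wording varies across PyTorch versions:

import torch

hidden = 8

# What the traceback implies reached GRUCell: a 1-D tensor,
# so the batch dimension was lost somewhere upstream.
gi = torch.randn(3 * hidden)
try:
    i_r, i_i, i_n = gi.chunk(3, 1)      # a 1-D tensor has no dimension 1
except (RuntimeError, IndexError) as e:
    print(e)                            # "dimension 1 out of range ..."

# What GRUCell expects: a 2-D (batch, 3 * hidden) tensor.
gi = torch.randn(4, 3 * hidden)
i_r, i_i, i_n = gi.chunk(3, 1)          # three (4, hidden) chunks

# Hypothetical guard before calling sent_attn_model(s, state_sent):
# restore dimensions that an unguarded .squeeze() can drop, e.g. when
# the mini-batch or sentence count is 1.
def ensure_3d(s):
    while s.dim() < 3:
        s = s.unsqueeze(1)              # re-insert the missing batch dimension
    return s

If the shapes check out, the other usual suspect when porting code of this era to Python 3 is division: / returns a float in Python 3, so any size arithmetic used to build tensors or hidden states needs // instead.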


AotY commented Dec 1, 2017

I have the same problem in Python 3.

@Sandeep42 (Contributor)

I am sorry, but I will no longer be maintaining this project, especially when it comes to Python 3 support. Please have a look at the other implementation of the same attention model:
https://github.com/EdGENetworks/anuvada


AotY commented Dec 3, 2017

Thanks.
