# Element-wise sigmoid used to squash raw linear-layer scores into (0, 1);
# applied later to the output of self.linear_layer when computing tag scores.
self.normalizer = nn.Sigmoid()


 # Embed every word of the sentence in one call: `embs` is expected to be a
 # (sentence_length, embedding_dim) matrix with one row per word.
 # NOTE(review): `forwards` (plural) is not the standard nn.Module.forward —
 # presumably a project-specific method that embeds a whole sentence; confirm
 # against the word_emb class definition.
embs = self.word_emb.forwards(sent)

# Sanity check: each row's width must equal the embedding size reported by
# the embedding module itself.
assert embs.shape[1] == self.word_emb.embedding_size()


# Score every word against every POS tag: project each word embedding with
# the linear layer, then normalize each raw score into (0, 1) element-wise.
raw_scores = self.linear_layer(embs)
scores = self.normalizer(raw_scores)

# Each row of the score matrix must hold exactly one score per POS tag.
assert scores.shape[1] == len(self.tagset)



# Locate the highest-scoring entry of the score vector; torch.max with an
# explicit dim returns a named (values, indices) pair.
best = torch.max(score_vect, dim=0)
ix = best.indices.item()

# Guard: the winning index must be a valid position in the tagset.
assert 0 <= ix < len(self.tagset)

# Map the index back to the tag itself — the tagset's iteration order
# defines the index-to-tag correspondence.
pos = list(self.tagset)[ix]