-
Notifications
You must be signed in to change notification settings - Fork 0
/
Models.py
49 lines (42 loc) · 1.99 KB
/
Models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import torch
import torch.nn as nn
from Layers import InputEquivariantLayer, IntermediateEquivariantLayer
from Layers import SLogSlaterDeterminant
class LogHarmonicNet(nn.Module):
    """Permutation-equivariant network returning a signed log Slater determinant.

    Takes the one-dimensional positions of the system (represented by a
    vector) and returns the sign and log-abs-determinant of a Slater
    determinant, with a Gaussian log-envelope added so the output decays
    to zero at large input values.
    """

    def __init__(self, num_input, num_hidden, num_layers, num_dets, func):
        """Build the equivariant stack, the Slater head, and the envelope width.

        Args:
            num_input: number of particles (length of the input position vector).
            num_hidden: width of each equivariant hidden layer.
            num_layers: total depth — one input layer plus (num_layers - 1)
                intermediate layers.
            num_dets: number of determinants; sets the size of the envelope
                width parameter.
            func: non-linearity, passed through to the equivariant layers.
        """
        super(LogHarmonicNet, self).__init__()
        self.num_input = num_input
        self.num_hidden = num_hidden
        self.num_layers = num_layers
        self.num_dets = num_dets
        self.func = func

        self.input_layer = InputEquivariantLayer(in_features=2,
                                                 out_features=self.num_hidden,
                                                 num_particles=self.num_input,
                                                 func=func,
                                                 bias=True)
        # (num_layers - 1) residual intermediate layers; in_features is
        # 2*num_hidden (the layer presumably concatenates a pooled context
        # with per-particle features — defined in Layers, not visible here).
        layers = []
        for _ in range(1, self.num_layers):
            layers.append(IntermediateEquivariantLayer(in_features=2 * self.num_hidden,
                                                       out_features=self.num_hidden,
                                                       num_particles=self.num_input,
                                                       func=func,
                                                       bias=True))
        self.layers = nn.ModuleList(layers)
        self.slater = SLogSlaterDeterminant(in_features=self.num_hidden,
                                            num_particles=self.num_input,
                                            bias=True)
        # Learnable width of the Gaussian envelope, initialised to 0.1.
        self.width = nn.Parameter(torch.empty(self.num_dets).fill_(0.1))

    def forward(self, x0):
        """Return (sign, logabsdet) of the enveloped determinant for positions x0.

        Args:
            x0: particle positions; last dimension is summed over for the
                envelope (exact expected shape is set by InputEquivariantLayer).

        Returns:
            Tuple (sign, logabsdet + log_envelope) from the Slater head.
        """
        x = self.input_layer(x0)
        for layer in self.layers:
            x = layer(x) + x  # residual connection
        # Gaussian log-envelope enforces decay to zero at large |x0|.
        # NOTE(review): self.width has shape (num_dets,) while the summed term
        # is per-sample; broadcasting is only well-defined when num_dets == 1
        # or the shapes happen to align — confirm the intended shapes.
        log_envelope = -self.width * x0.pow(2).sum(dim=-1)
        sign, logabsdet = self.slater(x)
        return sign, logabsdet + log_envelope