Srikumar26 committed on
Commit cb851a6
1 Parent(s): 34b7a72

Create model.py

Files changed (1)
  1. model.py +133 -0
model.py ADDED
@@ -0,0 +1,133 @@
import torch
import torch.nn as nn
from typing import Optional
from torch import Tensor
import numpy as np
from huggingface_hub import PyTorchModelHubMixin

# Equal Earth projection constants: polynomial coefficients A1-A4 and scale factor SF
A1 = 1.340264
A2 = -0.081106
A3 = 0.000893
A4 = 0.003796
SF = 66.50336

@torch.jit.script
def gaussian_encoding(
        v: Tensor,
        b: Tensor) -> Tensor:
    r"""Computes :math:`\gamma(\mathbf{v}) = (\cos{2 \pi \mathbf{B} \mathbf{v}} , \sin{2 \pi \mathbf{B} \mathbf{v}})`

    Args:
        v (Tensor): input tensor of shape :math:`(N, *, \text{input_size})`
        b (Tensor): projection matrix of shape :math:`(\text{encoded_layer_size}, \text{input_size})`

    Returns:
        Tensor: mapped tensor of shape :math:`(N, *, 2 \cdot \text{encoded_layer_size})`

    See :class:`~rff.layers.GaussianEncoding` for more details.
    """
    vp = 2 * np.pi * v @ b.T
    return torch.cat((torch.cos(vp), torch.sin(vp)), dim=-1)


def sample_b(sigma: float, size: tuple) -> Tensor:
    r"""Matrix of size :attr:`size` sampled from :math:`\mathcal{N}(0, \sigma^2)`

    Args:
        sigma (float): standard deviation
        size (tuple): size of the matrix sampled

    See :class:`~rff.layers.GaussianEncoding` for more details
    """
    return torch.randn(size) * sigma

class GaussianEncoding(nn.Module):
    """Layer for mapping coordinates using random Fourier features"""

    def __init__(self, sigma: Optional[float] = None,
                 input_size: Optional[float] = None,
                 encoded_size: Optional[float] = None,
                 b: Optional[Tensor] = None):
        r"""
        Args:
            sigma (Optional[float]): standard deviation
            input_size (Optional[float]): the number of input dimensions
            encoded_size (Optional[float]): the number of dimensions the `b` matrix maps to
            b (Optional[Tensor], optional): Optionally specify a :attr:`b` matrix already sampled
        Raises:
            ValueError:
                If :attr:`b` is provided and one of :attr:`sigma`, :attr:`input_size`,
                or :attr:`encoded_size` is provided. If :attr:`b` is not provided and one of
                :attr:`sigma`, :attr:`input_size`, or :attr:`encoded_size` is not provided.
        """
        super().__init__()
        if b is None:
            if sigma is None or input_size is None or encoded_size is None:
                raise ValueError(
                    'Arguments "sigma," "input_size," and "encoded_size" are required.')

            b = sample_b(sigma, (encoded_size, input_size))
        elif sigma is not None or input_size is not None or encoded_size is not None:
            raise ValueError('Only specify the "b" argument when using it.')
        self.b = nn.parameter.Parameter(b, requires_grad=False)

    def forward(self, v: Tensor) -> Tensor:
        r"""Computes :math:`\gamma(\mathbf{v}) = (\cos{2 \pi \mathbf{B} \mathbf{v}} , \sin{2 \pi \mathbf{B} \mathbf{v}})`

        Args:
            v (Tensor): input tensor of shape :math:`(N, *, \text{input_size})`

        Returns:
            Tensor: Tensor mapping using random fourier features of shape :math:`(N, *, 2 \cdot \text{encoded_size})`
        """
        return gaussian_encoding(v, self.b)

def equal_earth_projection(L):
    # Equal Earth projection of a batch of (latitude, longitude) pairs given in degrees
    latitude = L[:, 0]
    longitude = L[:, 1]
    latitude_rad = torch.deg2rad(latitude)
    longitude_rad = torch.deg2rad(longitude)
    sin_theta = (torch.sqrt(torch.tensor(3.0)) / 2) * torch.sin(latitude_rad)
    theta = torch.asin(sin_theta)
    denominator = 3 * (9 * A4 * theta**8 + 7 * A3 * theta**6 + 3 * A2 * theta**2 + A1)
    x = (2 * torch.sqrt(torch.tensor(3.0)) * longitude_rad * torch.cos(theta)) / denominator
    y = A4 * theta**9 + A3 * theta**7 + A2 * theta**3 + A1 * theta
    return (torch.stack((x, y), dim=1) * SF) / 180

class LocationEncoderCapsule(nn.Module):
    def __init__(self, sigma):
        super(LocationEncoderCapsule, self).__init__()
        # Random Fourier feature encoding at a single frequency scale sigma, followed by an MLP
        rff_encoding = GaussianEncoding(sigma=sigma, input_size=2, encoded_size=256)
        self.km = sigma
        self.capsule = nn.Sequential(rff_encoding,
                                     nn.Linear(512, 1024),
                                     nn.ReLU(),
                                     nn.Linear(1024, 1024),
                                     nn.ReLU(),
                                     nn.Linear(1024, 1024),
                                     nn.ReLU())
        self.head = nn.Sequential(nn.Linear(1024, 512))

    def forward(self, x):
        x = self.capsule(x)
        x = self.head(x)
        return x

class LocationEncoder(nn.Module, PyTorchModelHubMixin):
    def __init__(self):
        super(LocationEncoder, self).__init__()
        # One capsule per frequency scale
        self.sigma = [2**0, 2**4, 2**8]
        self.n = len(self.sigma)

        for i, s in enumerate(self.sigma):
            self.add_module('LocEnc' + str(i), LocationEncoderCapsule(sigma=s))

    def forward(self, location):
        # Project (latitude, longitude) in degrees onto the Equal Earth plane before encoding
        location = equal_earth_projection(location)
        location_features = torch.zeros(location.shape[0], 512).to(location.device)

        # Sum the 512-dimensional features produced by each frequency-scale capsule
        for i in range(self.n):
            location_features += self._modules['LocEnc' + str(i)](location)

        return location_features
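
Example usage (a minimal sketch, not part of the commit above): it assumes model.py is importable from the working directory, and the repository id passed to from_pretrained is a placeholder for wherever pretrained weights might live, not something taken from this commit. The encoder maps a batch of (latitude, longitude) pairs in degrees to one 512-dimensional feature vector per location, obtained by summing the outputs of the three frequency-scale capsules.

import torch
from model import LocationEncoder

# Randomly initialized encoder; LocationEncoder.from_pretrained("<repo-id>") would instead
# load weights pushed to the Hub via PyTorchModelHubMixin ("<repo-id>" is a placeholder).
encoder = LocationEncoder()
encoder.eval()

# Batch of (latitude, longitude) pairs in degrees
coords = torch.tensor([[40.7128, -74.0060],   # New York City
                       [48.8566, 2.3522]])    # Paris

with torch.no_grad():
    features = encoder(coords)

print(features.shape)  # torch.Size([2, 512])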