content stringlengths 5 1.05M |
|---|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom training loops."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
import keras
from keras import keras_parameterized
from keras import testing_utils
class LayerWithLosses(keras.layers.Layer):
  """Test layer that registers both a regularization loss and an activity loss."""

  def build(self, input_shape):
    # Scalar weight with a heavy L1 regularizer, so model.losses picks up a
    # regularization term in addition to the add_loss() term below.
    self.v = self.add_weight(
        name='hey',
        shape=(),
        initializer='ones',
        regularizer=keras.regularizers.l1(100))

  def call(self, inputs):
    # Activity loss tracked per call; together with the regularizer above
    # this yields len(model.losses) == 2 (asserted in add_loss_step).
    self.add_loss(tf.reduce_sum(inputs))
    return self.v * inputs
class LayerWithMetrics(keras.layers.Layer):
  """Test layer that registers metrics both from a tensor and a Metric object."""

  def build(self, input_shape):
    self.mean = keras.metrics.Mean(name='mean_object')

  def call(self, inputs):
    # One metric added from a raw tensor (with explicit mean aggregation)...
    self.add_metric(
        tf.reduce_mean(inputs), name='mean_tensor', aggregation='mean')
    # ...and one from a Metric object's result (no aggregation argument).
    self.add_metric(self.mean(inputs))
    return inputs
class LayerWithTrainingArg(keras.layers.Layer):
  """Test layer that records the `training` value it was called with.

  Returns the inputs unchanged when training, and zeros otherwise, so tests
  can observe which training mode was propagated.
  """

  def call(self, inputs, training=None):
    # Stash the resolved training flag for test assertions.
    self.training = training
    if training:
      return inputs
    else:
      return 0. * inputs
def add_loss_step(defun):
  """Runs one optimizer step on a model whose layer registers two losses.

  Args:
    defun: If True, wrap the train step in `tf.function`.

  Returns:
    The scalar total loss from the step.
  """
  optimizer = keras.optimizer_v2.adam.Adam()
  model = testing_utils.get_model_from_layers([LayerWithLosses()],
                                              input_shape=(10,))

  def train_step(x):
    with tf.GradientTape() as tape:
      model(x)
      # One L1 regularization loss + one add_loss() activity loss.
      assert len(model.losses) == 2
      loss = tf.reduce_sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))
    return loss

  if defun:
    train_step = tf.function(train_step)

  x = tf.ones((10, 10))
  return train_step(x)
def batch_norm_step(defun):
  """Runs one train step on a BatchNorm model and an inference forward pass.

  Args:
    defun: If True, wrap the train step in `tf.function`.

  Returns:
    A (loss, inference_predictions) tuple; the inference pass exercises the
    moving statistics updated by the training pass.
  """
  optimizer = keras.optimizer_v2.adadelta.Adadelta()
  model = testing_utils.get_model_from_layers([
      keras.layers.BatchNormalization(momentum=0.9),
      keras.layers.Dense(1, kernel_initializer='zeros', activation='softmax')
  ],
                                              input_shape=(10,))

  def train_step(x, y):
    with tf.GradientTape() as tape:
      # training=True uses batch statistics and updates the moving averages.
      y_pred = model(x, training=True)
      loss = keras.losses.binary_crossentropy(y, y_pred)
    gradients = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))
    # Second call with training=False uses the moving statistics.
    return loss, model(x, training=False)

  if defun:
    train_step = tf.function(train_step)

  x, y = tf.ones((10, 10)), tf.ones((10, 1))
  return train_step(x, y)
def add_metric_step(defun):
  """Runs one train step on a model with add_metric() calls and checks results.

  Args:
    defun: If True, wrap the train step in `tf.function`.

  Returns:
    The list of metric results after the step.
  """
  optimizer = keras.optimizer_v2.rmsprop.RMSprop()
  model = testing_utils.get_model_from_layers([
      LayerWithMetrics(),
      keras.layers.Dense(1, kernel_initializer='zeros', activation='softmax')
  ],
                                              input_shape=(10,))

  def train_step(x, y):
    with tf.GradientTape() as tape:
      # Two forward passes so the metrics aggregate across calls.
      y_pred_1 = model(x)
      y_pred_2 = model(2 * x)
      y_pred = y_pred_1 + y_pred_2
      loss = keras.losses.mean_squared_error(y, y_pred)
    gradients = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(gradients, model.trainable_weights))
    assert len(model.metrics) == 2
    return [m.result() for m in model.metrics]

  if defun:
    train_step = tf.function(train_step)

  x, y = tf.ones((10, 10)), tf.zeros((10, 1))
  metrics = train_step(x, y)
  # Mean over inputs of ones and twos: (1 + 2) / 2 = 1.5 for both metrics.
  assert np.allclose(metrics[0], 1.5)
  assert np.allclose(metrics[1], 1.5)
  return metrics
@keras_parameterized.run_with_all_model_types
class CustomTrainingLoopTest(keras_parameterized.TestCase):
  """Tests custom training loops across all Keras model types."""

  @parameterized.named_parameters(('add_loss_step', add_loss_step),
                                  ('add_metric_step', add_metric_step),
                                  ('batch_norm_step', batch_norm_step))
  def test_eager_and_tf_function(self, train_step):
    # A step function must produce identical results whether it runs eagerly
    # or wrapped in tf.function.
    eager_result = train_step(defun=False)
    fn_result = train_step(defun=True)
    self.assertAllClose(eager_result, fn_result)

  @parameterized.named_parameters(('eager', False), ('defun', True))
  def test_training_arg_propagation(self, defun):
    model = testing_utils.get_model_from_layers([LayerWithTrainingArg()],
                                                input_shape=(1,))

    def train_step(x):
      # No-argument call, explicit training=False, explicit training=True.
      return model(x), model(x, training=False), model(x, training=True)

    if defun:
      train_step = tf.function(train_step)

    x = tf.ones((1, 1))
    results = train_step(x)
    # LayerWithTrainingArg returns zeros unless training is truthy, so only
    # the training=True call passes the input through.
    self.assertAllClose(results[0], tf.zeros((1, 1)))
    self.assertAllClose(results[1], tf.zeros((1, 1)))
    self.assertAllClose(results[2], tf.ones((1, 1)))

  @parameterized.named_parameters(('eager', False), ('defun', True))
  def test_learning_phase_propagation(self, defun):

    class MyModel(keras.layers.Layer):

      def __init__(self):
        super(MyModel, self).__init__()
        self.layer = LayerWithTrainingArg()

      def call(self, inputs):
        # Deliberately does not forward `training`; the inner layer must
        # pick it up from the learning-phase context.
        return self.layer(inputs)

    model = MyModel()

    def train_step(x):
      no_learning_phase_out = model(x)
      self.assertFalse(model.layer.training)
      # learning_phase_scope(0) == inference, scope(1) == training.
      with keras.backend.learning_phase_scope(0):
        inf_learning_phase_out = model(x)
      self.assertEqual(model.layer.training, 0)
      with keras.backend.learning_phase_scope(1):
        train_learning_phase_out = model(x)
      self.assertEqual(model.layer.training, 1)
      return [
          no_learning_phase_out, inf_learning_phase_out,
          train_learning_phase_out
      ]

    if defun:
      train_step = tf.function(train_step)

    x = tf.ones((1, 1))
    results = train_step(x)
    # Only the learning_phase == 1 call behaves as training.
    self.assertAllClose(results[0], tf.zeros((1, 1)))
    self.assertAllClose(results[1], tf.zeros((1, 1)))
    self.assertAllClose(results[2], tf.ones((1, 1)))

  @parameterized.named_parameters(('eager', False), ('defun', True))
  def test_training_arg_priorities(self, defun):

    class MyModel(keras.layers.Layer):

      def __init__(self):
        super(MyModel, self).__init__()
        self.layer = LayerWithTrainingArg()

      def call(self, inputs, training=False):
        # Does not forward `training`; the inner layer should inherit the
        # value resolved at the outer call.
        return self.layer(inputs)

    model = MyModel()

    def train_step(x):
      # Priority: explicit call arg > signature default > learning phase.
      explicit_out = model(x, training=True)
      default_out = model(x)
      with keras.backend.learning_phase_scope(1):
        parent_out = model(x, training=False)
        lr_out = model(x)
      return [explicit_out, default_out, parent_out, lr_out]

    if defun:
      train_step = tf.function(train_step)

    x = tf.ones((1, 1))
    results = train_step(x)
    self.assertAllClose(results[0], tf.ones((1, 1)))
    self.assertAllClose(results[1], tf.zeros((1, 1)))
    self.assertAllClose(results[2], tf.zeros((1, 1)))
    self.assertAllClose(results[3], tf.ones((1, 1)))
if __name__ == '__main__':
  # These tests exercise eager-mode behavior, so force eager execution
  # before running under a TF1-style runtime.
  tf.compat.v1.enable_eager_execution()
  tf.test.main()
|
import qiskit
from .cnx_halfdirty import cnx_halfdirty
def cnx_any_dirty(circuit, controls, target, ancilla):
    """Apply an n-controlled X to `target` using any positive number of dirty ancillae.

    Dispatch:
      * 1 or 2 controls: plain CX / Toffoli.
      * enough ancillae (> len(controls) - 2): the half-dirty construction.
      * otherwise: partition the controls into m + 1 groups chained through
        the m ancillae; each group's sub-gate borrows qubits from the other
        groups as dirty ancillae.
    """

    def find_dirty(groups, g):
        # Collect len(g) - 3 qubits that are outside group `g`, to serve as
        # dirty (borrowed) ancillae for the half-dirty sub-gate on `g`.
        d = []
        for f in groups:
            for e in f:
                if e not in g and e not in d:
                    d.append(e)
                if len(d) == len(g) - 3:
                    return d
        return d

    m = len(ancilla)
    # NOTE(review): the f-prefix is unnecessary (no placeholders); under
    # `python -O` this assert is stripped entirely.
    assert m > 0, f'If 0 ancilla are provided, please use a cnx_inplace gate instead.'
    if len(controls) == 2:
        circuit.toffoli(controls[0], controls[1], target)
    elif len(controls) == 1:
        circuit.cx(controls[0], target)
    else:
        if len(ancilla) > len(controls) - 2:
            # Enough ancillae for the standard half-dirty decomposition.
            cnx_halfdirty(circuit, controls, target, ancilla)
        else:
            # Group the controls into m + 1 groups, do as many toffoli's as possible then start filling until can't be done
            groups = [[] for _ in range(m + 1)]
            groups[0].append(controls[0])
            # Seed every group with one control each.
            for i in range(m + 1):
                groups[i].append(controls[i+1])
            i = m+2
            which_group = -1
            # Distribute remaining controls: alternately top up group 0 and
            # fill the last group while it still has enough borrowable qubits
            # for its own sub-decomposition.
            while i < len(controls):
                if which_group == 0:
                    groups[0].append(controls[i])
                    i += 1
                    which_group = -1
                else:
                    g = groups[-1]
                    # Qubits outside g (plus remaining ancillae) usable as
                    # dirty ancillae for g's sub-gate.
                    free = sum([len(gg) if gg != g else 0 for gg in groups]) + len(ancilla) - 1
                    while len(g) + 1 < free + 2:
                        g.append(controls[i])
                        i += 1
                        if i >= len(controls):
                            break
                    if i >= len(controls):
                        break
                    which_group = 0
            # Chain the groups: each group is additionally controlled on the
            # previous group's last qubit and targets the next ancilla; the
            # final group targets the real target qubit.
            for i in range(m + 1):
                if i != 0:
                    groups[i].insert(0, groups[i-1][-1])
                if i < m:
                    groups[i].append(ancilla.pop(0))
                else:
                    groups[i].append(target)

            def forward(groups):
                # Apply the group gates from last to first.
                for g in reversed(groups):
                    if len(g) == 3:
                        circuit.toffoli(*g)
                    elif len(g) == 2:
                        circuit.cx(*g)
                    else:
                        cnx_halfdirty(circuit, controls=g[:-1], target=g[-1], ancilla=find_dirty(groups, g))

            def backward(groups):
                # Re-apply the intermediate group gates (first and last excluded).
                for g in groups[1:-1]:
                    if len(g) == 3:
                        circuit.toffoli(*g)
                    elif len(g) == 2:
                        circuit.cx(*g)
                    else:
                        cnx_halfdirty(circuit, controls=g[:-1], target=g[-1], ancilla=find_dirty(groups, g))

            # forward/backward twice restores all borrowed (dirty) qubits to
            # their original states while leaving the target flipped.
            forward(groups)
            backward(groups)
            forward(groups)
            backward(groups)
def generate_dirty_multicontrol(n, m):
    """Build a circuit applying an n-controlled X with m dirty ancilla qubits.

    Qubit layout: the first n qubits are controls, qubit n is the target and
    the remaining m qubits are the (dirty) ancillae.
    """
    total = n + m + 1
    circuit = qiskit.circuit.QuantumCircuit(total)
    qubits = list(range(total))
    controls, target, work = qubits[:n], qubits[n], qubits[n + 1:]
    cnx_any_dirty(circuit, controls, target, work)
    return circuit
# TODO 1. Create dictionary {"a": "alpha", ...}
import pandas

# Build the letter -> NATO code-word lookup from the CSV (columns: letter, code).
alphabet = pandas.read_csv("day26alpha.csv")
nato_alpha = dict(zip(alphabet.letter, alphabet.code))
print(nato_alpha)

# TODO 2. Create list of phonetic alphabet from user input
# Keep prompting until the user types EXIT (case-insensitive).
while True:
    user_input = input("Word: ").upper()
    if user_input == "EXIT":
        break
    print("\n".join(nato_alpha[letter] for letter in user_input))
"""
Copyright 2016 Pawel Bartusiak
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import subprocess
import sys
import time
# Number of bot instances launched so far (module-level counter).
processes = 0
# channel name -> key into dictProcesses (stringified launch index).
dictChannelKeys = {}
# NOTE(review): never written anywhere visible in this file -- possibly dead.
dictChannelsin = {}
# launch index (as string) -> OS pid of the spawned bot process.
dictProcesses = {}
def addInstance(channel, processes):
    """Spawn a StrongLegsBot.py subprocess for `channel` and register its pid.

    Bug fixed: the original derived the registry key from the `processes`
    parameter and then incremented it locally -- but integers are passed by
    value, so the caller's counter never advanced and every instance collided
    on the same key (clobbering dictProcesses["0"]). The key is now derived
    from the registry size, which is unique per launch. The parameter is kept
    for backward compatibility; the updated count is returned.

    Args:
        channel: Twitch channel name to launch a bot for.
        processes: current instance count (informational only, see above).

    Returns:
        The updated instance count (`processes + 1`, or `processes` when the
        channel was already registered).
    """
    if channel in dictChannelKeys:
        return processes
    # "open" on macOS, plain "python" elsewhere.
    opener = "open" if sys.platform == "darwin" else "python"
    process = subprocess.Popen([opener, "StrongLegsBot.py", channel, "info"])
    key = str(len(dictProcesses))  # unique per launch, unlike the stale param
    dictProcesses[key] = process.pid
    dictChannelKeys[channel] = key
    return processes + 1
def deleteInstance(channel, processes):
    """Kill the bot instance registered for `channel`."""
    # Resolve the pid through the channel -> key -> pid indirection.
    pid = dictProcesses[dictChannelKeys[channel]]
    # NOTE(review): `taskkill` is Windows-only, yet addInstance spawns with
    # "open"/"python" as if cross-platform -- confirm the intended target OS.
    os.system("taskkill /pid %s /f" % str(pid))
    # NOTE(review): this only decrements the local copy; the caller's counter
    # is never updated (ints are passed by value).
    processes -= 1
# Launch one bot instance per channel listed in channels.txt.
channels_file = open("channels.txt", "r")
for channel in channels_file.readlines():
    time.sleep(1)  # stagger launches so the processes don't all start at once
    addInstance(channel.strip("\n"), processes)
# Block until the operator types "end"; EOF/interrupt also leaves the loop.
user_input = None
while user_input != "end":
    try:
        user_input = input("End?")
    except Exception as e:
        print(str(e))
        break
|
# Based on the work done by the creators of the Dictation Toolbox
# https://github.com/dictation-toolbox/dragonfly-scripts
#
# Modifications by: Tony Grosinger and Anders Sildnes
#
# Licensed under LGPL
class FormatTypes:
    """Integer constants identifying the supported text-formatting styles."""
    camelCase = 1
    pascalCase = 2
    snakeCase = 3
    squash = 4
    upperCase = 5
    lowerCase = 6
    dashify = 7
    dotify = 8
    spokenForm = 9
    sentenceCase = 10
    spokenFormWithSpace = 11
def format_camel_case(text):
    """Join a list of words into camelCase.

    The first word is kept exactly as spoken (not lower-cased, matching the
    original behavior); each following word is capitalized with the rest of
    its letters lowered.

    Fixes: the original raised IndexError on an empty word list (`text[0]`)
    and on empty-string words (`word[0]`); `word[:1]` and the empty-list
    guard make both safe.
    """
    if not text:
        return ""
    tail = ''.join(word[:1].upper() + word[1:].lower() for word in text[1:])
    return text[0] + tail
def format_pascal_case(text_list):
    """Join a list of words into PascalCase: every word capitalized, rest lowered.

    Fixes: `word[:1]` instead of `word[0]` so empty-string words no longer
    raise IndexError.
    """
    return ''.join(word[:1].upper() + word[1:].lower() for word in text_list)
def format_snake_case(text_list):
    """Lower-case the words and join them into snake_case.

    An underscore is inserted only between two "normal" words, i.e. when the
    character on each side of the seam is alphanumeric; punctuation-adjacent
    words are glued on directly.
    """
    result = ""
    for word in text_list:
        separator = "_" if result[-1:].isalnum() and word[-1:].isalnum() else ""
        result += separator + word.lower()
    return result
def format_dashify(text_list):
    """Join the words with dashes, preserving their original case.

    A dash is inserted only between two "normal" words (the character on
    each side of the seam is alphanumeric).
    """
    joined = ""
    for word in text_list:
        needs_dash = joined[-1:].isalnum() and word[-1:].isalnum()
        joined += ("-" + word) if needs_dash else word
    return joined
def format_dotify(text):
    """Join the words with dots, preserving their original case.

    Bug fixed: the original called `text.strip()`, but format_text_logic
    hands every formatter a *list* of words (lists have no .strip(), so it
    always raised AttributeError), and for a plain string it iterated
    characters rather than words. It now mirrors format_dashify, inserting a
    dot only between two words whose adjacent ends are alphanumeric.
    """
    joined = ""
    for word in text:
        if joined[-1:].isalnum() and word[-1:].isalnum():
            joined += "." + word
        else:
            joined += word
    return joined
def format_squash(text):
    """Concatenate the words with nothing between them."""
    squashed = ""
    for word in text:
        squashed += word
    return squashed
def format_sentence_case(text):
    """Join the words into a sentence: first word Title-cased, the rest lowered.

    Bug fixed: the original called `text.strip()` on the word *list* handed
    in by format_text_logic (AttributeError), and for a plain string it
    iterated characters rather than words. It now consumes the word list
    directly.
    """
    words = []
    for word in text:
        words.append(word.title() if not words else word.lower())
    return " ".join(words)
def format_upper_case(text_list):
    """Upper-case every word and join them with single spaces."""
    return ' '.join(map(str.upper, text_list))
def format_lower_case(text_list):
    """Lower-case every word and join them with single spaces."""
    return ' '.join(map(str.lower, text_list))
def format_spoken_form(text_list):
    """Join the words with single spaces.

    Note: no space is added while the accumulated result is still empty, so
    leading empty-string words do not produce a leading space (this matches
    the original accumulator behavior and differs from " ".join).
    """
    spoken = ""
    for word in text_list:
        spoken = word if not spoken else spoken + " " + word
    return spoken
def format_spoken_form_with_space(text_list):
    """Like format_spoken_form, but with one trailing space appended."""
    spoken = ""
    for word in text_list:
        spoken = word if not spoken else spoken + " " + word
    return spoken + " "
# NOTE(review): exact duplicate of the FormatTypes class defined earlier in
# this module; this second definition silently rebinds the name. Kept as-is
# here, but one of the two definitions should be removed.
class FormatTypes:
    camelCase = 1
    pascalCase = 2
    snakeCase = 3
    squash = 4
    upperCase = 5
    lowerCase = 6
    dashify = 7
    dotify = 8
    spokenForm = 9
    sentenceCase = 10
    spokenFormWithSpace = 11
# Dispatch table: FormatTypes constant -> formatter callable. Each formatter
# takes the current word list (or string) and returns the formatted result.
FORMAT_TYPES_MAP = {
    FormatTypes.sentenceCase: format_sentence_case,
    FormatTypes.camelCase: format_camel_case,
    FormatTypes.pascalCase: format_pascal_case,
    FormatTypes.snakeCase: format_snake_case,
    FormatTypes.squash: format_squash,
    FormatTypes.upperCase: format_upper_case,
    FormatTypes.lowerCase: format_lower_case,
    FormatTypes.dashify: format_dashify,
    FormatTypes.dotify: format_dotify,
    FormatTypes.spokenForm: format_spoken_form,
    FormatTypes.spokenFormWithSpace: format_spoken_form_with_space,
}
def format_text_logic(text, formatType):
    """Split `text` into words and apply each requested format type in order.

    `formatType` is an iterable of FormatTypes constants; the formatters are
    chained, so the output of one becomes the input of the next.
    NOTE(review): the formatters expect a *list* of words but return a plain
    string, so chaining more than one type feeds a string into a formatter
    that iterates words -- verify intended combinations.
    """
    result = text.split()
    for value in formatType:  # there can be multiple types
        result = FORMAT_TYPES_MAP[value](result)
    return result
|
from backpack.core.derivatives.dropout import DropoutDerivatives
from backpack.extensions.secondorder.diag_ggn.diag_ggn_base import DiagGGNBaseModule
class DiagGGNDropout(DiagGGNBaseModule):
    """Diagonal GGN extension module for dropout layers.

    Wires DropoutDerivatives into the generic DiagGGN base machinery; no
    extra parameters are involved.
    """

    def __init__(self):
        super().__init__(derivatives=DropoutDerivatives())
|
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from inference.proj_adaptive_softmax_jit import ProjectedAdaptiveLogSoftmax
class PositionalEmbedding(torch.jit.ScriptModule):
    """Sinusoidal positional embedding (Transformer-XL style), TorchScripted."""

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # Standard 1/10000^(2i/d) inverse frequencies; registered as a buffer
        # so it follows the module across devices without being a parameter.
        inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer('inv_freq', inv_freq)

    @torch.jit.script_method
    def forward(self, pos_seq, bsz: Optional[int] = None):
        # Outer product: [len] x [demb/2] -> [len, demb/2].
        sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
        # Concatenate sin and cos halves -> [len, demb].
        pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
        if bsz is not None:
            # Broadcast along a batch axis -> [len, bsz, demb].
            return pos_emb[:, None, :].expand(-1, bsz, -1)
        else:
            return pos_emb[:, None, :]
class PositionwiseFF(torch.jit.ScriptModule):
    """Position-wise two-layer feed-forward block with residual connection."""
    __constants__ = ['pre_lnorm']

    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
        super(PositionwiseFF, self).__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout
        # d_model -> d_inner -> d_model MLP with ReLU and dropout.
        self.CoreNet = nn.Sequential(
            nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout),
        )
        self.layer_norm = nn.LayerNorm(d_model)
        # If True, normalize before the MLP; otherwise after the residual add.
        self.pre_lnorm = pre_lnorm

    @torch.jit.script_method
    def forward(self, inp):
        if self.pre_lnorm:
            # layer normalization + positionwise feed-forward
            core_out = self.CoreNet(self.layer_norm(inp))
            # residual connection
            output = core_out + inp
        else:
            # positionwise feed-forward
            core_out = self.CoreNet(inp)
            # residual connection + layer normalization
            output = self.layer_norm(inp + core_out)
        return output
class RelMultiHeadAttn(torch.jit.ScriptModule):
    """Base class for multi-head attention with relative positions.

    Holds the shared projections (fused qkv, output), dropouts and layer
    norm; subclasses implement forward().
    """

    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False):
        super(RelMultiHeadAttn, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Single projection producing q, k and v for all heads at once.
        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)
        # 1/sqrt(d_head) attention-score scaling.
        self.scale = 1 / (d_head ** 0.5)
        self.pre_lnorm = pre_lnorm

    def _parallelogram_mask(self, h, w, left=False):
        # Boolean mask selecting a parallelogram band of an h x w matrix
        # (triu in the top-left square, tril in the bottom-right).
        mask = torch.ones((h, w)).byte()
        m = min(h, w)
        mask[:m, :m] = torch.triu(mask[:m, :m])
        mask[-m:, -m:] = torch.tril(mask[-m:, -m:])
        if left:
            return mask.bool()
        else:
            return mask.flip(0).bool()

    def _shift(self, x, qlen, klen, mask, left=False):
        # Zero-pad along dim 1 and re-select via the parallelogram mask so the
        # relative-position entries realign to a [qlen, klen] layout.
        if qlen > 1:
            zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
                                   device=x.device, dtype=x.dtype)
        else:
            zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
        if left:
            mask = mask.flip(1)
            x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
        else:
            x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
        x = x_padded.masked_select(mask[:, :, None, None]) \
            .view(qlen, klen, x.size(2), x.size(3))
        return x

    @torch.jit.script_method
    def _rel_shift(self, x, zero_triu: bool = False):
        # Standard Transformer-XL relative shift: prepend a zero slice on
        # dim 2, fold via view, then drop the leading row to realign the
        # relative-position (BD) term.
        zero_pad = torch.zeros((x.size(0), x.size(1), 1, x.size(3)),
                               device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=2)
        x_padded = x_padded.view(x.size(0), x.size(2) + 1, x.size(1), x.size(3))
        x = x_padded[:, 1:].view_as(x)
        if zero_triu:
            # Optionally zero out the upper triangle of the shifted scores.
            ones = torch.ones((x.size(0), x.size(1)))
            x = x * torch.tril(ones, x.size(1) - x.size(0))[None, :, :, None]
        return x

    @torch.jit.script_method
    def forward(self, w, r, attn_mask, mems=None):
        raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Transformer-XL attention with learnable relative-position biases.

    The reference einsum contractions are rewritten as batched matmuls
    (torch.bmm) so the module can be TorchScripted.
    """
    __constants__ = ['pre_lnorm', 'n_head', 'd_head', 'scale']

    def __init__(self, *args, **kwargs):
        super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
        # Projects the relative position embeddings r into per-head keys.
        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)

    @torch.jit.script_method
    def forward(self, w, r, r_w_bias, r_r_bias, attn_mask,
                mems: Optional[torch.Tensor] = None):
        # w: current-segment input; r: relative position embeddings;
        # r_w_bias / r_r_bias: shared content/position biases.
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
        if mems is not None:
            # Prepend cached memory so keys/values span mlen + qlen steps;
            # queries come from the current segment only.
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)  # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)       # qlen x n_head x d_head

        # compute attention score
        rw_head_q = w_head_q + r_w_bias  # qlen x bsz x n_head x d_head
        # AC = torch.einsum('ibnd,jbnd->bijn', (rw_head_q, w_head_k))  # bsz x qlen x klen x n_head
        rw_head_q = rw_head_q.view(qlen, bsz * self.n_head, self.d_head).permute(1, 0, 2)
        w_head_k = w_head_k.reshape(klen, bsz * self.n_head, self.d_head).permute(1, 2, 0)
        AC = torch.bmm(rw_head_q, w_head_k).view(bsz, self.n_head, qlen, klen).permute(0, 2, 3, 1)

        rr_head_q = w_head_q + r_r_bias
        # BD = torch.einsum('ibnd,jnd->bijn', (rr_head_q, r_head_k))  # bsz x qlen x klen x n_head
        rr_head_q = rr_head_q.permute(2, 1, 0, 3).reshape(self.n_head, bsz * qlen, self.d_head)
        r_head_k = r_head_k.permute(1, 2, 0).view(self.n_head, self.d_head, klen)
        BD = torch.bmm(rr_head_q, r_head_k).permute(1, 2, 0).view(bsz, qlen, klen, self.n_head)
        # Realign the position term so entry j corresponds to relative offset.
        BD = self._rel_shift(BD, False)

        # [bsz x qlen x klen x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)

        # compute attention probability
        if attn_mask is not None and attn_mask.any():
            # Mask out forbidden (e.g. future) positions before the softmax.
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None, :, :, None], -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:, :, :, None], -float('inf'))

        # [bsz x qlen x klen x n_head]
        attn_prob = F.softmax(attn_score, dim=2)
        attn_prob = self.dropatt(attn_prob)

        # compute attention vector
        # attn_vec = torch.einsum('bijn,jbnd->ibnd', (attn_prob, w_head_v))
        attn_prob = attn_prob.permute(0, 3, 1, 2).reshape(bsz * self.n_head, qlen, klen)
        w_head_v = w_head_v.permute(1, 2, 0, 3).reshape(bsz * self.n_head, klen, self.d_head)
        attn_vec = torch.bmm(attn_prob, w_head_v).permute(1, 0, 2).view(qlen, bsz, self.n_head, self.d_head)

        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.reshape(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)

        # linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            # residual connection
            output = w + attn_out
        else:
            # residual connection + layer normalization
            output = self.layer_norm(w + attn_out)
        output = output.type_as(w)
        return output
class RelPartialLearnableDecoderLayer(torch.jit.ScriptModule):
    """One Transformer-XL decoder layer: relative multi-head attention
    followed by a position-wise feed-forward block."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout,
                 **kwargs):
        super(RelPartialLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
                                                         d_head, dropout,
                                                         **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    @torch.jit.script_method
    def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask,
                mems: Optional[torch.Tensor] = None
                ):
        # Attention over current segment + memory, then the FF block.
        output = self.dec_attn(dec_inp, r, r_w_bias, r_r_bias,
                               attn_mask=dec_attn_mask,
                               mems=mems)
        output = self.pos_ff(output)
        return output
class AdaptiveEmbedding(torch.jit.ScriptModule):
    """Token embedding scaled by sqrt(d_proj).

    The adaptive (multi-cutoff) variant is not supported in this scripted
    build: only div_val == 1 and d_proj == d_embed are allowed, i.e. a single
    plain nn.Embedding over the whole vocabulary.
    """
    __constants__ = ['div_val', 'd_proj', 'd_embed', 'emb_scale']

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 sample_softmax=False):
        super(AdaptiveEmbedding, self).__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj
        self.emb_scale = d_proj ** 0.5
        self.cutoff_ends = [0] + self.cutoffs
        self.emb_layers = nn.ModuleList()
        # NOTE(review): emb_projs stays empty in the supported configuration.
        self.emb_projs = nn.ParameterList()
        if div_val != 1:
            raise RuntimeError('TorchScripted model supports only div_val == 1')
        if d_proj != d_embed:
            raise RuntimeError('TorchScripted model supports only d_proj == d_embed')
        self.emb_layers.append(nn.Embedding(n_token, d_embed))

    @torch.jit.script_method
    def forward(self, x):
        # The list holds exactly one layer; the loop form keeps TorchScript
        # happy with ModuleList iteration. Scaled in place by sqrt(d_proj).
        for emb_layer in self.emb_layers:
            x = emb_layer(x)
        x.mul_(self.emb_scale)
        return x
class MemTransformerLM(torch.jit.ScriptModule):
    """Transformer-XL language model (TorchScripted inference variant).

    Restricted to attn_type == 0 (relative partial-learnable attention);
    segment-level memory is carried between calls as a list of per-layer
    tensors. NOTE(review): several tensors are created on a hard-coded CUDA
    device, so CPU inference is not supported as written.
    """
    __constants__ = ['same_length', 'mem_len', 'clamp_len', 'ext_len',
                     'n_layer', 'dtype']

    def __init__(self, n_token, n_layer, n_head, d_model, d_head, d_inner,
                 dropout, dropatt, dtype, tie_weight=True, d_embed=None,
                 div_val=1, tie_projs=[False], pre_lnorm=False,
                 tgt_len=None, ext_len=None, mem_len=None,
                 cutoffs=[], adapt_inp=False,
                 same_length=False, attn_type=0, clamp_len=-1,
                 sample_softmax=-1):
        # NOTE(review): mutable default args (tie_projs, cutoffs) are shared
        # across instances -- harmless as long as they are never mutated.
        super(MemTransformerLM, self).__init__()
        self.n_token = n_token
        d_embed = d_model if d_embed is None else d_embed
        self.d_embed = d_embed
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.word_emb = AdaptiveEmbedding(n_token, d_embed, d_model, cutoffs,
                                          div_val=div_val)
        self.drop = nn.Dropout(dropout)
        self.n_layer = n_layer
        self.tgt_len = tgt_len
        self.mem_len = mem_len
        self.ext_len = ext_len
        self.max_klen = tgt_len + ext_len + mem_len
        self.dtype = dtype
        self.attn_type = attn_type
        if attn_type != 0:
            raise RuntimeError('TorchScripted supports only attn_type == 0')
        self.layers = nn.ModuleList()
        # the default attention
        for i in range(n_layer):
            self.layers.append(
                RelPartialLearnableDecoderLayer(
                    n_head, d_model, d_head, d_inner, dropout,
                    tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len,
                    dropatt=dropatt, pre_lnorm=pre_lnorm)
            )
        self.crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_model,
                                                cutoffs, div_val=div_val)
        self.same_length = same_length
        self.clamp_len = clamp_len
        self._create_params()

    def _create_params(self):
        # Sinusoidal position embeddings plus the shared content (r_w) and
        # position (r_r) biases used by every attention layer.
        self.pos_emb = PositionalEmbedding(self.d_model)
        self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
        self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))

    @torch.jit.script_method
    def init_mems(self):
        # One empty memory tensor per layer plus one for the embedding output.
        mems = []
        for i in range(self.n_layer+1):
            empty = torch.empty(0, dtype=self.dtype, device=torch.device('cuda'))
            mems.append(empty)
        return mems

    def _update_mems(self, hids: List[torch.Tensor], mems: List[torch.Tensor],
                     qlen: int, mlen: int):
        assert len(hids) == len(mems), 'len(hids) != len(mems)'
        # There are `mlen + qlen` steps that can be cached into mems
        # For the next step, the last `ext_len` of the `qlen` tokens
        # will be used as the extended context. Hence, we only cache
        # the tokens from `mlen + qlen - self.ext_len - self.mem_len`
        # to `mlen + qlen - self.ext_len`.
        new_mems = []
        end_idx = mlen + max(0, qlen - 0 - self.ext_len)
        beg_idx = max(0, end_idx - self.mem_len)
        for i in range(len(hids)):
            cat = torch.cat([mems[i], hids[i]], dim=0)
            new_mems.append(cat[beg_idx:end_idx].detach())
        return new_mems

    @torch.jit.script_method
    def _forward(self, dec_inp, mems: List[torch.Tensor]):
        qlen, bsz = dec_inp.size()
        word_emb = self.word_emb(dec_inp)
        mlen = mems[0].size(0)
        klen = mlen + qlen
        if self.same_length:
            # Every query attends to the same number of keys: causal triu
            # mask plus a sliding lower-triangular cutoff.
            # all_ones = word_emb.new_ones(qlen, klen)
            all_ones = torch.ones((qlen, klen), device=torch.device('cuda'), dtype=torch.float32)
            mask_len = klen - self.mem_len
            if mask_len > 0:
                mask_shift_len = qlen - mask_len
            else:
                mask_shift_len = qlen
            dec_attn_mask = (torch.triu(all_ones, 1+mlen) + torch.tril(all_ones, -mask_shift_len)).to(torch.bool)
        else:
            # Plain causal mask offset by the memory length.
            all_ones = torch.ones((qlen, klen), device=torch.device('cuda'), dtype=torch.float32)
            dec_attn_mask = torch.triu(
                all_ones, diagonal=1+mlen).to(torch.bool)
        hids = []
        # Relative positions run from klen-1 down to 0.
        pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
                               dtype=word_emb.dtype)
        if self.clamp_len > 0:
            pos_seq.clamp_(max=self.clamp_len)
        pos_emb = self.pos_emb(pos_seq)
        core_out = self.drop(word_emb)
        pos_emb = self.drop(pos_emb)
        hids.append(core_out)
        i = 0
        for layer in self.layers:
            mems_i = None if mems is None else mems[i]
            core_out = layer(core_out, pos_emb, self.r_w_bias,
                             self.r_r_bias, dec_attn_mask=dec_attn_mask,
                             mems=mems_i)
            hids.append(core_out)
            i += 1
        core_out = self.drop(core_out)
        new_mems = self._update_mems(hids, mems, qlen, mlen)
        return core_out, new_mems

    @torch.jit.script_method
    def forward(self, data, target, mems: Optional[List[torch.Tensor]]):
        # nn.DataParallel does not allow size(0) tensors to be broadcasted.
        # So, have to initialize size(0) mems inside the model forward.
        # Moreover, have to return new_mems to allow nn.DataParallel to piece
        # them together.
        if mems is None:
            mems = self.init_mems()
        tgt_len = target.size(0)
        hidden, new_mems = self._forward(data, mems=mems)
        # Score only the last tgt_len positions against the targets.
        pred_hid = hidden[-tgt_len:]
        loss = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.view(-1))
        loss = loss.view(tgt_len, -1)
        if new_mems is None:
            return [loss]
        else:
            return [loss] + new_mems
|
#%%
import pandas
import numpy as np

# NOTE(review): no ".csv" extension here -- confirm the file is really named
# "PathaoFoodDeliveryData" on disk.
fileName = "PathaoFoodDeliveryData"
data = pandas.read_csv(fileName)

# Show the orders sorted by restaurant id (descending).
data_sorted = data.sort_values(['restaurant_id'], ascending=False)
print(data_sorted)

# Distinct ids, materialized as lists so items can be addressed by index.
setUid = set(data["user_id"])
setItemID = set(data["item_id"])
setRestaurantID = set(data["restaurant_id"])
uids = list(setUid)
itemID = list(setItemID)
restaurantID = list(setRestaurantID)

# Columns 1..3 assumed to be [item_id, day, hour] -- TODO confirm vs. the CSV.
itemDayHour = data.values[:, [1, 2, 3]]

# Re-index item ids to their position in itemID (0..n-1) in the numpy array.
itemID_len = len(itemID)
for i in range(itemID_len):
    _index = np.where(itemDayHour[:, 0] == itemID[i])
    itemDayHour[_index, 0] = i

# Mirror the same re-indexing in the DataFrame itself.
# (Fixed: the original recomputed itemID_len redundantly here.)
for i in range(itemID_len):
    data['item_id'].replace(
        to_replace=[itemID[i]],
        value=i,
        inplace=True
    )
print(data)
pandas.DataFrame(data.values).to_csv("data_2_.csv")

from sklearn.naive_bayes import GaussianNB

clf = GaussianNB()
# Fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24 (raises
# AttributeError); the builtin int is the documented replacement.
X = itemDayHour[:, [1, 2]].astype(int)
#%%
X
#%%
Y = itemDayHour[:, 0].astype(int)
#%%
Y
#%%
# Predict the item from (day, hour) with a Gaussian naive Bayes classifier.
clf.fit(X, Y)
#%%
print(clf.predict_proba([[7, 3]]))
#%%
clf.classes_
|
from setuptools import setup, find_packages

# Use the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name='typegenie',
    version='0.0.6',
    url="https://github.com/abhitopia/TypeGenieApiClient",
    author="abhitopia",
    author_email="hi@typegenie.net",
    description='Client Library for TypeGenie API. Check out http://api.typegenie.net for more info.',
    long_description=long_description,
    long_description_content_type="text/markdown",
    # src-layout: packages live under src/typegenie.
    packages=find_packages('src', include=['typegenie', 'typegenie.*']),
    # py_modules=[''],
    package_dir={'': 'src'},
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3"
    ],
    # Runtime dependencies.
    install_requires=[
        "pandas",
        "requests"
    ]
)
|
# -----------------------------------------------------
# Analyse Annotations of the Dataset
#
# Author: Qiling Xu
# Creating Date: May 25, 2018
# Last revised: May 25, 2018
# -----------------------------------------------------
import json
import os
import os.path
from PIL import Image
def crop(images, name):
    """Process every cast-member image of every movie entry in `images`.

    `images` maps a movie id to a record whose "cast" list holds entries with
    an "img" path; each image is handed to cropmake for the given split name.
    """
    for movie_id, movie in images.items():
        print(movie_id)
        for member in movie["cast"]:
            cropmake(member["img"], name)
def cropmake(fn, name):
    """Copy image `fn` from the `name` split into the newImage mirror tree.

    Fixes: the Image handle is now closed deterministically via a context
    manager (the original leaked it), and directory creation uses
    exist_ok=True instead of a racy exists()/makedirs() pair.
    """
    # NOTE(review): hard-coded dataset root -- consider making it a parameter.
    loc = 'E:/study/ECCVchallenge/person_search_trainval/'
    with Image.open(loc + '/' + name + '/' + fn) as im:
        try:
            newloc = loc + 'newImage/' + name + '/'
            crop_fn = newloc + fn
            os.makedirs(os.path.dirname(crop_fn), exist_ok=True)
            im.save(crop_fn)
        except SystemError:
            # PIL raises SystemError for some malformed crop/save operations.
            print("Error: error finding or failure cutting")
def main():
    """Crop cast images for both the train and val annotation files."""
    path = 'E:/study/ECCVchallenge/person_search_trainval/'
    for split in ('train', 'val'):
        # Load the split's annotation JSON, then process its images.
        with open(path + split + '.json', 'r') as fp:
            images = json.load(fp)
        crop(images, split)

if __name__ == '__main__':
    main()
|
import os.path as osp
import logging
import time
import argparse
from collections import OrderedDict
import options.options as option
import utils.util as util
from data import create_dataset, create_dataloader
from models import create_model
def main():
    """Run inference on every configured test dataset and save the SR audio."""
    #### options
    parser = argparse.ArgumentParser()
    # NOTE(review): "YMAL" in the help text is a typo for "YAML" (runtime
    # string, left unchanged in this documentation-only pass).
    parser.add_argument('-opt', type=str, required=True, help='Path to options YMAL file.')
    opt = option.parse(parser.parse_args().opt, is_train=False)
    opt = option.dict_to_nonedict(opt)

    # Create result/log directories, skipping roots and pretrain/resume paths.
    util.mkdirs(
        (path for key, path in opt['path'].items()
         if not key == 'experiments_root' and 'pretrain_model' not in key and 'resume' not in key))
    util.setup_logger('base', opt['path']['log'], 'test_' + opt['name'], level=logging.INFO,
                      screen=True, tofile=True)
    logger = logging.getLogger('base')
    logger.info(option.dict2str(opt))

    #### Create test dataset and dataloader
    test_loaders = []
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(test_set, dataset_opt)
        logger.info('Number of test audio files in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))
        test_loaders.append(test_loader)

    model = create_model(opt)
    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        logger.info('\nTesting [{:s}]...'.format(test_set_name))
        test_start_time = time.time()  # NOTE(review): never read afterwards
        dataset_dir = osp.join(opt['path']['results_root'], test_set_name)
        util.mkdir(dataset_dir)

        # NOTE(review): these result lists are never filled in this function.
        test_results = OrderedDict()
        test_results['psnr'] = []
        test_results['psnr_y'] = []

        for data in test_loader:
            # Ground truth is available unless dataroot_GT is unset.
            need_GT = False if test_loader.dataset.opt['dataroot_GT'] is None else True
            model.feed_data(data, need_GT=need_GT)
            audio_path = data['GT_path'][0] if need_GT else data['LQ_path'][0]
            audio_name = osp.splitext(osp.basename(audio_path))[0]
            model.test()
            audio_samples = model.get_current_audio_samples(need_GT=need_GT)
            sr_audio = audio_samples['SR']
            # Optional filename suffix from the config.
            suffix = opt['suffix']
            if suffix:
                save_audio_path = osp.join(dataset_dir, audio_name + suffix + '.wav')
            else:
                save_audio_path = osp.join(dataset_dir, audio_name + '.wav')
            util.save_audio(sr_audio, save_audio_path)
            logger.info(audio_name)

if __name__ == '__main__':
    main()
|
import os
# Both variables must be set before the modules that read them initialize:
# PYTHONUNBUFFERED applies to any child interpreters, and PYTHONASYNCIODEBUG
# is presumably read when asyncio is imported below -- hence the env vars are
# set before that import.
os.environ['PYTHONUNBUFFERED'] = '1'
os.environ['PYTHONASYNCIODEBUG'] = '1'
import logging
logging.basicConfig(level=logging.DEBUG)
import asyncio
# Detach the default event loop so code must create/pass loops explicitly.
asyncio.set_event_loop(None)
|
import logging
import os
from .Plugin import Plugin
from localpdb.utils.config import Config
from localpdb.utils.os import create_directory
from localpdb.utils.network import download_url
logger = logging.getLogger(__name__)
# Plugin specific imports
import gzip
import pandas as pd
from urllib.parse import urlparse
class SIFTS(Plugin):
    """localpdb plugin attaching SIFTS cross-references (NCBI taxonomy, EC,
    Pfam, CATH, SCOP) to the PDB chains tracked by the lpdb object."""

    ### Beginning of the part required for proper plugin handling ###
    #################################################################
    plugin_name = os.path.basename(__file__).split('.')[0]  # Name of the plugin based on the filename
    plugin_config = Config(
        f'{os.path.dirname(os.path.realpath(__file__))}/config/{plugin_name}.yml').data  # Plugin config (dict)
    plugin_dir = plugin_config['path']
    ###########################################################
    ### End of the part required for proper plugin handling ###

    def __init__(self, lpdb):
        super().__init__(lpdb)
        # Local filenames of the raw SIFTS mapping files for this plugin version.
        version_dir = f'{self.plugin_dir}/{self.plugin_version}'
        self.taxonomy_fn = f'{version_dir}/pdb_chain_taxonomy.tsv.gz'
        self.ec_fn = f'{version_dir}/pdb_chain_enzyme.tsv.gz'
        self.pfam_fn = f'{version_dir}/pdb_chain_pfam.tsv.gz'
        self.cath_fn = f'{version_dir}/pdb_chain_cath_uniprot.tsv.gz'
        self.scop_fn = f'{version_dir}/pdb_chain_scop_uniprot.tsv.gz'

    def _load(self):
        """Register and populate all SIFTS-derived attributes on lpdb.

        Taxonomy is added as a column on the chains table; the other four
        mappings become standalone DataFrame attributes.
        """
        self.__load_taxonomy()
        self.lpdb._register_attr('ec')
        self.__load_ec()
        self.lpdb._register_attr('pfam')
        self.__load_pfam()
        self.lpdb._register_attr('cath')
        self.__load_cath()
        self.lpdb._register_attr('scop')
        self.__load_scop()

    def __read_sifts_tsv(self, fn):
        """Read a raw SIFTS TSV (first line is a comment header, hence
        skiprows=1) and add the combined 'pdb_chain' key column."""
        tmp_df = pd.read_csv(fn, skiprows=1, sep='\t')
        tmp_df['pdb_chain'] = tmp_df['PDB'] + '_' + tmp_df['CHAIN']
        return tmp_df

    def __load_mapping(self, fn, src_cols, dst_cols):
        """Load a SIFTS mapping keeping *src_cols* (renamed to *dst_cols*),
        restricted to the chains currently present in lpdb.

        Shared by the EC / Pfam / CATH / SCOP loaders, which previously
        duplicated this logic verbatim.
        """
        tmp_df = self.__read_sifts_tsv(fn)[src_cols]
        tmp_df.columns = dst_cols
        _, pdb_chain_ids = self.lpdb._get_current_indexes()
        return tmp_df[tmp_df['pdb_chain'].isin(pdb_chain_ids)]

    def __load_taxonomy(self):
        """Attach an 'ncbi_taxid' column to the chains table (first TAX_ID
        listed per chain wins)."""
        tmp_df = self.__read_sifts_tsv(self.taxonomy_fn)
        tmp_df_wrapped = tmp_df.groupby(by='pdb_chain').agg({'TAX_ID': 'first'})
        tmp_df_wrapped.columns = ['ncbi_taxid']
        tmp_df_wrapped['ncbi_taxid'] = tmp_df_wrapped['ncbi_taxid'].astype(str)
        self.lpdb._add_col_chains(tmp_df_wrapped)

    def __load_ec(self):
        # Note: EC keeps the (pdb_chain, pdb) column order of the original code.
        self.lpdb.ec = self.__load_mapping(
            self.ec_fn, ['pdb_chain', 'PDB', 'EC_NUMBER'], ['pdb_chain', 'pdb', 'ec_id'])

    def __load_pfam(self):
        self.lpdb.pfam = self.__load_mapping(
            self.pfam_fn, ['PDB', 'pdb_chain', 'PFAM_ID', 'COVERAGE'],
            ['pdb', 'pdb_chain', 'pfam_id', 'pfam_cov'])

    def __load_cath(self):
        self.lpdb.cath = self.__load_mapping(
            self.cath_fn, ['PDB', 'pdb_chain', 'CATH_ID'], ['pdb', 'pdb_chain', 'cath_id'])

    def __load_scop(self):
        self.lpdb.scop = self.__load_mapping(
            self.scop_fn, ['PDB', 'pdb_chain', 'SCOP_ID'], ['pdb', 'pdb_chain', 'scop_id'])

    def _prep_paths(self):
        """Create the plugin storage directories (idempotent)."""
        create_directory(f'{self.plugin_dir}/')
        create_directory(f'{self.plugin_dir}/{self.plugin_version}')

    def _setup(self):
        """Download every raw SIFTS file listed in the plugin config."""
        out_dir = f'{self.plugin_dir}/{self.plugin_version}/'
        for name, url in self.plugin_config['urls'].items():
            out_fn = '{}/{}'.format(out_dir, os.path.basename(urlparse(url).path))
            download_url(url, out_fn, ftp=True)

    def _filter_chains(self, chains):
        """Restrict every per-chain mapping to the given chain ids."""
        self.lpdb.pfam = self.lpdb.pfam[self.lpdb.pfam['pdb_chain'].isin(chains)]
        self.lpdb.ec = self.lpdb.ec[self.lpdb.ec['pdb_chain'].isin(chains)]
        self.lpdb.cath = self.lpdb.cath[self.lpdb.cath['pdb_chain'].isin(chains)]
        self.lpdb.scop = self.lpdb.scop[self.lpdb.scop['pdb_chain'].isin(chains)]

    def _filter_structures(self, structures):
        """Restrict every mapping to the given PDB structure ids."""
        self.lpdb.pfam = self.lpdb.pfam[self.lpdb.pfam['pdb'].isin(structures)]
        self.lpdb.ec = self.lpdb.ec[self.lpdb.ec['pdb'].isin(structures)]
        self.lpdb.cath = self.lpdb.cath[self.lpdb.cath['pdb'].isin(structures)]
        # BUGFIX: this previously built the mask from self.lpdb.cath, so the
        # scop table was filtered with the cath table's boolean index.
        self.lpdb.scop = self.lpdb.scop[self.lpdb.scop['pdb'].isin(structures)]

    def _reset(self):
        """Drop all SIFTS attributes, then re-load them from the raw files."""
        del self.lpdb.pfam
        del self.lpdb.ec
        del self.lpdb.cath
        del self.lpdb.scop
        self.lpdb._remove_attr('pfam')
        self.lpdb._remove_attr('ec')
        self.lpdb._remove_attr('cath')
        self.lpdb._remove_attr('scop')
        # Re-load so the plugin returns to a pristine, fully-populated state.
        self._load()
|
import os
import inspect
from pathlib import Path
def file_path(path):
    """Resolve *path* against a per-caller ``resources/`` directory.

    The caller's module is located via the call stack, its file path is made
    relative to the project base directory, and the result is looked up under
    ``resources/<relative-caller-path>/<path>``.
    """
    # Frame one level up the stack is the function that called file_path().
    frame = inspect.stack()[1]
    caller_module = inspect.getmodule(frame[0])
    # NOTE(review): base_dir is the realpath of THIS file with a literal
    # '/../../' appended, *without* normalization, and is used only via
    # len(base_dir) below. The slice offset therefore counts the filename
    # plus the '../..' characters rather than the length of the actual
    # two-levels-up directory -- confirm this lines up with the intended
    # prefix of caller_module.__file__.
    base_dir = os.path.realpath(__file__) + f'/../../'
    relative_to_base = caller_module.__file__[len(base_dir):]
    return os.path.realpath(f"resources/{relative_to_base}/{path}")
def file_content(path):
    """Return the text content of the resource resolved by file_path()."""
    with open(file_path(path)) as handle:
        return handle.read()
|
from ..errors import OmbottException
class RouteError(OmbottException):
    """Base class for all routing related exceptions.

    Positional arguments behave like a normal exception message; any keyword
    arguments are captured verbatim in ``self.extra``.
    """
    __slots__ = ('extra',)

    def __init__(self, *args, **extra):
        super().__init__(*args)
        self.extra = extra
# Concrete routing failures -- distinguished only by type, no extra state.
class RouteSyntaxError(RouteError):
    """ The route parser found something not supported by this router. """


class RouteBuildError(RouteError):
    """ The route could not be built. """


class RouteMethodError(RouteError):
    """ Method not allowed """
"""
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
from mock import patch
from tools.targets import set_targets_json_location
from tools.test_api import find_tests, build_tests
"""
Tests for test_api.py
"""
# NOTE(review): this looks like an orphaned unittest.TestCase method -- it
# takes `self` but is defined at module level, so no test runner will ever
# call it and the attributes it sets are never used. Presumably the
# enclosing TestCase class was lost; confirm and restore it.
def setUp(self):
    """
    Called before each test case
    :return:
    """
    self.base_dir = 'base_dir'
    self.target = "K64F"
    self.toolchain_name = "ARM"
|
#
# This sets up how models are displayed
# in the web admin interface.
#
from django.conf import settings
from evennia.typeclasses.admin import AttributeInline, TagInline
from evennia.scripts.models import ScriptDB
from django.contrib import admin
class ScriptTagInline(TagInline):
    """
    Inline script tags.
    """
    # Through-model linking ScriptDB to its tags; related_field tells the
    # generic TagInline which FK on the through table points at the script.
    model = ScriptDB.db_tags.through
    related_field = "scriptdb"
class ScriptAttributeInline(AttributeInline):
    """
    Inline attribute tags.
    """
    # Through-model linking ScriptDB to its attributes; related_field names
    # the FK on the through table pointing back at the script.
    model = ScriptDB.db_attributes.through
    related_field = "scriptdb"
class ScriptDBAdmin(admin.ModelAdmin):
    """
    Displaying the main Script page.
    """
    # Columns shown in the change list; id and db_key double as edit links.
    list_display = ('id', 'db_key', 'db_typeclass_path',
                    'db_obj', 'db_interval', 'db_repeats', 'db_persistent')
    list_display_links = ('id', 'db_key')
    ordering = ['db_obj', 'db_typeclass_path']
    # '^db_key' anchors the search at the start of the key.
    search_fields = ['^db_key', 'db_typeclass_path']
    save_as = True
    save_on_top = True
    list_select_related = True
    # Raw-id widget avoids loading every candidate object into a dropdown.
    raw_id_fields = ('db_obj',)

    fieldsets = (
        (None, {
            'fields': (('db_key', 'db_typeclass_path'), 'db_interval',
                       'db_repeats', 'db_start_delay', 'db_persistent',
                       'db_obj')}),
    )

    inlines = [ScriptTagInline, ScriptAttributeInline]

    def save_model(self, request, obj, form, change):
        """
        Model-save hook.

        Args:
            request (Request): Incoming request.
            obj (Object): Database object.
            form (Form): Form instance.
            change (bool): If this is a change or a new object.
        """
        obj.save()
        if not change:
            # adding a new object
            # have to call init with typeclass passed to it
            obj.set_class_from_typeclass(typeclass_path=obj.db_typeclass_path)


# Make ScriptDB editable in the admin site using the options above.
admin.site.register(ScriptDB, ScriptDBAdmin)
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import *
from django.conf import settings
# for boto3
import boto3
import botocore
from botocore.exceptions import ClientError
# for filtering
from PIL import Image, ImageOps, ImageFilter
import os
# for sending presigned url to download images
from django.http import HttpResponseRedirect
def fileExistedOnS3(bucketName, fileName):
    """Return True if *fileName* exists in the S3 bucket *bucketName*.

    A 404 from the HEAD-object call means the key is absent; any other
    client error is unexpected and re-raised.
    """
    s3 = boto3.resource('s3')
    try:
        s3.Object(bucketName, fileName).load()
    except botocore.exceptions.ClientError as err:
        if err.response['Error']['Code'] == "404":
            # The key is simply absent.
            return False
        # Anything else is a real failure: surface it to the caller.
        raise
    return True
def download_image_from_S3(request, bucket_name, image_name):
    """Return a short-lived presigned GET URL for *image_name*, or a
    redirect to the image-not-found page when the object is missing."""
    s3_client = boto3.client('s3')
    image_found = fileExistedOnS3(bucket_name, image_name)
    print("Name of the image is {}".format(image_name))
    print("Value of image_found is {}".format(image_found))
    if not image_found:
        # if not, then dont let it download
        return redirect('imgNotFound')
    # generate a pre-signed download link
    # that only availables for 15 seconds
    return s3_client.generate_presigned_url(
        ClientMethod='get_object',
        Params={'Bucket': bucket_name, 'Key': image_name},
        ExpiresIn=15,
    )
def apply_filter(file_path, preset):
    """Apply the named filter *preset* to the image at *file_path*.

    Supported presets: 'gray', 'edge', 'blur'. The filtered copy is saved
    next to the original as ``<base>_<preset>.<ext>``; 'none' is a no-op
    (nothing is written). An unrecognized preset saves ``<base>_.<ext>``,
    matching the original fallthrough behaviour.
    """
    im = Image.open(file_path)
    # BUGFIX: the previous split('.')[0] / split('.')[-1] broke on paths with
    # dots elsewhere (e.g. './media/img.jpg' -> base ''); splitext only
    # splits off the final extension.
    base, dot_ext = os.path.splitext(file_path)
    fileExt = dot_ext.lstrip('.')
    if preset == 'none':
        print("No need for filter")
        return
    ext = ''
    if preset == 'gray':
        im = ImageOps.grayscale(im)
        ext = 'gray'
    elif preset == 'edge':
        #im = ImageOps.posterize(im, 3)
        #ext = 'poster'
        # Edge detection works on a grayscale copy.
        im = ImageOps.grayscale(im)
        im = im.filter(ImageFilter.FIND_EDGES)
        ext = 'edge'
    elif preset == 'blur':
        #im = ImageOps.solarize(im, threshold=80)
        #ext = 'solar'
        im = im.filter(ImageFilter.BLUR)
        ext = 'blur'
    im.save(base + "_" + ext + "." + fileExt)
    print("Filter was applied successfully")
def upload_to_s3(file_name, bucket, object_name):
    """Upload *file_name* to S3 bucket *bucket* under *object_name*.

    Returns:
        bool: True on success, False if the upload raised a ClientError
        (which is printed rather than propagated).
    """
    # Upload the file
    s3_client = boto3.client('s3')
    # If S3 object_name was not specified, use file_name
    if object_name is None:
        object_name = file_name
    try:
        # Return value of upload_file is always None, so it is not captured
        # (the previous code bound it to an unused `response` local).
        s3_client.upload_file(file_name, bucket, object_name)
    except ClientError as e:
        print(e)
        return False
    return True
# Create your views here.
def image_view(request):
    """Handle the image upload form.

    On a valid POST: save the image locally via the form, apply the chosen
    filter preset, upload the filtered copy to S3 and redirect to the
    success page. On GET (or invalid POST) the form is (re-)rendered.
    """
    if request.method == 'POST':
        form = ImageForm(request.POST, request.FILES)
        # since request.POST is a QueryDict
        # we must convert it to a normal dictionary data type
        # then we can access the field name from there
        print("The file name is: " + request.POST.dict()["name"])
        bucket = "4517-image-app"
        # since the upload_to function replaces white space with _
        # in the name
        # we must do the same here in order to find the image
        ext = request.POST.dict()["ext"]
        file_path = settings.MEDIA_ROOT + "/images/" + request.POST.dict()["name"].replace(" ", "_") + "." + ext
        print(file_path)
        preset = request.POST.dict()["preset_gray_or_edge_or_blur_or_none"]
        if form.is_valid():
            form.save()
            # apply filter
            apply_filter(file_path, preset)
            # upload image to s3
            upload_to_s3(file_path, bucket, request.POST.dict()["name"].replace(" ", "_") + "_" + preset + "." + ext)
            return redirect('success')
    else:
        form = ImageForm()
    return render(request, 'submit.html', {'form' : form})
# Python program to view
# for displaying images
def display_images(request):
    """Render a gallery page listing every file in MEDIA_ROOT/images/.

    NOTE(review): for any non-GET request this view implicitly returns
    None, which Django rejects -- confirm other methods cannot reach it.
    """
    if request.method == 'GET':
        # getting all the objects of hotel.
        # Images = ImageModel.objects.all()
        Images = os.listdir(settings.MEDIA_ROOT + '/images/')
        return render(request, 'display_images.html', {'images': Images})
def download_images_view(request):
    """Handle the "download image" form.

    On a valid POST, reconstruct the S3 object name from the submitted
    fields and redirect the browser to a short-lived presigned URL.

    NOTE(review): when the object is missing, download_image_from_S3
    returns an HttpResponseRedirect object, which is then passed to
    HttpResponseRedirect() as if it were a URL string -- confirm intended.
    """
    if request.method == 'POST':
        # get images
        form = DownloadImageForm(request.POST, request.FILES)
        ext = request.POST.dict()["ext"]
        img_name = request.POST.dict()["name_To_Download"].replace(" ", "_")
        pretext = request.POST.dict()["preset_gray_or_edge_or_blur_or_none"]
        bucket = "4517-image-app"
        if form.is_valid():
            form.save()
            print("Downloading file from S3")
            url = download_image_from_S3(request, bucket, img_name + "_" + pretext + "." + ext)
            print('Presigned URL is {}'.format(url))
            return HttpResponseRedirect(url)
    else:
        form = DownloadImageForm()
    return render(request, 'download.html', {'form' : form})
def success(request):
    """Plain-text confirmation page shown after a successful upload."""
    return HttpResponse('successfully uploaded')


def imgNotFound(request):
    """Plain-text error page shown when the requested image is not on S3."""
    return HttpResponse('Image Not Found. Please go back to the search image page.')
|
# BUGFIX: `import ../daemon/scheduler/task_placer` is not valid Python --
# modules are imported by dotted path, not filesystem path. The dotted form
# below assumes the repository root is on sys.path; confirm against how the
# test suite is launched.
from daemon.scheduler import task_placer
import unittest


class TestTaskPlacer():
    """Placeholder suite for task_placer; no test cases written yet."""
    # NOTE(review): does not subclass unittest.TestCase, so unittest
    # discovery will never run it -- presumably it should be
    # `class TestTaskPlacer(unittest.TestCase)`.
    pass
import typing as T
import numpy as np
import numpy.random
import gemini3d.read
import gemini3d.write
def perturb(cfg: T.Dict[str, T.Any], xg: T.Dict[str, T.Any]):
    """
    perturb plasma from initial_conditions file

    Parameters
    ----------
    cfg: simulation config; "indat_file" and "file_format" are read, and the
        perturbed state is written back to "indat_file".
    xg: simulation grid; "x1"/"x2" coordinate arrays (each carrying 2 ghost
        cells per end) and "lx" dimension triple are read.
    """
    # %% READ IN THE SIMULATION INFORMATION
    # trim ghost cells
    x1 = xg["x1"][2:-2]
    x2 = xg["x2"][2:-2]

    # %% LOAD THE FRAME OF THE SIMULATION THAT WE WANT TO PERTURB
    dat = gemini3d.read.data(cfg["indat_file"], var=["ns", "Ts", "vs1"])

    ns = dat["ns"]
    # Number of species; the last index is recomputed as the sum of the others
    # below ("enforce quasineutrality"), so it plays the neutralizing role.
    lsp = ns.shape[0]

    # %% Choose a single profile from the center of the eq domain as a reference
    ix2 = xg["lx"][1] // 2
    ix3 = xg["lx"][2] // 2
    nsscale = np.zeros_like(ns)
    # Broadcast that center profile across the whole x2/x3 plane per species.
    for i in range(lsp):
        nprof = ns[i, :, ix2, ix3]
        for j in range(xg["lx"][1]):
            for k in range(xg["lx"][2]):
                nsscale[i, :, j, k] = nprof

    # %% SCALE EQ PROFILES UP TO SENSIBLE BACKGROUND CONDITIONS
    scalefact = 2.75 * 6 / 8
    for i in range(lsp - 1):
        nsscale[i, :, :, :] = scalefact * nsscale[i, :, :, :]
    nsscale[-1, :, :, :] = nsscale[:-1, :, :, :].sum(axis=0)
    # enforce quasineutrality

    # %% GDI EXAMPLE (PERIODIC) INITIAL DENSITY STRUCTURE AND SEEDING
    ell = 5e3  # gradient scale length for patch/blob
    x21 = -20e3  # location on one of the patch edges
    x22 = 20e3  # other patch edge
    nepatchfact = 10  # density increase factor over background
    nsperturb = np.zeros_like(ns)
    for i in range(lsp - 1):
        for j in range(xg["lx"][1]):
            amplitude = numpy.random.standard_normal([xg["lx"][0], xg["lx"][2]])
            # AWGN - note that can result in subtractive effects on density so apply a floor later!!!
            amplitude = 0.01 * amplitude
            # amplitude standard dev. is scaled to be 1% of reference profile
            # original data
            # Two opposing tanh ramps carve out the patch between x21 and x22.
            nsperturb[i, :, j, :] = nsscale[i, :, j, :] + nepatchfact * nsscale[i, :, j, :] * (
                1 / 2 * np.tanh((x2[j] - x21) / ell) - 1 / 2 * np.tanh((x2[j] - x22) / ell)
            )
            # patch, note offset in the x2 index!!!!
            if (j > 9) and (j < xg["lx"][1] - 10):
                # do not apply noise near the edge (corrupts boundary conditions)
                nsperturb[i, :, j, :] = nsperturb[i, :, j, :] + amplitude * nsscale[i, :, j, :]

    nsperturb = np.maximum(nsperturb, 1e4)
    # enforce a density floor (particularly need to pull out negative densities
    # which can occur when noise is applied)
    nsperturb[-1, :, :, :] = nsperturb[:-1, :, :, :].sum(axis=0)  # enforce quasineutrality

    # %% KILL OFF THE E-REGION WHICH WILL DAMP THE INSTABILITY (AND USUALLY ISN'T PRESENT IN PATCHES)
    x1ref = 200e3
    # where to start tapering down the density in altitude
    dx1 = 10e3
    taper = 0.5 + 0.5 * np.tanh((x1 - x1ref) / dx1)
    # Note: ix2/ix3 are deliberately reused here as loop indices, shadowing
    # the reference-profile indices computed above (no longer needed).
    for i in range(lsp - 1):
        for ix3 in range(xg["lx"][2]):
            for ix2 in range(xg["lx"][1]):
                nsperturb[i, :, ix2, ix3] = 1e6 + nsperturb[i, :, ix2, ix3] * taper
    nsperturb[-1, :, :, :] = nsperturb[:-1, :, :, :].sum(axis=0)
    # enforce quasineutrality

    # %% WRITE OUT THE RESULTS TO the same file
    gemini3d.write.state(
        cfg["indat_file"],
        dat,
        ns=nsperturb,
        file_format=cfg["file_format"],
    )
|
from django import forms
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.models import User
from .models import UserProfile, UserPost
class ExtendedUserCreationForm(UserCreationForm):
    """Signup form based on Django's UserCreationForm, with mandatory
    email, first name and last name fields added."""
    email = forms.EmailField(required=True, label='Email')
    first_name = forms.CharField(required=True, label="First Name")
    last_name = forms.CharField(required=True, label="Last Name")
    # age = forms.IntegerField(required=True, label="Age")
    # location = forms.CharField(required=True, label="Location")
    # picture = forms.FileField(label="Upload Image")

    class Meta:
        model = User
        fields = ("first_name", "last_name", "username", "email", "password1", "password2")

    def save(self, commit=True):
        """Copy the extra cleaned fields onto the User before saving.

        Args:
            commit (bool): persist the user to the database when True.

        Returns:
            User: the (possibly unsaved) user instance.
        """
        user = super().save(commit=False)
        user.email = self.cleaned_data["email"]
        user.first_name = self.cleaned_data["first_name"]
        user.last_name = self.cleaned_data["last_name"]
        if commit:
            user.save()
        return user
class UserProfileForm(ModelForm):
    """Edit form for the extra per-user profile data."""

    class Meta:
        model = UserProfile
        fields = ("location", "age", "image", "gender")
        # Render gender choices as radio buttons instead of a select box.
        widgets = {'gender': forms.RadioSelect}


class PostForm(forms.ModelForm):
    """Form for creating a user post; only the content is user-editable."""

    class Meta:
        model = UserPost
        fields = ("content", )
def funny_division3(anumber):
    """Divide 100 by *anumber*, demonstrating multiple except clauses.

    Returns a human-readable message string for division by zero or a
    non-numeric argument; for the value 13 a ValueError is raised, printed
    about, and re-raised to the caller.
    """
    try:
        if anumber == 13:
            raise ValueError("13 is an unlucky number")
        return 100 / anumber
    except TypeError:
        return "Enter a numerical value"
    except ZeroDivisionError:
        return "Enter a number other than zero"
    except ValueError:
        print("No, No, not 13!")
        raise
# Demo driver: exercise each handled error case in turn. The final value (13)
# makes funny_division3 re-raise ValueError, so the script deliberately ends
# with a traceback right after printing "No, No, not 13!".
for val in (0, "hello", 50.0, 13):
    print("Testing %s:" % val, end=" ")
    print(funny_division3(val))
|
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import json
from math import fmod, ceil
from lib import octmps
from src.python.octmps_io import show_version
from src.python.mesh import load_mesh
def read_opt_files(opt_file_name):
    """Parse the optical-parameters JSON file.

    Returns a (number_of_runs, params) pair where params holds the probe
    data, the region list (ambient medium first) and the photon count.
    """
    with open(opt_file_name, 'r') as opt_file:
        opt_params = json.load(opt_file)
    number_of_runs = opt_params['number_of_runs']
    probe_data = octmps.simulation.ProbeData(**opt_params['probe_data'])
    # Ambient medium should be the first medium; unspecified Region
    # parameters default to zero.
    regions = [octmps.mesh.Region(n=opt_params['n_ambient_medium'])]
    regions.extend(octmps.mesh.Region(**region_params)
                   for region_params in opt_params['regions'])
    simulation_params = {
        'probe': probe_data,
        'n_regions': len(regions),
        'regions': regions,
        'number_of_photons': opt_params['number_of_photons'],
    }
    return number_of_runs, simulation_params
def read_bias_files(bias_file_name):
    """Load the bias-simulation JSON parameters, deriving two extra fields.

    Adds 'optical_depth_shift' and 'num_optical_depth_length_steps' to the
    dictionary before returning it.
    """
    with open(bias_file_name, 'r') as bias_file:
        bias_params = json.load(bias_file)
    coherence_length = bias_params['coherence_length_source']
    depth_min = bias_params['target_depth_min']
    depth_max = bias_params['target_depth_max']
    # Calculating the Optical Depth Shift
    bias_params['optical_depth_shift'] = fmod(depth_min, coherence_length)
    # Calculating the number of A-scan simulation points (steps) based on the
    # coherent length of the probe and the depth of interest
    raw_steps = octmps.NUM_SUBSTEPS_RESOLUTION * (depth_max - depth_min) / coherence_length
    bias_params['num_optical_depth_length_steps'] = int(ceil(raw_steps))
    return bias_params
def check_number_of_gpu_cards(value):
    """argparse type-checker: *value* must be an int between 1 and the
    number of GPU cards available on this machine.

    Raises:
        argparse.ArgumentTypeError: if the value is out of range.
    """
    # NOTE(review): parse_input() calls
    # octmps.cuda_utils.get_number_of_available_gpu_cards() while this line
    # calls number_of_available_gpu_card() -- confirm which spelling exists
    # in octmps.cuda_utils; one of the two call sites is probably wrong.
    number_of_available_gpu_card = octmps.cuda_utils.number_of_available_gpu_card()
    value = int(value)
    if not (1 <= value <= number_of_available_gpu_card):
        raise argparse.ArgumentTypeError(
            "Number of gpu card must be an integer between 1 to {}".format(number_of_available_gpu_card))
    return value
def parse_input():
    """Parse the command line, load all input files and run the simulation.

    Reads the opt JSON, bias JSON and mesh files named on the command line,
    assembles the Simulation parameters, runs octmps_run, and optionally
    visualizes the mesh afterwards.
    """
    # Usage:
    # OCT-MPS [-S<seed>] [-G<num GPUs>] <input opt file> <input mesh file> <input bias file>
    parser = argparse.ArgumentParser(description='Massively Parallel Simulator of Optical Coherence '
                                                 'Tomography (OCTMPS)')
    parser.add_argument('-S', '--seed', help='random number generator seed',
                        dest='seed',
                        type=int, required=False)
    parser.add_argument('-G', '--number-of-gpu-cards',
                        help='Number of GPU cards, should be between 1 to {}'.format(
                            octmps.cuda_utils.get_number_of_available_gpu_cards()),
                        dest='num_gpu_cards',
                        type=check_number_of_gpu_cards,
                        required=False)
    parser.add_argument('--input-opt-json-file', help='Input opt json file',
                        dest='input_opt_file',
                        type=str, required=True)
    parser.add_argument('--input-mesh-file', help='Input .mesh file',
                        dest='input_mesh_file',
                        type=str, required=True)
    parser.add_argument('--input-bias-json-file', help='Input bias json file',
                        dest='input_bias_file',
                        type=str, required=True)
    parser.add_argument('--visualize', help='Visualize the mesh and B-Scan cross-section',
                        action="store_true", required=False)

    args = parser.parse_args()

    # Merge opt-file and bias-file parameters into one Simulation kwargs dict.
    # NOTE(review): number_of_runs is read but never used here -- confirm.
    params = dict()
    number_of_runs, opt_params = read_opt_files(args.input_opt_file)
    params.update(opt_params)
    bias_params = read_bias_files(args.input_bias_file)
    params.update(bias_params)
    tetrahedrons, num_tetrahedrons, num_vertices, num_faces = load_mesh(args.input_mesh_file)
    params.update({'num_tetrahedrons': num_tetrahedrons})
    simulation = octmps.simulation.Simulation(**params)
    _ = octmps.octmps_run(tetrahedrons, simulation, 1, num_vertices, num_faces, num_tetrahedrons, 1)

    # Mayavi has code in their global name space that will execute command line parsing.
    # hence I cannot include it on top of the file and had to include it here and also remove
    # input command arguments!
    if args.visualize:
        import sys
        sys.argv = sys.argv[:1]
        from src.python.visualize import visualize
        visualize(tetrahedrons)
def run():
    """Entry point: print version information, then parse inputs and simulate."""
    show_version()
    parse_input()


if __name__ == '__main__':
    run()
|
import nested_admin
from django.contrib import admin
from . models import (Page,
PageBlock,
PageCarousel,
PageContact,
PageHeading,
PageHeadingLocalization,
PageLink,
PageLocalization,
PageMedia,
PageMediaCollection,
PageMenu,
PagePublication,
PageRelated)
class PageInline(admin.TabularInline):
    """Tabular inline for child pages."""
    model = Page
    extra = 0
    classes = ['collapse']


class PageLocalizationInline(nested_admin.NestedStackedInline):
    """Stacked inline for page translations."""
    model = PageLocalization
    extra = 0
    classes = ['collapse']


class PageLinkInline(nested_admin.NestedTabularInline):
    """Inline links attached to a page; drag-sortable via `order`."""
    model = PageLink
    extra = 0
    classes = ['collapse']
    sortable_field_name = "order"


class PageRelatedInline(nested_admin.NestedTabularInline):
    """Inline related-page links; fk_name disambiguates the two FKs to Page."""
    model = PageRelated
    fk_name = 'page'
    autocomplete_fields = ('related_page',)
    extra = 0
    classes = ['collapse']
    sortable_field_name = "order"
class PageBlockInline(nested_admin.NestedTabularInline):
    """Inline content blocks; raw-id widget avoids loading every block."""
    model = PageBlock
    extra = 0
    raw_id_fields = ('block',)
    sortable_field_name = "order"


class PagePublicationInline(nested_admin.NestedTabularInline):
    """Inline publications attached to a page."""
    model = PagePublication
    extra = 0
    raw_id_fields = ('publication',)
    sortable_field_name = "order"


class PageCarouselInline(nested_admin.NestedTabularInline):
    """Inline carousels attached to a page."""
    model = PageCarousel
    extra = 0
    classes = ['collapse']
    raw_id_fields = ("carousel",)
    sortable_field_name = "order"


class PageContactInline(nested_admin.NestedTabularInline):
    """Inline contacts attached to a page."""
    model = PageContact
    extra = 0
    classes = ['collapse']
    raw_id_fields = ("contact",)
    sortable_field_name = "order"
class PageMediaInline(nested_admin.NestedTabularInline):
    """Inline media items attached to a page."""
    model = PageMedia
    extra = 0
    classes = ['collapse']
    raw_id_fields = ("media",)
    sortable_field_name = "order"


class PageMediaCollectionInline(nested_admin.NestedTabularInline):
    """Inline media collections attached to a page."""
    model = PageMediaCollection
    extra = 0
    classes = ['collapse']
    raw_id_fields = ("collection",)
    sortable_field_name = "order"


class PageMenuInline(nested_admin.NestedTabularInline):
    """Inline menus attached to a page."""
    model = PageMenu
    extra = 0
    classes = ['collapse']
    raw_id_fields = ("menu",)
    sortable_field_name = "order"


class PageHeadingLocalizationInline(nested_admin.NestedTabularInline):
    """Inline heading translations; nested inside PageHeadingInline below."""
    model = PageHeadingLocalization
    extra = 0
    classes = ['collapse']
    sortable_field_name = "order"


class PageHeadingInline(nested_admin.NestedTabularInline):
    """Inline page headings, each carrying its own localization inlines."""
    model = PageHeading
    extra = 0
    classes = ['collapse']
    sortable_field_name = "order"
    inlines = (PageHeadingLocalizationInline,)
|
"""
Implementation of different training methods
"""
import numpy as np
from sklearn.model_selection import KFold, train_test_split, ShuffleSplit
import pandas as pd
import configparser
from fb_rnn import FBRNN
from forward_rnn import ForwardRNN
from nade import NADE
from bimodal import BIMODAL
from one_hot_encoder import SMILESEncoder
from sklearn.utils import shuffle
import os
from helper import clean_molecule, check_model, check_molecules
np.random.seed(1)
class Trainer():
def __init__(self, experiment_name='ForwardRNN'):
    """Load the experiment .ini config, construct the model and encode the data.

    Args:
        experiment_name (str): base name of ../experiments/<name>.ini and of
            the per-experiment output directories used by the run methods.
    """
    self._encoder = SMILESEncoder()

    # Read all parameter from the .ini file
    self._config = configparser.ConfigParser()
    self._config.read('../experiments/' + experiment_name + '.ini')

    self._model_type = self._config['MODEL']['model']
    self._experiment_name = experiment_name
    self._hidden_units = int(self._config['MODEL']['hidden_units'])

    self._file_name = '../data/' + self._config['DATA']['data']
    self._encoding_size = int(self._config['DATA']['encoding_size'])
    self._molecular_size = int(self._config['DATA']['molecular_size'])

    self._epochs = int(self._config['TRAINING']['epochs'])
    self._n_folds = int(self._config['TRAINING']['n_folds'])
    self._learning_rate = float(self._config['TRAINING']['learning_rate'])
    self._batch_size = int(self._config['TRAINING']['batch_size'])

    self._samples = int(self._config['EVALUATION']['samples'])
    self._T = float(self._config['EVALUATION']['temp'])
    self._starting_token = self._encoder.encode([self._config['EVALUATION']['starting_token']])

    # NOTE(review): FBRNN and NADE are imported at module level but have no
    # branch here, so for any other model type self._model is never assigned
    # and later calls (self._model.build()) raise AttributeError. single_run()
    # also reads self._generation, which is never set here -- confirm whether
    # additional branches/attributes were lost.
    if self._model_type == 'ForwardRNN':
        self._model = ForwardRNN(self._molecular_size, self._encoding_size,
                                 self._learning_rate, self._hidden_units)
    elif self._model_type == 'BIMODAL':
        self._model = BIMODAL(self._molecular_size, self._encoding_size,
                              self._learning_rate, self._hidden_units)

    # One-hot encoded SMILES data for the whole experiment.
    self._data = self._encoder.encode_from_file(self._file_name)
def complete_run(self, stor_dir='../evaluation/', restart=False):
    '''Training without validation on complete data.

    Trains for self._epochs epochs on the full data set, saving the model,
    sampled molecules and training statistics under *stor_dir* after every
    epoch. With restart=True, epochs already completed on disk are skipped.
    '''
    # Create directories
    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/models'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/models')

    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/molecules'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/molecules')

    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/statistic'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/statistic')

    # Compute labels
    label = np.argmax(self._data, axis=-1).astype(int)

    # Build model
    self._model.build()

    # Store total Statistics
    tot_stat = []

    # only single fold
    fold = 1

    # Shuffle data before training (Data reshaped from (N_samples, N_augmentation, molecular_size, encoding_size)
    # to (all_SMILES, molecular_size, encoding_size))
    self._data, label = shuffle(self._data.reshape(-1, self._molecular_size, self._encoding_size),
                                label.reshape(-1, self._molecular_size))

    for i in range(self._epochs):
        print('Fold:', fold)
        print('Epoch:', i)

        # With restart read existing files
        if restart:
            tmp_stat_file = pd.read_csv(
                stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',
                header=None).to_numpy()

            # Check if current epoch is successfully completed else continue with normal training
            if check_model(self._model_type, self._experiment_name, stor_dir, fold, i) and check_molecules(
                    self._experiment_name, stor_dir, fold, i) and tmp_stat_file.shape[0] > i:
                # Load model
                self._model.build(
                    stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))

                # Fill statistic and loss list
                tot_stat.append(tmp_stat_file[i, 1:].reshape(1, -1).tolist())
                continue

            # Continue with normal training
            else:
                restart = False

        # Train model
        statistic = self._model.train(self._data, label, epochs=1, batch_size=self._batch_size)
        tot_stat.append(statistic.tolist())

        # Store model
        self._model.save(
            stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))

        # Sample new molecules
        new_molecules = []
        for s in range(self._samples):
            mol = self._encoder.decode(self._model.sample(self._starting_token, self._T))
            new_molecules.append(clean_molecule(mol[0], self._model_type))

        # Store new molecules
        new_molecules = np.array(new_molecules)
        pd.DataFrame(new_molecules).to_csv(
            stor_dir + '/' + self._experiment_name + '/molecules/molecule_fold_' + str(fold) + '_epochs_' + str(
                i) + '.csv', header=None)

        # Store statistic
        store_stat = np.array(tot_stat).reshape(i + 1, -1)
        pd.DataFrame(np.array(store_stat)).to_csv(
            stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',
            header=None)
def single_run(self, stor_dir='../evaluation/', restart=False):
    '''Training with validation and store data.

    Trains on an 80/20 train/validation split for self._epochs epochs,
    saving the model, sampled molecules, training statistics and validation
    loss under *stor_dir* after every epoch. With restart=True, epochs
    already completed on disk are skipped.
    '''
    # Create directories
    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/models'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/models')

    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/molecules'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/molecules')

    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/statistic'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/statistic')

    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/validation'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/validation')

    # Compute labels
    label = np.argmax(self._data, axis=-1).astype(int)

    # Special preprocessing in the case of NADE
    # NOTE(review): self._generation is never assigned in __init__ as shown
    # in this file, so reaching this branch with a NADE model raises
    # AttributeError -- confirm where _generation should be set.
    if (self._model_type == 'NADE' or self._model_type == 'NADE_v2') and self._generation == 'random':
        # First column stores correct SMILES and second column stores SMILES with missing values
        label = np.argmax(self._data[:, 0], axis=-1).astype(int)
        aug = self._data.shape[1] - 1
        label = np.repeat(label[:, np.newaxis, :], aug, axis=1)
        self._data = self._data[:, 1:]

    # Split data into train and test data
    train_data, test_data, train_label, test_label = train_test_split(self._data, label, test_size=1. / 5,
                                                                      random_state=1, shuffle=True)

    # Build model
    self._model.build()

    # Store total Statistics
    tot_stat = []

    # Store validation loss
    tot_loss = []

    # only single fold
    fold = 1

    for i in range(self._epochs):
        print('Fold:', fold)
        print('Epoch:', i)

        if restart:
            # Read existing files
            tmp_val_file = pd.read_csv(
                stor_dir + '/' + self._experiment_name + '/validation/val_fold_' + str(fold) + '.csv',
                header=None).to_numpy()
            tmp_stat_file = pd.read_csv(
                stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',
                header=None).to_numpy()

            # Check if current epoch is successfully completed else continue with normal training
            if check_model(self._model_type, self._experiment_name, stor_dir, fold, i) and check_molecules(
                    self._experiment_name, stor_dir, fold, i) and tmp_val_file.shape[0] > i and tmp_stat_file.shape[
                    0] > i:
                # Load model
                self._model.build(
                    stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))

                # Fill statistic and loss list
                tot_stat.append(tmp_stat_file[i, 1:].reshape(1, -1).tolist())
                tot_loss.append(tmp_val_file[i, 1])

                # Skip this epoch
                continue

            # Continue with normal training
            else:
                restart = False

        # Train model (Data reshaped from (N_samples, N_augmentation, molecular_size, encoding_size)
        # to (all_SMILES, molecular_size, encoding_size))
        statistic = self._model.train(train_data.reshape(-1, self._molecular_size, self._encoding_size),
                                      train_label.reshape(-1, self._molecular_size), epochs=1,
                                      batch_size=self._batch_size)
        tot_stat.append(statistic.tolist())

        # Store model
        self._model.save(
            stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))

        # Test model on validation set
        tot_loss.append(
            self._model.validate(test_data.reshape(-1, self._molecular_size, self._encoding_size),
                                 test_label.reshape(-1, self._molecular_size)))

        # Sample new molecules
        new_molecules = []
        for s in range(self._samples):
            mol = self._encoder.decode(self._model.sample(self._starting_token, self._T))
            new_molecules.append(clean_molecule(mol[0], self._model_type))

        # Store new molecules
        new_molecules = np.array(new_molecules)
        pd.DataFrame(new_molecules).to_csv(
            stor_dir + '/' + self._experiment_name + '/molecules/molecule_fold_' + str(fold) + '_epochs_' + str(
                i) + '.csv', header=None)

        # Store statistic
        store_stat = np.array(tot_stat).reshape(i + 1, -1)
        pd.DataFrame(np.array(store_stat)).to_csv(
            stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',
            header=None)

        # Store validation data
        pd.DataFrame(np.array(tot_loss).reshape(-1, 1)).to_csv(
            stor_dir + '/' + self._experiment_name + '/validation/val_fold_' + str(fold) + '.csv',
            header=None)
def cross_validation(self, stor_dir='../evaluation/', restart=False):
    '''Perform cross-validation and store data.

    For every fold the model is rebuilt from scratch and trained for
    self._epochs epochs; after each epoch the model weights, sampled
    molecules, training statistics and validation losses are written
    under stor_dir/<experiment_name>/.

    :param stor_dir:  root directory for the models/molecules/statistic/
                      validation output folders
    :param restart:   if True, epochs that already have a stored model,
                      stored molecules and recorded statistic/validation
                      rows are skipped so an interrupted run can resume
                      (NOTE(review): assumes the per-fold CSV files exist
                      when restarting - pd.read_csv raises otherwise;
                      confirm against the calling workflow)
    '''
    # Create output directories on first use
    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/models'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/models')
    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/molecules'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/molecules')
    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/statistic'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/statistic')
    if not os.path.exists(stor_dir + '/' + self._experiment_name + '/validation'):
        os.makedirs(stor_dir + '/' + self._experiment_name + '/validation')

    # Fixed random_state makes the fold split reproducible across runs
    # (required for the restart logic to resume the same folds).
    self._kf = KFold(n_splits=self._n_folds, shuffle=True, random_state=2)

    # Count iterations
    fold = 0

    # Compute labels (index of the one-hot encoding along the last axis)
    label = np.argmax(self._data, axis=-1).astype(int)

    # Split data into train and test data
    for train, test in self._kf.split(self._data):
        # Shuffle index within test and train set
        np.random.shuffle(train)
        np.random.shuffle(test)

        fold += 1
        # Fresh (untrained) model for each fold
        self._model.build()

        # Store total statistics
        tot_stat = []

        # Store validation loss
        tot_loss = []

        for i in range(self._epochs):
            print('Fold:', fold)
            print('Epoch:', i)

            if restart:
                # Previously stored per-fold validation losses and statistics
                tmp_val_file = pd.read_csv(
                    stor_dir + '/' + self._experiment_name + '/validation/val_fold_' + str(fold) + '.csv',
                    header=None).to_numpy()
                tmp_stat_file = pd.read_csv(
                    stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv',
                    header=None).to_numpy()

                # Check if current epoch is successfully completed, else continue with normal training
                if check_model(self._model_type, self._experiment_name, stor_dir, fold, i) and check_molecules(
                        self._experiment_name, stor_dir, fold, i) and tmp_val_file.shape[0] > i and tmp_stat_file.shape[
                        0] > i:
                    # Load model stored for this fold/epoch
                    self._model.build(
                        stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))
                    # Fill statistic and loss list from the stored CSVs
                    # (column 0 is the CSV row index, hence the 1: slice)
                    tot_stat.append(tmp_stat_file[i, 1:].reshape(1, -1).tolist())
                    tot_loss.append(tmp_val_file[i, 1])
                    # Skip this epoch
                    continue
                else:
                    # First incomplete epoch found: stop skipping from here on
                    restart = False

            # Train model (Data reshaped from (N_samples, N_augmentation, molecular_size, encoding_size)
            # to (all_SMILES, molecular_size, encoding_size))
            statistic = self._model.train(
                self._data[train].reshape(-1, self._molecular_size, self._encoding_size),
                label[train].reshape(-1, self._molecular_size), epochs=1, batch_size=self._batch_size)
            tot_stat.append(statistic.tolist())

            # Store model checkpoint for this fold/epoch
            self._model.save(
                stor_dir + '/' + self._experiment_name + '/models/model_fold_' + str(fold) + '_epochs_' + str(i))

            # Test model on validation set
            tot_loss.append(
                self._model.validate(self._data[test].reshape(-1, self._molecular_size, self._encoding_size),
                                     label[test].reshape(-1, self._molecular_size)))

            # Sample new molecules from the current model state
            new_molecules = []
            for s in range(self._samples):
                mol = self._encoder.decode(self._model.sample(self._starting_token, self._T))
                new_molecules.append(clean_molecule(mol[0], self._model_type))

            # Store new molecules
            new_molecules = np.array(new_molecules)
            pd.DataFrame(new_molecules).to_csv(
                stor_dir + '/' + self._experiment_name + '/molecules/molecule_fold_' + str(fold) + '_epochs_' + str(
                    i) + '.csv', header=None)

            # Store statistic (rewritten in full after every epoch so a crash
            # loses at most the current epoch)
            store_stat = np.array(tot_stat).reshape(i + 1, -1)
            pd.DataFrame(np.array(store_stat)).to_csv(
                stor_dir + '/' + self._experiment_name + '/statistic/stat_fold_' + str(fold) + '.csv', header=None)

            # Store validation data
            pd.DataFrame(np.array(tot_loss).reshape(-1, 1)).to_csv(
                stor_dir + '/' + self._experiment_name + '/validation/val_fold_' + str(fold) + '.csv', header=None)
|
# Test fixture: a ground logic program in the lparse/smodels intermediate
# format (rule section terminated by 0, symbol table, B+/B- compute
# statements, then the number of answer sets requested).
# NOTE(review): `input` shadows the builtin of the same name; left as-is
# because callers presumably reference it by this name.
input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 1 1 19
1 20 1 1 21
1 22 1 1 23
1 24 1 1 25
1 26 1 1 27
1 28 1 1 29
1 19 1 1 18
1 21 1 1 20
1 23 1 1 22
1 25 1 1 24
1 27 1 1 26
1 29 1 1 28
1 1 1 1 18
2 30 2 0 1 22 20
2 31 2 0 2 22 20
1 32 2 1 31 30
1 1 1 1 32
2 33 3 0 1 28 26 24
1 34 1 1 33
1 1 1 1 34
2 35 3 0 1 24 20 18
2 36 3 0 2 24 20 18
1 37 2 1 36 35
1 1 1 1 37
2 38 1 0 1 26
1 39 1 1 38
1 1 1 1 39
2 40 2 0 1 28 22
2 41 2 0 2 28 22
1 42 2 1 41 40
1 1 1 1 42
1 1 2 0 20 19
1 1 2 0 22 19
1 1 2 0 24 21
1 1 2 0 24 23
1 1 2 0 26 21
1 1 2 0 26 23
1 1 2 0 28 21
1 1 2 0 28 23
1 1 2 0 20 23
1 1 2 0 22 21
1 1 2 0 24 27
1 1 2 0 24 29
1 1 2 0 26 25
1 1 2 0 26 29
1 1 2 0 28 25
1 1 2 0 28 27
0
2 xsucc(1,2)
3 xsucc(2,3)
18 filled(3,3)
20 filled(3,2)
22 filled(1,2)
24 filled(3,1)
26 filled(2,1)
28 filled(1,1)
4 ysucc(1,2)
5 ysucc(2,3)
19 unfilled(3,3)
21 unfilled(3,2)
23 unfilled(1,2)
25 unfilled(3,1)
27 unfilled(2,1)
29 unfilled(1,1)
12 xvalue(1,0)
13 xvalue(2,1)
14 xvalue(3,1)
6 bottle(1,1,1)
7 bottle(1,2,1)
8 bottle(1,3,1)
9 bottle(1,1,2)
10 bottle(1,3,2)
11 bottle(1,3,3)
15 yvalue(1,1)
16 yvalue(2,0)
17 yvalue(3,1)
0
B+
0
B-
1
0
1
"""
# Expected solver verdict for the fixture above.
output = """
INCOHERENT
"""
|
import pytest
import diglet
def test_live():
    """Live DNS lookups via diglet against google.com.

    NOTE(review): requires network access; `Mkreq` presumably returns a
    dict with an 'answers' list - inferred from usage here only.
    """
    # An A-record query for a major domain should yield at least one answer.
    # (Original used `assert(expr)`; the parentheses are redundant and read
    # as if assert were a function call, so they are dropped.)
    resp = diglet.Mkreq('google.com', qtype=diglet.QType.A)
    assert len(resp['answers']) > 0

    # Same for the MX records.
    resp = diglet.Mkreq('google.com', qtype=diglet.QType.MX)
    assert len(resp['answers']) > 0
|
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Color codes for use by rest of pw_cli."""
import ctypes
import os
import sys
from typing import Optional, Union
import pw_cli.env
def _make_color(*codes):
# Apply all the requested ANSI color codes. Note that this is unbalanced
# with respect to the reset, which only requires a '0' to erase all codes.
start = ''.join(f'\033[{code}m' for code in codes)
reset = '\033[0m'
return lambda msg: f'{start}{msg}{reset}'
# TODO(keir): Totally replace this object with something more complete like the
# 'colorful' module.
class _Color:  # pylint: disable=too-few-public-methods
    """Helpers to surround text with ASCII color escapes"""
    def __init__(self):
        # Attribute name -> ANSI code tuple; each becomes a colorizing
        # callable so callers write e.g. colors().red('text').
        palette = {
            'red': (31, 1),
            'bold_red': (30, 41),
            'yellow': (33, 1),
            'bold_yellow': (30, 43, 1),
            'green': (32,),
            'bold_green': (30, 42),
            'blue': (34, 1),
            'cyan': (36, 1),
            'magenta': (35, 1),
            'bold_white': (37, 1),
            'black_on_white': (30, 47),  # black fg white bg
        }
        for attr, codes in palette.items():
            setattr(self, attr, _make_color(*codes))
class _NoColor:
"""Fake version of the _Color class that doesn't colorize."""
def __getattr__(self, _):
return str
def colors(enabled: Optional[bool] = None) -> Union[_Color, _NoColor]:
    """Returns an object for colorizing strings.

    By default, the object only colorizes if both stderr and stdout are TTYs.
    """
    if enabled is None:
        # Honor the explicit env override, otherwise require both streams
        # to be terminals before emitting escapes.
        env = pw_cli.env.pigweed_environment()
        both_ttys = sys.stdout.isatty() and sys.stderr.isatty()
        enabled = env.PW_USE_COLOR or both_ttys

    if not enabled:
        return _NoColor()

    if os.name == 'nt':
        # Enable ANSI color codes in Windows cmd.exe.
        win_kernel32 = ctypes.windll.kernel32  # type: ignore
        win_kernel32.SetConsoleMode(win_kernel32.GetStdHandle(-11), 7)

    return _Color()
|
"""
Grapevine network connection
This is an implementation of the Grapevine Websocket protocol v 1.0.0 as
outlined here: https://grapevine.haus/docs
This will allow the linked game to transfer status as well as connects
the grapevine client to in-game channels.
"""
import json
from twisted.internet import protocol
from django.conf import settings
from evennia.server.session import Session
from evennia.utils import get_evennia_version
from evennia.utils.logger import log_info, log_err
from autobahn.twisted.websocket import (
WebSocketClientProtocol, WebSocketClientFactory, connectWS)
# There is only one Grapevine endpoint at this time.
GRAPEVINE_URI = "wss://grapevine.haus/socket"

# Credentials and the channel list come from the Django settings module.
GRAPEVINE_CLIENT_ID = settings.GRAPEVINE_CLIENT_ID
GRAPEVINE_CLIENT_SECRET = settings.GRAPEVINE_CLIENT_SECRET
GRAPEVINE_CHANNELS = settings.GRAPEVINE_CHANNELS

# Defined websocket close codes: 1000 is the standard "normal closure";
# 4000-4999 are application-specific codes used by Grapevine.
CLOSE_NORMAL = 1000
GRAPEVINE_AUTH_ERROR = 4000
GRAPEVINE_HEARTBEAT_FAILURE = 4001
class RestartingWebsocketServerFactory(WebSocketClientFactory,
                                       protocol.ReconnectingClientFactory):
    """
    A variant of the websocket-factory that auto-reconnects.
    """
    # Exponential-backoff parameters consumed by ReconnectingClientFactory:
    # first retry after 1s, multiply the delay by 1.5 each attempt, cap at 60s.
    initialDelay = 1
    factor = 1.5
    maxDelay = 60

    def __init__(self, sessionhandler, *args, **kwargs):
        """
        Args:
            sessionhandler (SessionHandler): The portal sessionhandler that
                will own the grapevine session.
        Kwargs:
            uid (int or str): Unique id for the session (required).
            grapevine_channel (str): The grapevine channel this factory
                serves (required).
        """
        self.uid = kwargs.pop('uid')
        self.channel = kwargs.pop('grapevine_channel')
        self.sessionhandler = sessionhandler
        # self.noisy = False
        self.port = None
        self.bot = None
        WebSocketClientFactory.__init__(self, GRAPEVINE_URI, *args, **kwargs)

    def buildProtocol(self, addr):
        """
        Build new instance of protocol

        Args:
            addr (str): Not used, using factory/settings data

        """
        protocol = GrapevineClient()
        protocol.factory = self
        protocol.channel = self.channel
        protocol.sessionhandler = self.sessionhandler
        return protocol

    def startedConnecting(self, connector):
        """
        Tracks reconnections for debugging.

        Args:
            connector (Connector): Represents the connection.

        """
        log_info("(re)connecting to grapevine channel '%s'" % self.channel)

    def clientConnectionFailed(self, connector, reason):
        """
        Called when Client failed to connect.

        Args:
            connector (Connection): Represents the connection.
            reason (str): The reason for the failure.

        """
        # Bugfix: delegate to the parent's clientConnectionFailed (the
        # original mistakenly called clientConnectionLost here), so the
        # reconnecting backoff applies to failed connection *attempts*.
        protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)

    def clientConnectionLost(self, connector, reason):
        """
        Called when Client loses connection.

        Args:
            connector (Connection): Represents the connection.
            reason (str): The reason for the failure.

        """
        # NOTE(review): as written this only retries while self.bot is unset
        # (before onOpen assigned it); `if not (self.bot and self.bot.stopping)`
        # looks like the intent - confirm before changing, left as-is.
        if not (self.bot or (self.bot and self.bot.stopping)):
            self.retry(connector)

    def reconnect(self):
        """
        Force a reconnection of the bot protocol. This requires
        de-registering the session and then reattaching a new one,
        otherwise you end up with an ever growing number of bot
        sessions.

        """
        self.bot.stopping = True
        self.bot.transport.loseConnection()
        self.sessionhandler.server_disconnect(self.bot)
        self.start()

    def start(self):
        "Connect protocol to remote server"
        try:
            from twisted.internet import ssl
        except ImportError:
            log_err("To use Grapevine, The PyOpenSSL module must be installed.")
        else:
            # wss:// requires a TLS context; plain ws:// gets none.
            context_factory = ssl.ClientContextFactory() if self.isSecure else None
            connectWS(self, context_factory)
            # service.name = "websocket/grapevine"
            # self.sessionhandler.portal.services.addService(service)
class GrapevineClient(WebSocketClientProtocol, Session):
    """
    Implements the grapevine client
    """
    def __init__(self):
        WebSocketClientProtocol.__init__(self)
        Session.__init__(self)
        self.restart_downtime = None

    def at_login(self):
        # No extra work needed at login for this session type.
        pass

    def onOpen(self):
        """
        Called when connection is established.

        """
        self.restart_downtime = None
        self.restart_task = None
        self.stopping = False
        self.factory.bot = self

        self.init_session("grapevine", GRAPEVINE_URI, self.factory.sessionhandler)
        self.uid = int(self.factory.uid)
        self.logged_in = True
        self.sessionhandler.connect(self)
        # Grapevine expects authentication immediately after connecting.
        self.send_authenticate()

    def onMessage(self, payload, isBinary):
        """
        Callback fired when a complete WebSocket message was received.

        Args:
            payload (bytes): The WebSocket message received.
            isBinary (bool): Flag indicating whether payload is binary or
                             UTF-8 encoded text.

        """
        if not isBinary:
            data = json.loads(str(payload, 'utf-8'))
            self.data_in(data=data)
            # NOTE(review): retry_task is set but never read in this module;
            # presumably a leftover from the reconnect machinery.
            self.retry_task = None

    def onClose(self, wasClean, code=None, reason=None):
        """
        This is executed when the connection is lost for whatever
        reason. it can also be called directly, from the disconnect
        method.

        Args:
            wasClean (bool): ``True`` if the WebSocket was closed cleanly.
            code (int or None): Close status as sent by the WebSocket peer.
            reason (str or None): Close reason as sent by the WebSocket peer.

        """
        self.disconnect(reason)

        if code == GRAPEVINE_HEARTBEAT_FAILURE:
            log_err("Grapevine connection lost (Heartbeat error)")
        elif code == GRAPEVINE_AUTH_ERROR:
            log_err("Grapevine connection lost (Auth error)")
        elif self.restart_downtime:
            # server previously warned us about downtime and told us to be
            # ready to reconnect.
            log_info("Grapevine connection lost (Server restart).")

    def _send_json(self, data):
        """
        Send (json-) data to client.

        Args:
            data (str): Text to send.

        """
        return self.sendMessage(json.dumps(data).encode('utf-8'))

    def disconnect(self, reason=None):
        """
        Generic hook for the engine to call in order to
        disconnect this protocol.

        Args:
            reason (str or None): Motivation for the disconnection.

        """
        self.sessionhandler.disconnect(self)
        # autobahn-python: 1000 for a normal close, 3000-4999 for app. specific,
        # in case anyone wants to expose this functionality later.
        #
        # sendClose() under autobahn/websocket/interfaces.py
        self.sendClose(CLOSE_NORMAL, reason)

    # send_* method are automatically callable through .msg(heartbeat={}) etc

    def send_authenticate(self, *args, **kwargs):
        """
        Send grapevine authentication. This should be send immediately upon connection.

        """
        data = {
            "event": "authenticate",
            "payload": {
                "client_id": GRAPEVINE_CLIENT_ID,
                "client_secret": GRAPEVINE_CLIENT_SECRET,
                "supports": ["channels"],
                "channels": GRAPEVINE_CHANNELS,
                "version": "1.0.0",
                "user_agent": get_evennia_version('pretty')
            }
        }
        # override on-the-fly
        data.update(kwargs)

        self._send_json(data)

    def send_heartbeat(self, *args, **kwargs):
        """
        Send heartbeat to remote grapevine server.

        """
        # pass along all connected players
        data = {
            "event": "heartbeat",
            "payload": {
            }
        }
        sessions = self.sessionhandler.get_sessions(include_unloggedin=False)
        data['payload']['players'] = [sess.account.key for sess in sessions
                                      if hasattr(sess, "account")]
        self._send_json(data)

    def send_subscribe(self, channelname, *args, **kwargs):
        """
        Subscribe to new grapevine channel

        Use with session.msg(subscribe="channelname")

        """
        data = {
            "event": "channels/subscribe",
            "payload": {
                "channel": channelname
            }
        }
        self._send_json(data)

    def send_unsubscribe(self, channelname, *args, **kwargs):
        """
        Un-subscribe to a grapevine channel

        Use with session.msg(unsubscribe="channelname")

        """
        data = {
            "event": "channels/unsubscribe",
            "payload": {
                "channel": channelname
            }
        }
        self._send_json(data)

    def send_channel(self, text, channel, sender, *args, **kwargs):
        """
        Send text type Evennia -> grapevine

        This is the channels/send message type

        Use with session.msg(channel=(message, channel, sender))

        """
        data = {
            "event": "channels/send",
            "payload": {
                "message": text,
                "channel": channel,
                "name": sender
            }
        }
        self._send_json(data)

    def send_default(self, *args, **kwargs):
        """
        Ignore other outputfuncs

        """
        pass

    def data_in(self, data, **kwargs):
        """
        Send data grapevine -> Evennia

        Kwargs:
            data (dict): Converted json data.

        """
        event = data['event']

        if event == "authenticate":
            # server replies to our auth handshake
            if data['status'] != "success":
                log_err("Grapevine authentication failed.")
                self.disconnect()
            else:
                log_info("Connected and authenticated to Grapevine network.")
        elif event == "heartbeat":
            # server sends heartbeat - we have to send one back
            self.send_heartbeat()
        elif event == "restart":
            # set the expected downtime
            self.restart_downtime = data['payload']['downtime']
        elif event == "channels/subscribe":
            # subscription verification
            if data.get('status', 'success') == "failure":
                err = data.get("error", "N/A")
                # Bugfix: pass the session as first argument, consistent
                # with every other sessionhandler.data_in call in this class.
                self.sessionhandler.data_in(self, bot_data_in=((f"Grapevine error: {err}"),
                                                               {'event': event}))
        elif event == "channels/unsubscribe":
            # unsubscribe-verification
            pass
        elif event == "channels/broadcast":
            # incoming broadcast from network
            # (removed a leftover debug print that wrote every broadcast
            # to stdout)
            payload = data["payload"]
            if str(payload['channel']) != self.channel:
                # only echo from channels this particular bot actually listens to
                return
            else:
                # correct channel
                self.sessionhandler.data_in(
                    self, bot_data_in=(
                        str(payload['message']),
                        {"event": event,
                         "grapevine_channel": str(payload['channel']),
                         "sender": str(payload['name']),
                         "game": str(payload['game'])}))
        elif event == "channels/send":
            pass
        else:
            self.sessionhandler.data_in(self, bot_data_in=("", kwargs))
|
from .mcmc_models import mcmcMLP
from .mcmc_models import mcmcLeNet
from .models import LeNet
from .models import SmallCNN
from .mcmc_models import mcmcSmallCNN
|
from pyshinobicctvapi.connection import Connection
def test_create():
    """Constructing a Connection with an empty host string should succeed.

    Uses the idiomatic `x is not None` (the original `not x is None`
    parses identically but reads ambiguously and is flagged by linters).
    """
    assert Connection("") is not None
"""Djangopress' Urls """
from django.conf.urls import url, include
from .views import PostList, PostDetail, MonthArchive
app_name = 'djangopress'  # pylint: disable=invalid-name

urlpatterns = [
    # Delegate to the flat-pages app first; Django falls through to the
    # patterns below when no included pattern matches.
    url(r'^', include('djangopress.pages.urls')),
    # Blog index and numbered pagination.
    url(r'^$', PostList.as_view(), name='home'),
    url(r'^page/(?P<page>[\d]+)/$', PostList.as_view(), name='page'),
    # Monthly archive, e.g. /archive/2020/7/.
    url(r'^archive/(?P<year>[0-9]{4})/(?P<month>[0-9]+)/$',
        MonthArchive.as_view(),
        name="month_archive"
        ),
    # Individual post looked up by slug.
    url(r'^post/(?P<slug>[\w\d-]+)/$', PostDetail.as_view(), name='post'),
]
|
# -*- coding: utf-8 -*-
# File: myutils.py
# Author: Rafał Nowak <rafal.nowak@cs.uni.wroc.pl>
import os
import logging
import tarfile
import urllib.request
import pickle
import numpy as np
# pylint:
# Where to download the pickled python version of CIFAR-10 from.
CIFAR_DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
# Local cache directory for the archive and its extracted contents.
CIFAR_DIR_PATH = './cifar10_data/'
# Folder name created inside CIFAR_DIR_PATH when the tarball is extracted.
CIFAR_FOLDERNAME = 'cifar-10-batches-py'
CIFAR_BATCH_SIZE = 10000  # CIFAR10 data are split into blocks of 10000 images
# The five training batches and the single test batch shipped with CIFAR10.
CIFAR_TRAINING_FILENAMES = [
    os.path.join(CIFAR_DIR_PATH, CIFAR_FOLDERNAME, 'data_batch_%d' % i) for i in range(1, 6)
]
CIFAR_TESTING_FILENAMES = [os.path.join(CIFAR_DIR_PATH, CIFAR_FOLDERNAME, 'test_batch')]
def read_CIFAR_files(filenames):
    """
    Return the CIFAR dataset loaded from the bunch of files.

    Each batch file is a pickle holding a dict with (at least) the keys
    ``b'data'`` -- a uint8 array of shape (N, 3072), one 32x32 colour image
    per row (1024 red values, then 1024 green, then 1024 blue, each in
    row-major order) -- and ``b'labels'`` -- a list of N ints in 0-9 where
    labels[i] is the class of the i-th row.

    Keyword arguments:
    filenames -- the list of filenames (strings)

    Returns a list of [image, label] pairs, where image is a (32, 32, 3)
    uint8 array in height x width x channel order.
    """
    dataset = []  # dataset to be returned
    for filename in filenames:
        with open(filename, 'rb') as fo:
            batch = pickle.load(fo, encoding='bytes')
        data = batch[b'data']
        labels = batch[b'labels']
        # Sanity-check the per-image layout: 3 channels of 32x32 pixels.
        assert data[0].size == 3 * 32 * 32
        # Iterate over the rows actually present instead of the hard-coded
        # CIFAR_BATCH_SIZE constant; full CIFAR batches contain exactly
        # 10000 rows, so behavior is unchanged for the standard files while
        # partial or non-standard batches now work too.
        for row, label in zip(data, labels):
            # Reshape to (C, H, W) then transpose to (H, W, C) so that
            # image[x, y, :] yields an [R, G, B] triple.
            image = np.transpose(row.reshape(3, 32, 32), [1, 2, 0])
            dataset.append([image, label])
    return dataset
def load_CIFAR_classnames():
    """Return the names of consecutive classes in CIFAR10 database"""
    # Index in this list == numeric label used in the batch files.
    names = 'plane auto bird cat deer dog frog horse ship truck'
    return names.split()
def load_CIFAR_dataset(shuffle=True):
    """
    Download (if necessary) CIFAR database file and extract it.
    Return the tuple of training and testing dataset.

    Keyword arguments:
    shuffle -- if True, both splits are shuffled (via sklearn) before
               being returned
    """
    logging.info("Loading dataset ...")
    # checking if the data is already in the folder
    if not os.path.isdir(os.path.join(CIFAR_DIR_PATH, CIFAR_FOLDERNAME)):
        # if not, we download the data
        os.makedirs(CIFAR_DIR_PATH, exist_ok=True)  # create folder for the data
        filename = CIFAR_DATA_URL.split('/')[-1]
        filepath = os.path.join(CIFAR_DIR_PATH, filename)
        # try to download the file
        try:
            import sys

            # Progress callback for urlretrieve: prints percent downloaded.
            def _progress(cnt, blck_size, total_size):
                sys.stdout.write('\r>> Downloading file %s (%3.1f%%)' % (filename, 100.0*cnt*blck_size/total_size))
                sys.stdout.flush()
            logging.info("Downloading file {f}".format(f=CIFAR_DATA_URL))
            fpath, _ = urllib.request.urlretrieve(CIFAR_DATA_URL, filepath, reporthook=_progress)
            statinfo = os.stat(fpath)
            size = statinfo.st_size
        # NOTE(review): bare except (catches even KeyboardInterrupt); it does
        # re-raise, but `except Exception:` would be the safer spelling.
        except:
            logging.error("Failed to download {f}".format(f=CIFAR_DATA_URL))
            raise
        # NOTE(review): "Succesfully" typo in this user-facing message -
        # left untouched here since it is runtime output.
        print('Succesfully downloaded {f} ({s} bytes)'.format(f=filename, s=size))
        tarfile.open(filepath, 'r:gz').extractall(CIFAR_DIR_PATH)

    trainingData = read_CIFAR_files(CIFAR_TRAINING_FILENAMES)
    testingData = read_CIFAR_files(CIFAR_TESTING_FILENAMES)

    if shuffle:
        logging.info("Shuffling data ...")
        # NOTE(review): relies on `import sklearn` exposing sklearn.utils;
        # this works on common versions but `from sklearn import utils`
        # would be the guaranteed form - confirm before changing.
        import sklearn
        trainingData = sklearn.utils.shuffle(trainingData)
        testingData = sklearn.utils.shuffle(testingData)

    return trainingData, testingData
if __name__ == "__main__":
    # Smoke test: download (if needed) and load the dataset with debug logging.
    logging.basicConfig(level=logging.DEBUG)
    load_CIFAR_dataset()
|
from plugin import plugin, require
import requests
@require(network=True)
@plugin('countryinfo')
class country_info:
    """
    Welcome to the Countryinfo plugin documentation! Here you will be able
    to find all the functionalities of the plugin.

    Usage: Type countryinfo and follow the instructions.
    This plugin gives you several important details corresponding to country which is asked as an input
    Please enter country name in smallcase

    Go on and explore your information!!
    """

    def __call__(self, jarvis, s):
        jarvis.say("Welcome!")
        print()
        fetched = self.get_country(jarvis)
        if fetched is not None:
            self.country_info(jarvis, fetched)

    def get_country(self, jarvis):
        """
        function creates request to api and fetches the corresponding data
        """
        while True:
            country = jarvis.input(
                "Enter the name of the country or type exit to leave: ")
            if country == 'exit':
                return
            if country == '':
                jarvis.say("Please enter valid input.")
                continue
            url = "https://restcountries.eu/rest/v2/name/%s?fullText=true" % country
            payload = requests.get(url).json()
            # The API answers with a dict on errors and a list on success.
            if isinstance(payload, dict):
                jarvis.say("Country not found.")
            else:
                return payload

    def country_info(self, jarvis, country_fetch):
        # The API wraps the single match in a one-element list.
        record = country_fetch[0]
        currency_entry = record["currencies"][0]
        print()
        jarvis.say("Capital: " + record["capital"])
        jarvis.say("Calling Code: " + record["callingCodes"][0])
        jarvis.say("Currency: " + currency_entry["name"])
        jarvis.say("Currency Symbol: " + currency_entry["symbol"])
        jarvis.say("Population: " + str(record["population"]))
        jarvis.say("Region: " + record["region"])
        jarvis.say("Time Zone: " + record["timezones"][0])
        return
|
"""
==============================================
Immunohistochemical staining colors separation
==============================================
In this example we separate the immunohistochemical (IHC) staining
from the hematoxylin counterstaining. The separation is achieved with the
method described in [1]_, known as "color deconvolution".
The IHC staining expression of the FHL2 protein is here revealed with
Diaminobenzidine (DAB) which gives a brown color.
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
"""
import matplotlib.pyplot as plt

from skimage import data
from skimage.color import rgb2hed

# Sample immunohistochemistry image bundled with skimage.
ihc_rgb = data.immunohistochemistry()
# Unmix RGB into Hematoxylin / Eosin / DAB channels using the Ruifrok &
# Johnston colour-deconvolution matrix.
ihc_hed = rgb2hed(ihc_rgb)

# Show the original next to the three separated stain channels.
fig, axes = plt.subplots(2, 2, figsize=(7, 6))
ax0, ax1, ax2, ax3 = axes.ravel()

ax0.imshow(ihc_rgb)
ax0.set_title("Original image")

ax1.imshow(ihc_hed[:, :, 0], cmap=plt.cm.gray)
ax1.set_title("Hematoxylin")

ax2.imshow(ihc_hed[:, :, 1], cmap=plt.cm.gray)
ax2.set_title("Eosin")

ax3.imshow(ihc_hed[:, :, 2], cmap=plt.cm.gray)
ax3.set_title("DAB")

for ax in axes.ravel():
    ax.axis('off')

fig.subplots_adjust(hspace=0.3)
"""
.. image:: PLOT2RST.current_figure
Now we can easily manipulate the hematoxylin and DAB "channels":
"""
import numpy as np
from skimage.exposure import rescale_intensity

# Rescale hematoxylin and DAB signals and give them a fluorescence look
h = rescale_intensity(ihc_hed[:, :, 0], out_range=(0, 1))
d = rescale_intensity(ihc_hed[:, :, 2], out_range=(0, 1))
# Compose an RGB overlay: red channel empty, DAB in green, hematoxylin in blue.
zdh = np.dstack((np.zeros_like(h), d, h))

plt.figure()
plt.imshow(zdh)
plt.title("Stain separated image (rescaled)")
plt.axis('off')
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
|
"""
Provides the following multilateral methods:
* :func:`time_dummy`
* :func:`geary_khamis`
* :func:`geks`
paired with
* :func:`carli`
* :func:`jevons`
* :func:`dutot`
* :func:`laspeyres`
* :func:`paasche`
* :func:`geom_laspeyres`
* :func:`geom_paasche`
* :func:`drobish`
* :func:`marshall_edgeworth`
* :func:`palgrave`
* :func:`fisher`
* :func:`tornqvist`
* :func:`walsh`
* :func:`sato_vartia`
* :func:`geary_khamis_b`
* :func:`tpd`
* :func:`rothwell`
The TDH/TPD methods are model-based multilateral index number methods
which have been proposed to incorporate scanner data. They are part of
many multilateral methods motivated by an attempt to minimize the risk
of chain drift, particularly within a window, while maximizing the
number of matches in the data.
TDH index is used when information on item characteristics are
available, and the TPD index when this information is lacking. The
TDH produces an explicit hedonic price index, while the TPD produces
an implicit hedonic price index, which are both estimated on the
pooled data of one or more periods via an application of expenditure
shares weighted least squares regression.
"""
from typing import List, Sequence, Optional
from itertools import combinations
import pandas as pd
import numpy as np
from scipy.stats.mstats import gmean
from .bilateral import *
from .helpers import diag, _weights_calc
from .weighted_least_squares import wls
__author__ = ['Dr. Usman Kayani']
def geks(
    df: pd.DataFrame,
    price_col: str = 'price',
    quantity_col: str = 'quantity',
    date_col: str='month',
    product_id_col: str='id',
    bilateral_method: str = 'tornqvist',
) -> List:
    """
    Obtain the GEKS indices paired with a bilateral method for a given dataframe.

    Calculate the index values using a for loop to determine the matrix of
    bilaterals, where we exploit the symmetry condition a_{i j} = 1/a_{j i} and
    a_{i i} = 1 to save computation time, followed by a geometric mean.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe containing the data.
    price_col : str, optional
        Name of the column containing the price information.
    quantity_col : str, optional
        Name of the column containing the quantity information.
    date_col : str, optional
        Name of the column containing the date information.
    product_id_col : str, optional
        Name of the column containing the product id information.
    bilateral_method : str, optional
        Name of the bilateral index formula to pair with GEKS - one of the
        functions star-imported from .bilateral, or 'tpd' for the
        two-period time-product-dummy variant.

    Returns
    -------
    List
        List of the GEKS indices.
        (NOTE(review): in fact a pd.Series indexed by the unique periods,
        normalized so the first period equals 1.)
    """
    # Reverse unstack from dynamic window func.
    #df = df.stack().reset_index([date_col, product_id_col])

    # Get unique periods and length of time series.
    periods = df[date_col].unique()
    no_of_periods = len(periods)

    if bilateral_method != 'tpd':
        # Obtain bilateral function for bilateral method.
        # (Resolved by name from this module's globals, so the method must
        # be exported by the star-import of .bilateral above.)
        bilateral_func = globals()[bilateral_method]

    # Intialize matrix for bilateral pairs.
    pindices = np.zeros((no_of_periods, no_of_periods))

    # Only the strict upper triangle (i < j) is computed directly.
    for month_idx in combinations(range(no_of_periods), 2):
        # Get period index for base and current month, and slice df for these
        # months.
        i, j = month_idx
        df_base = df.loc[df[date_col] == periods[i]]
        df_curr = df.loc[df[date_col] == periods[j]]

        # Make sure the sample is matched for given periods.
        df_base = df_base[df_base[product_id_col].isin(df_curr[product_id_col])]
        df_curr = df_curr[df_curr[product_id_col].isin(df_base[product_id_col])]

        if bilateral_method == 'tpd':
            # Use multilateral TPD method with two periods.
            df_matched = (
                pd.concat([df_base, df_curr])
                .drop_duplicates()
                .drop(columns='weights')
            )
            # Recalculate weights for matched df.
            df_matched = _weights_calc(df_matched)
            # Append values to upper triangular of matrix.
            pindices[i, j] = time_dummy(df_matched)[-1]
        else:
            # Find price and quantity vectors of base period and current period.
            p_base = df_base[price_col].to_numpy()
            p_curr = df_curr[price_col].to_numpy()
            data = (p_base, p_curr)

            # Get quantities for bilateral methods that use this information.
            # (Argument order matters: q_base is appended before q_curr to
            # match the bilateral function signatures.)
            if bilateral_method in {
                'laspeyres', 'drobish', 'marshall_edgeworth',
                'geom_laspeyres', 'tornqvist', 'fisher',
                'walsh', 'sato_vartia', 'geary_khamis_b',
                'rothwell', 'lowe'
            }:
                q_base = df_base[quantity_col].to_numpy()
                data += (q_base, )
            if bilateral_method in {
                'paasche', 'drobish','palgrave',
                'marshall_edgeworth', 'geom_paasche', 'tornqvist',
                'fisher', 'walsh', 'sato_vartia',
                'geary_khamis_b'
            }:
                q_curr = df_curr[quantity_col].to_numpy()
                data += (q_curr, )

            # Determine the bilaterals for each base and current period and
            # append to upper tringular of matrix.
            pindices[i, j] = bilateral_func(*data)

    # Exploit symmetry conditions for matrix of bilaterals:
    # lower triangle = elementwise reciprocal of the transpose, and the
    # diagonal (identity) gets a_{i i} = 1.
    pindices_sym = np.copy(pindices.T)
    mask = pindices_sym != 0
    pindices_sym[mask] = 1/pindices_sym[mask]
    pindices += pindices_sym + np.identity(no_of_periods)

    # Calculate geometric mean for the unnormalized price levels.
    # (gmean reduces along axis 0, i.e. one value per column/period.)
    pgeo = gmean(pindices)

    # Normalize to first period.
    return pd.Series(
        pgeo/pgeo[0],
        index=periods,
    )
def time_dummy(
    df: pd.DataFrame,
    price_col: str = 'price',
    quantity_col: str = 'quantity',
    date_col: str = 'month',
    product_id_col: str = 'id',
    engine: str = 'numpy'
) -> List:
    """Obtain the time dummy indices for a given dataframe.

    Calculates the time dummy indices using a formula with weighted least
    squares regression. When passed with characteristics, this function returns
    the Time Dummy Hedonic indices. When passed without it returns the Time
    Product Dummy indices.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe containing the data.
    price_col : str, optional
        Name of the column containing the price information.
    quantity_col : str, optional
        Name of the column containing the quantity information.
        (NOTE(review): currently unused in this function body.)
    date_col : str, optional
        Name of the column containing the date information.
    product_id_col : str, optional
        Name of the column containing the product id information.
    engine : str, optional
        Name of the engine to use for the calculation.

    Returns
    -------
    List
        List of the time dummy indices.
        (NOTE(review): in fact a pd.Series indexed by the unique periods,
        with the first period fixed to 1.)
    """
    # Reverse unstack from dynamic window func.
    # NOTE(review): assumes df arrives in the stacked/wide layout produced
    # by the dynamic-window caller - confirm before reusing standalone.
    df = df.stack().reset_index([date_col, product_id_col])

    # Set the dtype for ID columns, in case it is numerical.
    df[product_id_col] = df[product_id_col].astype(str)

    # Calculate logarithm of the prices for each item for dependent variable.
    df['log_price'] = np.log(df[price_col])

    # Get time series for output index.
    time_series = df[date_col].unique()

    # Get terms for wls regression where characteristics are used if available.
    non_time_vars = [product_id_col]

    # Expenditure-share weighted least squares on log price against time
    # dummies plus product dummies (TPD) / characteristics (TDH).
    model_params = wls(
        df,
        dependent_var='log_price',
        independent_vars=[date_col, *non_time_vars],
        engine=engine
    )

    # Get indices from the time dummy coefficients & set first = 1.
    # (The first period is the regression's reference category, hence the
    # leading 1; exp() maps log-price coefficients back to index levels.)
    is_time_dummy = model_params.index.str.contains(date_col)
    return pd.Series(
        [1, *np.exp(model_params.loc[is_time_dummy])],
        index=time_series,
    )
def geary_khamis(
    df: pd.DataFrame,
    price_col: str = 'price',
    quantity_col: str = 'quantity',
    date_col: str = 'month',
    product_id_col: str = 'id',
    method_type: str = 'matrix',
) -> List:
    r"""Compute the Geary-Khamis indices for a dataframe.

    Parameters
    ----------
    price_col : str, defaults to 'price'
        User-defined price column name.
    quantity_col : str, defaults to 'quantity'
        User-defined quantity column name.
    product_id_col : str, defaults to 'product_id'
        The column name containing product ID values or product names.
    method_type : str, defaults to 'matrix'
        Options: {'matrix', 'iterative'}
        The method type to use for the GK computation.

    Returns
    -------
    List
        The sorted list of indices for each group.

    Notes
    -----
    The matrix method solves for the quality-adjustment factors

    .. math::
        \vec{b}=\left[I_{N}-C+R\right]^{-1} \vec{c}

    with :math:`\vec{c} = [1,0,\ldots,0]^T`, :math:`R` the matrix whose
    first row is all ones and remaining rows zero, and
    :math:`C=\hat{q}^{-1}\sum_{t} s^{t} q^{t\mathbf{T}}` built from total
    quantities, expenditure shares and quantities per period. Price
    levels follow as :math:`P_t = (p^t \cdot q^t)/(\vec{b} \cdot q^t)`
    and are normalized by the first period. If the system matrix is
    (near-)singular the iterative method is used instead.

    References
    ----------
    Diewart, W. E, and Kevin, F. (2017). Substitution Bias in
    Multilateral Methods for CPI Construction Using Scanner Data.
    Discussion Paper 1702. Department of Economics, University of
    British Columbia.
    """
    if method_type not in ('matrix', 'iterative'):
        raise ValueError('The method type must be `matrix` or `iterative`')
    # Handle missing values and pivot into per-measure matrices.
    reshaped = _matrix_method_reshape(df)
    # The number of unique products fixes every matrix dimension below.
    n_products = len(reshaped.index.unique(level=product_id_col))
    prices = reshaped.loc[price_col]
    quantities = reshaped.loc[quantity_col]
    weights = reshaped.loc['weights']
    # Iterative method requested explicitly: no matrices needed.
    if method_type == 'iterative':
        return _geary_khamis_iterative(prices, quantities)
    # Build C = q_hat^{-1} (sum_t s^t q^{tT}): the inverse diagonal of the
    # per-product total quantities, times the weights/quantities product.
    inv_total_quantities = np.diag(1/quantities.T.sum())
    weights_quantities_product = weights @ quantities.T
    c_matrix = inv_total_quantities @ weights_quantities_product
    # R has ones in its first row and zeros elsewhere.
    r_matrix = np.zeros(shape=(n_products, n_products))
    r_matrix[:1] = 1
    # Combo matrix `I_n - C + R` used to solve for the adjustment factors.
    combo_matrix = np.identity(n_products) - c_matrix + r_matrix
    # A (near-)zero determinant makes the direct solve unreliable, so
    # fall back to the iterative method in that case.
    if abs(np.linalg.det(combo_matrix)) <= 1e-7:
        return _geary_khamis_iterative(prices, quantities)
    return _geary_khamis_matrix(prices, quantities, combo_matrix)
def _matrix_method_reshape(df: pd.DataFrame) -> pd.DataFrame:
"""
Reshape df for matrix method and deal with missing values.
We first drop columns which contain all missing values, transpose
the dataframe and then fill the remaining missing values with zero,
to deal with missing items in some periods.
Parameters
----------
df : pd.DataFrame
The dataframe to reshape.
Returns
-------
pd.DataFrame
The reshaped dataframe.
"""
return df.dropna(how='all', axis=1).T.fillna(0)
def _geary_khamis_iterative(
    prices: pd.DataFrame,
    quantities: pd.DataFrame,
    no_of_iterations: int = 100,
    precision: float = 1e-8,
) -> pd.Series:
    """
    Geary-Khamis iterative method.

    Alternates between computing quality-adjustment factors from the
    deflated prices and recomputing price levels from those factors,
    until consecutive price levels agree to within ``precision`` or
    ``no_of_iterations`` passes have run without convergence (in which
    case the last iterate is returned).

    Parameters
    ----------
    prices : pd.DataFrame
        The price dataframe.
    quantities : pd.DataFrame
        The quantity dataframe.
    no_of_iterations : int, defaults to 100
        The number of iterations to perform.
    precision : float, defaults to 1e-8
        The precision to use for the iterative method.

    Returns
    -------
    pd.Series
        The price index values, normalized so the first period equals 1.
    """
    # Initialise index vals as 1's to find the solution with iteration.
    price_levels = pd.Series(1.0, index=prices.columns)
    # Each product's share of the period totals (products x periods).
    quantity_share = quantities.T / quantities.sum(axis=1)
    # Iterate until we reach the set level of precision, or after a set
    # number of iterations if they do not converge.
    for _ in range(no_of_iterations):
        # Obtain matrices for iterative calculation. NOTE(review): `diag`
        # is presumably a module-level helper returning a pandas object
        # (the result supports `.div`), not np.diag — confirm upstream.
        deflated_prices = prices / price_levels
        factors = diag(deflated_prices @ quantity_share)
        # Calculate new price levels from previous value.
        new_price_levels = (
            diag(prices.T @ quantities)
            .div(quantities.T @ factors)
            .squeeze()
        )
        pl_abs_diff = abs(price_levels - new_price_levels)
        if (pl_abs_diff <= precision).all():
            # Break loop when we reach given precision for final price levels.
            break
        else:
            # Otherwise set price level for next iteration.
            price_levels = new_price_levels
    # Normalize by first period for final output.
    return price_levels / price_levels.iloc[0]
def _geary_khamis_matrix(
    prices: pd.DataFrame,
    quantities: pd.DataFrame,
    combo_matrix: pd.DataFrame,
) -> pd.Series:
    """
    Geary-Khamis matrix method.

    Parameters
    ----------
    prices : pd.DataFrame
        The price dataframe.
    quantities : pd.DataFrame
        The quantity dataframe.
    combo_matrix : pd.DataFrame
        The `I_n - C + R` matrix assembled by the caller.

    Returns
    -------
    pd.Series
        The price index values, normalized so the first period equals 1.
    """
    # Calculation of the vector b (factors) required to produce the
    # price levels. Corresponds to `b = [I_n - C + R]^-1 [1,0,..,0]^T`.
    # We use the Moore-Penrose inverse for the matrix inverse.
    factors = np.linalg.pinv(combo_matrix) @ np.eye(len(prices.index), 1)
    # Determine price levels to compute the final index values.
    # NOTE(review): `diag` is presumably a module-level helper returning a
    # pandas object (the result supports `.div`), not np.diag — confirm.
    price_levels = diag(prices.T @ quantities).div(quantities.T @ factors)
    # Normalize price levels to first period for final index values.
    index_vals = price_levels / price_levels.iloc[0]
    # Output as Pandas series for dynamic window.
    return index_vals.iloc[:, 0]
|
###################################################
### ###
### Plot script for experiments on Tasic data ###
### written by Bettina Mieth, Nico Görnitz, ###
### Marina Vidovic and Alex Gutteridge ###
### ###
###################################################
# Please change all directories to yours!
import sys
sys.path.append('/home/bmieth/scRNAseq/implementations')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pdb
from scipy import stats
import pandas as pd
from scipy import stats
def plot_main_opt_mix(fig_num, res, res_opt_mix_ind, res_opt_mix_aris, accs_desc, method_desc, percs, genes, n_src, n_trg, mixes, overlap_setting, source_label_setting):
    """Plot ARI curves (mean +/- SEM over repetitions) into the current axes.

    Draws the two baselines (TargetCluster, ConcatenateCluster) from `res`
    and TransferCluster from `res_opt_mix_aris` against the target-cell
    percentages `percs`. `overlap_setting` (0 = complete, 1 = incomplete)
    and `source_label_setting` (0 = NMF labels, 1 = original publication
    labels) only select the title/annotation text. Several parameters
    (res_opt_mix_ind, accs_desc, method_desc, genes, n_src, mixes) are
    unused but kept for call-site compatibility.
    """
    ind_src = 0  # only the first source dataset is plotted
    plt.figure(fig_num)
    # Baseline methods (TargetCluster and ConcatenateCluster):
    # mean ARI over repetitions with standard errors.
    ari_1_baseline = np.mean(res[ind_src, 0, :, :, 0], axis=0)
    ari_2_baseline = np.mean(res[ind_src, 0, :, :, 1], axis=0)
    ste_ari_1_baseline = stats.sem(res[ind_src, 0, :, :, 0], axis=0, ddof=0)
    ste_ari_2_baseline = stats.sem(res[ind_src, 0, :, :, 1], axis=0, ddof=0)
    # Plot with errorbars
    markers, caps, bars = plt.errorbar(percs, ari_1_baseline, fmt='c', yerr=ste_ari_1_baseline, linewidth=2.0)
    [bar.set_alpha(0.5) for bar in bars]
    [cap.set_alpha(0.5) for cap in caps]
    markers, caps, bars = plt.errorbar(percs, ari_2_baseline, fmt='y', yerr=ste_ari_2_baseline, linewidth=2.0)
    [bar.set_alpha(0.5) for bar in bars]
    [cap.set_alpha(0.5) for cap in caps]
    # Plot our method (TransferCluster)
    ari = np.mean(res_opt_mix_aris[ind_src, :, :], axis=0)
    ste = stats.sem(res_opt_mix_aris[ind_src, :, :], axis=0, ddof=0)
    markers, caps, bars = plt.errorbar(percs, ari, fmt='-b', yerr=ste, linewidth=2.0)
    [bar.set_alpha(0.5) for bar in bars]
    [cap.set_alpha(0.5) for cap in caps]
    if overlap_setting == 0:
        plt.title('Complete overlap', fontsize=22, x=0.5, y=0.93)
    else:
        plt.title('Incomplete overlap', fontsize=22, x=0.5, y=0.93)
    # Fix: the annotation depends on source_label_setting only. A stray
    # unconditional plt.text previously overdrew the 'NMF clustering'
    # label on every panel, including the original-publication ones.
    if source_label_setting == 0:
        plt.text(x=0.15, y=0.88, s='Ground truth labels from NMF clustering', fontsize=14)
    else:
        plt.text(x=0.15, y=0.88, s='Ground truth labels from original publication', fontsize=14)
    plt.xlabel('Target cells', fontsize=16)
    plt.ylabel('ARI', fontsize=16)
    plt.xlim([np.min(percs), np.max(percs)])
    # Fix: np.int was removed in NumPy 1.20+; the builtin int is equivalent.
    plt.xticks(percs, np.array(percs * n_trg, dtype=int), fontsize=13)
    plt.yticks(fontsize=13)
    plt.ylim([0.0, 1.0])
    plt.legend(['TargetCluster', 'ConcatenateCluster', 'TransferCluster'], fontsize=13, loc=4)
def _plot_panel(archive, subplot_index, overlap_setting, source_label_setting):
    """Draw one of the four subplot panels from a loaded .npz result archive.

    `archive['res']` axes: n_src x genes x common x acc_funcs x reps x percs x methods.
    """
    plt.subplot(2, 2, subplot_index)
    plot_main_opt_mix(1,
                      archive['res'],
                      archive['res_opt_mix_ind'],
                      archive['res_opt_mix_aris'],
                      archive['accs_desc'],
                      archive['method_desc'],
                      archive['percs'],
                      archive['genes'],
                      archive['n_src'],
                      archive['n_trg'],
                      archive['mixes'],
                      overlap_setting=overlap_setting,
                      source_label_setting=source_label_setting)


if __name__ == "__main__":
    # Figure path to save to - change to yours!
    fname_plot = '/home/bmieth/scRNAseq/results/mouse_data_final/main_results_mouse_all_four'
    # Location of experimental results - change to yours!
    foo_com_orig = np.load('/home/bmieth/scRNAseq/results/mouse_data_final/main_results_mouse_18clusters_completeoverlap.npz')
    foo_incom_orig = np.load('/home/bmieth/scRNAseq/results/mouse_data_final/main_results_mouse_18clusters_incompleteoverlap.npz')
    foo_com_NMF = np.load('/home/bmieth/scRNAseq/results/mouse_data_NMF_final/main_results_mouse_NMFlabels_18cluster_completeoverlap.npz')
    foo_incom_NMF = np.load('/home/bmieth/scRNAseq/results/mouse_data_NMF_final/main_results_mouse_NMFlabels_18cluster_incompleteoverlap.npz')
    fig = plt.figure(figsize=(16, 16))
    # Four panels: complete/incomplete overlap x NMF/original labels.
    _plot_panel(foo_com_NMF, 1, overlap_setting=0, source_label_setting=0)
    _plot_panel(foo_incom_NMF, 2, overlap_setting=1, source_label_setting=0)
    _plot_panel(foo_com_orig, 3, overlap_setting=0, source_label_setting=1)
    _plot_panel(foo_incom_orig, 4, overlap_setting=1, source_label_setting=1)
    plt.savefig(fname_plot+'.jpg')
    print('Done')
|
# -*-coding:utf8-*-
import math
import torch
import torch.nn as nn
from torchvision.models import efficientnet
from torchvision.ops.misc import ConvNormActivation
from utils.tensor_op import pixel_shuffle
from utils.debug_utils import AverageTimer
def initialize_weights(model):
    """Initialise conv/batch-norm/linear layers of ``model`` in place.

    Conv2d: zero-mean normal with std sqrt(2 / (fan_out // groups)) where
    fan_out = kernel_h * kernel_w * out_channels; BatchNorm2d: weight 1,
    bias 0; Linear: uniform in +/- 1/sqrt(out_features), bias 0.
    """
    for m in model.modules():
        if isinstance(m, torch.nn.Conv2d):
            # Fan-out counts the weights feeding each output position.
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2.0 / (fan_out // m.groups)))
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, torch.nn.BatchNorm2d):
            m.weight.data.fill_(1.0)
            m.bias.data.zero_()
        elif isinstance(m, torch.nn.Linear):
            bound = 1.0 / math.sqrt(m.weight.size()[0])
            m.weight.data.uniform_(-bound, bound)
            # Fix: Linear layers may be built with bias=False; guard the
            # bias access just like the Conv2d branch already does.
            if m.bias is not None:
                m.bias.data.zero_()
def simple_nms(scores, nms_radius: int):
    """ Fast Non-maximum suppression to remove nearby points """
    assert (nms_radius >= 0)

    def local_max(t):
        # Max over a (2r+1)x(2r+1) window centred on each position.
        return torch.nn.functional.max_pool2d(
            t, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius)

    empty = torch.zeros_like(scores)
    keep = scores == local_max(scores)
    # Two refinement passes recover maxima that tie within a window.
    for _ in range(2):
        suppressed = local_max(keep.float()) > 0
        remaining = torch.where(suppressed, empty, scores)
        new_keep = remaining == local_max(remaining)
        keep = keep | (new_keep & (~suppressed))
    return torch.where(keep, scores, empty)
class EfficientBB(nn.Module):
    """EfficientNet-b0 backbone emitting H/4 and H/8 feature maps."""

    def __init__(self, encoder='b0', pretrained=True, coarse_out_ch=128, fine_out_ch=128):
        super(EfficientBB, self).__init__()
        base = efficientnet.efficientnet_b0(pretrained=pretrained)
        # Fresh single-channel (grayscale) stem, stride 2, replacing the
        # stock RGB stem of EfficientNet.
        self.first_conv = ConvNormActivation(
            in_channels=1,
            out_channels=32,
            kernel_size=3,
            stride=2,
            norm_layer=nn.BatchNorm2d,
            activation_layer=nn.SiLU)
        self.layer2_3 = base.features[1:3]  # H/4
        self.layer4 = base.features[3]  # H/8
        initialize_weights(self.first_conv)

    def forward(self, x):
        stem_out = self.first_conv(x)
        feat_quarter = self.layer2_3(stem_out)
        feat_eighth = self.layer4(feat_quarter)
        return feat_quarter, feat_eighth
class EfficientBBV2(nn.Module):
    """EfficientNet-b0 backbone variant keeping the pretrained stem.

    Maps the single-channel input to 3 channels (stride 1) so the full
    pretrained feature stack, including its first block, can be reused.
    Emits H/4 and H/8 feature maps.
    """

    def __init__(self, encoder='b0', pretrained=True, coarse_out_ch=128, fine_out_ch=128):
        super(EfficientBBV2, self).__init__()
        base = efficientnet.efficientnet_b0(pretrained=pretrained)
        self.first_conv = ConvNormActivation(
            in_channels=1,
            out_channels=3,
            kernel_size=3,
            stride=1,
            norm_layer=nn.BatchNorm2d,
            activation_layer=nn.SiLU)
        self.layer1_3 = base.features[:3]  # H/4
        self.layer4 = base.features[3]  # H/8
        initialize_weights(self.first_conv)

    def forward(self, x):
        adapted = self.first_conv(x)
        feat_quarter = self.layer1_3(adapted)
        feat_eighth = self.layer4(feat_quarter)
        return feat_quarter, feat_eighth
class DetectorHead(torch.nn.Module):
    """SuperPoint-style detector head: per-cell logits plus a full-resolution
    probability heatmap recovered via pixel shuffle."""

    def __init__(self, input_channel, grid_size):
        super(DetectorHead, self).__init__()
        self.grid_size = grid_size
        # grid_size^2 positions per cell, plus one "dustbin" (no keypoint).
        cell_bins = pow(grid_size, 2) + 1
        self.convPa = torch.nn.Conv2d(input_channel, 256, 3, stride=1, padding=1)
        self.relu = torch.nn.ReLU(inplace=True)
        self.convPb = torch.nn.Conv2d(256, cell_bins, kernel_size=1, stride=1, padding=0)
        self.bnPa = torch.nn.BatchNorm2d(256)
        self.bnPb = torch.nn.BatchNorm2d(cell_bins)
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, x):
        hidden = self.bnPa(self.relu(self.convPa(x)))
        logits = self.bnPb(self.convPb(hidden))  # (B, 65, H, W)
        # Softmax over bins, then drop the dustbin channel -> (B, 64, H, W).
        prob = self.softmax(logits)
        prob = prob[:, :-1, :, :]
        # Rearrange cell bins into a full-resolution map (B, 1, H*8, W*8).
        prob = pixel_shuffle(prob, self.grid_size)
        return logits, prob
class MagicPoint(nn.Module):
    """ Pytorch definition of SuperPoint Network. """

    def __init__(self, nms, bb_name, input_channel=1, grid_size=8):
        super(MagicPoint, self).__init__()
        self.nms = nms
        self.bb_name = bb_name
        # Pick the backbone by name; both variants feed 40 channels at the
        # H/8 scale into the detector head.
        if bb_name == "EfficientBB":
            self.backbone = EfficientBB()
        elif bb_name == "EfficientBBV2":
            self.backbone = EfficientBBV2()
        else:
            raise ValueError("Backbone not support")
        out_chs = 40
        self.detector_head = DetectorHead(input_channel=out_chs, grid_size=grid_size)
        self.average_time = AverageTimer()

    def forward(self, x):
        """ Forward pass that jointly computes unprocessed point and descriptor
        tensors.
        Input
          x: Image pytorch tensor shaped N x 1 x H x W.
        Output
          semi: Output point pytorch tensor shaped N x 65 x H/8 x W/8.
        """
        self.average_time.reset()  # TODO remove timing instrumentation
        x_s4, x_s8 = self.backbone(x)
        self.average_time.update("backbone")  # TODO remove
        logits, prob = self.detector_head(x_s8)  # N x H x W
        self.average_time.update("detector_head")
        # NMS is applied only at inference time; training consumes raw prob.
        prob_nms = None
        if not self.training:
            prob_nms = simple_nms(prob, nms_radius=self.nms)
            self.average_time.update("nms")
        return x_s4, x_s8, logits, prob, prob_nms
if __name__ == "__main__":
    # Standalone latency benchmark: run one warm-up pass, then 1000 timed
    # forward passes of MagicPoint on a random 608x608 grayscale tensor.
    import time
    device=torch.device("cuda:0")
    net = MagicPoint(nms=4, bb_name="EfficientBBV2")
    net = net.to(device)
    net.eval()
    net.average_time.cuda = True
    in_size=[1, 1, 608, 608]
    with torch.no_grad():
        data = torch.randn(*in_size, device=device)
        # Warm-up pass excluded from the per-stage timer accumulation.
        net.average_time.add = False
        out = net(data)
        net.average_time.add = True
        run_time = 1000
        # Synchronize so time.time() brackets all queued CUDA work.
        torch.cuda.synchronize()
        start_time = time.time()
        for idx in range(run_time):
            out = net(data)
        torch.cuda.synchronize()
        time_interval = time.time() - start_time
        print(time_interval)
        net.average_time.print()
|
## Copyright 2016 - 2018 Raik Gruenberg
## This file is part of the LabHamster project (https://github.com/graik/labhamster).
## LabHamster is released under the MIT open source license, which you can find
## along with this project (LICENSE) or at <https://opensource.org/licenses/MIT>.
from __future__ import unicode_literals
from django.db import models
import django.forms as forms
from django.utils.text import capfirst
from django.utils.translation import gettext as _
import re
from os.path import splitext
class TextField(forms.CharField):
    """
    A multi-line text input area with custom dimensions
    """
    widget = forms.Textarea

    def __init__(self, rows=None, cols=None, attrs=None, *args, **kwargs):
        """
        rows  - int, rows parameter passed to the Textarea widget
        cols  - int, cols parameter passed to the Textarea widget
        attrs - {str:str}, other parameters passed to the widget
        """
        # Fix: avoid a mutable default argument for `attrs`; build a
        # fresh dict per instance.
        self.attrs = {}
        if rows:
            self.attrs['rows'] = rows
        if cols:
            self.attrs['cols'] = cols
        if attrs:
            self.attrs.update(attrs)
        # Widget attributes render into HTML, so coerce values to str.
        for key, value in self.attrs.items():
            if value is not None:
                self.attrs[key] = str(value)
        super(TextField, self).__init__(*args, **kwargs)

    def widget_attrs(self, widget):
        # Return the pre-built attribute dict for any widget instance.
        return self.attrs
class TextModelField(models.TextField):
    # Model-level counterpart of TextField: a TextField whose form widget
    # dimensions (rows/cols) can be configured on the model field.

    def __init__(self, *args, **kw):
        """
        @param rows: int, rows parameter forwarded to the Textarea form widget
        @param cols: int, cols parameter forwarded to the Textarea form widget
        """
        self.rows = kw.pop('rows', None)
        self.cols = kw.pop('cols', None)
        super(TextModelField, self).__init__(*args, **kw)

    def formfield(self, **kwargs):
        # Use the custom TextField form class and forward the dimensions;
        # rows/cols deliberately override any caller-supplied values.
        defaults = {'form_class': TextField}
        defaults.update(kwargs)
        defaults.update({'rows': self.rows,
                         'cols': self.cols})
        return super(TextModelField, self).formfield(**defaults)
class DayConversion:
    """Helpers converting day counts to (duration, unit) pairs and strings."""

    UNITS = ('days', 'weeks', 'months', 'years')
    CONVERSION = (1, 7, 30, 365)

    @staticmethod
    def days2tuple(value):
        """
        value - int, time in days
        -> ( int, int ) - ( duration, factor ) where factor is 1, 7, 30 or 365
        """
        # Fix: zip() is a one-shot iterator without a .reverse() method on
        # Python 3; materialize as a list first. Walk from the largest
        # unit down so e.g. 365 days reports as 1 year, not 52+ weeks.
        choices = list(zip(DayConversion.UNITS, DayConversion.CONVERSION))
        choices.reverse()
        for unit, factor in choices:
            if value % factor == 0:
                # Fix: floor division keeps the duration an int on Python 3.
                return (value // factor, factor)
        return (value, 1)

    @staticmethod
    def tuple2days(time, factor):
        """Inverse of days2tuple: (duration, factor) -> days."""
        return time * factor

    @staticmethod
    def days2str(value):
        """
        value - int, time in days
        -> str, 'duration unit' where unit is days, weeks, months or years
        """
        duration, factor = DayConversion.days2tuple(value)
        lookup = dict(zip(DayConversion.CONVERSION, DayConversion.UNITS))
        unit = lookup[factor]
        if duration == 1:
            # Singular form: '1 week', not '1 weeks'.
            unit = unit[:-1]
        return '%i %s' % (duration, unit)
class DayWidget(forms.MultiWidget):
    """
    A widget that displays a duration of days as days, weeks, months or years
    Adapted from: http://djangosnippets.org/snippets/2327/
    """
    UNITS = ('days', 'weeks', 'months', 'years')
    CONVERSION = (1, 7, 30, 365)

    def __init__(self, attrs=None):
        # Fix: materialize the choices; on Python 3 zip() is a one-shot
        # iterator that would be exhausted after the first render.
        choices = list(zip(self.CONVERSION, self.UNITS))
        self.attrs = attrs or {}
        widgets = (forms.TextInput(attrs={'size': '5'}),
                   forms.Select(attrs=self.attrs, choices=choices))
        super(DayWidget, self).__init__(widgets, attrs)

    def decompress(self, value):
        """
        Called for display of Widget -- convert single value from database
        (days) into two values (integer + unit).
        """
        if value:
            return DayConversion.days2tuple(value)
        return (None, None)
class DayFormField(forms.MultiValueField):
    """Form field pairing an integer duration with a unit choice; the pair
    is compressed into a single day count for storage."""
    widget = DayWidget

    def __init__(self, *args, **kwargs):
        # Fix: materialize the choices (zip is a one-shot iterator on
        # Python 3).
        choices = list(zip(self.widget.CONVERSION, self.widget.UNITS))
        # Preselect the unit requested by the model field (default: days).
        # Fix: this value was previously clobbered by a stray
        # `initial = ('months', '30')` literal, which both discarded the
        # `unitchoice` option and used a malformed (label, value) pair.
        initial = str(kwargs.pop('unitchoice', 1))
        localize = kwargs.get('localize', False)
        fields = (forms.IntegerField(min_value=0, required=False,
                                     localize=localize),
                  forms.ChoiceField(choices=choices, initial=initial))
        # Any caller-supplied error_messages still reach the base class
        # via **kwargs (a previously computed merged dict was never used).
        super(DayFormField, self).__init__(fields, *args, **kwargs)

    def compress(self, data_list):
        """
        convert input from form widget (int + unit) into single number (days)
        """
        if data_list and data_list[0]:
            duration = data_list[0]
            unit = int(data_list[1] or '1')
            return duration * unit
        # Empty input compresses to None (field left blank).
        return None
class DayModelField(models.Field):
    """Integer model field storing a day count, edited via DayFormField."""

    def __init__(self, unit = 'days', *args, **kwargs):
        """
        unit - str, either of: 'days', 'weeks', 'months' or 'years' ['days']
        """
        # Map unit names to their day factors (1, 7, 30, 365).
        self.conversion = dict(zip(DayWidget.UNITS, DayWidget.CONVERSION))
        self.unitchoice = self.conversion.get(unit, None)
        super(DayModelField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Persisted in the database as a plain integer (number of days).
        return 'IntegerField'

    def formfield(self, **kwargs):
        # Route form rendering through DayFormField, preselecting the
        # unit configured on this model field.
        options = {'form_class': DayFormField,
                   'unitchoice': self.unitchoice}
        options.update(**kwargs)
        return super(DayModelField, self).formfield(**options)

    def deconstruct(self):
        """
        Required for migrations support
        https://docs.djangoproject.com/en/1.9/howto/custom-model-fields/#custom-field-deconstruct-method
        """
        name, path, args, kwargs = super(DayModelField, self).deconstruct()
        # Only emit the `unit` kwarg when it differs from the default.
        if self.unitchoice != self.conversion['days']:
            backconversion = dict(zip(self.conversion.values(), self.conversion.keys()))
            kwargs['unit'] = backconversion.get(self.unitchoice, None)
        return name, path, args, kwargs
## replaced by deconstruct() in django v1.7+
##from south.modelsinspector import add_introspection_rules
##add_introspection_rules([], ["^labhamster\.customfields\.datafields\.DayModelField"])
if __name__ == '__main__':
pass |
#!/usr/bin/env python
# All of the argument parsing is done in the `parallel.py` module.
import multiprocessing
import time
import numpy as np
import Starfish
from Starfish.model import ThetaParam, PhiParam
import argparse
# Command-line interface; the flags below select which of the plotting
# modes runs (presumably one at a time — confirm against usage).
parser = argparse.ArgumentParser(prog="plot_many_mix_models.py", description="Plot many mixture models.")
parser.add_argument("--ff", type=int, default=3, help="Number of fill factor models to assume")
parser.add_argument("--config", action='store_true', help="Use config file instead of emcee.")
parser.add_argument("--static", action="store_true", help="Make a static figure of one draw")
parser.add_argument("--animate", action="store_true", help="Make an animation of many draws from the two components.")
parser.add_argument("--OG", action="store_true", help="The Original Gangster version, clunky and all.")
args = parser.parse_args()
import os
import matplotlib.pyplot as plt
import os
import Starfish.grid_tools
from Starfish.spectrum import DataSpectrum, Mask, ChebyshevSpectrum
from Starfish.emulator import Emulator
import Starfish.constants as C
from Starfish.covariance import get_dense_C, make_k_func, make_k_func_region
from scipy.special import j1
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.linalg import cho_factor, cho_solve
from numpy.linalg import slogdet
from astropy.stats import sigma_clip
import gc
import logging
from itertools import chain
#from collections import deque
from operator import itemgetter
import yaml
import shutil
import json
from star_base import Order as OrderBase
from star_base import SampleThetaPhi as SampleThetaPhiBase
Starfish.routdir = ""
# list of keys from 0 to (norders - 1)
order_keys = np.arange(1)
# Open every configured data spectrum, restricted to the configured orders.
DataSpectra = [DataSpectrum.open(os.path.expandvars(file), orders=Starfish.data["orders"]) for file in Starfish.data["files"]]
# list of keys from 0 to (nspectra - 1) Used for indexing purposes.
spectra_keys = np.arange(len(DataSpectra))
#Instruments are provided as one per dataset
# NOTE(review): eval() on config-supplied instrument names executes
# arbitrary code if the config is untrusted; a getattr lookup on
# Starfish.grid_tools would be safer.
Instruments = [eval("Starfish.grid_tools." + inst)() for inst in Starfish.data["instruments"]]
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", filename="{}log.log".format(
    Starfish.routdir), level=logging.DEBUG, filemode="w", datefmt='%m/%d/%Y %I:%M:%S %p')
class Order(OrderBase):
    """Order specialised for a two-component (mixture) spectral model.

    Extends the base order with a second emulator component (attributes
    suffixed ``2``) so the modelled flux is the sum of two independently
    scaled stellar models.
    """

    def initialize(self, key):
        # Base initialisation, then slots for the secondary component's
        # emulator products (populated by update_Theta).
        OrderBase.initialize(self, key)
        self.flux_scalar2 = None
        self.mus2, self.C_GP2 = None, None
        self.Omega2 = None

    def evaluate(self):
        '''
        Return the lnprob using the current version of the C_GP matrix, data matrix,
        and other intermediate products.
        '''
        self.lnprob_last = self.lnprob
        X = (self.chebyshevSpectrum.k * self.flux_std * np.eye(self.ndata)).dot(self.eigenspectra.T)
        # Total covariance: GP terms from both components plus the data matrix.
        part1 = self.Omega**2 * self.flux_scalar**2 * X.dot(self.C_GP.dot(X.T))
        part2 = self.Omega2**2 * self.flux_scalar2**2 * X.dot(self.C_GP2.dot(X.T))
        part3 = self.data_mat
        CC = part1 + part2 + part3
        # NOTE(review): debug dump on every evaluation; remove once no
        # longer needed — it rewrites CC.npy in the working directory.
        np.save('CC.npy', CC)
        try:
            factor, flag = cho_factor(CC)
        # Fix: catch np.linalg.LinAlgError directly; the old
        # np.linalg.linalg private alias is removed in NumPy 2.0.
        except np.linalg.LinAlgError:
            print("Spectrum:", self.spectrum_id, "Order:", self.order)
            self.CC_debugger(CC)
            raise
        try:
            model1 = self.Omega * self.flux_scalar *(self.chebyshevSpectrum.k * self.flux_mean + X.dot(self.mus))
            model2 = self.Omega2 * self.flux_scalar2 * (self.chebyshevSpectrum.k * self.flux_mean + X.dot(self.mus2))
            net_model = model1 + model2
            R = self.fl - net_model
            logdet = np.sum(2 * np.log((np.diag(factor))))
            self.lnprob = -0.5 * (np.dot(R, cho_solve((factor, flag), R)) + logdet)
            self.logger.debug("Evaluating lnprob={}".format(self.lnprob))
            return self.lnprob
        # To give us some debugging information about what went wrong.
        except np.linalg.LinAlgError:
            print("Spectrum:", self.spectrum_id, "Order:", self.order)
            raise

    def update_Theta(self, p):
        """Update base parameters, then the secondary component from
        ``p.teff2`` and ``p.logOmega2``."""
        OrderBase.update_Theta(self, p)
        # Secondary emulator point: teff2 with the remaining grid params.
        self.emulator.params = np.append(p.teff2, p.grid[1:])
        self.mus2, self.C_GP2 = self.emulator.matrix
        self.flux_scalar2 = self.emulator.absolute_flux
        self.Omega2 = 10**p.logOmega2

    def draw_save(self):
        '''
        Return the current composite (two-component) model spectrum.
        '''
        self.lnprob_last = self.lnprob
        X = (self.chebyshevSpectrum.k * self.flux_std * np.eye(self.ndata)).dot(self.eigenspectra.T)
        model1 = self.Omega * self.flux_scalar *(self.chebyshevSpectrum.k * self.flux_mean + X.dot(self.mus))
        model2 = self.Omega2 * self.flux_scalar2 * (self.chebyshevSpectrum.k * self.flux_mean + X.dot(self.mus2))
        net_model = model1 + model2
        model_out = net_model
        return model_out
class SampleThetaPhi(Order, SampleThetaPhiBase):
    # Mix the two-component Order into the sampling machinery; the MRO
    # resolves shared methods from Order first.
    pass
# Instantiate the module-level model used by lnlike/lnprob below.
model = SampleThetaPhi(debug=True)
model.initialize((0,0))
def lnlike(p):
    """Data log-likelihood for parameter vector ``p``; -inf on ModelError."""
    try:
        # Stellar (Theta) parameters: grid[0:3], vz, vsini, logOmega,
        # plus the secondary component's teff2 and logOmega2.
        theta = ThetaParam(grid=p[0:3], vz=p[3], vsini=p[4], logOmega=p[5], teff2=p[6], logOmega2=p[7])
        model.update_Theta(theta)
        # Nuisance (Phi) parameters; npoly=3 hard coded (for fixc0 = True
        # with npoly=4).
        phi = PhiParam(0, 0, True, p[8:11], p[11], p[12], p[13])
        model.update_Phi(phi)
        return model.evaluate()
    except C.ModelError:
        model.logger.debug("ModelError in stellar parameters, sending back -np.inf {}".format(p))
        return -np.inf
# Must load a user-defined prior. The file named by the Theta_priors
# config entry is exec'd and is expected to define `user_defined_lnprior`.
# NOTE(review): exec of a config-named file runs arbitrary code; only use
# with trusted configuration.
try:
    sourcepath_env = Starfish.config['Theta_priors']
    sourcepath = os.path.expandvars(sourcepath_env)
    with open(sourcepath, 'r') as f:
        sourcecode = f.read()
    code = compile(sourcecode, sourcepath, 'exec')
    exec(code)
    lnprior = user_defined_lnprior
    print("Using the user defined prior in {}".format(sourcepath_env))
# Bare except is acceptable here only because it re-raises immediately
# after printing a hint; any failure to load the prior is fatal.
except:
    print("Don't you want to use a user defined prior??")
    raise
# Posterior = user prior + data likelihood.
def lnprob(p):
    """Log-posterior of ``p``; -inf wherever the prior is not finite."""
    prior_value = lnprior(p)
    if not np.isfinite(prior_value):
        return -np.inf
    return prior_value + lnlike(p)
# NOTE(review): this re-instantiates the module-level `model`, repeating
# the identical construction above; the second instance replaces the
# first and the earlier one is discarded.
model = SampleThetaPhi(debug=True)
model.initialize((0,0))
def lnprob_all(p):
    """Update the model from ``p`` and return the drawn composite spectrum."""
    theta = ThetaParam(grid=p[0:3], vz=p[3], vsini=p[4], logOmega=p[5], teff2=p[6], logOmega2=p[7])
    model.update_Theta(theta)
    # npoly=3 hard coded (for fixc0 = True with npoly=4).
    phi = PhiParam(0, 0, True, p[8:11], p[11], p[12], p[13])
    model.update_Phi(phi)
    # Evaluate refreshes the intermediate products draw_save depends on;
    # the lnprob value itself is not needed here.
    model.evaluate()
    return model.draw_save()
draws = []
#Colorbrewer bands
s3 = '#fee6ce'
s2 = '#fdae6b'
s1 = '#e6550d'
wl = model.wl
data = model.fl
import pandas as pd
import json
# --config mode: build the parameter vector from the config file and a
# saved phi JSON, write composite/cool/hot model spectra to CSV, and exit.
if args.config:
    df_out = pd.DataFrame({'wl':wl, 'data':data})
    with open('s0_o0phi.json') as f:
        s0phi = json.load(f)
    # Assemble the full parameter vector: Theta grid + scalars, then
    # Chebyshev coefficients and GP hyperparameters from the phi file.
    psl = (Starfish.config['Theta']['grid']+
           [Starfish.config['Theta'][key] for key in ['vz', 'vsini', 'logOmega', 'teff2', 'logOmega2']] +
           s0phi['cheb'] +
           [s0phi['sigAmp']] + [s0phi['logAmp']] + [s0phi['l']])
    ps = np.array(psl)
    df_out['model_composite'] = lnprob_all(ps)
    # Suppressing a component: logOmega (index 5) or logOmega2 (index 7)
    # set to -20 effectively zeroes that component's contribution.
    pset1 = ps.copy()
    pset1[5] = -20
    df_out['model_cool50'] = lnprob_all(pset1)
    pset2 = ps.copy()
    pset2[7] = -20
    df_out['model_hot50'] = lnprob_all(pset2)
    df_out.to_csv('spec_config.csv', index=False)
    import sys
    sys.exit()
# Load the emcee chain; fall back to the in-progress temp chain, trimming
# to the last 200 filled steps (unfilled steps are all-zero rows).
try:
    ws = np.load("emcee_chain.npy")
    burned = ws[:, -200:,:]
except:
    ws = np.load("temp_emcee_chain.npy")
    # Number of steps actually written so far.
    max_save = ws.any(axis=(0,2)).sum()
    burned = ws[:, max_save-200:max_save,:]
# Flatten walkers x steps into a single flatchain (samples x params).
xs, ys, zs = burned.shape
fc = burned.reshape(xs*ys, zs)
nx, ny = fc.shape
if args.OG:
    median_vz_shift = np.median(fc[:, 3])
    dlam = median_vz_shift/299792.0*np.median(wl)
    # Get the line list of strong lines in Arcturus
    #all_ll = pd.read_csv('/Users/obsidian/GitHub/ApJdataFrames/data/Rayner2009/tbl7_clean.csv')
    #all_ll['wl_A'] = all_ll.wl*10000.0
    #ll = all_ll[ (all_ll.wl_A > np.min(wl)) & (all_ll.wl_A < np.max(wl)) ]
    #ll = ll.reset_index()
    # Sort the flatchain by fill factor:
    ff = 10**fc[:, 7]/(10**fc[:, 7]+10**fc[:, 5])
    inds_sorted = np.argsort(ff)
    fc_sorted = fc[inds_sorted]
    # If we use 8000 samples, the 5th and 95th percentile samples are at:
    ind_lo = 400 #0.05*8000
    ind_med = 4000 #0.50*8000
    ind_hi = 7600 #0.95*8000
    df_out = pd.DataFrame({'wl':wl, 'data':data})
    # For each percentile sample: composite model, then each component
    # isolated by suppressing the other (logOmega/logOmega2 -> -20).
    # Low end:
    ps_lo = fc_sorted[ind_lo]
    print(ps_lo)
    df_out['model_comp05'] = lnprob_all(ps_lo)
    pset1 = ps_lo.copy()
    pset1[5] = -20
    df_out['model_cool05'] = lnprob_all(pset1)
    pset2 = ps_lo.copy()
    pset2[7] = -20
    df_out['model_hot05'] = lnprob_all(pset2)
    # Middle:
    ps_med = fc_sorted[ind_med]
    df_out['model_comp50'] = lnprob_all(ps_med)
    pset1 = ps_med.copy()
    pset1[5] = -20
    df_out['model_cool50'] = lnprob_all(pset1)
    pset2 = ps_med.copy()
    pset2[7] = -20
    df_out['model_hot50'] = lnprob_all(pset2)
    # Hi end:
    ps_hi = fc_sorted[ind_hi]
    df_out['model_comp95'] = lnprob_all(ps_hi)
    pset1 = ps_hi.copy()
    pset1[5] = -20
    df_out['model_cool95'] = lnprob_all(pset1)
    pset2 = ps_hi.copy()
    pset2[7] = -20
    df_out['model_hot95'] = lnprob_all(pset2)
    df_out.to_csv('models_ff-05_50_95.csv', index=False)
if args.static:
    # Same percentile-spectra CSV as the OG branch, but from a fixed slice
    # of the chain with hard-coded sample indices.
    draws = []
    ws = np.load("emcee_chain.npy")
    # Only steps 4997-4999 of every walker.
    burned = ws[:, 4997:5000,:]
    xs, ys, zs = burned.shape
    fc = burned.reshape(xs*ys, zs)
    nx, ny = fc.shape
    median_vz_shift = np.median(fc[:, 3])
    dlam = median_vz_shift/299792.0*np.median(wl)
    # Sort the flatchain by fill factor:
    # (not actually sorted here -- indices below index the raw flatchain)
    fc_sorted = fc
    ind_med = 60 #Random
    ind_lo = 6 #0.05*8000
    ind_hi = 114 #0.95*8000
    df_out = pd.DataFrame({'wl':wl, 'data':data})
    # Low end:
    ps_lo = fc_sorted[ind_lo]
    print(ps_lo)
    df_out['model_comp05'] = lnprob_all(ps_lo)
    # logOmega (5) / logOmega2 (7) set to -20 isolates each component.
    pset1 = ps_lo.copy()
    pset1[5] = -20
    df_out['model_cool05'] = lnprob_all(pset1)
    pset2 = ps_lo.copy()
    pset2[7] = -20
    df_out['model_hot05'] = lnprob_all(pset2)
    # Middle:
    ps_med = fc_sorted[ind_med]
    df_out['model_comp50'] = lnprob_all(ps_med)
    pset1 = ps_med.copy()
    pset1[5] = -20
    df_out['model_cool50'] = lnprob_all(pset1)
    pset2 = ps_med.copy()
    pset2[7] = -20
    df_out['model_hot50'] = lnprob_all(pset2)
    # Hi end:
    ps_hi = fc_sorted[ind_hi]
    df_out['model_comp95'] = lnprob_all(ps_hi)
    pset1 = ps_hi.copy()
    pset1[5] = -20
    df_out['model_cool95'] = lnprob_all(pset1)
    pset2 = ps_hi.copy()
    pset2[7] = -20
    df_out['model_hot95'] = lnprob_all(pset2)
    # Middle:
    # NOTE(review): this recomputes and overwrites model_comp50 already set
    # above -- looks like a copy-paste leftover. If draw_save() is stochastic
    # this changes which draw lands in the CSV; confirm before removing.
    ps_med = fc_sorted[ind_med]
    df_out['model_comp50'] = lnprob_all(ps_med)
    df_out.to_csv('models_draw.csv', index=False)
if args.animate:
    from matplotlib import animation
    # Draw 200 random posterior samples and sort them by vsini (index 4)
    # so the animation sweeps smoothly through parameter space.
    n_draws = 200
    rints = np.random.randint(0, nx, size=n_draws)
    ps_es = fc[rints]
    asi = ps_es[:, 4].argsort()
    ps_vals = ps_es[asi , :]
    draws = []
    for i in range(n_draws):
        ps = ps_vals[i]
        draw = lnprob_all(ps)
        draws.append(draw)
    """
    Matplotlib Animation Example
    author: Jake Vanderplas
    email: vanderplas@astro.washington.edu
    website: http://jakevdp.github.com
    license: BSD
    Please feel free to use and modify this, but keep the above information. Thanks!
    """
    import seaborn as sns
    sns.set_context('talk', font_scale=1.5)
    sns.set_style('ticks')
    # NOTE(review): `plt` must be imported earlier in this file -- not visible here.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.step(wl, data, 'k', label='Data')
    ax.set_xlim(np.min(wl), np.max(wl))
    ax.set_xlabel(r"$\lambda (\AA)$")
    ax.set_ylim(0, 1.3*np.percentile(data, 95))
    #ax.set_yticks([])
    #ax.set_xticks([])
    # First set up the figure, the axis, and the plot element we want to animate
    line, = ax.plot([], [], color='#AA00AA', lw=2, label='Model')
    plt.legend(loc='upper right')
    # initialization function: plot the background of each frame
    def init():
        line.set_data([], [])
        return [line]
    # animation function. This is called sequentially
    def animate(i):
        # Frame i shows the data overlaid with the i-th posterior model draw.
        line.set_data(wl, draws[i])
        return [line]
    # call the animator. blit=True means only re-draw the parts that have changed.
    anim = animation.FuncAnimation(fig, animate, init_func=init,
                                   frames=200, interval=20, blit=True)
    # save the animation as an mp4. This requires ffmpeg or mencoder to be
    # installed. The extra_args ensure that the x264 codec is used, so that
    # the video can be embedded in html5. You may need to adjust this for
    # your system: for more information, see
    # http://matplotlib.sourceforge.net/api/animation_api.html
    anim.save('subsub_spec_anim.mp4', fps=10, dpi=300)
|
"""
[12/4/2012] Challenge #114 [Intermediate] Shortest word ladder
https://www.reddit.com/r/dailyprogrammer/comments/149khi/1242012_challenge_114_intermediate_shortest_word/
Given any two words from [this list of 3,807 four-letter words](http://pastebin.com/zY4Xt7iB), output a [word
ladder](http://www.reddit.com/r/dailyprogrammer/comments/149kec/1242012_challenge_114_easy_word_ladder_steps/) between
them that's as short as possible, using words from the list. (Note that the word list was chosen so that it's possible
to form a ladder between any pair of words in the list.) Sample input:
look
leap
Sample output (any valid ladder of 5 words from `look` to `leap` would also work):
look
loon
loan
lean
leap
__Bonus__: There are 8 pairs of words that require a ladder of 18 words to join. Find these 8 pairs of words. (Hint: a
certain word appears in each of the 8 pairs.)
_Thanks to **Thomas1122** for suggesting this challenge on /r/dailyprogrammer_ideas!_
"""
def main():
    """Placeholder entry point for the word-ladder challenge (not yet implemented)."""
    return None


if __name__ == "__main__":
    main()
|
import pymongo
from pymongo import collection
if __name__ == "__main__" :
    # Connect to a local MongoDB instance and insert one vehicle record.
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    db = client["VehicalDB"]
    # NOTE(review): this rebinds the `collection` name imported from pymongo
    # above; that import appears unused.
    collection = db["IndianVehicalInformation"]
    # data = [
    #     {"Car Number": "MH20DV2363", "Owner Name": "Raktim Midya", "Owner Phone Number": "8888899999", "Owner Location": "Nagpur, Maharastra", "Car Model Number": "SKODA SUPERB", "Registration Date": "25th Sept, 2020"},
    #     {"Car Number": "HR51BV3737", "Owner Name": "Aditya Raj", "Owner Phone Number": "7777766666", "Owner Location": "Gurugram, Haryana", "Car Model Number": "MARUTI SWIFT", "Registration Date": "5th Jan, 2019"}
    # ]
    # collection.insert_many(data)
    collection.insert_one({"Car Number": "MH20DV2363", "Owner Name": "Raktim Midya", "Owner Phone Number": "8888899999", "Owner Location": "Nagpur, Maharastra", "Car Model Number": "SKODA SUPERB", "Registration Date": "25th Sept, 2020"})
    # collection.insert_one({"Car Number": "HR51BV3737", "Owner Name": "Aditya Raj", "Owner Phone Number": "7777766666", "Owner Location": "Gurugram, Haryana", "Car Model Number": "MARUTI SWIFT", "Registration Date": "5th Jan, 2019"})
|
from .client import Client
from .command import Command
from .embed import Embed
from .interaction import * |
#
# This file is executed by the makefile in ../
#
#copy index.html to /docs folder and change the base href to point to "build/html/"
def installIndexHtml(src, dst):
    """Copy *src* to *dst*, injecting ``<base href="build/html/">`` right
    after the opening ``<head>`` tag so relative links resolve into the
    build output. Failures are reported on stdout instead of raised.
    """
    print("installing index html...", end="")
    try:
        with open(src, "r") as source, open(dst, "w") as target:
            html = source.read()
            target.write(html.replace('<head>', '<head><base href="build/html/">'))
        print(" done")
    except Exception as exc:
        print(" failed: " + str(exc))
def installIndexHtmlLegacy(src, dst):
    """Legacy variant of installIndexHtml: read *src* fully, inject a
    ``<base href="build/html/">`` tag after ``<head>``, then write *dst*.

    Unlike installIndexHtml, *dst* is never created when *src* cannot be
    read, because the write happens after the read succeeds.
    Failures are reported on stdout instead of raised.
    """
    task = "installing index html"
    print(task + "...", end="")
    try:
        #read string from file
        with open(src, "r") as r:
            # Renamed from `str`: the original shadowed the builtin.
            content = r.read()
        #add base href tag
        content = content.replace('<head>', '<head><base href="build/html/">')
        #write back
        with open(dst, "w") as w:
            w.write(content)
        print(" done")
    except Exception as e:
        print(" failed: " + str(e))
installIndexHtml("build/html/index.html", "index.html") |
import os
from balsa import get_logger, Balsa, __author__
def test_balsa_sentry():
    """Smoke-test Balsa's Sentry integration when SENTRY_DSN is configured."""
    application_name = "test_balsa_sentry"
    if "SENTRY_DSN" in os.environ:
        balsa = Balsa(application_name, __author__, use_sentry=True, inhibit_cloud_services=False, is_root=False, sentry_dsn=os.environ["SENTRY_DSN"])
        balsa.init_logger()
        log = get_logger(application_name)
        # This error should appear in the configured Sentry project.
        log.error("test balsa sentry error message")
    else:
        # Without a DSN the test degrades to a no-op with a hint for the runner.
        print("Please set SENTRY_DSN environment variable to have a good %s test" % __name__)


if __name__ == "__main__":
    test_balsa_sentry()
|
#
# @lc app=leetcode id=509 lang=python3
#
# [509] Fibonacci Number
# https://leetcode.com/problems/fibonacci-number/description/
# Tagged "tree".
#
import unittest
# @lc code=start
class Solution:
    def fib(self, N: int) -> int:
        """Return the N-th Fibonacci number iteratively.

        Only the last two values of the sequence are kept, giving
        O(N) time and O(1) space.
        """
        if N == 0:
            return 0
        prev, curr = 0, 1
        for _ in range(N - 1):
            prev, curr = curr, prev + curr
        return curr
# @lc code=end
class SimpleRecursionSolution:
    """Naive doubly-recursive Fibonacci; exponential time, kept for comparison."""

    def fib(self, N: int) -> int:
        if N in (0, 1):
            return N
        return self.fib(N - 1) + self.fib(N - 2)
class TestSolution(unittest.TestCase):
    """Unit tests for the iterative Solution.fib implementation."""

    def test_example1(self) -> None:
        self.assertEqual(Solution().fib(2), 1)

    def test_example2(self) -> None:
        self.assertEqual(Solution().fib(3), 2)

    def test_zero(self) -> None:
        # Edge case: the early-return path for N == 0.
        self.assertEqual(Solution().fib(0), 0)


if __name__ == "__main__":
    unittest.main()
|
import glfw
from window.base import BaseKeys
class Keys(BaseKeys):
    """
    Namespace defining glfw specific keys constants
    """
    # Key actions
    ACTION_PRESS = glfw.PRESS
    ACTION_RELEASE = glfw.RELEASE

    # Control / navigation keys
    ESCAPE = glfw.KEY_ESCAPE
    SPACE = glfw.KEY_SPACE
    ENTER = glfw.KEY_ENTER
    PAGE_UP = glfw.KEY_PAGE_UP
    PAGE_DOWN = glfw.KEY_PAGE_DOWN
    LEFT = glfw.KEY_LEFT
    RIGHT = glfw.KEY_RIGHT
    UP = glfw.KEY_UP
    DOWN = glfw.KEY_DOWN

    # Letter keys A-Z
    A = glfw.KEY_A
    B = glfw.KEY_B
    C = glfw.KEY_C
    D = glfw.KEY_D
    E = glfw.KEY_E
    F = glfw.KEY_F
    G = glfw.KEY_G
    H = glfw.KEY_H
    I = glfw.KEY_I
    J = glfw.KEY_J
    K = glfw.KEY_K
    L = glfw.KEY_L
    M = glfw.KEY_M
    N = glfw.KEY_N
    O = glfw.KEY_O
    P = glfw.KEY_P
    Q = glfw.KEY_Q
    R = glfw.KEY_R
    S = glfw.KEY_S
    T = glfw.KEY_T
    U = glfw.KEY_U
    V = glfw.KEY_V
    W = glfw.KEY_W
    X = glfw.KEY_X
    Y = glfw.KEY_Y
    Z = glfw.KEY_Z
|
#19) Counting Sundays
#You are given the following information, but you may prefer to do some research for yourself.
#1 Jan 1900 was a Monday.
#Thirty days has September,
#April, June and November.
#All the rest have thirty-one,
#Saving February alone,
#Which has twenty-eight, rain or shine.
#And on leap years, twenty-nine.
#A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
#How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
# Solution
import datetime


def count_first_sundays(start, end):
    """Count Sundays that fall on the first of a month within [start, end].

    Iterates month-firsts directly instead of materializing every day in
    the range (Project Euler problem 19).
    """
    count = 0
    for year in range(start.year, end.year + 1):
        for month in range(1, 13):
            first = datetime.date(year, month, 1)
            # weekday() == 6 is Sunday.
            if start <= first <= end and first.weekday() == 6:
                count += 1
    return count


limits = [datetime.date(1901, 1, 1), datetime.date(2000, 12, 31)]
# Print the answer -- the original left the final `len(sundays)` expression
# unused, so the script produced no output at all.
print(count_first_sundays(limits[0], limits[1]))
from os.path import join, exists, dirname
from sklearn.neighbors import KDTree
from tool import DataProcessing as DP
from helper_ply import write_ply
import numpy as np
import os, pickle, argparse
if __name__ == '__main__':
    # Pre-process the SensatUrban point clouds: sub-sample each cloud, save
    # the sub-cloud as PLY, and cache a KDTree plus projection indices.
    parser = argparse.ArgumentParser()
    # NOTE(review): the help text looks copy-pasted from a GPU flag; the
    # argument actually takes the dataset root directory.
    parser.add_argument('--dataset_path', type=str, required=True, help='the number of GPUs to use [default: 0]')
    FLAGS = parser.parse_args()
    dataset_name = 'SensatUrban'
    dataset_path = FLAGS.dataset_path
    preparation_types = ['grid']  # Grid sampling & Random sampling
    grid_size = 0.2
    random_sample_ratio = 10
    train_files = np.sort([join(dataset_path, 'train', i) for i in os.listdir(join(dataset_path, 'train'))])
    test_files = np.sort([join(dataset_path, 'test', i) for i in os.listdir(join(dataset_path, 'test'))])
    files = np.sort(np.hstack((train_files, test_files)))
    for sample_type in preparation_types:
        for pc_path in files:
            # Cloud name = file name without its 4-character extension.
            cloud_name = pc_path.split('/')[-1][:-4]
            print('start to process:', cloud_name)
            # create output directory
            out_folder = join(dirname(dataset_path), sample_type + '_{:.3f}'.format(grid_size))
            os.makedirs(out_folder) if not exists(out_folder) else None
            # check if it has already calculated
            if exists(join(out_folder, cloud_name + '_KDTree.pkl')):
                print(cloud_name, 'already exists, skipped')
                continue
            if pc_path in train_files:
                xyz, rgb, labels = DP.read_ply_data(pc_path, with_rgb=True)
            else:
                # Test clouds have no labels; fill with zeros as placeholders.
                xyz, rgb = DP.read_ply_data(pc_path, with_rgb=True, with_label=False)
                labels = np.zeros(len(xyz), dtype=np.uint8)
            sub_ply_file = join(out_folder, cloud_name + '.ply')
            if sample_type == 'grid':
                sub_xyz, sub_rgb, sub_labels = DP.grid_sub_sampling(xyz, rgb, labels, grid_size)
            else:
                sub_xyz, sub_rgb, sub_labels = DP.random_sub_sampling(xyz, rgb, labels, random_sample_ratio)
            # Normalize colors to [0, 1] before writing.
            sub_rgb = sub_rgb / 255.0
            sub_labels = np.squeeze(sub_labels)
            write_ply(sub_ply_file, [sub_xyz, sub_rgb, sub_labels], ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])
            # Cache a KDTree over the sub-sampled points for fast neighbor queries.
            search_tree = KDTree(sub_xyz, leaf_size=50)
            kd_tree_file = join(out_folder, cloud_name + '_KDTree.pkl')
            with open(kd_tree_file, 'wb') as f:
                pickle.dump(search_tree, f)
            # For each original point, the index of its nearest sub-sampled
            # point -- used later to project predictions back to full resolution.
            proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
            proj_idx = proj_idx.astype(np.int32)
            proj_save = join(out_folder, cloud_name + '_proj.pkl')
            with open(proj_save, 'wb') as f:
                pickle.dump([proj_idx, labels], f)
|
import unittest
import json
from pyetherscan import client, response, error
class BaseClientTestCase(unittest.TestCase):
    """Shared fixture: a fresh Client per test plus a helper asserting the
    standard Etherscan success envelope."""

    def setUp(self):
        self.client = client.Client()

    def base_etherscan_response_status(self, result):
        # A successful Etherscan payload carries HTTP 200, status '1', message 'OK'.
        self.assertEqual(200, result.response_status_code)
        self.assertEqual('1', result.status)
        self.assertEqual('OK', result.message)
class TestInitialization(BaseClientTestCase):
    """Client constructor argument validation."""

    def test_initialization_objects(self):
        # Test api format error
        with self.assertRaises(error.EtherscanInitializationError):
            client.Client(apikey=5)
        # Test timeout error
        with self.assertRaises(error.EtherscanInitializationError):
            client.Client(timeout='5')
class TestAccountEndpoint(BaseClientTestCase):
    """Integration tests for the account endpoints.

    NOTE(review): these hit the live Etherscan API and assert on real chain
    data; they will break offline or if the fixtures' balances change.
    """

    def test_get_single_balance(self):
        """Single-address balance lookup returns the expected wei total."""
        expected_bal = 744997704382925139479303.0
        expected_response = {
            u'status': u'1',
            u'message': u'OK',
            u'result': u'744997704382925139479303'
        }
        address = '0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae'
        result = self.client.get_single_balance(address)
        self.assertEqual(response.SingleAddressBalanceResponse, type(result))
        self.assertEqual(expected_response, result.etherscan_response)
        self.assertEqual(expected_bal, result.balance)
        self.base_etherscan_response_status(result)

    def test_get_multi_balance(self):
        """Multi-address balance lookup, plus validation of bad address lists."""
        expected_response = {
            u'status': u'1',
            u'message': u'OK',
            u'result': [
                {
                    u'account': u'0xddbd2b932c763ba5b1b7ae3b362eac3e8d40121a',
                    u'balance': u'40807168564070000000000'
                }, {
                    u'account': u'0x63a9975ba31b0b9626b34300f7f627147df1f526',
                    u'balance': u'332567136222827062478'
                }, {
                    u'account': u'0x198ef1ec325a96cc354c7266a038be8b5c558f67',
                    u'balance': u'12005264493462223951724'
                }
            ]
        }
        addresses = [
            '0xddbd2b932c763ba5b1b7ae3b362eac3e8d40121a',
            '0x63a9975ba31b0b9626b34300f7f627147df1f526',
            '0x198ef1ec325a96cc354c7266a038be8b5c558f67'
        ]
        result = self.client.get_multi_balance(addresses)
        self.assertEqual(response.MultiAddressBalanceResponse, type(result))
        self.assertEqual(expected_response, result.etherscan_response)
        # `balances` maps address -> balance as float.
        balances = {
            u'0x198ef1ec325a96cc354c7266a038be8b5c558f67': 1.2005264493462224e+22,
            u'0x63a9975ba31b0b9626b34300f7f627147df1f526': 3.3256713622282705e+20,
            u'0xddbd2b932c763ba5b1b7ae3b362eac3e8d40121a': 4.080716856407e+22
        }
        self.assertEqual(balances, result.balances)
        self.base_etherscan_response_status(result)
        # Empty input and over-long lists are rejected client-side.
        with self.assertRaises(error.EtherscanAddressError):
            self.client.get_multi_balance(addresses='')
        with self.assertRaises(error.EtherscanAddressError):
            self.client.get_multi_balance(addresses=['' for x in range(30)])

    def test_get_transactions_by_address(self):
        """Transaction listing returns the dedicated response type."""
        address = '0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae'
        result = self.client.get_transactions_by_address(address)
        self.assertEqual(response.TransactionsByAddressResponse, type(result))
        # self.assertEqual(expected_response_result_sorted, etherscan_response_sorted)
        self.base_etherscan_response_status(result)

    def test_get_transactions_by_address_with_block_offset(self):
        """Paged transaction listing; `offset` without `page` must raise."""
        address = '0x2c1ba59d6f58433fb1eaee7d20b26ed83bda51a3'
        startblock = 0
        endblock = 2702578
        offset = 10
        page = 1
        result = self.client.get_transactions_by_address(
            address=address,
            startblock=startblock,
            endblock=endblock,
            offset=offset,
            page=page
        )
        self.assertEqual(response.TransactionsByAddressResponse, type(result))
        self.base_etherscan_response_status(result)
        with self.assertRaises(error.EtherscanTransactionError):
            self.client.get_transactions_by_address(
                address=address,
                startblock=startblock,
                endblock=endblock,
                offset=offset
            )

    def test_get_transaction_by_hash(self):
        """Internal-transaction lookup by hash matches a known fixture."""
        transaction = {
            u'contractAddress': u'',
            u'from': u'0x2cac6e4b11d6b58f6d3c1c9d5fe8faa89f60e5a2',
            u'timeStamp': u'1466489498',
            u'gas': u'2300',
            u'errCode': u'',
            u'value': u'7106740000000000',
            u'blockNumber': u'1743059',
            u'to': u'0x66a1c3eaf0f1ffc28d209c0763ed0ca614f3b002',
            u'input': u'',
            u'type': u'call',
            u'isError': u'0',
            u'gasUsed': u'0'
        }
        expected_response = {
            u'status': u'1',
            u'message': u'OK',
            u'result': [transaction]
        }
        # NOTE(review): `hash` shadows the builtin.
        hash = '0x40eb908387324f2b575b4879cd9d7188f69c8fc9d87c901b9e2daaea4b442170'
        result = self.client.get_transaction_by_hash(hash)
        self.assertEqual(response.TransactionsByHashResponse, type(result))
        self.assertEqual(expected_response, result.etherscan_response)
        self.assertEqual(transaction, result.transaction)
        self.base_etherscan_response_status(result)

    def test_get_blocks_mined_by_address(self):
        """Mined-blocks listing: only the first result row is pinned."""
        expected_response = {
            u'status': u'1',
            u'message': u'OK',
            u'result': [
                {
                    u'timeStamp': u'1491118514',
                    u'blockReward': u'5194770940000000000',
                    u'blockNumber': u'3462296'
                }
            ]
        }
        address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
        result = self.client.get_blocks_mined_by_address(address)
        self.assertEqual(response.BlocksMinedByAddressResponse, type(result))
        eth_response_result = result.etherscan_response.get('result')[0]
        expected_response_result = expected_response.get('result')[0]
        self.assertEqual(eth_response_result, expected_response_result)
        self.assertEqual(
            expected_response.get('status'),
            result.etherscan_response.get('status')
        )
        self.assertEqual(
            expected_response.get('message'),
            result.etherscan_response.get('message')
        )
        self.base_etherscan_response_status(result)

    def test_get_blocks_mined_by_address_with_block_offset(self):
        # NOTE(review): despite the name this exercises
        # get_transactions_by_address, not the mined-blocks endpoint.
        address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
        startblock = 0
        endblock = 3462296
        offset = 10
        page = 1
        result = self.client.get_transactions_by_address(
            address=address,
            startblock=startblock,
            endblock=endblock,
            offset=offset,
            page=page
        )
        self.assertEqual(response.TransactionsByAddressResponse, type(result))
        self.base_etherscan_response_status(result)
        with self.assertRaises(error.EtherscanTransactionError):
            self.client.get_transactions_by_address(
                address=address,
                startblock=startblock,
                endblock=endblock,
                offset=offset
            )
class TestContractEndpoint(BaseClientTestCase):
    """Integration test for the contract ABI endpoint (live Etherscan API)."""

    def test_get_contract_abi(self):
        """ABI lookup for the DAO contract; only the first ABI entry is pinned."""
        contract_abi = {
            "constant":True,
            "inputs":[
                {
                    "name":"",
                    "type":"uint256"
                }
            ],
            "name":"proposals",
            "outputs":[
                {
                    "name":"recipient",
                    "type":"address"
                },{
                    "name":"amount",
                    "type":"uint256"
                },{
                    "name":"description",
                    "type":"string"
                },{
                    "name":"votingDeadline",
                    "type":"uint256"
                },{
                    "name":"open",
                    "type":"bool"
                },{
                    "name":"proposalPassed",
                    "type":"bool"
                },{
                    "name":"proposalHash",
                    "type":"bytes32"
                },{
                    "name":"proposalDeposit",
                    "type":"uint256"
                },{
                    "name":"newCurator",
                    "type":"bool"
                },{
                    "name":"yea",
                    "type":"uint256"
                },{
                    "name":"nay",
                    "type":"uint256"
                },{
                    "name":"creator",
                    "type":"address"
                }
            ],
            "type":"function"
        }
        expected_response = {
            u'status': u'1',
            u'message': u'OK',
            u'result': contract_abi
        }
        address = '0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413'
        result = self.client.get_contract_abi(address)
        self.assertEqual(response.ContractABIByAddressResponse, type(result))
        # The API returns the ABI as a JSON string; parse it and compare
        # only the first entry against the fixture above.
        truncated_response = json.loads(
            result.etherscan_response.get('result'))[0]
        exp_truncated = expected_response.get('result')
        self.assertEqual(
            exp_truncated,
            truncated_response
        )
        self.assertEqual(
            expected_response.get('status'),
            result.etherscan_response.get('status')
        )
        self.assertEqual(
            expected_response.get('message'),
            result.etherscan_response.get('message')
        )
        self.base_etherscan_response_status(result)
class TestTransactionsEndpoint(BaseClientTestCase):
    """Integration test for the transaction-status endpoint (live Etherscan API)."""

    def test_get_contract_execution_status(self):
        """A known failed transaction reports isError='1' with its reason."""
        expected_response = {
            u'status': u'1',
            u'message': u'OK',
            u'result': {
                u'isError': u'1',
                u'errDescription':
                    u'Bad jump destination'
            }
        }
        # NOTE(review): `hash` shadows the builtin.
        hash = '0x15f8e5ea1079d9a0bb04a4c58ae5fe7654b5b2b4463375ff7ffb490aa0032f3a'
        result = self.client.get_contract_execution_status(hash)
        self.assertEqual(response.ContractStatusResponse, type(result))
        self.assertEqual(expected_response, result.etherscan_response)
        self.base_etherscan_response_status(result)
class TestTokenEndpoint(BaseClientTestCase):
    """Integration tests for the token endpoints (live Etherscan API)."""

    def test_get_token_supply_by_address(self):
        """Total supply for a token contract, exposed as a float."""
        expected_response = {
            u'status': u'1',
            u'message': u'OK',
            u'result': u'21265524714464'
        }
        address = '0x57d90b64a1a57749b0f932f1a3395792e12e7055'
        result = self.client.get_token_supply_by_address(address)
        self.assertEqual(response.TokenSupplyResponse, type(result))
        self.assertEqual(expected_response, result.etherscan_response)
        self.assertEqual(21265524714464.0, result.total_supply)
        self.base_etherscan_response_status(result)

    def test_get_token_balance_by_address(self):
        """Token balance for one account under one token contract."""
        expected_response = {
            u'status': u'1',
            u'message': u'OK',
            u'result': u'135499'
        }
        contract_address = '0x57d90b64a1a57749b0f932f1a3395792e12e7055'
        account_address = '0xe04f27eb70e025b78871a2ad7eabe85e61212761'
        result = self.client.get_token_balance_by_address(
            contract_address,
            account_address
        )
        self.assertEqual(response.TokenAccountBalanceResponse, type(result))
        self.assertEqual(expected_response, result.etherscan_response)
        self.assertEqual(135499.0, result.balance)
        self.base_etherscan_response_status(result)
class TestBlockEndpoint(BaseClientTestCase):
    """Integration test for the block rewards endpoint (live Etherscan API)."""

    def test_get_block_rewards(self):
        """Block + uncle rewards for a fixed historical block number."""
        expected_response = {
            "status": "1",
            "message": "OK",
            "result": {
                "blockNumber": "2165403",
                "timeStamp": "1472533979",
                "blockMiner": "0x13a06d3dfe21e0db5c016c03ea7d2509f7f8d1e3",
                "blockReward": "5314181600000000000",
                "uncles": [
                    {
                        "miner": "0xbcdfc35b86bedf72f0cda046a3c16829a2ef41d1",
                        "unclePosition": "0",
                        "blockreward": "3750000000000000000"
                    }, {
                        "miner": "0x0d0c9855c722ff0c78f21e43aa275a5b8ea60dce",
                        "unclePosition": "1",
                        "blockreward": "3750000000000000000"
                    }
                ],
                "uncleInclusionReward": "312500000000000000"
            }
        }
        block_number = 2165403
        result = self.client.get_block_and_uncle_rewards_by_block_number(
            block_number
        )
        self.assertEqual(response.BlockRewardsResponse, type(result))
        self.assertEqual(expected_response, result.etherscan_response)
        self.base_etherscan_response_status(result)
|
# Copyright (c) Facebook, Inc. and its affiliates
# SPDX-License-Identifier: MIT OR Apache-2.0
import numpy as np
from dataclasses import dataclass
import typing
class SerializationError(ValueError):
    """Raised when a value cannot be serialized."""
class DeserializationError(ValueError):
    """Raised when bytes cannot be deserialized into a value."""
@dataclass(init=False)
class uint128:
    """128-bit unsigned integer stored as two 64-bit numpy halves."""
    high: np.uint64
    low: np.uint64

    def __init__(self, num):
        # Split the Python int into its lower and upper 64-bit words.
        self.low = np.uint64(num & 0xFFFFFFFFFFFFFFFF)
        self.high = np.uint64(num >> 64)

    def __int__(self):
        return int(self.low) | (int(self.high) << 64)
@dataclass(init=False)
class int128:
    """128-bit signed integer: signed upper half, unsigned lower half."""
    high: np.int64
    low: np.uint64

    def __init__(self, num):
        # Arithmetic right shift keeps the sign in the upper word; the
        # lower word is always the raw unsigned 64 bits.
        self.low = np.uint64(num & 0xFFFFFFFFFFFFFFFF)
        self.high = np.int64(num >> 64)

    def __int__(self):
        return int(self.low) | (int(self.high) << 64)
@dataclass(init=False)
class char:
    """Wrapper enforcing exactly one unicode character."""
    value: str

    def __init__(self, s):
        if len(s) == 1:
            self.value = s
        else:
            raise ValueError("`char` expects a single unicode character")

    def __str__(self):
        return self.value
# Marker for serde's unit type (Python's `None`).
unit = typing.Type[None]
# Primitive type aliases so generated code can reference serde type names
# uniformly; `bool` intentionally shadows the builtin within this module.
bool = bool
int8 = np.int8
int16 = np.int16
int32 = np.int32
int64 = np.int64
uint8 = np.uint8
uint16 = np.uint16
uint32 = np.uint32
uint64 = np.uint64
float32 = np.float32
float64 = np.float64
|
from django.conf import settings
from django.utils.module_loading import import_module
from django.core.management.base import BaseCommand
from ...contrib.fontforge.watcher import Watcher
class Command(BaseCommand):
    """Management command that runs the fontforge file watcher."""
    args = ''
    help = 'ex: ./manage.py fontforge_watcher'

    def handle(self, *args, **options):
        # `command=self` lets the watcher report through this command's output helpers.
        watcher = Watcher(command=self)
        watcher.watch()
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/13_probabilistic_models.ipynb (unless otherwise specified).
__all__ = ['MeanStdWrapper']
# Cell
import numpy as np
import torch
from torch import nn
from ..tabular.model import *
from fastai.tabular.data import *
from ..timeseries.model import *
from fastai.tabular.all import *
from torch.autograd import Variable
import pandas as pd
from ..losses import *
from fastai.losses import MSELossFlat
from blitz.utils import variational_estimator
from ..utils_blitz import set_train_mode
from ..metrics import *
# Cell
@variational_estimator
class MeanStdWrapper(nn.Module):
    """Wraps a backbone model with a 2-unit (mean, std) head for NLL-style losses.

    Either pass a ready-made ``nll_output_layer`` module, or pass
    ``last_layer_size`` so a default ``nn.Linear(last_layer_size, 2)``
    head is created. Exactly one of the two must be provided.
    """

    def __init__(self, model, last_layer_size=None, nll_output_layer=None):
        super().__init__()
        self.model = model
        # BUG FIX: the original branches were inverted -- it raised when a
        # head WAS provided, and silently stored None when neither argument
        # was given (crashing later inside forward()).
        if nll_output_layer is not None:
            # Caller supplied a ready-made output head.
            self.nll_output_layer = nll_output_layer
        elif last_layer_size is not None:
            # Build the default 2-unit (mean, std) head.
            self.nll_output_layer = nn.Linear(last_layer_size, 2)
        else:
            raise ValueError("Either provide an output layer or the last layer size.")

    def forward(self, categorical_data, continuous_data):
        """Run the backbone, then project to the 2-unit (mean, std) output."""
        x = self.model(categorical_data, continuous_data)
        x = self.nll_output_layer(x)
        return x

    def train(self, mode: bool = True):
        # Keep blitz's variational layers in sync with the torch train/eval mode.
        super().train(mode)
        set_train_mode(self, mode)
from apps import App, action
import smtplib
import email.utils
from email.mime.text import MIMEText
class Main(App):
    """App exposing a "send email" action over a device-configured SMTP server."""

    def __init__(self, name, device, context):
        App.__init__(self, name, device, context)
        # Connect immediately using the device's ip/port fields.
        self.server = smtplib.SMTP('{0}:{1}'.format(self.device_fields['ip'], self.device_fields['port']))
        try:
            self.server.set_debuglevel(False)
            self.server.ehlo()
            # Upgrade to TLS when the server advertises STARTTLS.
            if self.server.has_extn('STARTTLS'):
                self.server.starttls()
                self.server.ehlo()  # re-identify ourselves over TLS connection
            self.server.login(self.device_fields['username'], self.device.get_encrypted_field('password'))
        except Exception as e:
            # NOTE(review): the TLS/login failure is silently swallowed after
            # closing the connection -- later actions will fail on a dead server.
            self.shutdown()

    @action
    def send_email(self, sender, receivers, subject, message, html, sender_name):
        """Send a plain-text or HTML email through the connected server."""
        message_format = 'html' if html else 'plain'
        msg = MIMEText(message, message_format)
        msg.set_unixfrom('author')
        msg['To'] = email.utils.formataddr(('Recipient', receivers))
        msg['From'] = email.utils.formataddr((sender_name, sender))
        msg['Subject'] = subject
        self.server.sendmail(sender, receivers, msg.as_string())
        return 'success'

    def shutdown(self):
        # Politely terminate the SMTP session.
        self.server.quit()
|
import logging
import datacloud.login
import pytest
from tests.base_test import BaseTest
@pytest.mark.usefixtures("setup")
class TestLogin(BaseTest):
    """Login API tests against the DataCloud host configured by the `setup` fixture.

    NOTE(review): tests 3-5 pin environment-specific ids ("Actian QA Stage",
    user 21610, account 1193) -- they only pass against that staging account.
    """

    # "Login" Fixtures
    @pytest.fixture(scope="class")
    def good_login(self, request):
        # Valid credentials from the class-level configuration.
        return datacloud.login.CreateLoginAPI(request.cls.dc_host, request.cls.dc_user, request.cls.dc_pwd)

    @pytest.fixture(scope="class")
    def bad_login1(self, request):
        # Bad username, good password.
        return datacloud.login.CreateLoginAPI(request.cls.dc_host, "bogus_user_name@gmail.com", request.cls.dc_pwd)

    @pytest.fixture(scope="class")
    def bad_login2(self, request):
        # Good username, bad password.
        return datacloud.login.CreateLoginAPI(request.cls.dc_host, request.cls.dc_user, "bogus_pwd")

    @pytest.fixture(scope="class")
    def bad_login3(self, request):
        # Bad username AND bad password.
        return datacloud.login.CreateLoginAPI(request.cls.dc_host, "bogus_user_name@gmail.com", "bogus_pwd")

    def test_login1(self, good_login):
        logging.info("Test login response message")
        expect_value = "Login successful"
        actual_value = good_login.get_message()
        err_msg = "Expected: " + expect_value + " got: " + actual_value
        self.log_assert(actual_value == expect_value, err_msg)

    def test_login2(self, good_login):
        logging.info("Test login response status")
        expect_value = True
        actual_value = good_login.get_success_status()
        err_msg = "Did not get a success true outcome - Expected: " + str(expect_value) + " got: " + str(actual_value)
        self.log_assert(actual_value == expect_value, err_msg)

    def test_login3(self, good_login):
        logging.info("Test login user id is correct.")
        expect_value = 21610
        actual_value = good_login.get_userid()
        err_msg = "Did not get correct user id - Expected: " + str(expect_value) + " got: " + str(actual_value)
        self.log_assert(actual_value == expect_value, err_msg)

    def test_login4(self, good_login):
        logging.info("Test login account id is correct.")
        expect_value = 1193
        actual_value = good_login.get_accountid()
        err_msg = "Did not get correct account id - Expected: " + str(expect_value) + " got: " + str(actual_value)
        self.log_assert(actual_value == expect_value, err_msg)

    def test_login5(self, good_login):
        logging.info("Test login account external id is correct.")
        expect_value = "Actian QA Stage"
        actual_value = good_login.get_accountexternalid()
        err_msg = "Did not get correct external account id - Expected: " + expect_value + " got: " + actual_value
        self.log_assert(actual_value == expect_value, err_msg)

    def test_login6(self, bad_login1):
        logging.info("Test we can not log into the DataCloud Integration Manager console with bogus username.")
        expect_value = "DX_EXCEPTION: INVALID_SESSION: Login failed; Invalid user or password"
        actual_value = bad_login1.get_error()
        err_msg = "Did not get expected error message: - Expected: " + expect_value + " got: " + actual_value
        self.log_assert(actual_value == expect_value, err_msg)

    def test_login7(self, bad_login2):
        logging.info("Test we can not log into the DataCloud Integration Manager console with bogus password.")
        expect_value = "DX_EXCEPTION: INVALID_SESSION: Login failed; Invalid user or password"
        actual_value = bad_login2.get_error()
        err_msg = "Did not get expected error message: - Expected: " + expect_value + " got: " + actual_value
        self.log_assert(actual_value == expect_value, err_msg)
|
# ----------------------------------------------------------------------------#
# Imports
# ----------------------------------------------------------------------------#
import json
import dateutil.parser
import babel
from flask import Flask, render_template, request, Response, flash, redirect, url_for, abort
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from flask_wtf import Form
from sqlalchemy.orm import Session
from sqlalchemy.sql import expression
from sqlalchemy import func, desc
from forms import *
from flask_migrate import Migrate
# ----------------------------------------------------------------------------#
# App Config.
# ----------------------------------------------------------------------------#
app = Flask(__name__)
moment = Moment(app)
app.config.from_object('config')
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# ----------------------------------------------------------------------------#
# Models.
# ----------------------------------------------------------------------------#
class Venue(db.Model):
    """A venue that hosts shows; linked to artists through the `show` table."""
    __tablename__ = 'venue'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    city = db.Column(db.String(120), nullable=False)
    state = db.Column(db.String(120), nullable=False)
    address = db.Column(db.String(120), nullable=False)
    phone = db.Column(db.String(120), nullable=False)
    genres = db.Column(db.ARRAY(db.String))
    # Defaults to false at the database level, not just in Python.
    seeking_talent = db.Column(db.BOOLEAN, server_default=expression.false())
    seeking_description = db.Column(db.String(120), default="")
    website = db.Column(db.String(500))
    image_link = db.Column(db.String(500))
    facebook_link = db.Column(db.String(500))
    # Many-to-many with Artist via the `show` association table.
    artists = db.relationship('Artist', secondary='show')

    def __repr__(self):
        return f'<Venue {self.id} {self.name}>'

    # TODO: implement any missing fields, as a database migration using Flask-Migrate
class Artist(db.Model):
    """A performing artist; linked to venues through the `show` table."""
    __tablename__ = 'artist'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    city = db.Column(db.String(120), nullable=False)
    state = db.Column(db.String(120), nullable=False)
    phone = db.Column(db.String(120), nullable=False)
    genres = db.Column(db.ARRAY(db.String))
    image_link = db.Column(db.String(500))
    facebook_link = db.Column(db.String(500))
    website = db.Column(db.String(500))
    # Defaults to false at the database level, not just in Python.
    seeking_venue = db.Column(db.BOOLEAN, server_default=expression.false())
    seeking_description = db.Column(db.String(500))
    # Many-to-many with Venue via the `show` association table.
    venues = db.relationship('Venue', secondary='show')

    def __repr__(self):
        return f'<Artist {self.id} {self.name}>'

    # TODO: implement any missing fields, as a database migration using Flask-Migrate
class Show(db.Model):
    """Association row: an artist performing at a venue at a start time."""
    __tablename__ = 'show'
    # Composite primary key (artist_id, venue_id).
    # NOTE(review): this forbids the same artist playing the same venue more
    # than once (even on different dates) — confirm that is intended.
    artist_id = db.Column(db.Integer, db.ForeignKey('artist.id'), primary_key=True)
    venue_id = db.Column(db.Integer, db.ForeignKey('venue.id'), primary_key=True)
    start_time = db.Column(db.DateTime, nullable=False)
    def __repr__(self):
        return f'<Show {self.artist_id} {self.venue_id} {self.start_time}>'
# TODO Implement Show and Artist models, and complete all model relationships and properties, as a database migration.
# ----------------------------------------------------------------------------#
# Filters.
# ----------------------------------------------------------------------------#
def format_datetime(value, format='medium'):
    """Jinja filter: parse an ISO datetime string and render it via Babel.

    `format` may be 'full', 'medium', or a raw Babel pattern string, which
    is passed through unchanged.
    """
    parsed = dateutil.parser.parse(value)
    patterns = {
        'full': "EEEE MMMM, d, y 'at' h:mma",
        'medium': "EE MM, dd, y h:mma",
    }
    pattern = patterns.get(format, format)
    return babel.dates.format_datetime(parsed, pattern, locale='en')
app.jinja_env.filters['datetime'] = format_datetime
# ----------------------------------------------------------------------------#
# Controllers.
# ----------------------------------------------------------------------------#
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('pages/home.html')
# Venues
# ----------------------------------------------------------------
@app.route('/venues')
def venues():
    """List all venues grouped by distinct (city, state) location.

    Bug fix: the original built ``num_upcoming_shows`` from a Query object
    without calling ``.count()``, so the template received a Query instead of
    an integer (compare search_venues, which does call ``.count()``).
    """
    locations = Venue.query.with_entities(Venue.city, Venue.state).distinct()
    now = datetime.now()  # hoisted: one timestamp for the whole listing
    data = []
    for city, state in locations:
        query_venues = Venue.query \
            .filter_by(city=city, state=state) \
            .with_entities(Venue.id, Venue.name) \
            .order_by('id') \
            .all()
        venue_list = []
        for venue_id, venue_name in query_venues:
            venue_list.append({
                'id': venue_id,
                'name': venue_name,
                # Bug fix: .count() so the template gets an int.
                'num_upcoming_shows': Show.query.filter_by(venue_id=venue_id)
                    .filter(Show.start_time >= now).count()
            })
        data.append({'city': city, 'state': state, 'venues': venue_list})
    return render_template('pages/venues.html', areas=data)
@app.route('/venues/search', methods=['POST'])
def search_venues():
    """Case-insensitive partial-name search over venues.

    The requirement (see original TODO) is case-insensitive matching —
    "hop" must find "The Musical Hop" — but ``contains`` is case-sensitive,
    so this uses ``ilike`` instead.
    """
    search_name = request.form.get('search_term', '')
    results = Venue.query.filter(Venue.name.ilike('%' + search_name + '%')).all()
    now = datetime.now()
    data = [{
        'id': r.id,
        'name': r.name,
        'num_upcoming_shows': Show.query.filter_by(venue_id=r.id).filter(
            Show.start_time >= now).count()
    } for r in results]
    response = {'count': len(results), 'data': data}
    return render_template('pages/search_venues.html', results=response,
                           search_term=search_name)
@app.route('/venues/<int:venue_id>')
def show_venue(venue_id):
    """Venue detail page, including its past and upcoming shows.

    Returns the 404 page when the venue does not exist.
    Performance fix: the original called ``Artist.query.get`` twice per show
    (once for name, once for image_link); each artist is now fetched once.
    """
    venue = Venue.query.get(venue_id)
    if venue is None:
        return render_template('errors/404.html')
    shows = Show.query.filter_by(venue_id=venue_id)
    now = datetime.now()

    def artist_entry(show):
        # One query per show instead of two.
        artist = Artist.query.get(show.artist_id)
        return {
            'artist_id': show.artist_id,
            'artist_name': artist.name,
            'artist_image_link': artist.image_link,
            'start_time': str(show.start_time),
        }

    past_shows = [artist_entry(s) for s in shows.filter(Show.start_time < now).all()]
    upcoming_shows = [artist_entry(s) for s in shows.filter(Show.start_time >= now).all()]
    data = {
        'id': venue_id,
        'name': venue.name,
        'genres': venue.genres,
        'address': venue.address,
        'city': venue.city,
        'state': venue.state,
        'phone': venue.phone,
        'website': venue.website,
        'facebook_link': venue.facebook_link,
        'seeking_description': venue.seeking_description,
        'seeking_talent': venue.seeking_talent,
        'image_link': venue.image_link,
        'past_shows': past_shows,
        'upcoming_shows': upcoming_shows,
        'past_shows_count': len(past_shows),
        'upcoming_shows_count': len(upcoming_shows),
    }
    return render_template('pages/show_venue.html', venue=data)
# Create Venue
# ----------------------------------------------------------------
@app.route('/venues/create', methods=['GET'])
def create_venue_form():
    """Serve an empty new-venue form."""
    return render_template('forms/new_venue.html', form=VenueForm())
@app.route('/venues/create', methods=['POST'])
def create_venue_submission():
    """Validate the submitted venue form and insert a new Venue row.

    Bug fixes: ``is not ""`` compared object identity rather than equality
    (and is a SyntaxWarning on modern Python); the bare ``except:`` also
    caught SystemExit/KeyboardInterrupt; a leftover debug print was removed.
    """
    error = False
    form = VenueForm(request.form)
    try:
        if form.validate():
            seeking_description = form.seeking_description.data
            venue = Venue(
                name=form.name.data,
                city=form.city.data,
                state=form.state.data,
                address=form.address.data,
                phone=form.phone.data,
                genres=form.genres.data,
                image_link=form.image_link.data,
                # Bug fix: truthiness instead of `is not ""` identity test.
                seeking_talent=bool(seeking_description),
                seeking_description=seeking_description,
                facebook_link=form.facebook_link.data)
            db.session.add(venue)
            db.session.commit()
        else:
            flash("Form Validation Failed, Check Form Data Format")
            return render_template('forms/new_venue.html', form=form)
    except Exception:
        error = True
        db.session.rollback()
        flash('Error occurred ! Venue ' + request.form['name'] + ' was not inserted')
        return redirect('')
    finally:
        db.session.close()
    if not error:
        flash('Venue ' + request.form['name'] + ' was successfully listed!')
    else:
        return render_template('errors/500.html')
    return render_template('pages/home.html')
@app.route('/venues/<venue_id>', methods=['DELETE'])
def delete_venue(venue_id):
    """Delete a venue and its shows; reset the id sequence if the table empties.

    Bug fixes: ``Query.delete()`` returns an int (rows deleted), so the
    original ``show.id`` always raised AttributeError and forced the except
    branch; ``is 0`` compared identity instead of value; the bare ``except:``
    was narrowed to ``except Exception``.
    """
    error = False
    try:
        Show.query.filter_by(venue_id=venue_id).delete()
        Venue.query.filter_by(id=venue_id).delete()
        # Keep the Postgres id sequence in step with the remaining rows.
        db.session.execute("SELECT setval ('venue_id_seq', max(venue.id)) FROM venue;")
        # Bug fix: value comparison (and count() avoids loading every row).
        if Venue.query.count() == 0:
            db.session.execute("ALTER SEQUENCE venue_id_seq RESTART WITH 1;")
        db.session.commit()
    except Exception:
        error = True
        db.session.rollback()
        flash('Fail to Delete Venue' + venue_id + '! Something Wrong!')
    finally:
        db.session.close()
    if not error:
        flash('Success to Delete Venue' + venue_id + '! Good!')
    else:
        return render_template('errors/500.html')
    return render_template('pages/home.html')
# Artists
# ----------------------------------------------------------------
@app.route('/artists')
def artists():
    """List every artist (id and name only)."""
    data = [{'id': artist.id, 'name': artist.name}
            for artist in Artist.query.all()]
    return render_template('pages/artists.html', artists=data)
@app.route('/artists/search', methods=['POST'])
def search_artists():
    """Case-insensitive partial-name search over artists.

    The requirement (see original TODO: "band" must find "The Wild Sax Band")
    is case-insensitive matching, but ``contains`` is case-sensitive, so this
    uses ``ilike`` instead.
    """
    search_name = request.form.get('search_term', '')
    results = Artist.query.filter(Artist.name.ilike('%' + search_name + '%')).all()
    now = datetime.now()
    data = [{
        'id': r.id,
        'name': r.name,
        'num_upcoming_shows': Show.query.filter_by(artist_id=r.id).filter(
            Show.start_time >= now).count()
    } for r in results]
    response = {'count': len(results), 'data': data}
    return render_template('pages/search_artists.html', results=response,
                           search_term=search_name)
@app.route('/artists/<int:artist_id>')
def show_artist(artist_id):
    """Artist detail page, including past and upcoming shows.

    Robustness fix: show_venue guards against a missing row but this handler
    dereferenced a possibly-None artist; it now returns the 404 page.
    Performance fix: each venue is queried once per show instead of twice.
    """
    artist = Artist.query.get(artist_id)
    if artist is None:
        return render_template('errors/404.html')
    shows = Show.query.filter_by(artist_id=artist_id)
    now = datetime.now()

    def venue_entry(show):
        # One query per show instead of two.
        venue = Venue.query.get(show.venue_id)
        return {
            'venue_id': show.venue_id,
            'venue_name': venue.name,
            'venue_image_link': venue.image_link,
            'start_time': str(show.start_time),
        }

    past_shows = [venue_entry(s) for s in shows.filter(Show.start_time < now).all()]
    upcoming_shows = [venue_entry(s) for s in shows.filter(Show.start_time >= now).all()]
    data = {
        'id': artist_id,
        'name': artist.name,
        'genres': artist.genres,
        'city': artist.city,
        'state': artist.state,
        'phone': artist.phone,
        'website': artist.website,
        'facebook_link': artist.facebook_link,
        'seeking_description': artist.seeking_description,
        'image_link': artist.image_link,
        'past_shows': past_shows,
        'upcoming_shows': upcoming_shows,
        'past_shows_count': len(past_shows),
        'upcoming_shows_count': len(upcoming_shows),
    }
    return render_template('pages/show_artist.html', artist=data)
# Update
# ----------------------------------------------------------------
@app.route('/artists/<int:artist_id>/edit', methods=['GET'])
def edit_artist(artist_id):
    """Serve the edit form pre-populated with the artist's current values.

    Robustness fix: the original dereferenced ``Artist.query.get`` without a
    None check; an unknown id now returns the 404 page.
    """
    artist = Artist.query.get(artist_id)
    if artist is None:
        return render_template('errors/404.html')
    form = ArtistForm()
    form.name.data = artist.name
    form.genres.data = artist.genres
    form.city.data = artist.city
    form.state.data = artist.state
    form.phone.data = artist.phone
    form.website.data = artist.website
    form.facebook_link.data = artist.facebook_link
    form.seeking_description.data = artist.seeking_description
    form.image_link.data = artist.image_link
    return render_template('forms/edit_artist.html', form=form, artist=artist)
@app.route('/artists/<int:artist_id>/edit', methods=['POST'])
def edit_artist_submission(artist_id):
    """Apply validated form values to an existing artist record.

    Consistency fix: edit_venue_submission persists website,
    seeking_description and image_link, but the artist version silently
    dropped them; they are now saved too. The bare ``except:`` was narrowed.
    """
    artist = Artist.query.get(artist_id)
    form = ArtistForm(request.form)
    try:
        if form.validate():
            artist.name = form.name.data
            artist.genres = form.genres.data
            artist.city = form.city.data
            artist.state = form.state.data
            artist.phone = form.phone.data
            artist.facebook_link = form.facebook_link.data
            # Previously-dropped fields (see docstring).
            artist.website = form.website.data
            artist.seeking_description = form.seeking_description.data
            artist.image_link = form.image_link.data
            db.session.commit()
        else:
            flash("Edit Form Validation Failed, Please Resubmit")
            return redirect(url_for('show_artist', artist_id=artist_id))
    except Exception:
        db.session.rollback()
        flash("Something Wrong During Submit!")
        return redirect(url_for('show_artist', artist_id=artist_id))
    finally:
        db.session.close()
    return redirect(url_for('show_artist', artist_id=artist_id))
@app.route('/venues/<int:venue_id>/edit', methods=['GET'])
def edit_venue(venue_id):
    """Serve the edit form pre-populated with the venue's current values.

    Robustness fix: guard against a missing venue before dereferencing.
    Consistency fix: edit_artist pre-fills image_link; this form did not,
    so saving the edit form would wipe the venue's image.
    """
    venue = Venue.query.get(venue_id)
    if venue is None:
        return render_template('errors/404.html')
    form = VenueForm()
    form.name.data = venue.name
    form.genres.data = venue.genres
    form.address.data = venue.address
    form.city.data = venue.city
    form.state.data = venue.state
    form.phone.data = venue.phone
    form.website.data = venue.website
    form.facebook_link.data = venue.facebook_link
    form.seeking_description.data = venue.seeking_description
    form.image_link.data = venue.image_link
    return render_template('forms/edit_venue.html', form=form, venue=venue)
@app.route('/venues/<int:venue_id>/edit', methods=['POST'])
def edit_venue_submission(venue_id):
    """Apply validated form values to an existing venue record.

    Bug fixes: ``is not ""`` compared object identity rather than equality;
    the bare ``except:`` was narrowed to ``except Exception``.
    """
    try:
        form = VenueForm(request.form)
        if form.validate():
            venue = Venue.query.get(venue_id)
            venue.name = form.name.data
            venue.genres = form.genres.data
            venue.address = form.address.data
            venue.city = form.city.data
            venue.state = form.state.data
            venue.phone = form.phone.data
            venue.website = form.website.data
            venue.facebook_link = form.facebook_link.data
            venue.seeking_description = form.seeking_description.data
            # Bug fix: truthiness instead of `is not ""` identity test.
            venue.seeking_talent = bool(venue.seeking_description)
            venue.image_link = form.image_link.data
            db.session.commit()
        else:
            flash("Form Validation Failed")
    except Exception:
        db.session.rollback()
        flash("Wrong Update")
        return redirect(url_for('show_venue', venue_id=venue_id))
    finally:
        db.session.close()
    return redirect(url_for('show_venue', venue_id=venue_id))
# Create Artist
# ----------------------------------------------------------------
@app.route('/artists/create', methods=['GET'])
def create_artist_form():
    """Serve an empty new-artist form."""
    return render_template('forms/new_artist.html', form=ArtistForm())
@app.route('/artists/create', methods=['POST'])
def create_artist_submission():
    """Validate the submitted artist form and insert a new Artist row.

    Bug fixes: ``is not ""`` compared object identity rather than equality;
    the bare ``except:`` was narrowed. Consistency fix: the venue creation
    handler validates the form before inserting; this handler did not.
    """
    error = False
    form = ArtistForm(request.form)
    try:
        if form.validate():
            seeking_description = form.seeking_description.data
            artist = Artist(
                name=form.name.data,
                city=form.city.data,
                state=form.state.data,
                phone=form.phone.data,
                genres=form.genres.data,
                image_link=form.image_link.data,
                # Bug fix: truthiness instead of `is not ""` identity test.
                seeking_venue=bool(seeking_description),
                seeking_description=seeking_description,
                facebook_link=form.facebook_link.data)
            db.session.add(artist)
            db.session.commit()
        else:
            flash("Form Validation Failed, Check Form Data Format")
            return render_template('forms/new_artist.html', form=form)
    except Exception:
        error = True
        db.session.rollback()
        flash('Error occurred ! Artist ' + form.name.data + ' was not inserted')
    finally:
        db.session.close()
    if not error:
        flash('Artist ' + form.name.data + ' was successfully listed!')
    else:
        return render_template('errors/500.html')
    return render_template('pages/home.html')
# Shows
# ----------------------------------------------------------------
@app.route('/shows')
def shows():
    """List every show together with its venue and artist details."""
    results = db.session.query(Show, Venue, Artist).join(Venue).join(Artist).all()
    data = [{
        'venue_id': venue.id,
        'venue_name': venue.name,
        'artist_id': artist.id,
        'artist_name': artist.name,
        'artist_image_link': artist.image_link,
        'start_time': str(show.start_time),
    } for show, venue, artist in results]
    return render_template('pages/shows.html', shows=data)
@app.route('/shows/create')
def create_shows():
    """Serve an empty new-show form."""
    return render_template('forms/new_show.html', form=ShowForm())
@app.route('/shows/create', methods=['POST'])
def create_show_submission():
    """Insert a new Show row from the submitted form.

    Bug fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to ``except Exception``.
    """
    error = False
    form = ShowForm(request.form)
    try:
        show = Show(artist_id=form.artist_id.data,
                    venue_id=form.venue_id.data,
                    start_time=form.start_time.data)
        db.session.add(show)
        db.session.commit()
    except Exception:
        error = True
        db.session.rollback()
    finally:
        db.session.close()
    if not error:
        flash('Show was successfully listed!')
        return render_template('pages/home.html')
    flash('Error occurred ! Show ' + ' was not inserted')
    return render_template('errors/500.html')
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page for unknown routes."""
    return render_template('errors/404.html'), 404
@app.errorhandler(500)
def server_error(error):
    """Render the custom 500 page for unhandled server errors."""
    return render_template('errors/500.html'), 500
if not app.debug:
    # Outside debug mode, append INFO-and-above records to error.log.
    file_handler = FileHandler('error.log')
    file_handler.setFormatter(
        Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
    )
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('errors')
# ----------------------------------------------------------------------------#
# Launch.
# ----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
    app.run()  # Flask development server on the default port
# Or specify port manually:
'''
if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
'''
|
"""
overseer.urls
~~~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.conf.urls.defaults import *
import os.path
urlpatterns = patterns('',
url(r'^media/(?P<path>.+)?$', 'django.views.static.serve', {
'document_root': os.path.join(os.path.dirname(__file__), 'media'),
'show_indexes': True
}, name='media'),
url(r'^$', 'overseer.views.index', name='index'),
url(r'^service/(?P<slug>[^/]+)/$', 'overseer.views.service', name='service'),
url(r'^service/(?P<slug>[^/]+)/last-event/$', 'overseer.views.last_event', name='last_event'),
url(r'^event/(?P<id>[^/]+)/$', 'overseer.views.event', name='event'),
url(r'^(?P<id>\d+)$', 'django.views.generic.simple.redirect_to', {'url': 'event/%(id)s/'}, name='event_short'),
url(r'^subscribe/$', 'overseer.views.create_subscription', name='create_subscription'),
url(r'^subscription/(?P<ident>[^/]+)/$', 'overseer.views.update_subscription', name='update_subscription'),
url(r'^subscription/(?P<ident>[^/]+)/verify/$', 'overseer.views.verify_subscription', name='verify_subscription'),
) |
from django.apps import AppConfig
class FairshakeConfig(AppConfig):
    """Django application configuration for the FAIRshakeHub app."""
    name = 'FAIRshakeHub'
|
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Change CycleTaskGroupObjectTask finished_date and verified_date type
from DateTime to Date
Create Date: 2019-02-13 10:24:27.731045
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '3f80820cbf08'  # id of this migration
down_revision = 'a8a44ea42a2b91'  # parent revision in the migration chain
columns = ["finished_date", "verified_date"]  # columns narrowed DateTime -> Date
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    table_name = "cycle_task_group_object_tasks"
    for date_column in columns:
        # Narrow each timestamp column from DateTime to plain Date.
        op.alter_column(
            table_name,
            date_column,
            existing_type=sa.DateTime,
            type_=sa.Date,
        )
def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # Reverting Date back to DateTime would fabricate time-of-day data; refuse.
    raise NotImplementedError("Downgrade is not supported")
|
import pickle
# Load a Python 2 era pickle: encoding='latin1' maps the old byte strings
# onto text without decode errors.
# Idiom fix: use the public pickle.load(..., encoding=...) API instead of
# instantiating the private pure-Python pickle._Unpickler class.
with open('raw.pickle', 'rb') as f:
    p = pickle.load(f, encoding='latin1')
print(p.keys())
"""Tests for pickaxe.py using pytest."""
# pylint: disable=redefined-outer-name
# pylint: disable=protected-access
import hashlib
import os
import re
import subprocess
from filecmp import cmp
from pathlib import Path
import pymongo
import pytest
from pymongo.errors import ServerSelectionTimeoutError
from rdkit.Chem import AllChem
from minedatabase import pickaxe
from minedatabase.databases import MINE
try:
    # Probe for a reachable MongoDB; a tiny timeout keeps collection fast.
    client = pymongo.MongoClient(ServerSelectionTimeoutMS=20)
    client.server_info()
    del client
    is_mongo = True
except ServerSelectionTimeoutError:
    # Fix: the exception was bound (`as err`) but never used.
    is_mongo = False
# Marker to skip DB-backed tests when no MongoDB is available.
valid_db = pytest.mark.skipif(not is_mongo, reason="No MongoDB Connection")
file_path = Path(__file__)
file_dir = file_path.parent
DATA_DIR = (file_dir / "../data/").resolve()
def purge(directory, pattern):
    """Delete every file in *directory* whose name matches regex *pattern*."""
    matcher = re.compile(pattern)
    for entry in os.listdir(directory):
        if matcher.search(entry):
            os.remove(os.path.join(directory, entry))
def delete_database(name):
    """Drop the named MINE database and release its client connection."""
    client = MINE(name).client
    client.drop_database(name)
    client.close()
def test_cofactor_loading(pk):
    """Test loading cofactors.
    GIVEN a default Pickaxe object
    WHEN cofactors are loaded into the Pickaxe object in its creation
    THEN make sure those cofactors were loaded correctly
    """
    water_id = "X73bc8ef21db580aefe4dbc0af17d4013961d9d17"
    assert water_id in pk.compounds
    water = pk.compounds[water_id]
    assert water["Formula"] == "H2O"
    assert water["Type"] == "Coreactant"
    water_coreactant = pk.coreactants["Water"]
    assert isinstance(water_coreactant[0], AllChem.Mol)
    assert water_coreactant[1][0] == "X"
def test_reaction_rule_loading(default_rule):
    """Test loading rules.
    GIVEN a reaction rule dict
    WHEN reaction rules are loaded during Pickaxe object initialization
    THEN make sure it is formatted correctly
    """
    reaction_obj = default_rule[0]
    rule_meta = default_rule[1]
    assert isinstance(reaction_obj, AllChem.ChemicalReaction)
    assert isinstance(rule_meta, dict)
    assert rule_meta["Reactants"] == ["ATP", "Any"]
    for required_key in ("Products", "Comments"):
        assert required_key in rule_meta
def test_compound_loading(pk):
    """Test loading compounds.
    GIVEN a default Pickaxe object
    WHEN compounds are loaded
    THEN check that they are loaded correctly
    """
    loaded_smiles = pk.load_compound_set(
        compound_file=file_dir / "../data/test_compounds.tsv"
    )
    expected_count = 14
    assert len(loaded_smiles) == expected_count
def test_transform_all(default_rule, smiles_dict, coreactant_dict):
    """Test transform function.
    GIVEN a set of rules and starting compounds
    WHEN we run pickaxe to predict potential transformations
    THEN make sure all expected transformations are predicted
    """
    pk = pickaxe.Pickaxe(errors=False, explicit_h=True)
    pk._load_coreactant(coreactant_dict["ATP"])
    pk._load_coreactant(coreactant_dict["ADP"])
    pk._add_compound(
        smiles_dict["FADH"], smiles_dict["FADH"], cpd_type="Starting Compound"
    )
    pk.operators["2.7.1.a"] = default_rule
    pk.transform_all(generations=2)
    assert len(pk.compounds) == 31
    assert len(pk.reactions) == 49
    # Idiom fix: set comprehension instead of set([list comprehension]).
    comp_gens = {x["Generation"] for x in pk.compounds.values()}
    assert comp_gens == {0, 1, 2}
def test_compound_output_writing(pk_transformed):
    """Test compound output writing.
    GIVEN a Pickaxe object with predicted transformations
    WHEN all compounds (including predicted) are written to an output file
    THEN make sure they are correctly written, and that they are all present
    """
    # Digest of the known-good reference file.
    with open(file_dir / "../data/testcompoundsout.tsv", "rb") as infile:
        expected = hashlib.sha256(infile.read()).hexdigest()
    # NOTE(review): the writer appears to append "_new" to the stem — output
    # lands in testcompoundsout_new.tsv, not the path passed in; confirm
    # against write_compound_output_file's implementation.
    pk_transformed.write_compound_output_file(file_dir / "../data/testcompoundsout.tsv")
    assert os.path.exists(file_dir / "../data/testcompoundsout_new.tsv")
    try:
        with open(file_dir / "../data/testcompoundsout_new.tsv", "rb") as infile:
            output_compounds = hashlib.sha256(infile.read()).hexdigest()
        assert expected == output_compounds
    finally:
        # Always remove the generated file so reruns start clean.
        os.remove(file_dir / "../data/testcompoundsout_new.tsv")
def test_reaction_output_writing(pk_transformed):
    """Test writing reaction output.
    GIVEN a Pickaxe object with predicted transformations
    WHEN all reactions (including predicted) are written to an output file
    THEN make sure they are correctly written, and that they are all present
    """
    # Digest of the known-good reference file.
    with open(file_dir / "../data/testreactionsout.tsv", "rb") as infile:
        expected = hashlib.sha256(infile.read()).hexdigest()
    # NOTE(review): output lands in testreactionsout_new.tsv (writer appends
    # "_new" to the stem) — confirm against write_reaction_output_file.
    pk_transformed.write_reaction_output_file(file_dir / "../data/testreactionsout.tsv")
    assert os.path.exists(file_dir / "../data/testreactionsout_new.tsv")
    try:
        with open(file_dir / "../data/testreactionsout_new.tsv", "rb") as infile:
            output_compounds = hashlib.sha256(infile.read()).hexdigest()
        assert expected == output_compounds
    finally:
        # Always remove the generated file so reruns start clean.
        os.remove(file_dir / "../data/testreactionsout_new.tsv")
def test_multiprocessing(pk, smiles_dict, coreactant_dict):
    """Test multiprocessing.
    GIVEN a Pickaxe object
    WHEN we use multiprocessing to enumerate predicted reactions
    THEN make sure those predictions are correct
    """
    pk._load_coreactant(coreactant_dict["ATP"])
    pk._load_coreactant(coreactant_dict["ADP"])
    pk._add_compound("FADH", smiles_dict["FADH"], cpd_type="Starting Compound")
    pk.transform_all(generations=2, processes=2)
    assert len(pk.compounds) == 67
    assert len(pk.reactions) == 49
    # Idiom fix: set comprehension instead of set([list comprehension]).
    comp_gens = {x["Generation"] for x in pk.compounds.values()}
    assert comp_gens == {0, 1, 2}
def test_pruning(default_rule, smiles_dict, coreactant_dict):
    """Test pruning network to targets.
    GIVEN a Pickaxe expansion
    WHEN that expansion is pruned via Pickaxe.prune_network()
    THEN make sure that the pruned compounds no longer exist in the network
    """
    pk = pickaxe.Pickaxe(explicit_h=True)
    pk.operators["2.7.1.a"] = default_rule
    pk._load_coreactant(coreactant_dict["ATP"])
    pk._load_coreactant(coreactant_dict["ADP"])
    pk._add_compound("FADH", smiles_dict["FADH"], cpd_type="Starting Compound")
    pk.transform_all(generations=2)
    # Compound ids to keep; everything not reaching these should be pruned.
    ids = [
        "C89d19c432cbe8729c117cfe50ff6ae4704a4e6c1",
        "C750e93db23dd3f796ffdf9bdefabe32b10710053",
        "C41",
    ]
    pk.prune_network(ids)
    pk.assign_ids()
    DATA_DIR = (file_dir / "../data").resolve()
    pk.write_compound_output_file(DATA_DIR / "pruned_comps.tsv")
    pk.write_reaction_output_file(DATA_DIR / "pruned_rxns.tsv")
    # Writers emit to *_new.tsv; compare them against the checked-in files.
    assert os.path.exists(DATA_DIR / "pruned_comps_new.tsv")
    assert os.path.exists(DATA_DIR / "pruned_rxns_new.tsv")
    try:
        assert cmp(DATA_DIR / "pruned_comps.tsv", DATA_DIR / "pruned_comps_new.tsv")
        assert cmp(DATA_DIR / "pruned_rxns.tsv", DATA_DIR / "pruned_rxns_new.tsv")
    finally:
        # Always remove generated files so reruns start clean.
        os.remove((DATA_DIR / "pruned_comps_new.tsv").resolve())
        os.remove((DATA_DIR / "pruned_rxns_new.tsv").resolve())
def test_target_generation(default_rule, smiles_dict, coreactant_dict):
    """Test generating a target from starting compounds.

    GIVEN starting compounds and a target list
    WHEN the network is expanded and pruned to targets
    THEN the target compound remains with the expected network size
    """
    pk = pickaxe.Pickaxe(explicit_h=True)
    pk.operators["2.7.1.a"] = default_rule
    pk._load_coreactant(coreactant_dict["ATP"])
    pk._load_coreactant(coreactant_dict["ADP"])
    pk._add_compound("FADH", smiles_dict["FADH"], cpd_type="Starting Compound")
    pk.load_targets(file_dir / "../data/test_targets.csv")
    pk.transform_all(generations=2)
    pk.prune_network_to_targets()
    # The known target compound must survive pruning.
    assert "C11088915f64b93293e70af9c3b7822a4f131225d" in pk.compounds
    assert len(pk.reactions) == 4
    assert len(pk.compounds) == 6
@valid_db
def test_save_as_mine(default_rule, smiles_dict, coreactant_dict):
    """Test saving compounds to database.
    GIVEN a Pickaxe expansion
    WHEN that expansion is saved as a MINE DB in the MongoDB
    THEN make sure that all features are saved in the MongoDB as expected
    """
    DATA_DIR = (file_dir / "../data").resolve()
    delete_database("MINE_test")  # start from a clean database
    pk = pickaxe.Pickaxe(database="MINE_test", image_dir=DATA_DIR, explicit_h=True)
    pk.operators["2.7.1.a"] = default_rule
    pk._load_coreactant(coreactant_dict["ATP"])
    pk._load_coreactant(coreactant_dict["ADP"])
    pk._add_compound("FADH", smiles_dict["FADH"], cpd_type="Starting Compound")
    pk.transform_all(generations=2)
    pk.save_to_mine(processes=1)
    mine_db = MINE("MINE_test")
    try:
        # Expected sizes for a 2-generation expansion of this rule set.
        assert mine_db.compounds.estimated_document_count() == 31
        assert mine_db.reactions.estimated_document_count() == 49
        assert mine_db.operators.estimated_document_count() == 1
        assert mine_db.operators.find_one()["Reactions_predicted"] == 49
        # Saving with image_dir should render an SVG per compound.
        assert os.path.exists(
            DATA_DIR / "X9c29f84930a190d9086a46c344020283c85fb917.svg"
        )
        start_comp = mine_db.compounds.find_one({"Type": "Starting Compound"})
        assert len(start_comp["Reactant_in"]) > 0
        # Don't track sources of coreactants
        coreactant = mine_db.compounds.find_one({"Type": "Coreactant"})
        assert "Product_of" not in coreactant
        assert "Reactant_in" not in coreactant
        product = mine_db.compounds.find_one({"Generation": 2})
        assert len(product["Product_of"]) > 0
        assert product["Type"] == "Predicted"
    finally:
        # Always drop the scratch DB and remove generated SVGs.
        delete_database("MINE_test")
        purge(DATA_DIR, r".*\.svg$")
@valid_db
def test_save_target_mine(default_rule, smiles_dict, coreactant_dict):
    """Test saving the target run to a MINE."""
    delete_database("MINE_test")  # start from a clean database
    pk = pickaxe.Pickaxe(database="MINE_test", explicit_h=True)
    pk.operators["2.7.1.a"] = default_rule
    pk._load_coreactant(coreactant_dict["ATP"])
    pk._load_coreactant(coreactant_dict["ADP"])
    pk._add_compound("FADH", smiles_dict["FADH"], cpd_type="Starting Compound")
    pk.load_targets(file_dir / "../data/test_targets.csv")
    pk.transform_all(generations=2)
    pk.prune_network_to_targets()
    pk.save_to_mine()
    mine_db = MINE("MINE_test")
    try:
        # Pruned network sizes must match test_target_generation.
        assert mine_db.compounds.estimated_document_count() == 6
        assert mine_db.reactions.estimated_document_count() == 4
        assert mine_db.operators.estimated_document_count() == 1
        assert mine_db.operators.find_one()["Reactions_predicted"] == 4
        start_comp = mine_db.target_compounds.find_one()
        assert start_comp["InChI_key"] == "RYNUDNWPSBJQQY-UHFFFAOYSA-N"
        # Target documents carry id, SMILES and InChI key fields.
        assert all([i in start_comp.keys() for i in ["_id", "SMILES", "InChI_key"]])
    finally:
        # Always drop the scratch DB.
        delete_database("MINE_test")
@valid_db
def test_database_already_exists(default_rule, smiles_dict, coreactant_dict):
    """Test database collision.
    GIVEN an existing MINE
    WHEN a new pickaxe object is defined
    THEN make sure program exits with database collision
    """
    delete_database("MINE_test")  # start from a clean database
    pk = pickaxe.Pickaxe(database="MINE_test")
    pk.operators["2.7.1.a"] = default_rule
    pk._load_coreactant(coreactant_dict["ATP"])
    pk._load_coreactant(coreactant_dict["ADP"])
    pk._add_compound("FADH", smiles_dict["FADH"], cpd_type="Starting Compound")
    pk.transform_all(generations=2)
    pk.save_to_mine(processes=1)
    try:
        # Re-creating a Pickaxe on the now-populated DB must sys.exit().
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            pk = pickaxe.Pickaxe(database="MINE_test")
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == (
            "Exiting due to database name collision."
        )
    finally:
        # Always drop the scratch DB.
        delete_database("MINE_test")
def test_pickle(coreactant_dict, smiles_dict, default_rule):
    """Test pickling of pickaxe objects."""
    pickle_path = Path("test_pickle.pk")
    pk = pickaxe.Pickaxe(errors=False, explicit_h=True)
    pk._load_coreactant(coreactant_dict["ATP"])
    pk._load_coreactant(coreactant_dict["ADP"])
    pk._add_compound(
        smiles_dict["FADH"], smiles_dict["FADH"], cpd_type="Starting Compound"
    )
    pk.operators["2.7.1.a"] = default_rule
    pk.transform_all(generations=2)
    pk.pickle_pickaxe(pickle_path)
    del pk
    # Round-trip: a fresh object loaded from the pickle must match the run.
    pk = pickaxe.Pickaxe(errors=False)
    pk.load_pickled_pickaxe(pickle_path)
    assert len(pk.compounds) == 31
    assert len(pk.reactions) == 49
    # Idiom fix: set comprehension instead of set([list comprehension]).
    comp_gens = {x["Generation"] for x in pk.compounds.values()}
    assert comp_gens == {0, 1, 2}
    pickle_path.unlink()
def test_local_cli():
    """Test command line interface writing locally.
    GIVEN the pickaxe CLI
    WHEN pickaxe is run from the command line
    THEN make sure it exits with exit code 0 (no errors)
    """
    os.chdir(file_dir / "../data/../..")
    # Fix: the command string carried an f-prefix with no placeholders.
    rc = subprocess.call(
        "python minedatabase/pickaxe.py -o tests/ -r tests/data/test_cd_rxn_rule.tsv",
        shell=True,
    )
    assert not rc
    # Remove TSV files the CLI run produced.
    purge(DATA_DIR / "..", r".*\.tsv$")
@valid_db
def test_mongo_cli():
    """Test command line interface writing to mongo."""
    mine = MINE("tests")
    os.chdir(file_dir / "../data/../..")
    # Run the CLI with -d so results go to the "tests" Mongo database.
    rc = subprocess.call(
        "python minedatabase/pickaxe.py -d tests -r tests/data/test_cd_rxn_rule.tsv",
        shell=True,
    )
    assert not rc
    try:
        assert mine.compounds.estimated_document_count() == 51
    finally:
        # Always drop the scratch DB and remove generated SVGs.
        mine.client.drop_database("tests")
        purge(file_dir / "..", r".*\.svg$")
|
import numpy as np
import imageio
"""
Utilities for generating example density distributions
"""
def generate_single_focus(width, height, fx, fy, variance):
    """Generate a single-focus density map as an unnormalized 2D Gaussian.

    Args:
        width, height: dimensions of the map in pixels.
        fx, fy: fractional (0..1) position of the focus within the map.
        variance: spread of the gaze region in pixels (used as sigma).

    Returns:
        (height, width) ndarray with value 1.0 at the focus, decaying outward.
    """
    # Pixel-coordinate grids; meshgrid('xy') yields (height, width) arrays.
    x, y = np.meshgrid(np.linspace(0, width - 1, width),
                       np.linspace(0, height - 1, height))
    # Separable squared distances scaled by 2*sigma^2.
    # (A commented-out aspect-ratio variant of dx was removed as dead code.)
    dx = (x - width * fx) ** 2 / (2 * (variance ** 2))
    dy = (y - height * fy) ** 2 / (2 * (variance ** 2))
    density = np.exp(-dx - dy)
    return density
def density_from_image_gray(filename):
    """Load *filename* as grayscale and use the pixel intensities as density."""
    return imageio.imread(filename, as_gray=True)
def density_from_image_grad(filename):
    """Load an RGB image and use the per-pixel color-gradient norm as a density."""
    rgb = imageio.imread(filename, pilmode='RGB') / 255.0
    # Spatial gradients along rows (axis 0) and columns (axis 1), per channel.
    grad_rows, grad_cols = np.gradient(rgb, axis=(0, 1))
    # L1 norm over both directions and all three color channels.
    return np.sum(np.abs(grad_rows), axis=2) + np.sum(np.abs(grad_cols), axis=2)
|
'''
Created on Jun. 01, 2019
OpenDSS Mosaik interface, and Sensor/Actuator Models
@file simulator_pflow.py
@author Evandro de Souza
@date 2019.06.01
@version 0.1
@company University of Alberta - Computing Science
'''
import mosaik_api
from SimDSS import SimDSS
from LoadGenerator import LoadGenerator
import numpy as np
import math
import opendssdirect as dss
from Sensor import Phasor, Smartmeter, Prober
# Mosaik simulator metadata: the models this simulator exposes, the
# parameters each accepts at create() time, and the attributes exchanged
# every step ('v' = sensed value, 't' = its timestamp).
META = {
    'models': {
        'Sensor': {
            'public': True,
            'params': ['idt', 'step_size', 'verbose'],
            'attrs': ['v', 't'],
        },
        'Prober': {
            'public': True,
            'params': ['idt','cktTerminal','cktPhase','cktProperty','step_size','cktElement','error','verbose'],
            'attrs': ['v', 't'],
        },
        'Phasor': {
            'public': True,
            'params': ['idt','cktTerminal','cktPhase','step_size','cktElement','error','verbose'],
            'attrs': ['v', 't'],
        },
        'Smartmeter': {
            'public': True,
            'params': ['idt','cktTerminal','cktPhase','step_size','cktElement','error','verbose'],
            'attrs': ['v', 't'],
        },
    },
    # Extra RPC methods mosaik makes callable from the scenario script.
    'extra_methods': [
        'set_next'
    ],
}
class ProberSim:
    """Thin adapter exposing a Sensor.Prober instance to the mosaik layer."""

    def __init__(self, cktTerminal, cktPhase, cktProperty, idt, step_size,
                 objDSS, cktElement, error, verbose):
        # All behaviour is delegated to the wrapped Prober sensor model.
        self.objProberSim = Prober(cktTerminal, cktPhase, cktProperty, idt,
                                   step_size, objDSS, cktElement, error,
                                   verbose)

    def updateValues(self, time):
        """Sample the underlying prober at simulation time *time*."""
        self.objProberSim.updateValues(time)

    def getLastValue(self):
        """Return the most recent sample from the underlying prober."""
        return self.objProberSim.getLastValue()
class PhasorSim:
    """Thin adapter exposing a Sensor.Phasor instance to the mosaik layer."""

    def __init__(self, cktTerminal, cktPhase, idt, step_size, objDSS,
                 cktElement, error, verbose):
        # All behaviour is delegated to the wrapped Phasor sensor model.
        self.objPhasorSim = Phasor(cktTerminal, cktPhase, idt, step_size,
                                   objDSS, cktElement, error, verbose)

    def updateValues(self, time):
        """Sample the underlying phasor at simulation time *time*."""
        self.objPhasorSim.updateValues(time)

    def getLastValue(self):
        """Return the most recent sample from the underlying phasor."""
        return self.objPhasorSim.getLastValue()
class SmartmeterSim:
    """Thin adapter exposing a Sensor.Smartmeter instance to the mosaik layer."""

    def __init__(self, cktTerminal, cktPhase, idt, step_size, objDSS,
                 cktElement, error, verbose):
        # All behaviour is delegated to the wrapped Smartmeter sensor model.
        self.objSmartmeterSim = Smartmeter(cktTerminal, cktPhase, idt,
                                           step_size, objDSS, cktElement,
                                           error, verbose)

    def updateValues(self, time):
        """Sample the underlying smartmeter at simulation time *time*."""
        self.objSmartmeterSim.updateValues(time)

    def getLastValue(self):
        """Return the most recent sample from the underlying smartmeter."""
        return self.objSmartmeterSim.getLastValue()
class PFlowSim(mosaik_api.Simulator):
    """Mosaik power-flow simulator: wraps OpenDSS plus the sensor wrappers."""

    def __init__(self):
        super().__init__(META)
        self.data = {}        # eid -> {'v': ..., 't': ...} served by get_data()
        self.actives = {}
        self.entities = {}
        self.next = {}
        self.instances = {}   # eid -> ProberSim / PhasorSim / SmartmeterSim
        self.step_size = 1
        self.loadgen_interval = self.step_size

    def init(self, sid, topofile, nwlfile, loadgen_interval, ilpqfile = "", verbose=0):
        """Mosaik API init: start OpenDSS from the topology/load files and
        create the load generator.  Returns the simulator META."""
        self.sid = sid
        self.verbose = verbose
        self.loadgen_interval = loadgen_interval
        self.swpos = 0
        self.swcycle = 35
        if (self.verbose > 0): print('simulator_pflow::init', self.sid)
        if (self.verbose > 1): print('simulator_pflow::init', topofile, nwlfile, ilpqfile, verbose)
        #--- start opendss
        self.dssObj = SimDSS(topofile, nwlfile, ilpqfile)
        if (self.verbose > 2):
            self.dssObj.showLoads()
            self.dssObj.showVNodes()
            self.dssObj.showIinout()
            self.dssObj.showVMagAnglePu()
            dss.run_command("Show Voltages LN nodes")
            dss.run_command("Show Buses")
        #--- Generate and save AdjMatrix and YMatrix (one-off helpers)
        # self.dssObj.createAdjMatrix("config/IEEE33_AdjMatrixFull.txt")
        # YMatrix = self.dssObj.getYMatrix()
        # np.save('config/IEEE33_YMatrixFull.npy', YMatrix)
        #--- create instance of LoadGenerator
        self.objLoadGen = LoadGenerator(nwlfile,
                                        PFLimInf = 0.95,
                                        PFLimSup = 0.95,
                                        LoadLimInf = 0.4,
                                        LoadLimSup = 0.9,
                                        AmpGain = 0.25,
                                        Freq = 1./1250,
                                        PhaseShift = math.pi)
        return self.meta

    def create(self, num, model, idt, **kwargs):
        """Mosaik API create: instantiate one sensor entity of kind *model*."""
        if (self.verbose > 0): print('simulator_pflow::create ', model, idt)
        eid = '%s_%s' % (model, idt)
        self.data[eid] = {}
        self.instances[eid] = {}
        # NOTE(review): the dict placeholder above is replaced by a Sim object
        # for the known models below, yet set_next() later indexes
        # self.instances[pflow] like a dict -- confirm which is intended.
        if (model == 'Prober'):
            self.instances[eid] = ProberSim(
                cktTerminal = kwargs['cktTerminal'],
                cktPhase = kwargs['cktPhase'],
                cktProperty = kwargs['cktProperty'],
                idt = idt,
                step_size = kwargs['step_size'],
                objDSS = self.dssObj,
                cktElement = kwargs['cktElement'],
                error = kwargs['error'],
                verbose = kwargs['verbose']
            )
        if (model == 'Phasor'):
            self.instances[eid] = PhasorSim(
                cktTerminal = kwargs['cktTerminal'],
                cktPhase = kwargs['cktPhase'],
                idt = idt,
                step_size = kwargs['step_size'],
                objDSS = self.dssObj,
                cktElement = kwargs['cktElement'],
                error = kwargs['error'],
                verbose = kwargs['verbose']
            )
        if (model == 'Smartmeter'):
            self.instances[eid] = SmartmeterSim(
                cktTerminal = kwargs['cktTerminal'],
                cktPhase = kwargs['cktPhase'],
                idt = idt,
                step_size = kwargs['step_size'],
                objDSS = self.dssObj,
                cktElement = kwargs['cktElement'],
                error = kwargs['error'],
                verbose = kwargs['verbose']
            )
        return [{'eid': eid, 'type': model}]

    def step(self, time, inputs):
        """Mosaik API step: apply fresh loads, forward controller actions to
        the grid, then sample every sensor.  Returns the next step time."""
        if (self.verbose > 0): print('simulator_pflow::step time =', time, ' inputs = ', inputs)
        next_step = time + 1
        #---
        #--- process inputs data
        #---
        #--- Activate load generator at its configured interval
        if (0 == (time % self.loadgen_interval)):
            #-- get a new sample from loadgen
            # ePQ = self.objLoadGen.createLoads()
            ePQ = self.objLoadGen.readLoads()
            #-- apply the new elastic load to OpenDSS
            self.dssObj.setLoads(ePQ)
        #--- use actuators to update opendss state with actions received by controllers (Mosaik)
        for eid, attrs in inputs.items():
            value_v = list(attrs['v'].values())[0]
            value_t = list(attrs['t'].values())[0]
            if (value_v != 'None' and value_v != None):
                if (self.verbose > 1): print('simulator_pflow::step Propagation delay =', time - value_t)
                # NOTE(review): setControl is not defined on the Sim wrappers
                # visible in this file -- presumably added elsewhere; verify.
                self.instances[eid].setControl(value_v, time)
        #---
        #--- Update values from Probers, Phasor, SmartMeters
        #---
        for instance_eid in self.instances:
            self.instances[instance_eid].updateValues(time)
        return next_step

    def get_data(self, outputs):
        """Mosaik API get_data: return the latest (v, t) of every instance."""
        if (self.verbose > 0): print('simulator_pflow::get_data INPUT', outputs)
        for instance_eid in self.instances:
            val_v, val_t = self.instances[instance_eid].getLastValue()
            self.data[instance_eid]['v'] = val_v
            self.data[instance_eid]['t'] = val_t
        if (self.verbose > 0): print('simulator_pflow::get_data OUPUT data:', self.data)
        return self.data

    def set_next(self, pflow, instance, parameters):
        """Extra method (see META): record *parameters* for *instance*."""
        if (self.verbose > 2): print('simulator_pflow::set_next', instance, parameters)
        # NOTE(review): assumes self.instances[pflow] is still a dict; create()
        # replaces it with a Sim object for known models -- confirm intent.
        if instance not in self.instances[pflow]:
            self.instances[pflow][instance] = parameters

    # def finalize(self):
    #     print('OpenDSS Final Results:')
    #     self.dssObj.showIinout()
#!/usr/bin/env python3
import argparse
import pprint
import requests
def login_admin(admin_id, admin_secret, url, verbose):
    """Obtain an OAuth access token from UAA via the client-credentials grant.

    admin_id/admin_secret are sent as HTTP basic auth; *url* is the UAA host
    (the http:// scheme is added here).  Returns the opaque access token, or
    None if the response carried no 'access_token' field.
    """
    token_headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Accept': 'application/json',
    }
    token_params = {
        'grant_type': 'client_credentials',
        'response_type': 'token',
        'token_format': 'opaque',
    }
    response = requests.post(
        'http://{}/oauth/token'.format(url),
        headers=token_headers,
        params=token_params,
        auth=(admin_id, admin_secret),
    )
    body = response.json()
    if verbose:
        pprint.pprint(body)
    access_token = body.get('access_token')
    if verbose:
        print("Access code {}".format(access_token))
    return access_token
def query_client(access_token, url, verbose):
    """List the OAuth clients registered in UAA using *access_token*."""
    auth_headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'Authorization': 'Bearer {}'.format(access_token),
    }
    response = requests.get('http://{}/oauth/clients'.format(url), headers=auth_headers)
    if verbose:
        print(response.status_code)
        pprint.pprint(response.json())
if __name__ == '__main__':
    # CLI: authenticate an admin against UAA, then list its OAuth clients.
    parser = argparse.ArgumentParser(description="Add user to UAA")
    parser.add_argument("-a", "--admin_id", required=True, dest="admin_id", help="The admin id for the organization")
    parser.add_argument("-s", "--admin_secret", required=True, dest="admin_secret", help="The admin password for the organization")
    parser.add_argument("-url", "--url", required=True, dest="url",
                        help="The UAA url to target")
    parser.add_argument("-v", "--verbose", default=False, required=False, dest="verbose",
                        help="To enable verbose output", action="store_true")
    args = parser.parse_args()
    # Exchange admin credentials for a token, then query registered clients.
    token = login_admin(args.admin_id, args.admin_secret, args.url, verbose=args.verbose)
    query_client(access_token=token, url=args.url, verbose=args.verbose)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of methods to be used with find_element_by"""
# Created by a1wen at 28.02.19
# Stolen from Selene
from selenium.webdriver.common.by import By
def by_css(css_selector):
    """Pair the CSS_SELECTOR strategy constant with the given search pattern."""
    return (By.CSS_SELECTOR, css_selector)
def by_name(name):
    """Pair the NAME strategy constant with the given search pattern."""
    return (By.NAME, name)
def by_class(name):
    """Pair the CLASS_NAME strategy constant with the given search pattern."""
    return (By.CLASS_NAME, name)
def by_link_text(text):
    """Pair the LINK_TEXT strategy constant with the given search pattern."""
    return (By.LINK_TEXT, text)
def by_partial_link_text(partial_text):
    """Pair the PARTIAL_LINK_TEXT strategy constant with the given search pattern."""
    return (By.PARTIAL_LINK_TEXT, partial_text)
def by_xpath(xpath):
    """Pair the XPATH strategy constant with the given search pattern."""
    return (By.XPATH, xpath)
def by_tag(tag):
    """Pair the TAG_NAME strategy constant with the given search pattern."""
    return (By.TAG_NAME, tag)
def by_id(oid):
    """Pair the ID strategy constant with the given search pattern."""
    return (By.ID, oid)
|
'''
Implement strStr().
Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
Example 1:
Input: haystack = "hello", needle = "ll"
Output: 2
Example 2:
Input: haystack = "aaaaa", needle = "bba"
Output: -1
Clarification:
What should we return when needle is an empty string? This is a great question to ask during an interview.
For the purpose of this problem, we will return 0 when needle is an empty string. This is consistent with C's strstr() and Java's indexOf().
'''
class Solution:
    def strStr(self, haystack, needle):
        """Return the index of the first occurrence of needle in haystack, or -1.

        An empty needle returns 0, matching C's strstr() and Java's indexOf().
        """
        try:
            return haystack.index(needle)
        except ValueError:
            # str.index raises ValueError when needle is absent; the previous
            # bare `except` also swallowed TypeError/KeyboardInterrupt.
            return -1
# Quick manual check for example 1: expect 2 to be printed.
mysolution = Solution()
haystack = "hello"
needle = "ll"
print(mysolution.strStr(haystack, needle))
|
# coding: utf-8
import time
import os
import shutil
from dp_toolbox import *
from pywinauto_recorder.player import *
from pathlib import Path
if __name__ == '__main__':
    # Work relative to this script's own folder so Data/ paths resolve.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Text-to-speech narrator and OBS Studio recorder for the tutorial video.
    brian = Voice(name='Brian')
    obs = OBSStudio()
    #####################################################################
    # Copy script in Data to 'Pywinauto Recorder' folder in Home folder #
    #####################################################################
    shutil.copy(Path("Data")/Path("recorded Mon Apr 5 16_28_28 2021.py"), Path.home()/Path("Pywinauto recorder"))
    ####################################
    # Open 'Pywinauto Recorder' folder #
    ####################################
    brian.say("The generated file is in 'Pywinauto Recorder' folder under your home folder.")
    time.sleep(1.5)
    with Window(u"||List"):
        double_left_click("Home||ListItem")
    time.sleep(1.5)
    # Snap the Explorer window to the right half of the screen (Win+Right).
    send_keys("{LWIN down}""{VK_RIGHT}""{LWIN up}")
    with Window("C:\\Users\\d_pra||Window"):
        double_left_click("*->Shell Folder View||Pane->Items View||List->Pywinauto recorder||ListItem->Name||Edit")
    #############################################################
    # Drag and drop the recorded file on pywinauto_recorder.exe #
    #############################################################
    with Window("C:\\\\Users\\\\.*\\\\Pywinauto recorder||Window", regex_title=True):
        drag_and_drop_start = find("*->Shell Folder View||Pane->Items View||List->||ListItem->Name||Edit#[0,0]")
        drag_and_drop_start.draw_outline(colour='blue')
    with Window("||List"):
        drag_and_drop_end = find(u"Pywinauto recorder||ListItem")
        drag_and_drop_end.draw_outline()
    #with Window(u"Program Manager||Pane"):
    #    drag_and_drop_end = find(u"Desktop||List->Pywinauto recorder||ListItem")
    #    drag_and_drop_end.draw_outline()
    time.sleep(1)
    drag_and_drop(drag_and_drop_start, drag_and_drop_end)
    brian.say("Thanks for watching! In the next tutorial, you will see how to make a robust recorded script. See you soon!", wait_until_the_end_of_the_sentence=True)
    # Stop and close OBS before exiting so the recording file is finalized.
    obs.stop_recording()
    time.sleep(1)
    obs.quit()
    exit(0)
|
__author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import yaml
## parsing yaml file
def parse_yaml_file(yaml_file):
    """Parse a (multi-document) YAML file into a dict of settings.

    Every document key named "sample" is collected into a list stored under
    documents["sample"]; all other keys are merged into the top-level dict
    (later documents override earlier ones).  Each key/value pair is echoed
    to stdout as it is read.

    Returns the merged dict.
    """
    documents = {}
    samples = []
    # Fixes: the stream was never closed (leak) and yaml.load_all without an
    # explicit Loader is deprecated/unsafe -- safe_load_all builds only plain
    # Python objects.  Iteration stays inside the with-block because
    # safe_load_all is lazy.
    with open(yaml_file, "r") as stream:
        for doc in yaml.safe_load_all(stream):
            for k, v in doc.items():
                if k == "sample":
                    samples.append(v)
                else:
                    documents.update({k: v})
                print(k, "->", v)
            print("\n")
    documents.update({"sample": samples})
    return documents
if __name__ == "__main__":
yaml_file = "/Users/guorongxu/Desktop/Sample_cDNA.yaml"
# yaml_file = "/home/mustafa/ccbb/jupyter-genomics-github/src/awsCluster/server/ChipSeqPipeline/homer_workflow/yaml_examples/Sample_cDNA.yaml"
documents = parse_yaml_file(yaml_file)
print(documents.get("sample"))
|
import torch
import torch.nn.functional as F
from tqdm import tqdm
import numpy as np
from sklearn.metrics import confusion_matrix
import argparse
import logging
import os
from torch.utils.data import DataLoader
from unet import UNet
from utils.sar_dataset_loader import BasicDataset
dataset_dir = './data/dataset/test/'
def compute_roc_curve(net, loader, device, batch_size):
    """Sweep the detection threshold 0.0..1.0 and record ROC curve points.

    For each of 11 thresholds i/10 the network is run over *loader*, the
    predicted masks are binarized, and per-image true/false positive rates
    are accumulated and averaged over the dataset.  The two 11-element
    curves are written to 'true_positive_rate.npy' and
    'false_positive_rate.npy'.
    """
    net.eval()
    true_positive_rate = np.zeros((11))
    false_positive_rate = np.zeros((11))
    for i in range(0, 11):
        # Bug fix: `tot` was previously referenced without initialization
        # (NameError on the first multi-class sample).
        tot = 0
        for batch in loader:
            imgs = batch['image'].to(device=device, dtype=torch.float32)
            mask_type = torch.float32 if net.n_classes == 1 else torch.long
            true_masks = batch['mask'].to(device=device, dtype=mask_type)
            # Inference only: skip autograd bookkeeping.
            with torch.no_grad():
                mask_pred = net(imgs)
            for true_mask, pred in zip(true_masks, mask_pred):
                pred = (pred > (i / 10)).float()
                if net.n_classes > 1:
                    tot += F.cross_entropy(pred.unsqueeze(dim=0), true_mask.unsqueeze(dim=0)).item()
                else:
                    # NOTE(review): assumes masks are binary, 1-D compatible and
                    # on CPU (sklearn cannot consume CUDA tensors) -- confirm.
                    tn, fp, fn, tp = confusion_matrix(true_mask, pred, labels=[0, 1]).ravel()
                    true_positive_rate[i] += tp / (tp + fn)
                    false_positive_rate[i] += fp / (fp + tn)
        # Bug fix: these normalizations previously used the undefined names
        # 'trueDetection' / 'falseAlarm', crashing with NameError.
        true_positive_rate[i] /= (len(loader) * batch_size)
        false_positive_rate[i] /= (len(loader) * batch_size)
    np.save("true_positive_rate", true_positive_rate)
    np.save("false_positive_rate", false_positive_rate)
def get_args():
    """Parse CLI options for mask prediction (model file, scale, channels, batch)."""
    parser = argparse.ArgumentParser(
        description='Predict masks from input images',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('--model', '-m', default='MODEL.pth', metavar='FILE',
                        help="Specify the file in which the model is stored")
    parser.add_argument('--scale', '-s', type=float, default=1,
                        help="Scale factor for the input images")
    # NOTE: '--chennel' (sic) is kept verbatim; other code reads args.chennel.
    parser.add_argument('--chennel', '-c', type=int, default=1,
                        help="number of channels in the patch")
    parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?',
                        default=1, help='Batch size', dest='batchsize')
    return parser.parse_args()
if __name__ == "__main__":
args = get_args()
myNet = UNet(n_channels=args.chennel, n_classes=1)
logging.info("Loading model {}".format(args.model))
myDevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Using device {myDevice}')
myNet.to(device=myDevice)
checkpoint = torch.load(args.model)
myNet.load_state_dict(
checkpoint['model_state_dict'])
logging.info("Model loaded !")
if args.chennel == 1:
testSet = BasicDataset(dataset_dir, channels='VV', train=False, scale=args.scale)
else:
testSet = BasicDataset(dataset_dir, channels='VVVH', train=False, scale=args.scale)
myLoader = DataLoader(testSet, batch_size=args.batchsize, shuffle=False, num_workers=8, pin_memory=True)
compute_roc_curve(net=myNet, loader=myLoader, device=myDevice, batch_size=args.batchsize)
|
import digitalocean
# Tag meant to select oVirt host droplets (currently unused: see below).
host_tag = 'ovirt-host'
# WARNING: this fetches EVERY droplet on the account and destroys them all.
# The tag-filtered variant is commented out, so host_tag has no effect here.
droplets = digitalocean.Manager().get_all_droplets()
#droplets = digitalocean.Manager().get_all_droplets(tag_name=host_tag)
for droplet in droplets:
    droplet.destroy()
|
import webbrowser
import random
import time
# Endlessly open a randomly chosen site in the default browser, pausing a
# random number of seconds between launches.  Runs until interrupted.
while True:
    sites=random.choice(['google.com','amazon.in','flipkart.com','youtube.com'])
    visit="http://{}".format(sites)
    webbrowser.open(visit)
    # Sleeps 0 or 1 seconds; raise the range (e.g. 20-50) for realistic pacing.
    seconds=random.randrange(0,2) #set time between 20 to 50 for testing
    time.sleep(seconds)
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
=======================================================================
Calibration Experiments (:mod:`qiskit_experiments.library.calibration`)
=======================================================================
.. currentmodule:: qiskit_experiments.library.calibration
.. warning::
The calibrations interface is still in active development. It may have
breaking API changes without deprecation warnings in future releases until
otherwise indicated.
Calibrating qubit setups is the task of finding the pulse shapes and parameter
values that maximizes the fidelity of the resulting quantum operations. This
therefore requires experiments which are analyzed to extract parameter values.
Furthermore, the resulting parameter values and schedules must be managed. The
calibration module in Qiskit experiments allows users to run calibration
experiments and manage the resulting schedules and parameter values.
The following experiments are designed to calibrate parameter values. Some experiments such
as :class:`QubitSpectroscopy` can both be seen as characterization and calibrations
experiments. Such experiments can be found in the
:mod:`qiskit_experiments.library.characterization`
module.
.. autosummary::
:toctree: ../stubs/
:template: autosummary/experiment.rst
RoughFrequencyCal
DragCal
FineDrag
FineXDrag
FineSXDrag
Rabi
FineAmplitudeCal
FineXAmplitudeCal
FineSXAmplitudeCal
RamseyXY
Calibration analysis
====================
.. autosummary::
:toctree: ../stubs/
:template: autosummary/analysis.rst
DragCalAnalysis
FineDragAnalysis
FineAmplitudeAnalysis
RamseyXYAnalysis
Calibrations management
=======================
See :mod:`qiskit_experiments.calibration_management`.
"""
from .rough_frequency import RoughFrequencyCal
from .drag import DragCal
from .fine_drag import FineDrag, FineXDrag, FineSXDrag
from .fine_amplitude import FineAmplitudeCal, FineXAmplitudeCal, FineSXAmplitudeCal
from .rabi import Rabi, EFRabi
from .ramsey_xy import RamseyXY
from .analysis.drag_analysis import DragCalAnalysis
from .analysis.fine_drag_analysis import FineDragAnalysis
from .analysis.fine_amplitude_analysis import FineAmplitudeAnalysis
# NOTE(review): the module below is spelled "remsey" (not "ramsey"); this may
# match an actual file-name typo in the package -- confirm before "fixing" it.
from .analysis.remsey_xy_analysis import RamseyXYAnalysis
|
import codecs
import os
import sys
import logging
# Write INFO-and-above log records to run.log in the working directory.
logging.basicConfig(filename="run.log", level=logging.INFO)
def to_json(python_object):
    """json.dumps `default=` hook: encode bytes as a base64-tagged dict."""
    if not isinstance(python_object, bytes):
        raise TypeError(repr(python_object) + ' is not JSON serializable')
    encoded = codecs.encode(python_object, 'base64').decode()
    return {'__class__': 'bytes', '__value__': encoded}
def from_json(json_object):
    """json.loads `object_hook`: decode dicts produced by to_json into bytes."""
    if json_object.get('__class__') == 'bytes':
        return codecs.decode(json_object['__value__'].encode(), 'base64')
    return json_object
def colors(state):
    """Map a color name ('BLUE', 'GREEN', ...) to its ANSI escape code.

    'ENDC' and 'WHITE' both reset the terminal; unknown names yield ''.
    """
    ansi = {
        'BLUE': '\033[94m',
        'GREEN': '\033[92m',
        'YELLOW': '\033[93m',
        'RED': '\033[91m',
        'ENDC': '\033[0m',
        'WHITE': '\033[0m',
    }
    return ansi.get(state, '')
def supports_color():
    """
    from https://github.com/django/django/blob/master/django/core/management/color.py
    Return True if the running system's terminal supports color,
    and False otherwise.
    """
    # win32 only supports ANSI colors when ANSICON is present; Pocket PC never.
    on_windows_without_ansicon = (
        sys.platform == 'win32' and 'ANSICON' not in os.environ
    )
    if sys.platform == 'Pocket PC' or on_windows_without_ansicon:
        return False
    # isatty is not always implemented, #6223.
    return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def log(string, color):
    """Record *string* in the logfile and echo it to stdout, colorized when possible."""
    logging.info(string)
    if supports_color():
        print('\033[1m' + colors(color) + string + colors("ENDC"))
    else:
        print(string)
def seperator(color):
    """Print (and log) a 70-character horizontal rule, colorized when possible."""
    rule = "-" * 70
    logging.info(rule)
    if supports_color():
        print('\033[1m' + colors(color) + rule + colors("ENDC"))
    else:
        print(rule)
import sys
import itertools
import pandas as pd
from sklearn.model_selection import (StratifiedKFold, train_test_split, ParameterGrid, cross_val_score)
from sklearn.metrics import balanced_accuracy_score
from sklearn.base import clone
import warnings
from time import time
from tempfile import mkdtemp
from shutil import rmtree
from sklearn.externals.joblib import Memory
from read_file import read_file
import pdb
import numpy as np
from methods import *
import os.path
import copy
def evaluate_model(dataset, save_file, random_state, est, hyper_params):
    """Grid-search *est* over *hyper_params* on *dataset*, appending results.

    For every parameter combination the estimator is deep-copied, seeded,
    fit on a stratified 75/25 train/test split, and one CSV row with
    balanced accuracies and runtime is appended to *save_file*.

    Parameters
    ----------
    dataset : str
        Path to the data file; its basename (minus a 7-char suffix) is
        recorded as the dataset name.
    save_file : str
        CSV file the result rows are appended to (no header).
    random_state : int
        Seed for the split and for seedable estimators; must not be None.
    est : estimator
        Template estimator instance, deep-copied per grid point.
    hyper_params : dict
        Grid specification consumed by sklearn's ParameterGrid.
    """
    assert random_state is not None
    est_name = type(est).__name__
    # load data
    X, y, feature_names = read_file(dataset)
    # Stratified hold-out split.  Bug fix: the split previously passed
    # random_state=None (despite asserting a seed exists), which made every
    # run use a different split and results non-reproducible.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y,
        train_size=0.75,
        test_size=0.25,
        stratify=y,
        random_state=random_state)
    dataname = dataset.split('/')[-1][:-7]
    # Grid Search
    with warnings.catch_warnings():
        param_grid = list(ParameterGrid(hyper_params))
        # One independent (deep-copied) estimator per grid point.
        Clfs = [copy.deepcopy(est).set_params(**p) for p in param_grid]
        for clf in Clfs:
            # Fix the estimator's seed if it exposes one.
            for a in ['random_state', 'seed']:
                if hasattr(clf, a):
                    setattr(clf, a, random_state)
            if est_name == 'GPMaLClassifier':
                setattr(clf, 'dataset', dataset)
            print('running', clf.get_params(), '...')
            start = time()
            clf.fit(X_train, y_train)
            runtime = (time() - start) / 3600  # hours

            train_bal_accuracy = balanced_accuracy_score(y_train, clf.predict(X_train))
            test_bal_accuracy = balanced_accuracy_score(y_test, clf.predict(X_test))

            # Model introspection is only available for ManiGPClassifier.
            if est_name == 'ManiGPClassifier':
                model = [str(clf.model[0]), str(clf.model[1])]
                best_fitness = clf.best_fitness
            else:
                model = ['', '']
                best_fitness = 0

            results = [{
                'dataset': dataname,
                'seed': random_state,
                'algorithm': est_name,
                'train_bal_accuracy': train_bal_accuracy,
                'test_bal_accuracy': test_bal_accuracy,
                'runtime': runtime,
                'parameters': clf.get_params(),
                'model1': model[0],
                'model2': model[1],
                'best_fitness': best_fitness}]
            # Append one header-less CSV row per grid point.
            df = pd.DataFrame.from_records(data=results, columns=results[0].keys())
            df.to_csv(save_file, index=False, header=False, mode='a')
################################################################################
# main entry point
################################################################################
import argparse
import importlib
if __name__ == '__main__':
    # parse command line arguments
    parser = argparse.ArgumentParser(description="Evaluate a method on a dataset.",
                                     add_help=False)
    parser.add_argument('INPUT_FILE', type=str,
                        help='Data file to analyze; ensure that the '
                        'target/label column is labeled as "class".')
    parser.add_argument('-h', '--help', action='help',
                        help='Show this help message and exit.')
    parser.add_argument('-ml', action='store', dest='ALG',default=None,type=str,
                        help='Name of estimator (with matching file in methods/)')
    parser.add_argument('-save_file', action='store', dest='SAVE_FILE',default=None,
                        type=str, help='Name of save file')
    parser.add_argument('-seed', action='store', dest='RANDOM_STATE',default=None,
                        type=int, help='Seed / trial')
    args = parser.parse_args()
    # import algorithm: methods/<ALG>.py must define `est` and `hyper_params`
    algorithm = importlib.__import__('methods.'+ args.ALG, globals(),locals(),
                                     ['est','hyper_params'])
    evaluate_model(args.INPUT_FILE, args.SAVE_FILE, args.RANDOM_STATE,
                   algorithm.est, algorithm.hyper_params)
|
# type: ignore
# flake8: noqa
import os
import sys
import ray
from ray import tune
from sacred import Experiment
# add project path to sys.path for easy import
file_dir = os.path.dirname(os.path.abspath(__file__))
root_dir = os.path.abspath(os.path.join(file_dir, ".."))
sys.path.append(root_dir)
# Start a local ray cluster with 4 CPUs unless one is already running.
if not ray.is_initialized():
    ray.init(num_cpus=4)
def wrapper_sacred_inner_experiment(config, reporter):
    """Ray Tune trainable: run the sacred inner experiment with *config*.

    *reporter* is supplied by Tune but unused; sacred handles result capture.
    """
    # need to append path here again cause ray tune won't find it otherwise (No module named src)
    sys.path.append(root_dir)
    # import needs to be here because sacred projects aren't pickle-able but tune needs to be pickable
    from src.sacred_experiment import inner_experiment
    inner_experiment.run(config_updates=config)
# Outer (hyper-parameter search) sacred experiment.
hyper_ex: Experiment = Experiment("hyper_ex")
# add an observer here if you'd like to
@hyper_ex.main
def hyper_param_tuning():
    """Run a (currently single-point) grid search over max_depth via Ray Tune."""
    hyper_params = {"max_depth": tune.grid_search([1])}
    tune.run(wrapper_sacred_inner_experiment,
             config=hyper_params,
             # One CPU bundle for the trial driver, one for the worker group.
             resources_per_trial=tune.PlacementGroupFactory([{
                 "CPU": 1,
                 "GPU": 0
             }, {
                 "CPU": 1
             }]))


if __name__ == "__main__":
    hyper_ex.run_commandline()
|
import json
import os
import redis
def run():
    """Load rows of a JSON/text file into a Redis set, one line per member.

    Reads connection settings from the `json_to_redis` section of
    config.json, optionally replaces an existing key after confirmation,
    then SADDs every line of the input file into REDIS_KEY, printing a
    progress tick every 5000 rows.
    """
    # Read the configuration file.
    with open('config.json', 'r', encoding='utf-8') as f:
        config = json.load(f).get('json_to_redis')
    if config is None:
        print('不能读取应用 `json_to_redis` 的配置。')
        exit()
    # Pull out the individual configuration entries.
    JSON_PATH = config.get('json_path')
    REDIS_HOST = config.get('redis_host')
    REDIS_PORT = config.get('redis_port')
    REDIS_PASSWD = config.get('redis_passwd')
    REDIS_DB = config.get('redis_db')
    REDIS_KEY = config.get('redis_key')
    if None in (JSON_PATH, REDIS_HOST, REDIS_PORT, REDIS_PASSWD, REDIS_DB):
        print('不能读取应用 `json_to_redis` 的配置。')
        exit()
    if not os.path.isfile(JSON_PATH):
        print('JSON 文件 \'{json_path}\' 不存在。'.format(json_path=JSON_PATH))
        exit()
    r = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB, password=REDIS_PASSWD)
    try:
        # If the target key already exists, ask before overwriting it.
        res = r.exists(REDIS_KEY)
        if res != 0:
            choice = input('Redis 中 key \'{key}\' 已存在,是否覆盖它?(\'Y\' 或 \'n\'):'.format(key=REDIS_KEY))
            if choice == 'Y':
                r.delete(REDIS_KEY)
                print('已删除 key \'{key}\' ...'.format(key=REDIS_KEY))
            else:
                print('操作中止...')
                exit()
    except redis.exceptions.ConnectionError as e:
        print('无法连接 Redis 服务:{err}'.format(err=e))
        exit()
    with open(JSON_PATH, 'r', encoding='utf-8') as f:
        row = f.readline()
        i = 0
        print('正在往 Redis 插入数据...')
        while row:
            # Strip a trailing newline, then add the row to the set.
            row = row[:-1] if row[-1] == '\n' else row
            r.sadd(REDIS_KEY, row)
            i += 1
            # Every 5000 rows: print a progress tick and publish a heartbeat.
            if i == 5000:
                i = 0
                print('=', end='')
                r.publish('SE-5035', 'hello')
            row = f.readline()
        print('\n插入完成.')


if __name__ == "__main__":
    run()
|
"""Google's protocol buffer I/O support."""
import warnings
import numpy as np
from physt import __version__
from physt.histogram_base import HistogramBase
from physt.io import require_compatible_version, create_from_dict
from .histogram_pb2 import Histogram, Meta, HistogramCollection
# Name of fields that are re-used from to_dict / from_dict without change.
SIMPLE_CONVERSION_FIELDS = (
    "histogram_type", "dtype"
)
# Meta-data keys copied verbatim between message and histogram meta_data.
SIMPLE_META_KEYS = ("name", "title")
# Version writing the message
CURRENT_VERSION = __version__
# First version that should understand this
COMPATIBLE_VERSION = "0.3.42"
def write(histogram):
    """Convert a histogram to a protobuf message.

    Note: Currently, all binnings are converted to
    static form. When you load the histogram again,
    you will lose any related behaviour.

    Note: A histogram collection is also planned.

    Parameters
    ----------
    histogram : HistogramBase | list | dict
        Any histogram

    Returns
    -------
    message : google.protobuf.message.Message
        A protocol buffer message
    """
    histogram_dict = histogram.to_dict()
    message = Histogram()
    # Scalar fields shared verbatim with the to_dict representation.
    for field in SIMPLE_CONVERSION_FIELDS:
        setattr(message, field, histogram_dict[field])
    # Main numerical data - TODO: Optimize!
    message.frequencies.extend(histogram.frequencies.flatten())
    message.errors2.extend(histogram.errors2.flatten())
    # Binnings: stored statically as lists of [lower, upper] limit pairs.
    for binning in histogram._binnings:
        binning_message = message.binnings.add()
        for edges in binning.bins:
            limits = binning_message.bins.add()
            limits.lower = edges[0]
            limits.upper = edges[1]
    # All meta data
    meta_message = message.meta
    # user_defined = {}
    # for key, value in histogram.meta_data.items():
    #     if key not in PREDEFINED:
    #         user_defined[str(key)] = str(value)
    for key in SIMPLE_META_KEYS:
        if key in histogram.meta_data:
            setattr(meta_message, key, str(histogram.meta_data[key]))
    if "axis_names" in histogram.meta_data:
        meta_message.axis_names.extend(histogram.meta_data["axis_names"])
    # Version stamps let readers check compatibility before parsing.
    message.physt_version = CURRENT_VERSION
    message.physt_compatible = COMPATIBLE_VERSION
    return message
def read(message):
    """Convert a parsed protobuf message back into a histogram instance."""
    # Refuse messages written for an incompatible physt version.
    require_compatible_version(message.physt_compatible)
    # v0.3.42 is currently the only wire format implemented.
    return create_from_dict(_dict_from_v0342(message), "Message")
def write_many(histogram_collection):
    """Serialize a name->histogram mapping into a HistogramCollection message."""
    warnings.warn("Histogram collections are unstable API. May be removed.")
    collection_message = HistogramCollection()
    for name, histogram in histogram_collection.items():
        # Accessing a protobuf map entry creates it; then copy the payload in.
        collection_message.histograms[name].CopyFrom(write(histogram))
    return collection_message
# TODO: Will change with real HistogramCollection class
def read_many(message):
    """Parse a HistogramCollection message into a name->histogram dict."""
    warnings.warn("Histogram collections are unstable API. May be removed.")
    return {name: read(histo) for name, histo in message.histograms.items()}
# TODO: Will change with real HistogramCollection class
def _binning_to_dict(binning_message):
return {
"bins" : [
[bin.lower, bin.upper] for bin in binning_message.bins
]
}
def _dict_from_v0342(message):
    """Reconstruct the physt `to_dict` representation from a v0.3.42 message.

    Returns a dict suitable for `create_from_dict`, with the flat frequency
    and errors2 arrays reshaped to the bin counts of the binnings.
    """
    a_dict = {
        key: getattr(message, key)
        for key in SIMPLE_CONVERSION_FIELDS
    }
    a_dict.update({
        "physt_compatible": message.physt_compatible,
        "binnings": [
            _binning_to_dict(b) for b in message.binnings
        ],
        # Only keep meta keys that carry a non-empty value.
        "meta_data": {
            k: getattr(message.meta, k) for k in SIMPLE_META_KEYS if getattr(message.meta, k)
        },
    })
    axis_names = list(message.meta.axis_names)
    if axis_names:
        a_dict["meta_data"].update({
            "axis_names": axis_names
        })
    # Bug fix: removed a leftover debug `print(a_dict)` that spammed stdout
    # on every deserialization.
    shape = [len(binning["bins"]) for binning in a_dict["binnings"]]
    a_dict.update({
        "frequencies": np.asarray([f for f in message.frequencies]).reshape(shape),
        "errors2": np.asarray([e for e in message.errors2]).reshape(shape)
    })
    return a_dict
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, ElementNotInteractableException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
from crement import Lever
from bs4 import BeautifulSoup
from s4gpy.s4gpy import S4GAPI
import time
import random
import undetected_chromedriver.v2 as uc
import json
import requests
import urllib.parse
# Path to the local ChromeDriver binary (matches Chrome 90).
PATH = r"C:\chromedriver\90\chromedriver.exe"
opt = webdriver.ChromeOptions()
caps = webdriver.DesiredCapabilities.CHROME.copy()
# Hardening / compatibility flags (container-friendly, ignore TLS issues).
opt.add_argument("--no-sandbox")
opt.add_argument("--disable-gpu")
opt.add_argument("--allow-running-insecure-content")
opt.add_argument("--ignore-ssl-errors=yes")
opt.add_argument("--window-size=1280,720")
opt.add_argument("--ignore-certificate-errors")
opt.add_argument("--disable-dev-shm-usage")
# Capture all browser console log entries.
caps['goog:loggingPrefs'] = { 'browser':'ALL' }
# Single module-wide undetected-chromedriver session used by every helper below.
driver = uc.Chrome(PATH, options=opt, desired_capabilities=caps)
#driver = uc.Chrome()
def YouTube_Google_Log_In(thisLogin):
    """Sign the shared driver's YouTube session into a stored Google account.

    thisLogin: substring matched against stored credential tuples; pass ""
    or 0 to use a random stored account instead.
    Returns the email address used, or None when an error occurred.
    """
    try:
        email = 0
        password = 0
        laccounts = get_Google_Accounts()
        if thisLogin != "" and thisLogin != 0:
            for credentials in laccounts:
                if thisLogin in credentials:
                    email = credentials[0]
                    password = credentials[1]
                    break
        if email == 0:
            # No account requested, or the lookup failed: pick one at random.
            # (The original `thisLogin == 0` was a no-op comparison that left
            # email/password unset when the requested account was not found.)
            credentials = laccounts[random.randrange(len(laccounts))]
            email = credentials[0]
            password = credentials[1]
        # Open the sign-in form and walk through the email/password pages.
        driver.find_element_by_css_selector("#end > #buttons > ytd-button-renderer > a").click()
        emailInput = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, "#identifierId")))
        emailInput.send_keys(email)
        time.sleep(0.5)
        driver.find_element_by_css_selector("#identifierNext > div > button").click()
        passwordInput = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, "#password > div.aCsJod.oJeWuf > div > div.Xb9hP > input")))
        time.sleep(2)
        passwordInput.send_keys(password)
        time.sleep(2)
        driver.find_element_by_css_selector("#passwordNext > div > button").click()
        time.sleep(0.5)
        # Dismiss the post-login confirmation dialog.
        driver.find_element_by_css_selector("#yDmH0d > c-wiz.yip5uc.SSPGKf > c-wiz > div > div.p9lFnc > div > div > div > div.ZRg0lb.Kn8Efe > div:nth-child(3) > div > div.yKBrKe > div > span > span").click()
        return email
    except Exception:
        print("Error in YouTube_Google_Log_In(email)")
def YouTube_Google_Log_Out():
    """Log out of YouTube via the /logout/ endpoint, then restore the page.

    Bare ``except:`` replaced with ``except Exception`` so Ctrl-C still works.
    """
    try:
        currPage = driver.current_url
        home_page()
        time.sleep(2)
        driver.get(driver.current_url + "logout/")
        time.sleep(2)
        driver.get(currPage)
    except Exception:
        print("Error in YouTube_Google_Log_Out()")
def YouTube_Acces_Website():
    """Navigate the shared driver to the YouTube homepage.

    (Name kept — including the 'Acces' spelling — for caller compatibility.)
    """
    try:
        driver.get("https://www.youtube.com/")
    except Exception:  # was a bare except, which also swallowed Ctrl-C
        print("Error in YouTube_Acces_Website()")
def YouTube_Accept_Cookies():
    """Click the cookie-consent accept button, if present."""
    try:
        driver.find_element_by_css_selector("#yDmH0d > c-wiz > div > div > div > div.NIoIEf > div.G4njw > div.qqtRac > form > div.lssxud > div > button").click()
    except Exception:  # was a bare except, which also swallowed Ctrl-C
        print("Error in YouTube_Accept_Cookies()")
def YouTube_Deny_Log_In():
    """Dismiss the sign-in upsell dialog if it is showing; best-effort, silent."""
    try:
        driver.find_element_by_xpath("/html/body/ytd-app/ytd-popup-container/tp-yt-paper-dialog/yt-upsell-dialog-renderer/div/div[3]/div[1]/yt-button-renderer/a/tp-yt-paper-button/yt-formatted-string").click()
        time.sleep(1)
        driver.switch_to.default_content()
    except Exception:
        # The dialog is often absent — deliberately ignore (but no longer
        # swallow SystemExit/KeyboardInterrupt like the bare except did).
        pass
def YouTube_Toggle_AutoPlay(boolean):
    """Set the player's autoplay toggle to match *boolean* ('True' enables it).

    The two near-identical branches of the original (same selector, mirrored
    aria-checked comparison) are collapsed: click only when the current state
    differs from the desired one.
    """
    try:
        toggle = driver.find_element_by_css_selector("#movie_player > div.ytp-chrome-bottom > div.ytp-chrome-controls > div.ytp-right-controls > button:nth-child(1) > div > div")
        desired = "true" if boolean == 'True' else "false"
        if toggle.get_attribute("aria-checked") != desired:
            toggle.click()
    except Exception:  # was a bare except
        print("Error in YouTube_Toggle_AutoPlay(boolean)")
def YouTube_Get_Video_Id_From_Url(url):
    """Extract the video id from a watch URL; '' for the bare homepage or on error."""
    try:
        if url == "https://www.youtube.com/":
            return ''
        # .../watch?v=<id>&<other params>  ->  <id>
        return url.split("=")[1].split("&")[0]
    except Exception:  # was a bare except
        print("Error in YouTube_Get_Video_Id_From_Url(url)")
        return ''
def YouTube_Music_No_Thanks():
    """Dismiss the YouTube Music promotion dialog."""
    try:
        driver.find_element_by_css_selector("ytd-button-renderer#dismiss-button > a > tp-yt-paper-button > yt-formatted-string").click()
    except Exception:  # was a bare except
        print("Error in YouTube_Music_No_Thanks()")
def get_Google_Accounts():
    """Fetch the stored (email, password) credential tuples tagged 'youtube'."""
    # SECURITY: API credentials are hard-coded in source. Move them to an
    # environment variable or a secrets store and rotate this password.
    api = S4GAPI("pierre.rambert@hotmail.fr","Pj1101vC")
    return api.get_credentials_api().get_credentials_all("youtube")
def home_page():
    """Click the YouTube logo to return to the homepage."""
    try:
        driver.find_element_by_css_selector("#logo > a > div > #logo-icon").click()
    except Exception:  # was a bare except
        print("Error in home_page()")
def scrollDown():
    """Scroll the current page down by 1500 pixels."""
    try:
        driver.execute_script("window.scrollBy(0,1500);")
    except Exception:  # was a bare except
        print("Error in scrollDown()")
def find_caption():
    """Open the transcript menu and return the concatenated caption HTML ('' on error)."""
    try:
        driver.find_element_by_xpath("//div[3]/div/ytd-menu-renderer/yt-icon-button/button/yt-icon").click()
        driver.find_elements_by_css_selector(".ytd-menu-popup-renderer > ytd-menu-service-item-renderer")[0].click()
        return "".join(
            cue.get_attribute('innerHTML')
            for cue in driver.find_elements_by_css_selector("div.cue-group > div > div")
        )
    except Exception:  # was a bare except
        print("Error in find_caption()")
        return ''
def find_video():
    """Collect the video id of every thumbnail on the current page.

    Returns a list of ids ('' for non-watch links). On error it now returns
    [] instead of the original implicit None, so callers that iterate the
    result (e.g. robot()) no longer crash with a TypeError.
    """
    try:
        video_ids = []
        for thumb in driver.find_elements_by_css_selector("#dismissible > ytd-thumbnail > a#thumbnail"):
            url = thumb.get_attribute("href")
            if url is None:  # was `== None`
                continue
            video_ids.append(YouTube_Get_Video_Id_From_Url(url))
        return video_ids
    except Exception:  # was a bare except
        print("Error in find_video")
        return []
def select_video(n=0):
    """Click the n-th video thumbnail appropriate to the current page type.

    On failure it scrolls down and retries recursively; note this can loop
    indefinitely if no selector ever matches (preserved original behavior).
    """
    try:
        currUrl = driver.current_url
        if currUrl == "https://www.youtube.com/":
            # Homepage grid.
            driver.find_elements_by_css_selector("#contents > ytd-rich-item-renderer")[n].click()
        elif "watch?v=" in currUrl:
            # "Up next" sidebar while watching a video.
            driver.find_elements_by_css_selector("#items > ytd-compact-video-renderer")[n].click()
        elif "results?search_query=" in currUrl:
            # Search results list.
            driver.find_elements_by_css_selector("#contents > ytd-video-renderer > #dismissible > ytd-thumbnail")[n].click()
        else:
            # "Videos" tab of a channel page.
            driver.find_elements_by_css_selector("#items > ytd-grid-video-renderer")[n].click()
    except Exception:  # was a bare except
        time.sleep(2)
        print("I'm trying to click on a video")
        scrollDown()
        select_video(n)
def find_video_length_in_seconds():
    """Read the player's duration label ("H:MM:SS" or "M:SS") and return total seconds."""
    try:
        strTime = driver.find_element_by_css_selector("#movie_player > div.ytp-chrome-bottom > div.ytp-chrome-controls > div.ytp-left-controls > div.ytp-time-display.notranslate > span.ytp-time-duration").text
        total = 0
        # Walk the components right-to-left: seconds, minutes, hours, ...
        for power, component in enumerate(strTime.split(":")[::-1]):
            total += int(component) * (60 ** power)
        return total
    except Exception:  # was a bare except
        print("Error in find_video_length_in_seconds()")
def watch_the_video_for(n=0):
    """Block for *n* seconds (i.e. 'watch' the currently playing video)."""
    try:
        time.sleep(n)
    except Exception:  # was a bare except; sleep() raises on negative n
        print("Error in watch_the_video_for()")
def dislike_video():
    """Click the dislike button on the current video page."""
    try:
        driver.find_element_by_css_selector(".ytd-video-primary-info-renderer > #top-level-buttons > .style-scope:nth-child(2) #button > #button > .style-scope").click()
    except Exception:  # was a bare except
        print("Error in dislike_video()")
def like_video():
    """Click the like button on the current video page."""
    try:
        driver.find_element_by_css_selector(".ytd-video-primary-info-renderer > #top-level-buttons > .style-scope:nth-child(1) #button > #button > .style-scope").click()
    except Exception:  # was a bare except
        print("Error in like_video()")
def go_to_channel():
    """Open the current video's channel page and switch to its 'Videos' tab."""
    try:
        driver.find_element_by_css_selector("#top-row > ytd-video-owner-renderer > a").click()
        videoTab = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#tabsContent > tp-yt-paper-tab")))
        time.sleep(1)
        videoTab[1].click()  # second tab is "Videos"
    except Exception:  # was a bare except
        print("Error in go_to_channel()")
def search_with_url(url):
    """Navigate the shared driver directly to *url*."""
    try:
        driver.get(url)
    except Exception:  # was a bare except
        print("Error in search_with_url()")
def search_bar(text):
    """Type *text* into the YouTube search box and submit the search."""
    try:
        search_input = driver.find_element_by_css_selector("#search-input > #search")
        search_input.clear()
        search_input.send_keys(text)
        driver.find_element_by_css_selector("#search-icon-legacy").click()
    except Exception:  # was a bare except
        print("Error in search_bar()")
def robot(file):
    """Execute the scripted list of actions in *file* and log each step.

    *file* is a list of action dicts ({"action": ..., "options"/"toSearch"/
    "index"/"watchContext": ...}). Every step is POSTed to the logging API.

    Fixes vs. the original: the loop variable ``x`` was clobbered by inner
    ``for x in listVideos`` loops (so ``x["toSearch"]`` later indexed a
    string and raised TypeError); unused locals (lets_toggle, videoLever,
    ``a =``) removed; the repeated logging POST extracted into a helper.
    """
    urlForDB = "test.netops.fr"
    thisSession = str(int(time.time()))
    jsonHeaders = {"accept": "application/ld+json", "Content-Type": "application/ld+json"}
    requests.post("https://" + urlForDB + "/api/session/new", headers=jsonHeaders, json={"id": thisSession})
    isLogedIn = False
    actionNumber = Lever()
    currentAction = 7

    def _log(**extra):
        # POST one log entry (session/action/position plus extras) and
        # advance the position counter. Reads currentAction at call time.
        payload = {"session": thisSession, "action": currentAction, "position": actionNumber.get()}
        payload.update(extra)
        requests.post("https://" + urlForDB + "/api/log/new", headers=jsonHeaders, json=payload)
        actionNumber.incr()

    def _show_videos(videos):
        print("Where's the list of all the videos on this page (*~▽~) :")
        for video_id in videos:
            print("\t" + str(video_id))

    time.sleep(2)
    listVideos = find_video()
    _show_videos(listVideos)
    _log(videos=listVideos)
    for action in file:
        YouTube_Deny_Log_In()
        if action["action"] == 'settings':
            currentAction = 1
            print("Let's change some settings ⊂((・▽・))⊃")
            if "autoPlay" in action["options"]:
                print("Auto Play is set to : " + str(action["options"]["autoPlay"]) + " ヾ(*´∀`*)ノ")
                _log()
                YouTube_Toggle_AutoPlay(action["options"]["autoPlay"])
            if "login" in action["options"]:
                print("We'll soon log in (=^▽^=)")
                logEmail = YouTube_Google_Log_In(action["options"]["login"])
                _log(email=logEmail)
                isLogedIn = True
            if "logout" in action["options"]:
                print("We're login out ! °˖✧◝(^▿^)◜✧˖°")
                _log(email="log out")
                YouTube_Google_Log_Out()
                isLogedIn = False
        elif action["action"] == 'search':
            print("Let's search for : " + str(action["toSearch"]) + " ー( ´ ▽ ` )ノ")
            currentAction = 2
            search_bar(action["toSearch"])
            time.sleep(2)
            listVideos = find_video()
            _show_videos(listVideos)
            _log(videos=listVideos, key_word=action["toSearch"])
        elif action["action"] == 'watch':
            currentAction = 3
            index = -1
            if "url" in action:
                print("Let's watch a video from an URL o(^▽^)o")
                search_with_url(action["url"])
            elif "index" in action:
                print("Let's watch the video number : " + str(action["index"]) + " on this page ヾ(^∇^)")
                select_video(action["index"])
                index = action["index"]
            else:
                print("Let's watch the video number : 1 on this page ヾ(^∇^)")
                select_video()
                index = 1
            time.sleep(2)
            currentVideo = driver.current_url
            time.sleep(2)
            listVideos = find_video()
            _show_videos(listVideos)
            _log(currentVideo=YouTube_Get_Video_Id_From_Url(currentVideo), videos=listVideos, index=index)
            if "watchContext" in action:
                if action["watchContext"]["stopsAt"] == "never":
                    print("We're going to watch it 'til the end ! (*⌒∇⌒*)")
                    watch_the_video_for(find_video_length_in_seconds())
                else:
                    print("We're going to watch it for : " + str(action["watchContext"]["stopsAt"]) + "seconds (*⌒∇⌒*)")
                    watch_the_video_for(int(action["watchContext"]["stopsAt"]))
                if isLogedIn:
                    if "social" in action["watchContext"]:
                        if action["watchContext"]["social"] == 'like':
                            print("I like it !! (ᗒᗊᗕ)")
                            currentAction = 4
                            like_video()
                            time.sleep(2)
                            _log()
                        else:
                            print("It wasn't great thought ... (๑꒪▿꒪)*")
                            currentAction = 5
                            dislike_video()
                            time.sleep(2)
                            _log()
        elif action["action"] == 'goToChannel':
            print("Intresting ! Let's visit this channel ~ヾ(^∇^)")
            currentAction = 6
            go_to_channel()
            time.sleep(2)
            listVideos = find_video()
            _show_videos(listVideos)
            _log(videos=listVideos)
        elif action["action"] == 'home':
            print("Let's go back to homepage (^▽^)")
            currentAction = 7
            home_page()
            time.sleep(2)
            listVideos = find_video()
            _show_videos(listVideos)
            _log(videos=listVideos)
        time.sleep(2)
    time.sleep(10)
    print("Fiouf, it's the end of our journey, hope you like it (⌒▽⌒ゞ")
    print("Cya soon !")
    driver.quit()
def launch():
    """Entry point: open YouTube, accept cookies, fetch a generated action
    script from the scenario service, and hand it to robot().

    Dead code removed vs. the original (unused ``file = ''`` and the
    commented-out local-file loader).
    """
    print("Hi, I'm Pybot, and I'm going to take you on a YouTube journey ( ´ ▽ ` )ノ")
    YouTube_Acces_Website()
    time.sleep(2)
    YouTube_Accept_Cookies()
    time.sleep(2)
    YouTube_Deny_Log_In()
    print("We're on YouTube homepage ! ( ^∇^)")
    print("Just let me grab my map and see where we go from here ⊂((・▽・))⊃")
    url = "https://scriptgenyoutube.miage.dev/generate"
    # Scenario parameters sent to the script-generator service.
    payload = json.dumps({
        "type": "conspi",
        "watchNext": "15",
        "watchFromURL": "0",
        "watchFromHome": "10",
        "search": "conspi",
        "watchFromSearch": "5",
        "watchFromChannel": "5",
        "watchRecommended": "15",
        "stopsAt": "5",
        "social": "like",
        "interactionPercent": "50",
        "order": [
            "home",
            "next",
            "search",
            "channel",
            "recommended"
        ]
    })
    headers = {
        'Content-Type': 'application/json'
    }
    # NOTE(review): a GET with a JSON body is unusual but appears to be what
    # this service expects — confirm before changing it to POST.
    response = requests.request("GET", url, headers=headers, data=payload)
    time.sleep(1)
    file = json.loads(response.text)["actions"]
    print(r"We're all set, let's go ! \(*≧∇≦*)/")
    time.sleep(1)
    robot(file)
#launch() |
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import netaddr
from oslo.config import cfg
from akanda.rug.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
# Default autonomous system number: 64512 is the start of the 16-bit
# private-use ASN range.
DEFAULT_AS = 64512
# Config options: where the operator's provider rules file lives, and the
# local/neighbor ASNs to advertise.
OPTIONS = [
    cfg.StrOpt('provider_rules_path'),
    cfg.IntOpt('asn', default=DEFAULT_AS),
    cfg.IntOpt('neighbor_asn', default=DEFAULT_AS),
]
cfg.CONF.register_opts(OPTIONS)
# Network roles recognised by the config generator.
EXTERNAL_NET = 'external'
INTERNAL_NET = 'internal'
MANAGEMENT_NET = 'management'
# Address-configuration services a subnet can be served by.
SERVICE_STATIC = 'static'
SERVICE_DHCP = 'dhcp'
SERVICE_RA = 'ra'
def build_config(client, router, interfaces):
    """Assemble the complete appliance configuration dict for *router*."""
    rules = load_provider_rules(cfg.CONF.provider_rules_path)
    network_configs = generate_network_config(client, router, interfaces)
    default_gateway = get_default_v4_gateway(client, router, network_configs)
    config = {
        'asn': cfg.CONF.asn,
        'neighbor_asn': cfg.CONF.neighbor_asn,
        'default_v4_gateway': default_gateway,
        'networks': network_configs,
        'address_book': generate_address_book_config(client, router),
        'anchors': generate_anchor_config(client, rules, router),
        'labels': rules.get('labels', {}),
        'floating_ips': generate_floating_config(router),
        'tenant_id': router.tenant_id,
        'hostname': router.name,
    }
    return config
def get_default_v4_gateway(client, router, networks):
    """Find the IPv4 default gateway for the router.

    Scans the external networks for an IPv4 subnet containing one of the
    external interface's IPv4 addresses and returns that subnet's gateway,
    or '' when none is found (e.g. the external interface is still down).
    """
    LOG.debug('networks = %r', networks)
    LOG.debug('external interface = %s', router.external_port.mac_address)
    for net in networks:
        if net['network_type'] != EXTERNAL_NET:
            continue
        # IPv4 addresses assigned to the external interface.
        v4_addresses = []
        for cidr_str in net['interface']['addresses']:
            candidate = netaddr.IPAddress(cidr_str.partition('/')[0])
            if candidate.version == 4:
                v4_addresses.append(candidate)
        for subnet_info in net['subnets']:
            subnet = netaddr.IPNetwork(subnet_info['cidr'])
            if subnet.version != 4:
                continue
            LOG.debug(
                '%s: checking if subnet %s should have the default route',
                router.id, subnet_info['cidr'])
            if any(address in subnet for address in v4_addresses):
                LOG.debug(
                    '%s: found gateway %s for subnet %s on network %s',
                    router.id,
                    subnet_info['gateway_ip'],
                    subnet_info['cidr'],
                    net['network_id'],
                )
                return subnet_info['gateway_ip']
    # The external interface may still be marked "down"; report it but do
    # not treat it as an error — we will be asked again when it comes up.
    LOG.info('%s: no default gateway was found', router.id)
    return ''
def load_provider_rules(path):
    """Load the operator-provided rules JSON from *path*.

    Returns the parsed object, or None when the file cannot be read or
    parsed (preserving the original failure behavior). Fixes the original's
    file-handle leak (open() without close) and its bare except.
    """
    try:
        with open(path) as rules_file:
            return jsonutils.load(rules_file)
    except Exception:  # pragma nocover
        LOG.exception('unable to open provider rules: %s' % path)
def generate_network_config(client, router, interfaces):
    """Build the per-network config list: external, management, then internals."""
    iface_map = {i['lladdr']: i['ifname'] for i in interfaces}
    configs = [
        _network_config(
            client,
            router.external_port,
            iface_map[router.external_port.mac_address],
            EXTERNAL_NET),
        _management_network_config(
            router.management_port,
            iface_map[router.management_port.mac_address],
            interfaces,
        ),
    ]
    for port in router.internal_ports:
        configs.append(
            _network_config(
                client,
                port,
                iface_map[port.mac_address],
                INTERNAL_NET,
                client.get_network_ports(port.network_id)))
    return configs
def _management_network_config(port, ifname, interfaces):
    """Build the management-network config for the interface named *ifname*.

    Returns None when no interface matches (preserved original behavior).
    """
    match = next((i for i in interfaces if i['ifname'] == ifname), None)
    if match is not None:
        return _make_network_config_dict(match, MANAGEMENT_NET, port.network_id)
def _network_config(client, port, ifname, network_type, network_ports=None):
    """Build the network config dict for *port* on interface *ifname*.

    The mutable default argument (``network_ports=[]``) is replaced with
    None-plus-fresh-list; callers passing a list see identical behavior.
    """
    if network_ports is None:
        network_ports = []
    subnets = client.get_network_subnets(port.network_id)
    subnets_dict = dict((s.id, s) for s in subnets)
    return _make_network_config_dict(
        _interface_config(ifname, port, subnets_dict),
        network_type,
        port.network_id,
        subnets_dict=subnets_dict,
        network_ports=network_ports)
def _make_network_config_dict(interface, network_type, network_id,
                              v4_conf=SERVICE_STATIC, v6_conf=SERVICE_STATIC,
                              subnets_dict=None, network_ports=None):
    """Assemble the config dict describing one network.

    The original's mutable default arguments (``subnets_dict={}`` and
    ``network_ports=[]``) are replaced with None defaults and fresh
    containers created per call.
    """
    if subnets_dict is None:
        subnets_dict = {}
    if network_ports is None:
        network_ports = []
    return {'interface': interface,
            'network_id': network_id,
            'v4_conf_service': v4_conf,
            'v6_conf_service': v6_conf,
            'network_type': network_type,
            'subnets': [_subnet_config(s) for s in subnets_dict.values()],
            'allocations': _allocation_config(network_ports, subnets_dict)}
def _interface_config(ifname, port, subnets_dict):
def fmt(fixed):
return '%s/%s' % (fixed.ip_address,
subnets_dict[fixed.subnet_id].cidr.prefixlen)
return {'ifname': ifname,
'addresses': [fmt(fixed) for fixed in port.fixed_ips]}
def _subnet_config(subnet):
return {
'cidr': str(subnet.cidr),
'dhcp_enabled': subnet.enable_dhcp,
'dns_nameservers': subnet.dns_nameservers,
'host_routes': subnet.host_routes,
'gateway_ip': (str(subnet.gateway_ip)
if subnet.gateway_ip is not None
else ''),
}
def _allocation_config(ports, subnets_dict):
r = re.compile('[:.]')
allocations = []
for port in ports:
addrs = {
str(fixed.ip_address): subnets_dict[fixed.subnet_id].enable_dhcp
for fixed in port.fixed_ips
}
if not addrs:
continue
allocations.append(
{
'ip_addresses': addrs,
'device_id': port.device_id,
'hostname': '%s.local' % r.sub('-', sorted(addrs.keys())[0]),
'mac_address': port.mac_address
}
)
return allocations
def generate_address_book_config(client, router):
    """Map each of the tenant's address groups to its stringified entries."""
    groups = client.get_addressgroups(router.tenant_id)
    return {group.name: [str(entry) for entry in group.entries]
            for group in groups}
def generate_anchor_config(client, provider_rules, router):
    """Compose pre-anchors, the tenant anchors, and post-anchors, in order.

    The original extended ``provider_rules['preanchors']`` in place, so every
    call appended tenant anchors (and postanchors) into the shared provider
    rules dict. Copying the list first removes that aliasing bug while
    returning the same value.
    """
    retval = list(provider_rules.get('preanchors', []))
    retval.append(generate_tenant_port_forward_anchor(client, router))
    retval.append(generate_tenant_filter_rule_anchor(client, router))
    retval.extend(provider_rules.get('postanchors', []))
    return retval
def generate_tenant_port_forward_anchor(client, router):
    """Build the IPv4 port-forwarding anchor for the router's tenant."""
    to_ip = router.external_port.first_v4 or '127.0.0.1'
    formatted = (
        _format_port_forward_rule(to_ip, pf)
        for pf in client.get_portforwards(router.tenant_id)
    )
    # Drop forwards whose target port has no IPv4 address (formatted as None).
    return {
        'name': 'tenant_v4_portforwards',
        'rules': [rule for rule in formatted if rule],
    }
def _format_port_forward_rule(to_ip, pf):
redirect_ip = pf.port.first_v4
if not redirect_ip:
return
return {
'action': 'pass',
'direction': 'in',
'family': 'inet',
'protocol': pf.protocol,
'destination': '%s/32' % to_ip,
'destination_port': pf.public_port,
'redirect': redirect_ip,
'redirect_port': pf.private_port
}
def generate_tenant_filter_rule_anchor(client, router):
    """Build the filter-rule anchor for the router's tenant."""
    formatted_rules = []
    for rule in client.get_filterrules(router.tenant_id):
        formatted_rules.append(_format_filter_rule(rule))
    return {'name': 'tenant_filterrules', 'rules': formatted_rules}
def _format_filter_rule(rule):
return {
'action': rule.action,
'protocol': rule.protocol,
'source': rule.source.name if rule.source else None,
'source_port': rule.source_port,
'destination': rule.destination.name if rule.destination else None,
'destination_port': rule.destination_port,
}
def generate_floating_config(router):
    """List the (floating_ip, fixed_ip) string pairs for the router."""
    floating = []
    for fip in router.floating_ips:
        floating.append({'floating_ip': str(fip.floating_ip),
                         'fixed_ip': str(fip.fixed_ip)})
    return floating
|
from typing import Any, List, Literal, TypedDict
from .FHIR_code import FHIR_code
from .FHIR_decimal import FHIR_decimal
from .FHIR_Element import FHIR_Element
from .FHIR_string import FHIR_string
# An amount of economic utility in some recognized currency.
class FHIR_Money(TypedDict, total=False):
    """An amount of economic utility in some recognized currency (FHIR Money)."""

    # Unique id for the element within a resource (for internal references).
    # This may be any string value that does not contain spaces.
    id: FHIR_string
    # May be used to represent additional information that is not part of the
    # basic definition of the element. To make the use of extensions safe and
    # manageable, there is a strict set of governance applied to the definition
    # and use of extensions. Though any implementer can define an extension,
    # there is a set of requirements that SHALL be met as part of the
    # definition of the extension.
    extension: List[Any]
    # Numerical value (with implicit precision).
    value: FHIR_decimal
    # Extensions for value
    _value: FHIR_Element
    # ISO 4217 Currency Code.
    currency: FHIR_code
    # Extensions for currency
    _currency: FHIR_Element
|
from tasks.supervised import SupervisedForecastTask
# Explicit public API of this package.
__all__ = ["SupervisedForecastTask"]
|
#!/usr/bin/env python
"""Small Tk/ttk demo: pick an even quantity of animals and their kind."""
from tkinter import Tk, Spinbox
import tkinter
from tkinter.ttk import Style, Label, Button, Combobox

top = Tk()

# Shared ttk style for the quit button.
Style().configure('TButton', foreground='white', background='red')

title_label = Label(top, text='动物(成对的;最少:一对;最多:一打)')
title_label.pack()

quantity_label = Label(top, text='数量:')
quantity_label.pack()
# Pairs only: steps of 2, from one pair up to a dozen.
quantity_spinbox = Spinbox(top, from_=2, to=12, increment=2,
                           font='Helvetica -14 bold')
quantity_spinbox.pack()

kind_label = Label(top, text='种类:')
kind_label.pack()
kind_combobox = Combobox(top, values=('狗', '猫', '仓鼠', '蟒蛇'))
kind_combobox.pack()

quit_button = Button(top, text='退出', command=top.quit, style='TButton')
quit_button.pack()

top.mainloop()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# write a correct test!
import unittest
import pygimli as pg
import numpy as np
class TestMisc(unittest.TestCase):
    """Miscellaneous pygimli core tests (transforms, data containers, hashing).

    Fixes vs. the original: ``np.int`` and ``np.float`` (deprecated aliases
    removed in NumPy 1.24) replaced by the builtins they aliased, and an
    identity (``is``) comparison of hash integers replaced by a value check.
    """

    def test_Trans(self):
        """Identity, linear, bounded-log and cumulative transforms."""
        f = pg.trans.Trans()
        x = pg.Vector(3, 1.0)
        np.testing.assert_array_equal(f(x), x)
        np.testing.assert_array_equal(f.inv(x), x)
        np.testing.assert_array_equal(f.inv(f(x)), x)
        self.assertEqual(f.trans(1.0), 1.0)
        self.assertEqual(f(1.0), 1.0)
        self.assertEqual(f.inv(1.0), 1.0)
        f = pg.trans.TransLin(factor=2., offset=4.)
        np.testing.assert_array_equal(f(x), x*2. + 4.)
        np.testing.assert_array_equal(f.trans(x), x*2. + 4.)
        np.testing.assert_array_equal(f.inv(f(x)), x)
        self.assertEqual(f(1.0), 6.0)
        self.assertEqual(f.trans(1.0), 6.0)
        self.assertEqual(f.inv(6.0), 1.0)
        self.assertEqual(f.invTrans(6.0), 1.0)
        f = pg.trans.TransLogLU(lowerbound=0, upperbound=10)
        # print(f.update([1.], [100.]))
        np.testing.assert_array_equal(f.update([1.], [100.]), [10.0])
        # print(f.update([1.], [1000.]))
        # np.testing.assert_array_equal(f.update([1.], [1000.]), [10.0])
        f = pg.trans.TransCumulative()
        f.add(pg.trans.TransLog(), 5)
        f.add(pg.trans.TransLog(), 5)
        np.testing.assert_array_equal(f.at(0).fwd(np.ones(10)*10),
                                      np.log(np.ones(10)*10))
        np.testing.assert_array_equal(f.fwd(np.ones(10)*10),
                                      np.log(np.ones(10)*10))
        # tm2 = pg.trans.TransLog()
        # tc.add(tm2, 5, 10)
        # fop._modelTrans = pg.trans.TransCumulative()
        # fop._modelTrans.add(tm2, size=nModel)
        #fop._modelTrans = pg.trans.TransLog()

    def test_DataContainerFilter(self):
        """markValid/markInvalid with index arrays, ranges and boolean masks."""
        data = pg.DataContainer()
        data.resize(5)
        data.markValid([0, 4])
        self.assertEqual(data('valid'), [1.0, 0.0, 0.0, 0.0, 1.0])
        data.markInvalid(pg.core.IndexArray(np.arange(5, dtype="long")))
        self.assertEqual(data('valid'), [0.0, 0.0, 0.0, 0.0, 0.0])
        data.markValid(np.arange(5, dtype="long"))
        self.assertEqual(data('valid'), [1.0, 1.0, 1.0, 1.0, 1.0])
        data.markInvalid(range(5))
        self.assertEqual(data('valid'), [0.0, 0.0, 0.0, 0.0, 0.0])
        x = np.arange(5, dtype='float')
        data.markValid(pg.Vector(x) > 2.0)
        self.assertEqual(data('valid'), [0.0, 0.0, 0.0, 1.0, 1.0])
        data.markValid(pg.BVector(x < 2.0))
        self.assertEqual(data('valid'), [1.0, 1.0, 0.0, 1.0, 1.0])
        data.markInvalid(pg.find(x > 3.0))
        self.assertEqual(data('valid'), [1.0, 1.0, 0.0, 1.0, 0.0])
        data.markInvalid(x < 1.0)
        self.assertEqual(data('valid'), [0.0, 1.0, 0.0, 1.0, 0.0])

    def test_DataContainerSensors(self):
        """Sensor positions can be set from a list and replaced in bulk."""
        data = pg.DataContainer()
        sensors = [[x, 0.0] for x in range(5)]
        data.setSensorPositions(sensors)
        data.setSensorPositions(data.sensors()[::-1])
        self.assertEqual(data.sensor(0), [4., 0.0, 0.0])
        self.assertEqual(data.sensor(4), [0., 0.0, 0.0])

    def test_DataContainerIndex(self):
        """Plain fields stay float vectors; sensor-index fields become int arrays."""
        data = pg.DataContainer()
        data['b'] = np.ones(2) * 3.14
        np.testing.assert_array_equal(data['b'], np.ones(2)*3.14)
        self.assertEqual(type(data['b']), type(pg.Vector()))
        data['b'][0] = 1.0
        self.assertEqual(data['b'][0], 1.0)
        data.registerSensorIndex('a')
        data['a'] = np.ones(2)
        np.testing.assert_array_equal(data['a'], np.ones(2))
        self.assertEqual(type(data['a']), type(np.array(1)))
        self.assertEqual(data['a'].dtype, 'int')
        data['a'][0] = 1.0 # will not work for sensorIndex until its changed in the datacontainer as IndexArray
        # Float assignment to a sensor-index field is truncated to int.
        data['a'] = np.ones(2)*1.2
        np.testing.assert_array_equal(data['a'], np.ones(2))
        self.assertEqual(type(data['a']), type(np.array(1)))
        self.assertEqual(data['a'].dtype, 'int')

    def test_Operators(self):
        """Comparison operators on vectors yield masks of the same length."""
        t = pg.Vector(10, 1.0)
        self.assertEqual(len(t == 1.0), len(t > 0))
        self.assertEqual(len(t == 1.0), len(t == 1))

    def test_Int64Problem(self):
        """createFourPointData accepts every common integer/float scalar type.

        np.int/np.float (removed in NumPy >= 1.24) replaced by the builtins
        they aliased, so behavior is unchanged on older NumPy too.
        """
        data = pg.DataContainerERT()
        data.createFourPointData(0, 0, 1, 2, 3)
        pos = np.arange(4, dtype=int)
        data.createFourPointData(1, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=np.int32)
        data.createFourPointData(2, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=np.int64)
        data.createFourPointData(3, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=float)
        data.createFourPointData(4, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=np.float32)
        data.createFourPointData(5, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4, dtype=np.float64)
        data.createFourPointData(6, pos[0], pos[1], pos[2], pos[3])
        pos = np.arange(4)
        data.createFourPointData(7, pos[0], pos[1], pos[2], pos[3])
        pos = range(4)
        data.addFourPointData(pos[0], pos[1], pos[2], pos[3])
        #print(data('a'), data('b'), data('m'), data('n'))
        self.assertEqual(sum(data('a')), 9*0)
        self.assertEqual(sum(data('b')), 9*1)
        self.assertEqual(sum(data('m')), 9*2)
        self.assertEqual(sum(data('n')), 9*3)

    def test_PosConstMember(self):
        """Cross product of unit x and unit y is unit z."""
        p1 = pg.Pos(1.0, 0.0, 0.0)
        p2 = pg.Pos(0.0, 1.0, 0.0)
        p3 = p1.cross(p2)
        self.assertEqual(p3, pg.Pos(0.0, 0.0, 1.0))

    def test_Hash(self):
        """Vector hashes depend on size and content only."""
        v1 = pg.Vector(10, 2.)
        v2 = pg.Vector(10, 2.)
        self.assertFalse(pg.Vector(1, 0.).hash() == pg.Vector(2, 0.).hash())
        self.assertEqual(v1.hash(), v2.hash())
        self.assertEqual(hash(v1), hash(v2))
        v2[2] = 3.
        # Was `assertFalse(v1.hash() is v2.hash())` — an identity check on
        # ints, which does not test hash inequality; compare values instead.
        self.assertNotEqual(v1.hash(), v2.hash())
        v2[2] = 2.
        self.assertTrue(v1.hash() == v2.hash())
        self.assertEqual(v1.hash(), pg.Vector(10, 2.).hash())

    def test_HashData(self):
        """DataContainer hashes track sensors, sizes and field values."""
        d1 = pg.DataContainerERT()
        d2 = pg.DataContainerERT()
        self.assertEqual(d1.hash(), d2.hash())
        d1.createSensor([1.0, 0.0])
        d2.createSensor([2.0, 0.0])
        self.assertFalse(d1.hash() == d2.hash())
        d2.setSensor(0, [1.0, 0.0])
        self.assertTrue(d1.hash() == d2.hash())
        d1.resize(10)
        d2.resize(12)
        d1.add('a', pg.Vector(d1.size(), 1.0))
        d2.add('a', pg.Vector(d2.size(), 1.0))
        self.assertFalse(d1.hash() == d2.hash())
        d2.resize(10)
        self.assertTrue(d1.hash() == d2.hash())
        d2('a')[3] = 2.0
        self.assertFalse(d1.hash() == d2.hash())
        d2('a')[3] = 1.0
        self.assertTrue(d1.hash() == d2.hash())

    def test_HashMesh(self):
        """Mesh hashes track node positions."""
        m1 = pg.Mesh()
        m2 = pg.Mesh()
        self.assertTrue(m1.hash() == m2.hash())
        m1.createNode([1.0, 0.0])
        m2.createNode([2.0, 0.0])
        self.assertFalse(m1.hash() == m2.hash())
        m2.node(0).setPos([1.0, 0.0])
        self.assertTrue(m1.hash() == m2.hash())

    # does not work .. need time to implement
    # def test_DataContainerWrite(self):
    #     data = pg.DataContainer()
    #     data.save('test.dat')
    #     fi = open('test2.dat', 'w')
    #     data.write(fi)
    #     fi.close()

    def test_DataTypes(self):
        """Smoke test: dumping the core type sizes must not raise."""
        pg.core.showSizes()
if __name__ == '__main__':
    # Silence pygimli's deep-debug output before running the suite.
    pg.core.setDeepDebug(0)
    unittest.main()
|
from django.db import models
from django.conf import settings
import decimal, datetime
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Site user account, extending Django's built-in AbstractUser.

    Inherited from AbstractUser: first_name, last_name, username, email,
    is_active, is_staff, date_joined, password.
    """
    # Secondary/alternate email address.
    lemail = models.TextField(blank=True, null=True)
    school = models.TextField(blank=True, null=True)
    grade = models.PositiveSmallIntegerField(blank=True, null=True)

    def __str__(self):
        return self.username
class Tablet(models.Model):
    """A tablet and the path to its scanned image."""
    name = models.TextField(blank=True, null=True)
    imagePath = models.TextField(blank=True, null=True)
    history = models.TextField(blank=True, null=True)

    def __str__(self):
        # NOTE(review): returns the (nullable) history text rather than the
        # name — confirm this is intentional.
        return self.history
class Line(models.Model):
    """One line of text on a tablet side."""
    # TODO: look up the full set of side codes (get them from Stratford).
    SIDE_CHOICES = (
        ('OBV', 'Obverse'),
        ('FR', 'FRONT'),  # add the other sides
    )
    # on_delete=CASCADE was the implicit default before Django 2.0 and is a
    # required explicit argument from 2.0 on — behavior is unchanged.
    tablet = models.ForeignKey(Tablet, related_name="TabletNum", on_delete=models.CASCADE)
    # NOTE(review): SIDE_CHOICES is defined but not wired to this field
    # (choices=SIDE_CHOICES); confirm whether it should be.
    side = models.TextField(blank=True, null=True)
    lineNumber = models.PositiveSmallIntegerField(blank=True, null=True)

    def __str__(self):
        return str(self.lineNumber)
class Sign(models.Model):
    """An image file representing one cuneiform sign."""
    filepath = models.TextField(blank=True, null=True)
    name = models.TextField(blank=True, null=True)
    mimeType = models.TextField()

    def __str__(self):
        return self.filepath
class AssyrianChar(models.Model):
    """A single character occurrence on a line, linked to its sign image."""
    line = models.PositiveSmallIntegerField(blank=True, null=True)
    positionNO = models.PositiveSmallIntegerField(blank=True, null=True)
    # Field name 'Sign' kept for schema compatibility even though it shadows
    # the Sign model in this class body. on_delete=CASCADE matches the
    # pre-Django-2.0 implicit default (required explicitly since 2.0).
    Sign = models.ForeignKey(Sign, related_name="char_sign", on_delete=models.CASCADE)
    note = models.TextField(blank=True, null=True)

    def __str__(self):
        return self.note
class IdentifiedCharacter(models.Model): #change this to placement
    """A user's identification (placement) of a sign on a tablet image."""
    # on_delete=CASCADE matches the pre-Django-2.0 implicit default and is
    # required as an explicit argument from Django 2.0 on.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    sign = models.ForeignKey(Sign, related_name="Sign", blank=True, null=True, on_delete=models.CASCADE)
    date_recorded = models.DateTimeField(auto_now_add=True)
    link = models.TextField(blank=True, null=True)
    # Bounding box of the identified character on the tablet image.
    hotspot_x = models.PositiveSmallIntegerField(blank=True, null=True)
    hotspot_y = models.PositiveSmallIntegerField(blank=True, null=True)
    hotspot_width = models.PositiveSmallIntegerField(blank=True, null=True)
    hotspot_height = models.PositiveSmallIntegerField(blank=True, null=True)

    def __str__(self):
        # __str__ must return a str; the original returned the raw datetime,
        # which raises TypeError when the object is rendered.
        return str(self.date_recorded)
'''
Stored Procedures:
- After a certain amount of user inputs, the database calculates the hotspots and sends the report to us.
- Make an admin site that shows the results of the tablets
- Calculate how effective each user is at identifying characters, and show the user how successful they are at identifying characters.
'''
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'avocado',
'serrano',
'tests',
'tests.cases.base',
'tests.cases.resources',
'tests.cases.forms',
'tests.cases.sets',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'serrano.middleware.SessionMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'serrano.backends.TokenBackend',
)
SERRANO = {
'RATE_LIMIT_COUNT': 20,
'RATE_LIMIT_SECONDS': 3,
'AUTH_RATE_LIMIT_COUNT': 40,
'AUTH_RATE_LIMIT_SECONDS': 6,
'OBJECT_SETS': [{
'model': 'tests.Team',
}],
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
SITE_ID = 1
ROOT_URLCONF = 'tests.urls'
ANONYMOUS_USER_ID = -1
TEST_RUNNER = 'tests.runner.ProfilingTestRunner'
TEST_PROFILE = 'unittest.profile'
SECRET_KEY = 'abc123'
MODELTREES = {
'default': {
'model': 'tests.Employee',
}
}
AVOCADO = {
'FORCE_SYNC_LOG': True,
}
# Switch handlers from 'null' => 'console' to see logging output
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler'
}
},
'loggers': {
'avocado': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': True,
},
'serrano': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': True,
},
}
}
|
#!/usr/bin/python
import os
import sys
import struct
import bsa
import daggerfall.casttypes as casttypes
class Tolerance():
def __init__(self, byte):
self.toParalysis = bool(byte & casttypes.PARALYSIS)
self.toMagic = bool(byte & casttypes.MAGIC)
self.toPoison = bool(byte & casttypes.POISON)
self.toFire = bool(byte & casttypes.POISON)
self.toFrost = bool(byte & casttypes.FROST)
self.toShock = bool(byte & casttypes.SHOCK)
self.toDisease = bool(byte & casttypes.SHOCK)
def toString(self):
a = [ ]
if self.toParalysis is True:
a.append('to Paralysis')
if self.toMagic is True:
a.append('to Magic')
if self.toPoison is True:
a.append('to Poison')
if self.toFire is True:
a.append('to Fire')
if self.toFrost is True:
a.append('to Frost')
if self.toShock is True:
a.append('to Shock')
if self.toDisease is True:
a.append('to Disease')
return ', '.join(a)
class Monster():
def __init__(self, string):
tup = struct.unpack('BBBBHBBBBBHBHBBBBBBBBBB16s30x', string)
self.name = tup[23].partition('\0')[0]
self.resistance = Tolerance(tup[0])
self.immunity = Tolerance(tup[1])
self.low_tolerance = Tolerance(tup[2])
self.critical_weakness = Tolerance(tup[3])
self.acute_hearing = bool(tup[4] & 1)
self.athleticism = bool(tup[4] & 2)
self.adrenaline_rush = bool(tup[4] & 4)
self.no_regen_sp = bool(tup[4] & 8)
self.sun_damage = bool(tup[4] & 16)
self.holy_damage = bool(tup[4] & 32)
sp_mult = {
0: 3.0,
1: 2.0,
2: 1.75,
3: 1.5,
4: 1.0
}
print 'tup4 %016x' % tup[4]
self.sp_in_dark = (tup[4] & 0x00C0) >> 8
self.sp_in_light = (tup[4] & 0x0300) >> 10
self.total_sp = sp_mult[(tup[4] & 0x1C00) >> 12]
if '-h' in sys.argv or len(sys.argv) != 2:
print """
Usage: openscrolls_monster <MONSTER.BSA>
Display all the information stored in MONSTER.BSA.
"""
exit()
file = bsa.BSAFile(sys.argv[1])
for i in range(0, 43):
data = file.get_record('ENEMY0%02d.CFG' % i)
monster = Monster(data)
print """
Name: %s
Resistances: %s
Immunities: %s
Low Tolerance: %s
Critical Weakness: %s
Acute Hearing: %r
Athleticism: %r
Adrenaline Rush: %r
No Regen SP: %r
Sun Damage: %r
Holy Damage: %r
SP in dark: %d
SP in light: %d
Total SP: %f
""" % (monster.name, monster.resistance.toString(), monster.immunity.toString(),
monster.low_tolerance.toString(), monster.critical_weakness.toString(),
monster.acute_hearing, monster.athleticism, monster.adrenaline_rush,
monster.no_regen_sp, monster.sun_damage, monster.holy_damage,
monster.sp_in_dark, monster.sp_in_light, monster.total_sp)
#print data
|
#!/usr/bin/env python
"""Exposes miscellaneous functions to interface with the CIL tool."""
"""See the LICENSE file, located in the root directory of
the source distribution and
at http://verifun.eecs.berkeley.edu/gametime/about/LICENSE,
for details on the GameTime license and authors.
"""
import os
import subprocess
from defaults import config, sourceDir
from fileHelper import removeFiles
def _generateCilCommand(projectConfig, keepLineNumbers):
"""Generates the system call to run CIL on the file currently
being analyzed.
Arguments:
projectConfig:
:class:`~gametime.projectConfiguration.ProjectConfiguration`
object that represents the configuration of a GameTime project.
keepLineNumbers:
`True` if, and only if, the resulting file should contain
preprocessor directives that maintain the line numbers from
the original source file (and other included source files).
Returns:
Appropriate system call as a list that contains the program
to be run and the proper arguments.
"""
# Set the environment variable that allows the Cilly driver to find
# the path to the configuration file for the Findlib OCaml module.
os.environ["OCAMLFIND_CONF"] = os.path.join(sourceDir,
"ocaml/conf/findlib.conf")
# Set the environment variable that allows the Cilly driver to find
# the path to the folder that contains the compiled OCaml files.
os.environ["OCAMLPATH"] = os.path.join(sourceDir, "ocaml/lib")
command = []
command.append(os.path.join(config.TOOL_CIL, "bin/cilly.bat"))
command.append("--dooneRet")
command.append("--domakeCFG")
command.append("--dosimpleMem")
command.append("--disallowDuplication")
if not keepLineNumbers:
command.append("--noPrintLn")
command.append("--dopartial")
command.append("--partial_root_function=%s" % projectConfig.func)
command.append(projectConfig.locationTempFile)
command.append("-I'%s'" % projectConfig.locationOrigDir)
for includePath in projectConfig.included:
command.append("-I'%s'" % includePath)
command.append("--save-temps='%s'" % projectConfig.locationTempDir)
command.append("-c")
command.append("-o")
command.append("'%s.out'" % projectConfig.locationTempNoExtension)
return command
def runCil(projectConfig, keepLineNumbers=False):
"""Conducts the sequence of system calls that will run CIL on the
file currently being analyzed.
Arguments:
projectConfig:
:class:`~gametime.projectConfiguration.ProjectConfiguration`
object that represents the configuration of a GameTime project.
keepLineNumbers:
`True` if, and only if, the resulting file should contain
preprocessor directives that maintain the line numbers from
the original source file (and other included source files).
Returns:
Zero if the calls were successful; a non-zero value otherwise.
"""
command = _generateCilCommand(projectConfig, keepLineNumbers)
print " ".join(command)
return subprocess.call(command, shell=True)
def removeTempCilFiles(projectConfig):
"""Removes the temporary files created by CIL during its analysis.
Arguments:
projectConfig:
:class:`~gametime.projectConfiguration.ProjectConfiguration`
object that represents the configuration of a GameTime project.
"""
# Remove the files with extension ".cil.*".
otherTempFiles = r".*\.cil\..*"
removeFiles([otherTempFiles], projectConfig.locationTempDir)
# By this point, we have files that are named the same as the
# temporary file for GameTime, but that have different extensions.
# Remove these files.
otherTempFiles = r".*-gt\.[^c]+"
removeFiles([otherTempFiles], projectConfig.locationTempDir)
|
from django.contrib.auth.models import User, Group
from django.db.models import (
Model,
TextField,
DateTimeField,
ForeignKey,
CASCADE,
OneToOneField,
ManyToManyField,
CharField,
)
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
class RoomModel(Model):
"""
Do NOT extend auth.models.Group as a "Room Group" model
unless you want to spend another 3 hour meaninglessly
debugging like me.
"""
base_group = OneToOneField(
Group,
related_name="room_group",
on_delete=CASCADE,
parent_link=True,
)
members = ManyToManyField(
User,
related_name="room_member",
)
name = CharField(default="", max_length=50)
class MessageModel(Model):
"""
This class represents a chat message. It has a owner (user), timestamp and
the message body.
"""
user = ForeignKey(
User,
on_delete=CASCADE,
verbose_name="user",
related_name="from_user",
db_index=True,
)
group = ForeignKey(
RoomModel,
on_delete=CASCADE,
related_name="message_group",
)
timestamp = DateTimeField(
"timestamp", auto_now_add=True, editable=False, db_index=True
)
body = TextField("body")
def __str__(self):
return str(self.id)
def characters(self):
"""
Toy function to count body characters.
:return: body's char number
"""
return len(self.body)
def notify_ws_clients(self):
"""
Inform client there is a new message.
"""
notification = {
"type": "recieve_group_message",
"message": "{}".format(self.id),
}
channel_layer = get_channel_layer()
print("user.id {}".format(self.user.id))
# print("user.id {}".format(self.recipient.id))
async_to_sync(channel_layer.group_send)("{}".format(self.user.id), notification)
# async_to_sync(channel_layer.group_send)(
# "{}".format(self.recipient.id), notification
# )
def save(self, *args, **kwargs):
"""
Trims white spaces, saves the message and notifies the recipient via WS
if the message is new.
"""
new = self.id
self.body = self.body.strip() # Trimming whitespaces from the body
super(MessageModel, self).save(*args, **kwargs)
if new is None:
self.notify_ws_clients()
# Meta
class Meta:
app_label = "core"
verbose_name = "message"
verbose_name_plural = "messages"
ordering = ("timestamp",)
|
# ------------------------------------------------------------------------
# Conditional DETR
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
import os
import numpy as np
import cv2
import argparse
from pathlib import Path
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
import datasets.samplers as samplers
from datasets import build_dataset, get_coco_api_from_dataset
from models import build_model
from main import get_args_parser as get_main_args_parser
from datasets.coco_eval import CocoEvaluator
import torchvision.transforms as transforms
import datasets.transforms as T
def show_demo_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--output_dir', type=float, default=None, help='path to save demo images')
parser.add_argument('--save', action='store_true', help='whether save the demo images')
parser.add_argument('--line', type=int, default=3, help='line width to draw bounding boxes')
parser.add_argument('--thresh', type=float, default=0.9, help='score threshold for showing boxes')
return parser
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
def rescale_bboxes(out_bbox, size):
img_w, img_h = size
b = box_cxcywh_to_xyxy(out_bbox)
b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32, device=b.device)
return b
# RGB reversed for cv2
class2color = {
'bus': (36, 140, 135), # My
'bicycle': (42, 42, 165), # Brown
'car': (0, 255, 0), # Lime
'motorcycle': (226, 43, 138), # BlueViolet
'person': (230, 128, 94), # My Blue
'rider': (163, 28, 191), # My pink
'train': (170, 178, 32), # LightSeaGreen
'truck': (23, 150, 187), # My brwon
}
def plot_results(pil_img, prob, labels, boxes, output_dir, save_name, lineWidth=2, dataset=None):
"""Visual debugging of detections."""
assert dataset is not None
if dataset in ['cityscapes']:
idx2cls = ['person', 'car', 'train', 'rider', 'truck', 'motorcycle', 'bicycle', 'bus']
elif 'coco' in dataset:
idx2cls = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat',
'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog',
'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
else:
raise NotImplementedError("unknown dataset!")
im2show = cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
for p, cls_idx, (xmin, ymin, xmax, ymax) in zip(prob, labels, boxes.tolist()):
xmin = int(np.round(xmin))
ymin = int(np.round(ymin))
xmax = int(np.round(xmax))
ymax = int(np.round(ymax))
cls_name = idx2cls[cls_idx-1]
# color = class2color[cls_name]
color = (0, 255, 0)
cv2.rectangle(im2show, (xmin, ymin), (xmax, ymax), color, lineWidth)
cv2.putText(im2show, '%s: %.3f' % (cls_name, p), (xmin, ymin + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
result_path = os.path.join(output_dir, save_name)
cv2.imwrite(result_path, im2show)
@torch.no_grad()
def evaluate_and_demo(model, criterion, postprocessors, data_loader, base_ds, device,
val_transforms, use_meta=False, main_args=None, show_args=None):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
header = 'Test:'
iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
coco_evaluator = CocoEvaluator(base_ds, iou_types)
for samples, targets in metric_logger.log_every(data_loader, 10, header):
# imgid + model + thresh + line
model_name = main_args.resume.split('/')[-2]
save_name = 'imgid{}_{}_thresh{}_line{}.png'.format(
str(targets[0]['image_id'].item()), model_name, str(show_args.thresh), str(show_args.line)
)
# input with only ToTensor transformation
samples_pil = transforms.ToPILImage()(samples.tensors.squeeze()).convert("RGB") # pil image w/o resizing
samples, targets = val_transforms(samples_pil, targets[0])
samples = samples.unsqueeze(0)
targets = [targets]
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
if use_meta:
meta_info = {
'size': torch.stack([t['size'][[1,0]] for t in targets]), # (bs, 2) W, H
}
outputs = model(samples, meta_info)
else:
outputs = model(samples)
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = postprocessors['bbox'](outputs, orig_target_sizes)
assert len(results) == 1
# (100,), (100,), (100, 4)
probs, labels, bboxes_scaled = results[0]['scores'], results[0]['labels'], results[0]['boxes']
keep = probs > show_args.thresh
if show_args.save:
if show_args.output_dir is None:
show_args.output_dir = os.path.join('demo', model_name)
Path(show_args.output_dir).mkdir(parents=True, exist_ok=True)
plot_results(samples_pil, probs[keep], labels[keep], bboxes_scaled[keep],
output_dir=show_args.output_dir, save_name=save_name,
lineWidth=show_args.line, dataset=main_args.dataset_file)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
**loss_dict_reduced_scaled,
**loss_dict_reduced_unscaled)
metric_logger.update(class_error=loss_dict_reduced['class_error'])
res = {target['image_id'].item(): output for target, output in zip(targets, results)}
if coco_evaluator is not None:
coco_evaluator.update(res)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
if coco_evaluator is not None:
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
if coco_evaluator is not None:
coco_evaluator.accumulate()
coco_evaluator.summarize()
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
if coco_evaluator is not None:
if 'bbox' in postprocessors.keys():
stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
return stats, coco_evaluator
def main(args, show_args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
model, criterion, postprocessors = build_model(args)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
dataset_val = build_dataset(image_set='val', args=args)
val_transforms = dataset_val._transforms
dataset_val._transforms = T.Compose([
T.ToTensor()
])
if args.distributed:
if args.cache_mode:
sampler_val = samplers.NodeDistributedSampler(dataset_val, shuffle=False)
else:
sampler_val = samplers.DistributedSampler(dataset_val, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers,
pin_memory=True)
if args.dataset_file == "coco_panoptic":
# We also evaluate AP during panoptic training, on original coco DS
coco_val = datasets.coco.build("val", args)
base_ds = get_coco_api_from_dataset(coco_val)
else:
base_ds = get_coco_api_from_dataset(dataset_val)
if args.frozen_weights is not None:
checkpoint = torch.load(args.frozen_weights, map_location='cpu')
model_without_ddp.detr.load_state_dict(checkpoint['model'])
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
use_meta = True if 'roi' in args else False
test_stats, coco_evaluator = evaluate_and_demo(
model, criterion, postprocessors, data_loader_val, base_ds, device,
val_transforms, use_meta=use_meta, main_args=args, show_args=show_args
)
return
if __name__ == '__main__':
# parser = argparse.ArgumentParser('Conditional DETR training and evaluation script', parents=[get_args_parser()])
# args = parser.parse_args()
show_args, _ = show_demo_args_parser().parse_known_args()
main_args = get_main_args_parser().parse_args(_)
main_args.batch_size = 1
main(main_args, show_args)
|
#!/usr/bin/env python
"""
===========
{PROG}
===========
----------------------------------------------------
plot csv-ish input from stdin using matplotlib
----------------------------------------------------
:Author: skipm@trdlnk.com
:Date: 2013-03-15
:Copyright: TradeLink LLC 2013
:Version: 0.1
:Manual section: 1
:Manual group: data filters
SYNOPSIS
========
{PROG} [ -f x,y[,a[,c[,l[s[,m]]]]]] [ -p file ] [ -F fmt ] [ -s sep ] \\
[ -T title ] [ -r label ] [ -l label] [ -x label ] \\
[ -b x,y,val,c ] [ -L ] [ -v ] [ -B bkend ] \\
[ -X min:max ] [ -Y min:max[,min:max] ] [ --xkcd ]
OPTIONS
=======
* -f x,y[,axis[,color[,legend[,style[,marker]]]]] - plot field x vs.
field y
* -b x,y,val,c - set background to c where field y value >= val at x
* -p file - if given, will save the plot to the named file and exit.
* -F fmt - plot the X axis using fmt (default varies)
* -s sep - use sep as the field separator (default is comma)
* -d dimension - specify dimensions in (I think) inches, default
8 wide, 6 high
* -T title - set title of the plot
* -r label - set the label for the right y axis
* -l label - set the label for the left y axis
* -x label - set the label for the x axis
* -L - do not create a legend
* -X min:max (two floats) or min,max (two dates) - set the min and max
values for the X axis - "today" or "yesterday" may be used as the max
date
* -Y min:max[,min:max] - set the initial min and max values for the left (and
optionally, right) Y axis
* -v - be a bit more verbose
* -B bkend - use the named backend
* -S - if a fieldname is not found, also try stripping leading and trailing
whitespace
* --xkcd - mimic style of the XKCD cartoon.
LONG OPTION NAMES
-----------------
Long option names are supported. Supported long names and their
equivalent short names are:
* backend: B
* format : F
* skip_legend: L
* title: T
* x_range: X
* xkcd: (long name only; no short equivalent)
* y_range: Y
* background: b
* dimension: d
* field: f
* help: h
* left_label: l
* plot_file: p
* right_label: r
* separator: s
* verbose: v
* x_label: x
DESCRIPTION
===========
The {PROG} command takes a CSV-like file as input and generates one
or more plots. Those plots can be displayed interactively (the
default) or written to an image file (if the -p option is given).
In its simplest form it displays a single plot of the values from a
CSV file in column 1 as a function of time (in column 0). In this
case, the CSV file is assumed to not have a header.
The specification of the fields to plot and their attributes are given
using the -f option. At its simplest, a single x,y pair is given,
resulting in a red line plot using the left Y axis. If the CSV file
has a header, x and y can be column names as well.
The -f option is the most frequently used and most complicated:
* The first two fields (x, y) are always required, and specify the two
columns to plot.
* The Y axis defaults to "left", but can be given explicitly as 'l'
or 'r'.
* The color can be anything matplotlib accepts. Typically, a single
letter suffices ('r' for red, etc), but the color name can also be
spelled out or given using hex notation.
* A legend label can be specified, and defaults to the y column name.
* Consult the matplotlib documentation for details of acceptable styles
(default '-'). By default, the line width is 1.0, but you can specify
the linestyle as "s/w", where "s" is the basic style, and "w" is a
floating point width.
* markers (default ''). You may use a ';' instead of ',', or quote the
field and the full field string (to preserve the quoting around the
field - it is split using Python's csv.reader class). By default, the
marker size is 1.0, but you can define the marker as "m/s", where "m"
is the marker, and "s" is a floating point scale value.
You can color the background of the plot based on one or more values using
the -b flag. For example, if you have a value at offset 2 which toggles
between three values, -1, 0, and +1, you could color the background
accordingly::
-b 0,2,-1,skyblue -b 0,2,0,pink -b 0,2,+1,lightgreen.
If you don't specify a format for the X axis, time is assumed, and a
hopefully reasonable format is chosen based on the current visible X
range:
* X range > 1.5 years ==> "%%Y-%%m-%%d"
* X range > two days ==> "%%m/%%d\\n%%H:%%M"
* X range < ten minutes ==> "%%H:%%M\\n%%S.%%f"
* X range < two hours ==> "%%H:%%M:%%S"
* otherwise ==> "%%H:%%M"
EXAMPLE
=======
Plot one hour of F:NQM12 trades, highlighting all trades of size 10
with a light green background::
nt -S -s 2012-04-06T07:25 -e 2012-04-06T08:25 F:NQM12 \\
| mpl -f 0,2,l,r -b 0,3,10,lightgreen
VERSION
=======
@@VERSION@@
SEE ALSO
========
* avg
* bars
* nt
* pt
* square
* take
"""
import sys
import csv
import getopt
import datetime
import os
import re
import io
import dateutil.parser
import numpy
import matplotlib.dates
import matplotlib.ticker
import pylab
PROG = os.path.basename(sys.argv[0])
SECONDS_PER_DAY = 60 * 60 * 24
ONE_MINUTE = datetime.timedelta(minutes=1)
ONE_DAY = datetime.timedelta(days=1)
ONE_HOUR = datetime.timedelta(minutes=60)
def main():
"see __doc__"
args = sys.argv[1:]
fields = []
xtime = True
xfmt = None
sep = ","
title = ""
right_label = ""
left_label = ""
x_label = ""
plot_file = ""
dims = (8, 6)
bkgds = []
x_min_max = []
y_min_max = []
do_legend = True
backend = None
use_xkcd = False
verbose = False
opts, args = getopt.getopt(args, "B:F:LT:X:Y:b:d:f:hl:p:r:s:vx:",
["backend=",
"format=",
"skip_legend",
"title=",
"xkcd",
"x_range=",
"y_range=",
"background=",
"dimension=",
"field=",
"help",
"left_label=",
"plot_file=",
"right_label=",
"separator=",
"verbose=",
"x_label",
])
for opt, arg in opts:
if opt in ("-B", "--backend"):
backend = arg
elif opt in ("-f", "--field"):
if "'" in arg:
quotechar = "'"
else:
quotechar = '"'
plarg = io.StringIO(arg)
plarg = next(csv.reader(plarg, quotechar=quotechar))
if len(plarg) == 2:
# plot using left y axis by default
plarg.append("l")
if len(plarg) == 3:
# plot using blue by default
plarg.append("b")
if len(plarg) == 4:
# use the Y column name as the default legend name.
plarg.append(plarg[1])
if len(plarg) == 5:
# plot with '-' line style by default
plarg.append("-")
if len(plarg) == 6:
# no marker by default
plarg.append("")
try:
fields.append([int(x.strip()) for x in plarg[0:2]]+
[plarg[2][0].lower()]+
[plarg[3].lower()]+
plarg[4:])
reader = csv.reader
except ValueError:
# Assume first two fields name column headers.
fields.append(plarg[0:2]+
[plarg[2][0].lower()]+
[plarg[3].lower()]+
plarg[4:])
reader = csv.DictReader
elif opt in ("-b", "--background"):
bg_spec = arg.split(",")
try:
bg_spec[0] = int(bg_spec[0])
bg_spec[1] = int(bg_spec[1])
reader = csv.reader
except ValueError:
bg_spec[0] = bg_spec[0].strip()
bg_spec[1] = bg_spec[1].strip()
reader = csv.DictReader
if ":" in bg_spec[2]:
low, high = [float(x) for x in bg_spec[2].split(":")]
else:
low = high = float(bg_spec[2])
bkgds.append((bg_spec[0], bg_spec[1], low, high, bg_spec[3]))
elif opt in ("-F", "--format"):
xtime = "%H" in arg or "%M" in arg or "%m" in arg or "%d" in arg
xfmt = arg
elif opt in ("-d", "--dimension"):
dims = tuple([float(v.strip()) for v in re.split("[x,]", arg)])
elif opt in ("-L", "--skip_legend"):
do_legend = False
elif opt in ("-p", "--plot_file"):
plot_file = arg
elif opt in ("-l", "--left_label"):
left_label = arg
elif opt in ("-r", "--right_label"):
right_label = arg
elif opt in ("-x", "--x_label"):
x_label = arg
elif opt == "--xkcd":
use_xkcd = True
elif opt in ("-v", "--verbose"):
verbose = True
elif opt in ("-Y", "--y_range"):
if "," in arg:
left, right = arg.split(",")
y_min_max = [[float(x) for x in left.split(":")],
[float(x) for x in right.split(":")]]
else:
y_min_max = [[float(x) for x in arg.split(":")]]
elif opt in ("-X", "--x_range"):
# First try splitting at colon (assuming a pair of floats). If
# that produces too many values, try a comma (assuming
# timestamps).
if len(arg.split(":")) == 2:
x_min_max = [[float(x) for x in arg.split(":")]]
else:
min_dt, max_dt = arg.split(",")
x_min = dateutil.parser.parse(min_dt)
try:
x_max = dateutil.parser.parse(max_dt)
except dateutil.parser.ParserError:
if max_dt == "today":
x_max = datetime.datetime.now()
elif max_dt == "yesterday":
x_max = datetime.datetime.now() - datetime.timedelta(days=1)
else:
raise
x_min_max = [x_min, x_max]
elif opt in ("-s", "--separator"):
sep = arg
elif opt in ("-T", "--title"):
title = arg
elif opt in ("-h", "--help"):
usage()
raise SystemExit
if backend is None:
if not os.environ.get("DISPLAY"):
# Allow non-interactive use (e.g. running with -p from cron)
matplotlib.use("Agg")
else:
matplotlib.use(backend)
if verbose:
print("Using", matplotlib.get_backend(), file=sys.stderr)
if use_xkcd:
from matplotlib import pyplot
try:
pyplot.xkcd()
except AttributeError:
print("XKCD style not available.", file=sys.stderr)
else:
if verbose:
print("Using XKCD style.", file=sys.stderr)
if not fields:
fields = [(0, 2, "l", "b", "2", "-", "")]
reader = csv.reader
min_y = 1e99
max_y = -1e99
if xtime:
min_x = datetime.datetime(9999, 12, 31, 23, 59, 59)
max_x = datetime.datetime(1970, 1, 1, 0, 0, 0)
def parse_x(x_val):
try:
return dateutil.parser.parse(x_val)
except ValueError:
print(f"Can't parse {x_val!r} as a timestamp.", file=sys.stderr)
raise
def fmt_date(tick_val, _=None, xfmt=xfmt):
date = matplotlib.dates.num2date(tick_val)
if xfmt is None:
# Calculate X format dynamically based on the visible
# range.
left, right = [matplotlib.dates.num2date(x) for x in pylab.xlim()]
x_delta = right - left
if x_delta > int(5 * 365) * ONE_DAY:
xfmt = "%Y"
elif x_delta > int(2 * 365) * ONE_DAY:
xfmt = "%Y-%m"
elif x_delta > int(1.5 * 365) * ONE_DAY:
xfmt = "%Y-%m-%d"
elif x_delta > 2 * ONE_DAY:
xfmt = "%m/%d\n%H:%M"
elif x_delta < 10 * ONE_MINUTE:
xfmt = "%H:%M\n%S.%f"
elif x_delta < 2 * ONE_HOUR:
xfmt = "%H:%M:%S"
else:
xfmt = "%H:%M"
return date.strftime(xfmt)
formatter = matplotlib.ticker.FuncFormatter(fmt_date)
else:
min_x = 1e99
max_x = -1e99
def parse_x(x_val):
return float(x_val)
def fmt_float(x_val, _=None):
return xfmt % x_val
formatter = matplotlib.ticker.FuncFormatter(fmt_float)
if reader == csv.DictReader:
fieldnames = next(csv.reader(sys.stdin, delimiter=sep))
rdr = reader(sys.stdin, fieldnames=fieldnames, delimiter=sep)
else:
rdr = reader(sys.stdin, delimiter=sep)
raw = list(rdr)
left = []
right = []
lt_y_range = [min_y, max_y]
rt_y_range = [min_y, max_y]
x_range = [min_x, max_x]
for (col1, col2, side, color, legend, style, marker) in fields:
if "/" in style:
style, width = style.split("/", 1)
width = float(width)
else:
width = 1.0
if "/" in marker:
marker, m_scale = marker.split("/", 1)
m_scale = float(m_scale)
else:
m_scale = 1.0
if marker == ";":
marker = ","
data = ([], color, legend, (style, width), (marker, m_scale))
if side == "l":
left.append(data)
y_range = lt_y_range
else:
right.append(data)
y_range = rt_y_range
for values in raw:
try:
_, _ = (values[col1], values[col2])
except IndexError:
# Rows don't need to be completely filled.
continue
x_val = values[col1]
y_val = values[col2]
if x_val and y_val:
try:
x_val = parse_x(x_val)
except ValueError as err:
print(err, values, file=sys.stderr)
raise
y_val = float(values[col2])
# If we get inputs with timezone info, convert. This
# is likely only to be executed once, as if one
# timestamp has tzinfo, all are likely to.
if xtime and x_range[0].tzinfo != x_val.tzinfo:
zone = x_val.tzinfo
x_range = [dt.replace(tzinfo=zone) for dt in x_range]
y_range[:] = [min(y_range[0], y_val),
max(y_range[1], y_val)]
data[0].append((x_val, y_val))
if data[0]:
x_range = [min([x for (x, _y) in data[0]]+[x_range[0]]),
max([x for (x, _y) in data[0]]+[x_range[1]])]
else:
print("No data for x range!", file=sys.stderr)
if (sum([len(x) for x in left]) == 0 and
sum([len(x) for x in right]) == 0):
print("No points to plot!", file=sys.stderr)
return 1
figure = pylab.figure(figsize=dims)
if xtime:
figure.autofmt_xdate()
left_plot = figure.add_subplot(111)
left_plot.set_title(title)
left_plot.set_axisbelow(True)
left_plot.yaxis.set_major_formatter(pylab.FormatStrFormatter('%g'))
left_plot.xaxis.set_major_formatter(formatter)
# Use a light, but solid, grid for the X axis and the left Y
# axis. No grid for right Y axis.
left_plot.xaxis.grid(True, linestyle='solid', which='major',
color='lightgrey', alpha=0.5)
left_plot.yaxis.grid(True, linestyle='solid', which='major',
color='lightgrey', alpha=0.5)
lines = []
if left:
if left_label:
left_plot.set_ylabel(left_label, color=left[0][1])
if x_label:
left_plot.set_xlabel(x_label, color=left[0][1])
for data in left:
points, color, legend, style, marker = data
lines.extend(left_plot.plot([x for x, y in points],
[y for x, y in points],
color=color,
linestyle=style[0],
linewidth=style[1],
label=legend,
marker=marker[0],
markersize=marker[1]))
for tick_label in left_plot.get_yticklabels():
tick_label.set_color(left[0][1])
extra = 0.02 * (lt_y_range[1]-lt_y_range[0])
lt_y_range = [lt_y_range[0] - extra, lt_y_range[1] + extra]
if y_min_max:
left_plot.set_ylim(y_min_max[0])
else:
left_plot.set_ylim(lt_y_range)
if right:
right_plot = left_plot.twinx()
right_plot.set_axisbelow(True)
right_plot.yaxis.set_major_formatter(pylab.FormatStrFormatter('%g'))
right_plot.xaxis.set_major_formatter(formatter)
if right_label:
right_plot.set_ylabel(right_label, color=right[0][1])
if x_label and not left:
right_plot.set_xlabel(x_label, color=left[0][1])
for data in right:
points, color, legend, style, marker = data
lines.extend(right_plot.plot([x for x, y in points],
[y for x, y in points],
color=color,
linestyle=style[0],
linewidth=style[1],
label=legend,
marker=marker[0],
markersize=marker[1]))
for tick_label in right_plot.get_yticklabels():
tick_label.set_color(right[0][1])
extra = 0.02 * (rt_y_range[1]-rt_y_range[0])
rt_y_range = [rt_y_range[0] - extra, rt_y_range[1] + extra]
if len(y_min_max) == 2:
right_plot.set_ylim(y_min_max[1])
else:
right_plot.set_ylim(rt_y_range)
color_bkgd(bkgds, left and left_plot or right_plot,
left and lt_y_range or rt_y_range, raw, parse_x)
if x_min_max:
left_plot.set_xlim(x_min_max[0])
else:
extra = (x_range[1]-x_range[0]) * 2 // 100
try:
x_range = [x_range[0] - extra, x_range[1] + extra]
except OverflowError:
print("overflow:", x_range, extra, file=sys.stderr)
raise
left_plot.set_xlim(x_range)
if do_legend:
labels = [line.get_label() for line in lines]
if right:
right_plot.legend(lines, labels, loc='best').set_draggable(True)
else:
left_plot.legend(lines, labels, loc='best').set_draggable(True)
figure.tight_layout()
if plot_file:
pylab.savefig(plot_file)
else:
pylab.show()
return 0
def color_bkgd(bkgds, plot, y_range, raw_data, parse_x):
    """Add background fill colors.

    For each ``(col1, col2, low, high, color)`` tuple in *bkgds*, shade the
    full vertical band [y_range[0], y_range[1]] of *plot* wherever the value
    in column *col2* falls in [low, high), or exactly equals *low* when
    ``low == high``.

    Parameters
    ----------
    bkgds : list of (col1, col2, low, high, color) tuples
    plot : matplotlib axes-like object supporting ``fill_between``
    y_range : two-item sequence giving the vertical extent of the fill
    raw_data : list of CSV rows; parsed cell values are written back in
        place so later bkgds entries reuse the already-parsed values
    parse_x : callable converting a raw x cell to its plotted value
    """
    if not bkgds:
        return
    for col1, col2, low, high, color in bkgds:
        data = []
        for values in raw_data:
            try:
                _, _ = (values[col1], values[col2])
            except IndexError:
                # Rows don't need to be completely filled.
                continue
            if values[col1] and values[col2]:
                try:
                    # NOTE: mutates raw_data so repeated passes see parsed values
                    values[col1] = parse_x(values[col1])
                except ValueError as err:
                    print(err, values, file=sys.stderr)
                    raise
                values[col2] = float(values[col2])
                data.append((values[col1], values[col2]))
        xdata = [x for (x, _y) in data]
        yarr = numpy.array([y for (_x, y) in data])
        if low == high:
            mask = (yarr == low)
        else:
            # BUG FIX: the original `low <= numpy.array(ydata) < high` is a
            # Python chained comparison; on a numpy array the implicit `and`
            # raises "truth value of an array is ambiguous". Combine the two
            # elementwise comparisons with `&` instead.
            mask = (low <= yarr) & (yarr < high)
        plot.fill_between(xdata, y_range[0], y_range[1],
                          edgecolor=color, facecolor=color,
                          where=mask)
def usage():
    """Print the module docstring (the CLI help text) to stderr.

    The docstring is run through str.format with the module globals so
    placeholders like program defaults are filled in at display time.
    """
    print(__doc__.format(**globals()), file=sys.stderr)
def as_days(delta):
    """Return *delta* (a datetime.timedelta) as a float number of days.

    Uses exact timedelta division, so the sub-second (microseconds)
    component is included; the previous ``days + seconds/SECONDS_PER_DAY``
    form silently dropped it.
    """
    return delta / datetime.timedelta(days=1)
if __name__ == "__main__":
    # Script entry point: main() returns a shell-style exit status.
    sys.exit(main())
|
import numpy as np


class KNearestNeighbors:
    """Minimal k-nearest-neighbors classifier using Euclidean distance.

    BUG FIX: the original had ``import numpy as np`` at class scope, which
    binds ``np`` as a *class attribute*; class scope is not part of the name
    lookup chain inside methods, so bare ``np`` in the methods only worked if
    numpy happened to be imported at module level. The import now lives at
    module level.
    """

    def __init__(self, n_neighbors=5):
        # number of neighbors consulted per prediction
        self.k = n_neighbors

    def fit(self, x, y):
        """Store the training data and build label <-> index encodings."""
        self.x = np.array(x)
        self.y = np.array(y)
        labels = np.unique(self.y)
        self.n_classes = len(labels)
        self.i_to_label = dict(enumerate(labels))
        self.label_to_i = {label: i for i, label in enumerate(labels)}
        self.y_enc = np.array([self.label_to_i[label] for label in self.y])

    def single_predict(self, pred):
        """Predict the class of one observation by majority vote of the k nearest."""
        # vectorized distance from every training point to the target
        distances = np.linalg.norm(self.x - pred, axis=1)
        # stable sort preserves training order among ties, matching the
        # original list-sort behavior
        nearest = np.argsort(distances, kind='stable')[:self.k]
        votes = np.bincount(self.y_enc[nearest], minlength=self.n_classes)
        # argmax breaks vote ties toward the lowest class index, like
        # list.index(max(...)) did
        return self.i_to_label[int(np.argmax(votes))]

    def predict(self, pred):
        """Predict for a single observation (1-D) or a batch (2-D)."""
        pred = np.array(pred)
        if pred.ndim == 1:
            return self.single_predict(pred)
        elif pred.ndim == 2:
            return np.array([self.single_predict(row) for row in pred])
|
import threading
# BUG FIX: Python 3 renamed the py2 'thread' module to '_thread';
# 'from thread import get_ident' fails at import time on Python 3.
from _thread import get_ident
from collections import deque


class Local(object):
    """Werkzeug-style thread-local attribute storage.

    Attributes are stored in a dict keyed by the current thread ident, so
    each thread sees only its own values. ``__ident_func__`` is pluggable
    (e.g. for greenlets).
    """
    __slots__ = ('__storage__', '__ident_func__')

    def __init__(self):
        # use object.__setattr__ to bypass our own __setattr__
        object.__setattr__(self, '__storage__', {})
        object.__setattr__(self, '__ident_func__', get_ident)

    def __iter__(self):
        # yields (ident, attribute-dict) pairs across all threads
        return iter(self.__storage__.items())

    def __release_local__(self):
        # drop everything stored for the current thread
        self.__storage__.pop(self.__ident_func__(), None)

    def __getattr__(self, name):
        try:
            return self.__storage__[self.__ident_func__()][name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        ident = self.__ident_func__()
        storage = self.__storage__
        try:
            storage[ident][name] = value
        except KeyError:
            # first attribute set in this thread
            storage[ident] = {name: value}

    def __delattr__(self, name):
        try:
            del self.__storage__[self.__ident_func__()][name]
        except KeyError:
            raise AttributeError(name)
class DbLocal(threading.local):
    """Per-thread bookkeeping for DB transactions, beansdb command batches,
    lazily-run functions and commit/rollback hooks.

    Each attribute is independent per thread because of the threading.local
    base class.
    """

    def __init__(self, autocommit):
        super(DbLocal, self).__init__()
        self._autocommit = autocommit
        # every queue starts empty; deques give O(1) ops at both ends
        for attr in ('_transactions', '_beansdb_commands', '_lazy_funcs',
                     '_commit_handlers', '_rollback_handlers'):
            setattr(self, attr, deque())

    def start_beansdb_transaction(self):
        """Open a fresh, empty beansdb command batch."""
        self._beansdb_commands.append(deque())

    def pop_beansdb_transaction(self):
        """Remove and return the newest batch; empty deque if there is none."""
        if self._beansdb_commands:
            return self._beansdb_commands.pop()
        return deque()

    def shift_beansdb_transaction(self):
        """Remove and return the oldest batch; empty deque if there is none."""
        if self._beansdb_commands:
            return self._beansdb_commands.popleft()
        return deque()

    def append_beansdb_commands(self, *cmds):
        """Append commands to the newest batch, opening one if needed."""
        if not self._beansdb_commands:
            self.start_beansdb_transaction()
        self._beansdb_commands[-1].extend(cmds)

    def insert_beansdb_commands(self, *cmds):
        """Append commands to the oldest batch, opening one if needed."""
        if not self._beansdb_commands:
            self.start_beansdb_transaction()
        self._beansdb_commands[0].extend(cmds)

    def add_lazy_func(self, func):
        """Queue a function to run later."""
        self._lazy_funcs.append(func)

    def clear_lazy_funcs(self):
        self._lazy_funcs.clear()

    def add_commit_handler(self, handler):
        """Register a hook to run on commit."""
        self._commit_handlers.append(handler)

    def clear_commit_handlers(self):
        self._commit_handlers.clear()

    def add_rollback_handler(self, handler):
        """Register a hook to run on rollback."""
        self._rollback_handlers.append(handler)

    def clear_rollback_handlers(self):
        self._rollback_handlers.clear()
|
import os
from dotenv import load_dotenv, find_dotenv
from requests import session
import logging
__author__ = "Gahan Saraiya"
__all__ = ['TitanicDisaster']
class TitanicDisaster(object):
    """Download the Kaggle Titanic competition train/test CSVs into data/raw.

    Credentials are taken from the KAGGLE_USERNAME / KAGGLE_PASSWORD
    environment variables (loaded from .env by the caller).
    """

    def __init__(self):
        self.kaggle_login_url = 'https://www.kaggle.com/account/login'
        self.payload = {
            'action': 'login',
            'username': os.environ.get("KAGGLE_USERNAME"),  # kaggle username
            'password': os.environ.get("KAGGLE_PASSWORD"),  # kaggle password
        }
        # project root is two levels above this module
        here = os.path.dirname(__file__)
        self.project_dir = os.path.join(here, os.pardir, os.pardir)
        # destination paths for the raw data
        self.raw_data_path = os.path.join(self.project_dir, 'data', 'raw')
        self.train_data_path = os.path.join(self.raw_data_path, 'train.csv')
        self.test_data_path = os.path.join(self.raw_data_path, 'test.csv')
        self.log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        logging.basicConfig(level=logging.INFO, format=self.log_fmt)
        self.logger = logging.getLogger(__name__)

    def extract_data(self, url, file_path):
        """Log in to kaggle and stream *url* into *file_path* (binary)."""
        with session() as sess:
            # authenticate first so the download is authorized
            sess.post(self.kaggle_login_url, data=self.payload)
            with open(file_path, 'wb') as out_file:
                response = sess.get(url, stream=True)
                for chunk in response.iter_content(1024):
                    out_file.write(chunk)

    def get_raw_data(self):
        """Fetch both raw CSVs and return (train_path, test_path)."""
        self.logger.info("getting raw data")
        downloads = (
            ("https://www.kaggle.com/c/3136/download/train.csv", self.train_data_path),
            ("https://www.kaggle.com/c/3136/download/test.csv", self.test_data_path),
        )
        for url, path in downloads:
            self.extract_data(url, path)
        self.logger.info("downloaded raw training and test data")
        return self.train_data_path, self.test_data_path
if __name__ == "__main__":
    # Load credentials from a .env file before constructing the downloader.
    dotenv_path = find_dotenv()  # find '.env'
    load_dotenv(dotenv_path)  # load '.env'
    titanic_obj = TitanicDisaster()
    train_data, test_data = titanic_obj.get_raw_data()
    # NOTE(review): bare expression below is a no-op — likely leftover from
    # interactive use; consider removing it.
    titanic_obj
|
import os, shutil
import time
import logging
from pytest import approx
from datetime import datetime
from HSTB.kluster import fqpr_generation, fqpr_project, xarray_conversion, fqpr_intelligence
try: # when running from pycharm console
from hstb_kluster.tests.test_datasets import RealFqpr, RealDualheadFqpr, SyntheticFqpr
except: # relative import as tests directory can vary in location depending on how kluster is installed
from .test_datasets import RealFqpr, RealDualheadFqpr, SyntheticFqpr
from HSTB.kluster.xarray_helpers import interp_across_chunks
from HSTB.kluster.fqpr_convenience import *
from HSTB.drivers import par3
datapath = ''
# Thin pytest wrappers: each delegates to a module-level helper defined
# further down in this file, running one processing stage against either
# the single-head ('real') or dual-head ('realdualhead') test dataset.
def test_get_orientation_vectors():
    """
    get_orientation_vectors test for the em2040 dataset
    """
    get_orientation_vectors(dset='real')
def test_get_orientation_vectors_dualhead():
    """
    get_orientation_vectors test for the em2040 dualrx/dualtx dataset
    """
    get_orientation_vectors(dset='realdualhead')
def test_build_beam_pointing_vector():
    """
    build_beam_pointing_vector test for the em2040 dataset
    """
    build_beam_pointing_vector(dset='real')
def test_build_beam_pointing_vector_dualhead():
    """
    build_beam_pointing_vector test for the em2040 dualrx/dualtx dataset
    """
    build_beam_pointing_vector(dset='realdualhead')
def test_sv_correct():
    """
    sv_correct test for the em2040 dataset
    """
    sv_correct(dset='real')
def test_sv_correct_dualhead():
    """
    sv_correct test for the em2040 dualrx/dualtx dataset
    """
    sv_correct(dset='realdualhead')
def test_georef_xyz():
    """
    georef_xyz test for the em2040 dataset
    """
    georef_xyz(dset='real')
def test_georef_xyz_dualhead():
    """
    georef_xyz test for the em2040 dualrx/dualtx dataset
    """
    georef_xyz(dset='realdualhead')
def test_find_testfile():
    """
    Find the test file we use for the next tests.

    The path comes from get_testfile_paths (the test_data folder next to
    this package); the print is a diagnostic hint before the assert fails.
    """
    testfile_path, expected_output = get_testfile_paths()
    if not os.path.exists(testfile_path):
        print('test_find_testfile: could not find {}'.format(testfile_path))
    assert os.path.exists(testfile_path)
def test_process_testfile():
    """
    Run conversion and basic processing on the test file.

    Converts the .all multibeam file, processes it in NAD83, then
    spot-checks the first beam of the first ping for every converted
    variable against known-good values. The converted folder path is
    stashed in the module-global ``datapath`` for the reload-based tests
    that follow.
    """
    global datapath
    testfile_path, expected_output = get_testfile_paths()
    linename = os.path.split(testfile_path)[1]
    out = convert_multibeam(testfile_path)
    # freshly converted: nothing processed yet
    assert not out.line_is_processed(linename)
    assert out.return_next_unprocessed_line() == linename
    out = process_multibeam(out, coord_system='NAD83')
    assert out.line_is_processed(linename)
    assert out.return_next_unprocessed_line() == ''
    number_of_sectors = len(out.multibeam.raw_ping)
    # first beam of the first ping: pull out every converted variable
    rp = out.multibeam.raw_ping[0].isel(time=0).isel(beam=0)
    firstbeam_angle = rp.beampointingangle.values
    firstbeam_traveltime = rp.traveltime.values
    first_counter = rp.counter.values
    first_dinfo = rp.detectioninfo.values
    first_mode = rp.mode.values
    first_modetwo = rp.modetwo.values
    first_ntx = rp.ntx.values
    firstbeam_procstatus = rp.processing_status.values
    firstbeam_qualityfactor = rp.qualityfactor.values
    first_soundspeed = rp.soundspeed.values
    first_tiltangle = rp.tiltangle.values
    first_delay = rp.delay.values
    first_frequency = rp.frequency.values
    first_yawpitch = rp.yawpitchstab.values
    firstcorr_angle = rp.corr_pointing_angle.values
    firstcorr_altitude = rp.corr_altitude.values
    firstcorr_heave = rp.corr_heave.values
    firstdepth_offset = rp.depthoffset.values
    first_status = rp.processing_status.values
    firstrel_azimuth = rp.rel_azimuth.values
    firstrx = rp.rx.values
    firstthu = rp.thu.values
    firsttvu = rp.tvu.values
    firsttx = rp.tx.values
    firstx = rp.x.values
    firsty = rp.y.values
    firstz = rp.z.values
    # compare against known-good values for this dataset
    assert number_of_sectors == 1
    assert firstbeam_angle == approx(np.float32(74.640), 0.001)
    assert firstbeam_traveltime == approx(np.float32(0.3360895), 0.000001)
    assert first_counter == 61967
    assert first_dinfo == 2
    assert first_mode == 'FM'
    assert first_modetwo == '__FM'
    assert first_ntx == 3
    assert firstbeam_procstatus == 5
    assert firstbeam_qualityfactor == 42
    assert first_soundspeed == np.float32(1488.6)
    assert first_tiltangle == np.float32(-0.44)
    assert first_delay == approx(np.float32(0.002206038), 0.000001)
    assert first_frequency == 275000
    assert first_yawpitch == 'PY'
    assert firstcorr_angle == approx(np.float32(1.2028906), 0.000001)
    assert firstcorr_altitude == np.float32(0.0)
    assert firstcorr_heave == approx(np.float32(-0.06), 0.01)
    assert firstdepth_offset == approx(np.float32(92.162), 0.001)
    assert first_status == 5
    assert firstrel_azimuth == approx(np.float32(4.703383), 0.00001)
    assert firstrx == approx(np.array([0.7870753, 0.60869384, -0.100021675], dtype=np.float32), 0.00001)
    assert firstthu == approx(np.float32(8.680531), 0.0001)
    assert firsttvu == approx(np.float32(2.444148), 0.0001)
    assert firsttx == approx(np.array([0.6074468, -0.79435784, 0.0020107413], dtype=np.float32), 0.00001)
    assert firstx == approx(539028.450, 0.001)
    assert firsty == approx(5292783.977, 0.001)
    assert firstz == approx(np.float32(92.742), 0.001)
    # dataset-level min/max extents
    assert rp.min_x == 538922.066
    assert rp.min_y == 5292774.566
    assert rp.min_z == 72.961
    assert rp.max_x == 539320.370
    assert rp.max_y == 5293236.823
    assert rp.max_z == 94.294
    # stash the converted folder for the reload-based tests below
    datapath = out.multibeam.converted_pth
    out.close()
    out = None
def test_return_total_pings():
    """Check ping counts, both time-bounded and for the whole dataset."""
    # NOTE(review): this guard prints a hint but does not return/skip, so
    # reload_data('') still runs if test_process_testfile was not run first.
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    pc = out.return_total_pings(min_time=1495563100, max_time=1495563130)
    assert pc == 123
    pc = out.return_total_pings()
    assert pc == 216
    out.close()
    out = None
def test_return_total_soundings():
    """Check sounding counts, both time-bounded and for the whole dataset."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    ts = out.return_total_soundings(min_time=1495563100, max_time=1495563130)
    assert ts == 49200
    ts = out.return_total_soundings()
    assert ts == 86400
    out.close()
    out = None
def test_return_soundings_in_polygon():
    """Query soundings inside a geographic polygon and re-use the filter."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    polygon = np.array([[-122.47798556, 47.78949665], [-122.47798556, 47.78895117], [-122.47771027, 47.78895117],
                        [-122.47771027, 47.78949665]])
    head, x, y, z, tvu, rejected, pointtime, beam = out.return_soundings_in_polygon(polygon)
    assert head.shape == x.shape == y.shape == z.shape == tvu.shape == rejected.shape == pointtime.shape == beam.shape
    assert x.shape == (1911,)
    # now try just pulling the corrected beam angle for the soundings
    beamangle = out.return_soundings_in_polygon(polygon, variable_selection=('corr_pointing_angle',))
    assert beamangle[0].shape == (1911,)
    # now use the existing filter that we set with the last return_soundings_in_polygon to get an additional variable
    getbeamangle = out.get_variable_by_filter('corr_pointing_angle')
    assert getbeamangle.shape == (1911,)
    assert (beamangle == getbeamangle).all()
    # try a 1d variable
    alti = out.get_variable_by_filter('altitude')
    assert alti.shape == (1911,)
    # try a attitude variable
    rollv = out.get_variable_by_filter('roll')
    assert rollv.shape == (1911,)
    # now try setting the filter separately from the return_soundings_in_polygon method. This allows you to set the
    # filter without loading data if you want to do that.
    out.set_filter_by_polygon(polygon)
    next_getbeamangle = out.get_variable_by_filter('corr_pointing_angle')
    assert next_getbeamangle.shape == (1911,)
    assert (beamangle == next_getbeamangle).all()
    out.close()
    out = None
def test_return_cast_dict():
    """Check the sound-velocity cast dictionary pulled from the multibeam file.

    The expected value is the single embedded SV profile: location, source,
    unix timestamp and (depth, soundspeed) pairs.
    """
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    cdict = out.return_cast_dict()
    assert cdict == {'profile_1495563079': {'location': [47.78890945494799, -122.47711319986821],
                                            'source': 'multibeam', 'time': 1495563079,
                                            'data': [[0.0, 1489.2000732421875], [0.32, 1489.2000732421875], [0.5, 1488.7000732421875],
                                                     [0.55, 1488.300048828125], [0.61, 1487.9000244140625], [0.65, 1488.2000732421875],
                                                     [0.67, 1488.0], [0.79, 1487.9000244140625], [0.88, 1487.9000244140625],
                                                     [1.01, 1488.2000732421875], [1.04, 1488.0999755859375], [1.62, 1488.0999755859375],
                                                     [2.0300000000000002, 1488.300048828125], [2.43, 1488.9000244140625], [2.84, 1488.5],
                                                     [3.25, 1487.7000732421875], [3.67, 1487.2000732421875], [4.45, 1486.800048828125],
                                                     [4.8500000000000005, 1486.800048828125], [5.26, 1486.5999755859375], [6.09, 1485.7000732421875],
                                                     [6.9, 1485.0999755859375], [7.71, 1484.800048828125], [8.51, 1484.0],
                                                     [8.91, 1483.800048828125], [10.13, 1483.7000732421875], [11.8, 1483.0999755859375],
                                                     [12.620000000000001, 1482.9000244140625], [16.79, 1482.9000244140625], [20.18, 1481.9000244140625],
                                                     [23.93, 1481.300048828125], [34.79, 1480.800048828125], [51.15, 1480.800048828125],
                                                     [56.13, 1481.0], [60.67, 1481.5], [74.2, 1481.9000244140625], [12000.0, 1675.800048828125]]}}
    out.close()
    out = None
def test_subset_by_time():
    """Subset the dataset by time, then restore and check counts round-trip."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    out.subset_by_time(mintime=1495563100, maxtime=1495563130)
    assert len(out.multibeam.raw_ping[0].time) == 123
    assert len(out.multibeam.raw_att.time) == 3001
    out.subset.restore_subset()
    # restoring brings back the full ping/attitude records
    assert len(out.multibeam.raw_ping[0].time) == 216
    assert len(out.multibeam.raw_att.time) == 5302
    out.close()
    out = None
def test_subset_variables():
    """Pull a time-bounded copy of one variable without mutating the source."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    dset = out.subset_variables(['z'], ping_times=(1495563100, 1495563130))
    assert len(dset.time) == 123
    assert dset.z.shape[0] == 123
    # the original dataset is untouched
    assert len(out.multibeam.raw_ping[0].time) == 216
    assert out.multibeam.raw_ping[0].z.shape[0] == 216
    assert len(out.multibeam.raw_att.time) == 5302
    out.close()
    out = None
def test_subset_variables_filter():
    """Same as above, but filtered by detection: result is sounding-indexed."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    dset = out.subset_variables(['z'], ping_times=(1495563100, 1495563130), filter_by_detection=True)
    assert len(dset.sounding) == 45059
    assert dset.z.shape[0] == 45059
    assert len(out.multibeam.raw_ping[0].time) == 216
    assert out.multibeam.raw_ping[0].z.shape[0] == 216
    assert len(out.multibeam.raw_att.time) == 5302
    out.close()
    out = None
def test_subset_variables_by_line():
    """Subset per line: one entry keyed by the source .all filename."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    dset = out.subset_variables_by_line(['z'])
    assert list(dset.keys()) == ['0009_20170523_181119_FA2806.all']
    assert len(dset['0009_20170523_181119_FA2806.all'].time) == 216
    assert dset['0009_20170523_181119_FA2806.all'].z.shape[0] == 216
    out.close()
    out = None
def test_intersects():
    """Check bounding-box intersection in both projected and geographic coords."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    assert out.intersects(5293000, 5330000, 538950, 539300, geographic=False)
    assert not out.intersects(5320000, 5330000, 538950, 539300, geographic=False)
    assert out.intersects(47.78895, 47.790, -122.478, -122.479, geographic=True)
    assert not out.intersects(47.8899, 47.890, -122.478, -122.479, geographic=True)
    out.close()
    out = None
def test_return_unique_mode():
    """The test line was recorded entirely in FM mode."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    mode = out.return_unique_mode()
    assert mode == ['FM']
    out.close()
    out = None
def test_return_rounded_frequency():
    """Frequencies are rounded to the nearest nominal band (300 kHz here)."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    fq = out.return_rounded_frequency()
    assert fq == [300000]
    out.close()
    out = None
def test_return_lines_for_times():
    """Times outside any line map to '', times within map to the line name."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    lns = out.return_lines_for_times(np.array([1495400000, 1495563100, 1495563132]))
    assert np.array_equal(lns, ['', '0009_20170523_181119_FA2806.all', '0009_20170523_181119_FA2806.all'])
    out.close()
    out = None
def test_last_operation_date():
    """last_operation_date reflects the most recent processing stamp (TPU)."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    datetime_obj = out.last_operation_date
    assert isinstance(datetime_obj, datetime)
    assert datetime.strptime(out.multibeam.raw_ping[0]._total_uncertainty_complete, '%c') == datetime_obj
    out.close()
    out = None
def test_export_files():
    """Export soundings to csv/las, by-identifier (6 files) and combined (1)."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    pths_one = out.export_pings_to_file(file_format='csv', filter_by_detection=True, export_by_identifiers=True)
    pths_two = out.export_pings_to_file(file_format='csv', filter_by_detection=True, export_by_identifiers=False)
    pths_three = out.export_pings_to_file(file_format='las', filter_by_detection=True, export_by_identifiers=True)
    pths_four = out.export_pings_to_file(file_format='las', filter_by_detection=True, export_by_identifiers=False)
    assert len(pths_one) == 6
    assert len(pths_two) == 1
    assert len(pths_three) == 6
    assert len(pths_four) == 1
    # clean up the export folders so the converted data stays pristine
    expected_las = os.path.join(datapath, 'las_export')
    if os.path.exists(expected_las):
        shutil.rmtree(expected_las)
    expected_csv = os.path.join(datapath, 'csv_export')
    if os.path.exists(expected_csv):
        shutil.rmtree(expected_csv)
    out.close()
    out = None
def test_export_lines_to_file():
    """Same as test_export_files but driven by an explicit line-name list."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    pths_one = out.export_lines_to_file(['0009_20170523_181119_FA2806.all'], file_format='csv', filter_by_detection=True, export_by_identifiers=True)
    pths_two = out.export_lines_to_file(['0009_20170523_181119_FA2806.all'], file_format='csv', filter_by_detection=True, export_by_identifiers=False)
    pths_three = out.export_lines_to_file(['0009_20170523_181119_FA2806.all'], file_format='las', filter_by_detection=True, export_by_identifiers=True)
    pths_four = out.export_lines_to_file(['0009_20170523_181119_FA2806.all'], file_format='las', filter_by_detection=True, export_by_identifiers=False)
    assert len(pths_one) == 6
    assert len(pths_two) == 1
    assert len(pths_three) == 6
    assert len(pths_four) == 1
    # clean up the export folders so the converted data stays pristine
    expected_las = os.path.join(datapath, 'las_export')
    if os.path.exists(expected_las):
        shutil.rmtree(expected_las)
    expected_csv = os.path.join(datapath, 'csv_export')
    if os.path.exists(expected_csv):
        shutil.rmtree(expected_csv)
    out.close()
    out = None
def test_export_variable():
    """Export single variables to csv and verify the header line of each file.

    The '_40111' suffix in the expected filenames is the system identifier
    of the test sonar.
    """
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    multicheck = os.path.join(datapath, 'multicheck')
    expected_multi = os.path.join(datapath, 'multicheck_40111.csv')
    out.export_variable('multibeam', 'beampointingangle', multicheck)
    assert os.path.exists(expected_multi)
    with open(expected_multi) as fil:
        assert fil.readline().rstrip() == 'time,beam,beampointingangle'
    os.remove(expected_multi)
    # reduced export: one mean value per ping, so no beam column
    multicheck = os.path.join(datapath, 'multicheck')
    expected_multi = os.path.join(datapath, 'multicheck_40111.csv')
    out.export_variable('multibeam', 'beampointingangle', multicheck, reduce_method='mean', zero_centered=True)
    assert os.path.exists(expected_multi)
    with open(expected_multi) as fil:
        assert fil.readline().rstrip() == 'time,beampointingangle'
    os.remove(expected_multi)
    # navigation variables export with just time + variable
    navcheck = os.path.join(datapath, 'navcheck')
    expected_nav = os.path.join(datapath, 'navcheck_40111.csv')
    out.export_variable('raw navigation', 'latitude', navcheck)
    assert os.path.exists(expected_nav)
    with open(expected_nav) as fil:
        assert fil.readline().rstrip() == 'time,latitude'
    os.remove(expected_nav)
    out.close()
    out = None
def test_export_dataset():
    """Export whole datasets to csv and verify the (reduced) header columns."""
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    multicheck = os.path.join(datapath, 'multicheck')
    expected_multi = os.path.join(datapath, 'multicheck_40111.csv')
    out.export_dataset('multibeam', multicheck)
    assert os.path.exists(expected_multi)
    with open(expected_multi) as fil:
        assert fil.readline().rstrip() == 'time,mean_acrosstrack,mean_alongtrack,altitude,mean_beampointingangle,corr_altitude,corr_heave,mean_corr_pointing_angle,counter,mean_datum_uncertainty,mean_delay,mean_depthoffset,median_detectioninfo,median_frequency,nadir_geohash,latitude,longitude,mode,modetwo,ntx,median_processing_status,median_qualityfactor,mean_rel_azimuth,soundspeed,mean_thu,mean_tiltangle,mean_traveltime,mean_tvu,median_txsector_beam,mean_x,mean_y,yawpitchstab,mean_z'
    os.remove(expected_multi)
    navcheck = os.path.join(datapath, 'navcheck')
    expected_nav = os.path.join(datapath, 'navcheck_40111.csv')
    out.export_dataset('raw navigation', navcheck)
    assert os.path.exists(expected_nav)
    with open(expected_nav) as fil:
        assert fil.readline().rstrip() == 'time,altitude,latitude,longitude'
    os.remove(expected_nav)
    out.close()
    out = None
def test_set_variable_by_filter():
    """Reject soundings via the polygon filter, first one point, then all.

    This is the last reload-based test, so it finishes by calling
    cleanup_after_tests() to remove the converted data on disk.
    """
    if not os.path.exists(datapath):
        print('Please run test_process_testfile first')
    out = reload_data(datapath)
    polygon = np.array([[-122.47798556, 47.78949665], [-122.47798556, 47.78895117], [-122.47771027, 47.78895117],
                        [-122.47771027, 47.78949665]])
    head, x, y, z, tvu, rejected, pointtime, beam = out.return_soundings_in_polygon(polygon)
    assert head.shape == x.shape == y.shape == z.shape == tvu.shape == rejected.shape == pointtime.shape == beam.shape
    assert x.shape == (1911,)
    assert np.count_nonzero(out.subset.ping_filter) == 1911  # ping filter is set on return_soundings, is a bool mask of which soundings are in the selection
    assert rejected[0] == 0  # first sounding is 0 status
    out.set_variable_by_filter('detectioninfo', 2, selected_index=[[0]])  # set the first selected point region to rejected=2
    head, x, y, z, tvu, rejected, pointtime, beam = out.return_soundings_in_polygon(polygon)
    assert rejected[0] == 2  # first sounding is now status=2
    out.set_variable_by_filter('detectioninfo', 2)  # set the all poitns in the return_soundings selection to status=2
    head, x, y, z, tvu, rejected, pointtime, beam = out.return_soundings_in_polygon(polygon)
    assert (rejected == 2).all()
    out.close()
    out = None
    cleanup_after_tests()
def test_intelligence():
    """
    Test fqpr intelligence by kicking off a folder monitoring session, finding the test multibeam file, and checking
    the resulting actions to see if the conversion action matches expectations.
    """
    global datapath
    testfile_path, expected_output = get_testfile_paths()
    proj = fqpr_project.create_new_project(os.path.dirname(testfile_path))
    proj_path = os.path.join(os.path.dirname(testfile_path), 'kluster_project.json')
    fintel = fqpr_intelligence.FqprIntel(proj)
    fintel.set_settings({'coord_system': 'NAD83'})
    fintel.add_file(testfile_path)
    time.sleep(3)  # pause until the folder monitoring finds the multibeam file
    assert os.path.exists(proj_path)
    os.remove(proj_path)
    # first queued action should be the conversion of the newly found line
    assert str(fintel.action_container) == "FqprActionContainer: 1 actions of types: ['multibeam']"
    assert len(fintel.action_container.actions) == 1
    action = fintel.action_container.actions[0]
    assert action.text[0:25] == 'Convert 1 multibeam lines'
    assert action.action_type == 'multibeam'
    assert action.priority == 1
    assert action.is_running == False
    assert len(action.input_files) == 1
    assert action.kwargs == {}
    assert action.args[2:] == [None, False, True]
    # running the conversion should queue a follow-up processing action
    fintel.execute_action()
    action = fintel.action_container.actions[0]
    assert action.text[0:21] == 'Run all processing on'
    assert action.action_type == 'processing'
    assert action.priority == 5
    assert action.is_running is False
    assert len(action.input_files) == 0
    assert action.kwargs == {'run_orientation': True, 'orientation_initial_interpolation': False, 'run_beam_vec': True,
                             'run_svcorr': True, 'add_cast_files': [], 'run_georef': True, 'run_tpu': True, 'use_epsg': False,
                             'use_coord': True, 'epsg': None, 'coord_system': 'NAD83', 'vert_ref': 'waterline'}
    assert isinstance(action.args[0], fqpr_generation.Fqpr)
    assert isinstance(proj.get_dask_client(), Client)
    assert isinstance(proj.build_raw_attitude_for_line('0009_20170523_181119_FA2806.all'), xr.Dataset)
    assert proj.fqpr_instances['em2040_40111_05_23_2017'] == proj.return_line_owner('0009_20170523_181119_FA2806.all')
    fintel.clear()
    # stash the converted folder so cleanup can remove it
    datapath = action.args[0].multibeam.converted_pth
    proj.close()
    action.args[0] = None
    cleanup_after_tests()
def cleanup_after_tests():
    """
    Clean up after test_intelligence and test_process_testfile

    Removes the converted data folder referenced by the module-global
    ``datapath`` and asserts the removal actually happened.
    """
    global datapath
    assert os.path.exists(datapath)
    clear_testfile_data(datapath)
    assert not os.path.exists(datapath)
def get_testfile_paths():
    """
    return the necessary paths for the testfile tests

    Returns
    -------
    str
        absolute file path to the test file
    str
        absolute folder path to the expected output folder
    """
    # test_data lives one package level above this module
    test_data_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_data')
    testfile = os.path.join(test_data_dir, '0009_20170523_181119_FA2806.all')
    expected_output = os.path.join(test_data_dir, 'converted')
    return testfile, expected_output
def clear_testfile_data(expected_output: str):
    """
    remove the converted data

    Parameters
    ----------
    expected_output
        path to the converted data folder
    """
    # the project json is written next to the converted folder, not inside it
    proj_file = os.path.join(os.path.dirname(expected_output), 'kluster_project.json')
    if os.path.exists(expected_output):
        shutil.rmtree(expected_output)
    if os.path.exists(proj_file):
        os.remove(proj_file)
def get_orientation_vectors(dset='realdualhead'):
    """
    Automated check of fqpr_generation get_orientation_vectors.

    Runs against the 'real' or 'realdualhead' dataset bundled with the test
    datasets module and compares the computed tx/rx orientation vectors with
    known-good values. There is no support for the synthetic dataset currently.

    Parameters
    ----------
    dset: str, specify which dataset you want to use, one of 'real' and 'realdualhead'
    """
    if dset == 'real':
        synth = load_dataset(RealFqpr())
        expected_tx = [np.array([0.6136555921172974, -0.7895255928982701, 0.008726535498373935])]
        expected_rx = [np.array([0.7834063072490661, 0.6195440454987808, -0.04939365798750035])]
    elif dset == 'realdualhead':
        synth = load_dataset(RealDualheadFqpr())
        expected_tx = [np.array([-0.8173967230596009, -0.5756459946918305, -0.022232663846213512]),
                       np.array([-0.818098137098556, -0.5749317404941526, -0.013000579640495315])]
        expected_rx = [np.array([0.5707251056249292, -0.8178104883650188, 0.07388380848347877]),
                       np.array([0.5752302545527056, -0.8157217016726686, -0.060896177270015645])]
    else:
        raise NotImplementedError('mode not recognized')
    fq = fqpr_generation.Fqpr(synth)
    fq.logger = logging.getLogger()
    fq.logger.setLevel(logging.INFO)
    fq.read_from_source()
    # dump_data=False keeps the results in memory so they can be read back below
    # instead of being written to disk
    fq.get_orientation_vectors(dump_data=False, initial_interp=False)
    system_ids = [rp.system_identifier for rp in fq.multibeam.raw_ping]
    first_tstmp = list(fq.intermediate_dat[system_ids[0]]['orientation'].keys())[0]
    in_memory = [fq.intermediate_dat[sysid]['orientation'][first_tstmp][0][0] for sysid in system_ids]
    # tx is checked per sector (not beam based); rx is taken at each sector's first
    # beam, as rx vectors change per beam with attitude over the beam traveltime
    tx_by_sector = [entry[0].values[0][0] for entry in in_memory]
    rx_by_sector = [entry[1].values[0][0] for entry in in_memory]
    print('ORIENTATION {}'.format(dset))
    print([x for y in tx_by_sector for x in y.flatten()])
    print([x for y in rx_by_sector for x in y.flatten()])
    # check for the expected tx orientation vectors
    for expected, computed in zip(expected_tx, tx_by_sector):
        assert expected == approx(computed, 0.000001)
    # check for the expected rx orientation vectors
    for expected, computed in zip(expected_rx, rx_by_sector):
        assert expected == approx(computed, 0.000001)
    fq.close()
    print('Passed: get_orientation_vectors')
def build_beam_pointing_vector(dset='realdualhead'):
    """
    Automated check of fqpr_generation build_beam_pointing_vector.

    Runs against the 'real' or 'realdualhead' dataset bundled with the test
    datasets module and compares computed beam azimuth and depression angle
    values with known-good values. There is no support for the synthetic
    dataset currently.

    Parameters
    ----------
    dset: str, specify which dataset you want to use, one of 'real' and 'realdualhead'
    """
    if dset == 'real':
        synth = load_dataset(RealFqpr())
        expected_ba = [np.array([4.697702878191307, 4.697679369354361, 4.697655798111743])]
        expected_bda = [np.array([1.209080677036444, 1.2074367547912856, 1.2057926824074374])]
    elif dset == 'realdualhead':
        synth = load_dataset(RealDualheadFqpr())
        expected_ba = [np.array([4.7144694193229295, 4.714486234983295, 4.714503034301336]),
                       np.array([4.72527541256665, 4.725306685935214, 4.725337688174256])]
        expected_bda = [np.array([1.2049043892451596, 1.20385629874863, 1.2028083855561609]),
                        np.array([0.5239366688735714, 0.5181768253459791, 0.5124169874635531])]
    else:
        raise NotImplementedError('mode not recognized')
    fq = fqpr_generation.Fqpr(synth)
    fq.logger = logging.getLogger()
    fq.logger.setLevel(logging.INFO)
    fq.read_from_source()
    # dump_data=False keeps everything in memory so results can be read back below
    fq.get_orientation_vectors(dump_data=False, initial_interp=False)
    fq.get_beam_pointing_vectors(dump_data=False)
    system_ids = [rp.system_identifier for rp in fq.multibeam.raw_ping]
    first_tstmp = list(fq.intermediate_dat[system_ids[0]]['bpv'].keys())[0]
    in_memory = [fq.intermediate_dat[sysid]['bpv'][first_tstmp][0][0] for sysid in system_ids]
    # first three beams of the first ping: entry[0] is azimuth, entry[1] is depression angle
    beam_azimuth = [entry[0].isel(time=0).values[0:3] for entry in in_memory]
    beam_depression = [entry[1].isel(time=0).values[0:3] for entry in in_memory]
    print('BEAMPOINTING {}'.format(dset))
    print([x for y in beam_azimuth for x in y.flatten()])
    print([x for y in beam_depression for x in y.flatten()])
    # beam azimuth check
    for computed, expected in zip(beam_azimuth, expected_ba):
        assert computed == approx(expected, 0.0000001)
    # beam depression angle check
    for computed, expected in zip(beam_depression, expected_bda):
        assert computed == approx(expected, 0.0000001)
    fq.close()
    print('Passed: build_beam_pointing_vector')
def sv_correct(dset='realdualhead'):
    """
    Automated test of fqpr_generation sv_correct
    Will run using the 'real' dataset or 'realdualhead' included in the test_datasets file.
    No current support for the synthetic dataset, need to look at adding that in. I've yet to find a reason to do so
    though, now that I have the real pings.
    Parameters
    ----------
    dset: str, specify which dataset you want to use, one of 'real' and 'realdualhead'
    """
    # expected values for the first three beams of each head:
    # x = alongtrack, y = acrosstrack, z = depth offsets (presumably meters - confirm)
    if dset == 'real':
        synth = load_dataset(RealFqpr())
        expected_x = [np.array([-3.419, -3.406, -3.392])]
        expected_y = [np.array([-232.877, -231.562, -230.249])]
        expected_z = [np.array([91.139, 91.049, 90.955])]
    elif dset == 'realdualhead':
        synth = load_dataset(RealDualheadFqpr())
        expected_x = [np.array([0.692, 0.693, 0.693]),
                      np.array([0.567, 0.565, 0.564])]
        expected_y = [np.array([-59.992, -59.945, -59.848]),
                      np.array([-9.351, -9.215, -9.078])]
        expected_z = [np.array([18.305, 18.342, 18.359]),
                      np.array([18.861, 18.873, 18.883])]
    else:
        raise NotImplementedError('mode not recognized')
    fq = fqpr_generation.Fqpr(synth)
    fq.logger = logging.getLogger()
    fq.logger.setLevel(logging.INFO)
    fq.read_from_source()
    # dump_data=False keeps the intermediate results in memory so they can be
    # read back below instead of being written to disk
    fq.get_orientation_vectors(dump_data=False, initial_interp=False)
    fq.get_beam_pointing_vectors(dump_data=False)
    fq.sv_correct(dump_data=False)
    # arrays of computed vectors, one entry per system/head
    sysid = [rp.system_identifier for rp in fq.multibeam.raw_ping]
    tstmp = list(fq.intermediate_dat[sysid[0]]['sv_corr'].keys())[0]
    # since we kept data in memory, we can now read the sv_correct results directly
    loaded_data = [fq.intermediate_dat[s]['sv_corr'][tstmp][0][0] for s in sysid]
    # first three beams of the first ping for each head
    x_data = [ld[0].isel(time=0).values[0:3] for ld in loaded_data]
    y_data = [ld[1].isel(time=0).values[0:3] for ld in loaded_data]
    z_data = [ld[2].isel(time=0).values[0:3] for ld in loaded_data]
    print('SVCORR {}'.format(dset))
    print([x for y in x_data for x in y.flatten()])
    print([x for y in y_data for x in y.flatten()])
    print([x for y in z_data for x in y.flatten()])
    # forward offset check
    for i in range(len(x_data)):
        assert x_data[i] == approx(expected_x[i], 0.001)
    # acrosstrack offset check
    for i in range(len(y_data)):
        assert y_data[i] == approx(expected_y[i], 0.001)
    # depth offset check
    for i in range(len(z_data)):
        assert z_data[i] == approx(expected_z[i], 0.001)
    fq.close()
    print('Passed: sv_correct')
def georef_xyz(dset='realdualhead'):
    """
    Automated test of fqpr_generation georef_xyz
    Will run using the 'real' dataset or 'realdualhead' included in the test_datasets file.
    No current support for the synthetic dataset, need to look at adding that in. I've yet to find a reason to do so
    though, now that I have the real pings.
    Parameters
    ----------
    dset: str, specify which dataset you want to use, one of 'real' and 'realdualhead'
    """
    vert_ref = 'waterline'
    datum = 'NAD83'
    # expected georeferenced values for the first three beams of each head:
    # x = easting, y = northing, z = depth
    if dset == 'real':
        synth = load_dataset(RealFqpr())
        expected_x = [np.array([539017.745, 539018.535, 539019.322], dtype=np.float64)]
        expected_y = [np.array([5292788.295, 5292789.346, 5292790.396], dtype=np.float64)]
        expected_z = [np.array([91.789, 91.699, 91.605], dtype=np.float32)]
    elif dset == 'realdualhead':
        synth = load_dataset(RealDualheadFqpr())
        expected_x = [np.array([492984.906, 492984.867, 492984.787], dtype=np.float64),
                      np.array([492943.083, 492942.971, 492942.859], dtype=np.float64)]
        expected_y = [np.array([3365068.225, 3365068.25, 3365068.305], dtype=np.float64),
                      np.array([3365096.742, 3365096.82, 3365096.898], dtype=np.float64)]
        expected_z = [np.array([22.087, 22.124, 22.141], dtype=np.float32),
                      np.array([22.692, 22.704, 22.714], dtype=np.float32)]
    else:
        raise NotImplementedError('mode not recognized')
    fq = fqpr_generation.Fqpr(synth)
    fq.logger = logging.getLogger()
    fq.logger.setLevel(logging.INFO)
    fq.read_from_source()
    # dump_data=False keeps the intermediate results in memory so they can be
    # read back below instead of being written to disk
    fq.get_orientation_vectors(dump_data=False, initial_interp=False)
    fq.get_beam_pointing_vectors(dump_data=False)
    fq.sv_correct(dump_data=False)
    fq.construct_crs(datum=datum, projected=True, vert_ref=vert_ref)
    fq.georef_xyz(dump_data=False)
    # arrays of computed vectors, one entry per system/head
    sysid = [rp.system_identifier for rp in fq.multibeam.raw_ping]
    tstmp = list(fq.intermediate_dat[sysid[0]]['georef'].keys())[0]
    # since we kept data in memory, we can now read the georef_xyz results directly
    loaded_data = [fq.intermediate_dat[s]['georef'][tstmp][0][0] for s in sysid]
    # first three beams of the first ping for each head
    x_data = [ld[0].isel(time=0).values[0:3] for ld in loaded_data]
    y_data = [ld[1].isel(time=0).values[0:3] for ld in loaded_data]
    z_data = [ld[2].isel(time=0).values[0:3] for ld in loaded_data]
    print('GEOREF {}'.format(dset))
    print([x for y in x_data for x in y.flatten()])
    print([x for y in y_data for x in y.flatten()])
    print([x for y in z_data for x in y.flatten()])
    # easting
    for i in range(len(x_data)):
        assert x_data[i] == approx(expected_x[i], 0.001)
    # northing
    for i in range(len(y_data)):
        assert y_data[i] == approx(expected_y[i], 0.001)
    # depth
    for i in range(len(z_data)):
        assert z_data[i] == approx(expected_z[i], 0.001)
    fq.close()
    print('Passed: georef_xyz')
def test_interp_across_chunks():
    """
    Verify interp_across_chunks returns identical results with and without a dask
    client, and that both match precomputed attitude values.
    """
    synth = load_dataset(RealFqpr(), skip_dask=False)
    # 11 attitude values chunked by 4 gives chunk sizes (4, 4, 3), so the
    # interpolation has to stitch values across chunk boundaries
    att = synth.raw_att.chunk(4)
    new_times = np.array([1495563084.455, 1495563084.490, 1495563084.975])
    times_interp_to = xr.DataArray(new_times, dims={'time'}, coords={'time': new_times})
    dask_interp_att = interp_across_chunks(att, times_interp_to, dimname='time', daskclient=synth.client)
    interp_att = interp_across_chunks(att, times_interp_to, dimname='time')
    expected_att = xr.Dataset(
        {'heading': (['time'], np.array([307.8539977551496, 307.90348427192055, 308.6139892100822])),
         'heave': (['time'], np.array([0.009999999776482582, 0.009608692733222632, -0.009999999776482582])),
         'roll': (['time'], np.array([0.4400004684929343, 0.07410809820512047, -4.433999538421631])),
         'pitch': (['time'], np.array([-0.5, -0.5178477924436871, -0.3760000467300415]))},
        coords={'time': np.array([1495563084.455, 1495563084.49, 1495563084.975])})
    # the dask and non-dask code paths must agree, and both must match the
    # precomputed expected values
    for variable in ('time', 'heading', 'heave', 'pitch', 'roll'):
        assert dask_interp_att[variable].values == approx(interp_att[variable].values, 0.001)
        assert dask_interp_att[variable].values == approx(expected_att[variable].values, 0.001)
    print('Passed: interp_across_chunks')
def build_georef_correct_comparison(dset='realdual', vert_ref='waterline', datum='NAD83'):
    """
    Generate mine/kongsberg xyz88 data set from the test dataset.
    Will run using the 'realdualhead' dataset included in this file or a small synthetic test dataset with meaningless
    numbers that I've just come up with.
    Parameters
    ----------
    dset: str, specify which dataset you want to use
    vert_ref: str, vertical reference, one of ['waterline', 'vessel', 'ellipse']
    datum: str, datum identifier, anything recognized by pyproj CRS
    Returns
    -------
    loaded_data: list, per ping [alongtrack, acrosstrack, depth, beam angle] computed by kluster
    xyz88_data: list, matching values taken from the kongsberg xyz88 records of the dataset class
    """
    if dset == 'real':
        synth_dat = RealFqpr()
        synth = load_dataset(synth_dat)
    elif dset == 'realdual':
        synth_dat = RealDualheadFqpr()
        synth = load_dataset(synth_dat)
    else:
        raise NotImplementedError('mode not recognized')
    fq = fqpr_generation.Fqpr(synth)
    fq.logger = logging.getLogger()
    fq.logger.setLevel(logging.INFO)
    fq.read_from_source()
    # run the full processing chain, keeping intermediate results in memory
    fq.get_orientation_vectors(dump_data=False, initial_interp=False)
    fq.get_beam_pointing_vectors(dump_data=False)
    fq.sv_correct(dump_data=False)
    fq.construct_crs(datum=datum, projected=True, vert_ref=vert_ref)
    fq.georef_xyz(dump_data=False)
    secs = fq.return_sector_ids()
    tstmp = list(fq.intermediate_dat[secs[0]]['xyz'].keys())[0]
    # intermediate results here are futures, so resolve them with result()
    loaded_xyz_data = [fq.intermediate_dat[s]['xyz'][tstmp][0][0].result() for s in fq.return_sector_ids()]
    loaded_sv_data = [fq.intermediate_dat[s]['sv_corr'][tstmp][0][0].result() for s in fq.return_sector_ids()]
    # beam angles converted from radians to degrees to match the kongsberg records
    loaded_ang_data = [np.rad2deg(fq.intermediate_dat[s]['bpv'][tstmp][0][0].result()[1]) for s in
                       fq.return_sector_ids()]
    # drop the in-memory intermediates now that they are resolved
    fq.intermediate_dat = {}
    if dset == 'realdual':
        loaded_data = [[loaded_sv_data[i][0].values[0], loaded_sv_data[i][1].values[0], loaded_xyz_data[i][2].values[0],
                        loaded_ang_data[i].values[0]] for i in range(int(len(loaded_xyz_data)))]
        # apply waterline, z lever arm and z phase center offsets to get at the actual waterline rel value
        depth_wline_addtl = [-float(fq.multibeam.xyzrph['waterline'][tstmp]) +
                             float(fq.multibeam.xyzrph['tx_port_z'][tstmp]) +
                             float(fq.multibeam.xyzrph['tx_port_z_1'][tstmp]),
                             -float(fq.multibeam.xyzrph['waterline'][tstmp]) +
                             float(fq.multibeam.xyzrph['tx_port_z'][tstmp]) +
                             float(fq.multibeam.xyzrph['tx_port_z_1'][tstmp]),
                             -float(fq.multibeam.xyzrph['waterline'][tstmp]) +
                             float(fq.multibeam.xyzrph['tx_stbd_z'][tstmp]) +
                             float(fq.multibeam.xyzrph['tx_stbd_z_1'][tstmp]),
                             -float(fq.multibeam.xyzrph['waterline'][tstmp]) +
                             float(fq.multibeam.xyzrph['tx_stbd_z'][tstmp]) +
                             float(fq.multibeam.xyzrph['tx_stbd_z_1'][tstmp])]
        # kongsberg angles are rel horiz, here is what I came up with to get vert rel angles (to match kluster)
        xyz_88_corrangle = [90 - np.array(synth_dat.xyz88_corrangle[0]), 90 - np.array(synth_dat.xyz88_corrangle[1]),
                            np.array(synth_dat.xyz88_corrangle[2]) - 90, np.array(synth_dat.xyz88_corrangle[3]) - 90]
        xyz88_data = [[np.array(synth_dat.xyz88_alongtrack[i]), np.array(synth_dat.xyz88_acrosstrack[i]),
                       np.array(synth_dat.xyz88_depth[i]) + depth_wline_addtl[i],
                       xyz_88_corrangle[i]] for i in range(int(len(synth_dat.xyz88_depth)))]
    elif dset == 'real':
        loaded_data = []
        # two pings (tme 0 and 1); sectors [0, 2, 4] and [1, 3, 5] presumably
        # interleave into the two swaths - TODO confirm sector ordering
        for tme in [0, 1]:
            for secs in [[0, 2, 4], [1, 3, 5]]:
                # stitch the three sectors of this swath together, dropping NaN beams
                dpth = np.concatenate(
                    [loaded_xyz_data[secs[0]][2].values[tme][~np.isnan(loaded_xyz_data[secs[0]][2].values[tme])],
                     loaded_xyz_data[secs[1]][2].values[tme][~np.isnan(loaded_xyz_data[secs[1]][2].values[tme])],
                     loaded_xyz_data[secs[2]][2].values[tme][~np.isnan(loaded_xyz_data[secs[2]][2].values[tme])]])
                along = np.concatenate(
                    [loaded_sv_data[secs[0]][0].values[tme][~np.isnan(loaded_sv_data[secs[0]][0].values[tme])],
                     loaded_sv_data[secs[1]][0].values[tme][~np.isnan(loaded_sv_data[secs[1]][0].values[tme])],
                     loaded_sv_data[secs[2]][0].values[tme][~np.isnan(loaded_sv_data[secs[2]][0].values[tme])]])
                across = np.concatenate(
                    [loaded_sv_data[secs[0]][1].values[tme][~np.isnan(loaded_sv_data[secs[0]][1].values[tme])],
                     loaded_sv_data[secs[1]][1].values[tme][~np.isnan(loaded_sv_data[secs[1]][1].values[tme])],
                     loaded_sv_data[secs[2]][1].values[tme][~np.isnan(loaded_sv_data[secs[2]][1].values[tme])]])
                angle = np.concatenate(
                    [loaded_ang_data[secs[0]].values[tme][~np.isnan(loaded_ang_data[secs[0]].values[tme])],
                     loaded_ang_data[secs[1]].values[tme][~np.isnan(loaded_ang_data[secs[1]].values[tme])],
                     loaded_ang_data[secs[2]].values[tme][~np.isnan(loaded_ang_data[secs[2]].values[tme])]])
                loaded_data.append([along, across, dpth, angle])
        # in the future, include sec index to get the additional phase center offsets included here
        depth_wline_addtl = -float(fq.multibeam.xyzrph['waterline'][tstmp]) + float(fq.multibeam.xyzrph['tx_z'][tstmp])
        # kongsberg angles are rel horiz, here is what I came up with to get vert rel angles (to match kluster)
        xyz_88_corrangle = []
        for ang in synth_dat.xyz88_corrangle:
            ang = 90 - np.array(ang)
            # flip the sign from the minimum angle onward (starboard side)
            ang[np.argmin(ang):] = ang[np.argmin(ang):] * -1
            xyz_88_corrangle.append(ang)
        xyz88_data = [[np.array(synth_dat.xyz88_alongtrack[i]), np.array(synth_dat.xyz88_acrosstrack[i]),
                       np.array(synth_dat.xyz88_depth[i]) + depth_wline_addtl, xyz_88_corrangle[i]] for i in
                      range(int(len(synth_dat.xyz88_depth)))]
    else:
        raise NotImplementedError('only real and realdual are currently implemented')
    fq.close()
    return loaded_data, xyz88_data
def _comparison_figure(ping_number, my_vals, kongs_vals):
    """
    Build one 4-panel (depth/along/across/angle) comparison figure for a single ping.

    Parameters
    ----------
    ping_number: int, 1-based ping number used in the figure title
    my_vals: sequence of four arrays [alongtrack, acrosstrack, depth, angle] computed by kluster
    kongs_vals: sequence of four arrays [alongtrack, acrosstrack, depth, angle] from kongsberg xyz88

    Returns
    -------
    list, [figure, depth subplot, along subplot, across subplot, angle subplot]
    """
    fig, (z_plt, x_plt, y_plt, ang_plt) = plt.subplots(4)
    fig.suptitle('Ping {}'.format(ping_number))
    z_plt.set_title('depth compare')
    x_plt.set_title('along compare')
    y_plt.set_title('across compare')
    ang_plt.set_title('angle compare')
    # kluster values in blue, kongsberg values in red on each panel
    z_plt.plot(my_vals[2], c='b')
    z_plt.plot(kongs_vals[2], c='r')
    x_plt.plot(my_vals[0], c='b')
    x_plt.plot(kongs_vals[0], c='r')
    y_plt.plot(my_vals[1], c='b')
    y_plt.plot(kongs_vals[1], c='r')
    ang_plt.plot(my_vals[3], c='b')
    ang_plt.plot(kongs_vals[3], c='r')
    return [fig, z_plt, x_plt, y_plt, ang_plt]


def build_kongs_comparison_plots(dset='realdual', vert_ref='waterline', datum='NAD83'):
    """
    Use the build_georef_correct_comparison function to get kongsberg and my created values from the test_dataset
    and build some comparison plots.

    The per-ping plotting code was duplicated across the two branches; it now
    lives in the _comparison_figure helper.

    Parameters
    ----------
    dset: string identifier, identifies which of the test_datasets to use
    vert_ref: str, vertical reference, one of ['waterline', 'vessel', 'ellipse']
    datum: str, datum identifier, anything recognized by pyproj CRS
    Returns
    -------
    plots: list, each element of the list is a list of the figure and all the subplots associated with that ping
    """
    mine, kongsberg = build_georef_correct_comparison(dset=dset, vert_ref=vert_ref, datum=datum)
    plots = []
    if dset == 'realdual':
        # dual head: stitch the paired sector entries (0+2 and 1+3) into one ping each
        for cnt, idxs in enumerate([[0, 2], [1, 3]]):
            print('Generating Ping {} plot'.format(cnt + 1))
            my_vals = [np.concatenate([mine[idxs[0]][j], mine[idxs[1]][j]]) for j in range(4)]
            kongs_vals = [np.concatenate([kongsberg[idxs[0]][j], kongsberg[idxs[1]][j]]) for j in range(4)]
            plots.append(_comparison_figure(cnt + 1, my_vals, kongs_vals))
    else:
        for i in range(len(mine)):
            print('Generating Ping {} plot'.format(i + 1))
            plots.append(_comparison_figure(i + 1, mine[i], kongsberg[i]))
    return plots
def load_dataset(dset=None, skip_dask=True):
    """
    Build an xarray_conversion BatchRead object from one of the test dataset classes.
    With dset=None a zeroed SyntheticFqpr is used; otherwise pass an instance of
    RealFqpr, RealDualheadFqpr, SyntheticFqpr, etc.

    Parameters
    ----------
    dset: optional, if None will use SyntheticFqpr with zeroed values, otherwise one of RealFqpr, RealDualheadFqpr,
        SyntheticFqpr, etc classes.
    skip_dask: passed through to BatchRead (presumably skips starting a dask client - confirm)

    Returns
    -------
    kongs_dat: xarray_conversion BatchRead object
    """
    if dset is None:
        dset = SyntheticFqpr(synth_time=0, synth_heave=0, synth_roll=0, synth_pitch=0, synth_yaw=0,
                             synth_tx_mountroll=0, synth_tx_mountpitch=0, synth_tx_mountyaw=0, synth_rx_mountroll=0,
                             synth_rx_mountpitch=0, synth_rx_mountyaw=0, secs=('999_0_290000', '999_0_300000'))
    batch = xarray_conversion.BatchRead('', skip_dask=skip_dask)
    batch.logger = logging.getLogger()
    batch.logger.setLevel(logging.INFO)
    # copy the lever arms/angles and raw records straight off the dataset class
    batch.xyzrph = dset.xyzrph
    batch.raw_ping = dset.raw_ping
    batch.raw_att = dset.raw_att
    return batch
|
import os
from pathlib import Path
from joblib import dump, load
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from gensim.models import Word2Vec, Doc2Vec, FastText
from gensim.models.phrases import Phrases
from academia_tag_recommender.stopwords import stopwordlist
from academia_tag_recommender.embeddings import Doc2Tagged, Word2Tok, doc2vector, word_averaging_list
from academia_tag_recommender.preprocessor import BasicPreprocessor
from academia_tag_recommender.tokenizer import BasicTokenizer, EnglishStemmer, Lemmatizer
from sklearn.decomposition import TruncatedSVD
from academia_tag_recommender.definitions import MODELS_PATH
# Folder where fitted transformers are persisted to disc with joblib.
PATH = Path(MODELS_PATH) / 'transformer'
# Fixed seed so the dimensionality reduction is reproducible across runs.
RANDOM_STATE = 0
# Number of components kept by the dimensionality reduction step.
# NOTE(review): the exact value 5993 looks dataset-derived - confirm its origin.
MAX_FEATURES = 5993
# Short name -> vectorizer/embedding class used to build a transformer.
TRANSFORMER_OPTIONS = {
    'tfidf': TfidfVectorizer,
    'count': CountVectorizer,
    'word2vec': Word2Vec,
    'doc2vec': Doc2Vec,
    'fasttext': FastText
}
# Short name -> tokenizer class used by the bag-of-words transformers.
TOKENIZER_OPTIONS = {
    'basic': BasicTokenizer,
    'stemmer': EnglishStemmer,
    'lemmatizer': Lemmatizer
}
# Short name -> dimensionality reduction class.
DIM_REDUCE_OPTIONS = {
    'lsa': TruncatedSVD
}
class Transformer:
    """Base class for text-to-vector transformers.

    Attributes:
        vectorizer_short_name: The short name of the used vectorizer as :class:`str`.
        vectorizer: The used vectorizer (the class from TRANSFORMER_OPTIONS, not an instance).
    """

    def __init__(self, vectorizer):
        self.vectorizer_short_name = vectorizer
        self.vectorizer = TRANSFORMER_OPTIONS[vectorizer]

    def __str__(self):
        return 'v={}'.format(self.vectorizer_short_name)

    def fit(self, X):
        # concrete subclasses implement fitting
        raise NotImplementedError

    def transform(self, X):
        # concrete subclasses implement transformation
        raise NotImplementedError

    @classmethod
    def load(cls, vectorizer_short_name):
        """Loads an existing Transformer instance from disc or creates new if none exists.

        Args:
            vectorizer_short_name: The short name of the used vectorizer as :class:`str`.

        Returns:
            The configured :class:`Transformer` instance.
        """
        path = cls._to_path(vectorizer_short_name)
        if not os.path.isfile(path):
            print('Transformer not available. Initiating data.')
            return cls(vectorizer_short_name)
        return load(path)

    @staticmethod
    def _to_path(vectorizer_short_name):
        """Converts vectorizer name into path name.

        Args:
            vectorizer_short_name: The short name of the used vectorizer as :class:`str`.

        Returns:
            The Path of the vector as :class:`pathlib.Path`.
        """
        filename = 'v={}.joblib'.format(vectorizer_short_name)
        return PATH / filename
class BagOfWordsTransformer(Transformer):
    """The transformer to represent texts as bag of word vectors.

    NOTE(review): instances are persisted to disc via joblib (see fit), so the
    attribute layout is effectively part of the on-disc format.

    Attributes:
        vectorizer_short_name: The short name of the used vectorizer as :class:`str`.
        vectorizer: The used vectorizer instance.
        tokenizer_short_name: The short name of the used tokenizer as :class:`str`.
        tokenizer: The used tokenizer class.
        dimension_reduction_short_name: The short name of the used dimension reduction as :class:`str`.
        dimension_reduction: The used dimension reduction instance.
        path: The path where the transformer is stored on the disc as :class:`pathlib.Path`.
    """

    def __init__(self, vectorizer, tokenizer, dimension_reduction):
        """
        Args:
            vectorizer: The short name of the used vectorizer as :class:`str`.
            tokenizer: The short name of the used tokenizer as :class:`str`.
            dimension_reduction: The short name of the used dimension reduction as :class:`str`.
        """
        self.tokenizer_short_name = tokenizer
        self.tokenizer = TOKENIZER_OPTIONS[tokenizer]
        self.vectorizer_short_name = vectorizer
        # min_df=2 drops terms that occur in fewer than two documents
        self.vectorizer = TRANSFORMER_OPTIONS[vectorizer](
            min_df=2, tokenizer=self.tokenizer(), preprocessor=BasicPreprocessor(), stop_words=stopwordlist, ngram_range=(1, 1))
        self.dimension_reduction_short_name = dimension_reduction
        self.dimension_reduction = DIM_REDUCE_OPTIONS[dimension_reduction](
            n_components=MAX_FEATURES, random_state=RANDOM_STATE)
        self.path = BagOfWordsTransformer._to_path(
            self.vectorizer_short_name, self.tokenizer_short_name, self.dimension_reduction_short_name)

    def __str__(self):
        return 'v={}&t={}&dim_reduce={}'.format(self.vectorizer_short_name, self.tokenizer_short_name, self.dimension_reduction_short_name)

    def fit(self, X):
        """Creates a transformer based on X.

        Fits the vectorizer and the dimensionality reduction on X and persists
        the whole transformer to disc; if a persisted transformer already
        exists at self.path, X is only transformed.

        Args:
            X: The samples data as :class:`list`.

        Returns:
            The transformed samples as :class:`list`.
        """
        # NOTE(review): path was already set in __init__; recomputed here,
        # presumably in case the short names changed since construction - confirm
        self.path = BagOfWordsTransformer._to_path(
            self.vectorizer_short_name, self.tokenizer_short_name, self.dimension_reduction_short_name)
        if not os.path.isfile(self.path):
            features = self.vectorizer.fit_transform(X)
            reduced = self.dimension_reduction.fit_transform(features)
            dump(self, self.path)
            return reduced
        else:
            return self.transform(X)

    def transform(self, X):
        """Transforms X.

        Args:
            X: The samples data as :class:`list`.

        Returns:
            The transformed samples as :class:`list`.
        """
        features = self.vectorizer.transform(X)
        return self.dimension_reduction.transform(features)

    @classmethod
    def load(cls, vectorizer_short_name, tokenizer_short_name, dimension_reduction_short_name):
        """Loads an existing BagOfWordsTransformer instance from disc or creates new if none exists.

        Args:
            vectorizer_short_name: The short name of the used vectorizer as :class:`str`.
            tokenizer_short_name: The short name of the used tokenizer as :class:`str`.
            dimension_reduction_short_name: The short name of the used dimension reduction as :class:`str`.

        Returns:
            The configured :class:`BagOfWordsTransformer` instance.
        """
        path = cls._to_path(
            vectorizer_short_name, tokenizer_short_name, dimension_reduction_short_name)
        if os.path.isfile(path):
            return load(path)
        else:
            print('Transformer not available. Initiating data.')
            return cls(
                vectorizer_short_name, tokenizer_short_name, dimension_reduction_short_name)

    @staticmethod
    def _to_path(vectorizer_short_name, tokenizer_short_name, dimension_reduction_short_name):
        """Converts vectorizer name, tokenizer name and dimensionreduction name into path name.

        Args:
            vectorizer_short_name: The short name of the used vectorizer as :class:`str`.
            tokenizer_short_name: The short name of the used tokenizer as :class:`str`.
            dimension_reduction_short_name: The short name of the used dimension reduction as :class:`str`.

        Returns:
            The Path as :class:`pathlib.Path`.
        """
        return PATH / 'v={}&t={}&dim_reduce={}.joblib'.format(vectorizer_short_name, tokenizer_short_name, dimension_reduction_short_name)
class EmbeddingTransformer(Transformer):
    """The transformer to represent texts as embedding vectors.

    Attributes:
        vectorizer_short_name: The short name of the used vectorizer as :class:`str`.
        vector_size: The embedding dimensionality as :class:`int`.
        path: The path where the transformer is stored on the disc as :class:`pathlib.Path`.
    """

    def __init__(self, vectorizer, vector_size=100):
        self.vectorizer_short_name = vectorizer
        self.vector_size = vector_size
        self.path = EmbeddingTransformer._to_path(vectorizer, vector_size)

    def __str__(self):
        return 'v={}&size={}'.format(self.vectorizer_short_name, self.vector_size)

    def fit(self, X, bigramify=False):
        """Creates a transformer based on X.

        Args:
            X: The samples data as :class:`list`.
            bigramify: If True, bigrams will be included.

        Returns:
            The transformed samples as :class:`list`.
        """
        # train only once - a previously fitted instance already has a vectorizer
        if not hasattr(self, 'vectorizer'):
            self._prepare(X, bigramify)
        # NOTE(review): transform is called without bigramify here even when fit
        # was bigramified - confirm this is intentional
        transformed = self.transform(X)
        dump(self, self.path)
        return transformed

    def _prepare(self, X, bigramify):
        # subclasses train their specific embedding model here
        raise NotImplementedError

    def transform(self, X, bigramify=False):
        """Transforms X.

        Args:
            X: The samples data as :class:`list`.
            bigramify: If True, bigrams will be included.

        Returns:
            The transformed samples as :class:`list`.
        """
        wrapped = [[sample] for sample in X]
        if bigramify:
            wrapped = self.bigram_transformer[wrapped]
        tokens = Word2Tok(wrapped)
        return word_averaging_list(self.vectorizer, tokens)

    @classmethod
    def load(cls, vectorizer_short_name, vector_size=100):
        """Loads an existing Transformer instance from disc or creates new if none exists.

        Args:
            vectorizer_short_name: The short name of the used vectorizer as :class:`str`.
            vector_size: The vector size of resulting vectorizer as :class:`int`.

        Returns:
            The configured :class:`EmbeddingTransformer` instance.
        """
        path = cls._to_path(vectorizer_short_name, vector_size)
        if not os.path.isfile(path):
            print('Transformer not available. Initiating data.')
            return cls(vectorizer_short_name, vector_size)
        return load(path)

    @staticmethod
    def _to_path(vectorizer_short_name, vector_size):
        """Converts vectorizer name and vector size into path name.

        Args:
            vectorizer_short_name: The short name of the used vectorizer as :class:`str`.
            vector_size: The vector size of resulting vectorizer as :class:`int`.

        Returns:
            The Path as :class:`pathlib.Path`.
        """
        filename = 'v={}&size={}.joblib'.format(vectorizer_short_name, vector_size)
        return PATH / filename
class Word2VecTransformer(EmbeddingTransformer):
    """Embedding transformer backed by a trained word2vec model.

    Attributes:
        vectorizer: The trained keyed vectors as :class:`gensim.models.keyedvectors.Word2VecKeyedVectors`.
        bigram_transformer: The sentence to bigram transformer as :class:`gensim.models.phrases.Phrases`
            (only set when bigramify is used).
    """

    def _prepare(self, X, bigramify):
        """Trains a word2vec model on X, optionally detecting bigrams first.

        Args:
            X: The samples data as :class:`list`.
            bigramify: If True, bigrams will be included.
        """
        documents = [[sample] for sample in X]
        token_stream = Word2Tok(documents, flat=False)
        if bigramify:
            self.bigram_transformer = Phrases(token_stream, min_count=1)
            token_stream = self.bigram_transformer[token_stream]
        trained = Word2Vec(sentences=token_stream, size=self.vector_size)
        self.vectorizer = trained.wv
        del trained
        # normalize the vectors in place and drop the training-only state
        self.vectorizer.init_sims(replace=True)
class FastTextTransformer(EmbeddingTransformer):
    """Embedding transformer backed by a trained fasttext model.

    Attributes:
        vectorizer: The trained keyed vectors as :class:`gensim.models.keyedvectors.FastTextKeyedVectors`.
        bigram_transformer: The sentence to bigram transformer as :class:`gensim.models.phrases.Phrases`
            (only set when bigramify is used).
    """

    def _prepare(self, X, bigramify):
        """Trains a fasttext model on X, optionally detecting bigrams first.

        Args:
            X: The samples data as :class:`list`.
            bigramify: If True, bigrams will be included.
        """
        documents = [[sample] for sample in X]
        token_stream = Word2Tok(documents, flat=False)
        if bigramify:
            self.bigram_transformer = Phrases(token_stream, min_count=1)
            token_stream = self.bigram_transformer[token_stream]
        trained = FastText(size=self.vector_size, window=3, min_count=2)
        trained.build_vocab(sentences=token_stream)
        trained.train(sentences=token_stream,
                      total_examples=trained.corpus_count, epochs=20)
        self.vectorizer = trained.wv
        del trained
        # normalize the vectors in place and drop the training-only state
        self.vectorizer.init_sims(replace=True)
class Doc2VecTransformer(EmbeddingTransformer):
    """The transformer to represent texts as doc2vec vectors.

    Attributes:
        vectorizer: The trained model as :class:`gensim.models.doc2vec.Doc2Vec`.
    """

    def _prepare(self, X, bigramify=False):
        """Trains a doc2vec transformer based on X.

        Bug fix: the inherited :meth:`EmbeddingTransformer.fit` calls
        ``self._prepare(X, bigramify)`` with two positional arguments, so the
        previous single-argument signature raised ``TypeError`` whenever
        ``fit`` was invoked. ``bigramify`` is now accepted for interface
        compatibility but ignored, since doc2vec trains on whole tagged
        documents rather than a bigrammed token stream.

        Args:
            X: The samples data as :class:`list`.
            bigramify: Accepted for compatibility with the base class; unused.

        Returns:
            The transformed samples as :class:`list`.
        """
        X = [[x] for x in X]
        tokens = Doc2Tagged(X, tag=True)
        self.vectorizer = Doc2Vec(
            vector_size=self.vector_size, min_count=2, epochs=20)
        self.vectorizer.build_vocab(tokens)
        self.vectorizer.train(tokens, total_examples=self.vectorizer.corpus_count,
                              epochs=self.vectorizer.epochs)
        return doc2vector(self.vectorizer, [sample.words for sample in tokens])

    def transform(self, X):
        """Transforms X.

        Args:
            X: The samples data as :class:`list`.

        Returns:
            The transformed samples as :class:`list`.
        """
        X = [[x] for x in X]
        X_doc2tok = Doc2Tagged(X)
        return doc2vector(self.vectorizer, X_doc2tok)
|
import os
# Headless environments (CI, remote shells) have no DISPLAY variable; switch to
# the non-interactive Agg backend so matplotlib can still render to files.
# This must run before anything imports matplotlib.pyplot.
if "DISPLAY" not in os.environ:
    import matplotlib
    print("No DISPLAY found. Switching to noninteractive matplotlib backend...")
    print("Old backend is: {}".format(matplotlib.get_backend()))
    matplotlib.use('Agg')
    print("New backend is: {}".format(matplotlib.get_backend()))
import utils.parallel.runner as pr
from validation.crossvalidate.crossval_vcgpdm import VCGPDMCrossval
from validation.crossvalidate.crossval_dynamic_mp import DMPCrossval
from validation.crossvalidate.crossval_temporal_mp import TMPCrossval
import validation.crossvalidate.common as vcc
class CrossvalidationRunner(pr.JobRunner):
    """Job runner that trains crossvalidation models (vcgpdm / dmp / tmp)."""
    def __init__(self):
        super(CrossvalidationRunner, self).__init__(
            realpath=os.path.realpath(__file__),
            infostring="Runs crossvalidation model training")
    def create_params(self):
        """Return the grid of task parameters (reduced, non-full set)."""
        return vcc.create_params(full=False)
    def run_task(self, param):
        """Execute one crossvalidation task described by *param*."""
        print("Task parameters: {}".format(param))
        out_dir = os.path.join(self.args.dir, self.taskparams.to_dir_name(param))
        # Pick the crossvalidation class matching the requested model type.
        crossval_cls = {
            "vcgpdm": VCGPDMCrossval,
            "dmp": DMPCrossval,
            "tmp": TMPCrossval,
        }[param["model"]]
        crossval_cls(out_dir, param, self.args)
# Constructing the runner presumably kicks off argument parsing and job
# execution inside pr.JobRunner.__init__ -- TODO(review): confirm.
if __name__ == "__main__":
    CrossvalidationRunner()
|
"""
Operating on values of mappings (by default, dictionaries) while keeping keys aligned
"""
from linkup.base import (
map_op_val,
key_aligned_val_op_with_forced_defaults,
key_aligned_val_op,
OperableMapping,
)
|
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from hub.models import (OneallToken, Company, Category, Job,
JobApplication, IndeedJob, PremiumOrder, PaymentConfirmation)
# Inline editor so a user's OneallToken rows show up on the User admin page.
class OneallTokenAdmin(admin.TabularInline):
    model = OneallToken
    can_delete = False
# NOTE(review): this subclass deliberately shadows the imported
# django.contrib.auth.admin.UserAdmin; the base-class reference in the
# parentheses is resolved before the new name is bound, so this is legal
# Python, but a distinct name would be clearer.
class UserAdmin(UserAdmin):
    inlines = [OneallTokenAdmin]
    save_on_top = True
# Replace Django's stock User admin with the token-aware subclass above.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
class CompanyAdmin(admin.ModelAdmin):
    """Admin for Company: searchable by name and owner, key fields listed."""
    save_on_top = True
    search_fields = ['name', 'user__username']
    list_display = ['user', 'name', 'email', 'address', 'website']
admin.site.register(Company, CompanyAdmin)
class CategoryAdmin(admin.ModelAdmin):
    """Admin for Category; `order` is editable directly in the changelist."""
    list_display = ['name', 'indeed_query', 'order']
    list_editable = ['order']
    save_on_top = True
# Backward-compatible alias for the old misspelled class name.
CategoryAdmn = CategoryAdmin
admin.site.register(Category, CategoryAdmin)
class JobAdmin(admin.ModelAdmin):
    """Admin for Job postings; filterable by workflow state and ad type."""
    save_on_top = True
    list_filter = ('status', 'approved', 'ads_type', 'started', 'ended')
    search_fields = ['title', 'user__username', 'company__name']
    list_display = ['title', 'user', 'status', 'approved', 'ads_type', 'company', 'started', 'ended']
admin.site.register(Job, JobAdmin)
class JobApplicationAdmin(admin.ModelAdmin):
    """Admin for JobApplication; filterable by read/unread status."""
    save_on_top = True
    list_filter = ('read_status',)
    search_fields = ['name', 'job__title', 'company__name', 'email']
    list_display = ['name', 'email', 'job', 'company', 'resume', 'read_status']
admin.site.register(JobApplication, JobApplicationAdmin)
class IndeedJobAdmin(admin.ModelAdmin):
    """Admin for imported IndeedJob entries; filterable by category name."""
    save_on_top = True
    list_filter = ('category__name',)
    search_fields = ['title', 'company', 'location', 'jobkey']
    list_display = ['title', 'company', 'location', 'jobkey', 'category']
admin.site.register(IndeedJob, IndeedJobAdmin)
class PremiumOrderAdmin(admin.ModelAdmin):
    """Admin for PremiumOrder; filterable by order status."""
    save_on_top = True
    list_filter = ('status',)
    search_fields = ['user__username', 'job_title']
    list_display = ['user', 'job_title', 'status', 'amount', 'created']
admin.site.register(PremiumOrder, PremiumOrderAdmin)
class PaymentConfirmationAdmin(admin.ModelAdmin):
    """Admin for bank-transfer PaymentConfirmation records."""
    save_on_top = True
    list_filter = ('checked', 'date',)
    search_fields = ['user__username', 'from_bank', 'from_name', 'from_acc']
    list_display = ['user', 'checked', 'order', 'from_bank', 'from_name', 'from_acc', 'from_amount', 'date', 'from_note']
admin.site.register(PaymentConfirmation, PaymentConfirmationAdmin)
|
from tkinter import *
from tkinter.tix import Tk, Control, ComboBox # 升级的控件组包
from tkinter.messagebox import showinfo, showwarning, showerror # 各种类型的提示框
from tkinter import filedialog
from PIL import Image, ImageTk
from tkinter.messagebox import *
import os
import operator
from numpy import *
import cv2
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
# from sklearn.externals import joblib
import joblib
# --- Algorithm section ---
# Load the ORL face dataset from disk.
def loadDataSet(k):  # k: how many of each person's 10 images go to the training set
    """Split the ORL faces into random train/test sets.

    Args:
        k: Number of images per person (out of 10) used for training.

    Returns:
        (train_face, train_face_number, test_face, test_face_number) where
        the ``*_face`` arrays hold flattened 112x92 grayscale images, one
        per row, and the ``*_number`` arrays hold 1-based person labels.
    """
    dataSetDir = 'ORL_Faces'
    # Random permutation of 1..10 so each run uses a different split.
    choose = random.permutation(10) + 1
    train_face = zeros((40 * k, 112 * 92))
    train_face_number = zeros(40 * k)
    test_face = zeros((40 * (10 - k), 112 * 92))
    test_face_number = zeros(40 * (10 - k))
    for i in range(40):  # 40 subjects
        people_num = i + 1
        for j in range(10):  # 10 face images per subject
            if j < k:  # first k shuffled images -> TRAINING set (original comment said test set)
                filename = dataSetDir + '/s' + str(people_num) + '/' + str(choose[j]) + '.pgm'
                img = img2vector(filename)
                train_face[i * k + j, :] = img
                train_face_number[i * k + j] = people_num
            else:  # remaining (10 - k) images -> test set
                filename = dataSetDir + '/s' + str(people_num) + '/' + str(choose[j]) + '.pgm'
                img = img2vector(filename)
                test_face[i * (10 - k) + (j - k), :] = img
                test_face_number[i * (10 - k) + (j - k)] = people_num
    return train_face, train_face_number, test_face, test_face_number
# Convert an image file into a 1 x (rows*cols) row vector.
def img2vector(filename):
    """Read *filename* as grayscale and flatten it to shape (1, rows*cols).

    Raises:
        IOError: If the file cannot be read as an image.
    """
    img = cv2.imread(filename, 0)  # 0 -> load as grayscale
    if img is None:
        # cv2.imread returns None instead of raising on a missing/bad file;
        # fail loudly rather than crashing later on `img.shape`.
        raise IOError('cannot read image: %s' % filename)
    rows, cols = img.shape
    # (The previous zeros((1, rows*cols)) pre-allocation was dead code --
    # it was immediately overwritten by the reshape below.)
    return reshape(img, (1, rows * cols))
def facefind():
    """Train a PCA + logistic-regression face classifier on the ORL data.

    Uses 8 of each person's 10 images for training, reduces to 30 PCA
    components, fits a logistic regression, and persists it to 'lr.model'.

    Returns:
        (accuracy, recall, pca): test-set accuracy, macro-averaged test-set
        recall, and the fitted PCA transformer.
    """
    train_face, train_face_number, test_face, test_face_number = loadDataSet(8)
    # Reduce the 112*92-dim pixel vectors to 30 principal components.
    pca = PCA(n_components=30).fit(train_face)
    x_train_pca = pca.transform(train_face)
    x_test_pca = pca.transform(test_face)
    # Fit the classifier on the reduced training data.
    classifier = LogisticRegression()
    lr = classifier.fit(x_train_pca, train_face_number)
    # Persist the trained model for later prediction.
    joblib.dump(lr, 'lr.model')
    accuracy = classifier.score(x_test_pca, test_face_number)
    # Compute a real macro-averaged recall instead of the previous
    # fabricated value (accuracy * 0.7).
    from sklearn.metrics import recall_score
    recall = recall_score(
        test_face_number, classifier.predict(x_test_pca), average='macro')
    return accuracy, recall, pca
# 界面部分
def choosePic():
    """Callback for the "choose picture" button.

    Opens a file dialog, previews the chosen image, classifies it with the
    saved PCA + logistic-regression model and pops up the result. Reads the
    module-level globals ``path``, ``file``, ``pic_label``, ``pca``,
    ``accuray`` and ``recall``.
    """
    file_path = filedialog.askopenfilename()
    if not file_path:
        # User cancelled the dialog; Image.open('') would raise otherwise.
        return
    path.set(file_path)
    img_open = Image.open(file.get())
    img = ImageTk.PhotoImage(img_open)
    pic_label.config(image=img)
    # Keep a reference on the widget so Tk does not garbage-collect the image.
    pic_label.image = img
    string = str(file.get())
    # Vectorise the chosen face image.
    predict = img2vector(string)
    # Load the persisted model and classify the PCA-reduced face.
    LR = joblib.load('lr.model')
    predict_people = LR.predict(pca.transform(predict))
    string1 = str("编号:%s 精确度:%f 召回率:%f" % (predict_people, accuray, recall))
    showinfo(title='图像分析', message=string1)
# --- Initialise the Tk GUI ---
accuray, recall, pca = facefind()
root = Tk()  # root window; every widget below is laid out on it
root.geometry('260x140')
root.title("人脸识别系统")  # window title ("face recognition system")
root.resizable(width=False, height=False)  # fixed-size window
root.tk.eval('package require Tix')  # load Tix so the upgraded widgets work
path = StringVar()  # traced variable holding the chosen file path
# command= wires the button click to the choosePic callback above.
Button(root, text='选择图片', command=choosePic, width=1, height=1).grid(row=1, column=1, sticky=W + E + N + S, padx=40,
                                                                     pady=20)
file = Entry(root, state='readonly', text=path)  # read-only display of the selected path
file.grid(row=0, column=1, sticky=W + E + S + N, padx=6, pady=20)
pic_label = Label(root, text='图片', padx=30, pady=10)  # image preview area
pic_label.grid(row=0, column=2, rowspan=4, sticky=W + E + N + S)
root.mainloop()
|
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import configuration
from chainer import function_node
import chainer.functions
from chainer.utils import argument
from chainer.utils import conv
from chainer.utils import type_check
import chainerx
from chainermnx.functions.halo_exchange import halo_exchange
# Cache the cuDNN version once at import time; it gates version-dependent
# fast paths below (dilation needs >= 6000, groups need >= 7000).
if cuda.cudnn_enabled:
    _cudnn_version = cuda.cuda.cudnn.getVersion()  # type: ignore
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
# Used by deconvolution_2d.py.
# TODO(beam2d): Unify matmul implementations
def _matmul(a, b):
    """Batched matrix multiply that works for both NumPy and CuPy arrays."""
    xp = backend.get_array_module(a)
    if hasattr(xp, 'matmul'):
        return xp.matmul(a, b)
    # NumPy 1.9 lacks matmul; emulate it with einsum.
    return xp.einsum('ijl,ilk->ijk', a, b)
class Convolution2DFunction(function_node.FunctionNode):
    """2-D convolution function node with halo exchange for spatial parallelism.

    Mirrors Chainer's stock ``Convolution2DFunction`` but takes two extra
    constructor arguments, a communicator ``comm`` and a ``halo_size``. On
    the GPU path the input is first extended via ``halo_exchange`` so each
    worker sees boundary data from its neighbours (see ``forward_gpu``);
    the CPU and ChainerX paths perform no exchange.
    """
    # Flipped to True in forward_cpu when iDeep accepts the inputs.
    _use_ideep = False
    def __init__(self, comm, halo_size, stride=1, pad=0, cover_all=False, **kwargs):
        # comm: communicator handed to halo_exchange.
        # halo_size: amount of boundary data exchanged with neighbours.
        # stride/pad/cover_all/dilate/groups: standard Chainer conv2d options.
        dilate, groups = argument.parse_kwargs(
            kwargs, ('dilate', 1), ('groups', 1),
            deterministic='deterministic argument is not supported anymore. '
            'Use chainer.using_config(\'cudnn_deterministic\', value) context '
            'where value is either `True` or `False`.',
            requires_x_grad='requires_x_grad argument is not supported '
            'anymore. Just remove the argument. Note that whether to compute '
            'the gradient w.r.t. x is automatically decided during '
            'backpropagation.')
        self.comm = comm
        self.halo_size = halo_size
        self.sy, self.sx = _pair(stride)
        self.ph, self.pw = _pair(pad)
        self.cover_all = cover_all
        self.dy, self.dx = _pair(dilate)
        self.groups = groups
        if self.dx < 1 or self.dy < 1:
            raise ValueError('Dilate should be positive, but {} is '
                             'supplied.'.format(dilate))
    def check_type_forward(self, in_types):
        """Expect float 4-D x and W (plus an optional 1-D bias) with matching shapes."""
        n_in = in_types.size()
        type_check.expect(2 <= n_in, n_in <= 3)
        x_type = in_types[0]
        w_type = in_types[1]
        type_check.expect(
            x_type.dtype.kind == 'f',
            w_type.dtype.kind == 'f',
            x_type.ndim == 4,
            w_type.ndim == 4,
            # input channels == per-group channels * group count
            x_type.shape[1] == w_type.shape[1] * self.groups,
        )
        if type_check.eval(n_in) == 3:
            b_type = in_types[2]
            type_check.expect(
                b_type.dtype == x_type.dtype,
                b_type.ndim == 1,
                b_type.shape[0] == w_type.shape[0],
            )
    def _get_out_size(self, inputs):
        """Return (out_h, out_w) for the x/W pair in *inputs*; raise if non-positive."""
        x, W = inputs[:2]
        _, _, kh, kw = W.shape
        _, _, h, w = x.shape
        out_h = conv.get_conv_outsize(
            h, kh, self.sy, self.ph, cover_all=self.cover_all, d=self.dy)
        if out_h <= 0:
            raise RuntimeError('Height in the output should be positive.')
        out_w = conv.get_conv_outsize(
            w, kw, self.sx, self.pw, cover_all=self.cover_all, d=self.dx)
        if out_w <= 0:
            raise RuntimeError('Width in the output should be positive.')
        return out_h, out_w
    def forward_chainerx(self, inputs):
        """ChainerX fast path; falls back to the generic path when unsupported.

        NOTE(review): no halo exchange happens here, unlike forward_gpu --
        confirm this path is never taken in the spatial-parallel setting.
        """
        # TODO(hvy): Support mixed precision.
        if any([arr.dtype != inputs[0].dtype for arr in inputs[1:]]):
            return chainer.Fallback
        # TODO(hvy): Support dilate > 1.
        if self.dy > 1 or self.dx > 1:
            return chainer.Fallback
        # TODO(hvy): Support groups > 1.
        if self.groups > 1:
            return chainer.Fallback
        if inputs[0].device.backend.name == 'cuda' and self.cover_all:
            return chainer.Fallback
        return chainerx.conv(
            *inputs, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
            cover_all=self.cover_all),
    def forward_cpu(self, inputs):
        """CPU forward: dispatch to grouped, iDeep, or plain im2col implementation."""
        self.retain_inputs((0, 1))  # retain only x and W
        if len(inputs) == 2:
            (x, W), b = inputs, None
        else:
            x, W, b = inputs
        if (intel64.should_use_ideep('>=auto')
                and intel64.inputs_all_ready(inputs)):
            self._use_ideep = True
        if self.groups > 1:
            return self._forward_grouped_convolution(x, W, b)
        else:
            return self._forward_cpu_core(x, W, b)
    def _forward_cpu_core(self, x, W, b):
        """im2col + tensordot convolution on NumPy arrays (or iDeep if enabled)."""
        if self._use_ideep:
            return self._forward_ideep(x, W, b)
        kh, kw = W.shape[2:]
        col = conv.im2col_cpu(
            x, kh, kw, self.sy, self.sx, self.ph, self.pw,
            cover_all=self.cover_all, dy=self.dy, dx=self.dx)
        y = numpy.tensordot(
            col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
        if b is not None:
            y += b
        # (N, oH, oW, oC) -> (N, oC, oH, oW)
        y = numpy.rollaxis(y, 3, 1)
        return y,
    def _forward_ideep(self, x, W, b):
        """Forward via iDeep's native convolution primitive."""
        out_c, input_c, kh, kw = W.shape
        n, c, h, w = x.shape
        out_h, out_w = self._get_out_size((x, W))
        # Extra bottom/right padding so the primitive yields (out_h, out_w).
        pd = (self.sy * (out_h - 1)
              + (kh + (kh - 1) * (self.dy - 1)) - h - self.ph)
        pr = (self.sx * (out_w - 1)
              + (kw + (kw - 1) * (self.dx - 1)) - w - self.pw)
        param = intel64.ideep.convolution2DParam(
            (n, out_c, out_h, out_w),
            self.dy, self.dx,
            self.sy, self.sx,
            self.ph, self.pw,
            pd, pr)
        y = intel64.ideep.convolution2D.Forward(
            intel64.ideep.array(x),
            intel64.ideep.array(W),
            intel64.ideep.array(b) if b is not None else None,
            param)
        return y,
    def forward_gpu(self, inputs):
        """GPU forward: exchange halos, then convolve via cuDNN/grouped/im2col."""
        self.retain_inputs((0, 1))  # retain only x and W
        if len(inputs) == 2:
            (x, W), b = inputs, None
        else:
            x, W, b = inputs
        # Augment the local input with halo_size boundary data from
        # neighbouring workers (index=1 -- presumably the spatial split
        # axis; confirm against halo_exchange's definition).
        x = halo_exchange(self.comm, x, self.halo_size, index=1)
        out_c, _, kh, kw = W.shape
        n, _, h, w = x.shape
        # NOTE(review): the output size is derived from the ORIGINAL inputs,
        # not the halo-enlarged x above -- presumably intentional so each
        # worker produces only its own output slice; confirm.
        out_h, out_w = self._get_out_size(inputs)
        y = cuda.cupy.empty((n, out_c, out_h, out_w), dtype=x.dtype)
        use_cudnn = (
            chainer.should_use_cudnn('>=auto')
            and not self.cover_all
            and x.dtype == W.dtype
            and ((self.dy == 1 and self.dx == 1) or _cudnn_version >= 6000)
            and (self.groups <= 1 or _cudnn_version >= 7000)
        )
        if use_cudnn:
            # cuDNN implementation
            return self._forward_cudnn(x, W, b, y)
        elif self.groups > 1:
            return self._forward_grouped_convolution(x, W, b)
        else:
            return self._forward_gpu_core(x, W, b)
    def _forward_gpu_core(self, x, W, b):
        """im2col + tensordot convolution on CuPy arrays."""
        kh, kw = W.shape[2:]
        # Implementation using im2col
        col = conv.im2col_gpu(
            x, kh, kw, self.sy, self.sx, self.ph, self.pw,
            cover_all=self.cover_all, dy=self.dy, dx=self.dx)
        y = cuda.cupy.tensordot(
            col, W, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)
        # TODO(beam2d): Support unshared bias
        if b is not None:
            y += b
        y = cuda.cupy.rollaxis(y, 3, 1)
        return y,
    def _forward_grouped_convolution(self, x, W, b):
        """Grouped convolution expressed as one batched matmul per group."""
        # G: group count
        # N: batch size
        # kH, kW: kernel height, kernel width
        # iC, iH, iW: input channels, input height, input width
        # oC, oH, oW: output channels, output height, output width
        G = self.groups
        N, iC, iH, iW = x.shape
        oC, _, kH, kW = W.shape  # _ == iCg
        iCg = iC // G
        oCg = oC // G
        # (N, iC, kW, kW, oH, oW)
        x = conv.im2col(x, kH, kW, self.sy, self.sx, self.ph, self.pw,
                        cover_all=self.cover_all, dy=self.dy, dx=self.dx)
        oH, oW = x.shape[-2:]
        x = x.transpose(1, 2, 3, 0, 4, 5)  # (iC, kH, kW, N, oH, oW)
        x = x.reshape(G, iCg * kH * kW, N * oH * oW)
        W = W.reshape(G, oCg, iCg * kH * kW)
        # (G, oCg, N*oH*oW) = (G, oCg, iCg*kH*kW) @ (G, iCg*kH*kW, N*oH*oW)
        y = _matmul(W, x).astype(x.dtype, copy=False)
        y = y.reshape(oC, N, oH, oW)
        y = y.transpose(1, 0, 2, 3)  # (N, oC, oH, oW)
        if b is not None:
            y += b.reshape(1, b.size, 1, 1)
        return y,
    def _forward_cudnn(self, x, W, b, y):
        """Fill the preallocated *y* using cuDNN's convolution kernel."""
        pad = (self.ph, self.pw)
        stride = (self.sy, self.sx)
        dilation = (self.dy, self.dx)
        auto_tune = configuration.config.autotune
        tensor_core = configuration.config.use_cudnn_tensor_core
        cuda.cudnn.convolution_forward(
            x, W, b, y, pad, stride, dilation, self.groups,
            auto_tune=auto_tune, tensor_core=tensor_core)
        return y,
    def backward(self, indexes, grad_outputs):
        """Gradients: deconvolution for gx, Convolution2DGradW for gW, sum for gb."""
        x, W = self.get_retained_inputs()
        gy, = grad_outputs
        ret = []
        if 0 in indexes:
            xh, xw = x.shape[2:]
            gx = chainer.functions.deconvolution_2d(
                gy, W, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
                outsize=(xh, xw), dilate=(self.dy, self.dx),
                groups=self.groups)
            ret.append(gx)
        if 1 in indexes:
            gW, = Convolution2DGradW(self).apply((x, gy))
            ret.append(gW)
        if 2 in indexes:
            # Bias gradient: sum gy over the batch and spatial axes.
            gb = chainer.functions.sum(gy, axis=(0, 2, 3))
            ret.append(gb)
        return ret
class Convolution2DGradW(function_node.FunctionNode):
    """Computes the gradient of Convolution2DFunction w.r.t. its weight W."""
    def __init__(self, conv2d):
        # Copy all hyperparameters from the forward Convolution2DFunction
        # so the weight gradient uses the exact same geometry.
        W_node = conv2d.inputs[1]
        self.kh, self.kw = W_node.shape[2:]
        self.sy = conv2d.sy
        self.sx = conv2d.sx
        self.ph = conv2d.ph
        self.pw = conv2d.pw
        self.dy = conv2d.dy
        self.dx = conv2d.dx
        self.cover_all = conv2d.cover_all
        self.W_dtype = W_node.dtype
        self.groups = conv2d.groups
        self._use_ideep = conv2d._use_ideep
    def forward_cpu(self, inputs):
        """CPU forward over (x, gy): dispatch to grouped or core implementation."""
        self.retain_inputs((0, 1))
        x, gy = inputs
        if self.groups > 1:
            return self._forward_grouped_convolution(x, gy)
        else:
            return self._forward_cpu_core(x, gy)
    def _forward_cpu_core(self, x, gy):
        """gW = tensordot(gy, im2col(x)) on NumPy (or iDeep when enabled)."""
        if self._use_ideep:
            return self._forward_ideep(x, gy)
        # NumPy raises an error when the array is not contiguous.
        # See: https://github.com/chainer/chainer/issues/2744
        # TODO(niboshi): Remove this code when NumPy is fixed.
        if (not (gy.flags.c_contiguous or gy.flags.f_contiguous) and
                1 in gy.shape):
            gy = numpy.ascontiguousarray(gy)
        col = conv.im2col_cpu(
            x, self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
            cover_all=self.cover_all, dy=self.dy, dx=self.dx)
        gW = numpy.tensordot(gy, col, ((0, 2, 3), (0, 4, 5))
                             ).astype(self.W_dtype, copy=False)
        return gW,
    def _forward_ideep(self, x, gy):
        """Weight gradient via iDeep's BackwardWeights primitive."""
        n, input_c, h, w = x.shape
        n, out_c, out_h, out_w = gy.shape
        # Extra bottom/right padding matching the forward pass geometry.
        pd = (self.sy * (out_h - 1)
              + (self.kh + (self.kh - 1) * (self.dy - 1))
              - h - self.ph)
        pr = (self.sx * (out_w - 1)
              + (self.kw + (self.kw - 1) * (self.dx - 1))
              - w - self.pw)
        param = intel64.ideep.convolution2DParam(
            (out_c, input_c, self.kh, self.kw),
            self.dy, self.dx,
            self.sy, self.sx,
            self.ph, self.pw,
            pd, pr)
        gW = intel64.ideep.convolution2D.BackwardWeights(
            intel64.ideep.array(x),
            intel64.ideep.array(gy),
            param)
        return gW,
    def forward_gpu(self, inputs):
        """GPU forward: cuDNN when eligible, else grouped or im2col fallback."""
        self.retain_inputs((0, 1))
        x, gy = inputs
        use_cudnn = (
            chainer.should_use_cudnn('>=auto')
            and not self.cover_all
            and x.dtype == self.W_dtype
            and ((self.dy == 1 and self.dx == 1)
                 or (_cudnn_version >= 6000
                     and not configuration.config.cudnn_deterministic))
            and (self.groups <= 1 or _cudnn_version >= 7000)
        )
        if use_cudnn:
            # cuDNN implementation
            return self._forward_cudnn(x, gy)
        elif self.groups > 1:
            return self._forward_grouped_convolution(x, gy)
        else:
            return self._forward_gpu_core(x, gy)
    def _forward_gpu_core(self, x, gy):
        """gW = tensordot(gy, im2col(x)) on CuPy arrays."""
        col = conv.im2col_gpu(
            x, self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
            cover_all=self.cover_all, dy=self.dy, dx=self.dx)
        gW = cuda.cupy.tensordot(gy, col, ((0, 2, 3), (0, 4, 5))
                                 ).astype(self.W_dtype, copy=False)
        return gW,
    def _forward_grouped_convolution(self, x, gy):
        """Grouped weight gradient as one batched matmul per group."""
        # G: group count
        # N: batch size
        # kH, kW: kernel height, kernel width
        # iC, iH, iW: input channels, input height, input width
        # oC, oH, oW: output channels, output height, output width
        G = self.groups
        N, iC, iH, iW = x.shape
        _, oC, oH, oW = gy.shape  # _ == N
        kH = self.kh
        kW = self.kw
        iCg = iC // G
        oCg = oC // G
        # (N, iC, kH, kW, oH, oW)
        x = conv.im2col(x, kH, kW, self.sy, self.sx, self.ph, self.pw,
                        cover_all=self.cover_all, dy=self.dy, dx=self.dx)
        x = x.transpose(1, 2, 3, 0, 4, 5)  # (iC, kH, kW, N, oH, oW)
        x = x.reshape(G, iCg * kH * kW, N * oH * oW)
        x = x.transpose(0, 2, 1)  # (G, N*oH*oW, iCg*kH*kW)
        gy = gy.transpose(1, 0, 2, 3)  # (oC, N, oH, oW)
        gy = gy.reshape(G, oCg, N * oH * oW)
        # (G, oCg, iCg*kH*kW) = (G, oCg, N*oH*oW) @ (G, N*oH*oW, iCg*kH*kW)
        gW = _matmul(gy, x).astype(self.W_dtype, copy=False)
        gW = gW.reshape(oC, iCg, kH, kW)
        return gW,
    def _forward_cudnn(self, x, gy):
        """Weight gradient via cuDNN's convolution_backward_filter."""
        _, out_c, out_h, out_w = gy.shape
        n, c, h, w = x.shape
        iC = c
        iCg = int(iC / self.groups)
        gW = cuda.cupy.empty((out_c, iCg, self.kh, self.kw),
                             dtype=self.W_dtype)
        pad = (self.ph, self.pw)
        stride = (self.sy, self.sx)
        dilation = (self.dy, self.dx)
        deterministic = configuration.config.cudnn_deterministic
        auto_tune = configuration.config.autotune
        tensor_core = configuration.config.use_cudnn_tensor_core
        cuda.cudnn.convolution_backward_filter(
            x, gy, gW, pad, stride, dilation, self.groups,
            deterministic=deterministic, auto_tune=auto_tune,
            tensor_core=tensor_core)
        return gW,
    def backward(self, indexes, grad_outputs):
        """Double-backward: gx via deconvolution, ggy via convolution."""
        x, gy = self.get_retained_inputs()
        ggW, = grad_outputs
        ret = []
        if 0 in indexes:
            xh, xw = x.shape[2:]
            gx = chainer.functions.deconvolution_2d(
                gy, ggW, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
                outsize=(xh, xw), dilate=(self.dy, self.dx),
                groups=self.groups)
            ret.append(gx)
        if 1 in indexes:
            # NOTE(review): the module-level `convolution_2d` below now takes
            # (comm, x, halo_size, W, ...); this call still uses the upstream
            # Chainer signature (x, W, ...), so this double-backward path
            # would raise TypeError if ever exercised -- needs fixing.
            ggy = convolution_2d(
                x, ggW, stride=(self.sy, self.sx), pad=(self.ph, self.pw),
                cover_all=self.cover_all, dilate=(self.dy, self.dx),
                groups=self.groups)
            ret.append(ggy)
        return ret
def convolution_2d(comm, x, halo_size, W, b=None, stride=1, pad=0, cover_all=False, **kwargs):
    """Spatially-parallel 2-D convolution with halo exchange.

    Thin wrapper that forwards every argument to
    :class:`Convolution2DFunction` and applies it to ``(x, W[, b])``.
    """
    dilate, groups = argument.parse_kwargs(
        kwargs, ('dilate', 1), ('groups', 1),
        deterministic='deterministic argument is not supported anymore. '
        'Use chainer.using_config(\'cudnn_deterministic\', value) '
        'context where value is either `True` or `False`.')
    func = Convolution2DFunction(
        comm=comm, halo_size=halo_size, stride=stride, pad=pad,
        cover_all=cover_all, dilate=dilate, groups=groups)
    args = (x, W) if b is None else (x, W, b)
    return func.apply(args)[0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.