hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
042cef1b04899572fbadfca55ccfcbd5ddae72ee | 826 | py | Python | ghostIm/oneBitGen.py | acyanbird/flappyGhost | 28cecd35a1b405e9c1f0b93c97e1a41a82bf0b92 | [
"WTFPL"
] | 2 | 2022-02-21T01:11:58.000Z | 2022-02-27T15:12:38.000Z | ghostIm/oneBitGen.py | acyanbird/flappyGhost | 28cecd35a1b405e9c1f0b93c97e1a41a82bf0b92 | [
"WTFPL"
] | null | null | null | ghostIm/oneBitGen.py | acyanbird/flappyGhost | 28cecd35a1b405e9c1f0b93c97e1a41a82bf0b92 | [
"WTFPL"
] | null | null | null | from PIL import Image
def img2coe(name):
    """Convert an image to a 1-bit-per-pixel Xilinx .coe memory file.

    The image is converted to mode "1" (black/white), scanned row by row,
    and every pixel is written as a single bit: white (255) -> 1,
    black (0) -> 0.  The output file is named after the input with a
    ".coe" extension.

    Bug fixed vs. the original: the old seek/truncate removed only the
    final newline, leaving a stray "," before the terminating ";".
    Joining the values up front avoids the trailing separator entirely,
    and the file handle is now closed via a context manager.
    """
    img = Image.open(name)
    img = img.convert("1")  # 1-bit pixels: getpixel returns 0 or 255
    width, height = img.size
    output_file = name.split('.')[0] + ".coe"
    # radix can be 2 (binary), 10 (decimal) or 16 (hex); bits are written, so 2
    bits = []
    for x in range(0, height):
        for y in range(0, width):
            pix = img.getpixel((y, x))
            if pix == 255:
                pix = 1
            bits.append(str(pix))
    with open(output_file, "w") as f:
        f.write("memory_initialization_radix=2;\nmemory_initialization_vector=\n")
        # one value per line, comma-separated, final value terminated by ";"
        f.write(",\n".join(bits))
        f.write(";")
# Command-line entry point: prompt for an image path and convert it to .coe.
if __name__ == "__main__":
    name = input("Input image name:")
    img2coe(name) | 25.030303 | 81 | 0.547215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.322034 |
042d96c35e9f7f0bf2fb17cb3dd402631cdff742 | 473 | py | Python | cursoEmVideo/Python/Mundo 2/Exercicios/Ex067.py | VictorDG00/Cursos | b1411f3179ef17f128c883b0f5a56c2478de45e8 | [
"MIT"
] | 2 | 2021-02-08T13:34:15.000Z | 2021-02-08T19:43:42.000Z | cursoEmVideo/Python/Mundo 2/Exercicios/Ex067.py | VictorDG00/Cursos | b1411f3179ef17f128c883b0f5a56c2478de45e8 | [
"MIT"
] | null | null | null | cursoEmVideo/Python/Mundo 2/Exercicios/Ex067.py | VictorDG00/Cursos | b1411f3179ef17f128c883b0f5a56c2478de45e8 | [
"MIT"
] | null | null | null | # faça um programa que mostre a tabuada de varios numeros
# Shows the multiplication table (1..10) of numbers typed by the user,
# one number at a time; the loop ends when the continue prompt is
# answered with 'N' or 'n'.
while True:
    multiplicado = int(input('Digite um numero para ver sua tabuada: '))
    # Print the 1..10 table for the number just read.
    for tab in range(1, 11):
        print(f'{multiplicado}x{tab}={multiplicado * tab}')
    continuacao = str(input('Quer ver outra tabuada? [S/N]'))
    # NOTE(review): an empty answer is also "in 'Nn'" and ends the loop.
    if continuacao in 'Nn':
        break
print('cabo nego')
| 33.785714 | 72 | 0.691332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.639241 |
042f01e87ae3c4cbb27a11e0655de18410d4c635 | 818 | py | Python | events/migrations/0002_auto_20210501_1442.py | seanyoung247/TWCoulsdon | 870ae7e8ea6a3fc23d24fe21bbb21965cdbab27b | [
"MIT"
] | 1 | 2021-12-28T15:43:39.000Z | 2021-12-28T15:43:39.000Z | events/migrations/0002_auto_20210501_1442.py | seanyoung247/TWCoulsdon | 870ae7e8ea6a3fc23d24fe21bbb21965cdbab27b | [
"MIT"
] | 5 | 2021-05-14T22:46:26.000Z | 2021-05-26T02:18:46.000Z | events/migrations/0002_auto_20210501_1442.py | seanyoung247/TWCoulsdon | 870ae7e8ea6a3fc23d24fe21bbb21965cdbab27b | [
"MIT"
] | 1 | 2021-05-29T18:24:49.000Z | 2021-05-29T18:24:49.000Z | # Generated by Django 3.2 on 2021-05-01 14:42
from django.db import migrations, models
import easy_thumbnails.fields
import embed_video.fields
class Migration(migrations.Migration):
    # Adds Venue.capacity and relaxes Event.content / Image.image to
    # nullable, blank-able fields.  Auto-generated; do not edit by hand.
    dependencies = [
        ('events', '0001_initial'),
    ]
    operations = [
        # New optional capacity field on Venue.
        migrations.AddField(
            model_name='venue',
            name='capacity',
            field=models.IntegerField(blank=True, null=True),
        ),
        # Event.content becomes an optional embedded-video URL.
        migrations.AlterField(
            model_name='event',
            name='content',
            field=embed_video.fields.EmbedVideoField(blank=True, null=True),
        ),
        # Image.image becomes an optional thumbnailer image field.
        migrations.AlterField(
            model_name='image',
            name='image',
            field=easy_thumbnails.fields.ThumbnailerImageField(blank=True, null=True, upload_to=''),
        ),
    ]
| 26.387097 | 100 | 0.601467 | 671 | 0.820293 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.141809 |
042f5abcee94ea90c313bab39cfcbe9ccffaafb9 | 6,748 | py | Python | python/utils.py | JonathanAMichaels/NeuropixelsRegistration | b2623f0777b12c72ab304f7f2c1477e9954ef54a | [
"MIT"
] | null | null | null | python/utils.py | JonathanAMichaels/NeuropixelsRegistration | b2623f0777b12c72ab304f7f2c1477e9954ef54a | [
"MIT"
] | null | null | null | python/utils.py | JonathanAMichaels/NeuropixelsRegistration | b2623f0777b12c72ab304f7f2c1477e9954ef54a | [
"MIT"
] | null | null | null | import numpy as np
from scipy.io import loadmat
import os
import logging
from scipy.signal import butter, filtfilt
def mat2npy(mat_chanmap_dir):
    """Load a MATLAB channel map and return it as an (n, 2) array.

    Parameters
    ----------
    mat_chanmap_dir : str
        Path to a .mat file containing 'xcoords' and 'ycoords' arrays.

    Returns
    -------
    np.ndarray
        [x, y] channel coordinates stacked column-wise.
    """
    mat_contents = loadmat(mat_chanmap_dir)
    coords = np.hstack([mat_contents['xcoords'], mat_contents['ycoords']])
    return coords
def merge_filtered_files(filtered_location, output_directory, delete=True):
    """Concatenate per-batch .npy files into one int16 binary recording.

    Files in *filtered_location* are merged in sorted (zero-padded batch
    id) order into <output_directory>/standardized.bin.  Notebook
    checkpoints and any previous "standardized" output are skipped.

    Parameters
    ----------
    filtered_location : str
        Directory holding the per-batch .npy arrays.
    output_directory : str
        Where standardized.bin is written.
    delete : bool
        If True, each source file is removed after being appended.

    Fix vs. original: the output handle was never closed; a ``with``
    block now guarantees it is flushed and closed even on error.
    """
    filenames_sorted = sorted(os.listdir(filtered_location))
    f_out = os.path.join(output_directory, "standardized.bin")
    with open(f_out, 'wb') as f:
        for fname in filenames_sorted:
            # skip notebook checkpoints and any previously merged output
            if '.ipynb' in fname or 'standardized' in fname:
                continue
            res = np.load(os.path.join(filtered_location, fname)).astype('int16')  # was float32
            res.tofile(f)
            if delete:
                os.remove(os.path.join(filtered_location, fname))
# Added functions from yass to avoid mandatory yass install
"""
Filtering functions
"""
def _butterworth(ts, low_frequency, high_factor, order, sampling_frequency):
"""Butterworth filter
Parameters
----------
ts: np.array
T numpy array, where T is the number of time samples
low_frequency: int
Low pass frequency (Hz)
high_factor: float
High pass factor (proportion of sampling rate)
order: int
Order of Butterworth filter
sampling_frequency: int
Sampling frequency (Hz)
Notes
-----
This function can only be applied to a one dimensional array, to apply
it to multiple channels use butterworth
Raises
------
NotImplementedError
If a multidmensional array is passed
"""
low = float(low_frequency) / sampling_frequency * 2
high = float(high_factor) * 2
b, a = butter(order, low, btype='high', analog=False)
if ts.ndim == 1:
return filtfilt(b, a, ts)
else:
T, C = ts.shape
output = np.zeros((T, C), 'float32')
for c in range(C):
output[:, c] = filtfilt(b, a, ts[:, c])
return output
def _mean_standard_deviation(rec, centered=False):
"""Determine standard deviation of noise in each channel
Parameters
----------
rec : matrix [length of recording, number of channels]
centered : bool
if not standardized, center it
Returns
-------
sd : vector [number of channels]
standard deviation in each channel
"""
# find standard deviation using robust method
if not centered:
centers = np.mean(rec, axis=0)
rec = rec - centers[None]
else:
centers = np.zeros(rec.shape[1], 'float32')
return np.median(np.abs(rec), 0) / 0.6745, centers
def _standardize(rec, sd=None, centers=None):
"""Determine standard deviation of noise in each channel
Parameters
----------
rec : matrix [length of recording, number of channels]
recording
sd : vector [number of chnanels,]
standard deviation
centered : bool
if not standardized, center it
Returns
-------
matrix [length of recording, number of channels]
standardized recording
"""
# find standard deviation using robust method
if (sd is None) or (centers is None):
sd, centers = _mean_standard_deviation(rec, centered=False)
# standardize all channels with SD> 0.1 (Voltage?) units
# Cat: TODO: ensure that this is actually correct for all types of channels
idx1 = np.where(sd >= 0.1)[0]
rec[:, idx1] = np.divide(rec[:, idx1] - centers[idx1][None], sd[idx1])
# zero out bad channels
idx2 = np.where(sd < 0.1)[0]
rec[:, idx2] = 0.
return rec
# return np.divide(rec, sd)
def filter_standardize_batch(batch_id, reader, fname_mean_sd,
                             apply_filter, out_dtype, output_directory,
                             low_frequency=None, high_factor=None,
                             order=None, sampling_frequency=None):
    """Filter (optionally), standardize and save one recording batch.

    Reads batch *batch_id* from *reader*, high-pass filters it when
    *apply_filter* is set (trimming the read-ahead buffer afterwards),
    standardizes it with the per-channel statistics stored in
    *fname_mean_sd* (an .npz with 'sd' and 'centers'), and writes the
    result to <output_directory>/standardized_<batch_id>.npy cast to
    *out_dtype*.
    """
    logger = logging.getLogger(__name__)
    if apply_filter:
        # Read with an extra buffer so filter edge effects can be trimmed.
        batch = reader.read_data_batch(batch_id, add_buffer=True)
        batch = _butterworth(batch, low_frequency, high_factor,
                             order, sampling_frequency)
        batch = batch[reader.buffer:-reader.buffer]
    else:
        batch = reader.read_data_batch(batch_id, add_buffer=False)
    # Standardize with precomputed per-channel statistics.
    stats = np.load(fname_mean_sd)
    batch = _standardize(batch, stats['sd'], stats['centers'])
    # Save with a zero-padded batch id so files sort lexicographically.
    out_name = "standardized_{}.npy".format(str(batch_id).zfill(6))
    np.save(os.path.join(output_directory, out_name), batch.astype(out_dtype))
def get_std(ts,
            sampling_frequency,
            fname,
            apply_filter=False,
            low_frequency=None,
            high_factor=None,
            order=None):
    """Estimate robust per-channel noise statistics and save them.

    Optionally high-pass filters *ts* first, then computes the
    MAD-based standard deviation and channel means via
    _mean_standard_deviation, and stores them in an .npz file at
    *fname* with keys 'centers' and 'sd'.  Nothing is returned.

    Parameters
    ----------
    ts : np.array
        Recording, samples (x channels).
    sampling_frequency : int
        Sampling frequency (Hz); used only when filtering.
    fname : str
        Output path for the .npz statistics file.
    apply_filter : bool
        Whether to Butterworth-filter before estimating.
    low_frequency, high_factor, order
        Filter parameters forwarded to _butterworth.
    """
    # filter
    if apply_filter:
        ts = _butterworth(ts, low_frequency, high_factor,
                          order, sampling_frequency)
    # standardize
    sd, centers = _mean_standard_deviation(ts)
    # save
    np.savez(fname,
             centers=centers,
             sd=sd) | 27.769547 | 91 | 0.621962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,448 | 0.510966 |
043096d9c304faa314eb3f6d6324739396959dcd | 9,024 | py | Python | fairseq/models/wav2vec/wav2vec2_cif_bert.py | eastonYi/fairseq | 3cec3773990ddc0487aae7f2f253edbe591f973c | [
"MIT"
] | null | null | null | fairseq/models/wav2vec/wav2vec2_cif_bert.py | eastonYi/fairseq | 3cec3773990ddc0487aae7f2f253edbe591f973c | [
"MIT"
] | null | null | null | fairseq/models/wav2vec/wav2vec2_cif_bert.py | eastonYi/fairseq | 3cec3773990ddc0487aae7f2f253edbe591f973c | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import contextlib
import torch
import torch.nn.functional as F
from typing import List, Tuple, Dict, Optional
from transformers import BertForMaskedLM
from fairseq import utils
from fairseq.models import (
BaseFairseqModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
GradMultiply,
PositionalEmbedding,
TransformerDecoderLayer,
TransposeLast,
Fp32LayerNorm,
Fp32GroupNorm,
FairseqDropout
)
from .wav2vec2_ctc import (
Linear,
Wav2VecEncoder,
add_common_args,
base_architecture
)
from .wav2vec2_cif import (
CIFFcModel,
CIFFcModelV2,
cif_architecture,
)
def padding2attention_mask(padding_mask):
    """Build a BERT attention mask from a padding mask.

    *padding_mask* is nonzero on padded positions.  The result is two
    positions longer: every real token is attendable (1), plus the
    leading [CLS] slot and the single slot right after the last real
    token (where [SEP] goes); everything beyond that is masked (0).
    """
    shifted_right = F.pad(padding_mask, [0, 1, 0, 0], value=1)
    shifted_left = F.pad(padding_mask, [1, 0, 0, 0], value=0)
    # A position stays masked only if it is padding both before and after
    # the one-step shift, i.e. strictly past the [SEP] slot.
    attendable = 1 - shifted_right.int() * shifted_left.int()
    return F.pad(attendable, [1, 0, 0, 0], value=1)
def pred2bert_input(pred, token_mask, cls=101, sep=102):
    """Turn raw token predictions into a BERT input sequence, in place.

    Zeroes positions outside *token_mask*, writes *sep* one slot past the
    last real token of every row, puts *cls* at position 0, and returns
    the (mutated) *pred* tensor.
    """
    # [SEP] goes one past the number of real tokens in each row.
    sep_index = token_mask.sum(dim=-1).long().unsqueeze(1) + 1
    pred *= token_mask
    pred.scatter_(dim=-1, index=sep_index, value=sep)
    pred[:, 0] = cls
    return pred
def add_lm_args(parser):
    """Register the language-model / fusion command-line options on *parser*."""
    option_specs = [
        ("--freeze-lm-finetune-updates",
         dict(type=int, default=0, help="freeze_lm_finetune_updates")),
        ("--gold-rate-range", dict(type=str, help="gold-rate-range")),
        ("--gold-rate-steps", dict(type=str, help="gold-rate-steps")),
        ("--infer-threash", dict(type=float, default=0.8, help="infer-threash")),
        ("--lambda-embedding",
         dict(type=float, metavar="D", help="lambda-embedding")),
        ("--lambda-am",
         dict(type=float, default=1.0, metavar="D", help="lambda-am")),
        ("--lambda-lm",
         dict(type=float, default=0.2, metavar="D", help="lambda-lm")),
        ("--lambda-qua",
         dict(type=float, default=0.1, metavar="D", help="lambda-qua")),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
@register_model("w2v_cif_bert")
class W2V_CIF_BERT(BaseFairseqModel):
    """Wav2vec encoder + CIF + BERT fusion model.

    The acoustic encoder produces frame features; CIF compresses them to
    token-level vectors, which are mixed with BERT token embeddings and
    rescored by the BERT encoder.  The final logits are a weighted sum of
    the acoustic and language-model logits (lambda_am / lambda_lm).
    """
    def __init__(self, args, encoder, bert, to_vocab, tgt_dict):
        """
        Keeps three vocabulary projection heads: the shared BERT one
        (`to_vocab`) plus independent deep copies for the acoustic
        branch (`to_vocab_ac`) and the CTC branch (`to_vocab_ctc`).
        BERT's embedding table is frozen.
        """
        super().__init__()
        self.encoder = encoder
        self.bert = bert
        self.dim_bert = bert.embeddings.word_embeddings.weight.size(1)
        self.to_vocab = to_vocab # 768 -> 21128
        self.to_vocab_ac = copy.deepcopy(to_vocab)
        self.to_vocab_ctc = copy.deepcopy(to_vocab)
        # Last encoder dim is reserved for CIF weights, hence d-1.
        self.proj = Linear(encoder.d-1, self.dim_bert)
        self.tgt_dict = tgt_dict
        self.num_updates = 0
        self.args = args
        self.freeze_lm_finetune_updates = args.freeze_lm_finetune_updates
        # NOTE(review): eval() on command-line strings (e.g. "(0.9, 0.1)");
        # trusted-config only — do not feed untrusted input here.
        self.gold_rate_range = eval(args.gold_rate_range)
        self.gold_rate_steps = eval(args.gold_rate_steps)
        # Freeze the BERT embedding table.
        for p in self.bert.embeddings.parameters():
            p.requires_grad = False
    @staticmethod
    def add_args(parser):
        # Shared wav2vec options + LM-fusion options + the CTC weight.
        add_common_args(parser)
        add_lm_args(parser)
        parser.add_argument("--lambda-ctc", type=float, metavar="D", help="lambda-ctc")
    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        w2v_cif_bert_architecture(args)
        tgt_dict = task.target_dictionary
        bert, to_vocab = cls.build_bert(args, tgt_dict)
        encoder = cls.build_encoder(args) # encoder
        return cls(args, encoder, bert, to_vocab, tgt_dict)
    @classmethod
    def build_encoder(cls, args, tgt_dict=None):
        # Acoustic front-end (wav2vec 2.0).
        return Wav2VecEncoder(args, tgt_dict=tgt_dict)
    @classmethod
    def build_bert(cls, args, tgt_dict):
        # Pretrained masked-LM; split into the BERT trunk and its LM head.
        pretrained_model = BertForMaskedLM.from_pretrained(args.bert_name)
        bert = pretrained_model.bert
        to_vocab = pretrained_model.cls
        return bert, to_vocab
    def forward(self, **kwargs):
        """
        Run the acoustic encoder, CIF, and the BERT rescoring pass.

        The encoder returns a dict with "encoder_out", "encoded",
        "encoder_padding_mask" (B x T) and "padding_mask".  In training,
        target lengths and gold BERT inputs come from kwargs
        ('target_lengths', 'bert_input'); in inference the decode length
        is derived from the CIF alphas.
        """
        encoder_output = self.encoder(tbc=False, **kwargs)
        # Last feature channel holds the CIF weight; strip it for content.
        hidden_encoded = encoder_output['encoder_out'][:, :, :-1]
        hidden_ctc = F.pad(hidden_encoded, [0, 1, 0, 0, 0, 0], value=0)
        logits_ctc = self.to_vocab_ctc(hidden_ctc)
        len_logits_ctc = (~encoder_output['padding_mask']).sum(-1).long()
        alphas = CIFFcModelV2.get_alphas(encoder_output)
        if self.training:
            # Scheduled sampling: mix gold tokens in at the current rate.
            gold_rate = self.set_gold_rate()
            decode_length = kwargs['target_lengths']
            gold_ids = kwargs['bert_input'].long()
            noise = 0.0
        else:
            gold_rate = 0.0
            decode_length = torch.round(alphas.sum(-1)).int()
            gold_ids = None
            noise = 0.0
        _alphas, num_output = self.resize(alphas, decode_length, noise=noise)
        padding_mask = ~utils.sequence_mask(decode_length).bool()
        cif_outputs = self.cif(hidden_encoded, _alphas)
        hidden_ac = self.proj(cif_outputs)
        logits_ac = self.to_vocab_ac(hidden_ac)
        # Freeze the LM (no grads) until freeze_lm_finetune_updates is reached.
        ft = self.freeze_lm_finetune_updates <= self.num_updates
        with torch.no_grad() if not ft else contextlib.ExitStack():
            logits_lm, gold_embedding, pred_mask, token_mask = self.bert_forward(
                hidden_ac, logits_ac, padding_mask, gold_ids, gold_rate,
                threash=self.args.infer_threash)
        # Late fusion of acoustic and LM logits, zeroed on padding.
        logits = self.args.lambda_am * logits_ac + self.args.lambda_lm * logits_lm
        logits *= (~padding_mask).unsqueeze(-1).float()
        # pred/token masks are trimmed of the [CLS]/[SEP] slots.
        return {'logits': logits, 'len_logits': decode_length,
                'alphas': alphas, 'num_output': num_output, 'gold_rate': gold_rate,
                'logits_ctc': logits_ctc, 'len_logits_ctc': len_logits_ctc,
                'pred_mask': pred_mask[:, 1:-1], 'token_mask': token_mask[:, 1:-1]}
    def bert_forward(self, hidden, logits_ac, padding_mask, gold_ids=None, gold_rate=0.0, threash=0.8):
        """
        Mix acoustic token vectors with BERT embeddings and rescore.

        Training: positions are randomly chosen (prob 1 - gold_rate,
        restricted to real tokens) to keep the acoustic vector; the rest
        use the gold-token embedding.  Inference: low-confidence
        predictions (softmax max <= threash) keep the acoustic vector.
        Returns (logits without [CLS]/[SEP], gold embeddings, pred_mask,
        token_mask).
        """
        device = hidden.device
        # Real-token mask padded with zeros at the [CLS]/[SEP] slots.
        token_mask = F.pad(~padding_mask, [1, 1, 0, 0], value=0)
        if self.training:
            input_ids = gold_ids
            pred_mask = (torch.rand(input_ids.size(), device=device) > gold_rate) * token_mask
        else: # infer
            probs = F.pad(utils.softmax(logits_ac.float(), dim=-1), [0, 0, 1, 1, 0, 0], value=0)
            confident, preds = probs.max(-1)
            input_ids = pred2bert_input(preds, token_mask)
            pred_mask = (confident <= threash) * token_mask
        # mixing: acoustic vector where pred_mask is set, gold embedding elsewhere
        gold_embedding = self.bert.embeddings.word_embeddings(input_ids)
        hidden_mix = torch.where(pred_mask[:, :, None].repeat(1, 1, hidden.size(-1)),
                                 F.pad(hidden, [0, 0, 1, 1, 0, 0], value=0),
                                 gold_embedding)
        attention_mask = padding2attention_mask(padding_mask)
        embeddings = self.bert.embeddings(inputs_embeds=hidden_mix)
        encoder_outputs = self.bert.encoder(
            embeddings,
            attention_mask=attention_mask[:, None, None, :])
        logits = self.to_vocab(encoder_outputs[0])
        # Drop the [CLS]/[SEP] positions.
        logits = logits[:, 1:-1, :]
        return logits, gold_embedding, pred_mask, token_mask
    @staticmethod
    def resize(*args, **kwargs):
        # Delegates alpha resizing to the CIF implementation.
        return CIFFcModel.resize(*args, **kwargs)
    @staticmethod
    def cif(*args, **kwargs):
        # Delegates continuous integrate-and-fire to the CIF implementation.
        return CIFFcModel.cif(*args, **kwargs)
    def get_normalized_probs(self, net_output, log_probs, retrun_ctc=False):
        """Get normalized probabilities (or log probs) from a net's output."""
        # NOTE: 'retrun_ctc' (sic) is part of the public signature; kept as-is.
        logits_ctc = net_output["logits_ctc"]
        logits = net_output["logits"]
        if log_probs:
            res_ctc = utils.log_softmax(logits_ctc.float(), dim=-1)
            res = utils.log_softmax(logits.float(), dim=-1)
        else:
            res_ctc = utils.softmax(logits_ctc.float(), dim=-1)
            res = utils.softmax(logits.float(), dim=-1)
        res_ctc.batch_first = True
        res.batch_first = True
        if retrun_ctc:
            return res_ctc, res
        else:
            return res
    def set_num_updates(self, num_updates):
        """Set the number of parameters updates."""
        super().set_num_updates(num_updates)
        self.num_updates = num_updates
    def set_gold_rate(self):
        # Linear decay from the high end of gold_rate_range to the low end:
        # constant for the first s1 updates, then decays over s2 updates.
        s, e = self.gold_rate_range
        s1, s2 = self.gold_rate_steps
        gold_rate = max((1 - max((self.num_updates - s1), 0) / s2) * (s-e), 0) + e
        return gold_rate
@register_model_architecture("w2v_cif_bert", "w2v_cif_bert")
def w2v_cif_bert_architecture(args):
    """Fill in default hyper-parameters missing from older checkpoints."""
    cif_architecture(args)
    if not hasattr(args, "share_final_proj"):
        args.share_final_proj = False
| 34.707692 | 103 | 0.630762 | 6,588 | 0.730053 | 0 | 0 | 6,813 | 0.754987 | 0 | 0 | 1,250 | 0.13852 |
0430aa9e29f2dc11a3655c69ef15ad30f7e0af32 | 3,250 | py | Python | 2/src/3_2D_ritter.py | dsanmartin/IPM468-PJ | d0cdd030498bdca590cfa907941679db6438b1b9 | [
"BSD-3-Clause"
] | null | null | null | 2/src/3_2D_ritter.py | dsanmartin/IPM468-PJ | d0cdd030498bdca590cfa907941679db6438b1b9 | [
"BSD-3-Clause"
] | null | null | null | 2/src/3_2D_ritter.py | dsanmartin/IPM468-PJ | d0cdd030498bdca590cfa907941679db6438b1b9 | [
"BSD-3-Clause"
] | null | null | null | import pathlib
import numpy as np
from dambreak import Experiment2D
from plot import plot2D, plot3D, quiver
#%% Initial condition
def h0_(x, y, x0, y0, h0):
    """Dam-break initial water height.

    Returns an array shaped like *x* equal to *h0* inside the corner
    region x <= x0 and y <= y0, and zero elsewhere.
    """
    depth = np.zeros_like(x)
    depth[(x <= x0) & (y <= y0)] = h0
    return depth
#%%Parameters
# Dam-break configuration; values are passed straight to Experiment2D below.
h_0 = 40
x0 = 1000
y0 = 1000
L = 2000
T = 40
Nx = 100
Ny = 100
Nt = 500
f = 0
g = 1
# Initial conditions: a water column of height h_0 fills the corner
# x <= x0, y <= y0; the fluid starts at rest (u0 = v0 = 0).
h0 = lambda x, y: h0_(x, y, x0, y0, h_0) # h IC
u0 = lambda x, y: x * 0 # u IC
v0 = lambda x, y: y * 0 # v IC
Sf = lambda f, g, h, Q: f * np.abs(Q) * Q / (8 * g * h ** 3) # Friction
#%% Experiment
# Bundle the physical and numerical parameters into the dam-break experiment.
ritter = Experiment2D(
    f = f,
    g = g,
    L = L,
    T = T,
    Nx = Nx,
    Ny = Ny,
    Nt = Nt,
    h0 = h0,
    u0 = u0,
    v0 = v0,
    Sf = Sf
)
#%% Lax-Friedrich scheme not working...
# Suffix "l" = Lax-Friedrichs solution, "r" = Rusanov solution.
t, Xl, Yl, Hl, Q1l, Q2l = ritter.solvePDE('lf')
#%% Rusanov scheme
t, Xr, Yr, Hr, Q1r, Q2r = ritter.solvePDE('rs')
#%%
# Water-height surface of the Lax-Friedrichs run at an early snapshot.
n = 4
plot3D(Xl, Yl, Hl[n])
#%%
# Final snapshot of the Rusanov run, shown in several views.
n = -1
plot3D(Xr, Yr, Hr[n])
#%%
plot2D(Xr, Yr, Hr[n])
#%%
plot3D(Xr, Yr, Hr[n])
#%%
# Discharge field (Q1, Q2) at the same snapshot.
quiver(Xr, Yr, Q1r[n], Q2r[n])
#%%Save data
DIR = 'data/3/1/' # Directory name
pathlib.Path(DIR).mkdir(parents=True, exist_ok=True) # Create Folder
#%% Save experiment n = {0, 125, 250, 375, -1}
# Columns: grid coordinates followed by the water height at the five
# sampled time indices (named h0..h40 in the header).
M, N = Hr[0].shape
data_h = np.zeros((M * N, 7))
data_h[:, 0] = Xr.flatten()
data_h[:, 1] = Yr.flatten()
data_h[:, 2] = Hr[0].flatten()
data_h[:, 3] = Hr[125].flatten()
data_h[:, 4] = Hr[250].flatten()
data_h[:, 5] = Hr[375].flatten()
data_h[:, 6] = Hr[-1].flatten()
np.savetxt(DIR + 'ritter_2D.csv', data_h, fmt='%.16f', delimiter=' ', header='x y h0 h10 h20 h30 h40', comments="") # Save data
#%%
# Velocities u = Q1/h, v = Q2/h and their magnitude m, subsampled every
# nn-th grid point, at the same five time indices.
nn = 3
MM, NN = Q1r[0, ::nn, ::nn].shape
data_v = np.zeros((MM * NN, 17))
data_v[:, 0] = Xr[::nn, ::nn].flatten()
data_v[:, 1] = Yr[::nn, ::nn].flatten()
data_v[:, 2] = Q1r[0, ::nn, ::nn].flatten() / Hr[0, ::nn, ::nn].flatten()
data_v[:, 3] = Q2r[0, ::nn, ::nn].flatten() / Hr[0, ::nn, ::nn].flatten()
data_v[:, 4] = np.sqrt(data_v[:, 2] ** 2 + data_v[:, 3] ** 2)
data_v[:, 5] = Q1r[125, ::nn, ::nn].flatten() / Hr[125, ::nn, ::nn].flatten()
data_v[:, 6] = Q2r[125, ::nn, ::nn].flatten() / Hr[125, ::nn, ::nn].flatten()
data_v[:, 7] = np.sqrt(data_v[:, 5] ** 2 + data_v[:, 6] ** 2)
data_v[:, 8] = Q1r[250, ::nn, ::nn].flatten() / Hr[250, ::nn, ::nn].flatten()
data_v[:, 9] = Q2r[250, ::nn, ::nn].flatten() / Hr[250, ::nn, ::nn].flatten()
data_v[:, 10] = np.sqrt(data_v[:, 8] ** 2 + data_v[:, 9] ** 2)
data_v[:, 11] = Q1r[375, ::nn, ::nn].flatten() / Hr[375, ::nn, ::nn].flatten()
data_v[:, 12] = Q2r[375, ::nn, ::nn].flatten() / Hr[375, ::nn, ::nn].flatten()
data_v[:, 13] = np.sqrt(data_v[:, 11] ** 2 + data_v[:, 12] ** 2)
data_v[:, 14] = Q1r[-1, ::nn, ::nn].flatten() / Hr[-1, ::nn, ::nn].flatten()
data_v[:, 15] = Q2r[-1, ::nn, ::nn].flatten() / Hr[-1, ::nn, ::nn].flatten()
data_v[:, 16] = np.sqrt(data_v[:, 14] ** 2 + data_v[:, 15] ** 2)
header_ = 'x y u_0 v_0 m0 u_10 v_10 m10 u_20 v_20 m20 u_30 v_30 m30 u_40 v_40 m40'
np.savetxt(DIR + 'ritter_v_2D.csv', data_v, fmt='%.16f', delimiter=' ', header=header_, comments="")
#%%
# Lax-Friedrichs water height at snapshot index 4 (matches the plot above).
MMM, NNN = Hl[0].shape
data_lf = np.zeros((MMM * NNN, 3))
data_lf[:, 0] = Xl.flatten()
data_lf[:, 1] = Yl.flatten()
data_lf[:, 2] = Hl[4].flatten()
np.savetxt(DIR + "ritter_2D_lf.csv", data_lf, fmt='%.16f', delimiter=' ', header="x y h", comments="") | 27.083333 | 127 | 0.548923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.142462 |
04317857b939c2cd80e35a253468a62b54df8cd9 | 1,025 | py | Python | realsense/manual/cluster.py | mrzhuzhe/yunru | faa7380a5363f654f1dc8f5d53b077d9f33bff6f | [
"MIT"
] | null | null | null | realsense/manual/cluster.py | mrzhuzhe/yunru | faa7380a5363f654f1dc8f5d53b077d9f33bff6f | [
"MIT"
] | null | null | null | realsense/manual/cluster.py | mrzhuzhe/yunru | faa7380a5363f654f1dc8f5d53b077d9f33bff6f | [
"MIT"
] | null | null | null | # 聚类
# 需要移除远处点
import open3d as o3d
import numpy as np
import matplotlib.pyplot as plt
sourcePath="./zz_test_panda/scene/integrated.ply"
tatgetPath="./zz_test_panda/scene/cropped_1.ply"
# 加载点云
print("Load a ply point cloud, print it, and render it")
pcd = o3d.io.read_point_cloud(sourcePath)
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug) as cm:
labels = np.array(
pcd.cluster_dbscan(eps=0.02, min_points=10, print_progress=True))
max_label = labels.max()
print(f"point cloud has {max_label + 1} clusters")
colors = plt.get_cmap("tab20")(labels / (max_label if max_label > 0 else 1))
colors[labels < 0] = 0
pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])
o3d.visualization.draw_geometries([pcd],
front= [ -0.12109781037531148, -0.067873032753074228, -0.99031740959512848 ],
lookat= [ 0.3134765625, 0.044091457811535228, 0.34410855566261028 ],
up= [ 0.022294674751066466, -0.99759391284426524, 0.065645506577472368 ],
zoom= 0.70799999999999996) | 35.344828 | 89 | 0.736585 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.208373 |
0431b86b5fec8a3a6766b7ed7c6bc620c17ad76e | 86 | py | Python | functionalities.py | Dilkovak/Naggy-Bot | 7727a77d1916336d6f3f52efc40437a9d1ae960b | [
"MIT"
] | null | null | null | functionalities.py | Dilkovak/Naggy-Bot | 7727a77d1916336d6f3f52efc40437a9d1ae960b | [
"MIT"
] | null | null | null | functionalities.py | Dilkovak/Naggy-Bot | 7727a77d1916336d6f3f52efc40437a9d1ae960b | [
"MIT"
] | null | null | null | import random
def coinflip():
# print(random.random())
return random.random() | 17.2 | 28 | 0.674419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.27907 |
0436d3f10986a9986bb88f66529f9631838fc465 | 295 | py | Python | class2/demo3.py | sanderslhc/python-learing | 2769f72c9b6de24d768175bed1aa9851d0469d19 | [
"MIT"
] | 1 | 2021-07-20T09:52:55.000Z | 2021-07-20T09:52:55.000Z | class2/demo3.py | sanderslhc/python-learning | 2769f72c9b6de24d768175bed1aa9851d0469d19 | [
"MIT"
] | null | null | null | class2/demo3.py | sanderslhc/python-learning | 2769f72c9b6de24d768175bed1aa9851d0469d19 | [
"MIT"
] | null | null | null | #多分支结构
score=int(input('请输入成绩'))
#判断
if score>=90 and score<=100:
print('A')
elif score>=80 and score<=89:
print('B')
elif score>=70 and score<=79:
print('C')
elif score>=60 and score<=69:
print('D')
elif score>=0 and score<=59:
print('E')
else:
print('无效') | 19.666667 | 30 | 0.572881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.201238 |
0437a3c33994a58eba5b0112b7cbac4e87e6fafd | 8,126 | py | Python | lib/sdf/sdf_optimizer.py | phschoepf/PoseCNN-PyTorch | e2a67dbb37b695474d5fd93b07e85aa297293d7e | [
"BSD-Source-Code"
] | 85 | 2020-12-13T22:30:11.000Z | 2022-03-21T10:30:57.000Z | lib/sdf/sdf_optimizer.py | phschoepf/PoseCNN-PyTorch | e2a67dbb37b695474d5fd93b07e85aa297293d7e | [
"BSD-Source-Code"
] | 24 | 2021-01-08T06:02:40.000Z | 2022-03-18T13:01:53.000Z | lib/sdf/sdf_optimizer.py | phschoepf/PoseCNN-PyTorch | e2a67dbb37b695474d5fd93b07e85aa297293d7e | [
"BSD-Source-Code"
] | 24 | 2021-01-03T06:48:17.000Z | 2022-02-27T00:27:07.000Z | # Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import sys
import cv2
import time
from .sdf_utils import *
import _init_paths
from fcn.config import cfg
from layers.sdf_matching_loss import SDFLoss
class sdf_optimizer():
def __init__(self, classes, sdf_files, lr=0.01, optimizer='Adam', use_gpu=True):
self.classes = classes
self.sdf_files = sdf_files
self.use_gpu = use_gpu
num = len(sdf_files)
self.xmins = np.zeros((num, ), dtype=np.float32)
self.ymins = np.zeros((num, ), dtype=np.float32)
self.zmins = np.zeros((num, ), dtype=np.float32)
self.xmaxs = np.zeros((num, ), dtype=np.float32)
self.ymaxs = np.zeros((num, ), dtype=np.float32)
self.zmaxs = np.zeros((num, ), dtype=np.float32)
sdf_torch_list = []
for i in range(len(sdf_files)):
sdf_file = sdf_files[i]
print(' start loading sdf from {} ... '.format(sdf_file))
if sdf_file[-3:] == 'sdf':
sdf_info = read_sdf(sdf_file)
sdf = sdf_info[0]
min_coords = sdf_info[1]
delta = sdf_info[2]
max_coords = min_coords + delta * np.array(sdf.shape)
self.xmins[i], self.ymins[i], self.zmins[i] = min_coords
self.xmaxs[i], self.ymaxs[i], self.zmaxs[i] = max_coords
sdf_torch_list.append(torch.from_numpy(sdf).float())
elif sdf_file[-3:] == 'pth':
sdf_info = torch.load(sdf_file)
min_coords = sdf_info['min_coords']
max_coords = sdf_info['max_coords']
self.xmins[i], self.ymins[i], self.zmins[i] = min_coords
self.xmaxs[i], self.ymaxs[i], self.zmaxs[i] = max_coords
sdf_torch_list.append(sdf_info['sdf_torch'][0, 0].permute(1, 0, 2))
print(' minimal coordinate = ({:.4f}, {:.4f}, {:.4f}) cm'.format(self.xmins[i] * 100, self.ymins[i] * 100, self.zmins[i] * 100))
print(' maximal coordinate = ({:.4f}, {:.4f}, {:.4f}) cm'.format(self.xmaxs[i] * 100, self.ymaxs[i] * 100, self.zmaxs[i] * 100))
print(sdf_torch_list[-1].shape)
print(' finished loading sdf ! ')
# combine sdfs
max_shape = np.array([sdf.shape for sdf in sdf_torch_list]).max(axis=0)
self.sdf_torch = torch.ones((num, max_shape[0], max_shape[1], max_shape[2]), dtype=torch.float32)
self.sdf_limits = np.zeros((num, 9), dtype=np.float32)
for i in range(num):
size = sdf_torch_list[i].shape
self.sdf_torch[i, :size[0], :size[1], :size[2]] = sdf_torch_list[i]
self.sdf_limits[i, 0] = self.xmins[i]
self.sdf_limits[i, 1] = self.ymins[i]
self.sdf_limits[i, 2] = self.zmins[i]
self.sdf_limits[i, 3] = self.xmins[i] + (self.xmaxs[i] - self.xmins[i]) * max_shape[0] / size[0]
self.sdf_limits[i, 4] = self.ymins[i] + (self.ymaxs[i] - self.ymins[i]) * max_shape[1] / size[1]
self.sdf_limits[i, 5] = self.zmins[i] + (self.zmaxs[i] - self.zmins[i]) * max_shape[2] / size[2]
self.sdf_limits[i, 6] = max_shape[0]
self.sdf_limits[i, 7] = max_shape[1]
self.sdf_limits[i, 8] = max_shape[2]
self.sdf_limits = torch.from_numpy(self.sdf_limits)
if self.use_gpu:
self.sdf_torch = self.sdf_torch.cuda()
self.sdf_limits = self.sdf_limits.cuda()
self.sdf_loss = SDFLoss()
def look_up(self, samples_x, samples_y, samples_z):
samples_x = torch.clamp(samples_x, self.xmin, self.xmax)
samples_y = torch.clamp(samples_y, self.ymin, self.ymax)
samples_z = torch.clamp(samples_z, self.zmin, self.zmax)
samples_x = (samples_x - self.xmin) / (self.xmax - self.xmin)
samples_y = (samples_y - self.ymin) / (self.ymax - self.ymin)
samples_z = (samples_z - self.zmin) / (self.zmax - self.zmin)
samples = torch.cat((samples_z.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4),
samples_x.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4),
samples_y.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4)),
dim=4)
samples = samples * 2 - 1
return F.grid_sample(self.sdf_torch, samples, padding_mode="border")
def compute_dist(self, d_pose, T_oc_0, ps_c):
ps_o = torch.mm(Oplus(T_oc_0, d_pose, self.use_gpu), ps_c.permute(1, 0)).permute(1, 0)[:, :3]
dist = self.look_up(ps_o[:, 0], ps_o[:, 1], ps_o[:, 2])
return torch.abs(dist)
def refine_pose(self, T_co_0, ps_c, steps=100):
# input T_co_0: 4x4
# ps_c: nx4
if self.use_gpu:
T_oc_0 = torch.from_numpy(np.linalg.inv(T_co_0)).cuda()
else:
T_oc_0 = torch.from_numpy(np.linalg.inv(T_co_0))
self.dpose.data[:3] *= 0
self.dpose.data[3:] = self.dpose.data[3:] * 0 + 1e-12
self.dist = torch.zeros((ps_c.size(0),))
if self.use_gpu:
self.dist = self.dist.cuda()
for i in range(steps):
if self.optimizer_type == 'LBFGS':
def closure():
self.optimizer.zero_grad()
dist = self.compute_dist(self.dpose, T_oc_0, ps_c)
self.dist = dist.detach()
dist_target = torch.zeros_like(dist)
if self.use_gpu:
dist_target = dist_target.cuda()
loss = self.loss(dist, dist_target)
loss.backward()
return loss
self.optimizer.step(closure)
elif self.optimizer_type == 'Adam':
self.optimizer.zero_grad()
dist = self.compute_dist(self.dpose, T_oc_0, ps_c)
self.dist = dist.detach()
dist_target = torch.zeros_like(dist)
if self.use_gpu:
dist_target = dist_target.cuda()
loss = self.loss(dist, dist_target)
loss.backward()
self.optimizer.step()
# print('step: {}, loss = {}'.format(i + 1, loss.data.cpu().item()))
T_oc_opt = Oplus(T_oc_0, self.dpose, self.use_gpu)
T_co_opt = np.linalg.inv(T_oc_opt.cpu().detach().numpy())
dist = torch.mean(torch.abs(self.dist)).detach().cpu().numpy()
return T_co_opt, dist
    def refine_pose_layer(self, T_oc_0, points, steps=100):
        """Refine the poses of several objects at once using the custom SDF loss layer.

        :param T_oc_0: initial object-from-camera poses, m x 4 x 4 numpy array
                       (m = number of objects); the original comment said
                       "T_co_0" but the variable naming here is object-from-camera
        :param points: n x 3 observed points in the camera frame
        :param steps: number of update iterations
        :return: optimized poses as an m x 4 x 4 numpy array (from the last
                 iteration's sdf_loss output)
        """
        # construct initial pose
        pose_init = torch.from_numpy(T_oc_0).cuda()
        m = T_oc_0.shape[0]
        # Per-object 6-DoF increment on GPU device 0. The regularization
        # vector below pairs the first three entries with the translation
        # weight and the last three with the rotation weight.
        dpose = torch.zeros((m, 6), dtype=torch.float32, requires_grad=True, device=0)
        dpose.data[:, :3] *= 0
        # Seed the last three components with a tiny epsilon instead of zero.
        dpose.data[:, 3:] = dpose.data[:, 3:] * 0 + 1e-12
        treg = cfg.TEST.SDF_TRANSLATION_REG
        rreg = cfg.TEST.SDF_ROTATION_REG
        regularization = torch.tensor([treg, treg, treg, rreg, rreg, rreg], dtype=torch.float32, requires_grad=False, device=0)
        start = time.time()
        for i in range(steps):
            # self.optimizer.zero_grad()
            # sdf_loss returns the update step (dalpha) directly, so the pose
            # is stepped manually rather than through an optimizer.
            loss, sdf_values, T_oc_opt, dalpha, J = self.sdf_loss(dpose, pose_init, self.sdf_torch, self.sdf_limits, points, regularization)
            # print(loss)
            # loss.backward()
            # self.optimizer.step()
            # Alternative explicit solve, kept for reference:
            # JTJ = JTJ.cpu().detach().numpy() + np.diag([100, 100, 100, 0.001, 0.001, 0.001]).astype(np.float32)
            # J = J.cpu().detach().numpy()
            # dalpha = torch.from_numpy(np.matmul(np.linalg.inv(JTJ), J)).cuda()
            dpose = dpose - dalpha
            # self.dpose = self.dpose - 0.001 * J
        end = time.time()
        print('sdf refinement iterations %d, time %f' % (steps, end - start))
        return T_oc_opt.cpu().detach().numpy()
| 40.63 | 144 | 0.563992 | 7,793 | 0.95902 | 0 | 0 | 0 | 0 | 0 | 0 | 996 | 0.12257 |
043875b930e6d05d2634154e8b1a9777e170c2cc | 10,563 | py | Python | src/game_elements/Map.py | crazyStewie/ProjetoJojinho | 04657ebfcbbf940c250721a8fe00d79af0bd927c | [
"MIT"
] | null | null | null | src/game_elements/Map.py | crazyStewie/ProjetoJojinho | 04657ebfcbbf940c250721a8fe00d79af0bd927c | [
"MIT"
] | null | null | null | src/game_elements/Map.py | crazyStewie/ProjetoJojinho | 04657ebfcbbf940c250721a8fe00d79af0bd927c | [
"MIT"
] | null | null | null | from pymunk.vec2d import Vec2d
from src.utils import AngleHelper
import pymunk
class Map:
def __init__(self):
self.crossings = []
self.streets = []
self.STREET_WIDTH = 50
self.SIDEWALK_WIDTH = 60
self.sidewalk_crossings = []
self.sidewalks = []
self.distances = []
self.spawn_positions = []
self.back_sprite = None
self.front_sprite = None
self.streets_length = []
self.sidewalks_length = []
self.collision_vertices = []
self.collision_edges = []
self.col_body = None
self.col_shapes = []
self.spawn_positions = [(100, 100), (150, 100), (200, 100), (250, 100)]
self.spawn_rotations = [0, 0, 0, 0]
def generate_body(self):
self.col_body = pymunk.Body(body_type=pymunk.Body.STATIC)
for edge in self.collision_edges:
self.col_shapes += [pymunk.shapes.Segment(self.col_body,
Vec2d(self.collision_vertices[edge[0]]),
Vec2d(self.collision_vertices[edge[1]]), 1)]
#for cvertex in self.collision_vertices:
# self.col_shapes += [pymunk.shapes.Circle(self.col_body, 8, cvertex)]
def generate_matrix(self):
self.distances.clear()
for i in range(len(self.sidewalk_crossings)):
self.distances += [[]]
for i in range(len(self.sidewalk_crossings)):
for j in range(len(self.sidewalk_crossings)):
self.distances[i].append(-1)
for i in range(len(self.sidewalk_crossings)):
self.distances[i][i] = 0
for edge in self.sidewalks:
self.distances[edge[0]][edge[1]] = self.distances[edge[1]][edge[0]] = \
Vec2d(self.sidewalk_crossings[edge[0]]).get_distance(Vec2d(self.sidewalk_crossings[edge[1]]))
for k in range(len(self.sidewalk_crossings)):
for i in range(len(self.sidewalk_crossings)):
for j in range(len(self.sidewalk_crossings)):
if self.distances[i][k] != -1 and self.distances[k][j] != -1:
if self.distances[i][k] + self.distances[k][j] < self.distances[i][j] or \
self.distances[i][j] == -1:
self.distances[i][j] = self.distances[i][k] + self.distances[k][j]
def generate_sidewalks(self):
self.sidewalk_crossings.clear()
self.sidewalks.clear()
vertices_streets = []
for crossing_index in range(len(self.crossings)):
vertex_street_directions = []
for street in self.streets:
if crossing_index in street:
vertex_street_directions.append(
((Vec2d(self.crossings[street[(street.index(crossing_index) + 1) % 2]]) -
Vec2d(self.crossings[crossing_index])).normalized(), self.streets.index(street)))
vertex_street_directions.sort(key=lambda obj: obj[0].angle)
for direction_index in range(len(vertex_street_directions)):
temp_vec = vertex_street_directions[direction_index][0].rotated(AngleHelper.angle_to_positive(
vertex_street_directions[direction_index][0].get_angle_between(
vertex_street_directions[(direction_index + 1) % len(vertex_street_directions)][0]))/2)
temp_vec.length = self.SIDEWALK_WIDTH / \
(2 * abs(temp_vec.dot(vertex_street_directions[direction_index][0].perpendicular())))
vertices_streets.append((Vec2d(self.crossings[crossing_index]) + temp_vec,
vertex_street_directions[direction_index][1],
vertex_street_directions[(direction_index + 1) %
len(vertex_street_directions)][1], crossing_index))
for element in vertices_streets:
self.sidewalk_crossings.append((element[0].x, element[0].y))
for element_index in range(len(vertices_streets)):
if element_index < len(vertices_streets) - 1:
if vertices_streets[element_index][3] == vertices_streets[element_index + 1][3]:
self.sidewalks.append((self.sidewalk_crossings.index(vertices_streets[element_index][0]),
self.sidewalk_crossings.index(vertices_streets[element_index + 1][0])))
elif element_index > 0:
if vertices_streets[element_index][3] == vertices_streets[element_index - 1][3]:
lower = element_index - 1
while lower > 0 and vertices_streets[lower][3] == vertices_streets[lower - 1][3]:
lower -= 1
if vertices_streets[lower][3] == vertices_streets[element_index][3]:
self.sidewalks.append((self.sidewalk_crossings.index(vertices_streets[element_index][0]),
self.sidewalk_crossings.index(vertices_streets[lower][0])))
elif element_index > 0:
if vertices_streets[element_index][3] == vertices_streets[element_index - 1][3]:
lower = element_index - 1
while lower > 0 and vertices_streets[lower][3] == vertices_streets[lower - 1][3]:
lower -= 1
if vertices_streets[lower][3] == vertices_streets[element_index][3]:
self.sidewalks.append((self.sidewalk_crossings.index(vertices_streets[element_index][0]),
self.sidewalk_crossings.index(vertices_streets[lower][0])))
for street_index in range(len(self.streets)):
crossings = []
for vertex_street in vertices_streets:
if vertex_street[1] == street_index or vertex_street[2] == street_index:
crossings.append(vertex_street)
street_direction = (Vec2d(self.crossings[self.streets[street_index][0]]) -
Vec2d(self.crossings[self.streets[street_index][1]])).normalized()
if street_direction.perpendicular().dot(crossings[0][0] -
Vec2d(self.crossings[self.streets[street_index][0]])) * \
street_direction.perpendicular().dot(crossings[1][0] -
Vec2d(self.crossings[self.streets[street_index][0]])) > 0:
self.sidewalks.append((self.sidewalk_crossings.index(crossings[0][0]),
self.sidewalk_crossings.index(crossings[1][0])))
self.sidewalks.append((self.sidewalk_crossings.index(crossings[2][0]),
self.sidewalk_crossings.index(crossings[3][0])))
elif street_direction.perpendicular().dot(crossings[0][0] -
Vec2d(self.crossings[self.streets[street_index][0]])) * \
street_direction.perpendicular().dot(crossings[2][0] -
Vec2d(self.crossings[self.streets[street_index][0]])) > 0:
self.sidewalks.append((self.sidewalk_crossings.index(crossings[0][0]),
self.sidewalk_crossings.index(crossings[2][0])))
self.sidewalks.append((self.sidewalk_crossings.index(crossings[1][0]),
self.sidewalk_crossings.index(crossings[3][0])))
else:
self.sidewalks.append((self.sidewalk_crossings.index(crossings[0][0]),
self.sidewalk_crossings.index(crossings[3][0])))
self.sidewalks.append((self.sidewalk_crossings.index(crossings[1][0]),
self.sidewalk_crossings.index(crossings[2][0])))
def calculate_lengths(self):
self.streets_length.clear()
for street in self.streets:
self.streets_length.append((Vec2d(self.crossings[street[0]]) - Vec2d(self.crossings[street[1]])).length)
self.sidewalks_length.clear()
for sidewalk in self.sidewalks:
self.sidewalks_length.append((Vec2d(self.sidewalk_crossings[sidewalk[0]]) -
Vec2d(self.sidewalk_crossings[sidewalk[1]])).length)
def calculate_internal_variables(self):
self.generate_sidewalks()
self.generate_matrix()
self.calculate_lengths()
def get_street_direction(self, street_index):
if street_index >= len(self.streets):
return
return (Vec2d(self.crossings[self.streets[street_index][1]]) -
Vec2d(self.crossings[self.streets[street_index][0]])).normalized()
def get_sidewalk_direction(self, sidewalk_index):
if sidewalk_index >= len(self.sidewalks):
return
return (Vec2d(self.sidewalk_crossings[self.sidewalks[sidewalk_index][1]]) -
Vec2d(self.sidewalk_crossings[self.sidewalks[sidewalk_index][0]])).normalized()
def street_first_crossing_index(self, street):
return self.crossings.index(self.crossings[self.streets[street][0]])
def street_second_crossing_index(self, street):
return self.crossings.index(self.crossings[self.streets[street][1]])
def sidewalk_first_crossing_index(self, sidewalk):
return self.sidewalk_crossings.index(self.sidewalk_crossings[self.sidewalks[sidewalk][0]])
def sidewalk_second_crossing_index(self, sidewalk):
return self.sidewalk_crossings.index(self.sidewalk_crossings[self.sidewalks[sidewalk][1]])
def street_first_crossing(self, street):
return self.crossings[self.streets[street][0]]
def street_second_crossing(self, street):
return self.crossings[self.streets[street][1]]
def sidewalk_first_crossing(self, sidewalk):
return self.sidewalk_crossings[self.sidewalks[sidewalk][0]]
def sidewalk_second_crossing(self, sidewalk):
return self.sidewalk_crossings[self.sidewalks[sidewalk][1]]
def draw_back(self):
if self.back_sprite is not None:
self.back_sprite.draw()
pass
def draw_front(self):
if self.front_sprite is not None:
self.front_sprite.draw()
pass
| 55.303665 | 117 | 0.586102 | 10,481 | 0.992237 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.010698 |
0438c8f80da4005db7ce794a5dd25fc90d51a567 | 2,110 | py | Python | custom_components/magic_lights/setup_tasks/create_living_space.py | justanotherariel/hass_MagicLights | 61ac0db1f7c3575e52912b372176d45e647b728e | [
"MIT"
] | null | null | null | custom_components/magic_lights/setup_tasks/create_living_space.py | justanotherariel/hass_MagicLights | 61ac0db1f7c3575e52912b372176d45e647b728e | [
"MIT"
] | null | null | null | custom_components/magic_lights/setup_tasks/create_living_space.py | justanotherariel/hass_MagicLights | 61ac0db1f7c3575e52912b372176d45e647b728e | [
"MIT"
] | null | null | null | from __future__ import annotations
import asyncio
import logging
from custom_components.magic_lights.setup_tasks.task import SetupTask
from custom_components.magic_lights.helpers.service_call import create_async_call
from typing import Dict, Tuple
from custom_components.magic_lights.magicbase.share import get_magic
from custom_components.magic_lights.data_structures.living_space import (
Zone,
Scene,
Pipe,
)
_LOGGER = logging.getLogger(__name__)
def _init_pipe(scene: Scene, conf: dict) -> Pipe:
obj = Pipe()
obj.scene = scene
obj.entities = conf["entities"]
obj.modifier_conf = conf.get("modifiers", {})
obj.effect_conf = conf.get("effect", None)
# Effect conf mandatory
# TODO Check with voluptous.
if not obj.effect_conf:
_LOGGER.warn(
"Pipe in Scene %s in Zone %s has no effect configuration",
scene.name,
scene.zone.name,
)
return obj
def _init_scene(zone, name, conf: dict) -> Scene:
obj = Scene()
obj.name = name
obj.zone = zone
# Init Pipes
for pipe_conf in conf:
obj.pipes.append(_init_pipe(obj, pipe_conf))
# Set unused entities
zone_entities = set(obj.zone.entities)
used_entities = []
for pipe in obj.pipes:
used_entities.append(pipe.entities)
obj.unused_entities = [
entity for entity in zone_entities if entity not in used_entities
]
return obj
def _init_zone(name, conf: dict) -> Zone:
obj = Zone()
obj.name = name
obj.groups = conf["groups"]
obj.entities = conf["entities"]
for scene_name, scene_conf in conf["scenes"].items():
obj.scenes.update({scene_name: _init_scene(obj, scene_name, scene_conf)})
return obj
class Task(SetupTask):
def __init__(self) -> None:
self.magic = get_magic()
self.stage = 0
async def execute(self):
zones: Dict[str, Zone] = {}
for zone_name, zone_conf in self.magic.raw.items():
zones.update({zone_name: _init_zone(zone_name, zone_conf)})
self.magic.living_space = zones
| 24.252874 | 81 | 0.667773 | 351 | 0.166351 | 0 | 0 | 0 | 0 | 234 | 0.1109 | 196 | 0.092891 |
043990336053a8c30d65aae2c3797d6e1d5d4f40 | 2,382 | py | Python | layouts/landing_page.py | nikitcha/ceebios-biowser | d416a026d14dc410f5d1e48190f8a6ba74e97998 | [
"MIT"
] | null | null | null | layouts/landing_page.py | nikitcha/ceebios-biowser | d416a026d14dc410f5d1e48190f8a6ba74e97998 | [
"MIT"
] | null | null | null | layouts/landing_page.py | nikitcha/ceebios-biowser | d416a026d14dc410f5d1e48190f8a6ba74e97998 | [
"MIT"
] | null | null | null | import dash_html_components as html
import dash
app = dash.Dash(__name__)

# Shared inline styles: a wrapping flex row, and a fixed-width text column.
style_div = {'display': 'flex','flex-wrap': 'wrap', 'padding':'20px'}
style_text = {'width': '500px', 'padding':'20px'}

# Row 1: image on the left, intro text with a highlighted phrase on the right.
div_octo = html.Div([
    html.Div([html.Img(src=app.get_asset_url('planet1.jpg'), width="500px")]),
    html.Div([
        html.Span("There are some ", style={'color':'white'}),
        html.Span("4 million different kinds of animals and plants ", style={'color':'#fb2056'}),
        html.Span("in the world.", style={'color':'white'}),
        html.P("Discover the richness of the creatures that surrounds us, where to find them, and why each of them is unique.", style={'color':'white'})
    ], style=style_text)
], style=style_div)

# Row 2: right-aligned quote on the left, image on the right.
div_tukan = html.Div([
    html.Div([
        html.P('"It seems to me that the natural world is the greatest source of excitement; the greatest source of visual beauty; the greatest source of intellectual interest. It is the greatest source of so much in life that makes life worth living."', style={'color':'white', 'text-align':'right'}),
        html.P("- Sir David Attenborough", style={'color':'#fb2056', 'text-align':'right'})
    ], style=style_text),
    html.Div([html.Img(src=app.get_asset_url('planet2.jfif'), width="500px")])
], style=style_div)

# Row 3: image on the left, credits (organizations and data sources) on the right.
div_whale = html.Div([
    html.Div([html.Img(src=app.get_asset_url('planet3.jpg'), width="500px")]),
    html.Div([
        html.Span("Platform created in collaboration between ", style={'color':'white'}),
        html.Span("Data For Good ",style={'color':'#fb2056'}),
        html.Span("and ",style={'color':'white'}),
        html.Span("Ceebios",style={'color':'#fb2056'}),
        html.Span(".",style={'color':'white'}),
        html.Div([
            html.Span("Species data from ",style={'color':'white'}),
            html.Span("GBIF",style={'color':'#fb2056'}),
            html.Span(", scientific publications from ",style={'color':'white'}),
            html.Span("Semantic Scholar Open Corpus",style={'color':'#fb2056'})], style={'display': "inline-block"})
    ], style=style_text)
], style=style_div)

# Page layout: the three rows centered in a fixed-width column over a
# dark blue vertical gradient background.
intro_layout = html.Div([
    html.Div([
        div_octo,
        div_tukan,
        div_whale
    ], style={'margin':'auto', 'width':'1040px'})
], style={"background": 'linear-gradient(180deg, rgba(7,10,32,1) 0%, rgba(23,36,113,1) 100%)','height':'100%', "width":"100%"})
| 47.64 | 302 | 0.626784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,181 | 0.495802 |
043b52518e89dcaaaeab64ad5579462c5d5febe7 | 1,405 | py | Python | examples/network_diagram.py | community-fabric/python-ipfabric-diagrams | 55de2ff36c853f9ed13a804768ee0f0ca5eeb467 | [
"MIT"
] | 1 | 2022-02-14T10:14:25.000Z | 2022-02-14T10:14:25.000Z | examples/network_diagram.py | community-fabric/python-ipfabric-diagrams | 55de2ff36c853f9ed13a804768ee0f0ca5eeb467 | [
"MIT"
] | 9 | 2022-02-08T19:25:47.000Z | 2022-02-16T19:29:41.000Z | examples/network_diagram.py | community-fabric/python-ipfabric-diagrams | 55de2ff36c853f9ed13a804768ee0f0ca5eeb467 | [
"MIT"
] | 2 | 2022-02-01T18:26:31.000Z | 2022-02-02T19:02:36.000Z | """
network_diagram.py
"""
from ipfabric_diagrams import IPFDiagram, Network, NetworkSettings, VALID_NET_PROTOCOLS, Layout
if __name__ == '__main__':
    # Connect to IP Fabric and build a network diagram over two sites.
    ipf = IPFDiagram()
    net = Network(sites=['MPLS', 'LAB01'], all_network=True)
    # Example calls for the JSON and model representations of the diagram
    # (results are not used further here).
    json_data = ipf.diagram_json(net)
    model_data = ipf.diagram_model(net)
    # Render the diagram with default settings.
    with open('tmp/network.png', 'wb') as f:
        f.write(ipf.diagram_png(net))

    # Re-render with xDP hidden; overwrites the previous PNG.
    settings = NetworkSettings()
    settings.hide_protocol('xdp')
    png_data = ipf.diagram_png(net, graph_settings=settings)
    with open('tmp/network.png', 'wb') as f:
        f.write(png_data)

    # Ungroup Layer 3 and export as SVG.
    settings.ungroup_group('Layer 3')
    svg_data = ipf.diagram_svg(net, graph_settings=settings)
    with open('tmp/network.svg', 'wb') as f:
        f.write(svg_data)

    # Fresh settings: ungroup every protocol, hide L1/L2 groups and several
    # protocols, and relabel OSPF edges with their subnet.
    settings = NetworkSettings()
    for proto in VALID_NET_PROTOCOLS:
        settings.ungroup_protocol(proto)
    settings.hide_group('Layer 1')
    settings.hide_group('Layer 2')
    settings.hide_protocol('rib')
    settings.hide_protocol('ldp')
    settings.hide_protocol('ebgp')
    settings.change_label('ospf', 'subnet')

    # Single-site diagram with a radial layout, rendered with the settings above.
    mpls = Network(sites='MPLS', layouts=[Layout(path='MPLS', layout='radial')])
    graph_result = ipf.diagram_model(mpls, graph_settings=settings)
    svg_data = ipf.diagram_svg(mpls, graph_settings=settings)
    with open('tmp/network_edited.svg', 'wb') as f:
        f.write(svg_data)
    ipf.close()
| 32.674419 | 95 | 0.687544 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.158007 |
043b92e9fc3c08c42abcd8a0ffd5711626dda83e | 878 | py | Python | msteams/adaptivecard/containers/fact_set.py | HarshadRanganathan/pyteams | d9ced98281e594b454ab7d98dce5b997d1711c8b | [
"MIT"
] | 6 | 2019-08-09T05:29:25.000Z | 2021-08-02T10:27:51.000Z | msteams/adaptivecard/containers/fact_set.py | HarshadRanganathan/pyteams | d9ced98281e594b454ab7d98dce5b997d1711c8b | [
"MIT"
] | 3 | 2020-03-24T17:06:42.000Z | 2021-02-02T22:11:50.000Z | msteams/adaptivecard/containers/fact_set.py | HarshadRanganathan/pyteams | d9ced98281e594b454ab7d98dce5b997d1711c8b | [
"MIT"
] | 3 | 2019-10-07T21:59:25.000Z | 2021-11-18T09:12:56.000Z | from msteams.adaptivecard.containers.layout import Layout
class FactSet(Layout):
    """
    FactSet element displays a series of facts (i.e. name/value pairs) in a tabular form
    """
    # Key under which the list of facts is stored in the layout dict.
    FACTS = 'facts'

    def __init__(self, spacing=None, separator=None):
        """
        :param spacing: amount of spacing
        :param separator: draw a separating line at the top of the element
        """
        super().__init__('FactSet')
        # Only emit optional attributes that were explicitly provided.
        if spacing is not None:
            self.layout['spacing'] = spacing
        if separator is not None:
            self.layout['separator'] = separator

    def fact(self, fact):
        """Append a fact (key/value pair) to this set.

        :param fact: fact as a key/value pair
        :return: this FactSet, so calls can be chained
        """
        # setdefault creates the facts list on first use (replaces the
        # previous `in self.layout.keys()` membership check).
        self.layout.setdefault(self.FACTS, []).append(fact)
        return self
| 28.322581 | 88 | 0.592255 | 817 | 0.930524 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.392938 |
043caa4a27e42e50eb3c3d0263e33d97780cb0a2 | 6,057 | py | Python | tests/functional/conftest.py | andreakropp/datarobot-user-models | 423ab8c703a545491ad6013a0b7efa3119e2c0fc | [
"Apache-2.0"
] | null | null | null | tests/functional/conftest.py | andreakropp/datarobot-user-models | 423ab8c703a545491ad6013a0b7efa3119e2c0fc | [
"Apache-2.0"
] | null | null | null | tests/functional/conftest.py | andreakropp/datarobot-user-models | 423ab8c703a545491ad6013a0b7efa3119e2c0fc | [
"Apache-2.0"
] | null | null | null | import os
import uuid
import warnings
import datarobot as dr
import pytest
from dr_usertool.datarobot_user_database import DataRobotUserDatabase
from dr_usertool.utils import get_permissions
from tests.drum.constants import TESTS_DATA_PATH, PUBLIC_DROPIN_ENVS_PATH
ENDPOINT_URL = "http://localhost/api/v2"
def dr_usertool_setup():
    """Connect to the DataRobot user database.

    The Mongo host is taken from MONGO_HOST, falling back to HOST, then to
    localhost; surrounding whitespace is stripped.
    """
    fallback_host = os.environ.get("HOST", "127.0.0.1")
    resolved_host = os.environ.get("MONGO_HOST", fallback_host).strip()
    return DataRobotUserDatabase.setup("adhoc", "", mongo_host=resolved_host)
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
    """Create a throwaway DataRobot test user and export its credentials.

    Runs last so other plugins configure first. The user name/token carry a
    random suffix so parallel runs don't collide; credentials are exported
    via DATAROBOT_API_TOKEN / DATAROBOT_ENDPOINT for the datarobot client,
    and the username is stashed on ``config`` for teardown.
    """
    # Original note: "check for skipping setup on xdist master process" —
    # the 'dsession' plugin is only present on the xdist controller, so this
    # branch runs when that plugin is absent; confirm intended direction.
    if not config.pluginmanager.getplugin("dsession"):
        suffix = str(uuid.uuid4().int)
        env, db = dr_usertool_setup()

        # User credentials (randomized per run)
        user_username = "local-custom-model-templates-tests-{}@datarobot.com".format(suffix)
        user_api_token = "lkjkljnm988989jkr5645tv_{}".format(suffix)
        user_permissions = get_permissions("tests/fixtures/user_permissions.json", user_api_token)

        # Add user
        DataRobotUserDatabase.add_user(
            db,
            env,
            user_username,
            invite_code="autogen",
            app_user_manager=False,
            permissions=user_permissions,
            api_token=user_api_token,
            activated=True,
            unix_user="datarobot_imp",
        )

        # Expose credentials to the datarobot client and remember the user
        # for pytest_unconfigure.
        os.environ["DATAROBOT_API_TOKEN"] = user_api_token
        os.environ["DATAROBOT_ENDPOINT"] = ENDPOINT_URL
        config.user_username = user_username
@pytest.hookimpl(trylast=True)
def pytest_unconfigure(config):
    """Delete the test user created in pytest_configure.

    Warnings are silenced around the deletion and then escalated back to
    errors afterwards.
    """
    # Same process guard as in pytest_configure.
    if not config.pluginmanager.getplugin("dsession"):
        warnings.simplefilter("ignore")
        _, db = dr_usertool_setup()
        DataRobotUserDatabase.delete_user(db, config.user_username)
        warnings.simplefilter("error")
def pytest_sessionstart(session):
    """Initialize the global datarobot client with the test user's credentials."""
    dr.Client(endpoint=ENDPOINT_URL, token=os.environ["DATAROBOT_API_TOKEN"])
def _create_drop_in_env(dir_name, env_name, language):
    """Create a DataRobot execution environment plus one version of it.

    Shared implementation for all the drop-in environment fixtures below
    (they previously repeated this body verbatim).

    :param dir_name: directory under PUBLIC_DROPIN_ENVS_PATH holding the env definition
    :param env_name: display name for the execution environment
    :param language: programming language reported to DataRobot
    :return: tuple of (environment id, environment version id)
    """
    env_dir = os.path.join(PUBLIC_DROPIN_ENVS_PATH, dir_name)
    environment = dr.ExecutionEnvironment.create(name=env_name, programming_language=language)
    environment_version = dr.ExecutionEnvironmentVersion.create(environment.id, env_dir)
    return environment.id, environment_version.id


@pytest.fixture(scope="session")
def java_drop_in_env():
    """Java codegen drop-in environment: (env id, version id)."""
    return _create_drop_in_env("java_codegen", "java_drop_in", "java")


@pytest.fixture(scope="session")
def sklearn_drop_in_env():
    """Python3 sklearn drop-in environment: (env id, version id)."""
    return _create_drop_in_env("python3_sklearn", "python3_sklearn", "python")


@pytest.fixture(scope="session")
def xgboost_drop_in_env():
    """Python3 xgboost drop-in environment: (env id, version id)."""
    return _create_drop_in_env("python3_xgboost", "python3_xgboost", "python")


@pytest.fixture(scope="session")
def pytorch_drop_in_env():
    """Python3 pytorch drop-in environment: (env id, version id)."""
    return _create_drop_in_env("python3_pytorch", "python3_pytorch", "python")


@pytest.fixture(scope="session")
def keras_drop_in_env():
    """Python3 keras drop-in environment: (env id, version id)."""
    return _create_drop_in_env("python3_keras", "python3_keras", "python")


@pytest.fixture(scope="session")
def pmml_drop_in_env():
    """Python3 PMML drop-in environment: (env id, version id)."""
    return _create_drop_in_env("python3_pmml", "python3_pmml", "python")


@pytest.fixture(scope="session")
def r_drop_in_env():
    """R drop-in environment: (env id, version id)."""
    return _create_drop_in_env("r_lang", "r_drop_in", "r")


@pytest.fixture(scope="session")
def julia_drop_in_env():
    """Julia MLJ drop-in environment: (env id, version id)."""
    return _create_drop_in_env("julia_mlj", "julia_drop_in", "other")
def _upload_dataset(file_name):
    """Upload a file from TESTS_DATA_PATH to DataRobot and return the dataset id.

    Shared implementation for the testing-data fixtures below (they
    previously repeated this body verbatim).
    """
    dataset = dr.Dataset.create_from_file(
        file_path=os.path.join(TESTS_DATA_PATH, file_name)
    )
    return dataset.id


@pytest.fixture(scope="session")
def binary_testing_data():
    """Dataset id for binary classification training data (iris)."""
    return _upload_dataset("iris_binary_training.csv")


@pytest.fixture(scope="session")
def binary_vizai_testing_data():
    """Dataset id for binary visual-AI training data (cats vs dogs)."""
    return _upload_dataset("cats_dogs_small_training.csv")


@pytest.fixture(scope="session")
def regression_testing_data():
    """Dataset id for regression training data."""
    return _upload_dataset("juniors_3_year_stats_regression.csv")


@pytest.fixture(scope="session")
def multiclass_testing_data():
    """Dataset id for multiclass training data (SkyServer SQL export)."""
    return _upload_dataset("skyserver_sql2_27_2018_6_51_39_pm.csv")


@pytest.fixture(scope="session")
def unstructured_testing_data():
    """Dataset id for unstructured text data."""
    return _upload_dataset("unstructured_data.txt")
| 35.215116 | 100 | 0.746409 | 0 | 0 | 0 | 0 | 5,396 | 0.89087 | 0 | 0 | 942 | 0.155523 |
0441025076cf40e78b85477fb35263fe77deca58 | 5,464 | py | Python | test/units/modules/network/f5/test_bigip_policy.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | test/units/modules/network/f5/test_bigip_policy.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | test/units/modules/network/f5/test_bigip_policy.py | Container-Projects/ansible-provider-docs | 100b695b0b0c4d8d08af362069557ffc735d0d7e | [
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_policy import Parameters
from library.modules.bigip_policy import ModuleManager
from library.modules.bigip_policy import SimpleManager
from library.modules.bigip_policy import ComplexManager
from library.modules.bigip_policy import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_policy import Parameters
from ansible.modules.network.f5.bigip_policy import ModuleManager
from ansible.modules.network.f5.bigip_policy import SimpleManager
from ansible.modules.network.f5.bigip_policy import ComplexManager
from ansible.modules.network.f5.bigip_policy import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters_none_strategy(self):
args = dict(
name='foo',
description='asdf asdf asdf',
password='password',
server='localhost',
user='admin'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy is None
def test_module_parameters_with_strategy_no_partition(self):
args = dict(
name='foo',
description='asdf asdf asdf',
password='password',
server='localhost',
strategy='foo',
user='admin',
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy == '/Common/foo'
def test_module_parameters_with_strategy_partition(self):
args = dict(
name='foo',
description='asdf asdf asdf',
password='password',
server='localhost',
strategy='/Common/foo',
user='admin',
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy == '/Common/foo'
def test_module_parameters_with_strategy_different_partition(self):
args = dict(
name='foo',
description='asdf asdf asdf',
password='password',
server='localhost',
strategy='/Foo/bar',
user='admin',
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy == '/Foo/bar'
def test_api_parameters(self):
args = dict(
name='foo',
description='asdf asdf asdf',
strategy='/Common/asdf'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.description == 'asdf asdf asdf'
assert p.strategy == '/Common/asdf'
class TestSimpleTrafficPolicyManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_policy(self, *args):
set_module_args(dict(
name="Policy-Foo",
state='present',
strategy='best',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = SimpleManager(module=module, params=module.params)
tm.exists = Mock(return_value=False)
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=True)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
| 32.141176 | 91 | 0.64202 | 3,247 | 0.594253 | 0 | 0 | 0 | 0 | 0 | 0 | 876 | 0.160322 |
04411ddb9c08843c7fce53cffbe76f07e4b16489 | 58 | py | Python | experiments/spooky-mouse.py | Nithanaroy/invisible-pen | d6e69e20866b2992b5c2c5b47d623a693bed3134 | [
"MIT"
] | null | null | null | experiments/spooky-mouse.py | Nithanaroy/invisible-pen | d6e69e20866b2992b5c2c5b47d623a693bed3134 | [
"MIT"
] | 6 | 2020-07-19T07:33:33.000Z | 2022-03-27T04:52:28.000Z | experiments/spooky-mouse.py | Nithanaroy/invisible-pen | d6e69e20866b2992b5c2c5b47d623a693bed3134 | [
"MIT"
] | null | null | null | import pyautogui
pyautogui.moveTo(2317, 425, duration=1)
| 14.5 | 39 | 0.793103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
04428cad6e41d07f787bb5f233146087def3d4cc | 50 | py | Python | __main__.py | wwakabobik/openweather_pws | bda08b550982b7e3d797a57c23ae97d4d4ececf7 | [
"MIT"
] | null | null | null | __main__.py | wwakabobik/openweather_pws | bda08b550982b7e3d797a57c23ae97d4d4ececf7 | [
"MIT"
] | null | null | null | __main__.py | wwakabobik/openweather_pws | bda08b550982b7e3d797a57c23ae97d4d4ececf7 | [
"MIT"
] | null | null | null | from openweather_pws import Station, Measurements
| 25 | 49 | 0.88 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
044384aad57c8b47c884459c4de588bd7b7bdff9 | 2,731 | py | Python | main.py | JakeSichley/Discord-Bot | 4fd968a0e588f8ea3ea5a8e6636b0d1579eb974b | [
"MIT"
] | 1 | 2021-05-13T01:57:16.000Z | 2021-05-13T01:57:16.000Z | main.py | JakeSichley/Discord-Bot | 4fd968a0e588f8ea3ea5a8e6636b0d1579eb974b | [
"MIT"
] | null | null | null | main.py | JakeSichley/Discord-Bot | 4fd968a0e588f8ea3ea5a8e6636b0d1579eb974b | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2021 Jake Sichley
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from os import getenv
from sys import version
from dotenv import load_dotenv
from dreambot import DreamBot
import logging
import discord
def main() -> None:
    """Entry point: configure logging, read environment settings and start the bot.

    Required environment variables: DISCORD_TOKEN, OWNER_ID, DATABASE.
    Optional: PREFIX, ENVIRONMENT, STATUS_TYPE, STATUS_TEXT, DISABLED_COGS,
    GITHUB_USER, GITHUB_REPO, GITHUB_TOKEN.
    """
    print(f'Current Python Version: {version}')
    print(f'Current Discord Version: {discord.__version__}')

    logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(levelname)s:%(name)s: %(message)s',
                        datefmt='%I:%M %p on %A, %B %d, %Y')

    load_dotenv()

    # required settings; int(None) raises early (and loudly) when OWNER_ID is unset
    token = getenv('DISCORD_TOKEN')
    owner = int(getenv('OWNER_ID'))
    prefix = getenv('PREFIX', '>')
    database = getenv('DATABASE')
    environment = getenv('ENVIRONMENT', 'DEV')

    # optional presence settings
    options = {
        'status_type': discord.ActivityType(int(getenv('STATUS_TYPE', 1))),
        'status_text': getenv('STATUS_TEXT')
    }

    # explicitly disabled cogs: explicit None check (LBYL) instead of
    # catching the AttributeError raised by None.split(',')
    disabled_cogs = getenv('DISABLED_COGS')
    if disabled_cogs is not None:
        options['disabled_cogs'] = disabled_cogs.split(',')

    # git integration is enabled only when all three values are present
    git_options = {
        'git_user': getenv('GITHUB_USER'),
        'git_repo': getenv('GITHUB_REPO'),
        'git_token': getenv('GITHUB_TOKEN')
    }
    if all(git_options.values()):
        options['git'] = git_options

    # specify intents (members requires explicit opt-in via dev portal)
    intents = discord.Intents(guilds=True, members=True, bans=True, emojis=True, voice_states=True, messages=True,
                              reactions=True)

    dream_bot = DreamBot(intents, database, prefix, owner, environment, options=options)
    dream_bot.run(token)


# Run the bot
if __name__ == '__main__':
    main()
| 32.129412 | 114 | 0.696814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,644 | 0.601977 |
04446591d618aa90ff9944a3d3bb5ea29803ace3 | 415 | py | Python | tests/test_lighting.py | HighCWu/neural-renderer-paddle | c5c8375b0400a0b7722ab893e46ca706153b5a43 | [
"MIT"
] | 14 | 2021-12-11T10:37:14.000Z | 2022-01-04T05:34:59.000Z | tests/test_lighting.py | HighCWu/neural-renderer-paddle | c5c8375b0400a0b7722ab893e46ca706153b5a43 | [
"MIT"
] | null | null | null | tests/test_lighting.py | HighCWu/neural-renderer-paddle | c5c8375b0400a0b7722ab893e46ca706153b5a43 | [
"MIT"
] | 1 | 2021-12-17T05:30:24.000Z | 2021-12-17T05:30:24.000Z | import unittest
import paddle
import neural_renderer_paddle as nr
class TestLighting(unittest.TestCase):
    """Smoke test for neural_renderer_paddle's lighting operation."""

    def test_case1(self):
        """The lighting op should execute on random faces and textures."""
        face_batch = paddle.randn([64, 16, 3, 3], dtype=paddle.float32)
        texture_batch = paddle.randn([64, 16, 8, 8, 8, 3], dtype=paddle.float32)
        nr.lighting(face_batch, texture_batch)


if __name__ == '__main__':
    unittest.main()
| 20.75 | 75 | 0.653012 | 294 | 0.708434 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.110843 |
044584bfb16a9cd74a91b6362bd86d9a6c685e01 | 4,709 | py | Python | InterpretationTechniques/featureExamination/iceForPredictor.py | HelenaMaria112/PredictionInterpreter | d87f0386114da76df8c84025d4a23ea04dd142d4 | [
"MIT"
] | 1 | 2020-04-17T08:42:05.000Z | 2020-04-17T08:42:05.000Z | InterpretationTechniques/featureExamination/iceForPredictor.py | HelenaMaria112/PredictionInterpreter | d87f0386114da76df8c84025d4a23ea04dd142d4 | [
"MIT"
] | null | null | null | InterpretationTechniques/featureExamination/iceForPredictor.py | HelenaMaria112/PredictionInterpreter | d87f0386114da76df8c84025d4a23ea04dd142d4 | [
"MIT"
] | null | null | null | '''
Created on 04.10.2019
@author: areb
'''
import pycebox.ice as pIce
from Connections.predictor import *
import matplotlib.pyplot as plt
from InterpretationTechniques.PlotAndShow import *
# https://github.com/AustinRochford/PyCEbox/blob/master/pycebox/ice.py
def plotIce(data, pr):
    """Compute and save Individual Conditional Expectation (ICE) plots.

    For every single (non-tuple) column combination produced by the
    predictor, one ICE curve per dataset plus the PDP (last line, drawn
    in red) is plotted and the figure is written out via save().

    :param data: pandas dataframe with datasets where each row represents a dataset
    :param pr: Predictor of the ML system; must expose resultColumn,
        listOfNumericalColumns, encode(), unsortedColumnCombinations(),
        predict() and encodingDictionary
    """
    # predictions should return the distance to the hyperplane, not a class label
    pr.setReturnDistanceOfClass(True)
    resultColumnName = pr.resultColumn
    # stringify numeric columns (rounded to 2 decimals) so encode() can label them
    for i in pr.listOfNumericalColumns:
        data[i]= data[i].astype(float).round(2).astype(str)
    data = pr.encode(data)
    columnCombinations = pr.unsortedColumnCombinations(data, resultColumnName)
    for columnCombination in columnCombinations:
        # tuples are column pairs; ICE is only produced for single columns
        if not isinstance(columnCombination, tuple):
            iceResult = pIce.ice(data, columnCombination, pr.predict, num_grid_points=None)
            ax = pIce.ice_plot(iceResult, frac_to_plot=1.,
                           plot_points=True, point_kwargs=None,
                           x_quantile=False, plot_pdp=True,
                           centered=False, centered_quantile=0.,
                           color_by=None, cmap=None,
                           ax=None, pdp_kwargs=None)
            ax.set_ylabel("Distance to Hyperplane of true result")
            ax.set_xlabel(columnCombination)
            ax.set_title("ICE for " + columnCombination)
            # label each ICE curve; the last line ice_plot adds is the PDP
            lines = ax.lines
            for lineIndex in range(len(lines)):
                lines[lineIndex].set_label("Dataset "+str(lineIndex))
            lines[len(lines)-1].set_label("Pdp")
            #ax.legend(loc='upper left', bbox_to_anchor=(1, 1))
            # draw every curve thin and black, then highlight the PDP in red
            for line in ax.lines:
                line.set_color("k")
                line._linewidth = 0.5
            lines[-1].linewidth=1
            lines[-1].set_color("r")
            # x ticks: decoded category labels (index 0 of the encoding is skipped)
            xValues = pr.encodingDictionary[columnCombination]
            ax.set_xticks(np.arange(1, len(xValues), 1))
            ax.set_xticklabels(xValues[1:])
            ax.tick_params(axis='both', which='major', labelsize=6)
            ax.tick_params(axis='both', which='minor', labelsize=6)
            plt.xticks(rotation=90)
            saveName="ice"+str(columnCombination)
            save(saveName, plt=plt)
def bspSammlung(data):
    """Write example predictions with swapped column values to 'bspDaten.txt'.

    Samples three datasets twice and, for the columns 'brutto' and then
    'text', builds all nine row/value combinations, predicts them and
    dumps a truncated text table. As in the original implementation the
    second pass overwrites the file, so only the 'text' variant survives.

    :param data: pandas dataframe with one dataset per row
    """
    pr = Predictor(returnDistanceOfClass=True)
    _write_swapped_examples(data.sample(3), pr, "brutto")
    _write_swapped_examples(data.sample(3), pr, "text")


def _write_swapped_examples(d, pr, column):
    """Build the 3x3 grid of rows of *d* with *column* values swapped,
    predict each combination and write a dump (cells truncated to 50
    characters) to 'bspDaten.txt', overwriting any previous content.

    :param d: dataframe with exactly three sampled rows
    :param pr: predictor used to classify the combinations
    :param column: name of the column whose values are swapped
    """
    import pandas as pd
    y = pd.DataFrame(index=range(9), columns=d.columns)
    for i in range(len(d)):
        for j in range(3):
            # row i combined with the column value of row j
            y.iloc[3 * i + j] = d.iloc[i]
            y.iloc[3 * i + j].loc[column] = d.iloc[j].loc[column]
    # direct column assignment instead of the original chained
    # y.loc[:]["gknto"] = ..., which can silently write to a copy
    y["gknto"] = pr.predict(y).astype(str)
    ystr = ""
    for r in range(len(y)):
        for c in range(len(y.columns)):
            ystr = ystr + y.iloc[r, c][0:min(50, len(y.iloc[r, c]))] + " \n"
        ystr = ystr + "\n"
    # context manager guarantees the file is closed even on errors
    with open("bspDaten.txt", "w") as f:
        f.write(ystr)
def writedataToFileWPredictionhead3(data, pr):
    """Write predictions for the first three datasets with swapped 'text'
    values to 'bspDaten.txt'.

    Builds the nine combinations of the first three rows where each row
    gets each of the three 'text' values in turn, appends the predicted
    result column and writes a truncated (at most 10 chars per cell) dump.

    :param data: pandas dataframe with one dataset per row
    :param pr: Predictor used to classify the combinations
    """
    d = data[:3]
    y = pd.DataFrame(index=range(9), columns=d.columns)
    for i in range(len(d)):
        for j in range(3):
            # row i combined with the 'text' value of row j
            y.iloc[3 * i + j] = d.iloc[i]
            y.iloc[3 * i + j].loc["text"] = d.iloc[j].loc["text"]
    y["Result"] = pr.predict(y).astype(str)
    ystr = ""
    for r in range(len(y)):
        for c in range(len(y.columns)):
            # original truncation kept: at most min(10, len-1) characters per cell
            ystr = ystr + y.iloc[r, c][:min(10, len(y.iloc[r, c]) - 1)] + " \n"
        ystr = ystr + "\n"
    # context manager replaces the open()/close() pair that leaked on errors
    with open("bspDaten.txt", "w") as f:
        f.write(ystr)
0447af988a6ba77384680fe4e01e6a7e24dba0af | 2,253 | py | Python | src/ch3/generatefeedvector.py | amolnayak311/Programming-Collective-Intelligence | eaa55c3989a8d36e7b766fbaba267b4cbaedf5be | [
"Apache-2.0"
] | null | null | null | src/ch3/generatefeedvector.py | amolnayak311/Programming-Collective-Intelligence | eaa55c3989a8d36e7b766fbaba267b4cbaedf5be | [
"Apache-2.0"
] | null | null | null | src/ch3/generatefeedvector.py | amolnayak311/Programming-Collective-Intelligence | eaa55c3989a8d36e7b766fbaba267b4cbaedf5be | [
"Apache-2.0"
] | null | null | null | '''
Created on Sep 4, 2015
@author: Amol
'''
import re
from collections import Counter
from itertools import groupby

from feedparser import parse
def getwords(html):
    """Strip HTML tags from *html* and return the remaining lowercase words.

    :param html: raw HTML markup (a string)
    :return: list of lowercase alphabetic words, empty strings removed
    """
    # Remove anything between '<' and '>' (HTML tags).
    txt = re.compile(r'<[^>]+>').sub('', html)
    # Split on runs of non-letters. The original pattern r'[^A-Z^a-z]+'
    # contained a stray '^' inside the class, accidentally treating '^'
    # as a letter; fixed so '^' is a separator like any other symbol.
    words = re.compile(r'[^A-Za-z]+').split(txt)
    return [word.lower() for word in words if word != '']
#Short implementation to count words, however, the performance is not something I have benchmarked
#As this includesm sort, groupby and len(list(group))
def getwordcounts(url):
d = parse(url)
print "Getting feed from URL %s" % url
feed_map = d['feed']
if 'title' in feed_map:
res = [getwords(e['title'] + ' ' + e['summary' if 'summary' in e else 'description']) for e in d['entries']]
word_count_map = dict((key, len(list(group))) for key, group in groupby(sorted([word for l in res for word in l])))
return feed_map['title'], word_count_map
else:
print "Warn: Unable to access data from feed %s" % url
#Special handling for some URLs not found or Forbidden
return 'NA', {}
# TODO: Clean implementation of the following code
# TODO: Experiment using Stopword filters
# NOTE: Python 2 script (print statements, file() builtin).
# apcount: number of blogs each word appears in; wordcount: per-blog counts.
apcount = {}
wordcount = {}
feedlist = 0
for feedurl in file('feedlist.txt'):
    title, wc = getwordcounts(feedurl)
    # skip feeds that could not be fetched ('NA') or yielded no words
    if title != 'NA' and len(wc) > 0:
        feedlist += 1
        wordcount[title] = wc
        for word, count in wc.items():
            apcount.setdefault(word, 0)
            # a blog counts for a word only when the word occurs more than once in it
            if count > 1:
                apcount[word] += 1
print "Retrieved and parsed all words from the List of Blogs"
# keep mid-frequency words: present in >10% but <50% of the blogs
wordlist = []
for w,bc in apcount.items():
    frac = float(bc) / float(feedlist)
    if frac > 0.1 and frac < 0.5: wordlist.append(w)
print "Writing to blogdata.txt"
# write a tab-separated matrix: one row per blog, one column per kept word
out = file('blogdata.txt', 'w')
out.write('Blog')
for word in wordlist: out.write('\t%s'% word)
out.write('\n')
for blog, wc in wordcount.items():
    out.write(blog)
    for word in wordlist:
        word_count = wc[word] if word in wc else 0
        out.write("\t%d" % word_count)
    out.write("\n")
out.close()
print "Successfully written to blogdata.txt"
#Note that the output generated will not match the one given by the author. Some of the URLs dont even work now | 31.732394 | 127 | 0.637816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 842 | 0.373724 |
0447f89b16924c658675e5b392cea181645339b7 | 2,904 | py | Python | examples/withRaycing/01_SynchrotronSources/U32TaperedScan.py | adinatan/xrt | 75b884c0cba7e1aac15b30f2d0d803597328a208 | [
"MIT"
] | null | null | null | examples/withRaycing/01_SynchrotronSources/U32TaperedScan.py | adinatan/xrt | 75b884c0cba7e1aac15b30f2d0d803597328a208 | [
"MIT"
] | null | null | null | examples/withRaycing/01_SynchrotronSources/U32TaperedScan.py | adinatan/xrt | 75b884c0cba7e1aac15b30f2d0d803597328a208 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "Roman Chernikov"
__date__ = "08 Mar 2016"
#import pickle
import numpy as np
#import matplotlib.pyplot as plt
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import xrt.backends.raycing as raycing
import xrt.backends.raycing.sources as rs
import xrt.backends.raycing.screens as rsc
import xrt.backends.raycing.run as rr
#import xrt.backends.raycing.materials as rm
import xrt.plotter as xrtp
import xrt.runner as xrtr
showIn3D = False  # when True, main() opens the interactive glow viewer instead of tracing
prefix = 'taper_'  # file-name prefix for saved plot images
xlimits = [-5.0, 5.0]  # screen x-axis limits [mm]
zlimits = [-2.5, 2.5]  # screen z-axis limits [mm]
E0 = 10200  # central photon energy [eV]
dEw = 800  # half-width of the scanned energy window [eV]
dE = 0.5  # energy half-step [eV]; NOTE(review): plot_generator hard-codes 0.5 instead of using this
eMin, eMax = E0-dEw, E0+dEw
elimits = [eMin, eMax]  # energy axis limits for the plots
def build_beamline(nrays=5e5):
    """Assemble the beamline: a tapered P06 undulator plus one screen at 75 m.

    :param nrays: number of rays generated per shine() call
    :return: the configured raycing.BeamLine
    """
    beamLine = raycing.BeamLine()
    rs.Undulator(
        beamLine, 'P06', nrays=nrays, eEspread=0.0011,
        eSigmaX=34.64, eSigmaZ=6.285, eEpsilonX=1., eEpsilonZ=0.01,
        period=31.4, K=2.1392-0.002, n=63, eE=6.08, eI=0.1, xPrimeMax=1.5e-2,
        zPrimeMax=1.5e-2, eMin=eMin, eMax=eMax, distE='BW',
        xPrimeMaxAutoReduce=False, zPrimeMaxAutoReduce=False,
        uniformRayDensity=True,
        # targetOpenCL='CPU',  # uncomment to compute the source on OpenCL
        taper=(1.09, 11.254))
    beamLine.fsm1 = rsc.Screen(beamLine, 'FSM1', (0, 75000, 0))
    return beamLine
def run_process(beamLine):
    """Trace one pass: shine the undulator and expose the screen.

    :param beamLine: the beamline built by build_beamline()
    :return: dict mapping beam names to beam objects consumed by the plots
    """
    beamSource = beamLine.sources[0].shine()
    beamFSM1 = beamLine.fsm1.expose(beamSource)
    outDict = {'beamSource': beamSource,
               'beamFSM1': beamFSM1}
    if showIn3D:
        beamLine.prepare_flow()
    return outDict
# register the tracing routine with xrt's run framework
rr.run_process = run_process
def define_plots(beamLine):
    """Create the total-flux plot for screen FSM1.

    :param beamLine: the beamline built by build_beamline()
    :return: tuple (plots, plotsE) -- all plots and the energy-resolved subset
    """
    plots = []
    plotsE = []
    xaxis = xrtp.XYCAxis(r'$x$', 'mm', limits=xlimits, bins=512, ppb=1)
    yaxis = xrtp.XYCAxis(r'$z$', 'mm', limits=zlimits, bins=256, ppb=1)
    # color axis: photon energy, centered on E0
    caxis = xrtp.XYCAxis('energy', 'eV', limits=elimits,
                         offset=E0, bins=256, ppb=1)
    plot = xrtp.XYCPlot(
        'beamFSM1', (1,), xaxis=xaxis, yaxis=yaxis, caxis=caxis,
        aspect='auto', title='total flux', ePos=1)
    plot.baseName = prefix + '1TotalFlux - nearAxis - s'
    plot.saveName = plot.baseName + '.png'
    plots.append(plot)
    plotsE.append(plot)
    for plot in plots:
        plot.fluxFormatStr = '%.2p'
    return plots, plotsE
def plot_generator(plots, beamLine):
    """Scan the undulator through the energy window in 161 steps.

    For each energy in [E0-dEw, E0+dEw], narrow the source bandwidth to
    1 eV around that energy, retitle every plot accordingly and yield so
    the ray tracer runs one pass per step.
    """
    energies = np.linspace(E0 - dEw, E0 + dEw, 161)
    for energy in energies:
        source = beamLine.sources[0]
        source.E_max = energy + 0.5
        source.E_min = energy - 0.5
        for plot in plots:
            plot.title = str(energy) + " eV"
            plot.saveName = plot.title + ".png"
        yield
def main():
    """Build the beamline, then either show it in 3D or run the energy scan."""
    beamLine = build_beamline()
    if showIn3D:
        beamLine.glow()
    else:
        plots, plotsE = define_plots(beamLine)
        # 20 repeats per energy step, driven by plot_generator
        xrtr.run_ray_tracing(plots, repeats=20, beamLine=beamLine,
                             generator=plot_generator, threads=4,
                             globalNorm=1)
if __name__ == '__main__':
    main()
| 28.470588 | 82 | 0.626033 | 0 | 0 | 316 | 0.108815 | 0 | 0 | 0 | 0 | 365 | 0.125689 |
044827b9d07d435e78e2b33c9bff2fa4f98ef026 | 1,277 | py | Python | Python_10_Plot_Bokeh_Candlestick.py | rogerolowski/SimpleStockAnalysisPython | 758e04b28eda4b8eb4124f4b1e0ed493b0f93106 | [
"MIT"
] | 195 | 2019-04-19T16:52:22.000Z | 2022-03-28T12:16:12.000Z | Python_10_Plot_Bokeh_Candlestick.py | rogerolowski/SimpleStockAnalysisPython | 758e04b28eda4b8eb4124f4b1e0ed493b0f93106 | [
"MIT"
] | null | null | null | Python_10_Plot_Bokeh_Candlestick.py | rogerolowski/SimpleStockAnalysisPython | 758e04b28eda4b8eb4124f4b1e0ed493b0f93106 | [
"MIT"
] | 72 | 2019-05-02T12:30:30.000Z | 2022-03-25T07:11:09.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 08:09:11 2020
@author: Tin
"""
# Plot Candlestick in bokeh
import pandas as pd # Dataframe Library
from math import pi
from bokeh.plotting import figure, show, output_file
pd.set_option('max_columns', None) # To show all columns
import yfinance as yf
yf.pdr_override()  # route pandas_datareader downloads through yfinance
# input: ticker symbol and date range to download
symbol = 'AAPL'
start = '2019-12-01'
end = '2020-01-01'
# dataframe: daily OHLC data from Yahoo Finance
df = yf.download(symbol,start,end)
df["Date"] = pd.to_datetime(df.index)
# NOTE(review): mids and spans are computed but never used below
mids = (df['Open'] + df['Adj Close'])/2
spans = abs(df['Adj Close']-df['Open'])
# masks for up days (close above open) and down days
inc = df['Adj Close'] > df['Open']
dec = df['Open'] > df['Adj Close']
w = 12*60*60*1000 # candle body width: half day in ms
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p = figure(x_axis_type="datetime", tools=TOOLS, plot_width=1000, title = symbol + " Candlestick")
p.xaxis.major_label_orientation = pi/4
p.grid.grid_line_alpha=0.3
# wicks (high-low segments), then colored candle bodies for up/down days
p.segment(df.Date, df.High, df.Date, df.Low, color="black")
p.vbar(df.Date[inc], w, df.Open[inc], df['Adj Close'][inc], fill_color="#D5E1DD", line_color="black")
p.vbar(df.Date[dec], w, df.Open[dec], df['Adj Close'][dec], fill_color="#F2583E", line_color="black")
output_file("candlestick.html", title= symbol + " candlestick")
show(p) # open a browser | 26.604167 | 102 | 0.660141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 478 | 0.374315 |
044b50d8db56607e7050551dc8a60c8b30177833 | 7,124 | py | Python | elephant.py | polewskm/lvl1-elephant | 511c9248762e108342f6cc0e8b48a1118c182538 | [
"Apache-2.0"
] | null | null | null | elephant.py | polewskm/lvl1-elephant | 511c9248762e108342f6cc0e8b48a1118c182538 | [
"Apache-2.0"
] | null | null | null | elephant.py | polewskm/lvl1-elephant | 511c9248762e108342f6cc0e8b48a1118c182538 | [
"Apache-2.0"
] | null | null | null | #
# elephant.py
#
from threading import Thread
from adafruit_servokit import ServoKit
from gpiozero import LED,Button,Servo
import time
import random
import board
import neopixel
import subprocess
import os.path
print("Initializing...")
# GPIO pin assignments
PIN_LEFT_EYE = 17
PIN_LEFT_BROW = 23
PIN_RIGHT_EYE = 27
PIN_RIGHT_BROW = 24
PIN_SENSOR = 25
PIN_BUTTON = 22
# TODO: choose a random WAV file from subfolder
# otherwise you will always be rick rolled
soundFile = "/home/pi/rickroll.wav"
# if this file exists, the program will exit;
# this is how we stop the program gracefully
stopFile = "/home/pi/elephant.stop"
keepRunning = True
servoEarRight = 0 # servo channel for the right ear (original comment said "left" -- TODO confirm wiring)
servoEarLeft = 15 # servo channel for the left ear (original comment said "right" -- TODO confirm wiring)
kit = ServoKit(channels=16)
leftEye = LED(PIN_LEFT_EYE)
leftBrow = LED(PIN_LEFT_BROW)
rightEye = LED(PIN_RIGHT_EYE)
rightBrow = LED(PIN_RIGHT_BROW)
sensorInput = Button(PIN_SENSOR, pull_up=False)
buttonInput = Button(PIN_BUTTON, pull_up=False)
neoCount = 10 # the number of pixels in the strip
neoDelay = 0.5 # the amount of time to wait before changing effects
# change the NeoPixel pin in the next line
neoPixels = neopixel.NeoPixel(board.D10, neoCount)
servoValue = 0.0 # the current servo angle
servoMin = 90 # the minimum allowable servo angle, depends on library
servoMax = 180 # the maximum allowable servo angle, depends on library
servoDirection = 1.0 # will always be positive 1 or negative 1
servoStep = 10.0 # the amount the servo moves in each iteration
servoDelay = 0.05 # the amount to wait before moving the servo in each iteration
servoEnabled = False # the button will toggle this value
browState = 0 # 0 is off and 1 is on, the loop changes this value
eyeState = 0 # 0 is off and 1 is on, the loop changes this value
eyeDelay = 0 # see below, when state=1 the delay is long, when state=0 the delay is short
eyeDelayOn = 6 # the amount of seconds the eyes are on
eyeDelayOff = 1 # the amount of seconds the eyes are off
buttonDelay = 10 # the amount of time the servo is enabled after button press
# turn off all the LEDs
leftEye.off()
leftBrow.off()
rightEye.off()
rightBrow.off()
# start any process that exits immediately so that the omxprocess variable is defined for down below
omxprocess = subprocess.Popen(['/bin/false'], stdin=subprocess.PIPE, stdout=None, stderr=None, bufsize=0)
# power-on self test: display all GREEN ...
neoPixels.fill((0, 255, 0))
neoPixels.show()
time.sleep(1)
# ... then all RED
neoPixels.fill((255, 0, 0))
neoPixels.show()
time.sleep(1)
def wheel(pos):
    """Map a position 0-255 onto an (r, g, b) color on the color wheel.

    The wheel fades green->red, red->blue and blue->green over three
    85-step segments; positions outside 0-255 yield black.
    """
    if not 0 <= pos <= 255:
        return (0, 0, 0)
    if pos < 85:
        # green -> red segment
        return (int(pos * 3), int(255 - pos * 3), 0)
    if pos < 170:
        # red -> blue segment
        offset = pos - 85
        return (int(255 - offset * 3), 0, int(offset * 3))
    # blue -> green segment
    offset = pos - 170
    return (0, int(offset * 3), int(255 - offset * 3))
# startup animation: one full rainbow sweep across the strip
for j in range(255):
    for i in range(neoCount):
        pixel_index = (i * 256 // neoCount) + j
        neoPixels[i] = wheel(pixel_index & 255)
    neoPixels.show()
    time.sleep(0.001)
# The main loop is implemented as a timed-loop that enables certain
# behaviors at certain intervals. These intervals are maintained by
# remembering when the last behavior occurred (in seconds) and only
# enabling the next behavior when a certain amount of time has surpassed.
# To accomplish this, we check the current time vs the behavior time,
# see the main loop for more details.
currentTime = time.perf_counter()
# per-behavior timestamps of the last run
neoTime = currentTime
eyeTime = currentTime
servoTime = currentTime
buttonTime = currentTime
def color_chase(color, wait, dir):
    """Light the strip one pixel at a time in *color*.

    :param color: (r, g, b) tuple
    :param wait: seconds to sleep between pixels
    :param dir: 1 fills from pixel 0 upward, -1 fills from the far end down
    """
    for i in range(neoCount):
        if dir == -1:
            # mirror the index so the chase runs from the far end
            i = neoCount - i - 1
        neoPixels[i] = color
        time.sleep(wait)
        neoPixels.show()
    time.sleep(0.05)
# chase palette
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
# DESIGN: we control the neopixels from a separate thread
# because the main loop is implemented as a timed-loop
# (i.e. minimize usage of sleep calls) vs a busy-wait-loop
# (i.e. use sleep to control timing). A busy-wait-loop is always
# inefficient so therefore dedicate a separate thread for that.
class NeoManager:
    """Drives the NeoPixel chase animation on a background thread."""

    def run(self):
        """Cycle color chases until the module-level keepRunning flag drops."""
        while keepRunning:
            color_chase(RED, 0.05, 1)
            color_chase(YELLOW, 0.05, -1)
            color_chase(GREEN, 0.05, 1)
            color_chase(CYAN, 0.05, -1)
            color_chase(BLUE, 0.05, 1)
            color_chase(PURPLE, 0.05, -1)
        print("NeoPixel thread exiting")
# start the NeoPixel thread
neoManager = NeoManager()
neoThread = Thread(target=neoManager.run)
neoThread.start()
# wheeeeee......
print("Main loop starting")
while keepRunning:
    currentTime = time.perf_counter()
    # toggle the eyebrows from the motion sensor
    if sensorInput.value:
        if browState == 0:
            print("Enable Brows")
            browState = 1
            leftBrow.on()
            rightBrow.on()
    else:
        if browState == 1:
            print("Disable Brows")
            browState = 0
            leftBrow.off()
            rightBrow.off()
    # enable the servo for a period of time after the button is pressed
    if buttonInput.value:
        if not servoEnabled:
            print("Servo enabled")
            # audio is played by starting the omxplayer in a child process
            # but we protect our selves from playing again while already
            # playing by checking the exitcode of the process. If the result
            # of poll() is None then the audio is still playing.
            if not omxprocess.poll() is None:
                print("Playing sound")
                omxprocess = subprocess.Popen(['omxplayer', '--adev', 'local', '--vol', '90', soundFile], stdin=subprocess.PIPE, stdout=None, stderr=None, bufsize=0)
            servoEnabled = True
            buttonTime = currentTime
    elif servoEnabled and currentTime > (buttonTime + buttonDelay):
        print("Servo disabled")
        servoEnabled = False
    # toggle the eyes periodically, on-time is long, off-time is short
    if currentTime > (eyeTime + eyeDelay):
        eyeTime = currentTime
        if eyeState == 0:
            print("Enable Eyes")
            eyeState = 1
            eyeDelay = eyeDelayOn
            leftEye.on()
            rightEye.on()
        else:
            print("Disable Eyes")
            eyeState = 0
            eyeDelay = eyeDelayOff
            leftEye.off()
            rightEye.off()
    # if enabled, move the servos
    if servoEnabled and currentTime > (servoTime + servoDelay):
        servoTime = currentTime
        # calculate the next servo value
        servoValue = servoValue + (servoStep * servoDirection)
        # check for allowable maximum value
        if servoValue >= servoMax:
            servoValue = servoMax
            servoDirection = -1.0
        # check for allowable minimum value
        elif servoValue <= servoMin:
            servoValue = servoMin
            servoDirection = 1.0
        print(f"Servo Value = {servoValue}")
        # for the left servo we subtract from 180
        # because it's mounted mirror-opposite of the right side
        kit.servo[servoEarLeft].angle = 180 - servoValue
        kit.servo[servoEarRight].angle = servoValue
    # if the stop file exists, then thats an instruction to exit the program
    keepRunning = not os.path.isfile(stopFile)
# bye bye...
print("Main loop exiting")
# if the audio is still playing, instruct the player to quit
# NOTE(review): `== None` should idiomatically be `is None`; kept byte-identical here
if omxprocess.poll() == None:
    print("Attempting to stop audio")
    omxprocess.stdin.write(b'q')
| 29.683333 | 153 | 0.711538 | 276 | 0.038742 | 0 | 0 | 0 | 0 | 0 | 0 | 3,071 | 0.431078 |
044bb9506b68dc9263b143fd71b62d2aa484539b | 10,954 | py | Python | 0.17/_downloads/8c453dbbabf4b225611c41642ea9b1d5/plot_morph_stc.py | drammock/mne-tools.github.io | 5d3a104d174255644d8d5335f58036e32695e85d | [
"BSD-3-Clause"
] | null | null | null | 0.17/_downloads/8c453dbbabf4b225611c41642ea9b1d5/plot_morph_stc.py | drammock/mne-tools.github.io | 5d3a104d174255644d8d5335f58036e32695e85d | [
"BSD-3-Clause"
] | null | null | null | 0.17/_downloads/8c453dbbabf4b225611c41642ea9b1d5/plot_morph_stc.py | drammock/mne-tools.github.io | 5d3a104d174255644d8d5335f58036e32695e85d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
================================================================
Morphing source estimates: Moving data from one brain to another
================================================================
Morphing refers to the operation of transferring
:ref:`source estimates <sphx_glr_auto_tutorials_plot_object_source_estimate.py>`
from one anatomy to another. It is commonly referred as realignment in fMRI
literature. This operation is necessary for group studies as one needs
then data in a common space.
In this tutorial we will morph different kinds of source estimation results
between individual subject spaces using :class:`mne.SourceMorph` object.
We will use precomputed data and morph surface and volume source estimates to a
reference anatomy. The common space of choice will be FreeSurfer's 'fsaverage'
See :ref:`sphx_glr_auto_tutorials_plot_background_freesurfer.py` for more
information. Method used for cortical surface data in based
on spherical registration [1]_ and Symmetric Diffeomorphic Registration (SDR)
for volumic data [2]_.
Furthermore we will convert our volume source estimate into a NIfTI image using
:meth:`morph.apply(..., output='nifti1') <mne.SourceMorph.apply>`.
In order to morph :class:`labels <mne.Label>` between subjects allowing the
definition of labels in a one brain and transforming them to anatomically
analogous labels in another use :func:`mne.Label.morph`.
.. contents::
:local:
Why morphing?
=============
Modern neuroimaging techniques, such as source reconstruction or fMRI analyses,
make use of advanced mathematical models and hardware to map brain activity
patterns into a subject specific anatomical brain space.
This enables the study of spatio-temporal brain activity. The representation of
spatio-temporal brain data is often mapped onto the anatomical brain structure
to relate functional and anatomical maps. Thereby activity patterns are
overlaid with anatomical locations that supposedly produced the activity.
Anatomical MR images are often used as such or are transformed into an inflated
surface representations to serve as "canvas" for the visualization.
In order to compute group level statistics, data representations across
subjects must be morphed to a common frame, such that anatomically and
functional similar structures are represented at the same spatial location for
*all subjects equally*.
Since brains vary, morphing comes into play to tell us how the data
produced by subject A, would be represented on the brain of subject B.
See also this :ref:`tutorial on surface source estimation
<sphx_glr_auto_tutorials_plot_mne_solutions.py>`
or this :ref:`example on volumetric source estimation
<sphx_glr_auto_examples_inverse_plot_compute_mne_inverse_volume.py>`.
Morphing **volume** source estimates
====================================
A volumetric source estimate represents functional data in a volumetric 3D
space. The difference between a volumetric representation and a "mesh" (
commonly referred to as "3D-model"), is that the volume is "filled" while the
mesh is "empty". Thus it is not only necessary to morph the points of the
outer hull, but also the "content" of the volume.
In MNE-Python, volumetric source estimates are represented as
:class:`mne.VolSourceEstimate`. The morph was successful if functional data of
Subject A overlaps with anatomical data of Subject B, in the same way it does
for Subject A.
Setting up :class:`mne.SourceMorph` for :class:`mne.VolSourceEstimate`
----------------------------------------------------------------------
Morphing volumetric data from subject A to subject B requires a non-linear
registration step between the anatomical T1 image of subject A to
the anatomical T1 image of subject B.
MNE-Python uses the Symmetric Diffeomorphic Registration [2]_ as implemented
in dipy_ [3]_ (See
`tutorial <http://nipy.org/dipy/examples_built/syn_registration_3d.html>`_
from dipy_ for more details).
:class:`mne.SourceMorph` uses segmented anatomical MR images computed
using :ref:`FreeSurfer <sphx_glr_auto_tutorials_plot_background_freesurfer.py>`
to compute the transformations. In order tell SourceMorph which MRIs to use,
``subject_from`` and ``subject_to`` need to be defined as the name of the
respective folder in FreeSurfer's home directory.
See :ref:`sphx_glr_auto_examples_inverse_plot_morph_volume_stc.py`
usage and for more details on:
- How to create a SourceMorph object for volumetric data
- Apply it to VolSourceEstimate
- Get the output is NIfTI format
- Save a SourceMorph object to disk
Morphing **surface** source estimates
=====================================
A surface source estimate represents data relative to a 3-dimensional mesh of
the cortical surface computed using FreeSurfer. This mesh is defined by
its vertices. If we want to morph our data from one brain to another, then
this translates to finding the correct transformation to transform each
vertex from Subject A into a corresponding vertex of Subject B. Under the hood
:ref:`FreeSurfer <sphx_glr_auto_tutorials_plot_background_freesurfer.py>`
uses spherical representations to compute the morph, as relies on so
called *morphing maps*.
The morphing maps
-----------------
The MNE software accomplishes morphing with help of morphing
maps which can be either computed on demand or precomputed.
The morphing is performed with help
of the registered spherical surfaces (``lh.sphere.reg`` and ``rh.sphere.reg`` )
which must be produced in FreeSurfer.
A morphing map is a linear mapping from cortical surface values
in subject A (:math:`x^{(A)}`) to those in another
subject B (:math:`x^{(B)}`)
.. math:: x^{(B)} = M^{(AB)} x^{(A)}\ ,
where :math:`M^{(AB)}` is a sparse matrix
with at most three nonzero elements on each row. These elements
are determined as follows. First, using the aligned spherical surfaces,
for each vertex :math:`x_j^{(B)}`, find the triangle :math:`T_j^{(A)}` on the
spherical surface of subject A which contains the location :math:`x_j^{(B)}`.
Next, find the numbers of the vertices of this triangle and set
the corresponding elements on the *j* th row of :math:`M^{(AB)}` so that
:math:`x_j^{(B)}` will be a linear interpolation between the triangle vertex
values reflecting the location :math:`x_j^{(B)}` within the triangle
:math:`T_j^{(A)}`.
It follows from the above definition that in general
.. math:: M^{(AB)} \neq (M^{(BA)})^{-1}\ ,
*i.e.*,
.. math:: x_{(A)} \neq M^{(BA)} M^{(AB)} x^{(A)}\ ,
even if
.. math:: x^{(A)} \approx M^{(BA)} M^{(AB)} x^{(A)}\ ,
*i.e.*, the mapping is *almost* a bijection.
Morphing maps can be computed on the fly or read with
:func:`mne.read_morph_map`. Precomputed maps are
located in ``$SUBJECTS_DIR/morph-maps``.
The names of the files in ``$SUBJECTS_DIR/morph-maps`` are
of the form:
<*A*> - <*B*> -``morph.fif`` ,
where <*A*> and <*B*> are names of subjects. These files contain the maps
for both hemispheres, and in both directions, *i.e.*, both :math:`M^{(AB)}`
and :math:`M^{(BA)}`, as defined above. Thus the files
<*A*> - <*B*> -``morph.fif`` or <*B*> - <*A*> -``morph.fif`` are
functionally equivalent. The name of the file produced depends on the role
of <*A*> and <*B*> in the analysis.
About smoothing
---------------
The current estimates are normally defined only in a decimated
grid which is a sparse subset of the vertices in the triangular
tessellation of the cortical surface. Therefore, any sparse set
of values is distributed to neighboring vertices to make the visualized
results easily understandable. This procedure has been traditionally
called smoothing but a more appropriate name
might be smudging or blurring in
accordance with similar operations in image processing programs.
In MNE software terms, smoothing of the vertex data is an
iterative procedure, which produces a blurred image :math:`x^{(N)}` from
the original sparse image :math:`x^{(0)}` by applying
in each iteration step a sparse blurring matrix:
.. math:: x^{(p)} = S^{(p)} x^{(p - 1)}\ .
On each row :math:`j` of the matrix :math:`S^{(p)}` there
are :math:`N_j^{(p - 1)}` nonzero entries whose values
equal :math:`1/N_j^{(p - 1)}`. Here :math:`N_j^{(p - 1)}` is
the number of immediate neighbors of vertex :math:`j` which
had non-zero values at iteration step :math:`p - 1`.
Matrix :math:`S^{(p)}` thus assigns the average
of the non-zero neighbors as the new value for vertex :math:`j`.
One important feature of this procedure is that it tends to preserve
the amplitudes while blurring the surface image.
Once the indices non-zero vertices in :math:`x^{(0)}` and
the topology of the triangulation are fixed the matrices :math:`S^{(p)}` are
fixed and independent of the data. Therefore, it would be in principle
possible to construct a composite blurring matrix
.. math:: S^{(N)} = \prod_{p = 1}^N {S^{(p)}}\ .
However, it turns out to be computationally more effective
to do blurring with an iteration. The above formula for :math:`S^{(N)}` also
shows that the smudging (smoothing) operation is linear.
From theory to practice
-----------------------
In MNE-Python, surface source estimates are represented as
:class:`mne.SourceEstimate` or :class:`mne.VectorSourceEstimate`. Those can
be used together with :class:`mne.SourceSpaces` or without.
The morph was successful if functional data of Subject A overlaps with
anatomical surface data of Subject B, in the same way it does for Subject A.
See :ref:`sphx_glr_auto_examples_inverse_plot_morph_surface_stc.py`
usage and for more details:
- How to create a :class:`mne.SourceMorph` object using
:func:`mne.compute_source_morph` for surface data
- Apply it to :class:`mne.SourceEstimate` or
:class:`mne.VectorSourceEstimate`
- Save a :class:`mne.SourceMorph` object to disk
Please see also Gramfort *et al.* (2013) [4]_.
References
==========
.. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M.
R., Fischl B., Brysbaert M.
A Surface-based Analysis of Language Lateralization and Cortical
Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013.
.. [2] Avants, B. B., Epstein, C. L., Grossman, M., & Gee, J. C. (2009).
Symmetric Diffeomorphic Image Registration with Cross- Correlation:
Evaluating Automated Labeling of Elderly and Neurodegenerative
Brain, 12(1), 26-41.
.. [3] Garyfallidis E, Brett M, Amirbekian B, Rokem A, van der Walt S,
Descoteaux M, Nimmo-Smith I and Dipy Contributors (2014). DIPY, a
library for the analysis of diffusion MRI data. Frontiers in
Neuroinformatics, vol.8, no.8.
.. [4] Gramfort A., Luessi M., Larson E., Engemann D. A., Strohmeier D.,
Brodbeck C., Goj R., Jas. M., Brooks T., Parkkonen L. & Hämäläinen, M.
(2013). MEG and EEG data analysis with MNE-Python. Frontiers in
neuroscience, 7, 267.
.. _dipy: http://nipy.org/dipy/
""" # noqa: E501
| 43.125984 | 80 | 0.726675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,953 | 0.999635 |
044c425f4547c6aed3f6f7f9c9c78fa729ba7b06 | 680 | py | Python | jasper.py | ramosmy/acoustic_model | 3c721b15830dcaeb71f3b828cacb999c14e9651c | [
"MIT"
] | 6 | 2019-07-18T07:33:51.000Z | 2021-11-27T12:48:02.000Z | jasper.py | ramosmy/acoustic_model | 3c721b15830dcaeb71f3b828cacb999c14e9651c | [
"MIT"
] | 2 | 2019-11-08T07:25:39.000Z | 2019-12-03T16:38:37.000Z | jasper.py | ramosmy/acoustic_model | 3c721b15830dcaeb71f3b828cacb999c14e9651c | [
"MIT"
] | 7 | 2019-09-23T05:30:48.000Z | 2021-01-19T08:34:18.000Z | """
Citing from jasper from Nvidia
"""
import torch
import torch.nn as nn
import torch.functional as F
class SubBlock(nn.Module):
    """One Jasper sub-block: 1D convolution -> batch norm -> ReLU -> dropout.

    Generalized from the original hard-coded 256-channel / kernel-11 version;
    the defaults reproduce the original behavior exactly.

    Args:
        dropout: dropout probability applied after the activation.
        in_channels: number of input channels (default 256).
        out_channels: number of output channels (default 256).
        kernel_size: convolution kernel size; must be odd so that
            ``padding = kernel_size // 2`` keeps the time dimension unchanged
            (the original used kernel 11 with padding 5).
    """

    def __init__(self, dropout, in_channels=256, out_channels=256,
                 kernel_size=11):
        super(SubBlock, self).__init__()
        # 'same'-style padding for odd kernels preserves sequence length.
        self.conv = nn.Conv1d(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size, stride=1,
                              padding=kernel_size // 2)
        self.batch_norm = nn.BatchNorm1d(num_features=out_channels)
        self.activation = nn.ReLU()
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        """Apply conv, batch norm, ReLU and dropout in order.

        Args:
            x: tensor of shape (batch, in_channels, time).

        Returns:
            Tensor of shape (batch, out_channels, time).
        """
        x = self.conv(x)
        x = self.batch_norm(x)
        x = self.activation(x)
        return self.dropout(x)
class Block():
    # Placeholder for a full Jasper block (a stack of SubBlocks, typically
    # with a residual connection) — not implemented yet.
    def __init__(self):
        pass
044cb4af3a6867d40b90f56d954b5919ff9ca98a | 513 | py | Python | charts/create_datasets.py | Smelly-London/datavisualisation | feaf0e8e3f48b0f70640149ce37d149ae4219c0d | [
"Apache-2.0"
] | 1 | 2016-09-28T13:42:29.000Z | 2016-09-28T13:42:29.000Z | charts/create_datasets.py | Smelly-London/datavisualisation | feaf0e8e3f48b0f70640149ce37d149ae4219c0d | [
"Apache-2.0"
] | null | null | null | charts/create_datasets.py | Smelly-London/datavisualisation | feaf0e8e3f48b0f70640149ce37d149ae4219c0d | [
"Apache-2.0"
] | null | null | null | import pandas as pd
# Load the original csv
df = pd.read_csv('data/carto.csv')
# Group by name of borough
# The output is a list of tuples with ('name of borough', dataframe of that borough)
grouped = list(df.groupby('location_name'))
for x in grouped:
# Get the name of the borough
name_of_borough = x[0]
# Sort the dataframe by date
sorted_df = x[1].sort(['date'])
# Create a csv for each dataframe
sorted_df.to_csv('data/output/' + name_of_borough + '.tsv', index=None, sep='\t')
| 25.65 | 85 | 0.680312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.553606 |
044cd6d55aaaa85ed616104d85f7411196227c7a | 91 | py | Python | pm4pymdl/algo/mvp/gen_framework/rel_activities/__init__.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | [
"MIT"
] | 5 | 2021-01-31T22:45:29.000Z | 2022-02-22T14:26:06.000Z | pm4pymdl/algo/mvp/gen_framework/rel_activities/__init__.py | Javert899/pm4py-mdl | 4cc875999100f3f1ad60b925a20e40cf52337757 | [
"MIT"
] | 3 | 2021-07-07T15:32:55.000Z | 2021-07-07T16:15:36.000Z | pm4pymdl/algo/mvp/gen_framework/rel_activities/__init__.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | [
"MIT"
] | 9 | 2020-09-23T15:34:11.000Z | 2022-03-17T09:15:40.000Z | from pm4pymdl.algo.mvp.gen_framework.rel_activities import classic, rel_activities_builder
| 45.5 | 90 | 0.89011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
044e54d26f4864c1f8e33c9bd102edca09c831c3 | 5,047 | py | Python | batched_flattened_indices_pseudo_random_permutation.py | eladn/tensors_data_class | 78dd3182d5d070f3ae87bb7141cb38e778d0740a | [
"MIT"
] | null | null | null | batched_flattened_indices_pseudo_random_permutation.py | eladn/tensors_data_class | 78dd3182d5d070f3ae87bb7141cb38e778d0740a | [
"MIT"
] | null | null | null | batched_flattened_indices_pseudo_random_permutation.py | eladn/tensors_data_class | 78dd3182d5d070f3ae87bb7141cb38e778d0740a | [
"MIT"
] | null | null | null | import torch
import hashlib
import dataclasses
import numpy as np
from typing import List, Tuple, final
from .misc import collate_tensors_with_variable_shapes, CollateData, inverse_permutation
from .tensors_data_class_base import TensorsDataClass
from .mixins import HasTargetIndexingGroupMixin
__all__ = ['BatchedFlattenedIndicesPseudoRandomPermutation']
@final
@dataclasses.dataclass
class BatchedFlattenedIndicesPseudoRandomPermutation(HasTargetIndexingGroupMixin, TensorsDataClass):
    """Deterministic per-example permutation over a batched flattened field.

    `permutations` / `inverse_permutations` are populated during collation
    (see `post_collate_indices_fix`), not by the constructor. The two seed
    flags select whether the derived random seed depends on the batch's
    example hashes and/or on the individual example.
    """
    # Filled in post_collate_indices_fix; include per-example batch offsets.
    permutations: torch.LongTensor = dataclasses.field(default=None, init=False)
    inverse_permutations: torch.LongTensor = dataclasses.field(default=None, init=False)
    # Seed-derivation switches; see post_collate_indices_fix for semantics.
    batch_dependent_seed: bool = dataclasses.field(default=True)
    example_dependent_seed: bool = dataclasses.field(default=True)
    initial_seed_salt: str = dataclasses.field(default='0')

    @classmethod
    def get_management_fields(cls) -> Tuple[str, ...]:
        # The seed-related fields are metadata, not collatable tensors.
        return super(BatchedFlattenedIndicesPseudoRandomPermutation, cls).get_management_fields() + \
            ('batch_dependent_seed', 'example_dependent_seed', 'initial_seed_salt')

    @classmethod
    def get_indices_fields(cls) -> Tuple[dataclasses.Field, ...]:
        # Only the two permutation tensors are treated as index fields.
        return tuple(field for field in dataclasses.fields(cls)
                     if field.name in {'permutations', 'inverse_permutations'})

    @classmethod
    def _collate_first_pass(
            cls, inputs: List['BatchedFlattenedIndicesPseudoRandomPermutation'],
            collate_data: CollateData) \
            -> 'BatchedFlattenedIndicesPseudoRandomPermutation':
        """Collate inputs and propagate the target indexing group from the first one."""
        collated = super(BatchedFlattenedIndicesPseudoRandomPermutation, cls)._collate_first_pass(
            inputs, collate_data=collate_data)
        collated.tgt_indexing_group = inputs[0].tgt_indexing_group
        return collated

    def post_collate_indices_fix(self, parents: Tuple['TensorsDataClass', ...], fields_path: Tuple[str, ...],
                                 collate_data: CollateData):
        """Build the per-example permutations (and their inverses).

        Derives a deterministic 32-bit seed per example by SHA-256 hashing
        `initial_seed_salt` together with (depending on the two seed flags)
        the joined batch example hashes and/or the example's own index/hash.
        Each example's permutation is then shifted by its batch index offset
        so it addresses the flattened batched tensor, and the results are
        collated into `self.permutations` / `self.inverse_permutations`.

        Raises:
            ValueError: if no target indexing group is set, or no field in
                the parent tensors data class is addressable via it.
        """
        if self.tgt_indexing_group is None:
            raise ValueError(f'`{self.__class__.__name__}` must have an `tgt_indexing_group`.')
        addressed_flattened_tensor = self.find_addressed_batched_flattened_tensor(parents[0])
        if addressed_flattened_tensor is None:
            raise ValueError(
                f'Not found field in tensors data class which is addressable '
                f'via index group `{self.tgt_indexing_group}`.')
        nr_items_per_example = addressed_flattened_tensor.nr_items_per_example
        index_offsets = addressed_flattened_tensor.batched_index_offset_additive_fix_per_example
        # Four seed modes: batch+example, example-only, batch-only, fixed salt.
        if self.batch_dependent_seed and self.example_dependent_seed:
            random_seed_per_example = [
                int(hashlib.sha256(f'{self.initial_seed_salt}|{"-".join(collate_data.example_hashes)}|{example_idx}'
                                   .encode('ascii')).hexdigest(), 16) % (2 ** 32)
                for example_idx, _ in enumerate(collate_data.example_hashes)]
        elif not self.batch_dependent_seed and self.example_dependent_seed:
            random_seed_per_example = [
                int(hashlib.sha256(f'{self.initial_seed_salt}|{example_hash}'
                                   .encode('ascii')).hexdigest(), 16) % (2 ** 32)
                for example_hash in collate_data.example_hashes]
        elif self.batch_dependent_seed and not self.example_dependent_seed:
            random_seed_per_example = [
                int(hashlib.sha256(f'{self.initial_seed_salt}|{"-".join(collate_data.example_hashes)}'
                                   .encode('ascii')).hexdigest(), 16) % (2 ** 32)
                for _ in collate_data.example_hashes]
        else:
            random_seed_per_example = [
                int(hashlib.sha256(f'{self.initial_seed_salt}'
                                   .encode('ascii')).hexdigest(), 16) % (2 ** 32)
                for _ in collate_data.example_hashes]
        permutations_without_offsets = [
            torch.LongTensor(np.random.RandomState(random_seed_per_example[example_idx]).permutation(int(nr_items)))
            for example_idx, nr_items in enumerate(nr_items_per_example)]
        inverse_permutations_without_offsets = [inverse_permutation(perm) for perm in permutations_without_offsets]
        # Shift each example's indices into the flattened batched index space.
        permutations_with_offsets = [
            perm + index_offset for perm, index_offset in zip(permutations_without_offsets, index_offsets)]
        inverse_permutations_with_ranges = [
            perm + index_offset for perm, index_offset in zip(inverse_permutations_without_offsets, index_offsets)]
        self.permutations = collate_tensors_with_variable_shapes(
            tensors=tuple(permutations_with_offsets), create_collate_mask=False,
            create_collate_lengths=False, last_variable_dim=0)
        self.inverse_permutations = collate_tensors_with_variable_shapes(
            tensors=tuple(inverse_permutations_with_ranges), create_collate_mask=False,
            create_collate_lengths=False, last_variable_dim=0)
| 54.858696 | 116 | 0.699425 | 4,655 | 0.92233 | 0 | 0 | 4,685 | 0.928274 | 0 | 0 | 685 | 0.135724 |
044fa9bc2b971a59a7630fd706352bd033dc7523 | 2,761 | py | Python | legacy/zero_training.py | GabrielePicco/pytorch-lightning | 0d6dfd42d8965347a258e3d20e83bddd344e718f | [
"Apache-2.0"
] | 4 | 2021-07-27T14:39:02.000Z | 2022-03-07T10:57:13.000Z | legacy/zero_training.py | GabrielePicco/pytorch-lightning | 0d6dfd42d8965347a258e3d20e83bddd344e718f | [
"Apache-2.0"
] | 2 | 2021-07-03T07:07:32.000Z | 2022-03-10T16:07:20.000Z | legacy/zero_training.py | GabrielePicco/pytorch-lightning | 0d6dfd42d8965347a258e3d20e83bddd344e718f | [
"Apache-2.0"
] | 1 | 2021-02-16T00:47:46.000Z | 2021-02-16T00:47:46.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils.data import Dataset
import pytorch_lightning as pl
PATH_LEGACY = os.path.dirname(__file__)
class RandomDataset(Dataset):
    """Dataset of `length` random feature vectors, each of width `size`."""

    def __init__(self, size, length: int = 100):
        # Generate every sample up front; row i is item i.
        self.data = torch.randn(length, size)
        self.len = length

    def __getitem__(self, index):
        sample = self.data[index]
        return sample

    def __len__(self):
        return self.len
class DummyModel(pl.LightningModule):
    """Minimal LightningModule: a single Linear(32, 2) trained on random data."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def forward(self, x):
        return self.layer(x)

    def _loss(self, batch, prediction):
        # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
        target = torch.ones_like(prediction)
        return torch.nn.functional.mse_loss(prediction, target)

    def _step(self, batch, batch_idx):
        prediction = self.layer(batch)
        # return {'loss': loss} # used for PL<1.0
        return self._loss(batch, prediction)  # used for PL >= 1.0

    def training_step(self, batch, batch_idx):
        return self._step(batch, batch_idx)

    def validation_step(self, batch, batch_idx):
        self._step(batch, batch_idx)

    def test_step(self, batch, batch_idx):
        self._step(batch, batch_idx)

    def configure_optimizers(self):
        opt = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        sched = torch.optim.lr_scheduler.StepLR(opt, step_size=1)
        return [opt], [sched]

    def train_dataloader(self):
        return torch.utils.data.DataLoader(RandomDataset(32, 64))

    def val_dataloader(self):
        return torch.utils.data.DataLoader(RandomDataset(32, 64))

    def test_dataloader(self):
        return torch.utils.data.DataLoader(RandomDataset(32, 64))
def main_train(dir_path, max_epochs: int = 5):
    """Fit a DummyModel for `max_epochs`, checkpointing under `dir_path`."""
    model = DummyModel()
    trainer = pl.Trainer(
        default_root_dir=dir_path,
        checkpoint_callback=True,
        max_epochs=max_epochs,
    )
    trainer.fit(model)
if __name__ == '__main__':
    # Write legacy checkpoints under legacy/checkpoints/<current PL version>/
    path_dir = os.path.join(PATH_LEGACY, 'checkpoints', str(pl.__version__))
    main_train(path_dir)
| 29.063158 | 100 | 0.688156 | 1,673 | 0.60594 | 0 | 0 | 0 | 0 | 0 | 0 | 750 | 0.271641 |
045138c95caf400b1764962f762f8235e32b7a22 | 1,404 | py | Python | lab02/lab02/operaciones/views.py | josepilco7501/TECSUP-DAE-2021-2 | f6e433193edd2b9547a7385f0e03b0aacdb4dcd0 | [
"MIT"
] | null | null | null | lab02/lab02/operaciones/views.py | josepilco7501/TECSUP-DAE-2021-2 | f6e433193edd2b9547a7385f0e03b0aacdb4dcd0 | [
"MIT"
] | null | null | null | lab02/lab02/operaciones/views.py | josepilco7501/TECSUP-DAE-2021-2 | f6e433193edd2b9547a7385f0e03b0aacdb4dcd0 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def calculadora(request):
    """Render the calculator input form."""
    return render(request, 'operaciones/formulario.html',
                  {'titulo': "Ingrese los numeros"})
def resultado(request):
    """Compute the requested arithmetic operation and render the result page.

    Reads 'numeroa', 'numerob' and 'operacion' from the POST form.

    Raises:
        KeyError: if a form field is missing.
        ValueError: if 'operacion' is not one of suma/resta/multiplicacion.
            (The original code left `resultado` unbound in that case and
            crashed with a NameError instead.)
    """
    a = request.POST['numeroa']
    b = request.POST['numerob']
    operacion = request.POST['operacion']
    # Dispatch table replaces the original chain of independent `if`s and
    # guarantees `resultado` is always defined for known operations.
    operaciones = {
        'suma': lambda x, y: x + y,
        'resta': lambda x, y: x - y,
        'multiplicacion': lambda x, y: x * y,
    }
    if operacion not in operaciones:
        raise ValueError('operacion no soportada: ' + operacion)
    resultado = operaciones[operacion](int(a), int(b))
    context = {
        'operacion': operacion,
        'numeroa': a,
        'numerob': b,
        'titulo': "Resultado de la operación",
        'resultado': resultado
    }
    return render(request, 'operaciones/resultados.html', context)
def datosCilindro(request):
    """Render the cylinder-volume input form."""
    context = {'titulo': "CÁLCULO DEL VOLUMEN DE UN CILINDRO "}
    return render(request, 'operaciones/formCilindro.html', context)
def resultVolumen(request):
    """Compute a cylinder's volume from the POSTed diameter and height."""
    diametro = request.POST['diametro']
    altura = request.POST['altura']
    radio = float(diametro) / 2
    # volume = pi * r^2 * h, with the same 3.1416 approximation as before
    volumen = 3.1416 * radio ** 2 * float(altura)
    return render(request, 'operaciones/resultVolumen.html',
                  {'titulo': 'VOLUMEN DEL CILINDRO', 'volumen': volumen})
| 28.653061 | 67 | 0.64245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.329303 |
0451439c3c8986ca671570c793596df3e6737f99 | 40,552 | py | Python | sim/turn.py | adacker10/showdown | 8ceb1ff46d5c33ec3055928d6ad293224446f63c | [
"MIT"
] | 8 | 2019-02-02T01:15:57.000Z | 2021-12-23T04:43:46.000Z | sim/turn.py | adacker10/showdown | 8ceb1ff46d5c33ec3055928d6ad293224446f63c | [
"MIT"
] | null | null | null | sim/turn.py | adacker10/showdown | 8ceb1ff46d5c33ec3055928d6ad293224446f63c | [
"MIT"
] | 6 | 2020-09-11T13:15:05.000Z | 2022-03-18T15:46:35.000Z | '''
Nicolas Lindbloom-Airey
turn.py
This file is entirely devoted to the do_turn(Battle) method.
One call of do_turn does all the logic for a single turn.
Functions defined in this file are:
* functions are are only called in the do_turn method.
Functions called by run_move are called only once per run_move call.
turn_start and turn_end are called once per turn.
create_move and populate_action_queue are called once per active pokemon
in the battle.
run_action is called once on every action created.
ORDER OF FUNCTION CALL:
turn_start
create_move
populate_action_queue
run_action
run_move
update_move_before_running
accuracy_check
calc_damage
-> damage called in pokemon.py
unique_moves_after_damage
boosts_statuses
turn_end
'''
import heapq
from sim.player import *
def turn_start(B:Battle) -> None:
    '''
    ONLY CALLED IN do_turn()
    Advances the turn counter (pseudo turns only advance by half) and, on a
    real turn, bumps each active pokemon's turn count, resets its per-turn
    hit counter, and clears volatile statuses that last a single turn.
    '''
    B.turn += 1
    B.turn = math.floor(B.turn)
    if B.pseudo_turn:
        B.turn -= 0.5
        return
    # statuses that wear off at the start of every real turn
    one_turn_statuses = {None, 'flinch', 'endure', 'protect',
                         'banefulbunker', 'spikyshield'}
    for pokemon in get_active_pokemon(B):
        pokemon.active_turns += 1
        pokemon.pokemon_hit_this_turn = 0
        pokemon.volatile_statuses -= one_turn_statuses
    return
def turn_end(B:Battle) -> None:
    '''
    ONLY CALLED IN do_turn()
    End-of-turn bookkeeping, in order: weather countdown, weather damage,
    per-pokemon volatile-status effects (bound, aqua ring, nightmare,
    perish song, encore, taunt, curse), then major-status effects
    (burn/poison/toxic damage, freeze thaw, sleep countdown).
    PRE: B.pseudo_turn = False
    '''
    if B.pseudo_turn:
        sys.exit('ERROR: turn_end() called on pseudo turn')
    # tick weather counter down
    if B.weather in ['sunlight', 'rain', 'sandstorm', 'hail']:
        B.weather_n -= 1
        if B.weather_n == 0:
            B.weather = 'clear'
    # weather damage (1/16 max HP to non-immune types)
    for pokemon in get_active_pokemon(B):
        # NOTE(review): 'safteygoggles' is likely a typo of 'safetygoggles' —
        # must match the item id used elsewhere in the project; verify.
        if pokemon.item == 'safteygoggles':
            continue
        if B.weather == 'sandstorm':
            if {'Steel','Rock','Ground'}.isdisjoint(pokemon.types):
                damage(pokemon, 1/16, flag='percentmaxhp')
        if B.weather == 'hail':
            if 'Ice' not in pokemon.types:
                damage(pokemon, 1/16, flag='percentmaxhp')
    # volatile statuses
    for pokemon in get_active_pokemon(B):
        # bound (e.g. wrap/bind): residual damage, then count down
        if 'partiallytrapped' in pokemon.volatile_statuses:
            damage(pokemon, pokemon.bound_damage, flag='percentmaxhp')
            pokemon.bound_n -= 1
            if pokemon.bound_n == 0:
                pokemon.volatile_statuses -= {'partiallytrapped'}
        # aqua ring: heals (negative damage), boosted by big root
        if pokemon.aqua_ring:
            dmg = -1/16
            if pokemon.item == 'bigroot':
                dmg *= 1.3
            damage(pokemon, dmg, 'percentmaxhp')
        # leech seed (disabled)
        #if 'leechseed' in pokemon.volatile_statuses:
        #    foe = B.sides[0 if pokemon.side_id else 1].active_pokemon[0]
        #    dmg = 1/8
        #    if foe.item == 'bigroot':
        #        dmg *= 1.3
        #    damage(foe, -(damage(pokemon, dmg, 'percentmaxhp')))
        # nightmare: only damages sleeping pokemon
        if 'nightmare' in pokemon.volatile_statuses:
            if pokemon.status == 'slp':
                damage(pokemon, 1/4, 'percentmaxhp')
        # perish song: faint when the counter reaches zero
        if 'perishsong' in pokemon.volatile_statuses:
            pokemon.perishsong_n -= 1
            if pokemon.perishsong_n == 0:
                faint(pokemon)
        # encore countdown
        if 'encore' in pokemon.volatile_statuses:
            pokemon.encore_n -= 1
            if pokemon.encore_n == 0:
                pokemon.volatile_statuses -= {'encore'}
        # taunt countdown
        if 'taunt' in pokemon.volatile_statuses:
            pokemon.taunt_n -= 1
            if pokemon.taunt_n == 0:
                pokemon.volatile_statuses.remove('taunt')
        # curse
        # NOTE(review): this damages the cursed pokemon only if IT is Ghost
        # type; the mechanic normally keys off the curser being Ghost — confirm.
        if 'curse' in pokemon.volatile_statuses:
            if 'Ghost' in pokemon.types:
                damage(pokemon, 1/4, flag='percentmaxhp')
    # do major status checks
    for pokemon in get_active_pokemon(B):
        if pokemon.status == 'brn':
            damage(pokemon, 1/16, flag='percentmax')
        elif pokemon.status == 'psn':
            damage(pokemon, 1/8, flag='percentmax')
        elif pokemon.status == 'tox':
            # toxic damage grows each turn via toxic_n
            dmg = 1/16*pokemon.toxic_n
            damage(pokemon, dmg, flag='percentmax')
            pokemon.toxic_n += 1
        elif pokemon.status == 'frz':
            #twenty percent chance to be thawed
            if random.random() < 0.20:
                cure_status(pokemon)
            if B.weather == 'sunlight':
                cure_status(pokemon)
        elif pokemon.status == 'slp':
            pokemon.sleep_n -= 1
            if pokemon.sleep_n == 0:
                cure_status(pokemon)
    return
def run_action(B, a : Action) -> None:
    '''
    ONLY CALLED IN do_turn()
    Takes an Action object. Switches call player.switch(), mega evolutions
    call pokemon.mega_evolve(), and moves resolve their target pokemon and
    call run_move(B, user, move, target).
    '''
    # one layout for switches
    if a.action_type == 'switch':
        if a.user.player_uid == 1:
            switch(B.p1, a.user, a.pos)
        if a.user.player_uid == 2:
            switch(B.p2, a.user, a.pos)
        return
    # another for mega evolutions
    if a.action_type == 'mega':
        a.user.mega_evolve()
        return
    # single battle targeting the foe's single active slot
    if not B.doubles and a.target[0:3] == 'foe':
        p = a.user.player_uid
        if p == 1:
            run_move(B, a.user, a.move, B.p2.active_pokemon[0])
        if p == 2:
            run_move(B, a.user, a.move, B.p1.active_pokemon[0])
        return
    if a.target == 'all':
        # BUG FIX: the original referenced undefined locals `move`/`user`
        # and a nonexistent `B.run_move` method (NameError/AttributeError at
        # runtime). Use the action's fields and the module-level run_move.
        spread_move = a.move._replace(base_power = a.move.base_power * 0.75)
        for pokemon in get_active_pokemon(B):
            run_move(B, a.user, spread_move, pokemon)
        return
    return
def run_move(B:Battle, user:Pokemon, move:dex.Move, target:Pokemon) -> None:
    '''
    ONLY CALLED IN run_action()
    Full move resolution: PP deduction, pre-run move adjustment, accuracy
    check, multi-hit damage loop (with per-hit side effects), then boosts
    and statuses, with optional debug printing.
    '''
    # Fainted pokemon can't use their move.
    if user.fainted:
        #B.log(user.name + " fainted before they could move")
        return
    # subtract pp
    # struggle and zmoves do not have pp
    if move.id != 'struggle' and move.z_move.crystal is None:
        if move.id in user.pp:
            user.pp[move.id] -= 1
        # remove the move from the pokemons move list if it has no pp left
        #if user.pp[move.id] == 0:
        #    if move.id in user.moves:
        #        user.moves.remove(move.id)
    # update last used move
    user.last_used_move = move.id
    # some moves depend on battle state; get the adjusted (copied) move
    move = update_move_before_running(B, user, move, target)
    # ACCURACY CHECK
    if not accuracy_check(B, user, move, target):
        if B.debug:
            print(user.name + ' used ' + move.id + ' and missed')
        # move missed! do nothing
        return
    # zmove pp
    if move.z_move.crystal is not None:
        pass
        #user.side.used_zmove = True
    # Handle multi hit moves.
    # move.multi_hit is a probability distribution of number of hits in
    # increasing order.
    # move.multi_hit[-1] is last element.
    number_hits = 1
    if move.multi_hit is not None:
        number_hits = random.choice(move.multi_hit)
        # skill link always gets the maximum number of hits
        if user.ability == 'skilllink':
            number_hits = move.multi_hit[-1]
    #---------------
    # do damage
    #---------------
    for i in range(number_hits):
        # Triplekick checks accuracy for every hit
        # NOTE(review): `i != 1` skips the re-check only on the second hit;
        # this looks like it was meant to be `i != 0` (skip the first,
        # already-checked hit) — confirm intended behavior.
        if i != 1 and move.id == 'triplekick':
            if not accuracy_check(B, user, move, target):
                return
        # damage calculation
        dmg = calc_damage(B, user, move, target)
        # do the damage
        # how much damage was actually done
        # is limited by how much hp the target had left
        dmg = damage(target, dmg)
        if dmg > 0:
            target.last_damaging_move = move.id
        # do unique move things (drain/heal/recoil) using this hit's damage
        unique_moves_after_damage(B, user, move, target, dmg)
    # handle boosts and statuses
    boosts_statuses(B, user, move, target)
    if B.debug:
        print(user.name + ' used ' + move.id + '')
    return
def calc_damage(B:Battle, user:Pokemon, move:dex.Move, target:Pokemon) -> int:
    '''
    ONLY CALLED IN run_move()
    Calculates the amount of damage done to the target by the user
    using the move: base formula, then a chain of multiplicative
    modifiers (crit, random spread, STAB, type effectiveness, spread
    penalty, weather, burn, abilities, items). Status moves return 0.
    '''
    # status moves do zero dmg, return early
    if move.category == 'Status':
        return 0
    dmg = 0
    # Was it a critical hit? -> Make local var.
    crit = False
    move_crit = move.crit_ratio if move.crit_ratio is not None else 0
    crit_chance = user.crit_chance + move_crit
    # crit stage 3+ always crits; otherwise roll against the stage's chance
    random_crit = random.random() < (dex.crit[crit_chance] if crit_chance <= 3 else 1) and B.rng
    if crit_chance >= 3 or random_crit:
        crit = True
    # Set up power, attack, defense variables
    power = move.base_power
    if move.category == 'Special':
        attack = get_specialattack(user, crit, B.weather)
        defense = get_specialdefense(target, crit, B.weather)
    elif move.category == 'Physical':
        attack = get_attack(user, crit, B.weather)
        defense = get_defense(target, crit, B.terrain)
    # Main damage formula.
    dmg = (math.floor(math.floor(math.floor(((2 * user.level) / 5) + 2) * attack * power / defense) / 50) + 2)
    # multiply the damage by each modifier
    modifier = 1
    # CRITICAL DAMAGE
    if crit:
        modifier *= 1.5
    # RANDOMNESS (damage roll between 85% and 100%)
    if B.rng:
        modifier *= random.uniform(0.85, 1.0)
    # STAB (same type attack bonus)
    if move.type in user.types:
        modifier *= 1.5
    # TYPE EFFECTIVENESS (product over the target's types)
    type_modifier = 1
    for each in target.types:
        type_effect = dex.typechart_dex[each].damage_taken[move.type]
        type_modifier *= type_effect
        modifier *= type_effect
    # moves that hit multiple pokemon do less damage
    if B.doubles and move.target_type in ['foeSide', 'allyTeam', 'allAdjacent', 'allAdjacentFoes', 'allySide']:
        modifier *= 0.75
    # WEATHER
    if B.weather in ['rain', 'heavy_rain']:
        if move.type == 'Water':
            modifier *= 1.5
        elif move.type == 'Fire':
            modifier *= 0.5
    elif B.weather in ['sunlight', 'heavy_sunlight']:
        if move.type == 'Water':
            modifier *= 0.5
        elif move.type == 'Fire':
            modifier *= 1.5
    # burn halves physical damage unless the user has guts
    if user.status == 'brn' and \
            move.category == 'Physical' and \
            user.ability != 'guts':
        modifier *= 0.5
    # zmove goes through protect at 1/4 damage
    if move.z_move.crystal is not None and 'protect' in target.volatile_statuses:
        modifier *= 0.25
    # aurora veil, lightscreen, reflect (disabled)
    #if 'auroraveil' in target.side.side_conditions and not crit and user.ability != 'infiltrator':
        #double battle this should be 0.66
    #    modifier *= 0.5
    #elif 'lightscreen' in target.side.side_conditions and move.category == 'Special' and not crit and user.ability != 'infiltrator':
        #double battle this should be 0.66
    #    modifier *= 0.5
    #elif 'reflect' in target.side.side_conditions and move.category == 'Physical'and not crit and user.ability != 'infiltrator':
        #double battle this should be 0.66
    #    modifier *= 0.5
    # certain moves do double damage to minimized targets
    if 'minimize' in target.volatile_statuses:
        if move.id in ['dragonrush', 'bodyslam', 'heatcrash', 'heavyslam', 'phantomforce', 'shadowforce', 'stomp']:
            modifier *= 2
    #magnitude, earthquake, surf, whirlpool do double dmg to dig and dive states
    # ABILITIES
    if target.ability == 'fluffy':
        if move.flags.contact and move.type != 'Fire':
            modifier *= 0.5
        elif not move.flags.contact and move.type == 'Fire':
            modifier *= 2
    elif target.ability == 'filter' or target.ability == 'prismarmor':
        if type_modifier > 1:
            modifier *= 0.75
    # friend guard
    elif target.ability in ['multiscale', 'shadowshield', 'solidrock']:
        if target.hp == target.maxhp:
            modifier *= 0.5
    if user.ability == 'sniper' and crit:
        modifier *= 1.5
    elif user.ability == 'tintedlens':
        if type_modifier < 1:
            modifier *= 2
    # ITEMS
    if target.item == 'chilanberry' and move.type == 'Normal':
        modifier *= 0.5
    if user.item == 'expertbelt' and type_modifier > 1:
        modifier *= 1.2
    if user.item == 'lifeorb':
        modifier *= 1.3
    # metronome item boosts consecutive uses of the same move
    if user.item == 'metronome':
        modifier *= (1+(user.consecutive_move_uses*0.2))
    # type-resist berries halve super-effective hits of their type
    if target.item in list(dex.type_resist_berries.keys()):
        if dex.type_resist_berries[target.item] == move.type:
            if type_modifier > 1:
                modifier *= 0.5
    #floor damage before and after applying modifier
    dmg = math.floor(dmg)
    dmg *= modifier
    dmg = math.floor(dmg)
    return dmg
def accuracy_check(B:Battle, user:Pokemon, move:dex.Move, target:Pokemon) -> bool:
    '''
    ONLY CALLED IN run_move
    Check if the move hits(true) or misses(false).
    Check specific move requirements to not fail
    at the end.
    A False return here ends the run_move method.
    '''
    # full paralyze (25% chance to lose the turn)
    if user.status == 'par' and random.random() < 0.25:
        return False
    # asleep pokemon miss unless they use snore or sleeptalk
    elif user.status == 'slp' and not move.sleep_usable:
        return False
    # Is the target protected?
    if 'protect' in target.volatile_statuses and move.flags.protect:
        return False
    if 'banefulbunker' in target.volatile_statuses and move.flags.protect:
        return False
    if 'spikyshield' in target.volatile_statuses and move.flags.protect:
        return False
    if 'kingsshield' in target.volatile_statuses and move.flags.protect and move.category != 'Status':
        return False
    # protect moves accuracy: success chance decays (1/n) with
    # consecutive uses tracked via user.protect_n
    if move.id in ['protect', 'detect', 'endure', 'wide guard', 'quick guard', 'spikyshield', 'kingsshield', 'banefulbunker']:
        rand_float = random.random()
        n = user.protect_n
        user.protect_n += 3
        # with rng disabled, only the first consecutive use succeeds
        if not B.rng and n > 0:
            return False
        if n == 0 or rand_float < (1.0 / n):
            return True
        return False
    else:
        # if the move is not a protect move, reset the counter
        user.protect_n = 0
    # flinched
    if 'flinch' in user.volatile_statuses:
        return False
    # fake out fails after 1 turn active
    if move.id == 'fakeout' and user.active_turns > 1:
        return False
    # if user is taunted, status moves fail
    if move.category == 'Status' and 'taunt' in user.volatile_statuses:
        return False
    # these moves dont check accuracy in certain weather
    if move.id == 'thunder' and B.weather == 'rain':
        return True
    if move.id == 'hurricane' and B.weather == 'rain':
        return True
    if move.id == 'blizzard' and B.weather == 'hail':
        return True
    # some moves don't check accuracy (accuracy stored as literal True)
    if move.accuracy is True:
        return True
    # returns a boolean whether the move hit the target
    # if temp < check then the move hits
    temp = random.randint(0, 99)
    accuracy = get_accuracy(user)
    evasion = get_evasion(target)
    check = 100
    if B.rng:
        check = (move.accuracy * accuracy * evasion)
    return temp < check
def boosts_statuses(B:Battle, user:Pokemon, move:dex.Move, target:Pokemon) -> None:
    '''
    ONLY CALLED in run_move()
    Handles boosts and statuses: applies the move's guaranteed primary
    effects, then rolls each secondary effect against its chance. Also
    initializes the counters for bound/taunt/encore volatile statuses.
    '''
    # stat changing moves
    user_volatile_status = ''
    target_volatile_status = ''
    # primary effects (always applied)
    if move.primary['boosts'] is not None:
        boost(target, move.primary['boosts'])
    if move.primary['volatile_status'] is not None:
        target_volatile_status = move.primary['volatile_status']
        target.volatile_statuses.add(target_volatile_status)
    if move.primary['self'] is not None:
        if 'boosts' in move.primary['self']:
            boost(user, move.primary['self']['boosts'])
        if 'volatile_status' in move.primary['self']:
            user_volatile_status = move.primary['self']['volatile_status']
            user.volatile_statuses.add(user_volatile_status)
    if move.primary['status'] is not None:
        add_status(target, move.primary['status'])
    # secondary effects (chance-based)
    for effect in move.secondary:
        if not target.fainted:
            temp = random.randint(0, 99)
            check = effect['chance']
            # with rng disabled, only guaranteed (100%) secondaries apply
            if check != 100 and not B.rng:
                check = 0
            if temp < check:
                if 'boosts' in effect:
                    boost(target, effect['boosts'])
                if 'status' in effect:
                    add_status(target, effect['status'], user)
                if 'volatile_status' in effect:
                    target_volatile_status = effect['volatile_status']
                    target.volatile_statuses.add(target_volatile_status)
                    # initialize duration/damage counters for timed statuses
                    if target_volatile_status == 'partiallytrapped':
                        target.bound_n = 4 if random.random() < 0.5 else 5
                        target.bound_damage = 1/16
                        if user.item == 'gripclaw':
                            target.bound_n = 7
                        if user.item == 'bindingband':
                            target.bound_damage = 1/8
                    if target_volatile_status == 'taunt':
                        target.taunt_n = 3
                    if target_volatile_status == 'encore':
                        target.encore_n = 3
    return
def update_move_before_running(B:Battle, user:Pokemon, move:dex.Move, target:Pokemon) -> dex.Move:
    '''
    ONLY CALLED IN run_move()
    Returns the new move reference
    Some moves need to have their info updated based on the current state
    before running. the namedtuple._replace returns a new namedtuple
    instance with updated values so we dont have to worry about ruining the
    game data.
    Also applies baneful bunker / spiky shield contact punishment and
    'always' recoil at the end.
    '''
    # update the moves power
    # acrobatics: doubled power when the user holds no item
    if move.id == 'acrobatics' and user.item == '':
        power = move.base_power * 2
        move = move._replace(base_power = power)
    elif move.id == 'beatup':
        power = 0
        #for pokemon in user.side.pokemon:
        #    power += (dex.pokedex[pokemon.id].baseStats.attack / 10) + 5
        move = move._replace(base_power = power)
    elif move.id == 'crushgrip' or move.id == 'wringout':
        # scales with the target's remaining HP fraction
        power = math.floor(120 * (target.hp / target.maxhp))
        if power < 1:
            power = 1
        move = move._replace(base_power = power)
    elif move.id == 'electroball':
        # power from target/user speed ratio (cascading thresholds)
        power = 1
        speed = target.stats.speed / user.stats.speed
        if speed <= 0.25:
            power = 150
        if speed > 0.25:
            power = 120
        if speed > .3333:
            power = 80
        if speed > 0.5:
            power = 60
        if speed > 1:
            power = 40
        move = move._replace(base_power = power)
    elif move.id == 'eruption' or move.id == 'waterspout':
        # scales with the user's remaining HP fraction
        power = math.floor(150 * (user.hp / user.maxhp))
        if power < 1:
            power = 1
        move = move._replace(base_power = power)
    elif move.id == 'flail' or move.id == 'reversal':
        # stronger the lower the user's HP
        power = 1
        hp = user.hp / user.maxhp
        if hp < 0.0417:
            power = 200
        if hp >= 0.0417:
            power = 150
        if hp > 0.1042:
            power = 100
        if hp > 0.2083:
            power = 80
        if hp > 0.3542:
            power = 40
        if hp >= 0.6875:
            power = 20
        move = move._replace(base_power = power)
    elif move.id == 'fling':
        #default base power will be 30
        power = dex.fling.get(user.item, 30)
        move = move._replace(base_power = power)
        # some flung items add a primary status/flinch effect
        if user.item == 'kingsrock' or user.item == 'razorfang':
            move = move._replace(primary= {'boosts': None, 'status': None, 'volatile_status': 'flinch', 'self': None})
        elif user.item == 'flameorb':
            move = move._replace(primary= {'boosts': None, 'status': 'brn', 'volatile_status': None, 'self': None})
        elif user.item == 'toxicorb':
            move = move._replace(primary= {'boosts': None, 'status': 'tox', 'volatile_status': None, 'self': None})
        elif user.item == 'lightball':
            move = move._replace(primary= {'boosts': None, 'status': 'par', 'volatile_status': None, 'self': None})
        elif user.item == 'poisonbarb':
            move = move._replace(primary= {'boosts': None, 'status': 'psn', 'volatile_status': None, 'self': None})
    # assume max base_power for return and frustration
    elif move.id == 'frustration' or move.id == 'return':
        move = move._replace(base_power = 102)
    elif move.id == 'grassknot':
        # power from the target's weight (cascading thresholds)
        power = 1
        weight = dex.pokedex[target.species].weightkg
        if weight >= 200:
            power = 120
        if weight < 200:
            power = 100
        if weight < 100:
            power = 80
        if weight < 50:
            power = 60
        if weight < 25:
            power = 40
        if weight < 10:
            power = 20
        move = move._replace(base_power = power)
    elif move.id == 'heatcrash' or move.id == 'heavyslam':
        # power from the target/user weight ratio
        power = 1
        weight = dex.pokedex[target.id].weightkg / dex.pokedex[user.id].weightkg
        if weight > 0.5:
            power = 40
        if weight < 0.5:
            power = 60
        if weight < 0.333:
            power = 80
        if weight < 0.25:
            power = 100
        if weight < 0.20:
            power = 120
        move = move._replace(base_power = power)
    elif move.id == 'gyroball':
        # power from the target/user effective speed ratio
        target_player = (B.p1 if target.player_uid == 1 else B.p2)
        user_player = (B.p1 if user.player_uid == 1 else B.p2)
        power = math.floor(25 * (get_speed(target, B.weather, B.terrain, B.trickroom, target_player.tailwind) / get_speed(user, B.weather, B.terrain, B.trickroom, user_player.tailwind)))
        if power < 1:
            power = 1
        move = move._replace(base_power = power)
    elif move.id == 'magnitude':
        power = random.choice(dex.magnitude_power)
        move = move._replace(base_power = power)
    elif move.id == 'naturalgift':
        # berry determines both power and type
        item = dex.item_dex[user.item]
        if item.isBerry:
            move = move._replace(base_power = item.naturalGift['basePower'])
            move = move._replace(type = item.naturalGift['type'])
    elif move.id == 'powertrip' or move.id == 'storedpower':
        # +20 power per positive stat boost on the user
        power = 20
        for stat in user.boosts:
            if user.boosts[stat] > 0:
                power += (user.boosts[stat] * 20)
        move = move._replace(base_power = power)
    elif move.id == 'present':
        power = random.choice([0, 0, 120, 80, 80, 80, 40, 40, 40, 40])
        move = move._replace(base_power = power)
    elif move.id == 'punishment':
        # +20 power per positive stat boost on the target, capped at 200
        power = 60
        for stat in target.boosts:
            if target.boosts[stat] > 0:
                power += (target.boosts[stat] * 20)
        if power > 200:
            power = 200
        move = move._replace(base_power = power)
    elif move.id == 'spitup':
        # consumes the user's stockpile charges
        power = 100 * user.stockpile
        move = move._replace(base_power = power)
        user.stockpile = 0
    # assist
    elif move.id == 'assist':
        # I think this is broken
        #move = dex.move_dex[random.choice(target.moves)]
        pass
    # metronome: pick a random callable move
    elif move.id == 'metronome':
        while move.id in dex.no_metronome:
            move = dex.move_dex[random.choice(list(dex.move_dex.keys()))]
    # mimic (disabled)
    elif move.id == 'mimic':
        pass
        #if target.last_used_move is not None:
        #    if 'mimic' in user.moves:
        #        user.moves.remove('mimic')
        #        user.moves.append(target.last_used_move)
        #        move = dex.move_dex[target.last_used_move]
    # copycat: use the target's last used move
    elif move.id == 'copycat':
        if target.last_used_move is not None:
            move = dex.move_dex[target.last_used_move]
    elif move.id == 'naturepower':
        move = dex.move_dex['triattack']
    # mirror move
    elif move.id == 'mirror move':
        if target.last_used_move is not None:
            move = dex.move_dex[target.last_used_move]
    # check non unique stuff
    # baneful bunker poisons on contact
    if 'banefulbunker' in target.volatile_statuses:
        if move.flags.contact:
            add_status(user, 'psn')
    # spiky shield
    # NOTE(review): 'spikeyshield' here differs from the 'spikyshield'
    # spelling used elsewhere in this file, so this branch may never fire —
    # verify against the status name actually added.
    if 'spikeyshield' in target.volatile_statuses:
        damage(user, 0.125, flag='percentmaxhp')
    # RECOIL DAMAGE (recoil that applies regardless of the hit)
    if move.recoil.damage != 0:
        if move.recoil.condition == 'always':
            if move.recoil.type == 'maxhp':
                damage(user, move.recoil.damage, flag='percentmaxhp')
    return move
def unique_moves_after_damage(B:Battle, user:Pokemon, move:dex.Move, target:Pokemon, dmg:int):
    '''
    ONLY CALLED IN run_move()

    Applies everything a move does AFTER its damage has been dealt:
    drain/heal, recoil, weather/terrain setting, and the long tail of
    move-specific unique effects (stat swaps, type changes, ability
    changes, item swaps, ...).

    B      -- the active Battle; weather/terrain state is read and written
    user   -- the Pokemon that used the move
    move   -- the resolved Move namedtuple for this use
    target -- the Pokemon the move was aimed at
    dmg    -- damage dealt by the move; used for drain/recoil amounts
    '''
    #drain moves
    # big root boosts the drained amount by 30%
    if user.item == 'bigroot':
        damage(user, -(math.floor(dmg * move.drain * 1.3)))
    else:
        damage(user, -(math.floor(dmg * move.drain)))
    #heal moves (negative damage == healing)
    if move.heal > 0:
        damage(user, -(move.heal), flag='percentmaxhp')
    #heal moves that depend on weather
    if move.id in ['moonlight', 'morningsun', 'synthesis']:
        if B.weather == 'clear':
            damage(user, -(0.5), flag='percentmaxhp')
        elif B.weather == 'sunlight':
            damage(user, -(0.66), flag='percentmaxhp')
        else:
            damage(user, -(0.25), flag='percentmaxhp')
    if move.id == 'shoreup':
        if B.weather == 'sandstorm':
            damage(user, -(0.66), flag='percentmaxhp')
        else:
            damage(user, -(0.50), flag='percentmaxhp')
    #recoil moves (recoil that only applies when the move hit)
    if move.recoil.damage != 0:
        if move.recoil.condition == 'hit':
            if move.recoil.type == 'maxhp':
                damage(user, move.recoil.damage, flag='percentmaxhp')
            if move.recoil.type == 'damage':
                damage(user, dmg* move.recoil.damage)
    # terrain moves
    if move.terrain is not None:
        B.terrain = move.terrain
    # weather moves
    # new weather lasts 5 turns, or 8 when holding the matching weather rock
    if move.weather is not None and B.weather != move.weather:
        B.weather = move.weather
        B.weather_n = 5
        if user.item == 'heatrock' and B.weather == 'sunlight':
            B.weather_n = 8
        if user.item == 'damprock' and B.weather == 'rain':
            B.weather_n = 8
        if user.item == 'smoothrock' and B.weather == 'sandstorm':
            B.weather_n = 8
        if user.item == 'icyrock' and B.weather == 'hail':
            B.weather_n = 8
    # acupressure: +2 stages to one random stat that is not already maxed
    if move.id == 'acupressure':
        possible_stats = [stat for stat in user.boosts if user.boosts[stat] < 6]
        if len(possible_stats) > 0:
            rand_int = random.randint(0, len(possible_stats)-1)
            boost_stat = possible_stats[rand_int]
            user.boosts[boost_stat] += 2
            if user.boosts[boost_stat] > 6:
                user.boosts[boost_stat] = 6
    # aqua ring
    if move.id == 'aquaring':
        user.aqua_ring = True
    # ingrain
    # NOTE(review): ingrain reuses the aqua_ring flag for its end-of-turn
    # healing — presumably intentional; verify
    if move.id == 'ingrain':
        user.aqua_ring = True
        user.trapped = True
    # aromatherapy (disabled)
    #if move.id == 'aromatherapy' or move.id == 'healbell':
    #    for pokemon in user.side.pokemon:
    #        pokemon.cure_status()
    # belly drum: max attack at the cost of half of max HP
    # NOTE(review): uses user.maxhp here while other code treats user.stats.hp
    # as the max, and flag='percentmax' vs the usual 'percentmaxhp' — confirm
    if move.id == 'bellydrum' and user.hp > (0.5 * user.maxhp):
        boost(user, {'atk': 6})
        damage(user, 0.5, flag='percentmax')
    # bestow: give the user's item to an item-less target
    if move.id == 'bestow':
        if user.item != '' and target.item == '':
            target.item = user.item
            user.item = ''
    # camouflage
    # every battle played using the sim is a 'link battle'
    if move.id == 'camouflage':
        user.types = ['Normal']
    # conversion: become a type of one of the user's moves (not already had)
    move_types = []
    if move.id == 'conversion':
        for user_move in user.moves:
            if dex.move_dex[user_move].type not in user.types:
                move_types.append(dex.move_dex[user_move].type)
        if len(move_types) > 0:
            user.types = [move_types[random.randint(0, len(move_types)-1)]]
        # this causes an attribute error and i dont know why
        # attrubiteError starts here
        # I THINK I FIGURED IT OUT
    # conversion 2: become a type resisting the last move that damaged the user
    #print(str(move))
    if move.id == 'conversion2':
        if user.last_damaging_move is not None:
            for type in dex.typechart_dex[dex.move_dex[user.last_damaging_move].type].damage_taken:
                if type not in user.types:
                    move_types.append(type)
            if len(move_types) > 0:
                user.types = [move_types[random.randint(0, len(move_types)-1)]]
    # curse (non-Ghost variant)
    if move.id == 'curse' and 'Ghost' not in user.types:
        boost(user, {'atk': 1, 'def': 1, 'spe': -1})
    # defog
    if move.id == 'defog':
        boost(target, {'evasion': -1})
        #target.side.side_condition = set()
    # entrainment: target copies the user's ability
    if move.id == 'entrainment':
        target.ability = user.ability
    # flower shield: +1 def to every Grass-type on the field
    if move.id == 'flowershield':
        if 'Grass' in user.types:
            boost(user, {'def': 1})
        if 'Grass' in target.types:
            boost(target, {'def': 1})
    # focus energy: raise the user's crit stage
    if move.id == 'focusenergy':
        user.crit_chance += 2
    # forests curse
    if move.id == 'forestscurse':
        target.types.append('Grass')
    # gastro acid: suppress the target's ability unless it is unsuppressable
    # NOTE(review): 'stancechance' looks like a typo for 'stancechange' —
    # verify against the ability ids in dex
    if move.id == 'gastroacid':
        if target.ability not in ['multitype', 'stancechance', 'schooling', 'comatose', 'shieldsdown', 'disguise', 'rkssystem', 'battlebond', 'powerconstruct']:
            target.ability = 'suppressed'
    # guard split: average def/spd between user and target
    if move.id == 'guardsplit':
        avg_def = (user.stats.defense + target.stats.defense)/2
        avg_spd = (user.stats.specialdefense + target.stats.specialdefense)/2
        user.stats.defense = avg_def
        target.stats.defense = avg_def
        user.stats.specialdefense = avg_spd
        target.stats.specialdefense = avg_spd
    # guard swap: trade def/spd between user and target
    if move.id == 'guardswap':
        user_def = user.stats.defense
        target_def = target.stats.defense
        user_spd = user.stats.specialdefense
        target_spd = target.stats.specialdefense
        user.stats.defense = target_def
        target.stats.defense = user_def
        user.stats.specialdefense = target_spd
        target.stats.specialdefense = user_spd
    # heart swap: trade all stat boosts
    if move.id == 'heartswap':
        user_boosts = user.boosts
        target_boosts = target.boosts
        user.boosts = target_boosts
        target.boosts = user_boosts
    # kings shield (aegislash form change not implemented)
    if move.id == 'kingsshield' and user.id == 'aegislash':
        pass
        #user.form_change('aegislashshield')
    # pain split: average current HP, capped at each pokemon's own max
    if move.id == 'painsplit':
        avg_hp = (user.hp + target.hp) /2
        user.hp = avg_hp if avg_hp < user.stats.hp else user.stats.hp
        target.hp = avg_hp if avg_hp < target.stats.hp else target.stats.hp
    # power split: average atk/spa between user and target
    if move.id == 'powersplit':
        avg_atk = (user.stats.attack + target.stats.attack)/2
        avg_spa = (user.stats.specialattack + target.stats.specialattack)/2
        user.stats.attack = avg_atk
        target.stats.attack = avg_atk
        user.stats.specialattack = avg_spa
        target.stats.specialattack = avg_spa
    # power swap: trade atk/spa between user and target
    if move.id == 'powerswap':
        user_atk = user.stats.attack
        target_atk = target.stats.attack
        user_spa = user.stats.specialattack
        target_spa = target.stats.specialattack
        user.stats.attack = target_atk
        target.stats.attack = user_atk
        user.stats.specialattack = target_spa
        target.stats.specialattack = user_spa
    # power trick: swap the user's own atk and spa
    if move.id == 'powertrick':
        user_atk = user.stats.attack
        user_spa = user.stats.specialattack
        user.stats.specialattack = user_atk
        user.stats.attack = user_spa
    # psychoshift: pass the user's status to the target, curing the user
    if move.id == 'psychoshift':
        if add_status(target, user.status):
            cure_status(user)
    # psychup: copy the target's stat boosts
    if move.id == 'psychup':
        user.boosts = target.boosts
    # purify: cure the target's status; heal the user if it worked
    if move.id == 'purify':
        if cure_status(target):
            damage(user, -0.5, flag='percentmaxhp')
    # recycle: recover the last consumed item
    if move.id == 'recycle':
        if user.last_used_item is not None and user.item == '':
            user.item = user.last_used_item
    # reflect type
    if move.id == 'reflecttype':
        user.types = target.types
    # refresh: cure the user's own burn/paralysis/poison
    if move.id == 'refresh':
        if user.status in ['brn', 'par', 'psn', 'tox']:
            cure_status(user)
    # rest: fall asleep for 2 turns and fully heal
    if move.id == 'rest':
        if user.status != 'slp':
            user.status = 'slp'
            user.sleep_n = 2
            damage(user, -1, flag='percentmaxhp')
    # role play: copy the target's ability
    if move.id == 'roleplay':
        user.ability = target.ability
    # simple beam
    if move.id == 'simplebeam':
        target.ability = 'simple'
    # sketch (not implemented)
    if move.id == 'sketch':
        pass
        # if target.last_used_move is not None:
        #     if 'sketch' in user.moves:
        #         user.moves.remove('sketch')
        #         user.moves.append(target.last_used_move)
    # skill swap: trade abilities
    if move.id == 'skillswap':
        user_ability = user.ability
        target_ability = target.ability
        user.ability = target_ability
        target.ability = user_ability
    # sleep talk: while asleep, use a random other known move
    if move.id == 'sleeptalk':
        if user.status == 'slp' and len(user.moves) > 1:
            while move.id == 'sleeptalk':
                move = dex.move_dex[user.moves[random.randint(0, len(user.moves)-1)]]
    # soak
    if move.id == 'soak':
        target.types = ['Water']
    # speed swap: trade speed stats
    if move.id == 'speedswap':
        user_speed = user.stats.speed
        target_speed = target.stats.speed
        user.stats.speed = target_speed
        target.stats.speed = user_speed
    # stockpile: +1 charge (max 3), +1 def and spd each time
    if move.id == 'stockpile':
        user.stockpile += 1
        if user.stockpile > 3:
            user.stockpile = 3
        boost(user, {'def': 1, 'spd': 1})
    # strength sap: heal by the target's attack stat, then lower its attack
    if move.id == 'strengthsap':
        if target.boosts['atk'] != -6:
            damage(user, -(get_attack(target, B.weather)))
            boost(target, {'atk': -1})
    # substitute: costs 1/4 max HP; fails below 1/4 HP or if already up
    if move.id == 'substitute':
        if not user.substitute and user.hp > user.stats.hp*0.25:
            user.substitute = True
            damage(user, 0.25, flag='percentmaxhp')
            user.substitute_hp = math.floor(0.25 * user.stats.hp)
    # switcheroo and trick: trade held items
    if move.id == 'switcheroo' or move.id == 'trick':
        user_item = user.item
        target_item = target.item
        user.item = target_item
        target.item = user_item
    # topsy-turvy: invert every boost on the target
    if move.id == 'topsyturvy':
        for stat in target.boosts:
            target.boosts[stat] = -target.boosts[stat]
    # trick or treat
    if move.id == 'trickortreat':
        target.types.append('Ghost')
    # worry seed
    if move.id == 'worryseed':
        target.ability = 'insomnia'
    # breaks protect: strip every protection-like volatile status
    if move.breaks_protect:
        if 'protect' in target.volatile_statuses:
            target.volatile_statuses.remove('protect')
        if 'banefulbunker' in target.volatile_statuses:
            target.volatile_statuses.remove('banefulbunker')
        if 'spikyshield' in target.volatile_statuses:
            target.volatile_statuses.remove('spikyshield')
        if 'kingsshield' in target.volatile_statuses:
            target.volatile_statuses.remove('kingsshield')
    # growth (the move) does another stat boost in the sun
    # NOTE(review): the boost is applied to target, not user — confirm that
    # target == user for self-targeting moves at this call site
    if move.id == 'growth':
        if B.weather == 'sunlight':
            boost(target, move.primary['self']['boosts'])
def create_move(B:Battle, p:Pokemon, c:Decision) -> dex.Move:
    '''
    ONLY NEEDS BATTLE FOR the pokemon's players used_zmove attribute
    This method takes in the user pokemon and the decision corresponding
    to that pokmeon and returns a Move (namedtuple) object that contains
    all information about the move that pokemon will use.
    Returns None when the decision is not a move decision.
    '''
    if c.type != 'move':
        return
    # out of PP: the pokemon is forced to use struggle
    if p.pp[p.moves[c.selection]] <= 0:
        move = dex.move_dex['struggle']
        return move
    move = dex.move_dex[p.moves[c.selection]]
    # encore overwrites move decision
    if 'encore' in p.volatile_statuses and p.last_used_move is not None:
        move = dex.move_dex[p.last_used_move]
        #return move # Can you z move the encored move? I think yes?
    player = B.p2
    if p.player_uid == 1:
        player = B.p1
    # not z-moving (or unable to): return the regular move
    if not can_z(p, move) or not c.zmove or player.used_zmove:
        return move
    # update z move power
    # THIS NEEDS UPDATING
    item = dex.item_dex[p.item]
    if item.zMove is True:
        # generic z-crystal: look the z-move up by the crystal's id
        zmove_id = dex.zmove_chart[item.id]
    else:
        # species-specific z-crystal: item.zMove names the z-move directly
        zmove_id = re.sub(r'\W+', '', item.zMove.lower())
    base_move = move
    move = dex.move_dex[zmove_id]
    if move.base_power == 1:
        # generic z-moves carry base_power 1; inherit power and category
        # from the base move instead
        move = move._replace(base_power = base_move.z_move.base_power)
        move = move._replace(category = base_move.category)
    # carry the base move's z-effects over; chain the second _replace off
    # the first result (bug fix: it used to restart from move.z_move,
    # discarding the boosts replacement)
    z_move = move.z_move._replace(boosts=base_move.z_move.boosts)
    z_move = z_move._replace(effect=base_move.z_move.effect)
    move = move._replace(z_move=z_move)
    return move
def populate_action_queue(q:List, p:Pokemon, c:Decision, m:dex.Move,
        T:Player, B:Battle) -> None:
    '''
    ONLY NEEDS BATTLE AND PLAYER TO CALL resolve_priority()
    Builds the Action corresponding to decision c and pushes it onto the
    priority heap q, keyed by resolve_priority(). Decisions that map to
    no action (e.g. a mega decision without the mega flag set) are
    ignored.
    '''
    if c.type == 'switch':
        action = Action('switch', user=p, pos=c.selection)
    elif c.type == 'mega' and c.mega:
        action = Action('mega', user=p)
    elif c.type == 'move':
        action = Action('move', user=p, move=m, target=c.target)
    else:
        # nothing to enqueue for this decision
        return
    heapq.heappush(q, (resolve_priority(action, B, T), action))
def resolve_priority(action, B:Battle, T:Player) -> float:
    '''
    ONLY NEEDS BATTLE AND PLAYER TO CALL get_speed()
    Maps an action to a sortable number for the action heap; a LOWER
    number resolves EARLIER.

    The value is
        (action_priority_tier * 13000) + (12096 - get_speed(...)) + random.random()
    where
      * tier 1 = switches, tier 2 = mega evolutions, and tiers 3-15 are
        moves (move priority +5 ... -7 mapped onto 3 ... 15),
      * 13000 exceeds 12096, the maximum calculated speed stat of any
        pokemon, so the tier always dominates the speed term,
      * speed is inverted so faster pokemon act first within a tier,
      * random.random() breaks exact ties with a coin flip.

    The priority queue can also be re-ordered later by certain effects
    (e.g. Round, Dancer, Instruct, Pledge moves, After You, Quash).
    '''
    if action.action_type == 'move':
        action_priority_tier = 3 + (5 - action.move.priority)
    else:
        action_priority_tier = {'switch': 1, 'mega': 2}.get(action.action_type)
    # get_speed() returns higher number = faster; invert so lower = faster
    inverted_speed = 12096 - get_speed(action.user, B.weather, B.terrain,
                                       B.trickroom, T.tailwind)
    return action_priority_tier * 13000 + inverted_speed + random.random()
| 32.782538 | 186 | 0.592301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,824 | 0.316236 |
04522f05b28efbf881a74b37b11db3f8da797911 | 8,936 | py | Python | opencamlib-read-only/scripts/voronoi/voronoi_bisectors.py | play113/swer | 78764c67885dfacb1fa24e494a20681265f5254c | [
"MIT"
] | null | null | null | opencamlib-read-only/scripts/voronoi/voronoi_bisectors.py | play113/swer | 78764c67885dfacb1fa24e494a20681265f5254c | [
"MIT"
] | null | null | null | opencamlib-read-only/scripts/voronoi/voronoi_bisectors.py | play113/swer | 78764c67885dfacb1fa24e494a20681265f5254c | [
"MIT"
] | 1 | 2020-07-04T13:58:00.000Z | 2020-07-04T13:58:00.000Z | import ocl
import camvtk
import time
import vtk
import datetime
import math
import random
def drawVertex(myscreen, p, vertexColor, rad=1):
    """Add a sphere actor of radius rad at point p to the VTK scene."""
    sphere = camvtk.Sphere(center=(p.x, p.y, p.z), radius=rad, color=vertexColor)
    myscreen.addActor(sphere)
def drawEdge(myscreen, e, edgeColor=camvtk.yellow):
    """Add a line actor connecting the two endpoints of edge e."""
    start = e[0]
    end = e[1]
    line = camvtk.Line(p1=(start.x, start.y, start.z),
                       p2=(end.x, end.y, end.z),
                       color=edgeColor)
    myscreen.addActor(line)
def drawCircle(myscreen, c, circleColor):
    """Add a circle actor for Circle c (center c.c, radius c.r)."""
    circle = camvtk.Circle(center=(c.c.x, c.c.y, c.c.z), radius=c.r, color=circleColor)
    myscreen.addActor(circle)
def drawLine(myscreen, l, lineColor):
    """Draw the line a*x + b*y + c = 0 through its axis intercepts,
    scaled by 100 so the segment spans the visible scene."""
    # x-intercept: x = -c/a ; y-intercept: y = -c/b
    x_intercept = 100 * ocl.Point(-l.c / l.a, 0)
    y_intercept = 100 * ocl.Point(0, -l.c / l.b)
    myscreen.addActor(camvtk.Line(p1=(x_intercept.x, x_intercept.y, x_intercept.z),
                                  p2=(y_intercept.x, y_intercept.y, y_intercept.z),
                                  color=lineColor))
# CIRCLE def
# (x(t) - xc1)^2 + (y(t)-yc1)^2 = (r1+k1*t)^2
class Circle:
    """Circle/arc site with center c, radius r and orientation cw
    (CW = 1, CCW = -1)."""
    def __init__(self, c=None, r=1, cw=1):
        # default the center lazily: the old `c=ocl.Point(0,0)` default was
        # a single mutable Point shared by every call that omitted c
        self.c = ocl.Point(0, 0) if c is None else c
        self.r = r
        self.cw = cw # CW=1, CCW = -1
# k +1 enlarging circle
# k -1 shrinking circle
# LINE def
# a1 x + b1 y + c1 + k1 t = 0 and a*a + b*b = 1
class Line:
    """Oriented line a*x + b*y + c + k*t = 0, with a*a + b*b = 1.
    k (+1 or -1) selects the offset side (left or right of the line)."""
    def __init__(self, a, b, c, k):
        self.a, self.b, self.c, self.k = a, b, c, k
# from Held 1991, page 94->
#
# bisectors are of the form
# line, parabola, ellipse, hyperbola
# x(t) = x1 - x2 - x3*t +/- x4 sqrt( square(x5+x6*t) - square(x7+x8*t) )
# y(t) = y1 - y2 - y3*t +/- y4 sqrt( square(y5+y6*t) - square(y7+y8*t) )
# line/line: line
# circle/line: parabola
# circle/circle: ellipse/hyperbola
# !only valid if no parallel lines and no concentric arcs
#
# line: (a, b, c, k)
# a1 x + b1 y + c1 + k1 t = 0 and a*a + b*b = 1
# k= +/- 1 indicates offset to right/left
#
# circle: (xc, yc, r, lambda)
# (x(t) - xc1)^2 + (y(t)-yc1)^2 = (r1+k1*t)^2
# lambda=-1 for CCW arc and +1 otherwise
# k +1 enlarging circle, k -1 shrinking circle
#
# for a bisector we store only four parameters (alfa1, alfa2, alfa3, alfa4)
#
# line/line
# delta = a1*b2 - a2*b1
# alfa1= (b1*d2-b2*d1)/delta
# alfa2= (a2*d1-a1*d2)/delta
# alfa3= b2-b1
# alfa4= a1-a2
# bisector-params:
# x1 = alfa1, x3 = -alfa3, x2 = x4 = x5 = x6 = x7 = x8 = 0
# y1 = alfa2, y3 = -alfa4, y2=y4=y5=y6=y7=y8 = 0
#
# circle/line
#
# alfa1= a2
# alfa2= b2
# alfa3= a2*xc1 + b2*yc1+d2
# alfa4= r1
# params:
# x1 = xc1, x2 = alfa1*alfa3, x3 = -alfa1, x3 = alfa2, x5 = alfa4, x6 = lambda1, x7 = alfa3, x8 = -1
# y1 = yc1, y2 = alfa2*alfa3, y3 = -alfa2, y4 = alfa1, y5 = alfa4, y6 = lambda1, y7 = alfa3, y8 = -1
#
# circle / circle
# d= sqrt( square(xc1-xc2) + square(yc1-yc2) )
# alfa1= (xc2-xc1)/d
# alfa2= (yc2-yc1)/d
# alfa3= (r2*r2-r1*r1-d*d)/2d
# alfa4= (lambda2*r2-lambda1*r1)/d
# params:
# x1 = xc1, x2 = alfa1*alfa3, x3 = alfa1*alfa4, x4 = alfa2, x5 = r1, x6 = lambda1, x7 = alfa3, x8 = alfa4
# y1 = yc1, y2 = alfa2*alfa3, y3 = alfa2*alfa4, y4 = alfa1, y5 = r1, y6 = lambda1, y7 = alfa3, y8 = alfa4
class LineLine:
    """ line/line bisector is a line (Held 1991, p. 94-) """
    def __init__(self, l1, l2):
        # alfa1/alfa2: a point on the bisector; alfa3/alfa4: its direction
        self.delta = l1.a * l2.b - l2.a * l1.b
        self.alfa1 = (l1.b * l2.c - l2.b * l1.c) / self.delta
        self.alfa2 = (l2.a * l1.c - l1.a * l2.c) / self.delta
        self.alfa3 = l2.b * l1.k - l1.b * l2.k
        self.alfa4 = l1.a * l2.k - l2.a * l1.k
    def getX(self):
        # x(t) parameters: only x1 and x3 are non-zero for a line bisector
        return [self.alfa1, 0, -self.alfa3, 0, 0, 0, 0, 0]
    def getY(self):
        # y(t) parameters: only y1 and y3 are non-zero for a line bisector
        return [self.alfa2, 0, -self.alfa4, 0, 0, 0, 0, 0]
# CIRCLE/LINE (same as point-line?)
# * alfa1= a2
# * alfa2= b2
# * alfa3= a2*xc1 + b2*yc1+d2 (c2?)
# * alfa4= r1
# x1 = xc1
# x2 = alfa1*alfa3
# x3 = -alfa1,
# x3 = alfa2,
# x5 = alfa4,
# x6 = lambda1,
# x7 = alfa3,
# x8 = -1
# y1 = yc1,
# y2 = alfa2*alfa3,
# y3 = -alfa2,
# y4 = alfa1,
# y5 = alfa4,
# y6 = lambda1,
# y7 = alfa3,
# y8 = -1
class CircleCircle:
    """Circle/circle bisector: an ellipse or hyperbola (Held 1991).
    d is the center distance; cw = -1 for a CCW arc, +1 otherwise."""
    def __init__(self, c1, c2):
        self.d = (c1.c - c2.c).xyNorm()
        if self.d > 0.0:
            self.alfa1 = (c2.c.x - c1.c.x) / self.d
            self.alfa2 = (c2.c.y - c1.c.y) / self.d
            self.alfa3 = (c2.r * c2.r - c1.r * c1.r - self.d * self.d) / (2 * self.d)
            self.alfa4 = (c2.cw * c2.r - c1.cw * c1.r) / self.d
        else:
            # concentric circles: parameters are undefined, keep them zero
            self.alfa1 = 0.0
            self.alfa2 = 0.0
            self.alfa3 = 0.0
            self.alfa4 = 0.0
        self.c1 = c1 # keep the first circle for getX()/getY()
    def getX(self):
        return [self.c1.c.x, self.alfa1 * self.alfa3, self.alfa1 * self.alfa4,
                self.alfa2, self.c1.r, self.c1.cw, self.alfa3, self.alfa4]
    def getY(self):
        return [self.c1.c.y, self.alfa2 * self.alfa3, self.alfa2 * self.alfa4,
                self.alfa1, self.c1.r, self.c1.cw, self.alfa3, self.alfa4]
class Bisector:
    """Parametric bisector built from the 8 x- and 8 y-parameters of a
    LineLine/CircleCircle bisector (Held 1991):
        x(t) = x1 - x2 - x3*t +/- x4 * sqrt((x5+x6*t)^2 - (x7+x8*t)^2)
    and analogously for y(t)."""
    def __init__(self, Bis):
        # Bis: any object exposing getX()/getY() parameter lists
        self.x= Bis.getX()
        self.y= Bis.getY()
    def Point(self, t, k):
        # Evaluate both branches of the bisector at offset t; returns
        # [plus-branch point, minus-branch point].
        # NOTE(review): parameter k is unused here — confirm it can be dropped
        x=self.x
        y=self.y
        detx = ( math.pow((x[4]+x[5]*t),2) - math.pow((x[6]+x[7]*t),2) )
        dety = ( math.pow((y[4]+y[5]*t),2) - math.pow((y[6]+y[7]*t),2) )
        xp = x[0]-x[1]-x[2]*t + x[3]*math.sqrt( detx )
        yp = y[0]-y[1]-y[2]*t + y[3]*math.sqrt( dety )
        xm = x[0]-x[1]-x[2]*t - x[3]*math.sqrt( detx )
        ym = y[0]-y[1]-y[2]*t - y[3]*math.sqrt( dety )
        return [ocl.Point(xp,yp), ocl.Point(xm,ym)]
    def minT(self):
        # the minimum t that makes sense sets the sqrt() to zero
        # (x[4]+x[5]*t)^2 - (x[6]+x[7]*t)^2 = 0
        # (x[4]+x[5]*t)^2 = (x[6]+x[7]*t)^2
        # (x[4]+x[5]*t) = (x[6]+x[7]*t) OR (x[4]+x[5]*t) = -(x[6]+x[7]*t)
        # (x[5]-x[7])*t = (x[6]-x[4]) OR (x[5]+x7*t) = x4-x[6]
        # t = x6-x4 / (x5-x7) or t = x4-x6 / (x5+x7)
        x = self.x
        y = self.y
        t1=0
        t2=0
        t3=0
        t4=0
        if (((x[5]-x[7])!=0) and ((x[5]+x[7])!=0) ):
            t1 = (x[6]-x[4]) / (x[5]-x[7])
            t2 = (-x[4]-x[6]) / (x[5]+x[7])
            t3 = (y[6]-y[4]) / (y[5]-y[7])
            t4 = (-y[4]-y[6]) / (y[5]+y[7])
        # debug output (Python 2 print statements)
        print " t1 solution= ",t1
        print " t2 solution= ",t2
        print " t3 solution= ",t3
        print " t4 solution= ",t4
        # NOTE(review): always returns t2 although four candidate roots are
        # computed — confirm t2 is the intended one
        return t2
def drawBisector(myscreen, bis):
    """Sample the bisector from bis.minT() upward and draw both branches
    (plus branch in green, minus branch in red)."""
    n_samples = 300
    t_span = 400
    step = float(t_span) / float(n_samples)
    t = bis.minT()
    plus_branch = []
    minus_branch = []
    for _ in range(0, n_samples):
        plus_pt, minus_pt = bis.Point(t, 1)
        plus_branch.append(plus_pt)
        minus_branch.append(minus_pt)
        t = t + step
    for pt in plus_branch:
        drawVertex(myscreen, pt, camvtk.green, rad=1)
    for pt in minus_branch:
        drawVertex(myscreen, pt, camvtk.red, rad=1)
if __name__ == "__main__":
    # Demo: draw circle/circle and line/line bisectors in a VTK window.
    print ocl.revision()
    myscreen = camvtk.VTKScreen()
    myscreen.camera.SetPosition(0.01, 0, 1000 )
    myscreen.camera.SetFocalPoint(0, 0, 0)
    myscreen.camera.SetClippingRange(-100,3000)
    camvtk.drawOCLtext(myscreen)
    # PNG writer setup (only used by the commented-out frame dump below)
    w2if = vtk.vtkWindowToImageFilter()
    w2if.SetInput(myscreen.renWin)
    lwr = vtk.vtkPNGWriter()
    lwr.SetInput( w2if.GetOutput() )
    # two overlapping circles and their bisector
    c1 = Circle(c=ocl.Point(100,30), r=100, cw=1)
    c2 = Circle(c=ocl.Point(20,30), r=60, cw=1)
    drawCircle(myscreen, c1, camvtk.cyan)
    drawCircle(myscreen, c2, camvtk.cyan)
    c1c2 = CircleCircle(c1,c2)
    bicc = Bisector( c1c2 )
    drawBisector( myscreen, bicc )
    # same circle pair but with the first circle's orientation flipped
    c1a = Circle(c=ocl.Point(100,30), r=100, cw=-1)
    c2a = Circle(c=ocl.Point(20,30), r=60, cw=1)
    c1c2alt = CircleCircle(c1a,c2a)
    biccalt = Bisector( c1c2alt )
    drawBisector( myscreen, biccalt )
    # two lines and their (line) bisector
    l1 = Line( math.cos(1), math.sin(1) , 1 , -1)
    l2 = Line( math.cos(0.1), math.sin(0.1) , -1, 1)
    drawLine(myscreen, l1, camvtk.yellow )
    drawLine(myscreen, l2, camvtk.yellow )
    l1l2 = LineLine( l1, l2)
    bill = Bisector( l1l2 )
    drawBisector( myscreen, bill )
    myscreen.render()
    #w2if.Modified()
    #lwr.SetFileName("frames/vd_dt_20_"+ ('%05d' % n)+".png")
    #lwr.Write()
    print "PYTHON All DONE."
    myscreen.render()
    # hand control to the VTK interactor (blocks until the window closes)
    myscreen.iren.Start()
| 30.189189 | 109 | 0.513653 | 3,871 | 0.433192 | 0 | 0 | 0 | 0 | 0 | 0 | 3,088 | 0.345568 |
045237c527cde898e9b9472e8d4dba096a190def | 760 | py | Python | wheel5/scheduler.py | xdralex/pytorch-wheel5 | 336529e354a45908cf3f8f12cd401a95fb2a5351 | [
"MIT"
] | 2 | 2020-06-08T13:10:06.000Z | 2020-07-07T05:34:18.000Z | wheel5/scheduler.py | xdralex/pytorch-wheel5 | 336529e354a45908cf3f8f12cd401a95fb2a5351 | [
"MIT"
] | 1 | 2020-04-29T08:46:14.000Z | 2020-04-29T08:46:14.000Z | wheel5/scheduler.py | xdralex/pytorch-wheel5 | 336529e354a45908cf3f8f12cd401a95fb2a5351 | [
"MIT"
] | null | null | null | from torch.optim.lr_scheduler import _LRScheduler
class WarmupScheduler(_LRScheduler):
    """Linear learning-rate warmup for the first `epochs` epochs, after
    which stepping and lr computation are delegated to `next_scheduler`
    (with the epoch count shifted so it starts from zero)."""
    def __init__(self, optimizer, epochs, next_scheduler):
        # epochs: number of warmup epochs over which lr ramps 0 -> base_lr
        # next_scheduler: scheduler that takes over once warmup is done
        self.epochs = epochs
        self.next_scheduler = next_scheduler
        super(WarmupScheduler, self).__init__(optimizer)
    def get_lr(self):
        if self.last_epoch > self.epochs:
            # warmup finished: report whatever the wrapped scheduler computes
            return self.next_scheduler.get_lr()
        else:
            # linear ramp: fraction last_epoch/epochs of each base lr
            return [lr * float(self.last_epoch) / self.epochs for lr in self.base_lrs]
    def step(self, epoch=None):
        if self.last_epoch > self.epochs:
            # delegate, shifting the epoch so next_scheduler starts at 0
            epoch = None if epoch is None else epoch - self.epochs
            return self.next_scheduler.step(epoch)
        else:
            return super(WarmupScheduler, self).step(epoch)
| 34.545455 | 86 | 0.659211 | 707 | 0.930263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0453131e08ad9df3feb4129ffbeb4612445ebb21 | 763 | py | Python | analysis/dbase/tracking/train.py | BrancoLab/FC_analysis | 7124a7d998275bce6f7a18c264399c7dabfd430b | [
"MIT"
] | 1 | 2018-08-20T14:47:09.000Z | 2018-08-20T14:47:09.000Z | analysis/dbase/tracking/train.py | BrancoLab/FC_analysis | 7124a7d998275bce6f7a18c264399c7dabfd430b | [
"MIT"
] | null | null | null | analysis/dbase/tracking/train.py | BrancoLab/FC_analysis | 7124a7d998275bce6f7a18c264399c7dabfd430b | [
"MIT"
] | 1 | 2018-09-24T15:58:57.000Z | 2018-09-24T15:58:57.000Z | import deeplabcut as dlc
import os
from fcutils.file_io.utils import listdir
# from fcutils.video.utils import trim_clip
# Path to the DeepLabCut project config; all training parameters live there.
config_file = 'D:\\Dropbox (UCL - SWC)\\Rotation_vte\\Locomotion\\dlc\\locomotion-Federico\\config.yaml'
# Launch (or resume) network training for the project.
dlc.train_network(config_file)
# Earlier pipeline steps kept for reference (outlier extraction, dataset
# merging, and video trimming):
# fld = 'D:\\Dropbox (UCL - SWC)\\Rotation_vte\\Locomotion\\dlc'
# vids = [os.path.join(fld, '200203_CA8493_video_trim.mp4'), os.path.join(fld, '200204_CA8491_video_trim.mp4'), os.path.join(fld, '200204_CA8494_video_trim.mp4')]
# dlc.extract_outlier_frames(config_file, vids, epsilon=40)
# dlc.merge_datasets(config_file)
# vids = [f for f in listdir(fld) if f.endswith('.mp4')]
# for vid in vids:
#     savepath = vid.split(".")[0]+'_trim.mp4'
# trim_clip(vid, savepath, start=0.25, stop=0.35) | 34.681818 | 162 | 0.728702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.817824 |
04538ee88a4559bebed7924c46e1d13c7c263e79 | 5,290 | py | Python | rmApp.py | LREN-CHUV/mip-apps-manager | 989d4a9fa5bf398ba71b5e4622e3f1deed0ac055 | [
"MIT"
] | null | null | null | rmApp.py | LREN-CHUV/mip-apps-manager | 989d4a9fa5bf398ba71b5e4622e3f1deed0ac055 | [
"MIT"
] | null | null | null | rmApp.py | LREN-CHUV/mip-apps-manager | 989d4a9fa5bf398ba71b5e4622e3f1deed0ac055 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse, shutil, os, sys
def getArgs():
    """Parse the command line: an app identifier and the MIP app directory."""
    parser = argparse.ArgumentParser()
    for arg_name, arg_help in (
            ('app', 'Application identifier (used by the app developer)'),
            ('mipDir', 'Directory containing the mip application (<path>/app/)')):
        parser.add_argument(arg_name, help=arg_help)
    return parser.parse_args()
def checkArgs(args):
    # Validate the parsed arguments: mipDir must exist and must contain a
    # scripts/app/<app> project folder. Exits with status 1 otherwise.
    if not os.path.isdir(args.mipDir):
        print 'Error in main : '+args.mipDir+' is not a directory ! '
        sys.exit(1)
    if not os.path.isdir(os.path.join(args.mipDir, 'scripts/app/', args.app)):
        print 'Error in main : The app project '+args.app+' do not seem to exist ! '
        print 'Check that you entered the right pseudonym and the right applcation directory ! '
        sys.exit(1)
def writeFile(fileName, content):
    # Overwrite fileName with content; exits with status 1 on IOError.
    try:
        f = open(fileName, 'w')
        f.write(content)
        f.close()
    except IOError:
        print 'Error in writeFile : Cannot write in '+fileName+' file ! '
        sys.exit(1)
def findAndRemove(fileName, pattern):
    # Rewrite fileName, dropping every line that contains pattern.
    # Exits with status 1 on IOError.
    try:
        content = ''
        f = open(fileName, 'r')
        for line in f:
            if pattern not in line:
                content += line
        f.close()
        writeFile(fileName, content)
    except IOError:
        print 'Error in findAndRemove : Cannot read the file '+fileName+' ! '
        sys.exit(1)
def fileContains(fileName, pattern):
    # Return True if any line of fileName contains pattern, else False.
    # Exits with status 1 on IOError.
    try:
        f = open(fileName, 'r')
        for line in f:
            if pattern in line:
                f.close()
                return True
        f.close()
        return False
    except IOError:
        print 'Error in fileContains : Cannot read the file '+fileName+' ! '
        sys.exit(1)
def findTagLimits(fileName, pattern, del1, del2):
    # Find the 1-based line span of the tag starting at the line containing
    # pattern and ending when the del1/del2 (open/close delimiter) nesting
    # count returns to zero. Returns (startLine, stopLine); both are 0 when
    # the pattern is not found. Exits with status 1 on IOError.
    try:
        f = open(fileName, 'r')
        hasFoundPattern = False
        startLine = 0
        stopLine = 0
        divCount = 0
        lineNum = 0
        for line in f:
            lineNum += 1
            if pattern in line:
                hasFoundPattern = True
                startLine = lineNum
            elif hasFoundPattern and divCount == 0:
                # nesting closed on a previous line: the tag ended there
                stopLine = lineNum - 1
                hasFoundPattern = False
            if hasFoundPattern:
                if del1 in line:
                    divCount += 1
                if del2 in line:
                    divCount -= 1
        f.close()
        return (startLine, stopLine)
    except IOError:
        print 'Error in findTagLimits : Cannot read the file '+fileName+' ! '
        sys.exit(1)
def removeBetween(fileName, start, stop):
    # Rewrite fileName, dropping the 1-based line range [start, stop]
    # inclusive. Exits with status 1 on IOError.
    try:
        content = ''
        f = open(fileName, 'r')
        lineNum = 0
        for line in f:
            lineNum += 1
            if lineNum < start or lineNum > stop:
                content += line
        f.close()
        writeFile(fileName, content)
    except IOError:
        print 'Error in removeBetween : Cannot read the file '+fileName+' ! '
        sys.exit(1)
def strContainsListElement(string, strList):
    """Return True if any element of strList occurs as a substring of string."""
    return any(element in string for element in strList)
def main():
    # Remove an external app from a MIP installation: delete its folder,
    # unregister its Angular module, strip its <script> inclusions and
    # dashboard tile, then re-color the remaining tiles.
    # Get arguments
    args = getArgs()
    args.app = args.app.lower()
    checkArgs(args)
    # Remove application folder
    appFolder = os.path.join(args.mipDir, 'scripts/app/', args.app)
    shutil.rmtree(appFolder)
    # Remove module from `app.js`
    path = os.path.join(args.mipDir, 'scripts/app/app.js')
    if fileContains(path, '\'chuvApp.'+args.app+'\''):
        findAndRemove(path, '    \'chuvApp.'+args.app+'\',\n')
    else:
        print 'The module '+'\'chuvApp.'+args.app+'\'seems to have already been deleted ! '
    # Remove module and controller inclusions from main `index.html`
    path = os.path.join(args.mipDir, 'index.html')
    linesToRm = []
    linesToRm.append('<!-- JS inclusions for external app "'+args.app+'" -->')
    linesToRm.append('<script src="scripts/app/'+args.app+'/'+args.app+'.module.js"></script>')
    linesToRm.append('<script src="scripts/app/'+args.app+'/'+args.app+'.controller.js"></script>')
    for line in linesToRm:
        findAndRemove(path, line)
    # Remove existing tiles from html
    path = os.path.join(args.mipDir, 'scripts/app/hbpapps/hbpapps.html')
    if fileContains(path,'tile-'+args.app):
        limits = findTagLimits(path, '<div class="info-tile tile-'+args.app+'">', '<div', '</div')
        removeBetween(path, limits[0]-1, limits[1]+1)
    else:
        print 'The tile for this app seems to have already been removed from the `hbpapps.html` file ! '
    # Remove tile from less file
    path = os.path.join(args.mipDir, 'styles/less/virtua/dashboard.less')
    if fileContains(path,'tile-'+args.app):
        limits = findTagLimits(path, '&.tile-'+args.app+' {', '{', '}')
        removeBetween(path, limits[0], limits[1])
    else:
        # NOTE(review): this message mentions hbpapps.html but this branch
        # refers to dashboard.less — the text looks copy-pasted
        print 'The tile for this app seems to have already been removed from the `hbpapps.html` file ! '
    # Update tiles colors
    # Re-assign the 4-color cycle to every remaining (non-excluded) tile by
    # replacing the line following each `&.tile-...` selector.
    try:
        exclList = ['&.tile-orange','&.tile-blue', '&.tile-gray', '&.tile-edit']
        content = ''
        needChange = False
        cssCode = ''
        f = open(path, 'r')
        tileNum = 0
        for line in f:
            if needChange:
                # swap the line after the selector for the new color rule
                content += cssCode
                needChange = False
            else:
                content += line
            if '&.tile-' in line and not strContainsListElement(line, exclList):
                tileNum += 1
                needChange = True
                if (tileNum+3) % 4 == 0:
                    cssCode = '    background-color: rgba(222, 147, 109, 0.25);\n' # orange
                elif (tileNum+2) % 4 == 0:
                    cssCode = '    background-color: rgba(59, 139, 144, 0.25);\n' # blue
                elif (tileNum+1) % 4 == 0:
                    cssCode = '    background-color: rgba(158, 158, 158, 0.251);\n' # gray
                elif tileNum % 4 == 0:
                    cssCode = '    background-color: rgba(45, 77, 79, 0.251);\n' # indigo
        f.close()
        writeFile(path, content)
    except IOError:
        print 'Error in main : Cannot read the file '+path+' ! '
        sys.exit(1)
# Script entry point.
if __name__ == '__main__':
    main()
| 28.138298 | 98 | 0.656333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,830 | 0.345936 |
04548c65b3a29cd8c065d7a21cdd9addeb849019 | 1,846 | py | Python | setup.py | aparafita/flow-torch | 4b94a444d05f75334c91bfd697087b393c49d3c3 | [
"MIT"
] | 22 | 2020-01-20T02:32:45.000Z | 2021-12-14T17:22:40.000Z | setup.py | aparafita/flow-torch | 4b94a444d05f75334c91bfd697087b393c49d3c3 | [
"MIT"
] | null | null | null | setup.py | aparafita/flow-torch | 4b94a444d05f75334c91bfd697087b393c49d3c3 | [
"MIT"
] | 2 | 2020-08-19T03:03:29.000Z | 2021-06-10T05:49:26.000Z | #!/usr/bin/env python
from setuptools import setup
# Release version; also used to build the download_url tag below.
version = '0.1.2'
# PyPI long description (rendered as Markdown on the project page).
long_description = """
# flow
This project implements basic Normalizing Flows in PyTorch
and provides functionality for defining your own easily,
following the conditioner-transformer architecture.
This is specially useful for lower-dimensional flows and for learning purposes.
Nevertheless, work is being done on extending its functionalities
to also accomodate for higher dimensional flows.
Supports conditioning flows, meaning, learning probability distributions
conditioned by a given conditioning tensor.
Specially useful for modelling causal mechanisms.
For more information,
please look at our [Github page](https://github.com/aparafita/flow).
"""
# Runtime dependencies come from requirements.txt (one per line).
with open('requirements.txt') as f:
    install_requires = [line.strip() for line in f if line.strip()]
setup(
    name='flow-torch',
    packages=['flow'],
    version=version,
    license='MIT',
    description='Normalizing Flow models in PyTorch',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Álvaro Parafita',
    author_email='parafita.alvaro@gmail.com',
    url='https://github.com/aparafita/flow',
    download_url=f'https://github.com/aparafita/flow/archive/v{version}.tar.gz',
    keywords=[
        'flow', 'density', 'estimation',
        'sampling', 'probability', 'distribution'
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Operating System :: OS Independent',
    ],
    install_requires=install_requires,
    include_package_data=True,
)
| 32.385965 | 80 | 0.712351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,254 | 0.678939 |
0454da70bf56318261f39eb6d452f26d17c27244 | 430 | py | Python | app/routes/__init__.py | Poketnans/capstone-q3 | 38d550a54ff41387534241df85eb8aa8c9b6ba7e | [
"MIT"
] | null | null | null | app/routes/__init__.py | Poketnans/capstone-q3 | 38d550a54ff41387534241df85eb8aa8c9b6ba7e | [
"MIT"
] | 4 | 2022-03-03T12:47:02.000Z | 2022-03-08T18:10:34.000Z | app/routes/__init__.py | Poketnans/capstone-q3 | 38d550a54ff41387534241df85eb8aa8c9b6ba7e | [
"MIT"
] | 1 | 2022-03-17T14:21:30.000Z | 2022-03-17T14:21:30.000Z | from flask import Flask
from .storage_blueprint import bp_storage
from .tattooists_blueprint import bp_tattooists
from .tattoos_blueprint import bp_tattoos
from .clients_blueprint import bp_clients
def init_app(app: Flask) -> None:
''' Registra as blueprints '''
app.register_blueprint(bp_storage)
app.register_blueprint(bp_tattooists)
app.register_blueprint(bp_tattoos)
app.register_blueprint(bp_clients)
| 26.875 | 47 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.069767 |
0455b199d187f2d50b426f8cc0174c4016c20c0b | 692 | py | Python | PDF-Tools/makepdf/make-pdf-helloworld.py | maysam-h/pdf-toolz | dc182242b11dd1737ee787f19395569382af510f | [
"BSD-2-Clause"
] | null | null | null | PDF-Tools/makepdf/make-pdf-helloworld.py | maysam-h/pdf-toolz | dc182242b11dd1737ee787f19395569382af510f | [
"BSD-2-Clause"
] | null | null | null | PDF-Tools/makepdf/make-pdf-helloworld.py | maysam-h/pdf-toolz | dc182242b11dd1737ee787f19395569382af510f | [
"BSD-2-Clause"
] | 1 | 2020-09-17T23:17:16.000Z | 2020-09-17T23:17:16.000Z | #20080518
#20080519
import mPDF
import time
import zlib
import sys
if len(sys.argv) != 2:
print "Usage: make-pdf-helloworld pdf-file"
print " "
print " Source code put in the public domain by Didier Stevens, no Copyright"
print " Use at your own risk"
print " https://DidierStevens.com"
else:
pdffile = sys.argv[1]
oPDF = mPDF.cPDF(pdffile)
oPDF.header()
oPDF.template1()
#oPDF.stream(5, 0, "BT /F1 24 Tf 100 700 Td (Hello World) Tj ET")
oPDF.stream(5, 0, """BT /F1 12 Tf 100 700 Td 15 TL
(Hello World) Tj
(Second Line) '
(Third Line) '
ET
100 712 100 -100 re S""")
oPDF.xrefAndTrailer("1 0 R")
| 20.352941 | 83 | 0.605491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.543353 |
0455f230ec9eb1646bbd8b355eef81e8a026f547 | 5,969 | py | Python | tests/test_adapters.py | bernt-matthias/cutadapt | 9ba5b705ba0e6cc5e32d4ce3810788b05b16a306 | [
"MIT"
] | null | null | null | tests/test_adapters.py | bernt-matthias/cutadapt | 9ba5b705ba0e6cc5e32d4ce3810788b05b16a306 | [
"MIT"
] | null | null | null | tests/test_adapters.py | bernt-matthias/cutadapt | 9ba5b705ba0e6cc5e32d4ce3810788b05b16a306 | [
"MIT"
] | null | null | null | import pytest
from dnaio import Sequence
from cutadapt.adapters import Adapter, Match, Where, LinkedAdapter
def test_issue_52():
adapter = Adapter(
sequence='GAACTCCAGTCACNNNNN',
where=Where.BACK,
remove='suffix',
max_error_rate=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True)
read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
am = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2,
remove_before=False, adapter=adapter, read=read)
assert am.wildcards() == 'GGC'
"""
The result above should actually be 'CGGC' since the correct
alignment is this one:
adapter GAACTCCAGTCACNNNNN
mismatches X X
read CCCCAGAACTACAGTC-CCGGC
Since we do not keep the alignment, guessing 'GGC' is the best we
can currently do.
"""
def test_issue_80():
# This issue turned out to not be an actual issue with the alignment
# algorithm. The following alignment is found because it has more matches
# than the 'obvious' one:
#
# TCGTATGCCGTCTTC
# =========X==XX=
# TCGTATGCCCTC--C
#
# This is correct, albeit a little surprising, since an alignment without
# indels would have only two errors.
adapter = Adapter(
sequence="TCGTATGCCGTCTTC",
where=Where.BACK,
remove='suffix',
max_error_rate=0.2,
min_overlap=3,
read_wildcards=False,
adapter_wildcards=False)
read = Sequence(name="seq2", sequence="TCGTATGCCCTCC")
result = adapter.match_to(read)
assert result.errors == 3, result
assert result.astart == 0, result
assert result.astop == 15, result
def test_str():
a = Adapter('ACGT', where=Where.BACK, remove='suffix', max_error_rate=0.1)
str(a)
str(a.match_to(Sequence(name='seq', sequence='TTACGT')))
def test_linked_adapter():
front_adapter = Adapter('AAAA', where=Where.PREFIX, min_overlap=4)
back_adapter = Adapter('TTTT', where=Where.BACK, min_overlap=3)
linked_adapter = LinkedAdapter(
front_adapter, back_adapter, front_required=True, back_required=False, name='name')
assert linked_adapter.front_adapter.min_overlap == 4
assert linked_adapter.back_adapter.min_overlap == 3
sequence = Sequence(name='seq', sequence='AAAACCCCCTTTT')
trimmed = linked_adapter.match_to(sequence).trimmed()
assert trimmed.name == 'seq'
assert trimmed.sequence == 'CCCCC'
def test_info_record():
adapter = Adapter(
sequence='GAACTCCAGTCACNNNNN',
where=Where.BACK,
max_error_rate=0.12,
min_overlap=5,
read_wildcards=False,
adapter_wildcards=True,
name="Foo")
read = Sequence(name="abc", sequence='CCCCAGAACTACAGTCCCGGC')
am = Match(astart=0, astop=17, rstart=5, rstop=21, matches=15, errors=2, remove_before=False,
adapter=adapter, read=read)
assert am.get_info_record() == (
"abc",
2,
5,
21,
'CCCCA',
'GAACTACAGTCCCGGC',
'',
'Foo',
'',
'',
'',
)
def test_random_match_probabilities():
a = Adapter('A', where=Where.BACK, max_error_rate=0.1).create_statistics()
assert a.back.random_match_probabilities(0.5) == [1, 0.25]
assert a.back.random_match_probabilities(0.2) == [1, 0.4]
for s in ('ACTG', 'XMWH'):
a = Adapter(s, where=Where.BACK, max_error_rate=0.1).create_statistics()
assert a.back.random_match_probabilities(0.5) == [1, 0.25, 0.25**2, 0.25**3, 0.25**4]
assert a.back.random_match_probabilities(0.2) == [1, 0.4, 0.4*0.1, 0.4*0.1*0.4, 0.4*0.1*0.4*0.1]
a = Adapter('GTCA', where=Where.FRONT, max_error_rate=0.1).create_statistics()
assert a.front.random_match_probabilities(0.5) == [1, 0.25, 0.25**2, 0.25**3, 0.25**4]
assert a.front.random_match_probabilities(0.2) == [1, 0.4, 0.4*0.1, 0.4*0.1*0.4, 0.4*0.1*0.4*0.1]
def test_add_adapter_statistics():
stats = Adapter('A', name='name', where=Where.BACK, max_error_rate=0.1).create_statistics()
end_stats = stats.back
end_stats.adjacent_bases['A'] = 7
end_stats.adjacent_bases['C'] = 19
end_stats.adjacent_bases['G'] = 23
end_stats.adjacent_bases['T'] = 42
end_stats.adjacent_bases[''] = 45
end_stats.errors[10][0] = 100
end_stats.errors[10][1] = 11
end_stats.errors[10][2] = 3
end_stats.errors[20][0] = 600
end_stats.errors[20][1] = 66
end_stats.errors[20][2] = 6
stats2 = Adapter('A', name='name', where=Where.BACK, max_error_rate=0.1).create_statistics()
end_stats2 = stats2.back
end_stats2.adjacent_bases['A'] = 43
end_stats2.adjacent_bases['C'] = 31
end_stats2.adjacent_bases['G'] = 27
end_stats2.adjacent_bases['T'] = 8
end_stats2.adjacent_bases[''] = 5
end_stats2.errors[10][0] = 234
end_stats2.errors[10][1] = 14
end_stats2.errors[10][3] = 5
end_stats2.errors[15][0] = 90
end_stats2.errors[15][1] = 17
end_stats2.errors[15][2] = 2
stats += stats2
r = stats.back
assert r.adjacent_bases == {'A': 50, 'C': 50, 'G': 50, 'T': 50, '': 50}
assert r.errors == {
10: {0: 334, 1: 25, 2: 3, 3: 5},
15: {0: 90, 1: 17, 2: 2},
20: {0: 600, 1: 66, 2: 6},
}
def test_issue_265():
"""Crash when accessing the matches property of non-anchored linked adapters"""
s = Sequence('name', 'AAAATTTT')
front_adapter = Adapter('GGG', where=Where.FRONT)
back_adapter = Adapter('TTT', where=Where.BACK)
la = LinkedAdapter(front_adapter, back_adapter, front_required=False, back_required=False, name='name')
assert la.match_to(s).matches == 3
@pytest.mark.parametrize("where", [Where.PREFIX, Where.SUFFIX])
def test_no_indels_empty_read(where):
# Issue #376
adapter = Adapter('ACGT', where=where, indels=False)
empty = Sequence('name', '')
adapter.match_to(empty)
| 33.346369 | 107 | 0.640978 | 0 | 0 | 0 | 0 | 236 | 0.039538 | 0 | 0 | 1,140 | 0.190987 |
04562a0ab45671a8901e0ae58163340c6b6aee32 | 160 | py | Python | oaff/app/oaff/app/data/sources/common/provider.py | JBurkinshaw/ogc-api-fast-features | 4fc6ba3cc4df1600450fe4c9f35320b00c69f158 | [
"MIT"
] | 19 | 2021-07-06T16:35:27.000Z | 2022-02-08T04:59:21.000Z | oaff/app/oaff/app/data/sources/common/provider.py | JBurkinshaw/ogc-api-fast-features | 4fc6ba3cc4df1600450fe4c9f35320b00c69f158 | [
"MIT"
] | 30 | 2021-07-14T04:13:11.000Z | 2021-11-22T20:45:15.000Z | oaff/app/oaff/app/data/sources/common/provider.py | JBurkinshaw/ogc-api-fast-features | 4fc6ba3cc4df1600450fe4c9f35320b00c69f158 | [
"MIT"
] | 6 | 2021-07-06T16:35:28.000Z | 2021-09-17T19:24:49.000Z | from typing import List, Optional
from pydantic import BaseModel
class Provider(BaseModel):
url: str
name: str
roles: Optional[List[str]] = None
| 16 | 37 | 0.7125 | 91 | 0.56875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0456a960cd6f7a8fb4aa5ca64de8acbb20393184 | 1,909 | py | Python | hackerrank/Data Structures/Super Maximum Cost Queries/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerrank/Data Structures/Super Maximum Cost Queries/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerrank/Data Structures/Super Maximum Cost Queries/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | #!/bin/python3
import os
#
# Complete the 'solve' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts following parameters:
# 1. 2D_INTEGER_ARRAY tree
# 2. 2D_INTEGER_ARRAY queries
#
def solve(tree, queries):
# Write your code here
from bisect import bisect_right
def find(x, p):
while p[x] != x:
p[x] = p[p[x]]
x = p[x]
return p[x]
def union(x, y, w8, p, r, d):
px = find(x, p)
py = find(y, p)
d[w8] += len(r[px]) * len(r[py])
if px != py:
if len(r[py]) < len(r[px]):
p[py] = px
r[px].update(r[py])
del r[py]
else:
p[px] = py
r[py].update(r[px])
del r[px]
ln = len(tree) + 1
tree.sort(key=lambda x: x[-1])
paths = {0: 0}
weights = [0]
parents = {i: i for i in range(1, ln + 1)}
rep = {i: {i} for i in range(1, ln + 1)}
prev = 0
for u, v, w in tree:
if w != prev:
weights.append(w)
paths[w] = paths[prev]
union(u, v, w, parents, rep, paths)
prev = w
for left, right in queries:
wr = weights[bisect_right(weights, right) - 1]
wl = weights[bisect_right(weights, left - 1) - 1]
yield paths[wr] - paths[wl]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
q = int(first_multiple_input[1])
tree = []
for _ in range(n - 1):
tree.append(list(map(int, input().rstrip().split())))
queries = []
for _ in range(q):
queries.append(list(map(int, input().rstrip().split())))
result = solve(tree, queries)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
| 23 | 64 | 0.510739 | 0 | 0 | 1,138 | 0.596124 | 0 | 0 | 0 | 0 | 266 | 0.13934 |
0457bad8f142fab99c1a60023685415f8bbe17c7 | 2,598 | py | Python | tests/test.py | bitlang/kabob | fe5c428df2723979183bb72a7435ffd34e404199 | [
"MIT"
] | null | null | null | tests/test.py | bitlang/kabob | fe5c428df2723979183bb72a7435ffd34e404199 | [
"MIT"
] | null | null | null | tests/test.py | bitlang/kabob | fe5c428df2723979183bb72a7435ffd34e404199 | [
"MIT"
] | null | null | null | from unittest import TestCase
from kabob import _
class KabobTestCase(TestCase):
def _T(self, obj, kbb):
from kabob.wand import Kabob
self.assertIsInstance(kbb, Kabob)
return [x for x in kbb(obj)]
class TestKabob(KabobTestCase):
def test_contains(self):
fixture = [dict(bar=10), dict(bar=15), dict(foo=100), dict(bar=13), dict(barr=552)]
self.assertEqual(self._T(fixture, _.contains('bar')), [
dict(bar=10), dict(bar=15), dict(bar=13)])
self.assertEqual(self._T(fixture, _.contains('foo')), [
dict(foo=100)])
self.assertEqual(self._T(fixture, _.contains('barr')), [
dict(barr=552)])
self.assertEqual(self._T(fixture, _.contains('foo', 'bar')), [])
def test_getattr(self):
class TestClass(object):
def __init__(self, x, y):
self.x = x
self.y = y
fixture = [TestClass(10, 15), TestClass(4, 44), TestClass(19, 20)]
self.assertEqual(self._T(fixture, _.x), [10, 4, 19])
self.assertEqual(self._T(fixture, _.y), [15, 44, 20])
def test_getattr_nested(self):
class TestValue(object):
def __init__(self, v):
self.value = v
class TestClass(object):
def __init__(self, x, y):
self.x = TestValue(x)
self.y = TestValue(y)
fixture = [TestClass(10, 15), TestClass(4, 44), TestClass(19, 20)]
self.assertEqual(self._T(fixture, _.x.value), [10, 4, 19])
self.assertEqual(self._T(fixture, _.y.value), [15, 44, 20])
def test_getitem(self):
fixture = [
dict(bar=10, foo=15),
dict(bar=65, foo=22),
dict(bar=19, foo=11),
dict(bar=99, foo=24)
]
self.assertEqual(self._T(fixture, _['bar']), [10, 65, 19, 99])
self.assertEqual(self._T(fixture, _['foo']), [15, 22, 11, 24])
def test_or(self):
fixture = [
dict(foo=dict(bar=10)),
dict(foo=dict(bar=15)),
dict(foo=dict(bar=119)),
]
self.assertEqual(self._T(fixture, _['foo'] | _['bar']), [10, 15, 119])
def test_multi(self):
fixture = [dict(bar=10), dict(bar=15), dict(foo=100), dict(bar=13), dict(barr=552)]
self.assertEqual(self._T(fixture, _(_.contains('foo'), _.contains('bar'))), [
dict(foo=100), dict(bar=10), dict(bar=15), dict(bar=13)])
self.assertEqual(self._T(fixture, _(_.contains('foo'), _.contains('barr'))), [
dict(foo=100), dict(barr=552)])
| 33.307692 | 91 | 0.550038 | 2,541 | 0.97806 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.025789 |
0459c4c4ec84836ab5b740797c06885228f4e2e7 | 1,336 | bzl | Python | flatbuffers/internal/run_flatc.bzl | kgreenek/rules_flatbuffers | 057e887e81749e64f405381a3471eb9e9674b9b5 | [
"Apache-2.0"
] | 2 | 2021-09-27T05:43:14.000Z | 2021-10-04T09:31:55.000Z | flatbuffers/internal/run_flatc.bzl | kgreenek/rules_flatbuffers | 057e887e81749e64f405381a3471eb9e9674b9b5 | [
"Apache-2.0"
] | null | null | null | flatbuffers/internal/run_flatc.bzl | kgreenek/rules_flatbuffers | 057e887e81749e64f405381a3471eb9e9674b9b5 | [
"Apache-2.0"
] | null | null | null | load("//flatbuffers/internal:string_utils.bzl", "capitalize_first_char")
def _include_args_from_depset(includes_depset):
# Always include the workspace root.
include_args = ["-I", "."]
for include in includes_depset.to_list():
include_args.append("-I")
include_args.append(include)
return include_args
def run_flatc(
ctx,
fbs_toolchain,
fbs_lang_toolchain,
srcs,
srcs_transitive,
includes_transitive,
outputs):
flatc = fbs_toolchain.flatc.files_to_run.executable
include_args = _include_args_from_depset(includes_transitive)
output_prefix = ctx.genfiles_dir.path + "/" + ctx.label.package
mnemonic = "Flatbuffers{}Gen".format(capitalize_first_char(fbs_lang_toolchain.lang_shortname))
progress_message = "Generating flatbuffers {} file for {}:".format(
fbs_lang_toolchain.lang_shortname,
ctx.label,
)
genrule_args = \
fbs_lang_toolchain.flatc_args + \
["-o", output_prefix] + \
include_args + \
[src.path for src in srcs]
ctx.actions.run(
inputs = srcs_transitive,
outputs = outputs,
executable = flatc,
tools = [flatc],
arguments = genrule_args,
mnemonic = mnemonic,
progress_message = progress_message,
)
| 32.585366 | 98 | 0.656437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 176 | 0.131737 |
045a80d7f686a5ac42ed1b41490dca313e15dfed | 2,412 | py | Python | src/search_name.py | MansurS404/MS403 | 77f582dfdbc8918c9ea160c4f1af3cf14f9ea691 | [
"Apache-2.0"
] | 1 | 2020-10-09T18:36:58.000Z | 2020-10-09T18:36:58.000Z | src/search_name.py | MansurS404/MS403 | 77f582dfdbc8918c9ea160c4f1af3cf14f9ea691 | [
"Apache-2.0"
] | 1 | 2020-10-09T18:38:48.000Z | 2020-10-09T18:38:48.000Z | src/search_name.py | MansurS404/MS403 | 77f582dfdbc8918c9ea160c4f1af3cf14f9ea691 | [
"Apache-2.0"
] | null | null | null | #!usr/bin/python2.7
# coding=utf-8
#######################################################
# Name : Multi BF (MBF) <cookie method> #
# File : search_name.py #
# Author : DulLah #
# Github : https://github.com/dz-id #
# Facebook : https://www.facebook.com/dulahz #
# Telegram : https://t.me/unikers #
# Python version : 2.7 #
#######################################################
import os, re, sys, json
from bs4 import BeautifulSoup as parser
from datetime import datetime
def main(self, cookie, url, config):
ask = raw_input('\nQuery name: ')
if ask.strip() == '':
exit("\n\033[0;91mRequired, can't empty.\033[0m")
try:
max = int(raw_input('How many? (ex: 100): '))
except ValueError:
exit("\n\033[0;91mStuppid.\033[0m")
if max == 0:
exit("\n\033[0;91mRequired, can't empty.\033[0m")
url_search = url+'/search/people/?q='+ask
statusStop = False
output = 'dump/'+ask.replace(' ', '_')+'.json'.strip()
id = []
print('')
while True:
try:
response = config.httpRequest(url_search, cookie).encode('utf-8')
html = parser(response, 'html.parser')
find = html.find_all('a')
for i in find:
name = i.find('div')
if '+' in str(name) or name == None:
continue
else:
full_name = str(name.text.encode('utf-8'))
if 'profile.php?id=' in str(i):
uid = re.findall(r'\?id=(.*?)&', str(i))
else:
uid = re.findall('/(.*?)\?refid=', str(i))
if len(uid) == 1:
id.append({'uid': uid[0], 'name': full_name})
sys.stdout.write("\r - %s \r\n[\033[0;96m%s\033[0m] [\033[0;91m%s\033[0m] Writing Id don't close."%(
full_name, datetime.now().strftime('%H:%M:%S'), len(id)
)); sys.stdout.flush()
if len(id) == max or len(id) > max:
statusStop = True
break
if statusStop == False:
if 'Lihat Hasil Selanjutnya' in str(html):
url_search = html.find('a', string='Lihat Hasil Selanjutnya')['href']
else: break
else: break
except KeyboardInterrupt:
print('\n\n\033[0;91mKeyInterrupt, stopped!!\033[0m')
break
try:
for filename in os.listdir('dump'):
os.remove('dump/'+filename)
except: pass
print('\n\nOutput: '+output)
save = open(output, 'w')
save.write(json.dumps(id))
save.close()
| 32.16 | 144 | 0.532338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,087 | 0.450663 |
045ab6d7b2818c3906b03b2e9bb0ddef41f82336 | 26,619 | py | Python | vistrails/gui/modules/constant_configuration.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 83 | 2015-01-05T14:50:50.000Z | 2021-09-17T19:45:26.000Z | vistrails/gui/modules/constant_configuration.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 254 | 2015-01-02T20:39:19.000Z | 2018-11-28T17:16:44.000Z | vistrails/gui/modules/constant_configuration.py | remram44/VisTrails-mybinder | ee7477b471920d738f3ac430932f01901b56ed44 | [
"BSD-3-Clause"
] | 40 | 2015-04-17T16:46:36.000Z | 2021-09-28T22:43:24.000Z | ###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This file specifies the configuration widget for Constant
modules. Please notice that this is different from the module configuration
widget described in module_configure.py. We present a Color constant to be
used as a template for creating a configuration widget for other custom
constants.
"""
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core.utils import any, expression, versions_increasing
from vistrails.core import system
from vistrails.gui.theme import CurrentTheme
import copy
import os
############################################################################
def setPlaceholderTextCompat(self, value):
    """Set placeholder text on a line-edit widget, with a fallback.

    QLineEdit.setPlaceholderText only exists from Qt 4.7.0 onwards; on
    older Qt versions the value is written as regular text instead.
    """
    has_placeholder = not versions_increasing(QtCore.QT_VERSION_STR, '4.7.0')
    if has_placeholder:
        self.setPlaceholderText(value)
    else:
        self.setText(value)
class ConstantWidgetMixin(object):
    """Change-tracking mixin shared by all constant editor widgets.

    Concrete widgets must define a ``contentsChanged`` signal carrying
    a tuple (checked at construction time) and implement ``contents()``.
    ``update_parent`` compares the current contents against the last
    known value and, on a change, notifies the parent widget (when it
    exposes ``updateMethod``) and emits ``contentsChanged``.
    """
    # subclasses need to add this signal:
    #   contentsChanged = QtCore.pyqtSignal(tuple)

    def __init__(self, contents=None):
        if not hasattr(self, 'contentsChanged'):
            raise Exception('ConstantWidget must define contentsChanged signal')
        self._last_contents = contents
        self.psi = None

    def update_parent(self):
        """Propagate the current contents if they differ from the last
        committed value."""
        current = self.contents()
        if current == self._last_contents:
            return
        parent = self.parent()
        if parent and hasattr(parent, 'updateMethod'):
            parent.updateMethod()
        self._last_contents = current
        self.contentsChanged.emit((self, current))
class ConstantWidgetBase(ConstantWidgetMixin):
    """Common base for QWidget-based constant editors.

    Initializes the widget from the parameter's stored string value,
    falling back to the port spec item's default when the value is
    empty, and forwards focus events to the parent widget.  Subclasses
    must implement setContents() and contents().
    """
    class FocusFilter(QtCore.QObject):
        """Event filter that routes FocusIn/FocusOut events of a
        watched widget to the owning constant widget's handlers."""
        def __init__(self, cwidget):
            QtCore.QObject.__init__(self, cwidget)
            self.__cwidget = cwidget
        def eventFilter(self, o, event):
            # Forward focus transitions; returning False lets the
            # watched object still process the event normally.
            if event.type() == QtCore.QEvent.FocusIn:
                self.__cwidget._focus_in(event)
            elif event.type() == QtCore.QEvent.FocusOut:
                self.__cwidget._focus_out(event)
            return False

    def __init__(self, param):
        """__init__(param: core.vistrail.module_param.ModuleParam)

        Raises ValueError when param is None.
        """
        if param is None:
            raise ValueError("Must pass param as first argument.")
        psi = param.port_spec_item
        # An empty parameter falls back to the port spec default so the
        # mixin's change tracking starts from the displayed value.
        if not param.strValue and psi and psi.default:
            value = psi.default
        else:
            value = param.strValue
        ConstantWidgetMixin.__init__(self, value)

        self.psi = psi
        # Defaults are displayed via setDefault (e.g. as placeholder
        # text); actual parameter values via setContents.
        if psi and psi.default and param.strValue == '':
            self.setDefault(psi.default)
        else:
            self.setContents(param.strValue)

        self.__focus_filter = self.FocusFilter(self)
        self.installEventFilter(self.__focus_filter)

    def watchForFocusEvents(self, widget):
        # Route a child widget's focus events through this widget too.
        widget.installEventFilter(self.__focus_filter)

    def setDefault(self, value):
        # Default behavior: set the contents silently (no parent
        # update); subclasses may show defaults differently.
        self.setContents(value, True)

    def setContents(self, strValue, silent=True):
        """Display strValue; update the parent unless silent."""
        raise NotImplementedError("Subclass must implement this method.")

    def contents(self):
        """Return the current value as a string."""
        raise NotImplementedError("Subclass must implement this method.")

    def eventFilter(self, o, event):
        # Same focus forwarding as FocusFilter, for the widget itself.
        if event.type() == QtCore.QEvent.FocusIn:
            self._focus_in(event)
        elif event.type() == QtCore.QEvent.FocusOut:
            self._focus_out(event)
        return False

    def _focus_in(self, event):
        """ focusInEvent(event: QEvent) -> None
        Pass the event to the parent

        """
        if self.parent():
            QtCore.QCoreApplication.sendEvent(self.parent(), event)

    def _focus_out(self, event):
        # Commit any pending change before focus leaves, then forward
        # the event to the parent.
        self.update_parent()
        if self.parent():
            QtCore.QCoreApplication.sendEvent(self.parent(), event)
class ConstantEnumWidgetBase(ConstantWidgetBase):
    """Base class for constant widgets backed by an enumerated port spec.

    Reads the port spec item's declared values and entry type
    ("enumFree" allows free-text entry, "enumNonEmpty" requires a
    non-empty selection) before delegating to
    ConstantWidgetBase.__init__, which sets the initial contents.
    """
    def __init__(self, param):
        spec_item = param.port_spec_item
        self.setValues(spec_item.values)
        self.setFree(spec_item.entry_type == "enumFree")
        self.setNonEmpty(spec_item.entry_type == "enumNonEmpty")
        ConstantWidgetBase.__init__(self, param)

    def setValues(self, values):
        """Populate the widget with the list of allowed values."""
        raise NotImplementedError("Subclass must implement this method.")

    def setFree(self, is_free):
        """Hook: enable free-text entry in addition to the fixed values."""
        pass

    def setNonEmpty(self, is_non_empty):
        """Hook: require a non-empty selection."""
        pass
class QGraphicsLineEdit(QtGui.QGraphicsTextItem, ConstantWidgetBase):
    """ A GraphicsItem version of ConstantWidget

    Single-line text editor rendered directly in the graphics scene.
    Only the document's first block is shown inside a fixed 150px-wide
    rect; `offset` horizontally scrolls the line so the cursor (or the
    end of the text) stays visible.
    """
    contentsChanged = QtCore.pyqtSignal(tuple)

    def __init__(self, param, parent=None):
        QtGui.QGraphicsTextItem.__init__(self, parent)
        self.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
        self.setTabChangesFocus(True)
        self.setFont(CurrentTheme.MODULE_EDIT_FONT)
        self.installEventFilter(self)
        # Horizontal scroll offset (pixels) applied to the first line.
        self.offset = 0
        # Whether the current text validates against the port's module.
        self.is_valid = True
        self.document().setDocumentMargin(1)
        ConstantWidgetBase.__init__(self, param)
        self.document().contentsChanged.connect(self.ensureCursorVisible)

    def setContents(self, value, silent=False):
        """Display value (after evaluating embedded expressions);
        unless silent, notify the parent of the change."""
        self.setPlainText(expression.evaluate_expressions(value))
        if not silent:
            self.update_parent()
        # Scroll so the end of the text is visible in the 150px rect.
        block = self.document().firstBlock()
        w = self.document().documentLayout().blockBoundingRect(block).width()
        self.offset = max(w - 140, 0)
        block.layout().lineAt(0).setPosition(QtCore.QPointF(-self.offset,0))
        self.validate(value)

    def contents(self):
        """Return the evaluated text and refresh validation state."""
        contents = expression.evaluate_expressions(unicode(self.toPlainText()))
        self.setPlainText(contents)
        self.validate(contents)
        return contents

    def validate(self, value):
        """Set is_valid and the tooltip depending on whether the
        port's module can convert value from its string form."""
        try:
            self.psi and \
            self.psi.descriptor.module.translate_to_python(value)
        except Exception, e:
            self.setToolTip("Invalid value: %s" % str(e))
            self.is_valid = False
        else:
            self.setToolTip("")
            self.is_valid = True

    def setDefault(self, value):
        # Defaults are displayed as regular contents, silently.
        self.setContents(value, silent=True)

    def boundingRect(self):
        # calc font height
        #height = CurrentTheme.MODULE_EDIT_FONT_METRIC.height()
        height = 11 # hardcoded because fontmetric can give wrong value
        return QtCore.QRectF(0.0, 0.0, 150, height + 3)

    def eventFilter(self, obj, event):
        # Enter/Return commits the value by dropping focus (see
        # focusOutEvent, which calls update_parent).
        if event.type() == QtCore.QEvent.KeyPress and \
                event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
            self.clearFocus()
            return True
        result = QtGui.QGraphicsTextItem.eventFilter(self, obj, event)
        # Keep focus and cursor visibility in sync on key/mouse input.
        if event.type() in [QtCore.QEvent.KeyPress, QtCore.QEvent.MouseButtonPress, QtCore.QEvent.GraphicsSceneMouseMove]:
            if not self.hasFocus():
                self.setFocus()
            self.ensureCursorVisible()
        return result

    def ensureCursorVisible(self):
        """Adjust the horizontal offset (in 25px steps) so the cursor
        stays inside the visible part of the fixed-width rect."""
        block = self.document().firstBlock()
        line = block.layout().lineAt(0)
        pos = line.cursorToX(self.textCursor().positionInBlock())
        cursor = self.document().documentLayout().blockBoundingRect(\
                                   block).y() + pos[0] - line.position().x()
        w = self.document().documentLayout().blockBoundingRect(block).width()
        if cursor - self.offset > 130:
            self.offset = min(w-140, self.offset + 25)
        if cursor - self.offset < 20:
            self.offset = max(0, self.offset - 25)
        line.setPosition(QtCore.QPointF(-self.offset,0))
        self.update()

    def focusOutEvent(self, event):
        # Commit any pending change before losing focus.
        self.update_parent()
        result = QtGui.QGraphicsTextItem.focusOutEvent(self, event)
        # show last part of text
        block = self.document().firstBlock()
        w = self.document().documentLayout().blockBoundingRect(block).width()
        self.offset = max(w - 140, 0)
        block.layout().lineAt(0).setPosition(QtCore.QPointF(-self.offset,0))
        return result

    def focusInEvent(self, event):
        result = QtGui.QGraphicsTextItem.focusInEvent(self, event)
        # set cursor to last position if not already set
        cursor = self.textCursor()
        cursor.setPosition(self.document().firstBlock().length()-1)
        self.setTextCursor(cursor)
        return result

    def paint(self, painter, option, widget):
        """ Override striped selection border
        First unset selected and hasfocus flags
        Then draw custom rect """
        s = QtGui.QStyle.State_Selected | QtGui.QStyle.State_HasFocus
        state = s.__class__(option.state) # option.state
        option.state &= ~s
        painter.pen().setWidth(1)
        result = QtGui.QGraphicsTextItem.paint(self, painter, option, widget)
        option.state = state
        if state & s:
            # Selected or focused: highlight-colored border.
            color = QtGui.QApplication.palette().color(QtGui.QPalette.Highlight)
            painter.setPen(QtGui.QPen(color, 0))
            painter.drawRect(self.boundingRect())
        elif not self.is_valid:
            # Invalid value: error-colored border.
            painter.setPen(QtGui.QPen(CurrentTheme.PARAM_INVALID_COLOR, 0))
            painter.drawRect(self.boundingRect())
        else:
            # Normal state: plain dark border.
            color = QtGui.QApplication.palette().color(QtGui.QPalette.Dark)
            painter.setPen(QtGui.QPen(color, 0))
            painter.drawRect(self.boundingRect())
        return result
class StandardConstantWidget(QtGui.QLineEdit,ConstantWidgetBase):
    """Single-line QLineEdit editor used for standard constant types."""
    contentsChanged = QtCore.pyqtSignal(tuple)
    # Scene-embedded variant of this widget.
    GraphicsItem = QGraphicsLineEdit

    def __init__(self, param, parent=None):
        """__init__(param: core.vistrail.module_param.ModuleParam,
                    parent: QWidget)
        """
        QtGui.QLineEdit.__init__(self, parent)
        ConstantWidgetBase.__init__(self, param)
        # Commit the value when the user presses Return.
        self.connect(self, QtCore.SIGNAL("returnPressed()"),
                     self.update_parent)

    def setContents(self, value, silent=False):
        """setContents(value: str, silent: bool) -> None
        Display value (after evaluating embedded expressions) and,
        unless silent, notify the parent of the change."""
        self.setText(expression.evaluate_expressions(value))
        self.validate(value)
        if not silent:
            self.update_parent()

    def contents(self):
        """contents() -> str
        Return the evaluated text currently in the line edit."""
        contents = expression.evaluate_expressions(unicode(self.text()))
        self.setText(contents)
        self.validate(contents)
        return contents

    def validate(self, value):
        """Try converting value with the port's module; mark the
        widget with a dashed border and an error tooltip on failure."""
        try:
            self.psi and \
            self.psi.descriptor.module.translate_to_python(value)
        except Exception, e:
            # Draw a dashed border and add a tooltip with the error.
            self.setStyleSheet("border:2px dashed %s;" %
                               CurrentTheme.PARAM_INVALID_COLOR.name())
            self.setToolTip("Invalid value: %s" % str(e))
        else:
            self.setStyleSheet("")
            self.setToolTip("")

    def setDefault(self, value):
        # Defaults are shown as placeholder text (plain text on Qt<4.7).
        setPlaceholderTextCompat(self, value)
def findEmbeddedParentWidget(widget):
    """Walk up the widget hierarchy to the first widget embedded in a
    graphics proxy (used by StandardConstantEnumWidget.showPopup to
    reposition the combo-box popup inside a QGraphicsView).

    Returns the embedded ancestor, or None when no ancestor has a
    graphics proxy.
    """
    current = widget
    while True:
        if current.graphicsProxyWidget():
            return current
        current = current.parentWidget()
        if not current:
            return None
class StandardConstantEnumWidget(QtGui.QComboBox, ConstantEnumWidgetBase):
    """Combo-box editor for enumerated constants; becomes editable for
    "enumFree" port specs."""
    contentsChanged = QtCore.pyqtSignal(tuple)
    # No scene-embedded variant exists for combo boxes.
    GraphicsItem = None

    def __init__(self, param, parent=None):
        QtGui.QComboBox.__init__(self, parent)
        ConstantEnumWidgetBase.__init__(self, param)
        # Commit the value whenever the selection changes.
        self.connect(self,
                     QtCore.SIGNAL('currentIndexChanged(int)'),
                     self.update_parent)

    def setValues(self, values):
        # Populate the drop-down with the port spec's allowed values.
        self.addItems(values)

    def setFree(self, is_free):
        # "enumFree": allow typing arbitrary values, committed on
        # Return, without inserting them into the drop-down list.
        if is_free:
            self.setEditable(True)
            self.setInsertPolicy(QtGui.QComboBox.NoInsert)
            self.connect(self.lineEdit(),
                         QtCore.SIGNAL('returnPressed()'),
                         self.update_parent)

    def setNonEmpty(self, is_non_empty):
        # Unless a non-empty selection is required, start unselected.
        if not is_non_empty:
            self.setCurrentIndex(-1)

    def contents(self):
        return self.currentText()

    def setContents(self, strValue, silent=True):
        # Select the matching item if present; editable combos also
        # reflect the raw string in their line edit.
        idx = self.findText(strValue)
        if idx > -1:
            self.setCurrentIndex(idx)
            if self.isEditable():
                self.lineEdit().setText(strValue)
        elif self.isEditable():
            self.lineEdit().setText(strValue)

    def setDefault(self, value):
        # Like setContents, but editable combos show the default as
        # placeholder text rather than a committed value.
        idx = self.findText(value)
        if idx > -1:
            self.setCurrentIndex(idx)
            if self.isEditable():
                setPlaceholderTextCompat(self.lineEdit(), value)
        elif self.isEditable():
            setPlaceholderTextCompat(self.lineEdit(), value)

    def showPopup(self, *args, **kwargs):
        """ Fixes popup when use in a GraphicsView. See:
        https://bugreports.qt-project.org/browse/QTBUG-14090
        """
        QtGui.QComboBox.showPopup(self, *args, **kwargs)
        parent = findEmbeddedParentWidget(self)
        if parent:
            item = parent.graphicsProxyWidget()
            scene = item.scene()
            view = None
            if scene:
                views = scene.views()
                # Prefer the view that currently has keyboard focus.
                for v in views:
                    if v == QtGui.QApplication.focusWidget():
                        view = v
                if not view:
                    view = views[0]
            if view:
                # Map the item's bottom-left/bottom-right corners to
                # global coordinates and move/resize the popup there.
                br = item.boundingRect()
                rightPos = view.mapToGlobal(view.mapFromScene(item.mapToScene(
                    QtCore.QPointF(br.width(), br.height()))))
                pos = view.mapToGlobal(view.mapFromScene(item.mapToScene(
                    QtCore.QPointF(0, br.height()))))
                self.view().parentWidget().move(pos)
                self.view().parentWidget().setFixedWidth(rightPos.x()-pos.x())
                self.view().parentWidget().installEventFilter(self)

    def eventFilter(self, o, e):
        """ See showPopup
        """
        # NOTE(review): appears to swallow mouse presses on the
        # relocated popup container installed above -- confirm intent.
        if o.parentWidget() and e.type() == QtCore.QEvent.MouseButtonPress:
            return True
        return QtGui.QComboBox.eventFilter(self, o, e)
###############################################################################
# Multi-line String Widget
class MultiLineStringWidget(QtGui.QTextEdit, ConstantWidgetBase):
    """Multi-line plain-text editor for String constants."""
    contentsChanged = QtCore.pyqtSignal(tuple)

    def __init__(self, param, parent=None):
        """__init__(param: core.vistrail.module_param.ModuleParam,
                    parent: QWidget)
        """
        QtGui.QTextEdit.__init__(self, parent)
        self.setAcceptRichText(False)
        ConstantWidgetBase.__init__(self, param)

    def setContents(self, contents, silent=True):
        """setContents(contents: str, silent: bool) -> None
        Display contents (after evaluating embedded expressions) and,
        unless silent, notify the parent of the change.

        The silent parameter matches the ConstantWidgetBase.setContents
        signature; without it, the inherited setDefault (which calls
        setContents(value, True)) raised a TypeError whenever a
        multi-line String port declared a default value.
        """
        self.setPlainText(expression.evaluate_expressions(contents))
        if not silent:
            self.update_parent()

    def contents(self):
        """contents() -> str
        Return the evaluated text currently in the editor."""
        contents = expression.evaluate_expressions(unicode(self.toPlainText()))
        self.setPlainText(contents)
        return contents

    def sizeHint(self):
        metrics = QtGui.QFontMetrics(self.font())
        # On Mac OS X 10.8, the scrollbar doesn't show up correctly
        # with 3 lines
        return QtCore.QSize(QtGui.QTextEdit.sizeHint(self).width(),
                            (metrics.height() + 1) * 4 + 5)

    def minimumSizeHint(self):
        return self.sizeHint()
###############################################################################
# File Constant Widgets
class PathChooserWidget(QtGui.QWidget, ConstantWidgetMixin):
    """
    PathChooserWidget is a widget containing a line edit and a button that
    opens a browser for paths. The lineEdit is updated with the pathname that is
    selected.
    """
    contentsChanged = QtCore.pyqtSignal(tuple)
    def __init__(self, param, parent=None):
        """__init__(param: core.vistrail.module_param.ModuleParam,
        parent: QWidget)
        Initializes the line edit with contents
        """
        QtGui.QWidget.__init__(self, parent)
        ConstantWidgetMixin.__init__(self, param.strValue)
        # Line edit on the left, browse button on the right, no margin.
        layout = QtGui.QHBoxLayout()
        self.line_edit = StandardConstantWidget(param, self)
        self.browse_button = self.create_browse_button()
        layout.setMargin(0)
        layout.setSpacing(5)
        layout.addWidget(self.line_edit)
        layout.addWidget(self.browse_button)
        self.setLayout(layout)
    def create_browse_button(self, cls=None):
        """Build the browse button; subclasses pass a specialized tool-button
        class to change which chooser dialog the button opens."""
        from vistrails.gui.common_widgets import QPathChooserToolButton
        if cls is None:
            cls = QPathChooserToolButton
        button = cls(self, self.line_edit,
                     defaultPath=system.vistrails_data_directory())
        # Selecting a path counts as an edit: notify the parent widget.
        button.pathChanged.connect(self.update_parent)
        return button
    def updateMethod(self):
        # Forward the update to the parent if it participates in the
        # updateMethod protocol.
        if self.parent() and hasattr(self.parent(), 'updateMethod'):
            self.parent().updateMethod()
    def contents(self):
        """contents() -> str
        Return the contents of the line_edit
        """
        return self.line_edit.contents()
    def setContents(self, strValue, silent=True):
        """setContents(strValue: str) -> None
        Updates the contents of the line_edit
        """
        self.line_edit.setContents(strValue, silent)
        if not silent:
            self.update_parent()
    def focusInEvent(self, event):
        """ focusInEvent(event: QEvent) -> None
        Pass the event to the parent
        """
        if self.parent():
            QtCore.QCoreApplication.sendEvent(self.parent(), event)
        QtGui.QWidget.focusInEvent(self, event)
    def focusOutEvent(self, event):
        # Losing focus commits the current value before the event is
        # propagated to the parent.
        self.update_parent()
        QtGui.QWidget.focusOutEvent(self, event)
        if self.parent():
            QtCore.QCoreApplication.sendEvent(self.parent(), event)
class FileChooserWidget(PathChooserWidget):
    """Path chooser whose browse button opens a file-selection dialog."""
    def create_browse_button(self):
        from vistrails.gui.common_widgets import QFileChooserToolButton
        return PathChooserWidget.create_browse_button(self, QFileChooserToolButton)
class DirectoryChooserWidget(PathChooserWidget):
    """Path chooser whose browse button opens a directory-selection dialog."""
    def create_browse_button(self):
        from vistrails.gui.common_widgets import QDirectoryChooserToolButton
        return PathChooserWidget.create_browse_button(self, QDirectoryChooserToolButton)
class OutputPathChooserWidget(PathChooserWidget):
    """Path chooser whose browse button selects an output (writable) path."""
    def create_browse_button(self):
        from vistrails.gui.common_widgets import QOutputPathChooserToolButton
        return PathChooserWidget.create_browse_button(self, QOutputPathChooserToolButton)
###############################################################################
# Constant Boolean widget
class BooleanWidget(QtGui.QCheckBox, ConstantWidgetBase):
    """Constant editor for booleans, rendered as a check box.

    The string values 'True'/'False' map positionally onto the Qt check
    states in _states.
    """
    _values = ['True', 'False']
    _states = [QtCore.Qt.Checked, QtCore.Qt.Unchecked]
    contentsChanged = QtCore.pyqtSignal(tuple)
    def __init__(self, param, parent=None):
        """Initialize the check box from the given module parameter."""
        QtGui.QCheckBox.__init__(self, parent)
        ConstantWidgetBase.__init__(self, param)
        # Old-style signal connection: any state change notifies the parent.
        self.connect(self, QtCore.SIGNAL('stateChanged(int)'),
                     self.change_state)
    def contents(self):
        """Map the current check state back onto its string value."""
        state_pos = self._states.index(self.checkState())
        return self._values[state_pos]
    def setContents(self, strValue, silent=True):
        """Check/uncheck the box to match strValue; unknown values are ignored."""
        try:
            value_pos = self._values.index(strValue)
        except ValueError:
            return
        self.setCheckState(self._states[value_pos])
        if not silent:
            self.update_parent()
    def change_state(self, state):
        self.update_parent()
###############################################################################
# Constant Color widgets
# FIXME ColorChooserButton remains because the parameter exploration
# code uses it, really should be removed at some point
class ColorChooserButton(QtGui.QPushButton):
    """Flat push button whose background shows the currently chosen color.

    FIXME: kept only because the parameter exploration code still uses it;
    should be removed at some point. Emits the old-style "color_selected"
    signal when the user picks a new color.
    """
    contentsChanged = QtCore.pyqtSignal(tuple)
    def __init__(self, parent=None):
        QtGui.QPushButton.__init__(self, parent)
        self.setFlat(True)
        self.setAutoFillBackground(True)
        self.setColor(QtGui.QColor(255, 255, 255))
        self.setFixedSize(30, 22)
        if system.systemType == 'Darwin':
            # The Mac "metal" look interferes with the background color.
            self.setAttribute(QtCore.Qt.WA_MacMetalStyle, False)
        self.clicked.connect(self.openChooser)
    def setColor(self, qcolor, silent=True):
        """Store qcolor, repaint the swatch, and optionally notify listeners."""
        self.qcolor = qcolor
        rgb = (qcolor.red(), qcolor.green(), qcolor.blue())
        self.setStyleSheet("border: 1px solid black; "
                           "background-color: rgb(%d, %d, %d);" % rgb)
        self.update()
        if not silent:
            self.emit(QtCore.SIGNAL("color_selected"))
    def sizeHint(self):
        return QtCore.QSize(24, 24)
    def openChooser(self):
        """Pop up a QColorDialog; keep the previous color on cancel."""
        chosen = QtGui.QColorDialog.getColor(self.qcolor, self.parent())
        if chosen.isValid():
            self.setColor(chosen, silent=False)
        else:
            self.setColor(self.qcolor)
class QColorWidget(QtGui.QToolButton):
    """Tool button that displays a color swatch as its icon.

    Colors are serialized as 'r,g,b' with float channels in [0, 1].
    Subclasses mix in a constant-widget base that provides update_parent().
    """
    def __init__(self, parent=None):
        QtGui.QToolButton.__init__(self, parent)
        self.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
        self.setIconSize(QtCore.QSize(26, 18))
        self.color_str = '1.0,1.0,1.0'
    def colorFromString(self, color_str):
        """Parse a 'r,g,b' float triple into a QColor."""
        channels = color_str.split(',')
        return QtGui.QColor(float(channels[0]) * 255,
                            float(channels[1]) * 255,
                            float(channels[2]) * 255)
    def stringFromColor(self, qcolor):
        """Serialize a QColor back into the 'r,g,b' float representation."""
        return ",".join(str(c) for c in (qcolor.redF(), qcolor.greenF(),
                                         qcolor.blueF()))
    def buildIcon(self, qcolor, qsize):
        """Create a solid-color icon of the given size."""
        swatch = QtGui.QPixmap(qsize)
        swatch.fill(qcolor)
        return QtGui.QIcon(swatch)
    def setColorString(self, color_str, silent=True):
        """Adopt a serialized color (empty strings are ignored)."""
        if color_str != '':
            self.color_str = color_str
            self.setIcon(self.buildIcon(self.colorFromString(color_str),
                                        self.iconSize()))
        if not silent:
            self.update_parent()
    def setColor(self, qcolor, silent=True):
        """Adopt a QColor directly, keeping the serialized form in sync."""
        self.setIcon(self.buildIcon(qcolor, self.iconSize()))
        self.color_str = self.stringFromColor(qcolor)
        if not silent:
            self.update_parent()
    def openChooser(self):
        """Pop up a QColorDialog; keep the previous color on cancel."""
        current = self.colorFromString(self.color_str)
        chosen = QtGui.QColorDialog.getColor(current, self.parent())
        if chosen.isValid():
            self.setColor(chosen, silent=False)
        else:
            self.setColor(current)
class ColorWidget(QColorWidget, ConstantWidgetBase):
    """Constant editor for a single color value."""
    contentsChanged = QtCore.pyqtSignal(tuple)
    def __init__(self, param, parent=None):
        QColorWidget.__init__(self, parent)
        ConstantWidgetBase.__init__(self, param)
        # Clicking the swatch opens the color dialog (old-style signal API).
        self.connect(self, QtCore.SIGNAL("clicked()"), self.openChooser)
    def contents(self):
        """Return the serialized 'r,g,b' color string."""
        return self.color_str
    def setContents(self, strValue, silent=True):
        """Update the swatch from a serialized color string."""
        self.setColorString(strValue, silent=silent)
class ColorEnumWidget(QColorWidget, ConstantEnumWidgetBase):
    """Color constant editor restricted to an enumerated set of colors,
    presented as a drop-down menu of swatches."""
    contentsChanged = QtCore.pyqtSignal(tuple)
    def __init__(self, param, parent=None):
        QColorWidget.__init__(self, parent)
        # Split button: the arrow opens the enum menu.
        self.setPopupMode(QtGui.QToolButton.MenuButtonPopup)
        ConstantEnumWidgetBase.__init__(self, param)
    def setFree(self, is_free):
        # "Free" enums also allow arbitrary colors via the chooser dialog.
        if is_free:
            self.connect(self, QtCore.SIGNAL("clicked()"), self.openChooser)
    def wasTriggered(self, action):
        # A menu entry was selected: adopt its stored color string.
        self.setColorString(action.data())
        self.update_parent()
    def setValues(self, values):
        """Populate the drop-down menu with one checkable swatch per value."""
        menu = QtGui.QMenu()
        self.action_group = QtGui.QActionGroup(menu)
        self.action_group.setExclusive(True)
        self.connect(self.action_group, QtCore.SIGNAL('triggered(QAction*)'),
                     self.wasTriggered)
        size = menu.style().pixelMetric(QtGui.QStyle.PM_SmallIconSize)
        for i, color_str in enumerate(values):
            qcolor = self.colorFromString(color_str)
            icon = self.buildIcon(qcolor, QtCore.QSize(size, size))
            action = menu.addAction(icon, "")
            action.setIconVisibleInMenu(True)
            # The serialized color rides along as the action's data payload.
            action.setData(color_str)
            action.setCheckable(True)
            self.action_group.addAction(action)
        self.setMenu(menu)
    def contents(self):
        return self.color_str
    def setContents(self, strValue, silent=True):
        self.setColorString(strValue)
        # Keep the menu check mark in sync with the adopted value.
        for action in self.action_group.actions():
            if action.data() == strValue:
                action.setChecked(True)
| 37.125523 | 122 | 0.626845 | 23,016 | 0.864646 | 0 | 0 | 0 | 0 | 0 | 0 | 5,328 | 0.200158 |
045aff75c614e75b486992543a832ede496de5f2 | 1,324 | py | Python | setup.py | davidrpugh/solowPy | 91577e04481cec80679ae571ec2bdaa5788151b4 | [
"MIT"
] | 31 | 2016-02-29T00:20:53.000Z | 2022-01-26T17:40:38.000Z | setup.py | rfonsek/solowPy | 91577e04481cec80679ae571ec2bdaa5788151b4 | [
"MIT"
] | 11 | 2015-04-04T20:01:35.000Z | 2017-02-20T05:42:49.000Z | setup.py | rfonsek/solowPy | 91577e04481cec80679ae571ec2bdaa5788151b4 | [
"MIT"
] | 20 | 2015-08-23T23:42:09.000Z | 2022-02-23T08:00:53.000Z | import os
from distutils.core import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
DESCRIPTION = ("Library for solving, simulating, and estimating the " +
"Solow (1956) model of economic growth.")
CLASSIFIERS = ['Development Status :: 3 - Alpha',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
]
setup(
name="solowpy",
packages=['solowpy',
'solowpy.tests'],
version='0.2.0-alpha',
license="MIT License",
author="davidrpugh",
author_email="david.pugh@maths.ox.ac.uk",
url='https://github.com/solowPy/solowPy',
description=DESCRIPTION,
long_description=read('README.rst'),
classifiers=CLASSIFIERS,
)
| 33.1 | 71 | 0.561934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 707 | 0.533988 |
045b9a5f27ab07d95821029cdcec623c2884d66a | 3,236 | py | Python | seaport/pull_request/portfile.py | fossabot/seaport | e4a5c5ada7f21529912f688e36c75cecd3a2c53c | [
"BSD-3-Clause"
] | null | null | null | seaport/pull_request/portfile.py | fossabot/seaport | e4a5c5ada7f21529912f688e36c75cecd3a2c53c | [
"BSD-3-Clause"
] | null | null | null | seaport/pull_request/portfile.py | fossabot/seaport | e4a5c5ada7f21529912f688e36c75cecd3a2c53c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2021, harens
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of seaport nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions related to parsing the portfile."""
import os
import sys
from typing import Tuple
import click
from seaport.clipboard.checks import user_path
from seaport.clipboard.format import format_subprocess
def new_contents() -> Tuple[str, str]:
    """Determine the new portfile contents and the bumped version number.

    Returns:
        (str, str): The clipboard-held portfile text and the new version
    """
    # pbpaste strips the trailing newline, so restore it here.
    portfile_text = format_subprocess(["pbpaste"]) + "\n"
    if "port" not in portfile_text:
        click.secho("Cannot retrieve portfile contents from clipboard", fg="red")
        sys.exit(1)
    # Bind to a local first so mypy can narrow Optional[str] to str.
    new_version = os.getenv("BUMP")
    if new_version is None:
        click.secho(
            "Cannot determine version number from env variable `bump`", fg="red"
        )
        sys.exit(1)
    return portfile_text, new_version
def determine_category(name: str) -> str:
    """Return the (first) MacPorts category of the named port.

    The category determines where the portfile lives, i.e.
    macports-ports/<category>/<name>/Portfile.

    Args:
        name: The name of the port

    Returns:
        str: The category of the port
    """
    output = format_subprocess(
        [f"{user_path(True)}/port", "info", "--category", name]
    )
    words = output.split(" ")
    if len(words) > 2:
        # Several categories listed: strip the trailing comma off the first.
        return words[1][:-1]
    return words[1]
045e78652d194f324a318fa3d010fd1ce3d5d4e7 | 12,020 | py | Python | CRYSTAL/crystal_raman.py | permanentchange/raman-sc | 909c8d879dd97f118860e2a3a4edae2aa03f02b5 | [
"MIT"
] | 1 | 2021-01-30T12:38:07.000Z | 2021-01-30T12:38:07.000Z | CRYSTAL/crystal_raman.py | permanentchange/raman-sc | 909c8d879dd97f118860e2a3a4edae2aa03f02b5 | [
"MIT"
] | null | null | null | CRYSTAL/crystal_raman.py | permanentchange/raman-sc | 909c8d879dd97f118860e2a3a4edae2aa03f02b5 | [
"MIT"
] | 1 | 2021-05-10T01:38:30.000Z | 2021-05-10T01:38:30.000Z | #!/usr/bin/env python
#
# Raman off-resonant activity calculator
# using CRYSTAL as a back-end.
#
# Contributors: Alexandr Fonari (Georgia Tech)
# MIT license, 2013
#
#
def parse_fort34_header(fort34_fh):
    """Parse the header of a CRYSTAL fort.34 geometry file.

    Returns (nat_tot, vol, header) where nat_tot is the total number of
    atoms in the primitive cell, vol the cell volume (triple product of
    the lattice vectors), and header the list of header lines with the
    atom-count line rewritten to hold only the asymmetric-unit count.
    """
    import sys
    from math import sqrt
    #
    fort34_fh.seek(0) # just in case
    #
    # Python 2 file API: .next() reads one line at a time.
    header = [fort34_fh.next() for x in range(5)] # read first 5 lines
    #
    vol = 0.0
    b = []
    #
    for i in range(1,4): #cartesian components of direct lattice vectors
        b.append( [float(s) for s in header[i].split()] )
    #
    # Cell volume as the determinant of the 3x3 lattice-vector matrix,
    # expanded by hand (rule of Sarrus).
    vol = b[0][0]*b[1][1]*b[2][2] + b[1][0]*b[2][1]*b[0][2] + b[2][0]*b[0][1]*b[1][2] - \
          b[0][2]*b[1][1]*b[2][0] - b[2][1]*b[1][2]*b[0][0] - b[2][2]*b[0][1]*b[1][0]
    #
    symm_ops = int(header[-1])
    print "[parse_fort34_header]: Number of symmetry operations: %d" % symm_ops
    header.extend( [fort34_fh.next() for x in range(symm_ops*4)] )# each symm_op has 4 lines
    #
    header.extend( [fort34_fh.next()] ) # nat
    #
    nat_asym, nat_tot = [int(x) for x in header[-1].split()] # number of the irreducible atoms and total number of atoms in the primitive cell
    # Rewrite the last header line so only the asymmetric-unit count remains
    # (the displaced INCAR.gui written later lists only those atoms).
    header[-1] = " %d\n" % nat_asym # I know, dirty hack
    #
    return nat_tot, vol, header
#
def parse_env_params(params):
    """Parse CRYSTAL_RAMAN_PARAMS of the form 'first_last_nderiv_stepsize'.

    Returns (first, last, nderiv, step_size): the first and last mode
    indices and derivative order as ints, the displacement step as float.
    Exits with an error message when the format is wrong.
    """
    import sys
    #
    tmp = params.strip().split('_')
    if len(tmp) != 4:
        print "[parse_env_params]: ERROR there should be exactly four parameters"
        sys.exit(1)
    #
    [first, last, nderiv, step_size] = [int(tmp[0]), int(tmp[1]), int(tmp[2]), float(tmp[3])]
    #
    return first, last, nderiv, step_size
#
def get_modes_from_OUTCAR(outcar_fh, nat):
    """Read phonon modes from an OUTCAR-style file for nat atoms.

    Returns (pos, asym, atom_number, eigvals, activity, eigvecs, norms):
    atomic positions, asymmetric-unit flags, atomic numbers, 3*nat mode
    frequencies (cm-1), per-mode activity letters, mass-weighted
    eigenvectors and their Euclidean norms. Exits if the eigenvector
    section is missing.
    """
    import sys
    import re
    from math import sqrt
    eigvals = [ 0.0 for i in range(nat*3) ]
    eigvecs = [ 0.0 for i in range(nat*3) ]
    norms = [ 0.0 for i in range(nat*3) ]
    activity = [ '' for i in range(nat*3) ]
    atom_number = [ 0 for i in range(nat) ]
    pos = [ 0.0 for i in range(nat) ]
    asym = [ 0.0 for i in range(nat) ]
    #
    outcar_fh.seek(0) # just in case
    while True:
        line = outcar_fh.readline()
        if not line:
            break
        #
        if "Eigenvectors after division by SQRT(mass)" in line:
            outcar_fh.readline() # empty line
            outcar_fh.readline() # Eigenvectors and eigenvalues of the dynamical matrix
            outcar_fh.readline() # ----------------------------------------------------
            outcar_fh.readline() # empty line
            #
            for i in range(nat*3): # all frequencies should be supplied, regardless of those requested to calculate
                outcar_fh.readline() # empty line
                # Capture: mode index, frequency in cm-1, activity letter.
                p = re.search(r'^\s*(\d+).+?([-\.\d]+) cm-1 (\w)', outcar_fh.readline())
                eigvals[i] = float(p.group(2))
                activity[i] = p.group(3)
                #
                outcar_fh.readline() # X Y Z dx dy dz
                eigvec = []
                #
                for j in range(nat):
                    tmp = outcar_fh.readline().split()
                    #
                    if i == 0: # get atomic positions only once
                        atom_number[j] = int(tmp[0])
                        pos[j] = [ float(tmp[x]) for x in range(1,4) ]
                        asym[j] = int(tmp[-1]) # is this atom in the asymmetric unit
                    #
                    eigvec.append([ float(tmp[x]) for x in range(4,7) ])
                #
                eigvecs[i] = eigvec
                # Euclidean norm over all 3*nat displacement components.
                norms[i] = sqrt( sum( [abs(x)**2 for sublist in eigvec for x in sublist] ) )
            #
            return pos, asym, atom_number, eigvals, activity, eigvecs, norms
    #
    print "[get_modes_from_OUTCAR]: ERROR Couldn't find 'Eigenvectors after division by SQRT(mass)' in OUTCAR, exiting..."
    sys.exit(1)
#
###########################################
class switch(object):
    """One-shot C-style switch/case helper (classic PEP 275-era recipe).

    Usage:
        for case in switch(value):
            if case('a'): ...; break
            if case('b', 'c'): ...; break
            if case(): ...  # default

    A successful match sets ``fall`` so that subsequent case() calls also
    return True, emulating fall-through until a ``break``.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False
    def __iter__(self):
        """Yield the match method once, then stop."""
        yield self.match
        # A bare return ends the generator. The original recipe raised
        # StopIteration here, which PEP 479 turns into a RuntimeError on
        # Python 3.7+; plain return is identical behavior on Python 2.
        return
    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            # Fall-through after a previous match, or the default case().
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
###########################################
def get_epsilon_from_OUTCAR(outcar_fh):
    """Parse the 3x3 dielectric tensor (EPSILON column) from CRYSTAL output.

    Scans the file handle from the beginning; returns a nested 3x3 list
    with the diagonal and upper-triangle components filled in.
    Raises RuntimeError when no dielectric-tensor section is found.
    """
    #
    eps = [[0.0 for i in range(3)] for j in range(3)]
    #
    outcar_fh.seek(0) # just in case
    while True:
        line = outcar_fh.readline()
        if not line:
            break
        #
        if "COMPONENT ALPHA EPSILON CHI(1)" in line: # geeeeeez
            # Table rows look like: "XX  <alpha>  <epsilon>  <chi>";
            # read until the first empty line ends the table.
            while True:
                line = outcar_fh.readline().strip()
                if not line: # empty line
                    break
                #
                direction, alpha, eps1, chi = line.split()
                for case in switch(direction.strip()):
                    if case('XX'):
                        eps[0][0] = float(eps1)
                        break
                    if case('XY'):
                        eps[0][1]= float(eps1)
                        break
                    if case('XZ'):
                        eps[0][2]= float(eps1)
                        break
                    if case('YY'):
                        eps[1][1]= float(eps1)
                        break
                    if case('YZ'):
                        eps[1][2]= float(eps1)
                        break
                    if case('ZZ'):
                        eps[2][2]= float(eps1)
                        break
            #
            # NOTE(review): only the upper triangle is filled; the symmetric
            # lower-triangle assignments were deliberately left disabled.
            #eps[2][0] = eps[0][2]
            #eps[1][0] = eps[0][1]
            #eps[2][1] = eps[1][2]
            #
            return eps
            break # while True (unreachable after the return above)
    #
    # no eps - no next mode
    raise RuntimeError("[get_epsilon_from_OUTCAR]: ERROR Couldn't find dielectric tensor in OUTCAR")
    return 1 # unreachable dead code after the raise; kept as-is
#
if __name__ == '__main__':
    # Driver: for each requested phonon mode, displace the geometry along
    # the mode eigenvector, run CRYSTAL on the displaced geometry, parse the
    # dielectric tensor, and assemble the Raman tensor by finite differences.
    import sys
    from math import pi
    from shutil import move
    import os
    import datetime
    import time
    #import argparse
    import optparse
    #
    print ""
    print " Raman off-resonant activity calculator,"
    print " using CRYSTAL as a back-end."
    print ""
    print " Contributors: Alexandr Fonari (Georgia Tech)"
    print " MIT License, 2013"
    print " URL: http://..."
    print " Started at: "+datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    print ""
    #
    description = "Set environment variables:\n"
    description += " CRYSTAL_RAMAN_RUN='runmpi09 INCAR'\n"
    description += " CRYSTAL_RAMAN_PARAMS='[first-mode]_[last-mode]_[nderiv]_[step-size]'\n\n"
    # description += "One-liner bash is:\n"
    # description += "CRYSTAL_RAMAN_RUN='mpirun vasp' CRYSTAL_RAMAN_PARAMS='1 2 2 0.01' python vasp_raman.py -h"
    CRYSTAL_RAMAN_RUN = os.environ.get('CRYSTAL_RAMAN_RUN')
    if CRYSTAL_RAMAN_RUN == None:
        print "[__main__]: ERROR Set environment variable 'CRYSTAL_RAMAN_RUN'"
        print ""
        # NOTE(review): 'parser' is never created (optparse is imported but
        # unused) -- this call would raise NameError; confirm intent.
        parser.print_help()
        sys.exit(1)
    print "[__main__]: CRYSTAL_RAMAN_RUN='"+CRYSTAL_RAMAN_RUN+"'"
    #
    CRYSTAL_RAMAN_PARAMS = os.environ.get('CRYSTAL_RAMAN_PARAMS')
    if CRYSTAL_RAMAN_PARAMS == None:
        print "[__main__]: ERROR Set environment variable 'CRYSTAL_RAMAN_PARAMS'"
        print ""
        # NOTE(review): same undefined 'parser' issue as above.
        parser.print_help()
        sys.exit(1)
    print "[__main__]: CRYSTAL_RAMAN_PARAMS='"+CRYSTAL_RAMAN_PARAMS+"'"
    #
    first, last, nderiv, step_size = parse_env_params(CRYSTAL_RAMAN_PARAMS)
    assert first >= 1, '[__main__]: First mode should be equal or larger than 1'
    assert last >= first, '[__main__]: Last mode should be equal or larger than first mode'
    assert nderiv == 2, '[__main__]: At this time, nderiv = 2 is the only supported'
    disps = [-1, 1] # hardcoded for
    coeffs = [-0.5, 0.5] # three point stencil (nderiv=2)
    #
    try:
        fort34_fh = open('FORT34.phon', 'r')
    except IOError:
        print "[__main__]: ERROR Couldn't open input file FORT34.phon, exiting...\n"
        sys.exit(1)
    #
    nat, vol, fort34_header = parse_fort34_header(fort34_fh)
    fort34_fh.close()
    #
    try:
        outcar_fh = open('OUTCAR.phon', 'r')
    except IOError:
        print "[__main__]: ERROR Couldn't open OUTCAR.phon, exiting...\n"
        sys.exit(1)
    #
    pos, asym, atom_number, eigvals, activity, eigvecs, norms = get_modes_from_OUTCAR(outcar_fh, nat)
    outcar_fh.close()
    #
    output_fh = open('crystal_raman.dat', 'w')
    output_fh.write("# mode freq(cm-1) alpha beta2 activity\n")
    for i in range(first-1, last):
        eigval = eigvals[i]
        eigvec = eigvecs[i]
        norm = norms[i]
        #
        print ""
        print "[__main__]: Mode #%i: frequency %10.7f cm-1; norm: %10.7f" % ( i+1, eigval, norm )
        #
        if activity[i] != 'A':
            print "[__main__]: Mode inactive, skipping..."
            continue
        #
        # ra accumulates the Raman tensor (d eps / d Q) for this mode.
        ra = [[0.0 for x in range(3)] for y in range(3)]
        for j in range(len(disps)):
            disp_filename = 'OUTCAR.%04d.%+d.out' % (i+1, disps[j])
            #
            try:
                outcar_fh = open(disp_filename, 'r')
                print "[__main__]: File "+disp_filename+" exists, parsing..."
            except IOError:
                # No cached result: write the displaced geometry and run
                # CRYSTAL to produce it.
                print "[__main__]: File "+disp_filename+" not found, preparing displaced INCAR.gui"
                poscar_fh = open('INCAR.gui', 'w')
                poscar_fh.write("".join(fort34_header))
                #
                for k in range(nat): # do the deed
                    if asym[k] == 0: continue # this atom is NOT in the asymmetric unit, skip!
                    #
                    pos_disp = [ pos[k][l] + eigvec[k][l]*step_size*disps[j]/norm for l in range(3)]
                    poscar_fh.write( "%3d %15.10f %15.10f %15.10f\n" % (atom_number[k], pos_disp[0], pos_disp[1], pos_disp[2]) )
                    #print '%10.6f %10.6f %10.6f %10.6f %10.6f %10.6f' % (pos[k][0], pos[k][1], pos[k][2], dis[k][0], dis[k][1], dis[k][2])
                poscar_fh.close()
                #
                # run CRYSTAL here
                print "[__main__]: Running CRYSTAL..."
                os.system(CRYSTAL_RAMAN_RUN)
                try:
                    move('INCAR.out', disp_filename)
                except IOError:
                    print "[__main__]: ERROR Couldn't find INCAR.out file, exiting..."
                    sys.exit(1)
                #
                outcar_fh = open(disp_filename, 'r')
            #
            try:
                eps = get_epsilon_from_OUTCAR(outcar_fh)
                outcar_fh.close()
            except Exception, err:
                print err
                print "[__main__]: Moving "+disp_filename+" back to 'OUTCAR' and exiting..."
                move(disp_filename, 'OUTCAR')
                sys.exit(1)
            #
            # Central-difference accumulation of d eps / d Q, scaled by the
            # mode norm and the cell volume.
            for m in range(3):
                for n in range(3):
                    ra[m][n] += eps[m][n] * coeffs[j]/step_size * norm * vol/(4.0*pi)
                    #units: A^2/amu^1/2 = dimless * 1/A * 1/amu^1/2 * A^3
        #
        # Standard rotational invariants of the Raman tensor:
        # alpha (mean polarizability derivative) and beta2 (anisotropy).
        alpha = (ra[0][0] + ra[1][1] + ra[2][2])/3.0
        beta2 = ( (ra[0][0] - ra[1][1])**2 + (ra[0][0] - ra[2][2])**2 + (ra[1][1] - ra[2][2])**2 + 6.0 * (ra[0][1]**2 + ra[0][2]**2 + ra[1][2]**2) )/2.0
        print ""
        print "! %4i freq: %10.5f alpha: %10.7f beta2: %10.7f activity: %10.7f " % (i+1, eigval, alpha, beta2, 45.0*alpha**2 + 7.0*beta2)
        output_fh.write("%i %10.5f %10.7f %10.7f %10.7f\n" % (i+1, eigval, alpha, beta2, 45.0*alpha**2 + 7.0*beta2))
        output_fh.flush()
    #
    output_fh.close()
    sys.exit(0)
# done.
045eb6a2ba55cf3cf66628da3f1e015463d0468c | 2,281 | py | Python | src/eval/novel_bigrams.py | HenryDashwood/sentence-planner | 0bf9b88d7af36a936febe90672eeb5aed0022c63 | [
"MIT"
] | 3 | 2021-09-30T15:11:50.000Z | 2022-02-15T09:25:57.000Z | src/eval/novel_bigrams.py | HenryDashwood/sentence-planner | 0bf9b88d7af36a936febe90672eeb5aed0022c63 | [
"MIT"
] | null | null | null | src/eval/novel_bigrams.py | HenryDashwood/sentence-planner | 0bf9b88d7af36a936febe90672eeb5aed0022c63 | [
"MIT"
] | 2 | 2022-02-23T14:34:11.000Z | 2022-03-11T08:21:34.000Z | #
# Copyright (c) 2021 Idiap Research Institute, https://www.idiap.ch/
# Written by Andreas Marfurt <andreas.marfurt@idiap.ch>
#
""" Computes the proportion of novel bigrams in the summary. """
import numpy as np
import pandas as pd
from interface import Evaluation
from eval_utils import preprocess_article, preprocess_summary
class NovelBigrams(Evaluation):
    """Evaluation measuring the fraction of summary bigrams absent from the article."""

    def __init__(self, input_dir, sent_sep='<q>'):
        super().__init__(input_dir)
        self.name = 'novel_bigrams'
        self.sent_sep = sent_sep

    def run(self):
        """Average the novel-bigram proportion over the corpus and write a CSV."""
        articles = self.read_articles()
        summaries = self.read_candidate_summaries()
        assert len(articles) == len(summaries)
        scores = []
        for article, summary in zip(articles, summaries):
            words = preprocess_article(article)
            sents = preprocess_summary(summary, self.sent_sep)
            scores.append(NovelBigrams.compute_novel_bigrams(words, sents))
        # Summaries without any bigram yield None; drop them before averaging.
        valid_scores = [s for s in scores if s is not None]
        result = pd.DataFrame({'novel_bigrams': np.mean(valid_scores)}, index=[0])
        result.to_csv(self.get_output_path(), index=False)

    @staticmethod
    def compute_novel_bigrams(article_words, summary_tokenized_sents):
        """Return the proportion of summary bigrams not present in the article.

        Returns None when the summary contains no bigram at all.
        """
        article_bigrams = set(zip(article_words, article_words[1:]))
        summary_bigrams = set()
        for sent in summary_tokenized_sents:
            summary_bigrams.update(zip(sent, sent[1:]))
        if not summary_bigrams:
            return None
        return len(summary_bigrams - article_bigrams) / len(summary_bigrams)
if __name__ == '__main__':
    import argparse

    # Command-line entry point: run the evaluation over the given directory.
    arg_parser = argparse.ArgumentParser(
        description='Computes the proportion of novel bigrams in the summary.')
    arg_parser.add_argument('--eval_dir', required=True, help='Evaluation directory')
    cli_args = arg_parser.parse_args()
    evaluation = NovelBigrams(cli_args.eval_dir)
    evaluation.run()
f086f739861192cb28069ef1a4ac8f79101a91b4 | 2,003 | py | Python | examples/caget.py | delta-accelerator/channel_access.client | 531a35634845a6b009210b9537c3acf42c86f76f | [
"MIT"
] | null | null | null | examples/caget.py | delta-accelerator/channel_access.client | 531a35634845a6b009210b9537c3acf42c86f76f | [
"MIT"
] | null | null | null | examples/caget.py | delta-accelerator/channel_access.client | 531a35634845a6b009210b9537c3acf42c86f76f | [
"MIT"
] | null | null | null | import argparse
import channel_access.common as ca
import channel_access.client as cac
if __name__ == '__main__':
    # CLI: read one or more EPICS process values and print value + timestamp.
    parser = argparse.ArgumentParser(description='Read process values')
    parser.add_argument('pvs', metavar='PV', type=str, nargs='+',
        help='list of process values')
    args = parser.parse_args()
    with cac.Client() as client:
        # Create list of PVs. They automatically connect in the background.
        # We don't need monitoring or automatic retrieval of data.
        pvs = [ client.createPV(name, monitor=False, initialize=cac.InitData.NONE) for name in args.pvs ]
        # Asynchronous requests are queued. In order for all background
        # requests to be sent we flush here, so while waiting for the
        # first pvs to connect the others can also connect in the
        # background.
        client.flush()
        for pv in pvs:
            print(pv.name, end='\t')
            try:
                # Make sure the pv is connected before calling other
                # functions; this can block for up to one second.
                pv.ensure_connected(timeout=1.0)
                # Retrieve the value; this can block for up to
                # one second. This also retrieves timestamp, status and severity.
                if pv.get(block=1.0) is None:
                    raise RuntimeError('No value')
            except RuntimeError:
                # Something went wrong (connection or read timeout).
                print('NOT FOUND')
            else:
                # We need access to multiple attributes whose values
                # should all be from the same request. For this we need
                # to access the data dictionary ourselves. ``pv.data``
                # returns a copy of the data dictionary.
                # Using ``pv.timestamp`` and ``pv.value`` does not guarantee that
                # the timestamp belongs to the value.
                data = pv.data
                print("{timestamp}\t{value}".format(**data))
f087078a59a53f53c94da1a5511f5c19ca713f04 | 6,975 | py | Python | utils/hamiltonian.py | fhoeb/fh-thesis-scripts | 8894296ee2ca64bc208cc28803ac888b33bb4a94 | [
"BSD-3-Clause"
] | 2 | 2020-09-27T16:17:06.000Z | 2022-02-01T15:25:40.000Z | utils/hamiltonian.py | fhoeb/fh-thesis-scripts | 8894296ee2ca64bc208cc28803ac888b33bb4a94 | [
"BSD-3-Clause"
] | null | null | null | utils/hamiltonian.py | fhoeb/fh-thesis-scripts | 8894296ee2ca64bc208cc28803ac888b33bb4a94 | [
"BSD-3-Clause"
] | 1 | 2021-01-18T00:13:01.000Z | 2021-01-18T00:13:01.000Z | from scipy.special import factorial
from itertools import count
import numpy as np
from tmps.utils import pauli, fock
def get_boson_boson_dim(alpha, cutoff_coh):
    """
    Find the Fock-space cutoff dimension for the impurity coherent state.

    Starting from dimension 2, returns the first n for which the coherent
    state population |<n|alpha>|^2 drops below the chosen accuracy.

    :param alpha: Coherent-state displacement parameter of the impurity
    :param cutoff_coh: Accuracy threshold for the discarded population
    :returns: Local dimension cutoff (an int, at least 2)
    """
    def coherent_amplitude(n):
        # <n|alpha> = exp(-|alpha|^2 / 2) * alpha^n / sqrt(n!)
        # (named inner function instead of a lambda bound to a name, PEP 8)
        return np.exp(-np.abs(alpha) ** 2 / 2) * alpha ** n / np.sqrt(factorial(n, exact=True))

    cutoff_dim = 2
    for n in count(cutoff_dim, 1):
        # Stop at the first level whose population is below the threshold;
        # n! grows faster than alpha^n, so this always terminates.
        if np.abs(coherent_amplitude(n)) ** 2 < cutoff_coh:
            cutoff_dim = n
            break
    return cutoff_dim
def get_spin_boson_chain_hamiltonian(omega_0, c0, omega, t, bath_local_dim, finite_T=False):
    """
    Build the local and coupling parts of the Spin-Boson chain Hamiltonian
    used in Sec. 4.4.1 and 4.4.2 of the thesis.

    :param omega_0: Spin energy
    :param c0: Spin-Bath coupling
    :param omega: Bath energies
    :param t: Bath-bath couplings
    :param bath_local_dim: Local dimension of the bath
    :param finite_T: True selects the Sec. 4.4.2 (finite-temperature) form,
                     False the Sec. 4.4.1 form
    :returns: List of local Hamiltonians, List of coupling Hamiltonians
    """
    annihilate = fock.a(bath_local_dim)
    create = fock.a_dag(bath_local_dim)
    number_op = fock.n(bath_local_dim)
    if finite_T:
        # Finite temperature: sigma_z splitting with a lowering-type coupling.
        spin_site = omega_0 / 2 * pauli.Z
        coupling_op = np.array([[0, 0], [1, 0]], dtype=np.complex128)
    else:
        # Zero temperature: sigma_x splitting with sigma_z coupling.
        spin_site = omega_0 / 2 * pauli.X
        coupling_op = pauli.Z
    # One local Hamiltonian per bath site.
    bath_sites = [energy * number_op for energy in omega]
    # Nearest-neighbor hopping between bath sites.
    hop = np.kron(annihilate, create) + np.kron(create, annihilate)
    bath_links = [strength * hop for strength in t]
    # Coupling between the spin and the first bath site.
    spin_link = c0 * (np.kron(coupling_op, create) +
                      np.kron(coupling_op.conj().T, annihilate))
    return [spin_site] + bath_sites, [spin_link] + bath_links
def get_spin_boson_star_hamiltonian(omega_0, system_index, gamma, xi, bath_local_dim, finite_T=False):
    """
    Build the local and coupling parts of the Spin-Boson star Hamiltonian
    used in Sec. 4.4.1 and 4.4.2 of the thesis.

    :param omega_0: Spin energy
    :param system_index: Index of the system in the auxiliary chain
    :param gamma: System-Bath couplings
    :param xi: Bath energies
    :param bath_local_dim: Local dimension of the bath
    :param finite_T: True selects the Sec. 4.4.2 (finite-temperature) form,
                     False the Sec. 4.4.1 form
    :returns: List of local Hamiltonians, List of coupling Hamiltonians
    """
    annihilate = fock.a(bath_local_dim)
    create = fock.a_dag(bath_local_dim)
    number_op = fock.n(bath_local_dim)
    if finite_T:
        spin_site = omega_0 / 2 * pauli.Z
        coupling_op = np.array([[0, 0], [1, 0]], dtype=np.complex128)
    else:
        spin_site = omega_0 / 2 * pauli.X
        coupling_op = pauli.Z
    bath_sites = [energy * number_op for energy in xi]
    # Bath modes left of the system carry the bath operator first in the
    # tensor product; right of the system the order is reversed.
    left_op = np.kron(annihilate, coupling_op.conj().T) + np.kron(create, coupling_op)
    right_op = np.kron(coupling_op.conj().T, annihilate) + np.kron(coupling_op, create)
    left_links = [strength * left_op for strength in gamma[:system_index]]
    right_links = [strength * right_op for strength in gamma[system_index:]]
    # Insert the spin site at its position within the auxiliary chain.
    local = bath_sites[:system_index] + [spin_site] + bath_sites[system_index:]
    return local, left_links + right_links
def get_boson_boson_chain_hamiltonian(omega_0, c0, omega, t, cutoff_dim):
    """
    Returns local and coupling parts of the Boson-Boson model chain Hamiltonian
    used in Sec. 4.4.3 of the thesis (the impurity is a bosonic mode, not a spin).

    :param omega_0: Impurity (system) mode energy
    :param c0: System-Bath coupling
    :param omega: Bath energies
    :param t: Bath-bath couplings
    :param cutoff_dim: Local dimension of the impurity and bath
    :returns: List of local Hamiltonians, List of coupling Hamiltonians
    """
    # Local Hamiltonian of the System (bosonic number operator):
    sys_loc = omega_0 * fock.n(cutoff_dim)
    # Coupling operator of the system (annihilation operator):
    sys_coupl = fock.a(cutoff_dim)
    # Local Hamiltonian of the bath
    fock_n = fock.n(cutoff_dim)
    bath_loc = [energy * fock_n for energy in omega]
    # Hopping between neighboring bath modes
    bath_coupling_op = np.kron(fock.a(cutoff_dim), fock.a_dag(cutoff_dim)) + \
                       np.kron(fock.a_dag(cutoff_dim), fock.a(cutoff_dim))
    bath_bath_coupl = [coupling * bath_coupling_op for coupling in t]
    # System-Bath coupling (renamed from the copy-pasted 'spin_bath_coupl';
    # the system here is a boson, not a spin)
    sys_bath_coupl = c0 * (np.kron(sys_coupl, fock.a_dag(cutoff_dim)) +
                           np.kron(sys_coupl.conj().T, fock.a(cutoff_dim)))
    return [sys_loc] + bath_loc, [sys_bath_coupl] + bath_bath_coupl
def get_boson_boson_star_hamiltonian(omega_0, system_index, gamma, xi, cutoff_dim):
    """
    Returns local and coupling parts of the Boson-Boson model star Hamiltonian
    used in Sec. 4.4.3 of the thesis (the system is a bosonic mode, not a spin).

    :param omega_0: Impurity (system) mode energy
    :param system_index: Index of the system in the auxiliary chain
    :param gamma: System-Bath couplings
    :param xi: Bath energies
    :param cutoff_dim: Local dimension of the impurity and bath
    :returns: List of local Hamiltonians, List of coupling Hamiltonians
    """
    # Local Hamiltonian of the System (bosonic number operator):
    sys_loc = omega_0 * fock.n(cutoff_dim)
    # Coupling operator of the system (annihilation operator):
    sys_coupl = fock.a(cutoff_dim)
    # Local Hamiltonian of the bath
    fock_n = fock.n(cutoff_dim)
    bath_loc = [energy * fock_n for energy in xi]
    # Coupling operators for the bath modes to the left of the system
    # (bath factor first in the tensor product)
    left_bath_coupling_op = np.kron(fock.a(cutoff_dim), sys_coupl.conj().T) + \
                            np.kron(fock.a_dag(cutoff_dim), sys_coupl)
    left_bath_coupl = [coupling * left_bath_coupling_op for coupling in gamma[:system_index]]
    # Coupling operators for the bath modes to the right of the system
    # (system factor first in the tensor product)
    right_bath_coupling_op = np.kron(sys_coupl.conj().T, fock.a(cutoff_dim)) + \
                             np.kron(sys_coupl, fock.a_dag(cutoff_dim))
    right_bath_coupl = [coupling * right_bath_coupling_op for coupling in gamma[system_index:]]
    # The system's local term is inserted at system_index within the bath terms.
    return bath_loc[:system_index] + [sys_loc] + bath_loc[system_index:], left_bath_coupl + right_bath_coupl
| 42.791411 | 118 | 0.677993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,927 | 0.419642 |
f08884e338e058e8d57f6ec08eecc817c73c4a4f | 2,564 | py | Python | zpgc_2016/include/logbook.py | mpatacchiola/naogui | 2c71c82362edcf66b1a24a5f2af23e9719011146 | [
"MIT"
] | 2 | 2017-12-22T14:33:07.000Z | 2020-07-23T09:35:59.000Z | zpgc_2016/include/logbook.py | mpatacchiola/naogui | 2c71c82362edcf66b1a24a5f2af23e9719011146 | [
"MIT"
] | null | null | null | zpgc_2016/include/logbook.py | mpatacchiola/naogui | 2c71c82362edcf66b1a24a5f2af23e9719011146 | [
"MIT"
] | 4 | 2016-04-01T10:02:39.000Z | 2018-04-14T08:05:20.000Z | #!/usr/bin/env python
import os
import time
class Logbook(object):
    """CSV logbook recording one row per experimental trial.

    Each instance writes to a timestamp-named file "<ddmmYYYY_HHMMSS>.csv"
    created in the current working directory at construction time.
    """

    def __init__(self):
        """Create the CSV file and write the column header."""
        self._id = time.strftime("%d%m%Y_%H%M%S", time.gmtime())  # id of the log, it's the timestamp
        self._trial = 0
        self._pinv = 0.0
        self._rinv = 0.0
        self._pmult = 0.0
        self._rmult = 0.0
        self._gaze = False
        self._pointing = False
        self._timer = 0
        self._mp3 = ""
        # create the file
        open(self._id + ".csv", 'a').close()
        # Write the header as first line
        try:
            path_to_file = self._id + ".csv"
            with open(path_to_file, "a") as f:
                # 'with' closes the file; the explicit f.close() that was here was redundant.
                f.write("trial," + "pinv," + "rinv," + "pmult," + "rmult," + "total," + "gaze," + "pointing," + "timer," + "audio" + '\n')
        except Exception:
            # log exception (narrowed from a bare 'except:' so Ctrl-C still works)
            print("* LOGBOOK: exception creating the header.")

    def AddTextLine(self, stringToAdd):
        """Append an arbitrary text line to the CSV file."""
        try:
            path_to_file = self._id + ".csv"
            with open(path_to_file, "a") as f:
                f.write(stringToAdd + '\n')
        except Exception:
            # log exception
            print("* LOGBOOK: exception adding a text line to the file.")

    def AddLine(self, trial, pinv, rinv, pmult, rmult, total, gaze, pointing, timer, mp3):
        """Append one trial row and cache the latest values on the instance."""
        try:
            self._trial = trial
            self._pinv = pinv
            self._rinv = rinv
            self._pmult = pmult
            self._rmult = rmult
            self._gaze = gaze
            self._pointing = pointing
            self._mp3 = mp3
            self._timer = timer
            path_to_file = self._id + ".csv"
            with open(path_to_file, "a") as f:
                f.write(str(trial) + "," + str(pinv) + "," + str(rinv) + "," + str(pmult) + "," + str(rmult) + "," + str(total) + "," + str(gaze) + "," + str(pointing) + "," + str(timer) + "," + str(mp3) + '\n')
        except Exception:
            # log exception
            print("* LOGBOOK: exception adding a line to the file.")

    def SaveFile(self, filePath):
        """Parse an XML file at filePath; return True on success, False otherwise.

        NOTE(review): this method looks copy-pasted from an XML-parser class;
        it previously raised NameError because 'minidom' was never imported.
        """
        from xml.dom import minidom  # bug fix: minidom was referenced but not imported
        print(filePath)
        if os.path.isfile(filePath):
            self._path = filePath
            self._doc = minidom.parse(filePath)
            print("PARSER: the XML file was correctly loaded.")
            return True
        else:
            print("PARSER: Error the XML file does not exist, please check if the path is correct.")
            return False
| 32.871795 | 213 | 0.49454 | 2,516 | 0.981279 | 0 | 0 | 0 | 0 | 0 | 0 | 630 | 0.24571 |
f08b09f830197b622c222148be38b3159d8bff5d | 6,346 | py | Python | src/experimental_results/outdoor test/path_planning_analysis.py | NASLab/GroundROS | 6673db009ffcff59500eb1e3d5873111282e7749 | [
"MIT"
] | 1 | 2017-12-17T11:11:55.000Z | 2017-12-17T11:11:55.000Z | src/experimental_results/outdoor test/path_planning_analysis.py | NASLab/GroundROS | 6673db009ffcff59500eb1e3d5873111282e7749 | [
"MIT"
] | 2 | 2015-10-02T19:02:06.000Z | 2015-10-02T19:02:36.000Z | src/experimental_results/outdoor test/path_planning_analysis.py | NASLab/GroundROS | 6673db009ffcff59500eb1e3d5873111282e7749 | [
"MIT"
] | null | null | null | # python experimental tests for Husky
from numpy import sin, cos, pi, load
import matplotlib.pyplot as plt
from time import sleep
# Python 2 script: replays logged robot poses + lidar scans and animates them
# in a global frame with matplotlib.

# Reject poses whose yaw changed by more than this between neighboring samples.
yaw_bound = 2 * pi / 180
# Fixed transform from the lidar to the robot frame (rotation + offsets, in rad/m).
yaw_calibrate = pi / 180 * (0)
x_offset_calibrate = 0
y_offset_calibrate = -.08
f0 = plt.figure()
ax0 = f0.add_subplot(111)
ax1 = f0.add_subplot(111)
# Each entry of env_data: [x, y, yaw, readings]; first row skipped.
env_data = load('loginfo2.npy')[1:]
x = [[]] * len(env_data)
y = [[]] * len(env_data)
# Half-width of the window used for the yaw-jump filter below.
m=2
# print len(env_data)
for i in range(m, len(env_data) - m):
    if len(env_data[i]) > 0:
        x[i] = env_data[i][0]
        y[i] = env_data[i][1]
        print i, x[i], y[i]
        yaw = env_data[i][2]
        # filter some of the readings; comment to see the effect
        if len(env_data[i + m]) == 0 or abs(yaw - env_data[i - m][2]) > yaw_bound or abs(yaw - env_data[i + m][2]) > yaw_bound:
            continue
        # readings[j] = (range, angle) pairs from the lidar at sample i.
        readings = env_data[i][3]
        readings_x = [[]] * len(readings)
        readings_y = [[]] * len(readings)
        k = 0
        for j in range(len(readings)):
            # lidar readings in lidar frame (polar -> cartesian; angle sign flipped)
            x_temp = readings[j][0] * cos(-readings[j][1])
            y_temp = readings[j][0] * sin(-readings[j][1])
            # lidar readings in robot frame (apply calibration rotation + offset)
            x_temp2 = x_temp * \
                cos(yaw_calibrate) - y_temp * \
                sin(yaw_calibrate) + x_offset_calibrate
            y_temp2 = y_temp * \
                cos(yaw_calibrate) + x_temp * \
                sin(yaw_calibrate) + y_offset_calibrate
            # lidar readings in global frame (rotate by robot yaw, translate by pose)
            readings_x[k] = x_temp2 * cos(yaw) - y_temp2 * sin(yaw) + x[i]
            readings_y[k] = y_temp2 * cos(yaw) + x_temp2 * sin(yaw) + y[i]
            k += 1
        # Animate: red dots = lidar hits, green = current pose, black = reference point.
        ax0.plot(readings_x, readings_y, 'r.')
        ax0.plot(x[i],y[i], 'go', lw=3)
        ax0.plot(0,-10,'ko')
        ax0.set_xlim([-50, 50])
        ax0.set_ylim([-50, 50])
        # ax0.axis('equal')
        plt.draw()
        plt.pause(.0001)
        ax0.clear()

# Final static frame: legend entry plus the full trajectory.
ax0.plot([], [], 'r.', label='Lidar Reading')

# env_data = load('planner_of_agent_0.npy')[1:]
# x = [[]] * len(env_data)
# y = [[]] * len(env_data)
# for i in range(1, len(env_data) - 1):
#     if len(env_data[i]) > 0:
#         x[i] = env_data[i][0]
#         y[i] = env_data[i][1]

# Plot every valid (truthy) logged position as the robot's trajectory.
ax0.plot([value for value in x if value],
         [value for value in y if value], 'go', lw=3, label='Robot\'s Trajectory')

# env_data = load('planner_of_agent_1.npy')[1:]
# x = [[]] * len(env_data)
# y = [[]] * len(env_data)
# for i in range(1, len(env_data) - 1):
#     if len(env_data[i]) > 0:
#         x[i] = env_data[i][0]
#         y[i] = env_data[i][1]
# ax0.plot([value for value in x if value],
#          [value for value in y if value], 'bo', lw=3, label='Robot\'s Trajectory')
# ax0.legend()
# ax0.axis('equal')
# plt.draw()
# plt.pause(.1)
# raw_input("<Hit Enter To Close>")
plt.close(f0)
# yaw_bound = 3 * pi / 180
# yaw_calibrate = pi / 180 * (0)
# x_offset_calibrate = .23
# y_offset_calibrate = -.08
# data = np.load('pos.npy')[1:]
# print len(data)
# error_long = data[:, 0]
# error_lat = data[:, 1]
# ref_x = [value for value in data[:, 2]]
# print ref_x[:30]
# ref_y = [value for value in data[:, 3]]
# pos_x = [value for value in data[:, 4]][0::1]
# pos_y = [value for value in data[:, 5]][0::1]
# pos_theta = data[:, 6]
# print data
# time = data[:, 7] - data[0, 7]
# vel = data[:, 8]
# plt.plot(ref_x, ref_y, 'ro')
# plt.gca().set_aspect('equal', adjustable='box')
# f0 = plt.figure(1, figsize=(9, 9))
# ax0 = f0.add_subplot(111)
# ax0.plot(ref_x, ref_y, '--', lw=3, label='Reference Trajectory')
# ax0.plot(pos_x[0], pos_y[0], 'ms', markersize=10, label='Start Point')
# ax0.plot(pos_x, pos_y, 'go', label='Robot Trajectory')
# env_data = np.load('planner_of_agent_0.npy')[1:]
# x = [[]] * len(env_data)
# y = [[]] * len(env_data)
# print len(env_data)
# for i in range(1, len(env_data) - 1):
# if len(env_data[i]) > 0:
# x[i] = env_data[i][0]
# y[i] = env_data[i][1]
# yaw = env_data[i][2]
# filter some of the readings; comment to see the effect
# if len(env_data[i + 1]) == 0 or abs(yaw - env_data[i - 1][2]) > yaw_bound or abs(yaw - env_data[i + 1][2]) > yaw_bound:
# continue
# readings = env_data[i][3]
# readings_x = [[]] * len(readings)
# readings_y = [[]] * len(readings)
# k = 0
# for j in range(len(readings)):
# lidar readings in lidar frame
# x_temp = readings[j][0] * cos(-readings[j][1])
# y_temp = readings[j][0] * sin(-readings[j][1])
# lidar readings in robot frame
# x_temp2 = x_temp * \
# cos(yaw_calibrate) - y_temp * \
# sin(yaw_calibrate) + x_offset_calibrate
# y_temp2 = y_temp * \
# cos(yaw_calibrate) + x_temp * \
# sin(yaw_calibrate) + y_offset_calibrate
# lidar readings in global frame
# readings_x[k] = x_temp2 * cos(yaw) - y_temp2 * sin(yaw) + x[i]
# readings_y[k] = y_temp2 * cos(yaw) + x_temp2 * sin(yaw) + y[i]
# k += 1
# ax0.plot(readings_x, readings_y, 'r.')
# for i in range(len(env_data)):
# if len(env_data[i])>0:
# x[i] = env_data[i][0]
# y[i] = env_data[i][1]
# yaw = env_data[i][2]
# print yaw
# readings = env_data[i][3]
# readings_x = [[]]*len(readings)
# readings_y = [[]]*len(readings)
# print len(readings),len(readings_x)
# k=0
# for j in range(len(readings)):
# if i<200:
# print k,j,len(readings_x)
# readings_x[k] = x[i] + readings[j][0]*sin(pi/2-yaw+readings[j][1])
# readings_y[k] = y[i] + readings[j][0]*cos(pi/2-yaw+readings[j][1])
# k+=1
# ax0.plot(readings_x, readings_y,'r.')
# ax0.plot([], [], 'r.', label='Lidar Reading')
# print x
# ax0.plot([value for value in x if value],
# [value for value in y if value], 'go', lw=3,label='Robot\'s Trajectory')
# env_y = np.load('env.npy')[1]
# env_x = [value for value in env_x if value]
# env_y = [value for value in env_y if value]
# ax0.plot(env_x, env_y, 'r.', )
# ax0.plot(-.5, 2.7, 'cs', markersize=10, label='Destination')
# ax0.legend()
# ax0.axis('equal')
# ax0.set_xlim(-3.5, 3.5)
# ax0.set_ylim(-3, 4)
# ax0.set_xlabel('X (m)')
# ax0.set_ylabel('Y (m)')
# ax0.axis('equal')
# plt.tight_layout()
# plt.draw()
# plt.pause(.1) # <-------
# raw_input("<Hit Enter To Close>")
# plt.close(f0)
| 30.956098 | 129 | 0.560668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,359 | 0.686889 |
f08b140b4aa6ed14ab345d4ef1509b7d92f9f433 | 1,770 | py | Python | source/study/score.py | mverleg/WW | b58a9bbfc91d19541840f490ed59997d85389c0a | [
"MIT"
] | null | null | null | source/study/score.py | mverleg/WW | b58a9bbfc91d19541840f490ed59997d85389c0a | [
"MIT"
] | 1 | 2016-03-18T09:29:42.000Z | 2016-03-18T09:29:42.000Z | source/study/score.py | mverleg/WW | b58a9bbfc91d19541840f490ed59997d85389c0a | [
"MIT"
] | null | null | null | from sys import stderr
from study.models import Result, ActiveTranslation
def update_score(learner, result, verified=False):
    """
    Update the learner's score after a phrase has been judged and record the outcome.

    :param learner: learner whose active study phrase was judged; its
        study_active score is updated and saved.
    :param result: Result.CORRECT, Result.CLOSE or Result.INCORRECT
    :param verified: whether the judgement has been verified; stored on the Result.
    :return: Result instance
    :raises Exception: if result is not one of the known outcomes
    """
    if result == Result.CORRECT:
        base = learner.reward_magnitude
    elif result == Result.CLOSE:
        base = - learner.reward_magnitude
    elif result == Result.INCORRECT:
        base = -2 * learner.reward_magnitude
    else:
        raise Exception('Scoring does not know how to deal with result = %s' % result)
    learner.study_active.score += base
    #todo: take history into account: the same phrase correct 5 times in a row should increase score a lot (don't show again) [actually independent of new result: many correct before should amplify result]
    result = Result(
        learner = learner,
        asked = learner.study_hidden,
        known = learner.study_shown,
        result = result,
        # bug fix: 'verified' was hard-coded to False, silently ignoring the parameter
        verified = verified
    )
    learner.study_active.save()
    result.save()
    return result
| 39.333333 | 292 | 0.766102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,138 | 0.642938 |
f08ba58aa1ee5462b9589a7cefae921b8d9e4b35 | 477 | py | Python | titan/react_pkg/prettier/__init__.py | mnieber/moonleap | 2c951565c32f2e733a063b4a4f7b3d917ef1ec07 | [
"MIT"
] | null | null | null | titan/react_pkg/prettier/__init__.py | mnieber/moonleap | 2c951565c32f2e733a063b4a4f7b3d917ef1ec07 | [
"MIT"
] | null | null | null | titan/react_pkg/prettier/__init__.py | mnieber/moonleap | 2c951565c32f2e733a063b4a4f7b3d917ef1ec07 | [
"MIT"
] | null | null | null | from pathlib import Path
from moonleap import add, create
from titan.project_pkg.service import Tool
from titan.react_pkg.nodepackage import load_node_package_config
class Prettier(Tool):
    """Tool resource representing the Prettier code formatter."""
    pass
base_tags = [("prettier", ["tool"])]
@create("prettier")
def create_prettier(term, block):
prettier = Prettier(name="prettier")
prettier.add_template_dir(Path(__file__).parent / "templates")
add(prettier, load_node_package_config(__file__))
return prettier
| 22.714286 | 66 | 0.761006 | 30 | 0.062893 | 0 | 0 | 235 | 0.492662 | 0 | 0 | 47 | 0.098532 |
f08c68a65e967b9aa34c701a06cb12906d568f55 | 787 | py | Python | docs/make_readme.py | thombashi/tabledata | 4e93930f89ae27c4ed852b4b61082a0457183019 | [
"MIT"
] | 4 | 2018-09-15T14:57:57.000Z | 2022-01-05T20:27:37.000Z | docs/make_readme.py | thombashi/tabledata | 4e93930f89ae27c4ed852b4b61082a0457183019 | [
"MIT"
] | null | null | null | docs/make_readme.py | thombashi/tabledata | 4e93930f89ae27c4ed852b4b61082a0457183019 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import sys
from readmemaker import ReadmeMaker
PROJECT_NAME = "tabledata"
OUTPUT_DIR = ".."
def main():
maker = ReadmeMaker(
PROJECT_NAME,
OUTPUT_DIR,
is_make_toc=True,
project_url=f"https://github.com/thombashi/{PROJECT_NAME}",
)
maker.inc_indent_level()
maker.write_chapter("Summary")
maker.write_introduction_file("summary.txt")
maker.write_introduction_file("badges.txt")
maker.write_introduction_file("installation.rst")
maker.set_indent_level(0)
maker.write_chapter("Documentation")
maker.write_lines([f"https://{PROJECT_NAME:s}.rtfd.io/"])
return 0
if __name__ == "__main__":
sys.exit(main())
| 19.675 | 67 | 0.683609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.337992 |
f08e63b84393000dc066899772d576a46b3ce28c | 1,860 | py | Python | expired_cert_finder/scanner.py | alphagov/expired-cert-finder | c77843dab36a12f82cba9cdf18d85dfc5dd5e7c5 | [
"MIT"
] | 1 | 2021-01-30T18:45:02.000Z | 2021-01-30T18:45:02.000Z | expired_cert_finder/scanner.py | alphagov/expired-cert-finder | c77843dab36a12f82cba9cdf18d85dfc5dd5e7c5 | [
"MIT"
] | null | null | null | expired_cert_finder/scanner.py | alphagov/expired-cert-finder | c77843dab36a12f82cba9cdf18d85dfc5dd5e7c5 | [
"MIT"
] | 2 | 2021-01-31T00:07:09.000Z | 2021-04-10T20:17:57.000Z | #! /usr/bin/env python
from expired_cert_finder.plugins.raw import RawParser
from expired_cert_finder.plugins.yaml import YamlParser
from expired_cert_finder.allowed_certs import AllowedCerts
rawParser = RawParser
yamlParser = YamlParser

# handle dynamic loading.


def scan_file_for_certificate(path, expired_only, debug):
    """Scan a single file for certificates; report expired / soon-to-expire ones.

    :param path: path of the file to scan (skipped if allow-listed)
    :param expired_only: if True, only report certificates that already expired
    :param debug: if True, print extra diagnostics and re-raise unexpected errors
    :return: list of cert-info dicts, each augmented with a human-readable 'message'
    """
    results = []
    try:
        if path in AllowedCerts.instance().allowed_certs:
            return results
        # Context manager guarantees the handle is closed (it was leaked before).
        with open(path, "r") as file:
            file_contents = file.read()
        run_default = True
        certs = []
        if path.endswith(('.yaml', '.yml')):
            try:
                certs = yamlParser.process(path, file_contents, debug)
                run_default = False
            except Exception as ex:
                if debug:
                    print("Error while using YAML Parser: " + path)
                    print(ex)
                # Fall through to the raw parser below.
        if run_default:
            certs = rawParser.process(path, file_contents, debug)
        for cert in certs:
            try:
                cert_info = cert.getInfo()
                if cert_info is not None and (cert_info['is_expired']
                                              or (cert_info['close_to_expiry'] and not expired_only)):
                    status = "EXPIRED" if cert_info['is_expired'] else "CLOSE_TO_EXPIRY"
                    cert_info['message'] = '%s, %s, %s: %s' % (cert.path, cert_info['subject'], status, cert_info['not_after'])
                    results.append(cert_info)
            except Exception as e:
                print(cert, e)
                raise
    except UnicodeDecodeError:
        # Binary files cannot contain a PEM certificate; skip silently.
        pass
    except Exception as ex:
        if debug:
            print("File that caused exception: %s" % path)
            raise  # bare 'raise' preserves the original traceback (was 'raise ex')
        else:
            print("Error processing %s, %s" % (path, ex))
    return results
f08fc0d043f30f7b77fc0be9f310cc14919727ea | 153 | py | Python | pywi/processing/__init__.py | jeremiedecock/mrif | 094b0dd81ff2be0e24bf3871caab48da1b5d138b | [
"MIT"
] | 1 | 2021-07-06T06:02:45.000Z | 2021-07-06T06:02:45.000Z | pywi/processing/__init__.py | jeremiedecock/mrif | 094b0dd81ff2be0e24bf3871caab48da1b5d138b | [
"MIT"
] | null | null | null | pywi/processing/__init__.py | jeremiedecock/mrif | 094b0dd81ff2be0e24bf3871caab48da1b5d138b | [
"MIT"
] | 1 | 2019-01-07T10:50:38.000Z | 2019-01-07T10:50:38.000Z | """Processing modules
This package contains image processing algorithms.
"""
from . import compositing
from . import filtering
from . import transform
| 17 | 50 | 0.784314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.503268 |
f08fde25b780c828cb41e9681045e08d41cd14d7 | 3,128 | py | Python | atlas/foundations_contrib/src/test/test_lazy_bucket.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 296 | 2020-03-16T19:55:00.000Z | 2022-01-10T19:46:05.000Z | atlas/foundations_contrib/src/test/test_lazy_bucket.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 57 | 2020-03-17T11:15:57.000Z | 2021-07-10T14:42:27.000Z | atlas/foundations_contrib/src/test/test_lazy_bucket.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 38 | 2020-03-17T21:06:05.000Z | 2022-02-08T03:19:34.000Z |
import unittest
from mock import Mock
from foundations_spec.helpers.spec import Spec
from foundations_spec.helpers import let, let_mock, set_up
class TestLazyBucket(Spec):
    """Spec for LazyBucket: a bucket proxy that defers constructing the real
    bucket until first use, then forwards every operation to it."""

    @let
    def lazy_bucket(self):
        # Built lazily by the Spec framework; wraps the mocked constructor.
        from foundations_contrib.lazy_bucket import LazyBucket
        return LazyBucket(self.bucket_constructor)

    @set_up
    def set_up(self):
        # The mocked constructor returns the mocked underlying bucket.
        self.bucket_constructor.return_value = self.bucket

    # Mocks supplied by the Spec framework: the bucket, its constructor, and
    # opaque argument/return placeholders used to verify pass-through behavior.
    bucket_constructor = let_mock()
    bucket = let_mock()
    name = let_mock()
    data = let_mock()
    input_file = let_mock()
    output_file = let_mock()
    dummy = let_mock()
    pathname = let_mock()
    source = let_mock()
    destination = let_mock()

    def test_ensure_bucket_is_not_constructed(self):
        """Merely creating the LazyBucket must not invoke the constructor."""
        self.lazy_bucket
        self.bucket_constructor.assert_not_called()

    def test_upload_from_string_calls_bucket(self):
        """upload_from_string is forwarded verbatim and its result returned."""
        self.bucket.upload_from_string.return_value = self.dummy
        result = self.lazy_bucket.upload_from_string(self.name, self.data)
        self.bucket.upload_from_string.assert_called_with(self.name, self.data)
        self.assertEqual(self.dummy, result)

    def test_upload_from_file_calls_bucket(self):
        """upload_from_file is forwarded verbatim and its result returned."""
        self.bucket.upload_from_file.return_value = self.dummy
        result = self.lazy_bucket.upload_from_file(self.name, self.input_file)
        self.bucket.upload_from_file.assert_called_with(self.name, self.input_file)
        self.assertEqual(self.dummy, result)

    def test_exists_calls_bucket(self):
        """exists is forwarded verbatim and its result returned."""
        self.bucket.exists.return_value = self.dummy
        result = self.lazy_bucket.exists(self.name)
        self.bucket.exists.assert_called_with(self.name)
        self.assertEqual(self.dummy, result)

    def test_download_as_string_calls_bucket(self):
        """download_as_string is forwarded verbatim and its result returned."""
        self.bucket.download_as_string.return_value = self.dummy
        result = self.lazy_bucket.download_as_string(self.name)
        self.bucket.download_as_string.assert_called_with(self.name)
        self.assertEqual(self.dummy, result)

    def test_download_to_file_calls_bucket(self):
        """download_to_file is forwarded verbatim and its result returned."""
        self.bucket.download_to_file.return_value = self.dummy
        result = self.lazy_bucket.download_to_file(self.name, self.output_file)
        self.bucket.download_to_file.assert_called_with(self.name, self.output_file)
        self.assertEqual(self.dummy, result)

    def test_list_files_calls_bucket(self):
        """list_files is forwarded verbatim and its result returned."""
        self.bucket.list_files.return_value = self.dummy
        result = self.lazy_bucket.list_files(self.pathname)
        self.bucket.list_files.assert_called_with(self.pathname)
        self.assertEqual(self.dummy, result)

    def test_remove_calls_bucket(self):
        """remove is forwarded verbatim and its result returned."""
        self.bucket.remove.return_value = self.dummy
        result = self.lazy_bucket.remove(self.name)
        self.bucket.remove.assert_called_with(self.name)
        self.assertEqual(self.dummy, result)

    def test_move_calls_bucket(self):
        """move is forwarded verbatim and its result returned."""
        self.bucket.move.return_value = self.dummy
        result = self.lazy_bucket.move(self.source, self.destination)
        self.bucket.move.assert_called_with(self.source, self.destination)
        self.assertEqual(self.dummy, result)
| 38.146341 | 84 | 0.728581 | 2,979 | 0.952366 | 0 | 0 | 233 | 0.074488 | 0 | 0 | 0 | 0 |
f0924e750052f6e9608fdfdf58fe2cfd7f5812d2 | 1,557 | py | Python | Hw/H3_CNN/model.py | zhigangjiang/DLAndML-Experiment | 162fb9c2a0865b6adf16f9a9e9a3f6a9b3e7f9b6 | [
"MIT"
] | 2 | 2021-11-09T14:28:19.000Z | 2022-03-03T17:49:23.000Z | Hw/H3_CNN/model.py | zhigangjiang/DLAndML-Experiment | 162fb9c2a0865b6adf16f9a9e9a3f6a9b3e7f9b6 | [
"MIT"
] | null | null | null | Hw/H3_CNN/model.py | zhigangjiang/DLAndML-Experiment | 162fb9c2a0865b6adf16f9a9e9a3f6a9b3e7f9b6 | [
"MIT"
] | null | null | null | from abc import ABC
import torch.nn as nn
class CNN5(nn.Module, ABC):
    """Five conv-block CNN classifying 128x128 RGB images into 11 classes."""

    def __init__(self):
        super(CNN5, self).__init__()
        # torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
        # torch.nn.MaxPool2d(kernel_size, stride, padding)
        # Each block: Conv(3x3, stride 1, pad 1) -> BatchNorm -> ReLU -> MaxPool(2x2),
        # halving the spatial size: 128 -> 64 -> 32 -> 16 -> 8 -> 4.
        channel_plan = [(3, 64), (64, 128), (128, 256), (256, 512), (512, 512)]
        conv_layers = []
        for in_channels, out_channels in channel_plan:
            conv_layers.extend([
                nn.Conv2d(in_channels, out_channels, 3, 1, 1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(),
                nn.MaxPool2d(2, 2, 0),
            ])
        # Same flat nn.Sequential layout as before, so state_dict keys are unchanged.
        self.cnn = nn.Sequential(*conv_layers)
        # Classifier head operating on the flattened [512, 4, 4] feature map.
        self.fc = nn.Sequential(
            nn.Linear(512 * 4 * 4, 1024),
            nn.Dropout(0.5),
            nn.ReLU(),
            nn.Linear(1024, 512),
            nn.Dropout(0.5),
            nn.ReLU(),
            nn.Linear(512, 11),
        )

    def forward(self, x):
        features = self.cnn(x)
        return self.fc(features.view(features.size(0), -1))
| 30.529412 | 82 | 0.451509 | 1,516 | 0.971172 | 0 | 0 | 0 | 0 | 0 | 0 | 296 | 0.189622 |
f0935e4564c845dcf620246319af92237bea563f | 167 | py | Python | calvestbr/__init__.py | IsaacHiguchi/calvestbr | ebf702e9e67299c822a6cc21cad60b247446fcfa | [
"MIT"
] | null | null | null | calvestbr/__init__.py | IsaacHiguchi/calvestbr | ebf702e9e67299c822a6cc21cad60b247446fcfa | [
"MIT"
] | null | null | null | calvestbr/__init__.py | IsaacHiguchi/calvestbr | ebf702e9e67299c822a6cc21cad60b247446fcfa | [
"MIT"
] | null | null | null | """Top-level package for Calendário dos Vestibulares do Brasil."""
__author__ = """Ana_Isaac_Marina"""
__email__ = 'marinalara170303@gmail.com'
__version__ = '0.0.1'
| 27.833333 | 66 | 0.742515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.738095 |
f095a7bbf05cec0014035a747e47610d7205956c | 776 | py | Python | controllers/utils_faq.py | haoyuchen1992/CourseBuilder | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 | [
"Apache-2.0"
] | null | null | null | controllers/utils_faq.py | haoyuchen1992/CourseBuilder | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 | [
"Apache-2.0"
] | null | null | null | controllers/utils_faq.py | haoyuchen1992/CourseBuilder | ba8f0e05c53cc74bb4e46235a7855fdfbd63dff7 | [
"Apache-2.0"
] | null | null | null | from utils import BaseHandler
class FaqHandler(BaseHandler):
"""Handler for FAQ page."""
def get(self):
"""Handler GET requests."""
# print "Get current get_user"
# print self.get_user()
# if not self.get_user():
# self.transient_student = True
#This if statement will let the non-student unaccessable to the FAQ
#self.personalize_page_and_get_user():
# if not self.personalize_page_and_get_enrolled():
# return
self.template_value['navbar'] = {'faq': True}
if not self.get_user():
self.template_value['transient_student'] = True
self.template_value['loginUrl'] = True
else:
self.template_value['transient_student'] = False
self.template_value['logoutUrl'] = True
self.render('faq.html')
| 32.333333 | 71 | 0.675258 | 740 | 0.953608 | 0 | 0 | 0 | 0 | 0 | 0 | 409 | 0.527062 |
f095d3380b5ebd2361d49e633805f48a1b42caba | 2,866 | py | Python | tests/knowledge/rules/aws/context_aware/test_ec2_role_share_rule.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [
"MIT"
] | null | null | null | tests/knowledge/rules/aws/context_aware/test_ec2_role_share_rule.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [
"MIT"
] | null | null | null | tests/knowledge/rules/aws/context_aware/test_ec2_role_share_rule.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [
"MIT"
] | null | null | null | import unittest
from cloudrail.knowledge.context.aws.aws_connection import PublicConnectionDetail, PolicyConnectionProperty, ConnectionDirectionType
from cloudrail.knowledge.context.aws.ec2.ec2_instance import Ec2Instance
from cloudrail.knowledge.context.aws.ec2.network_interface import NetworkInterface
from cloudrail.knowledge.context.aws.iam.role import Role
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.context_aware.ec2_role_share_rule import Ec2RoleShareRule
from cloudrail.knowledge.rules.base_rule import RuleResultType
from cloudrail.dev_tools.rule_test_utils import create_empty_entity
class TestEc2RoleShareRule(unittest.TestCase):
    """Tests for Ec2RoleShareRule: an IAM instance profile shared between a
    public and a private EC2 instance should be flagged."""

    def setUp(self):
        self.rule = Ec2RoleShareRule()

    @staticmethod
    def _create_context(private_profile_id, public_profile_id):
        """Build a context with one private EC2 (carrying an IAM role) and one
        public EC2 (made public via an inbound public connection on its ENI).

        Extracted to remove the duplicated Arrange section of both tests.
        """
        connection_detail = PublicConnectionDetail(PolicyConnectionProperty([]), ConnectionDirectionType.INBOUND)
        network_interface: NetworkInterface = create_empty_entity(NetworkInterface)
        network_interface.inbound_connections.add(connection_detail)
        private_ec2: Ec2Instance = create_empty_entity(Ec2Instance)
        private_ec2.iam_profile_id = private_profile_id
        role: Role = create_empty_entity(Role)
        role.role_name = 'iam_role'
        private_ec2.iam_role = role
        public_ec2: Ec2Instance = create_empty_entity(Ec2Instance)
        public_ec2.iam_profile_id = public_profile_id
        public_ec2.network_resource.network_interfaces.append(network_interface)
        return AwsEnvironmentContext(ec2s=[private_ec2, public_ec2])

    def test_ec2_role_share_rule_fail(self):
        # Arrange: both instances share the same IAM instance profile -> violation.
        context = self._create_context('iam_profile_id', 'iam_profile_id')
        # Act
        result = self.rule.run(context, {})
        # Assert
        self.assertEqual(RuleResultType.FAILED, result.status)
        self.assertEqual(1, len(result.issues))

    def test_ec2_role_share_rule_pass(self):
        # Arrange: distinct IAM instance profiles -> no violation.
        context = self._create_context('iam_profile_id1', 'iam_profile_id2')
        # Act
        result = self.rule.run(context, {})
        # Assert
        self.assertEqual(RuleResultType.SUCCESS, result.status)
        self.assertEqual(0, len(result.issues))
| 40.942857 | 132 | 0.759944 | 2,185 | 0.762387 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.045359 |
f097797de16f7deb2c318c5dc0e0e9f9d0347528 | 1,967 | py | Python | python/setup.py | rpiotrow/lolo | 54c9b065daccb1f617639dc8f5ff262eb572f626 | [
"Apache-2.0"
] | 34 | 2017-01-30T18:57:10.000Z | 2022-02-24T22:34:59.000Z | python/setup.py | rpiotrow/lolo | 54c9b065daccb1f617639dc8f5ff262eb572f626 | [
"Apache-2.0"
] | 121 | 2017-01-10T16:47:39.000Z | 2022-02-15T03:23:20.000Z | python/setup.py | rpiotrow/lolo | 54c9b065daccb1f617639dc8f5ff262eb572f626 | [
"Apache-2.0"
] | 14 | 2017-02-15T18:40:51.000Z | 2021-10-20T05:47:41.000Z | from setuptools import setup
from glob import glob
import shutil
import sys
import os
# single source of truth for package version
version_ns = {}
with open(os.path.join("lolopy", "version.py")) as f:
    exec(f.read(), version_ns)
version = version_ns['__version__']

# Find the lolo jar produced by the Scala build; lolopy bundles it.
JAR_FILE = glob(os.path.join('..', 'target', 'scala-2.13', 'lolo-jar-with-dependencies.jar'))
if len(JAR_FILE) == 0:
    raise Exception('No Jar files found. Build lolo first by calling "make" or "cd ..; sbt assembly"')
elif len(JAR_FILE) > 1:
    raise Exception('Found >1 Jar file. Clean and rebuild lolopy: cd ..; sbt assembly')

# Copy the jar file to a directory at the same level as the package
jar_path = os.path.join('lolopy', 'jar')
if os.path.isdir(jar_path):
    # Remove stale jar(s) left over from a previous build.
    shutil.rmtree(jar_path)
os.mkdir(jar_path)
shutil.copy(JAR_FILE[0], os.path.join(jar_path, 'lolo-jar-with-dependencies.jar'))

# Convert the README.md file to rst (rst is rendered by PyPi not MD)
# Taken from: https://github.com/apache/spark/blob/master/python/setup.py
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
    print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
    print("Could not convert - pandoc is not installed", file=sys.stderr)

# Make the installation
setup(
    name='lolopy',
    version=version,
    url='https://github.com/CitrineInformatics/lolo',
    maintainer='Max Hutchinson',
    maintainer_email='maxhutch@citrine.io',
    packages=[
        'lolopy',
        'lolopy.jar'  # Used for the PyPi packaging
    ],
    include_package_data=True,
    package_data={'lolopy.jar': ['*.jar']},
    install_requires=['scikit-learn', 'py4j'],
    description='Python wrapper for the Lolo machine learning library',
    long_description=long_description,
)
| 34.508772 | 102 | 0.708185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,024 | 0.52059 |
f0994ccd4067e97928229e99678562d13da50032 | 5,254 | py | Python | src/zebrafish_ec_migration/pipelines/cell_trajectory_analysis_pipeline/compute_trajectory_features.py | wgiese/zebrafish_ec_migration | 6c0e83716f4b9dcf8ca67528ae1efba031c75117 | [
"Apache-2.0"
] | null | null | null | src/zebrafish_ec_migration/pipelines/cell_trajectory_analysis_pipeline/compute_trajectory_features.py | wgiese/zebrafish_ec_migration | 6c0e83716f4b9dcf8ca67528ae1efba031c75117 | [
"Apache-2.0"
] | null | null | null | src/zebrafish_ec_migration/pipelines/cell_trajectory_analysis_pipeline/compute_trajectory_features.py | wgiese/zebrafish_ec_migration | 6c0e83716f4b9dcf8ca67528ae1efba031c75117 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from typing import Dict, List
import numpy as np
import pylab as plt
def compute_trajectory_features_set1(aligned_trajector_key_file: pd.DataFrame, parameters: Dict, start_time, end_time):
    """Build a per-timepoint feature table (step sizes, velocities) for every
    cell trajectory (link_id) of every fish / analysis-group combination.

    aligned_trajector_key_file holds one row per acquisition with paths to the
    per-acquisition 'object_data' and 'link_data' CSV files.
    parameters, start_time and end_time are accepted but not used in this body.
    Returns a DataFrame with one row per trajectory point.
    """
    features_df = pd.DataFrame()
    data_statistics_df = pd.DataFrame()  # NOTE(review): never filled; second return value is commented out below
    counter = 0  # NOTE(review): unused
    frame_interval = 12.0
    # One frame every 10 min, so 'frame_interval' frames span frame_interval/6 hours.
    time_interval = frame_interval*10.0/60.0
    for fish_number in aligned_trajector_key_file["fish_number"].unique():
        if (np.isnan(fish_number)):
            continue
        df_single_fish_all_groups = aligned_trajector_key_file[aligned_trajector_key_file['fish_number'] == fish_number]
        for analysis_group in df_single_fish_all_groups["analysis_group"].unique():
            df_single_fish = df_single_fish_all_groups[df_single_fish_all_groups["analysis_group"] == analysis_group]
            movement_data = pd.DataFrame(data=[],
                                         columns=["x", "y", "z", "frame", "link_id", "object_id", "vessel_type"])
            for index, row in df_single_fish.iterrows():
                object_data = pd.read_csv(row["object_data"])
                link_data = pd.read_csv(row["link_data"])
                # Join positions (object_data) with trajectory ids (link_data).
                movement_data = pd.merge(object_data, link_data, on='object_id')
                for link_id in movement_data["link_id"].unique():
                    movement_data_ = movement_data[movement_data["link_id"]==link_id]
                    # Displacement over 'frame_interval' frames and over single frames.
                    # NOTE(review): DataFrame.diff expects an int 'periods'; passing the
                    # float 12.0 only works on older pandas — confirm pinned version.
                    dist = movement_data_.diff(frame_interval).fillna(np.nan)
                    dist_step = movement_data_.diff(1).fillna(np.nan)
                    # NOTE(review): assignments below write into a slice of
                    # 'movement_data' (SettingWithCopyWarning territory).
                    movement_data_['step_size'] = np.sqrt(dist.x**2 + dist.y**2)
                    movement_data_['step_size_x'] = dist_step.x
                    movement_data_['step_size_y'] = dist_step.y
                    movement_data_['velocity_micron_per_h'] = np.sqrt(dist.x**2 + dist.y**2)/time_interval
                    # Ventral-dorsal component of the velocity (y axis).
                    movement_data_['vd_velocity_micron_per_h'] = dist.y/time_interval
                    #movement_data_['step_size_y'] = dist.y
                    movement_data_['fish_number'] = fish_number
                    movement_data_['vessel_type'] = row['vessel_type']
                    movement_data_['analysis_group'] = analysis_group
                    # Imaging starts at 24 hpf with one frame every 10 minutes.
                    movement_data_['time_in_hpf'] = 24.0 + 10.0 * (movement_data_['frame'] - 1)/60.0
                    movement_data_['time_in_min'] = 10 * (movement_data_['frame'] - 1)
                    if len(features_df.columns) > 1:
                        # DataFrame.append is deprecated in recent pandas — works here
                        # only with the pinned older version.
                        features_df = movement_data_.append(features_df)
                    else:
                        features_df = movement_data_.copy()
    return features_df#, data_statistics_df
def compute_trajectory_features_set2(aligned_trajector_key_file: pd.DataFrame, parameters: Dict, start_time, end_time):
    """Build a per-timepoint feature table (step sizes, velocities) for every
    cell trajectory of every fish / analysis-group combination.

    The original body was a line-for-line copy of
    compute_trajectory_features_set1.  Delegating removes the duplication and
    keeps the two public entry points behaviourally identical; any future fix
    now only has to be made once.

    Returns a DataFrame with one row per trajectory point (same contract as
    compute_trajectory_features_set1).
    """
    return compute_trajectory_features_set1(aligned_trajector_key_file,
                                            parameters, start_time, end_time)
f09aa0a3772d06c65a4b92ef0f3b86114c850ba5 | 22,421 | py | Python | scripts/beck/st/error_injection.py | papan-singh/cortx-motr-1 | 79351a56dffcb3968db9a6d4fee685a5443ed2f7 | [
"Apache-2.0"
] | null | null | null | scripts/beck/st/error_injection.py | papan-singh/cortx-motr-1 | 79351a56dffcb3968db9a6d4fee685a5443ed2f7 | [
"Apache-2.0"
] | 1 | 2022-02-03T09:51:48.000Z | 2022-02-03T09:51:48.000Z | scripts/beck/st/error_injection.py | papan-singh/cortx-motr-1 | 79351a56dffcb3968db9a6d4fee685a5443ed2f7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
import binascii
import sys
import os
import random
import argparse
import time
import logging
# Per-run log file, e.g. hole_creation_20200101-120000.log
timestr = time.strftime("%Y%m%d-%H%M%S")
log_filename = "hole_creation_" + timestr + ".log"
# Root logger at DEBUG, mirrored to the log file and to the console.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_filename)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# File entries carry timestamps; console output stays short.
fformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
cformatter = logging.Formatter('%(levelname)s : %(message)s')
fh.setFormatter(fformatter)
ch.setFormatter(cformatter)
logger.addHandler(fh)
logger.addHandler(ch)
logger.info("***** Script Started *****")
# Command-line interface: one injection mode is selected per invocation.
parser = argparse.ArgumentParser(description="Basic Arguments to run the script")
parser.add_argument('-rn', action='store_true', default=False, dest='random',
                    help='For inducing error at Random place')
parser.add_argument('-e', action='store', default=0, type=int, dest='noOfErr',
                    help='How Many number of error do you want to induce in Metadata')
parser.add_argument('-rt', action='store', dest='Record_Type',
                    help='Record Type For inducing error at particular record like:'
                         ' BE_BTREE, BE_EMAP, CAS_CTG etc')
parser.add_argument('-m', action='store', dest='mfile', help='Metadata Path')
parser.add_argument('-v', action='store_true', default=False, dest='verify',
                    help='Read full Metadata and Print all the Records entries counts')
parser.add_argument('-a', action='store_true', default=False, dest='allErr',
                    help='Induce Error in All Record at Random place')
parser.add_argument('-gmd', action='store_true', default=False, dest='allGMD',
                    help='Induce Error in All GMD type of Record at Random place')
parser.add_argument('-dmd', action='store_true', default=False, dest='allDMD',
                    help='Induce Error in All DMD type of Record at Random place')
parser.add_argument('-512k', action='store_true', default=False, dest='err512k',
                    help='Induce 512K bytes error in Metadata')
parser.add_argument('-huge', action='store_true', default=False, dest='hugeCorruption',
                    help='Induce Huge amount of corruption in Metadata')
parser.add_argument('-seed', action='store', default=0, type=float, dest='seed',
                    help='Seed is used to initialize the "random" library:'
                         ' to initialize the random generation')
parser.add_argument('-corrupt_emap', action='store', dest='corrupt_emap',
                    help='Induce Error in Emap specified by Cob Id')
parser.add_argument('-list_emap', action='store_true', default=False, dest='list_emap',
                    help='Display all Emap keys with device id')
parser.add_argument('-parse_size', action='store', dest='parse_size', type=int,
                    help='Limit for metadata parsing size in bytes for list_emap and verify option')
parser.add_argument('-offset', action='store', default=0, type=int, dest='seek_offset',
                    help='Starting offset of metadata file in multiple of 8 bytes')
args = parser.parse_args()
# (removed: a second, unused 'results = parser.parse_args()' call)

# Echo the effective configuration into the log for reproducibility.
logger.info('Induce Random Error              = {!r}'.format(args.random))
logger.info('Number of Error induce           = {!r}'.format(args.noOfErr))
logger.info('Record Type                      = {!r}'.format(args.Record_Type))
logger.info('Metadata file path               = {!r}'.format(args.mfile))
logger.info('Verify Record entries            = {!r}'.format(args.verify))
logger.info('Induce Error in All Record       = {!r}'.format(args.allErr))
logger.info('Induce Error in GMD Record       = {!r}'.format(args.allGMD))
logger.info('Induce Error in DMD Record       = {!r}'.format(args.allDMD))
logger.info('Induce 512k errors               = {!r}'.format(args.err512k))
logger.info('Induce huge errors               = {!r}'.format(args.hugeCorruption))
logger.info('Seed for random number           = {!r}'.format(args.seed))
logger.info('Induce Error in emap by Cob Id   = {!r}'.format(args.corrupt_emap))
logger.info('List all Emap Keys and Records   = {!r}'.format(args.list_emap))
logger.info('Limit for parsing size in bytes  = {!r}'.format(args.parse_size))
logger.info('Metadata seek offset in bytes multiple of 8 bytes = {!r}'.format(args.seek_offset))

filename = args.mfile
recordType = args.Record_Type
noOfErr = args.noOfErr

# A caller-provided seed makes an injection run reproducible.
if args.seed != 0:
    seed = args.seed
else:
    seed = time.time()
logger.info("Seed used: {}".format(seed))
random.seed(seed)
random_system = random.SystemRandom()

# Bug fix: the original used 'if not os.walk(filename)', but os.walk() returns
# a generator, which is always truthy, so the existence check could never fire.
if not (filename and os.path.exists(filename)):
    logger.error('Failed: The path specified does not exist or Missing file path')
    sys.exit(1)
# Every Motr metadata record is framed by these 8-byte magic words,
# stored little-endian on disk (hence the hexlified-reversed comparisons below).
# M0_FORMAT_HEADER_MAGIC = 0x33011ca5e511de77
header = b'33011ca5e511de77'
# M0_FORMAT_FOOTER_MAGIC = 0x33f007e7f007e777
footer = b'33f007e7f007e777'
# Record-type byte (from the header word) -> record name.
typeDict = {b'01': 'RPC_PACKET', b'02': 'RPC_ITEM', b'03': 'BE_BTREE',
            b'04': 'BE_BNODE', b'05': 'BE_EMAP_KEY', b'06': 'BE_EMAP_REC',
            b'07': 'BE_EMAP', b'08': 'BE_LIST', b'09': 'BE_SEG_HDR',
            b'0a': 'BALLOC', b'0b': 'ADDB2_FRAME_HEADER', b'0c': 'STOB_AD_0TYPE_REC',
            b'0d': 'STOB_AD_DOMAIN', b'0e': 'COB_DOMAIN', b'0f': 'COB_NSREC',
            b'10': 'BALLOC_GROUP_DESC', b'11': 'EXT', b'12': 'CAS_INDEX',
            b'13': 'POOLNODE', b'14': 'POOLDEV', b'15': 'POOL_SPARE_USAGE',
            b'16': 'CAS_STATE', b'17': 'CAS_CTG', b'22': 'WRONG_ENTRY', b'44': 'WRONG_ENTRY'}
# record name -> list of byte offsets found by read_metadata_file(); offsets of
# unknown record types are collected under 'EXTRA'.
recordDict = {'BE_BTREE': [], 'BE_BNODE': [], 'BE_EMAP_KEY': [], 'BE_EMAP_REC': [],
              'BE_EMAP': [], 'BE_LIST': [], 'BE_SEG_HDR': [], 'BALLOC': [],
              'STOB_AD_0TYPE_REC': [], 'STOB_AD_DOMAIN': [], 'COB_DOMAIN': [],
              'COB_NSREC': [], 'BALLOC_GROUP_DESC': [], 'EXT': [], 'POOLNODE': [],
              'POOLDEV': [], 'POOL_SPARE_USAGE': [], 'CAS_STATE': [], 'CAS_CTG': [], 'EXTRA': []}
AllRecordList = ['BE_BTREE', 'BE_BNODE', 'BE_EMAP_KEY', 'BE_EMAP_REC', 'BE_EMAP', 'BE_LIST',
                 'BE_SEG_HDR', 'BALLOC', 'STOB_AD_0TYPE_REC', 'STOB_AD_DOMAIN', 'COB_DOMAIN',
                 'COB_NSREC', 'BALLOC_GROUP_DESC', 'EXT', 'POOLNODE',
                 'POOLDEV', 'POOL_SPARE_USAGE', 'CAS_STATE', 'CAS_CTG', 'EXTRA']
# DMD = dynamic metadata records, GMD = global metadata records (used by -dmd/-gmd).
DMDList = ['BE_BNODE', 'BE_EMAP_KEY', 'BE_EMAP_REC', 'COB_NSREC', 'BALLOC_GROUP_DESC']
GMDList = ['BE_BTREE', 'BE_EMAP', 'BE_LIST', 'BE_SEG_HDR', 'BALLOC', 'STOB_AD_0TYPE_REC',
           'STOB_AD_DOMAIN', 'COB_DOMAIN', 'CAS_STATE', 'CAS_CTG']
# Low byte of a BE_BNODE's bli_type field -> btree kind.
btreeType = {b'01': 'M0_BBT_INVALID', b'02': 'M0_BBT_BALLOC_GROUP_EXTENTS',
             b'03': 'M0_BBT_BALLOC_GROUP_DESC', b'04': 'M0_BBT_EMAP_EM_MAPPING',
             b'05': 'M0_BBT_CAS_CTG', b'06': 'M0_BBT_COB_NAMESPACE',
             b'07': 'M0_BBT_COB_OBJECT_INDEX', b'08': 'M0_BBT_COB_FILEATTR_BASIC',
             b'09': 'M0_BBT_COB_FILEATTR_EA', b'0a': 'M0_BBT_COB_FILEATTR_OMG',
             b'0b': 'M0_BBT_CONFDB', b'0c': 'M0_BBT_UT_KV_OPS', b'0d': 'M0_BBT_NR'}
# BE_BNODE payload offset -> [bli_type offset, active-key-count offset], filled by RecordOffset().
BeBnodeTypeKeys = {}
def RecordOffset(record, i, size):
    """File the offset *i* under its record type in the global recordDict.

    Unknown record names land in the 'EXTRA' bucket.  For BE_BNODE records the
    offsets of the bli_type and active-key-count fields are additionally cached
    in BeBnodeTypeKeys for later inspection by ReadBeBNode().
    """
    bucket = recordDict.get(record)
    if bucket is None:
        recordDict['EXTRA'].append(i)
        return
    bucket.append(i)
    if record == "BE_BNODE":
        # [bli_type offset, active-key-count offset] relative to payload start
        BeBnodeTypeKeys[i] = [i + 16, i + 56]
def ReadTypeSize(byte):
    """Split a hexlified, byte-reversed header word into (type, size).

    Layout of the 16 hex digits: 0001(version) 0009(type) 00003dd8(size),
    mirroring ot_version / ot_type / ot_size packed into hd_bits.
    Returns the type (2 hex digits) and size (8 hex digits) as bytes.
    """
    record_type = byte[6:8]    # hd_bits >> 32 & 0x0000ffff
    record_size = byte[8:16]   # hd_bits & 0xffffffff
    return record_type, record_size
def EditMetadata(offset):
    """Overwrite 8 bytes at *offset* in the metadata file with the fixed
    corruption pattern 0x1111222244443333 (written little-endian), then
    re-read the location for logging/verification."""
    with open(filename, 'r+b') as handle:
        logger.info("** Corrupting 8byte of Metadata at offset {}"
                    " with b'1111222244443333' **".format(offset))
        handle.seek(offset)
        handle.write(b'\x33\x33\x44\x44\x22\x22\x11\x11')
        handle.seek(offset)
        ReadMetadata(offset)
def ReadMetadata(offset):
    """Read the 8-byte word at *offset* of the metadata file.

    Returns (is_footer, word) where *word* is the hexlified little-endian
    representation and *is_footer* tells whether it equals the footer magic.
    """
    with open(filename, "rb") as handle:
        handle.seek(offset)
        word = binascii.hexlify((handle.read(8))[::-1])
    return (word == footer), word
def ReadCompleteRecord(offset):
    """Collect consecutive 8-byte words starting at *offset* until the footer
    magic is reached.

    Returns (words, footer_offset): the record payload as a list of '0x…' hex
    strings (footer excluded) and the byte offset of the footer word.
    """
    words = []
    is_footer, word = ReadMetadata(offset)
    while not is_footer:
        words.append(word.decode('utf-8'))
        offset += 8  # advance to the next 8-byte word
        is_footer, word = ReadMetadata(offset)
    return [hex(int(w, 16)) for w in words], offset
def ReadBeBNode(offset):
    """Log the bli_type and active-key count of the BE_BNODE record whose
    payload starts at *offset* (field offsets were cached in BeBnodeTypeKeys
    by RecordOffset)."""
    llist = BeBnodeTypeKeys[offset]  # [bli_type offset, active-key-count offset]
    with open(filename, "rb") as mdata:
        mdata.seek(llist[0])
        data = binascii.hexlify((mdata.read(8))[::-1])
        data = data[14:16]  # low byte of the word carries the btree kind
        logger.info("bli_type of BE_BNODE is: {0}: {1}".format( data, btreeType[data]))
        mdata.seek(llist[1])
        data = binascii.hexlify((mdata.read(8))[::-1])
        data = data[8:16]  # low 4 bytes carry the active key count
        logger.info("Active key count of BE_BNODE is: {}".format( int(data,16)))
def InduceCorruption(recordType, noOfErr):
    """Corrupt *noOfErr* records of type *recordType*, walking the offsets
    collected by read_metadata_file() in order.

    Returns the number of corruptions performed (0 when the record list is
    empty or shorter than *noOfErr*).
    """
    count = 0
    read_metadata_file()  # populates recordDict with offsets per record type
    logger.info(recordType)
    logger.info("Number of Error want to induce: {}".format(noOfErr))
    lookupList = recordDict[recordType]
    # Equivalent to: len(lookupList) == 0 or noOfErr == 0 (short-circuit 'and'
    # yields 0 when either operand is 0).
    if (len(lookupList) and noOfErr) == 0:
        logger.error("Record List is empty. Please choose another Record")
        count = 0
        return count
    elif len(lookupList) < noOfErr:
        logger.error(
            " Record List contains Less number of entries than input."
            " Please reduce the number of Error Injection")
        count = 0
        return count
    else:
        logger.info(lookupList)
        logger.info("**** Inducing {} Error in Record: {} ****".format(noOfErr, recordType))
        for i in range(noOfErr):
            offset = lookupList[i] # Please add offset here for starting from middle of offset list
            # +8 skips past the header word into the record payload.
            ReadMetadata(offset + 8)
            EditMetadata(offset + 8)
            if recordType == "BE_BNODE":
                ReadBeBNode(offset)
            count = count + 1
    return count
def InduceRandomCorruption(noOfErr):
    """Pick record types at random until one has at least *noOfErr* offsets,
    then corrupt *noOfErr* randomly sampled records of that type.

    Returns the number of corruptions performed.
    """
    count = 0
    read_metadata_file()  # populates recordDict with offsets per record type
    while 1:
        recType = random_system.choice(list(recordDict))
        logger.info("+++ Picked a Random Record from Dictionary Record type:{}+++".format(recType))
        logger.info("Number of Error want to induce: {}".format(noOfErr))
        lookupList = recordDict[recType]
        logger.info(lookupList)
        if (len(lookupList) == 0) or (len(lookupList) < noOfErr):
            # Try another record type on the next spin of the while-loop.
            logger.info("Record List is empty OR contains Less number of entries than input."
                        " Going to next Record")
        else:
            lookupList = random_system.sample(lookupList, noOfErr)
            logger.info(lookupList)
            for i in range(noOfErr):
                offset = lookupList[i]
                logger.info("**** Inducing RANDOM Error in Record at offsets: {}****"
                            .format(hex(offset + 8)))
                ReadMetadata(offset + 8) # Read original
                EditMetadata(offset + 8) # Modify
                ReadMetadata(offset + 8) # Verify
                count = count + 1
            break
    return count
def InduceErrInRecords(recList):
    """For every record type in *recList*, corrupt a random non-zero sample of
    its records (sample size chosen uniformly between 1 and the list length).

    Returns the total number of corruptions performed.
    """
    count = 0
    read_metadata_file()  # populates recordDict with offsets per record type
    logger.info("++++ Induce Random number of errors in All Records ++++")
    for recType in recList:
        logger.info("Record Name: {}".format(recType))
        lookupList = recordDict[recType]
        length = len(lookupList)
        if length == 0:
            logger.info("Record List is empty. Moving to Next Record")
        else:
            lookupList = random_system.sample(lookupList,
                                              random_system.randint(1, length))
            logger.info("Inducing {} Error at these offsets".format(len(lookupList)))
            logger.info(lookupList)
            for offset in lookupList:
                logger.info("**** Inducing Error in Record at offsets {}****"
                            .format(hex(offset + 8)))
                ReadMetadata(offset + 8) # Read original
                EditMetadata(offset + 8) # Modify
                ReadMetadata(offset + 8) # Verify
                count = count + 1
    return count
def InduceHugeError():
    """Corrupt Metadata file from a random start offset until end of file,
    8 bytes at a time.  Returns the number of 8-byte words corrupted."""
    count = 0
    with open(filename, 'r+b') as wbfr:
        logger.info("** Corrupting 8byte of Metadata with b'1111222244443333' all place")
        wbfr.seek(-1, os.SEEK_END)
        endOffset = wbfr.tell()
        offset = random_system.randint(1, endOffset)
        logger.info("Start offset is {}".format(offset))
        while 1:
            offset = offset + 8
            wbfr.seek(offset)
            byte = wbfr.read(8)
            if not byte:
                # read() past EOF returns b'' -> done.
                break
            else:
                # EditMetadata re-opens the file for each write.
                EditMetadata(offset + 8)
                count = count + 1
    return count
def Induce512kbError():
    """Corrupt a 512 KiB (524288-byte) stretch of the metadata file starting
    at a random offset, 8 bytes at a time.  Returns the corruption count."""
    count = 0
    j = 0  # bytes corrupted so far; stops the loop at 512 KiB
    with open(filename, 'r+b') as wbfr:
        # Pick the start far enough from EOF that a full 512 KiB window fits.
        wbfr.seek(-524400, os.SEEK_END) # Took a bigger number than 512k
        endOffset = wbfr.tell()
        offset = random_system.randint(1, endOffset)
        logger.info("Start offset is {}".format(offset))
        while 1:
            offset = offset + 8
            j = j + 8
            wbfr.seek(offset)
            byte = wbfr.read(8)
            if not byte:
                break
            else:
                if j > 524288:
                    break
                else:
                    EditMetadata(offset)
                    count = count + 1
    return count
def ConvertAdstob2Cob(stob_f_container, stob_f_key):
    """Translate an AD-stob fid (two hex strings) into the matching cob fid.

    Mirrors m0_fid_tassume(): the top byte of the container word is replaced
    by the cob type id (ASCII 'C' = 67).  The device id occupies bits 32..55
    of the resulting container word.

    Returns (cob_f_container, cob_f_key, device_id) as ints.
    """
    DEVICE_ID_OFFSET = 32
    DEVICE_ID_MASK = 72057589742960640   # 0x00FFFFFF00000000
    TYPE_MASK = 72057594037927935        # 0x00FFFFFFFFFFFFFF
    cob_type_id = 67  # ASCII 'C'
    container = (cob_type_id << 56) | (int(stob_f_container, 16) & TYPE_MASK)
    key = int(stob_f_key, 16)
    device_id = (container & DEVICE_ID_MASK) >> DEVICE_ID_OFFSET
    return container, key, device_id
def ConvertCobAdstob(cob_f_container, cob_f_key):
    """Translate a cob fid (two hex strings) into the matching AD-stob fid.

    Mirrors m0_fid_tassume(): the top byte of the container word is replaced
    by the AD-stob type id (STOB_TYPE_AD = 0x02).

    Returns (stob_f_container, stob_f_key) as ints.
    """
    TYPE_MASK = 72057594037927935  # 0x00FFFFFFFFFFFFFF
    stob_type_id = 2               # STOB_TYPE_AD
    container = (stob_type_id << 56) | (int(cob_f_container, 16) & TYPE_MASK)
    return container, int(cob_f_key, 16)
def CorruptEmap(recordType, stob_f_container, stob_f_key):
    """Corrupt the checksum word of every BE_EMAP_REC whose BE_EMAP_KEY
    matches the given AD-stob fid (container/key as ints).

    Keys containing 0xffffffffffffffff are skipped; records with er_cs_nob
    == 0 carry no checksum and are skipped as well.
    Returns the number of records corrupted.
    """
    count = 0
    read_metadata_file()  # populates recordDict with BE_EMAP_KEY offsets
    lookupList = recordDict[recordType]
    # logger.info("Offset List of {} = {} ".format(recordType, lookupList))
    logger.info("*****Corrupting BE_EMAP_KEY for Cob ID {}*****".format(args.corrupt_emap))
    for offset in lookupList:
        emap_key_data, offset = ReadCompleteRecord(offset)
        if (hex(stob_f_container) in emap_key_data) and\
                (hex(stob_f_key) in emap_key_data) and \
                ("0xffffffffffffffff" not in emap_key_data):
            # 16 bytes of BE_EMAP_KEY (footer) + 16 bytes of BE_EMAP_REC(header)
            # gives offset of corresponding BE_EMAP_REC
            rec_offset = offset + 32
            emap_rec_data, rec_offset = ReadCompleteRecord(rec_offset)
            # Check er_cs_nob and if it is not 0 then go and corrupt last checksum 8 bytes
            if emap_rec_data[3] != "0x0":
                logger.info("** Metadata at offset {},"
                            " BE_EMAP_KEY ek_prefix = {}:{},"
                            " ek_offset = {}".format(offset-24,
                            emap_key_data[0], emap_key_data[1], emap_key_data[2]))
                logger.info("** Metadata at offset {},"
                            " BE_EMAP_REC er_start = {},"
                            " er_value = {}, er_unit_size = {},"
                            " er_cs_nob = {}, checksum = {}".format(
                            offset+32, emap_rec_data[0], emap_rec_data[1],
                            emap_rec_data[2], emap_rec_data[3], emap_rec_data[4:]))
                # rec_offset points at the record footer; -8 is the last
                # checksum word of the record payload.
                EditMetadata(rec_offset-8)
                count = count + 1
                print()
    return count
def ListAllEmapPerDevice():
    """Walk every BE_EMAP_KEY found by read_metadata_file() and log the key,
    its matching BE_EMAP_REC and the device id decoded from the stob fid."""
    logger.info("*****Listing all emap keys and emap records with device id*****")
    recordType = "BE_EMAP_KEY"
    read_metadata_file()  # populates recordDict with BE_EMAP_KEY offsets
    lookupList = recordDict[recordType]
    # logger.info(lookupList)
    for offset in lookupList:
        print()
        emap_key_data , offset = ReadCompleteRecord(offset)
        # First two payload words are the ek_prefix (AD-stob container:key).
        stob_f_container_hex = emap_key_data[0]
        stob_f_key_hex = emap_key_data[1]
        _, _, device_id = ConvertAdstob2Cob(stob_f_container_hex, stob_f_key_hex)
        # 16 bytes of BE_EMAP_KEY (footer) + 16 bytes of BE_EMAP_REC(header)
        # gives offset of Corresponding BE_EMAP_REC
        emap_rec_offset = offset + 32
        emap_rec_data, _ = ReadCompleteRecord(emap_rec_offset)
        logger.info("** Metadata at offset {},"
                    " BE_EMAP_KEY ek_prefix = {}:{},"
                    " ek_offset = {}, Device ID = {}".format(offset,
                    emap_key_data[0], emap_key_data[1], emap_key_data[2], device_id))
        logger.info("** Metadata at offset {},"
                    " BE_EMAP_REC er_start = {},"
                    " er_value = {}, er_unit_size = {},"
                    " er_cs_nob = {}, checksum = {}"
                    .format(emap_rec_offset, emap_rec_data[0],
                            emap_rec_data[1], emap_rec_data[2],
                            emap_rec_data[3], emap_rec_data[4:]))
def VerifyLengthOfRecord(recordDict):
    """Parse the metadata file, then log how many offsets were collected for
    each record type.  Returns the number of record types reported."""
    count = 0
    read_metadata_file()
    logger.info("***********Record list will be print here************")
    for record_name, offsets in recordDict.items():
        logger.info(" {} : {}".format(record_name, len(offsets)))
        count += 1
    return count
def read_metadata_file():
    """Scan the metadata file for header magic words and file each record's
    payload offset into the global recordDict (via RecordOffset).

    Starts at args.seek_offset and, unless -v/-list_emap is given (optionally
    bounded by -parse_size), stops after a fixed byte budget so full-disk
    scans do not run for hours.
    """
    with open(filename, "rb") as metadata:
        i: int = 0  # current file offset in bytes
        metadata.seek(args.seek_offset)
        while 1:
            byte = metadata.read(8)
            i = i + 8
            if not byte:
                break
            # On-disk words are little-endian; reverse before hexlifying.
            byte = binascii.hexlify(byte[::-1])
            if byte == header:
                byte = binascii.hexlify((metadata.read(8))[::-1])  # Read the Type Size Version
                rtype, size = ReadTypeSize(byte)
                if rtype not in typeDict.keys():
                    continue
                record = typeDict[rtype]
                i = i + 8
                if size > b'00000000':
                    RecordOffset(record, i, size)
                    # Skip straight past this record's payload (size covers
                    # header+footer, hence the -16).
                    i = int(size, 16) + i - 16
                    metadata.seek(i)
            # Not parsing the whole file for few test
            # as It will take many hours, depending on metadata size
            if (args.verify == True) or (args.list_emap == True):
                if args.parse_size:
                    if i > args.parse_size:  # honour the -parse_size byte limit
                        break
                else:
                    pass  # -v/-list_emap without -parse_size reads the whole file
            else:
                if i > 111280000:  # Increase this number for reading more location in metadata
                    break
# --- Top-level dispatch: exactly one injection/inspection mode runs per invocation ---
noOfErrs = 0
if args.err512k:
    noOfErrs = Induce512kbError()
elif args.hugeCorruption:
    noOfErrs = InduceHugeError()
elif args.random:
    noOfErrs = InduceRandomCorruption(noOfErr)
elif recordType:
    noOfErrs = InduceCorruption(recordType, noOfErr)
elif args.verify:
    noOfErrs = VerifyLengthOfRecord(recordDict)
elif args.allErr:
    noOfErrs = InduceErrInRecords(AllRecordList) #InduceErrInAllRecord()
elif args.allGMD:
    noOfErrs = InduceErrInRecords(GMDList) #InduceErrInGMDRecords()
elif args.allDMD:
    noOfErrs = InduceErrInRecords(DMDList) #InduceErrInDMDRecords()
elif args.corrupt_emap:
    # '-corrupt_emap <container>:<key>' takes a cob fid in hex; convert it to
    # the AD-stob fid that is actually stored inside BE_EMAP_KEY records.
    _f_container, _f_key = args.corrupt_emap.split(":")
    cob_f_container = hex(int(_f_container, 16))
    cob_f_key = hex(int(_f_key, 16))
    stob_f_container, stob_f_key = ConvertCobAdstob(cob_f_container, cob_f_key)
    noOfErrs = CorruptEmap("BE_EMAP_KEY", stob_f_container, stob_f_key)
elif args.list_emap:
    ListAllEmapPerDevice()
# -v only reports record counts, so the injection summary is skipped for it.
if not args.verify:
    logger.info("Number of errors induced by script: {}".format(noOfErrs))
    if noOfErrs > 0:
        logger.info("**** Successfully injected holes in metadata ****")
    else:
        logger.error("**** Failed to inject holes in metadata ****")
| 41.908411 | 134 | 0.611659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,411 | 0.375139 |
f09b0ac82d16a4658253e26c01ef5ddc01555542 | 703 | py | Python | vaksinasi/migrations/0003_auto_20211029_0123.py | harmonica-pacil/invid19 | b4986f3375721deb02c9b9b8c982cd4e426c423c | [
"Unlicense"
] | 1 | 2021-12-27T12:50:05.000Z | 2021-12-27T12:50:05.000Z | vaksinasi/migrations/0003_auto_20211029_0123.py | harmonica-pacil/invid19 | b4986f3375721deb02c9b9b8c982cd4e426c423c | [
"Unlicense"
] | null | null | null | vaksinasi/migrations/0003_auto_20211029_0123.py | harmonica-pacil/invid19 | b4986f3375721deb02c9b9b8c982cd4e426c423c | [
"Unlicense"
] | null | null | null | # Generated by Django 3.2.8 on 2021-10-28 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: re-declares the unique CharFields
    # Pendaftar.NIK (16 chars) and Vaksin.kode (3 chars) with custom
    # 'unique' error messages.

    dependencies = [
        ('vaksinasi', '0002_alter_pendaftar_nik'),
    ]

    operations = [
        migrations.AlterField(
            model_name='pendaftar',
            name='NIK',
            # NOTE(review): the 'unique' message mentions an email although the
            # field is a 16-digit NIK — looks copy-pasted; confirm against the
            # model before reusing (same for 'kode' below).
            field=models.CharField(error_messages={'unique': 'This email has already been registered'}, max_length=16, unique=True),
        ),
        migrations.AlterField(
            model_name='vaksin',
            name='kode',
            field=models.CharField(error_messages={'unique': 'This email has already been registered'}, max_length=3, unique=True),
        ),
    ]
| 29.291667 | 132 | 0.618777 | 610 | 0.86771 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.29872 |
f09c5c08205d285d8d7e7dce3b7328bf5dab9194 | 1,991 | py | Python | classes/stack.py | EashanKaushik/Data-Structures | e5bd391e029cb47e650d5665647ff57590b9b343 | [
"MIT"
] | null | null | null | classes/stack.py | EashanKaushik/Data-Structures | e5bd391e029cb47e650d5665647ff57590b9b343 | [
"MIT"
] | null | null | null | classes/stack.py | EashanKaushik/Data-Structures | e5bd391e029cb47e650d5665647ff57590b9b343 | [
"MIT"
] | null | null | null | # node class for develping linked list
# node class for developing linked list
class Node:
    """Singly linked node holding one payload and a link to the next node."""

    def __init__(self, data=None, pointer=None):
        self.data = data        # payload stored in this node
        self.pointer = pointer  # next node, towards the bottom of the stack

    def set_data(self, data):
        self.data = data

    def get_data(self):
        return self.data

    def set_pointer(self, pointer):
        self.pointer = pointer

    def get_pointer(self):
        return self.pointer

    def __str__(self):
        return f'(data: {self.data} & pointer: {self.pointer})'


class Stack:
    """LIFO stack backed by a singly linked list of Node objects."""

    def __init__(self, buttom=None, top=None):
        self.buttom = buttom  # oldest node (bottom of the stack)
        self.top = top        # newest node (top of the stack)

    # push operation
    def push(self, data):
        """Push *data* on top of the stack; returns self so calls can chain."""
        if self.buttom is None:
            self.buttom = self.top = Node(data)
        else:
            self.top = Node(data, self.top)
        return self

    # pop operation
    def pop(self):
        """Remove and return the top element, or None when the stack is empty."""
        if self.top is None:
            return None
        data = self.top.get_data()
        self.top = self.top.get_pointer()
        if self.top is None:
            # Bug fix: previously 'buttom' kept pointing at the popped node,
            # so a later push chained new nodes onto stale, removed nodes.
            self.buttom = None
        return data

    # peek operation
    def peek(self):
        """Return the top element without removing it (None when empty).

        Bug fix: previously raised AttributeError on an empty stack.
        """
        if self.top is None:
            return None
        return self.top.get_data()

    # returns stack as list
    def as_list(self):
        """Return the elements top-to-bottom as a plain list.

        Bug fix: previously crashed on an empty stack; now returns [].
        """
        items = []
        node = self.top
        while node is not None:
            items.append(node.get_data())
            node = node.get_pointer()
        return items

    # returns True if stack empty and False if its not
    def is_empty(self):
        return self.top is None

    def __str__(self):
        return f'top: {self.top} & buttom: {self.buttom}'
stack = Stack()
stack.push('Google')
stack.push('Udemy')
stack.push('Facebook')
# print(stack.peek())
stack.pop()
print(stack.peek())
print(stack.as_list())
| 21.641304 | 64 | 0.547464 | 1,710 | 0.858865 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.156203 |
f09cb2a4e4c26044201ef4e090ad7fb351584737 | 2,562 | py | Python | demo.py | jjegg01/abp.spherical2d.pairdistribution | 0bf26cda1190239e66050a747aed0a7999e12fa5 | [
"MIT"
] | null | null | null | demo.py | jjegg01/abp.spherical2d.pairdistribution | 0bf26cda1190239e66050a747aed0a7999e12fa5 | [
"MIT"
] | null | null | null | demo.py | jjegg01/abp.spherical2d.pairdistribution | 0bf26cda1190239e66050a747aed0a7999e12fa5 | [
"MIT"
] | null | null | null | #!/usb/bin/env python3
"""Demo script for abp.spherical2d.pairdistribution module."""
__author__ = "Julian Jeggle, Raphael Wittkowski"
__copyright__ = "Copyright (C) 2019 Julian Jeggle, Raphael Wittkowski"
__license__ = "MIT"
__version__ = "1.0"
import argparse
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import abp.spherical2d.pairdistribution as abp2dpd
# -- Parse args --
# -- Parse args --
parser = argparse.ArgumentParser(
    description=r"""Display the pair-distribution function for a given particle
    distance, Peclet number and packing density. The default values are the
    same as in Fig. 3 of the accompanying article by J. Jeggle,
    J. Stenhammar and R. Wittkowski.""")
parser.add_argument(
    "-r", metavar="dist", dest="dist", type=float, default=1.0,
    help="Particle distance in multiples of sigma (default: 1)")
parser.add_argument(
    "-d", metavar="phi0", dest="phi0", type=float, default=0.2,
    help="Packing density (default: 0.2)")
parser.add_argument(
    "-p", metavar="peclet", dest="peclet", type=float, default=50.0,
    help="Peclet number (default: 50)")
args = parser.parse_args()

# Validate args: warn (but do not abort) outside the approximation's domain.
r_min = 0.7775
r_max = 2**(1/6)
if args.dist < r_min or args.dist > r_max:
    print("Warning: Distance is outside of approximation bounds!")
if args.peclet < 0:
    print("Warning: Unphysical argument for Peclet number")
if args.phi0 < 0 or args.phi0 > 1:
    print("Warning: Unphysical argument for packing density")

# -- Calculate pair-distribution function --
# Generate arrays for r, phi1 and phi2
phi1 = np.linspace(-np.pi,np.pi,180,endpoint=False)
phi2 = np.linspace(0,2*np.pi,180,endpoint=False)
phi1,phi2 = np.meshgrid(phi1, phi2)
r = args.dist # Just take a single distance
# Calculate -gU'_2
gU2 = abp2dpd.reconstruct_mgU2prime(r, phi1, phi2, args.phi0, args.peclet)[0]
# Divide by U'_2 to obtain the pair-distribution function g
g = -gU2/abp2dpd.getU2prime(args.dist)

# -- Plotting code --
fig, ax = plt.subplots(1)
# (removed an unused scratch array 'test = np.zeros((11,100))' left over from debugging)
cax = ax.imshow(g.T, cmap="inferno", origin="lower",
                extent=(0,g.shape[0],0,g.shape[0]))
cbar = fig.colorbar(cax)
cbar.set_label("$g$")
ax.set_xlabel(r"$\phi_1$")
ax.set_ylabel(r"$\phi_2$")
# Pixel-index axes relabelled to the physical angle ranges.
ax.set_xticks([0,g.shape[0]//2,g.shape[0]])
ax.set_xticklabels([r"$-\pi$",r"0",r"$\pi$"])
ax.xaxis.set_minor_locator(MultipleLocator(g.shape[0]//4))
ax.set_yticks([0,g.shape[0]//2,g.shape[0]])
ax.set_yticklabels(["0",r"$\pi$",r"$2\pi$"])
ax.yaxis.set_minor_locator(MultipleLocator(g.shape[0]//4))
plt.show()
| 29.790698 | 79 | 0.704528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,084 | 0.423107 |
f09cd4790660d9a86da869eaae87a9f25e67fb79 | 12,481 | py | Python | pdfMerge/pdf_merge.py | R3CEPT0R/pdfuse | beeb4f9ef42d49728653752094563d3e0bcef68f | [
"MIT"
] | null | null | null | pdfMerge/pdf_merge.py | R3CEPT0R/pdfuse | beeb4f9ef42d49728653752094563d3e0bcef68f | [
"MIT"
] | null | null | null | pdfMerge/pdf_merge.py | R3CEPT0R/pdfuse | beeb4f9ef42d49728653752094563d3e0bcef68f | [
"MIT"
] | null | null | null | import PyPDF2
import fitz
import io
from PIL import Image
import os
import random
import string
import pikepdf
from tqdm import tqdm
from zipfile import ZipFile
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
from pathlib import Path
"""
Converts entire PDF to set of disjoint
PNG images and returns as .zip
"""
def toPNG(pdf_path):
pdffile = pdf_path
doc = fitz.open(pdffile)
N = 12
res = ''.join(random.choices(string.ascii_uppercase +
string.digits, k=N))
zipname=res+'.zip'
with ZipFile(zipname, 'w') as zipper :
for i in range(len(doc)):
page = doc.loadPage(i) # number of page
# to keep pdf quality preserved for image
zoom = 2 # zoom factor
mat = fitz.Matrix(zoom, zoom)
pix = page.getPixmap(matrix=mat)
# saving
index=i+1
tmp='page'+str(index)
filename=tmp+'.png'
pix.writePNG(filename)
zipper.write(filename)
os.remove(filename)
"""
Converts PDF to JPG images and returns as .zip
"""
def toJPG(pdf_path):
pdffile = pdf_path
doc = fitz.open(pdffile)
N = 12
res = ''.join(random.choices(string.ascii_uppercase +
string.digits, k=N))
zipname=res+'.zip'
with ZipFile(zipname, 'w') as zipper :
for i in range(len(doc)):
page = doc.loadPage(i) # number of page
# to keep pdf quality preserved for image
zoom = 2 # zoom factor
mat = fitz.Matrix(zoom, zoom)
pix = page.getPixmap(matrix=mat)
# saving
index=i+1
tmp='page'+str(index)
filename=tmp+'.jpg'
pix.writePNG(filename)
zipper.write(filename)
os.remove(filename)
"""
Converts singular image to a PDF
"""
def toPDF(image,path):
x=image.split('.')
filename=str(x[0])+'.pdf'
tmp=Image.open(path+"\\"+image,mode='r')
tmp1=tmp.convert('RGB')
tmp1.save(path+'\\'+filename)
"""
Merges all PDFs given, if the given file is not a
PDF, but an image, then it is converted to PDF
"""
def merge(path,filename):
filename+='.pdf'
if os.path.exists(path):
for i in os.listdir(path):
if not i.endswith('pdf'):
toPDF(i,path)
x=[i for i in os.listdir(path) if i.endswith('.pdf')]
print(x)
merger=PyPDF2.PdfFileMerger()
for i in x:
merger.append(open(path+'\\'+i,'rb'))
with open("out\\"+filename,"wb") as handler:
merger.write(handler)
else:
print("Invalid Path")
"""
Parses all pages from PDF and returns a .zip of all individual files
"""
def parseAll(pdf_path):
inputpdf = PyPDF2.PdfFileReader(open(pdf_path, "rb"))
N = 12
res = ''.join(random.choices(string.ascii_uppercase +string.digits, k=N))
zipname=res+'.zip'
with ZipFile(zipname, 'w') as zipper :
for i in range(inputpdf.numPages) :
output = PyPDF2.PdfFileWriter()
output.addPage(inputpdf.getPage(i))
index=str(i+1)
filename="page"+index+'.pdf'
with open(filename, "wb") as stream :
output.write(stream)
zipper.write(filename)
os.remove(filename)
"""
Extracts specified PDF pages (given in 2nd argument as a list)
And returns a zip containing the specified pages
"""
def extract(pdf_path,lst):
inputpdf = PyPDF2.PdfFileReader(open(pdf_path, "rb"))
N = 12
res = ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))
zipname = res + '.zip'
with ZipFile(zipname,"w") as zipper:
for i in range(inputpdf.numPages):
if(i in lst):
output=PyPDF2.PdfFileWriter()
output.addPage(inputpdf.getPage(i))
index=str(i+1)
filename="page"+index+'.pdf'
with open(filename,"wb") as handler:
output.write(handler)
zipper.write(filename)
os.remove(filename)
"""
Deletes specified pages in lst and returns a single
PDF excluding given pages
"""
def delete(pdf_path,lst):
inputpdf = PyPDF2.PdfFileReader(open(pdf_path, "rb"))
output = PyPDF2.PdfFileWriter()
for i in range(inputpdf.getNumPages()) :
if i not in lst :
p = inputpdf.getPage(i)
output.addPage(p)
with open('newfile.pdf', 'wb') as f :
output.write(f)
"""
Rearranges pages in PDF and merges them back together
by creating a new PDF and appending to it in the order
specified in lst (same length is assumed between
lst and number of pages in pdf_path)
"""
def rearrange(pdf_path,lst):
inputpdf=PyPDF2.PdfFileReader(open(pdf_path,"rb"))
output=PyPDF2.PdfFileWriter()
for i in lst:
index=i-1 # to account for +1 offset
p=inputpdf.getPage(index)
output.addPage(p)
with open("rearranged.pdf","wb") as handler:
output.write(handler)
"""
Adds page numbers to given PDF file and returns this new file.
There are 6 positions to choose from: BC, BL, BR, TC, TL TR
which essentially correspond to bottom-center, bottom-left,
bottom-right, top-center, etc...
"""
def addPageNumber(pdf_path,position):
filename=Path(pdf_path).resolve().stem+"-numbered.pdf"
if (position == "BC") :
x = 300
y = 20
elif (position == "BL") :
x = 30
y = 20
elif (position == "BR") :
x = 580
y = 20
elif (position == "TL") :
x = 30
y = 760
elif (position == "TC") :
x = 300
y = 760
else : # TR
x = 580
y = 760
output = PyPDF2.PdfFileWriter()
existing_pdf = PyPDF2.PdfFileReader(open(pdf_path, "rb"))
for i in range(existing_pdf.getNumPages()):
packet = io.BytesIO()
index=str(i+1)
# create a new PDF with Reportlab
can = canvas.Canvas(packet, pagesize=letter)
if position[0] == 'B' :
can.drawCentredString(x, y, index)
else :
can.drawCentredString(x, y, index)
can.save()
#move to the beginning of the StringIO buffer
packet.seek(0)
new_pdf = PyPDF2.PdfFileReader(packet)
# add the "watermark" (which is the new pdf) on the existing page
page = existing_pdf.getPage(i)
page.mergePage(new_pdf.getPage(0))
output.addPage(page)
# save the resulting PDF
with open(filename,"wb") as handler:
output.write(handler)
"""
Rotates all pages in a given PDF by "deg" degrees
in the specified direction given ("CW or CCW")
"""
def rotateAll(pdf_path,deg,direction):
pdf_writer = PyPDF2.PdfFileWriter()
pdf_reader = PyPDF2.PdfFileReader(pdf_path)
filename = Path(pdf_path).resolve().stem + "-rotated.pdf"
for i in range(pdf_reader.getNumPages()):
try:
# Rotate page
if direction=="CW":
tmp = pdf_reader.getPage(i).rotateClockwise(deg)
elif direction=="CCW":
tmp = pdf_reader.getPage(i).rotateCounterClockwise(deg)
else:
return -1
pdf_writer.addPage(tmp)
except:
print("Please make sure deg is multiple of 90")
with open(filename, 'wb') as handler:
pdf_writer.write(handler)
"""
Rotates a given page CW or CCW and returns a
newly generated PDF file after modifying with the
given config parameters
"""
def rotatePage(pdf_path,page,deg,direction):
result_pdf = PyPDF2.PdfFileWriter()
pdf = PyPDF2.PdfFileReader(pdf_path)
filename = Path(pdf_path).resolve().stem + "-rotate.pdf"
for i in range(pdf.getNumPages()):
if(page==i+1):
if direction=="CW":
tmp=pdf.getPage(i).rotateClockwise(deg)
else:
tmp=pdf.getPage(i).rotateCounterClockwise(deg)
result_pdf.addPage(tmp)
else:
result_pdf.addPage(pdf.getPage(i))
with open(filename,"wb") as handler:
result_pdf.write(handler)
"""
Rotates multiple pages in a PDF by varying degree and
direction. Input PDF path is supplied and a list of
dictionaries is given for each query (page,deg,direction)
"""
def rotatePages(pdf_path,lst):
result_pdf = PyPDF2.PdfFileWriter()
pdf = PyPDF2.PdfFileReader(pdf_path)
filename = Path(pdf_path).resolve().stem + "-rotation.pdf"
pages=[]
for i in lst:
pages.append(i['page'])
for i in range(pdf.getNumPages()) :
if i in pages:
page=i+1
direction=lst[i-1]["direction"]
deg=lst[i-1]["deg"]
if (page == i + 1) :
if direction == "CW" :
tmp = pdf.getPage(i).rotateClockwise(deg)
else :
tmp = pdf.getPage(i).rotateCounterClockwise(deg)
result_pdf.addPage(tmp)
else :
result_pdf.addPage(pdf.getPage(i))
with open(filename, "wb") as handler :
result_pdf.write(handler)
"""
Encrypts a PDF file using the supplied password
"password" by the user. To decrypt, users will have
to enter the password you provided at the time
of encryption
"""
def encryptPDF(pdf_path,password):
pdf_writer=PyPDF2.PdfFileWriter()
pdf_reader=PyPDF2.PdfFileReader(pdf_path)
filename = Path(pdf_path).resolve().stem + "-encrypted.pdf"
for page in range(pdf_reader.getNumPages()):
pdf_writer.addPage(pdf_reader.getPage(page))
pdf_writer.encrypt(user_pwd=password,owner_pwd=None,use_128bit=True)
with open(filename,"wb") as handler:
pdf_writer.write(handler)
"""
Decrypts (or at least attempts to) PDF
and saves decrypted file according to the output
path (i.e. result.pdf). Standard rockyou.txt is used
as the wordlist of choice but one can certainly
get creative and customize it accordingly
"""
def decrypt_pdf(input_path, output_path):
# MUST HAVE ROCKYOU.TXT ALREADY DOWNLOADED
passwords = [line.strip() for line in open("rockyou.txt",encoding="latin-1")]
# iterate over passwords
for pwd in tqdm(passwords, "Decrypting PDF") :
try :
# open PDF file
with pikepdf.open(input_path, password=pwd) as pdf :
# Password decrypted successfully, break out of the loop
print("[+] Password found:", pwd)
with open(input_path, 'rb') as input_file, \
open(output_path, 'wb') as output_file :
reader = PyPDF2.PdfFileReader(input_file)
reader.decrypt(pwd)
writer = PyPDF2.PdfFileWriter()
for i in range(reader.getNumPages()) :
writer.addPage(reader.getPage(i))
writer.write(output_file)
break
except pikepdf._qpdf.PasswordError as e :
# wrong password, just continue in the loop
continue
print("No passwords were found :(")
"""
Signs given PDF file at the page specified.
For now, signature must be saved as image and in
the working directory, but for the web app
we will be able to dynamically get the coordinates
by drawing a bow to where we want to place it
"""
def sign(pdf_path,image_path,page,x1,y1,x2,y2):
filename=Path(pdf_path).resolve().stem+"-signed.pdf"
try:
image_rect=fitz.Rect(x1,y1,x2,y2)
except:
print('bad coordinates')
try:
file_handle=fitz.open(pdf_path)
pg=file_handle[page-1]
# draw rectangle where image shall be placed
pg.insertImage(image_rect,filename=image_path)
file_handle.save(filename)
except:
print('invalid page range')
if __name__=="__main__":
path="in"
file="result"
#merge(path,file)
pdfPath="<your path here>"
sign_path="<your path here>"
#parseAll(pdfPath)
#extract(pdfPath,[0,3,4])
#delete(pdfPath,[0])
#toJPG(pdfPath)
#rearrange(pdfPath,[3,1,2])
#addPageNumber(pdfPath,"BC")
#rotateAll(pdfPath,360,"CCW")
#lst=[{"page":0,"direction":"CW","deg":90},{"page":2,"direction":"CCW","deg":270}]
#rotatePage(pdfPath,3,90,"CCW")
#rotatePages(pdfPath,lst)
#encryptPDF(pdfPath,"twofish")
decrypt_pdf("path for file to encrypt",'<output>.pdf')
#sign(pdfPath,sign_path,1,450,50,550,120)
| 31.438287 | 86 | 0.60548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,687 | 0.295409 |
f09d6bc79182104f7a69ace2308580f3f628dc90 | 1,902 | py | Python | grAdapt/sampling/equidistributed/Mitchell.py | mkduong-ai/grAdapt | 94c2659b0f6ff9a2984a9dc58e3c83213313bf90 | [
"Apache-2.0"
] | 25 | 2020-11-13T05:57:01.000Z | 2021-06-18T11:16:03.000Z | grAdapt/sampling/equidistributed/Mitchell.py | mkduong-ai/grAdapt | 94c2659b0f6ff9a2984a9dc58e3c83213313bf90 | [
"Apache-2.0"
] | null | null | null | grAdapt/sampling/equidistributed/Mitchell.py | mkduong-ai/grAdapt | 94c2659b0f6ff9a2984a9dc58e3c83213313bf90 | [
"Apache-2.0"
] | null | null | null | # Python Standard Libraries
import warnings
# grAdapt
# from .base import Equidistributed
from .MaximalMinDistance import MaximalMinDistance
class Mitchell(MaximalMinDistance):
    """
    [Mitchell et al., 1991],
    Spectrally optimal sampling for distribution ray tracing
    """

    def __init__(self, m=3):
        """
        Parameters
        ----------
        m : integer
            number of candidates = m * n
        """
        warnings.warn('Mitchell\' best candidate has a time complexity of O(n^3) '
                      'and memory issues when dealing with higher sample numbers. '
                      'Use MaximalMinDistance instead which is an improved version '
                      'with linear time complexity.', ResourceWarning)
        super().__init__(n_candidates=m, window_size=0)
        # The multiplier m is scaled by n on the first sample() call only.
        self.candidates_set = False

    def sample(self, bounds, n, x_history=None):
        """Samples low discrepancy/equidistributed sequences according to Mitchell.
        Method has to handle with new bounds and n.

        Parameters
        ----------
        bounds : list of tuples or list of grAdapt.space.datatype.base
            Each tuple in the list defines the bounds for the corresponding variable
            Example: [(1, 2), (2, 3), (-1, 4)...]
        n : int
            number of points to be sampled
        x_history : array-like (2d)
            History points. Consider those to prevent sampling in dense regions.

        Returns
        -------
        array-like (n, len(bounds))
            Returns a 2D array. dim is the dimension of a single point
            Each row corresponds to a single point.
            Each column corresponds to a dimension.
        """
        # Lazily turn the multiplier m into the full candidate count m * n.
        if not self.candidates_set:
            self.n_candidates *= n
            self.candidates_set = True
        return super().sample(bounds, n, x_history)
| 35.886792 | 84 | 0.605152 | 1,757 | 0.923764 | 0 | 0 | 0 | 0 | 0 | 0 | 1,303 | 0.685068 |
f09dd989151cb3043faf87d4533e594413a8d5d4 | 1,141 | py | Python | poly_py_tools/payload.py | mjmunger/PolyPyTools | 116014a47479f360ee73006d6ba3ddc7f362c7a1 | [
"MIT"
] | 7 | 2017-11-15T19:25:37.000Z | 2022-01-20T01:30:56.000Z | poly_py_tools/payload.py | DrDamnit/PolyPyTools | 116014a47479f360ee73006d6ba3ddc7f362c7a1 | [
"MIT"
] | 40 | 2020-05-19T19:46:20.000Z | 2020-11-12T16:13:55.000Z | poly_py_tools/payload.py | mjmunger/PolyPyTools | 116014a47479f360ee73006d6ba3ddc7f362c7a1 | [
"MIT"
] | null | null | null | import os
from poly_py_tools.dialplan_entry import Entry
from poly_py_tools.polypy_config import PolypyConfig
class Payload:
    """Provisioning payload for a single dialplan entry.

    Collects the tftproot file paths that belong to one phone, keyed by
    its MAC address, and derives the provisioned directory name from the
    reversed dotted site name.
    """

    # Class-level defaults; populated per instance in __init__.
    dialplan_entry = None
    provisioned_directory = None
    config = None
    sources = None

    def __init__(self, config:PolypyConfig, dialplan_entry: Entry):
        self.dialplan_entry = dialplan_entry
        self.config = config
        # Reverse the dotted site name: "a.b.c" -> "c.b.a".
        parts = str(self.dialplan_entry.site).split(".")
        self.provisioned_directory = ".".join(reversed(parts))
        self.sources = []
        self.build_sources()

    def build_sources(self):
        """Populate self.sources with the three provisioning file paths
        (<mac>, <mac>.cfg and <mac>-directory.xml under tftproot)."""
        tftproot = str(self.config.json['paths']['tftproot'])
        mac = self.dialplan_entry.mac
        for suffix in ("", ".cfg", "-directory.xml"):
            self.sources.append(os.path.join(tftproot, mac + suffix))

    def __str__(self):
        return "\n".join(
            "{}: {}".format(attr, value)
            for attr, value in self.__dict__.items()
        )
| 32.6 | 129 | 0.660824 | 1,028 | 0.900964 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.079755 |
f09e721d7a2aea6a6da9968525dbcc1b80b3e3d5 | 549 | py | Python | tests/test_cosine_estimator.py | lefnire/lefnire_ml_utils | 65b84bb59faa41268aa14405daa58f6ba0b2509b | [
"MIT"
] | 3 | 2020-10-27T04:03:16.000Z | 2021-03-06T01:26:06.000Z | tests/test_cosine_estimator.py | lefnire/lefnire_ml_utils | 65b84bb59faa41268aa14405daa58f6ba0b2509b | [
"MIT"
] | 3 | 2020-10-08T22:47:55.000Z | 2020-10-29T18:43:36.000Z | tests/test_cosine_estimator.py | lefnire/lefnire_ml_utils | 65b84bb59faa41268aa14405daa58f6ba0b2509b | [
"MIT"
] | null | null | null | from ml_tools import CosineEstimator, Similars
from ml_tools.fixtures import articles
import numpy as np
# Module-level fixture: embed a sample article corpus once for the test below.
corpus = articles()
split_ = len(corpus)//3
x, y = corpus[split_:], corpus[:split_] # note reversal; x should be smaller
# NOTE(review): the comment above says x should be smaller, but corpus[split_:]
# is the larger two-thirds slice — confirm which split was intended.
x, y = Similars(x, y).embed().value()
def test_cosine_estimator():
    """Smoke-test CosineEstimator end to end: fit, adjust, predict."""
    estimator = CosineEstimator(y)
    estimator.fit_cosine()
    # Neutral (all-zero) adjustments, one per row of y.
    neutral = np.zeros((y.shape[0],))
    estimator.fit_adjustments(x, neutral)
    predictions = estimator.predict(x)
    print(predictions)
    # TODO test outcome. Will need a larger corpus with dissimilar articles
| 28.894737 | 77 | 0.712204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.1949 |
f0a252d3651a304580c3dce4edc0080b4ebde52d | 402 | py | Python | tests/test_euclidean_distance.py | danielwardega141196/unittests-with-pytest | 8dbedbe87fbfc5786856872dff6873395e6f4726 | [
"MIT"
] | null | null | null | tests/test_euclidean_distance.py | danielwardega141196/unittests-with-pytest | 8dbedbe87fbfc5786856872dff6873395e6f4726 | [
"MIT"
] | null | null | null | tests/test_euclidean_distance.py | danielwardega141196/unittests-with-pytest | 8dbedbe87fbfc5786856872dff6873395e6f4726 | [
"MIT"
] | null | null | null | from application.euclidean_distance import euclidean_distance
def test_euclidean_distance():
    """euclidean_distance should satisfy two known cases."""
    # Classic 3-4-5 right triangle.
    assert euclidean_distance((0, 0), (3, 4)) == 5
    # Horizontal segment of length 4.
    assert euclidean_distance((-2, 3), (2, 3)) == 4
| 23.647059 | 67 | 0.728856 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f0a3ad494e78ea0104d2f74ce9643e9fed92e18a | 423 | py | Python | Material.py | YuvalAriel/Path_Tracing | 105ff37b94943eb3d9e0ebf13077547a5767b639 | [
"Unlicense"
] | null | null | null | Material.py | YuvalAriel/Path_Tracing | 105ff37b94943eb3d9e0ebf13077547a5767b639 | [
"Unlicense"
] | null | null | null | Material.py | YuvalAriel/Path_Tracing | 105ff37b94943eb3d9e0ebf13077547a5767b639 | [
"Unlicense"
] | null | null | null | import numpy as np
class Material:
    """Surface material for path tracing.

    Attributes:
        color: RGB triple as a numpy array (send components in range 0-1).
        emission: RGB emitted radiance; non-zero only for light sources.
        reflection, transparency: scalar coefficients.
        refraction_ind: index of refraction (1 = air/vacuum).
    """
    def __init__(self, color, reflection=0, transparency=0, emission=None, refraction_ind=1):
        self.color = np.array(color)
        # BUG FIX: the original used np.array((0, 0, 0)) as the default
        # argument, so every Material created without an explicit emission
        # shared ONE mutable array — mutating one instance's emission would
        # have changed them all. A None sentinel gives each its own array.
        self.emission = np.array((0, 0, 0)) if emission is None else emission
        self.reflection = reflection
        self.transparency = transparency
        self.refraction_ind = refraction_ind
| 32.538462 | 108 | 0.671395 | 401 | 0.947991 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.179669 |
f0a3b174092260f45b2b53d775a1f36bc4ab8698 | 4,599 | py | Python | MTVulnerability/models/DRNSegDepth.py | yamizi/taskaugment | 8393e15d7ae16f267592edf1d67e20368aeeb1b5 | [
"MIT"
] | null | null | null | MTVulnerability/models/DRNSegDepth.py | yamizi/taskaugment | 8393e15d7ae16f267592edf1d67e20368aeeb1b5 | [
"MIT"
] | null | null | null | MTVulnerability/models/DRNSegDepth.py | yamizi/taskaugment | 8393e15d7ae16f267592edf1d67e20368aeeb1b5 | [
"MIT"
] | null | null | null | import math
import sys
import torch
from torch import nn
import models.drn as drn
# class Decoder(nn.Module):
#
# def __init__(self,
# output_channels = 3,
# ):
#
# self.output_channels = 3
#
# # DEFINING MODEL AS COMPLEMENT OF BASE CHOSEN FOR DRN
#
# self.layer_list = model
#
# self.decoder_model = nn.Sequential(*layer_list)
#
#
#
# def forward(self, representation):
#
# x = self.
def fill_up_weights(up):
    """Initialise a ConvTranspose2d with bilinear-upsampling weights.

    Fills channel (0, 0) with the separable bilinear kernel and copies it
    into every other output channel (the (out, 1, kh, kw) weight layout
    assumes groups == channels, i.e. a depthwise deconvolution).
    """
    weight = up.weight.data
    kh, kw = weight.size(2), weight.size(3)
    factor = math.ceil(kh / 2)
    center = (2 * factor - 1 - factor % 2) / (2. * factor)
    for row in range(kh):
        row_val = 1 - math.fabs(row / factor - center)
        for col in range(kw):
            weight[0, 0, row, col] = row_val * (1 - math.fabs(col / factor - center))
    # Replicate the bilinear kernel across the remaining output channels.
    for ch in range(1, weight.size(0)):
        weight[ch, 0, :, :] = weight[0, 0, :, :]
class DRNSegDepth(nn.Module):
    """Multi-task head on a Dilated Residual Network (DRN) backbone.

    Splits a pretrained DRN into a shared encoder and one small decoder
    per requested task; forward() returns a dict with the shared
    representation plus one output per task.
    """
    def __init__(self,
                 model_name, # tells which DRN architecture has to be loaded.
                 classes=19, # tells how many classes the drn model is used for, though this is for the last layer, may not make sense here.
                 pretrained_model=None,
                 pretrained=True,
                 tasks=[], # So that we can initialise the network for specific tasks
                 use_torch=False, #TODO - may not be needed.
                 old_version=False): #TODO: See all the parameters, are these enough.
        # NOTE(review): tasks=[] is a mutable default argument (shared across
        # calls); it is only read here, but consider a None default.
        super(DRNSegDepth, self).__init__()
        # Get the DRN model skeleton based on model_name
        model = drn.__dict__.get(model_name)(
            pretrained=pretrained, num_classes=1000
        )
        pmodel = nn.DataParallel(model)
        if pretrained_model is not None:
            pmodel.load_state_dict(pretrained_model)
        # ch = list(model.children())
        # ch1 = (list(model.children())[:-5])
        # ch2 = (list(model.children())[:-2])
        # Branch point: the last 5 child modules of the DRN become per-task
        # decoders; everything before them is the shared encoder.
        self.branching_layer_number = 5
        # Decide a base from DRN based on which we want to branch into 3 different tasks
        self.encoder = nn.Sequential(*nn.ModuleList(model.children())[:-self.branching_layer_number])
        self.tasks = tasks
        # NOTE(review): LogSoftmax/Softmax constructed without dim= — torch
        # will warn and infer the dimension; confirm dim=1 was intended.
        self.softmax = nn.LogSoftmax()
        self.softmax_only = nn.Softmax()
        # Make a decoder for each task - dict so that it is easy to extend for other datasets
        self.task_to_decoder = nn.ModuleDict({})
        if self.tasks is not None:
            for task in self.tasks:
                # Pick the decoder's output width per task. NOTE(review): an
                # unknown task name leaves output_channels from the previous
                # loop iteration (or unbound on the first) — verify inputs.
                if task == 'segmentsemantic':
                    output_channels = classes
                if task == 'segment_semantic':
                    output_channels = classes
                if task == 'depth_zbuffer' or task == 'depth':
                    output_channels = 1 # TODO : Confirm if depth is just for one channel
                if task == 'autoencoder' or task == 'reconstruct':
                    output_channels = 3
                # MAKE A SEPARATE DECODER FOR EACH TASK AND PUT IN DICTIONARY
                decoder = nn.ModuleList(model.children())[-self.branching_layer_number:-2]
                decoder.extend([nn.Conv2d(model.out_dim, output_channels,kernel_size=1,bias=True)])
                # Fixed (non-trainable) bilinear x8 upsampling back to input size.
                up = nn.ConvTranspose2d(output_channels, output_channels, 16, stride=8, padding=4, output_padding=0, groups=output_channels, bias=False)
                fill_up_weights(up)
                up.weight.requires_grad = False
                decoder.extend([up])
                decoder = nn.Sequential(*(decoder))
                # Finally, decoder should contain all the layers from end of the model
                # decoder = Decoder(output_channels,num_layers)
                self.task_to_decoder[task] = decoder
        else:
            # Assume segmentation if no tasks are given
            print("\n NO TASKS GIVEN IN CONFIG FILE \n")
            output_channels = 3
        #self.decoders = nn.ModuleDict(self.task_to_decoder)

    def forward(self,x) :
        """Return {'rep': shared encoding, <task>: decoder output, ...};
        segmentation outputs pass through LogSoftmax, others are raw."""
        rep = self.encoder(x)
        outputs = {'rep' : rep}
        for i, (task,decoder) in enumerate(self.task_to_decoder.items()):
            # DEBUG- TEST SIZES - MULTIPLE DECODERS
            # for m in decoder.children():
            #     print("APPLYING ", m)
            #     rep = m(rep)
            #     print(rep.shape)
            decoder_output = decoder(rep)
            if task != 'segmentsemantic' and task != 'segment_semantic':
                outputs[task] = decoder_output
            else:
                outputs[task] = self.softmax(decoder_output)
            #THINK ABOUT THE LAST LINEARITY
        return outputs
| 32.387324 | 152 | 0.56621 | 3,761 | 0.817786 | 0 | 0 | 0 | 0 | 0 | 0 | 1,622 | 0.352685 |
f0a3f15b303ced78faaebde15a58c43f37452c94 | 5,686 | py | Python | src/web/modules/post/controllers/post/create.py | unkyulee/elastic-cms | 3ccf4476c3523d4fefc0d8d9dee0196815b81489 | [
"MIT"
] | 2 | 2017-04-30T07:29:23.000Z | 2017-04-30T07:36:27.000Z | src/web/modules/post/controllers/post/create.py | unkyulee/elastic-cms | 3ccf4476c3523d4fefc0d8d9dee0196815b81489 | [
"MIT"
] | null | null | null | src/web/modules/post/controllers/post/create.py | unkyulee/elastic-cms | 3ccf4476c3523d4fefc0d8d9dee0196815b81489 | [
"MIT"
] | null | null | null | from flask import request, render_template
import json
import traceback
import lib.es as es
import web.util.tools as tools
import web.modules.post.services.workflow as workflow
import web.modules.post.services.upload as upload
import web.util.jinja as jinja
import web.modules.admin.services.notification as notification
def get(p):
    # Render (GET) or dispatch (POST) the post-creation screen.
    # NOTE: this file is Python 2 (`except Exception, e:` syntax).
    # p is the request context dict: p['c'] holds site config
    # (host/index of Elasticsearch), p['url'], p['login'] etc.
    host = p['c']['host']; index = p['c']['index'];
    # send out empty post to be compatible with edit form
    p['post'] = {}
    # init workflow
    wf = tools.get("wf", 'create')
    p['workflow'] = workflow.init(wf, host, index)
    # field map: field id -> field definition, for quick lookup in post()
    fields = es.list(host, index, 'field')
    p['field_map'] = {}
    for field in fields:
        p['field_map'][field['id']] = field
    ######################################################
    # check condition
    # SECURITY NOTE(review): the workflow 'condition' is code stored in
    # Elasticsearch and exec'd into globals; it must define condition(p).
    # A truthy non-True return value is treated as a response to send back.
    if p['workflow'] and p['workflow'].get('condition'):
        try:
            exec (p['workflow']['condition'], globals())
            ret = condition(p)
            if ret != True and ret: return ret
        except SystemExit: pass
        except Exception, e:
            raise
    ######################################################
    # Form submission goes through post() below.
    if request.method == "POST":
        return post(p)
    # get list of field to display on the create screen
    if p['workflow'] and p['workflow'].get('screen'):
        # Workflow dictates which fields appear, in its own order.
        p['field_list'] = []
        for field in jinja.getlist(p['workflow'].get('screen')):
            query = "name:{}".format(field)
            ret = es.list(host, index, 'field', field, query)
            if len(ret): p['field_list'].append(ret[0])
    else:
        # Default: every field marked visible on create, ordered by order_key.
        query = "visible:create"
        option = "size=10000&sort=order_key:asc"
        p['field_list'] = es.list(host, index, 'field', query, option)
    return render_template("post/post/create.html", p=p)
def post(p):
    # Handle the submitted create form: validate via workflow, persist the
    # post in Elasticsearch, store attachments, record history, run the
    # workflow post-action and notifications, then redirect to the view page.
    # NOTE: this file is Python 2 (`except Exception, e:`, unicode()).
    host = p['c']['host']; index = p['c']['index'];
    # get all submitted fields
    p['post'] = {}
    p['original'] = {}
    for field in request.form:
        field_info = p['field_map'][field]
        value = tools.get(field)
        # if handler is "object" the submitted value is a JSON string
        if field_info['handler'] == "object":
            if value:
                p["post"][field_info['id']] = json.loads(value)
        elif value:
            p["post"][field_info['id']] = value
    ######################################################
    # validate
    # SECURITY NOTE(review): workflow 'validation' is stored code exec'd
    # into globals; it must define validation(p). Truthy non-True return
    # values are sent back as the response (e.g. an error page).
    if p['workflow'] and p['workflow'].get('validation'):
        try:
            exec (p['workflow']['validation'], globals())
            ret = validation(p)
            if ret != True and ret: return ret
        except SystemExit: pass
        except Exception, e:
            raise
    ######################################################
    # create post
    p['post']['created'] = es.now()
    p['post']['created_by'] = p['login']
    response = es.create(host, index, 'post', p['post'].get('id'), p["post"])
    # get created id
    p["post"]["id"] = response["_id"]
    # handle attachment: save each uploaded file under the new post id
    #try:
    for f in request.files:
        if request.files[f]:
            p["post"][f] = \
                upload.save(request.files[f], p['c']['allowed_exts'],
                    p["post"]["id"], p['c']['upload_dir'])
    #except Exception, e:
    #    es.delete(host, index, 'post', p['post'].get('id'))
    #    return tools.alert(str(e))
    # Second write attaches the file metadata to the freshly created doc.
    es.update(host, index, 'post', p["post"]["id"], p["post"])
    es.flush(host, index)
    ######################################################
    # Record History: one 'log' doc per changed field
    # (p['original'] is empty on create, so every field counts as changed).
    if p['c']['keep_history'] == "Yes":
        for k, v in p['post'].items():
            if k in ["updated", "viewed"]: continue
            if p['original'].get(k) != p['post'].get(k):
                # write history
                doc = {
                    "id": p["post"]["id"],
                    "field": k,
                    "previous": unicode(p['original'].get(k)),
                    "current": unicode(p['post'].get(k)),
                    "login": p['login'],
                    "created": es.now()
                }
                es.create(host, index, 'log', '', doc)
    ######################################################
    # Post action: stored code exec'd after creation; must define postaction(p).
    p['post'] = es.get(host, index, 'post', p["post"]["id"])
    if p['workflow'] and p['workflow'].get('postaction'):
        try:
            exec (p['workflow']['postaction'], globals())
            ret = postaction(p)
            if ret != True and ret: return ret
        except SystemExit: pass
        except Exception, e:
            raise
    ######################################################
    ######################################################
    # notification: each notification attached to this workflow may carry
    # its own stored 'condition' code (must define condition(p)).
    if p['workflow']:
        notifications = es.list(host, index, 'notification', 'workflow:{}'.format(p['workflow'].get('name')))
        for p['notification'] in notifications:
            p['notification']['recipients'] = jinja.getlist(p['notification'].get('recipients'))
            if p['notification'] and p['notification'].get('condition'):
                try:
                    exec (p['notification'].get('condition'), globals())
                    ret = condition(p)
                    if ret != True and ret: return ret
                except SystemExit: pass
                except Exception, e:
                    raise
            # send notification
            notification.send(p,
                p['notification'].get('header'),
                p['notification'].get('message'),
                p['notification'].get('recipients')
            )
    ######################################################
    # redirect to view
    return tools.redirect("{}/post/view/{}".format(p['url'], p["post"]["id"]))
| 34.047904 | 109 | 0.470805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,038 | 0.358424 |
f0a5756f3c51f204fd2707019d372fb6aa8f9179 | 7,096 | py | Python | stibium/bot.py | szymonszl/stibium | bf9a7756edd8dcaeb65f1afdacbf8f4780827d00 | [
"BSD-3-Clause"
] | null | null | null | stibium/bot.py | szymonszl/stibium | bf9a7756edd8dcaeb65f1afdacbf8f4780827d00 | [
"BSD-3-Clause"
] | null | null | null | stibium/bot.py | szymonszl/stibium | bf9a7756edd8dcaeb65f1afdacbf8f4780827d00 | [
"BSD-3-Clause"
] | null | null | null | """This module provies the main Bot class"""
import time
import traceback
import json
import threading
import sched
from fbchat import models
from ._fbclient import Client
from ._logs import log
from .dataclasses import Thread
from .handlers import BaseHandler
from ._i18n import _
class Bot(object):
    """Main Stibium Bot class.

    Wraps an fbchat client (construction currently stubbed with
    NotImplementedError # FIXME markers), dispatches chat events to
    registered handlers, and runs timed/recurring handlers on a
    sched-based scheduler serviced by a daemon thread.
    """
    # NOTE(review): these containers are CLASS-level, so every Bot
    # instance shares the same _handlers/_username_cache/_scheduler.
    name = None
    owner = None
    fb_login = ()
    fbchat_client = None
    command_prefix = None
    _logged_in = False
    _handlers = {}
    _hooked_functions = []
    _username_cache = {}
    _scheduler = sched.scheduler(time.time, time.sleep)

    def __init__(self, name, prefix, fb_login, owner):
        """Store bot identity and credentials; login happens separately."""
        log.debug('__init__ called')
        self.name = name
        self.prefix = prefix
        self.fb_login = fb_login
        # NOTE(review): `owner and False` is always False, so self.owner is
        # never set here — owner DM reporting is effectively disabled.
        if owner and False:
            self.owner = None # FIXME
        else:
            log.warning('Owner not set, DM error reporting disabled!')
        log.debug('Object created')

    def login(self):
        """Log in to the bot account"""
        log.debug('login called')
        # Client construction is stubbed; the lines below are unreachable.
        raise NotImplementedError # FIXME
        log.debug('Created and logged in the fbchat client...')
        self._logged_in = True
        log.info('Logged in!')

    def _timeout_daemon(self):
        # Daemon-thread loop that services the shared scheduler once a second.
        log.debug('Started timeout daemon')
        while True:
            # the thread is a daemon, so this while does not need to be exited
            self._scheduler.run(blocking=False)
            time.sleep(1) # wait for more events

    def listen(self):
        """Start listening for events"""
        if not self._logged_in:
            raise Exception('The bot is not logged in yet')
        log.debug('Starting the timeout daemon...')
        timeout_daemon = threading.Thread(
            target=self._timeout_daemon,
            name='TimeoutThread',
            daemon=True
        )
        timeout_daemon.start()
        log.info('Starting listening...')
        # The actual fbchat listen loop is stubbed.
        raise NotImplementedError # FIXME

    def register(self, *handlers: BaseHandler):
        """Register handlers"""
        # Special pseudo-events '_recurrent' and '_timeout' are scheduled
        # instead of stored; note both branches `return` immediately, so
        # any handlers after them in the same call are skipped.
        for handler in handlers:
            log.debug('Registering a handler for function %s', repr(handler))
            if handler.event is None:
                raise Exception('Handler did not define event type')
            if handler.event == '_recurrent':
                # Schedule at the handler-provided absolute time.
                self._scheduler.enterabs(
                    self._run_untrusted(
                        handler.next_time,
                        args=(time.time(),),
                        notify=False
                    ),
                    0,
                    self._handle_recurrent,
                    argument=(handler,)
                )
                return
            if handler.event == '_timeout':
                # One-shot handler after a relative delay.
                self._scheduler.enter(
                    handler.timeout,
                    0,
                    self._handle_timeout,
                    argument=(handler,)
                )
                return
            if handler.event not in self._handlers.keys():
                self._handlers[handler.event] = []
            handler.setup(self)
            self._handlers[handler.event].append(handler)
            # An event handler with a timeout auto-unregisters after it expires.
            if handler.timeout is not None:
                self._scheduler.enter(
                    handler.timeout,
                    0,
                    self._handlers[handler.event].remove,
                    argument=(handler,)
                )
        if handlers:
            return handlers[0] # for use as a decorator

    def _handle_timeout(self, handler: BaseHandler):
        # Fire a one-shot '_timeout' handler, sandboxed.
        self._run_untrusted(
            handler.execute,
            args=(time.time(), self),
            thread=None,
            notify=False
        )

    def _handle_recurrent(self, handler: BaseHandler):
        # Fire a '_recurrent' handler and immediately reschedule its next run.
        log.debug('Executing recurring handler %s', handler)
        self._run_untrusted(
            handler.execute,
            args=(time.time(), self),
            thread=None,
            notify=False
        )
        self._scheduler.enterabs(
            self._run_untrusted(
                handler.next_time,
                args=(time.time(),),
                notify=False
            ),
            0,
            self._handle_recurrent,
            argument=(handler,)
        )

    def send(self, text, thread, mentions=None, reply=None):
        """Send a message to a specified thread"""
        # TODO: add attachments, both here and in onMessage
        raise NotImplementedError # FIXME

    def get_user_name(self, uid):
        """Get the name of the user specified by uid"""
        uid = str(uid)
        name = self._username_cache.get(uid)
        # Cache miss: the lookup against fbchat is stubbed.
        if name is None:
            raise NotImplementedError # FIXME
        self._username_cache[uid] = name
        return name

    def _fbchat_callback_handler(self, func, thread, event):
        # Dispatch an fbchat callback to every handler registered for `func`.
        # A handler whose check() raises is disabled and reported.
        for handler in self._handlers.get(func, []):
            valid = self._run_untrusted(
                handler.check,
                args=[event, self],
                default='error',
                thread=thread,
                notify=False
            )
            if valid == 'error':
                # NOTE(review): removal uses self._handlers.get(event, ...)
                # while lookup used `func` — confirm the intended key.
                self._handlers.get(event, []).remove(handler)
                errormsg = f'The handler {handler} was disabled, because of causing an exception.'
                log.error(errormsg)
                self.send(
                    errormsg,
                    thread=self.owner
                )
            elif valid:
                log.debug('Executing %s, reacting to %s', handler, event)
                raise NotImplementedError # FIXME

    def _run_untrusted( # pylint: disable=dangerous-default-value
            self,
            fun,
            args=[],
            kwargs={},
            thread=None,
            notify=True,
            default=None,
            catch_keyboard=False
    ):
        # Run handler code in a sandbox: log any exception, optionally notify
        # the originating thread and the owner, and return `default` instead
        # of propagating. KeyboardInterrupt is re-raised unless
        # catch_keyboard is set.
        try:
            return fun(*args, **kwargs)
        except Exception:
            trace = traceback.format_exc()
            if thread is not None and notify:
                short_error_message = \
                    _("An error occured and the action could not be completed.\n"
                      "The administrator has been notified.\n") \
                    + trace.splitlines()[-1]
                # NOTE(review): raise above makes the send() below unreachable.
                raise NotImplementedError # FIXME
                self.send(short_error_message, thread) # notify the end user
            error_message = '\n'.join([
                f'Error while running function {fun.__name__}',
                f'with *args={args}',
                f'**kwargs={kwargs}',
                f'in thread {thread}',
                f'Full traceback:',
                trace,
            ])
            log.error(error_message)
            if self.owner: # Report error to admin
                raise NotImplementedError # FIXME
            return default
        except KeyboardInterrupt as ex:
            if catch_keyboard:
                if thread is not None and notify:
                    self.send(_('The command has been interrupted by admin'), thread)
                return default
            raise ex
| 33.471698 | 98 | 0.52748 | 6,808 | 0.959414 | 0 | 0 | 0 | 0 | 0 | 0 | 1,337 | 0.188416 |
f0a6aad82c1650cdcb381029fba97553f6a8cf60 | 5,296 | py | Python | train.py | SwatiSharda/Solar-forecasting | 455b3573bd827bda5b00aea643cf29fea38958b2 | [
"BSD-3-Clause"
] | 3 | 2021-04-03T01:40:48.000Z | 2021-11-21T11:52:53.000Z | train.py | SwatiSharda/Solar-forecasting | 455b3573bd827bda5b00aea643cf29fea38958b2 | [
"BSD-3-Clause"
] | null | null | null | train.py | SwatiSharda/Solar-forecasting | 455b3573bd827bda5b00aea643cf29fea38958b2 | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
from torch.utils.data import DataLoader
import multiprocessing as mp
import argparse
from DataSet import Dataset
import torch
import torch.nn as nn
from os import path
import Infer
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
cpu = torch.device('cpu')
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--seq_len', type=int, default=256)
parser.add_argument('--root_dir')
parser.add_argument('--tr_start_year', type=int, help='Training Start year')
parser.add_argument('--tr_final_year', type=int, help='Training Final year')
parser.add_argument('--val_start_year', type=int, help='Validation Start year')
parser.add_argument('--val_final_year', type=int, help='Validation Final year')
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--loss', default='mse', help='Choose from qr_loss,mse')
parser.add_argument('--gamma_list', nargs='*', type=float, help='All gammas to be predicted by 1 model')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--model', default='ar_net', help='Choose From ar_net, trfrmr, cnn_lstm, lstm')
parser.add_argument('--ini_len', type=int, default=18, help='Number of Columns in Data<i>.csv')
parser.add_argument('--final_len', type=int, default=1, help='Number of numbers your model will predict.')
parser.add_argument('--steps', type=int, default=1, help='How many step ahead do you want to predict?')
parser.add_argument('--optimizer', default='Adam', help='Choose from Adam and RAdam.')
parser.add_argument('--param_file', help='Path to file to store weights.May not exist.')
args = parser.parse_args()
b_sz = args.batch_size
n_wrkrs = mp.cpu_count()
seq_len = args.seq_len
epochs = args.epochs
tr_csv_paths = [args.root_dir+'/Data'+str(i)+'.csv' for i in range(args.tr_start_year, args.tr_final_year+1)]
val_csv_paths = [args.root_dir+'/Data'+str(i)+'.csv' for i in range(args.val_start_year, args.val_final_year+1)]
if args.gamma_list is not None and len(args.gamma_list)>1 and len(args.gamma_list)%2!=0 and args.loss=='qr_loss':
print('Invalid gamma list')
exit(0)
dataset_final_len = args.final_len #if args.loss!='qr_loss' else 1 #or len(args.gamma_list)<=1 else int(args.final_len/2)
model_final_len = args.final_len*len(args.gamma_list) if args.gamma_list!=None else args.final_len
train_dataset = Dataset.SRdata(tr_csv_paths, seq_len, steps=args.steps, final_len=dataset_final_len)
train_data_loader = DataLoader(train_dataset, batch_size = b_sz, num_workers=n_wrkrs, drop_last = True)
test_dataset = Dataset.SRdata(val_csv_paths, seq_len, steps=args.steps, final_len=dataset_final_len)
test_data_loader = DataLoader(test_dataset, batch_size = b_sz, num_workers=n_wrkrs, drop_last=True)
if args.loss=='mse' :
lossfn = nn.MSELoss().to(device)
elif args.loss=='qr_loss' :
maximum = nn.ReLU()
gamma_list_len = len(args.gamma_list)
gammas = torch.tensor(args.gamma_list, dtype=torch.float64, device=device)
gammas = gammas.repeat_interleave(args.final_len)
def qr_loss(tar, pred) :
if gamma_list_len!=1 :
tar = torch.cat([tar]*gamma_list_len,dim=1)
n = tar.shape[0]
m = tar.shape[1]
loss = (1-gammas)*maximum(tar-pred)+(gammas)*maximum(pred-tar)
return loss.sum()/(n*m)
lossfn = qr_loss
if args.model=='ar_net' :
from Models import AR_Net
t = AR_Net.ar_nt(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='cnn_lstm' :
from Models import CNN_LSTM
t = CNN_LSTM.cnn_lstm(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='trfrmr' :
from Models import Transformer
t = Transformer.trnsfrmr_nt(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='lstm' :
from Models import LSTM
t = LSTM.lstm(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='lstm_a' :
from Models import lstm_attention
t = LSTM.lstm_a(seq_len = seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
if path.exists(args.param_file) :
t.load_state_dict(torch.load(args.param_file))
if args.optimizer == 'RAdam' :
from optimizers import RAdam
optimizer = RAdam.RAdam(t.parameters(),lr=args.lr)
elif args.optimizer == 'Adam' :
optimizer = torch.optim.Adam(t.parameters(),lr=args.lr)
t = t.double()
train_mse = []
test_mse = [10000]
for ij in range(epochs) :
loss_list = []
for i, batch in enumerate(train_data_loader) :
optimizer.zero_grad()
in_batch = batch['in'].to(device)
out = t(in_batch)
loss = lossfn(batch['out'].to(device), out)
loss_list.append(loss)
loss.backward()
optimizer.step()
print('Avg. Training Loss in '+str(ij)+ 'th epoch :- ', sum(loss_list)/len(loss_list))
train_mse.append(sum(loss_list)/len(loss_list))
loss_list=[]
test_mse.append(Infer.evaluate(t, loss = args.loss, test_dataset=test_dataset, args_from_train=args))
if test_mse[-1]==min(test_mse) :
print('saving:- ', test_mse[-1])
torch.save(t.state_dict(),args.param_file)
| 42.368 | 126 | 0.719033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 918 | 0.173338 |
f0a82aad993bf49b4180b264ae6f6a38e0fa4607 | 1,842 | py | Python | txtpp/src/txtpp.py | Cc618/dotmgt | 7c34ca1971e20a50ebdb4731ae43620e1296cc80 | [
"MIT"
] | 2 | 2022-01-08T10:11:58.000Z | 2022-01-09T10:25:16.000Z | txtpp/src/txtpp.py | Cc618/dotmgt | 7c34ca1971e20a50ebdb4731ae43620e1296cc80 | [
"MIT"
] | null | null | null | txtpp/src/txtpp.py | Cc618/dotmgt | 7c34ca1971e20a50ebdb4731ae43620e1296cc80 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import re
import sys
import io
import os
import traceback
class TextPreprocessorError(Exception):
def __init__(self, file_id, line, msg):
super().__init__(f"{file_id}:{line}: {msg}")
def debug_print(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def preprocess(
file: io.TextIOWrapper,
definitions: set[str],
file_id: str = "<stdin>",
):
import parser
parser.parse_exec(file, definitions, file_id)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Text preprocessor")
parser.add_argument(
"-D", dest="definitions", action="append", help="Set definition"
)
parser.add_argument(
"--deffile",
dest="deffile",
help="Set definition file, the definition file is a python file that defines a definitions iterable that contains definitions",
)
parser.add_argument(
"file", nargs="?", default=None, help="Input file (stdin by default)"
)
args = parser.parse_args()
definitions = set()
if args.deffile:
import importlib
deffile = importlib.machinery.SourceFileLoader(
"deffile", os.path.realpath(args.deffile)
)
deffile = deffile.load_module()
definitions |= set(deffile.definitions)
if args.definitions:
definitions |= set(args.definitions)
file = sys.stdin if args.file is None else open(args.file, "r")
file_id = "<stdin>" if args.file is None else args.file
try:
preprocess(
file,
definitions,
file_id
)
except:
traceback.print_exc()
print("Failed to parse / execute file", file_id, file=sys.stderr)
exit(3)
finally:
if args.file is not None:
file.close()
| 23.615385 | 135 | 0.623779 | 136 | 0.073833 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.195983 |
f0a9b76e7527fc4562f1dffa72d9bb26c30f1073 | 30 | py | Python | vggface/resnet50/__init__.py | claudiourbina/VGGFace | 362cc8f805c1fd4135fddf8d602026735bcfdf5a | [
"MIT"
] | null | null | null | vggface/resnet50/__init__.py | claudiourbina/VGGFace | 362cc8f805c1fd4135fddf8d602026735bcfdf5a | [
"MIT"
] | null | null | null | vggface/resnet50/__init__.py | claudiourbina/VGGFace | 362cc8f805c1fd4135fddf8d602026735bcfdf5a | [
"MIT"
] | null | null | null | from .resnet50 import ResNet50 | 30 | 30 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f0a9ef787da7675bb95305830e5a4f6a73cec7a5 | 14,354 | py | Python | supportal/tests/app/management/commands/test_email_users_with_expiring_assignments.py | Elizabeth-Warren/supportal-backend | e55b0e8fd154730bab1708f27386b2adcb18cfbc | [
"MIT"
] | 34 | 2020-03-27T14:59:04.000Z | 2021-11-15T10:24:12.000Z | supportal/tests/app/management/commands/test_email_users_with_expiring_assignments.py | Elizabeth-Warren/supportal-backend | e55b0e8fd154730bab1708f27386b2adcb18cfbc | [
"MIT"
] | 5 | 2021-03-18T22:51:05.000Z | 2022-02-10T15:03:33.000Z | supportal/tests/app/management/commands/test_email_users_with_expiring_assignments.py | Elizabeth-Warren/supportal-backend | e55b0e8fd154730bab1708f27386b2adcb18cfbc | [
"MIT"
] | 14 | 2020-03-27T17:36:39.000Z | 2020-06-18T21:47:43.000Z | from datetime import datetime, timezone
from io import StringIO
from unittest import mock
import freezegun
import pytest
from django.conf import settings
from django.core.management import call_command
from django.utils import timezone
from model_bakery import baker
from supportal.app.common.enums import CanvassResult
from supportal.app.models import EmailSend
CREATED_AT = datetime(2019, 10, 26, 1, tzinfo=timezone.utc)
CREATED_AT_EARLIER = datetime(2019, 10, 26, tzinfo=timezone.utc)
DAY_BEFORE_EXPIRE = datetime(2019, 11, 1, tzinfo=timezone.utc)
TWO_DAY_BEFORE_EXPIRE = datetime(2019, 10, 31, tzinfo=timezone.utc)
EXPIRED_AT = datetime(2019, 11, 2, 1, tzinfo=timezone.utc)
EXPIRED_EARLIER = datetime(2019, 11, 2, tzinfo=timezone.utc)
AFTER_EXPIRATION_DATE = datetime(2019, 11, 3, tzinfo=timezone.utc)
SIX_DAYS_BEFORE_EXPIRE = datetime(2019, 10, 27, tzinfo=timezone.utc)
def email_expiring_users(*args, **kwargs):
call_command("email_users_with_expiring_assignments", **kwargs)
@pytest.fixture
def first_cambridge_assignment(cambridge_leader_user, cambridge_prospect):
cambridge_assignment = baker.make(
"VolProspectAssignment", user=cambridge_leader_user, person=cambridge_prospect
)
cambridge_assignment.created_at = CREATED_AT
cambridge_assignment.save()
return cambridge_assignment
@pytest.fixture
def hayes_assignment(hayes_valley_leader_user, california_prospect):
hayes_valley_assignment = baker.make(
"VolProspectAssignment",
user=hayes_valley_leader_user,
person=california_prospect,
)
hayes_valley_assignment.created_at = CREATED_AT_EARLIER
hayes_valley_assignment.save()
return hayes_valley_assignment
@pytest.fixture
def hayes_cambrdige_assignment(hayes_valley_leader_user, cambridge_prospect):
hayes_valley_assignment = baker.make(
"VolProspectAssignment",
user=hayes_valley_leader_user,
person=cambridge_prospect,
)
hayes_valley_assignment.created_at = CREATED_AT
hayes_valley_assignment.save()
return hayes_valley_assignment
@pytest.fixture
def second_cambridge_assignment(cambridge_leader_user, california_prospect):
cambridge_assignment = baker.make(
"VolProspectAssignment", user=cambridge_leader_user, person=california_prospect
)
cambridge_assignment.created_at = CREATED_AT
cambridge_assignment.save()
return cambridge_assignment
@pytest.fixture
def expired_assignment(cambridge_leader_user, somerville_prospect):
cambridge_assignment = baker.make(
"VolProspectAssignment", user=cambridge_leader_user, person=somerville_prospect
)
cambridge_assignment.created_at = CREATED_AT
cambridge_assignment.expired_at = EXPIRED_AT
cambridge_assignment.save()
return cambridge_assignment
DEFAULT_TEMPLATE_DATA = {
"assignment_count": "",
"email": "",
"expiration_date": "",
"switchboard_login_url": settings.SUPPORTAL_BASE_URL,
"first_name": "",
"last_name": "",
}
def make_payload(assignment_count, email, expiration, first_name, last_name):
return {
"assignment_count": assignment_count,
"email": email,
"expiration_date": expiration.strftime("%a %b %d, %Y"),
"switchboard_login_url": settings.SUPPORTAL_BASE_URL,
"first_name": first_name,
"last_name": last_name,
}
def check_email_sends(user, assignment_count, expiration, single_call_mock=None):
assert EmailSend.objects.filter(user=user).count() == 1
email_sent = EmailSend.objects.get(user=user)
assert email_sent.template_name == "expiring_contacts_email"
assert email_sent.payload == {
"assignment_count": assignment_count,
"email": user.email,
"expiration_date": expiration.strftime("%a %b %d, %Y"),
"switchboard_login_url": settings.SUPPORTAL_BASE_URL,
"first_name": user.first_name,
"last_name": user.last_name,
}
if single_call_mock:
single_call_mock.return_value.send_bulk_email.assert_called_once_with(
configuration_set_name="organizing_emails",
default_template_data=DEFAULT_TEMPLATE_DATA,
from_email=settings.FROM_EMAIL,
payload_array=[
make_payload(
assignment_count,
user.email,
expiration,
user.first_name,
user.last_name,
)
],
reply_to_email=settings.REPLY_TO_EMAIL,
template="expiring_contacts_email",
application_name="supportal",
)
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_uncontacted_assignments(
first_cambridge_assignment, expired_assignment
):
out = StringIO()
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
first_cambridge_assignment.refresh_from_db()
assert EmailSend.objects.all().count() == 1
check_email_sends(
first_cambridge_assignment.user, 1, EXPIRED_AT, email_service_mock
)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dryrun(first_cambridge_assignment, expired_assignment):
out = StringIO()
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out)
first_cambridge_assignment.refresh_from_db()
assert EmailSend.objects.all().count() == 0
assert first_cambridge_assignment.user.email in out.getvalue()
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(DAY_BEFORE_EXPIRE)
def test_dont_email_outside_of_two_days(first_cambridge_assignment, expired_assignment):
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 0
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_assignments(
first_cambridge_assignment, second_cambridge_assignment, expired_assignment
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 1
check_email_sends(
first_cambridge_assignment.user, 2, EXPIRED_AT, email_service_mock
)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users(
first_cambridge_assignment,
hayes_assignment,
hayes_cambrdige_assignment,
expired_assignment,
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 2
check_email_sends(first_cambridge_assignment.user, 1, EXPIRED_AT)
check_email_sends(hayes_assignment.user, 2, EXPIRED_EARLIER)
email_service_mock.return_value.send_bulk_email.assert_called_once_with(
configuration_set_name="organizing_emails",
default_template_data=DEFAULT_TEMPLATE_DATA,
from_email=settings.FROM_EMAIL,
payload_array=[
make_payload(
1,
first_cambridge_assignment.user.email,
EXPIRED_AT,
first_cambridge_assignment.user.first_name,
first_cambridge_assignment.user.last_name,
),
make_payload(
2,
hayes_assignment.user.email,
EXPIRED_EARLIER,
hayes_assignment.user.first_name,
hayes_assignment.user.last_name,
),
],
reply_to_email=settings.REPLY_TO_EMAIL,
template="expiring_contacts_email",
application_name="supportal",
)
assert "Found 2 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users_send_all_to_flag(
first_cambridge_assignment,
hayes_assignment,
hayes_cambrdige_assignment,
expired_assignment,
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(
stdout=out, send=True, send_all_to="sgoldblatt+ts@elizabethwarren.com"
)
assert EmailSend.objects.all().count() == 0
email_service_mock.return_value.send_bulk_email.assert_called_once_with(
configuration_set_name="organizing_emails",
default_template_data=DEFAULT_TEMPLATE_DATA,
from_email=settings.FROM_EMAIL,
payload_array=[
make_payload(
1,
"sgoldblatt+ts@elizabethwarren.com",
EXPIRED_AT,
first_cambridge_assignment.user.first_name,
first_cambridge_assignment.user.last_name,
),
make_payload(
2,
"sgoldblatt+ts@elizabethwarren.com",
EXPIRED_EARLIER,
hayes_assignment.user.first_name,
hayes_assignment.user.last_name,
),
],
reply_to_email=settings.REPLY_TO_EMAIL,
template="expiring_contacts_email",
application_name="supportal",
)
assert "Found 2 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users_limit_flag(
first_cambridge_assignment,
hayes_assignment,
hayes_cambrdige_assignment,
expired_assignment,
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, limit=1, send=True)
assert EmailSend.objects.all().count() == 1
check_email_sends(first_cambridge_assignment.user, 1, EXPIRED_AT)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_unsuccessfully_contacted_assignments(
first_cambridge_assignment, expired_assignment
):
first_cambridge_assignment.create_contact_event(
result=CanvassResult.UNAVAILABLE_LEFT_MESSAGE
)
first_cambridge_assignment.save()
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 1
check_email_sends(
first_cambridge_assignment.user, 1, EXPIRED_AT, email_service_mock
)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dont_email_unsubscribed_user(first_cambridge_assignment, expired_assignment):
first_cambridge_assignment.user.unsubscribed_at = datetime.now(tz=timezone.utc)
first_cambridge_assignment.user.save()
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 0
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dont_email_user_who_was_emailed_recently(
first_cambridge_assignment, expired_assignment
):
EmailSend.objects.create(
user=first_cambridge_assignment.user,
template_name=EmailSend.EXPIRING_PROSPECTS,
payload={},
)
assert first_cambridge_assignment.user.unsubscribed_at is None
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 1
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 1
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_user_who_was_invited_recently(
first_cambridge_assignment, expired_assignment
):
EmailSend.objects.create(
user=first_cambridge_assignment.user,
template_name=EmailSend.INVITE_EMAIL,
payload={},
)
assert first_cambridge_assignment.user.unsubscribed_at is None
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 2
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 2
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_successfully_contacted_dont_email(
first_cambridge_assignment, expired_assignment
):
# Make sure that having a previous unsuccessful contact event doesn't cause
# the contact to get expired.
first_cambridge_assignment.create_contact_event(
result=CanvassResult.UNAVAILABLE_LEFT_MESSAGE
)
first_cambridge_assignment.create_contact_event(
result=CanvassResult.SUCCESSFUL_CANVASSED
)
first_cambridge_assignment.save()
out = StringIO()
email_expiring_users(stdout=out, send=True)
first_cambridge_assignment.refresh_from_db()
assert EmailSend.objects.all().count() == 0
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
def test_expire_zero_assignments():
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 0
assert "Found 0 users to email." in out.getvalue()
| 33.933806 | 88 | 0.725442 | 0 | 0 | 0 | 0 | 11,489 | 0.800404 | 0 | 0 | 1,716 | 0.119549 |
f0aa0b1b2287e2d9b265d630907c744aa79a38c2 | 9,128 | py | Python | reinforcement_learning/rl_hvac_coach_energyplus/src/eplus/envs/data_center_env.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 5 | 2019-01-19T23:53:35.000Z | 2022-01-29T14:04:31.000Z | reinforcement_learning/rl_hvac_coach_energyplus/src/eplus/envs/data_center_env.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 6 | 2020-01-28T23:08:49.000Z | 2022-02-10T00:27:19.000Z | reinforcement_learning/rl_hvac_coach_energyplus/src/eplus/envs/data_center_env.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 8 | 2020-12-14T15:49:24.000Z | 2022-03-23T18:38:36.000Z | import gym
from gym import error, spaces, utils
from gym.utils import seeding
from eplus.envs import pyEp
import socket
from eplus.envs.socket_builder import socket_builder
import numpy as np
import os
class DataCenterEnv(gym.Env):
def __init__(self, config):
#timestep=12, days=1, eplus_path=None,
# weather_file = 'weather/SPtMasterTable_587017_2012_amy.epw'):
cur_dir = os.path.dirname(__file__)
#print("File directory: ", cur_dir)
# buildings/1ZoneDataCenter/1ZoneDataCenter.idf is the EnergyPlus file
# used for this environment. The 1ZoneDataCenter folder also contains
# variables.cfg which configures the external input and output
# variables
self.idf_file = cur_dir + '/buildings/1ZoneDataCenter/1ZoneDataCenter.idf'
# EnergyPlus weather file
if "weather_file" in config:
self.weather_file = cur_dir + '/' + config["weather_file"]
else:
self.weather_file = cur_dir + '/weather/SPtMasterTable_587017_2012_amy.epw'
#self.weather_file = cur_dir + '/weather/SPtMasterTable_587017_2012_amy.epw'
if "eplus_path" in config:
self.eplus_path = config["eplus_path"]
else:
# Using EnergyPlus version 8.80, path to the executable
# Assuming Mac
self.eplus_path = '/Applications/EnergyPlus-8-8-0/'
# EnergyPlus number of timesteps in an hour
if "timestep" in config:
self.epTimeStep = config["timestep"]
else:
self.epTimeStep = 12
# EnergyPlus number of simulation days
if "days" in config:
self.simDays = config["days"]
else:
self.simDays = 1
# Number of steps per day
self.DAYSTEPS = int(24 * self.epTimeStep)
# Total number of steps
self.MAXSTEPS = int(self.simDays * self.DAYSTEPS)
# Time difference between each step in seconds
self.deltaT = (60/self.epTimeStep)*60
# Outputs given by EnergyPlus, defined in variables.cfg
self.outputs = []
# Inputs expected by EnergyPlus, defined in variables.cfg
self.inputs = []
# Current step of the simulation
self.kStep = 0
# Instance of EnergyPlus simulation
self.ep = None
# state can be all the inputs required to make a control decision
# getting all the outputs coming from EnergyPlus for the time being
self.observation_space = spaces.Box(np.array([0, -50, 0]), #zone temp, outdoor drybulb temp, relative humidity
np.array([60, 70, 100]), dtype=np.float32)
# actions are all the control inputs
#self.action_space = spaces.Tuple(( #spaces.Box(low=22, high=27, shape=(1,),dtype=np.float32), #cooling setpoint
# spaces.Box(low=6, high=7, shape=(1,),dtype=np.float32), #chiller setpoint
# spaces.Box(low=0, high=1, shape=(1,),dtype=np.float32) #lighting setpoint
# ))
self.clg_min = 20 #cooling setpoint min in celcius
self.clg_max = 35 #cooling setpoint max in celcius
self.htg_min = 5 #heating setpoint min in celcius
self.htg_max = 20 #heating setpoint max in celcius
#self.action_space = spaces.Box(np.array([self.clg_min,self.htg_min]),
# np.array([self.clg_max, self.htg_max]), dtype=np.float32)
# Normalized action space
self.action_space = spaces.Box(np.array([0,0]),
np.array([1,1]), dtype=np.float32)
def step(self, action):
# while(self.kStep < self.MAXSTEPS):
# current time from start of simulation
time = self.kStep * self.deltaT
# current time from start of day
dayTime = time % 86400
if dayTime == 0:
print("Day: ", int(self.kStep/self.DAYSTEPS)+1)
#inputs should be same as actions
#bring the actions in the correct range
#For Ray: assuming mean 0 and std dev 1 by ray
#action[0] = action[0]*(self.clg_max - self.clg_min)+(self.clg_min+self.clg_max)/2.0
#action[1] = action[1]*(self.htg_max - self.htg_min)+(self.htg_min+self.htg_max)/2.0
#For Coach: input is 0 to 1 range
action[0] = action[0]*(self.clg_max - self.clg_min)+(self.clg_min)
action[1] = action[1]*(self.htg_max - self.htg_min)+(self.htg_min)
#force action to be within limits
cooling_setpoint = np.clip(action, self.clg_min, self.clg_max)[0]
heating_setpoint = np.clip(action, self.htg_min, self.htg_max)[1]
self.inputs = [cooling_setpoint, heating_setpoint]
input_packet = self.ep.encode_packet_simple(self.inputs, time)
self.ep.write(input_packet)
#after EnergyPlus runs the simulation step, it returns the outputs
output_packet = self.ep.read()
self.outputs = self.ep.decode_packet_simple(output_packet)
#print("Outputs:", self.outputs)
if not self.outputs:
print("Outputs:", self.outputs)
print("Actions:", action)
next_state = self.reset()
return next_state, 0, False, {}
# reward needs to be a combination of energy and comfort requirement
energy_coeff = -0.00001
heating_coeff = -100
cooling_coeff = -100
energy = self.outputs[0]
zone_temperature = self.outputs[1] #taking mid-zone 2 as an example
heating_setpoint = 15 #fixed lower limit in celcius
cooling_setpoint = 30 #fixed upper limit in celcius
heating_penalty = max(heating_setpoint - zone_temperature, 0)
cooling_penalty = max(zone_temperature - cooling_setpoint, 0)
# punish if action is out of limits
action_penalty_coeff = -100
max_penalty = max(self.clg_min - action[0], 0)
min_penalty = max(action[0] - self.clg_max, 0)
action_penalty = action_penalty_coeff * (max_penalty + min_penalty)
max_penalty = max(self.htg_min - action[1], 0)
min_penalty = max(action[1] - self.htg_max, 0)
action_penalty += action_penalty_coeff * (max_penalty + min_penalty)
# final reward
reward = energy_coeff * energy \
+ heating_coeff * heating_penalty \
+ cooling_coeff * cooling_penalty \
+ action_penalty
# state can be all the inputs required to make a control decision
# zone temp, outside drybulb temp, outside wetbulb temp, relative humidity
next_state = np.array([self.outputs[1], self.outputs[2], self.outputs[4]])
# fake state space
#next_state = np.array([3, 2, 1, 0])
#print("energy: %.2f, reward: %.2f, action: %.2f, %.2f" \
# % (energy, reward, action[0], action[1]))
#print("zone temp: %.2f, drybulb: %.2f, humidity: %.2f"\
# %tuple(next_state))
# increment simulation step count
self.kStep += 1
# done when number of steps of simulation reaches its maximum (e.g. 1 day)
done = False
if self.kStep >= (self.MAXSTEPS):
#requires one more step to close the simulation
input_packet = self.ep.encode_packet_simple(self.inputs, time)
self.ep.write(input_packet)
#output is empty in the final step
#but it is required to read this output for termination
output_packet = self.ep.read()
last_output = self.ep.decode_packet_simple(output_packet)
print("Finished simulation")
print("Last action: ", action)
print("Last reward: ", reward)
done = True
self.ep.close()
self.ep = None
# extra information we want to pass
info = {}
# print("State:", next_state, "Reward:", reward)
return next_state, reward, done, info
def reset(self):
# stop existing energyplus simulation
if self.ep:
print("Closing the old simulation and socket.")
self.ep.close() #needs testing: check if it stops the simulation
self.ep = None
# start new simulation
print("Starting a new simulation..")
self.kStep = 0
idf_dir = os.path.dirname(self.idf_file)
builder = socket_builder(idf_dir)
configs = builder.build()
self.ep = pyEp.ep_process('localhost', configs[0], self.idf_file, self.weather_file, self.eplus_path)
# read the initial outputs from EnergyPlus
# these outputs are from warmup phase, so this does not count as a simulation step
self.outputs = self.ep.decode_packet_simple(self.ep.read())
return np.array([self.outputs[1], self.outputs[2], self.outputs[4]])
#return np.array([3,2,1,0])
def render(self, mode='human', close=False):
pass
| 41.490909 | 120 | 0.60528 | 8,923 | 0.977542 | 0 | 0 | 0 | 0 | 0 | 0 | 3,918 | 0.429229 |
f0ab28142605e757508c738f9eafe63973378e0d | 55 | py | Python | test1.py | ktbyers/pyneta | 1690ce5a6ddb640198ccf3bca26f32a65d772b92 | [
"Apache-2.0"
] | 2 | 2018-11-08T09:20:42.000Z | 2021-07-15T18:12:06.000Z | test1.py | ktbyers/pyneta | 1690ce5a6ddb640198ccf3bca26f32a65d772b92 | [
"Apache-2.0"
] | null | null | null | test1.py | ktbyers/pyneta | 1690ce5a6ddb640198ccf3bca26f32a65d772b92 | [
"Apache-2.0"
] | null | null | null | print("hello")
while True:
print("Infinite loop")
| 11 | 26 | 0.654545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.4 |
f0ad10d1d55fd2bdc2c491c48aba252ed553fa31 | 3,568 | py | Python | srblib/__init__.py | srbcheema1/srblib | 26146cb0d5586548da5f97a9fe3af355cd97f3ca | [
"MIT"
] | 2 | 2019-04-03T00:51:54.000Z | 2019-05-16T10:33:44.000Z | srblib/__init__.py | srbcheema1/srblib | 26146cb0d5586548da5f97a9fe3af355cd97f3ca | [
"MIT"
] | null | null | null | srblib/__init__.py | srbcheema1/srblib | 26146cb0d5586548da5f97a9fe3af355cd97f3ca | [
"MIT"
] | null | null | null | __version__ = '0.1.6'
__mod_name__ = 'srblib'
from .colour import Colour # A class with color names and a static print function which prints coloured output to stderr
from .debugger import debug # a boolean whose value can be changed in ~/.config/srblib/debug.json
from .debugger import on_appveyor # a boolean value which is true if code is running on appveyor
from .debugger import on_ci # a boolean value which is true if it code is running on CI
from .debugger import on_srbpc # a boolean value which is true if it is my PC i.e. srb-pc
from .debugger import on_travis # a boolean value which is true if code is running on travis
from .dependency import install_arg_complete # A function to append line of argcomplete in ~/.bashrc
from .dependency import install_dependencies # A function that takes a special data-template to install dependencies
from .dependency import install_dependencies_pkg # similar but based on package-managers (Recommended)
from .dependency import is_installed # checks if the following application is installed on machine or not
from .dependency import remove_dependencies # Opposite of install_dependencies
from .dependency import remove_dependencies_pkg # Opposite of install_dependencies_pkg
from .email import email # a function
from .email import Email # a class to send email
from .files import file_extension # returns back the extention of a file from filepath, may return '' if no ext
from .files import file_name # returns filename from a filepath
from .files import remove # removes a path recursively. it deletes all files and folders under that path
from .files import verify_file # verify that a file exists. if not it will create one. also creates parents if needed
from .files import verify_folder # verify that a folder exists. creates one if not there. also creates parents if needed
from .file_importer import Module # a class to import modules
# one cant declare more attributes in frozen class
from .frozen import FrozenClass # A class to be inherited to make a class frozen. i.e. no more attributes can be added.
from .path import abs_path # returns absolute path of a path given. works on windows as well as linux.
from .path import is_child_of # returns if a given path is child(direct/indirect) of the second path given.
from .path import parent_dir # returns Nth parent of a path. default it returns 1st parent
from .path import relative_path # returns relative path if given absolute path
from .requests import debug_res # print debug output of response.
from .srb_bank import SrbBank # A class to store things for later use of a program. can act as a database
from .srb_json import SrbJson # A class to use json file more easily
from .srb_hash import path_hash # get hash of full path (recursively)
from .srb_hash import str_hash # get hash of string
from .soup import Soup # A class to make scrapping easier
from .system import get_os_name # returns OS name. values are windows, linux or mac
from .system import os_name # value of get_os_name
from .system import on_windows # True if system is windows OS
from .tabular import Tabular # A class to process tabular data
from .util import line_adder # append a line if not present in a given file
from .util import show_dependency_error_and_exit # display missing dependency error and exit
from .util import similarity # returns percentage of similarity of two strings
from .util import top # first element of list or set or dict(first key)
from .util import dump_output # variable containing string value ` > /dev/null 2>&1 ` or ` > nul 2>&1 `.
| 60.474576 | 120 | 0.7912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,189 | 0.613509 |
f0ae594509e7de16319dd68dba06754107fabb34 | 2,880 | py | Python | src/msfbe/handlers/CountiesHandler.py | Noella-W/MethaneSourceFinder-BackEndDocker-1 | e935a4bbbcdf2f9bf11814a96ebbe1d8b59eed2b | [
"Apache-2.0"
] | null | null | null | src/msfbe/handlers/CountiesHandler.py | Noella-W/MethaneSourceFinder-BackEndDocker-1 | e935a4bbbcdf2f9bf11814a96ebbe1d8b59eed2b | [
"Apache-2.0"
] | 4 | 2021-06-08T23:39:15.000Z | 2022-03-12T00:49:52.000Z | src/msfbe/handlers/CountiesHandler.py | Noella-W/MethaneSourceFinder-BackEndDocker-1 | e935a4bbbcdf2f9bf11814a96ebbe1d8b59eed2b | [
"Apache-2.0"
] | 1 | 2021-09-11T05:30:07.000Z | 2021-09-11T05:30:07.000Z | """
Copyright (c) 2018 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import json
from msfbe.webmodel import BaseHandler, service_handler
import requests
import psycopg2
class CountiesColumns:
    """Positional indices of the columns selected by the counties query
    in CountiesHandlerImpl; used to unpack the raw row tuples."""
    COUNTY_ID = 0
    NAME = 1
    AREA = 2
    PERIMETER = 3
    CACOA = 4
    CACOA_ID = 5
    DSSLV = 6
    CONUM = 7
class SimpleResult(object):
    """Thin wrapper giving any JSON-serializable value a toJson() method."""

    def __init__(self, result):
        # Keep a reference to the payload; serialization is deferred.
        self.result = result

    def toJson(self):
        """Serialize the wrapped payload to a JSON string."""
        return json.dumps(self.result)
@service_handler
class CountiesHandlerImpl(BaseHandler):
    """Spatial lookup service: returns the counties whose shapes intersect
    a caller-supplied lat/lon bounding box (defaults to the whole globe)."""

    name = "Counties Service"
    path = "/counties"
    description = ""
    params = {}
    singleton = True

    def __init__(self):
        BaseHandler.__init__(self)

    def __query(self, config, maxLat, maxLon, minLat, minLon):
        """Run the PostGIS intersection query and return raw row tuples.

        The bounding box values are bound as query parameters by psycopg2
        (not interpolated into the SQL text), so no injection risk here.
        """
        sql = """
        select
            county_id,
            name,
            area,
            perimeter,
            cacoa,
            cacoa_id,
            dsslv,
            conum
        from
            counties as c
        where
            ST_Intersects(c.county_shape, ST_MakeEnvelope(%s, %s, %s, %s, 4326));
        """
        # Connection settings come from the web config's "database" section.
        conn = psycopg2.connect(dbname=config.get("database", "db.database"),
                                user=config.get("database", "db.username"),
                                password=config.get("database", "db.password"),
                                host=config.get("database", "db.endpoint"),
                                port=config.get("database", "db.port"))
        cur = conn.cursor()
        # Query
        # ST_MakeEnvelope expects (xmin, ymin, xmax, ymax), i.e. lon before lat.
        cur.execute(sql,
                    (
                        minLon,
                        minLat,
                        maxLon,
                        maxLat
                    )
                    )
        results = cur.fetchall()
        cur.close()
        conn.close()
        return results

    def __format_results(self, rows):
        """Map raw row tuples to JSON-ready dicts via CountiesColumns indices."""
        results = []
        for row in rows:
            results.append({
                "county_id": row[CountiesColumns.COUNTY_ID],
                "name": row[CountiesColumns.NAME],
                "area": row[CountiesColumns.AREA],
                "perimeter": row[CountiesColumns.PERIMETER],
                "cacoa": row[CountiesColumns.CACOA],
                "cacoa_id": row[CountiesColumns.CACOA_ID],
                "dsslv": row[CountiesColumns.DSSLV],
                "conum": row[CountiesColumns.CONUM],
            })
        return results

    def handle(self, computeOptions, **args):
        """Service entry point: read bbox args, query, and wrap the result."""
        maxLat = computeOptions.get_decimal_arg("maxLat", 90)
        maxLon = computeOptions.get_decimal_arg("maxLon", 180)
        minLat = computeOptions.get_decimal_arg("minLat", -90)
        minLon = computeOptions.get_decimal_arg("minLon", -180)
        rows = self.__query(args["webconfig"], maxLat, maxLon, minLat, minLon)
        results = self.__format_results(rows)
        return SimpleResult(results)
| 26.666667 | 79 | 0.546181 | 2,644 | 0.918056 | 0 | 0 | 2,368 | 0.822222 | 0 | 0 | 624 | 0.216667 |
f0aee58dff262da24995b34a28866e048d9466ad | 347 | py | Python | ai for simple games/connect 4/tictoc.py | gustasvs/AI | 23360a8865e8211568594c2b2ced11dcdc9b0006 | [
"MIT"
] | 1 | 2022-02-03T18:21:28.000Z | 2022-02-03T18:21:28.000Z | ai for simple games/connect 4/tictoc.py | gustasvs/AI | 23360a8865e8211568594c2b2ced11dcdc9b0006 | [
"MIT"
] | null | null | null | ai for simple games/connect 4/tictoc.py | gustasvs/AI | 23360a8865e8211568594c2b2ced11dcdc9b0006 | [
"MIT"
] | null | null | null | import time
def tic():
    """Start a stopwatch, mirroring MATLAB's ``tic``.

    Records the current wall-clock time in the module-level variable
    ``startTime_for_tictoc`` for a later call to ``toc()``.
    """
    global startTime_for_tictoc
    startTime_for_tictoc = time.time()
def toc():
    """Print seconds elapsed since the last ``tic()``, MATLAB-style.

    Prints a notice instead when ``tic()`` has not been called yet.
    """
    # Guard clause: nothing to measure if the stopwatch was never started.
    if 'startTime_for_tictoc' not in globals():
        print("Toc: start time not set")
        return
    elapsed = time.time() - startTime_for_tictoc
    print("Elapsed time is " + str(elapsed) + " seconds.")
f0b1d2bc676f5219d5c50eeed54f3ef0c2068b76 | 243 | py | Python | contrib/enginedesign/openmdao.examples.enginedesign/openmdao/examples/enginedesign/releaseinfo.py | mjfwest/OpenMDAO-Framework | a5521f47ad7686c25b203de74e1c7dff5fd7a52b | [
"Apache-2.0"
] | 69 | 2015-01-02T19:10:08.000Z | 2021-11-14T04:42:28.000Z | contrib/enginedesign/openmdao.examples.enginedesign/openmdao/examples/enginedesign/releaseinfo.py | jcchin/OpenMDAO-Framework | 038e89b06da1c74f00918f4c6fbd8bd365e25657 | [
"Apache-2.0"
] | 3 | 2015-01-15T23:08:18.000Z | 2015-03-11T16:57:35.000Z | contrib/enginedesign/openmdao.examples.enginedesign/openmdao/examples/enginedesign/releaseinfo.py | jcchin/OpenMDAO-Framework | 038e89b06da1c74f00918f4c6fbd8bd365e25657 | [
"Apache-2.0"
] | 31 | 2015-09-16T00:37:35.000Z | 2022-01-10T06:27:55.000Z |
# This file is automatically generated
# Release metadata stamped in by the build/release tooling — do not edit by hand.
__version__ = '0.10.3.2'
# Commit message(s) associated with this release snapshot.
__comments__ = """too many spaces :/
Merge pull request #1848 from swryan/work"""
# Timestamp of the release commit (local time with UTC offset).
__date__ = '2014-11-15 09:50:34 -0500'
# Git SHA-1 of the exact commit this release was built from.
__commit__ = '97c66aaecfad3451bc6a0b1cae1fce4c0595037a'
| 27 | 55 | 0.753086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.753086 |
f0b1f9e76ce0827997c1af6729dbd897a8b9fe82 | 779 | py | Python | Utils/get_skeleton.py | Kohulan/DECIMER-Image-Segmentation | 68ee9a9693e5bad5c41826d28e2d6558a20fe21f | [
"MIT"
] | 29 | 2021-01-08T13:48:18.000Z | 2022-01-17T08:29:00.000Z | Utils/get_skeleton.py | Kohulan/DECIMER-Image-Segmentation | 68ee9a9693e5bad5c41826d28e2d6558a20fe21f | [
"MIT"
] | 23 | 2021-01-07T21:43:21.000Z | 2022-03-14T21:52:17.000Z | Utils/get_skeleton.py | Kohulan/DECIMER-Image-Segmentation | 68ee9a9693e5bad5c41826d28e2d6558a20fe21f | [
"MIT"
] | 8 | 2021-01-08T05:39:21.000Z | 2022-02-14T10:06:38.000Z | '''
* This Software is under the MIT License
* Refer to LICENSE or https://opensource.org/licenses/MIT for more information
* Written by ©Kohulan Rajan 2020
'''
from skimage import img_as_float
from skimage import io, color, morphology
import matplotlib.pyplot as plt
def get_skeleton_and_thin(input_image):
    """Skeletonize and thin the image at path ``input_image``.

    The image is converted to grayscale floats in [0, 1]; dark pixels
    (< 0.5) are treated as foreground ink. As a side effect, the inverted
    masks are written to ``skeletonized_output.png`` and
    ``thinned_output.png`` in the current working directory.

    Returns:
        tuple(ndarray, ndarray): ``(skeletonized, thinned)`` boolean masks.
    """
    image = img_as_float(color.rgb2gray(io.imread(input_image)))
    # Foreground = dark strokes on a light background.
    image_binary = image < 0.5
    out_skeletonize = morphology.skeletonize(image_binary)
    out_thin = morphology.thin(image_binary)
    # Fix: the previous version created a 1x3 matplotlib figure that was
    # never drawn to and never closed (a figure leak per call) — plt.imsave
    # does not need a figure, so the subplots call is removed.
    # 255 - mask inverts the boolean mask (True -> 254, False -> 255) so the
    # saved PNG shows dark strokes on white.
    plt.imsave('thinned_output.png', 255 - out_thin, cmap='gray')
    plt.imsave('skeletonized_output.png', 255 - out_skeletonize, cmap='gray')
    return out_skeletonize, out_thin
| 32.458333 | 79 | 0.770218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.347436 |
f0b23ace85262a4a9788725f73969adf1044950b | 1,328 | py | Python | Task/CSV-to-HTML-translation/Python/csv-to-html-translation-5.py | mullikine/RosettaCodeData | 4f0027c6ce83daa36118ee8b67915a13cd23ab67 | [
"Info-ZIP"
] | 1 | 2021-05-05T13:42:20.000Z | 2021-05-05T13:42:20.000Z | Task/CSV-to-HTML-translation/Python/csv-to-html-translation-5.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/CSV-to-HTML-translation/Python/csv-to-html-translation-5.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | from csv import DictReader
from xml.etree import ElementTree as ET
def csv2html_robust(txt, header=True, attr=None):
    """Convert CSV text to an HTML table, returned as serialized bytes.

    Args:
        txt: CSV content; rows separated by newlines, first row = header.
        header: when True, emit the field names as a THEAD row.
        attr: optional dict of per-tag attribute dicts keyed by 'TABLE',
            'THEAD', 'TBODY', 'TR' and 'TD'.

    Returns:
        bytes: the HTML serialization of the table.
    """
    # Bug fix: the documented default attr=None used to crash on
    # attr.get(...); treat a missing mapping as "no extra attributes".
    if attr is None:
        attr = {}
    # Use DictReader because, despite what the docs say, reader() doesn't
    # return an object with .fieldnames
    # (DictReader expects an iterable that returns lines, so split on \n)
    reader = DictReader(txt.split('\n'))

    table = ET.Element("TABLE", **attr.get('TABLE', {}))
    thead_tr = ET.SubElement(
        ET.SubElement(table, "THEAD", **attr.get('THEAD', {})),
        "TR")
    tbody = ET.SubElement(table, "TBODY", **attr.get('TBODY', {}))

    if header:
        for name in reader.fieldnames:
            ET.SubElement(thead_tr, "TD").text = name

    for row in reader:
        tr_elem = ET.SubElement(tbody, "TR", **attr.get('TR', {}))
        # Use reader.fieldnames to query `row` in the correct order.
        # (`row` isn't an OrderedDict prior to Python 3.6)
        for field in reader.fieldnames:
            td_elem = ET.SubElement(tr_elem, "TD", **attr.get('TD', {}))
            td_elem.text = row[field]

    return ET.tostring(table, method='html')
# Demo driver: render `csvtxt` with styled table sections and print the HTML.
# NOTE(review): `csvtxt` is not defined in this excerpt — it must be supplied
# by the surrounding program before this runs standalone.
htmltxt = csv2html_robust(csvtxt, True, {
    'TABLE': {'border': "1", 'summary': "csv2html extra program output"},
    'THEAD': {'bgcolor': "yellow"},
    'TBODY': {'bgcolor': "orange"}
})
print(htmltxt.decode('utf8'))
| 34.947368 | 73 | 0.618223 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 471 | 0.354669 |
f0b260ad5df3102bf13fd8cb7dbdddf25e662251 | 2,763 | py | Python | src/gausskernel/dbmind/tools/sqldiag/load_sql_from_wdr.py | opengauss-mirror/openGauss-graph | 6beb138fd00abdbfddc999919f90371522118008 | [
"MulanPSL-1.0"
] | null | null | null | src/gausskernel/dbmind/tools/sqldiag/load_sql_from_wdr.py | opengauss-mirror/openGauss-graph | 6beb138fd00abdbfddc999919f90371522118008 | [
"MulanPSL-1.0"
] | null | null | null | src/gausskernel/dbmind/tools/sqldiag/load_sql_from_wdr.py | opengauss-mirror/openGauss-graph | 6beb138fd00abdbfddc999919f90371522118008 | [
"MulanPSL-1.0"
] | null | null | null | import os
import re
import sys
import argparse
from preprocessing import templatize_sql
from utils import DBAgent, check_time_legality
__description__ = "Get sql information based on wdr."
def parse_args():
    """Build the command-line interface for this script and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(
        description=__description__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('--port', required=True, type=int,
                            help="Port of database service.")
    arg_parser.add_argument('--start-time', required=True,
                            help="Start time of query")
    arg_parser.add_argument('--finish-time', required=True,
                            help="Finish time of query")
    arg_parser.add_argument('--save-path', default='sample_data/data.csv',
                            help="Path to save result")
    return arg_parser.parse_args()
def mapper_function(value):
    """Normalize one raw (query, execution_time) row.

    The SQL text is templatized, and the execution time is converted from
    microseconds to seconds.
    """
    raw_query, raw_time = value[0], value[1]
    return (templatize_sql(raw_query), float(raw_time) / 1000000)
def wdr_features(start_time, end_time, port, database='postgres'):
    """Fetch slow-SQL (query, execution_time) rows from the database.

    When both start_time and end_time are given, the WDR slow-SQL view is
    queried for that window; otherwise statement_history is read directly.
    Rows are filtered to DML statements with non-zero execution time and
    normalized via mapper_function.

    Returns a list of (templatized_query, seconds) tuples, or None when the
    database returned no rows.
    """
    sql = 'select query, execution_time from statement_history '
    if start_time and end_time:
        # NOTE(review): the timestamps are interpolated into the SQL text;
        # they are validated by check_time_legality in the entry point, but
        # parameterized queries would be safer.
        sql = "select query, execution_time from dbe_perf.get_global_slow_sql_by_timestamp" \
              " (\'{start_time}\',\'{end_time}\')" \
            .format(start_time=start_time, end_time=end_time)
    with DBAgent(port=port, database=database) as db:
        result = db.fetch_all_result(sql)
        if result:
            # Keep only DML statements that actually took time to run.
            result = list(filter(lambda x: re.match(r'UPDATE|SELECT|DELETE|INSERT', x[0]) and x[1] != 0, result))
            result = list(map(mapper_function, result))
            return result
def save_csv(result, save_path):
    """Write (query, execution_time) rows to ``save_path`` as CSV.

    Missing parent directories are created. A falsy ``save_path`` is a
    silent no-op (preserving the original behavior).

    Fix: rows are now written through the csv module so queries that
    themselves contain commas or quotes are escaped correctly instead of
    corrupting the file; lineterminator='\\n' keeps the original Unix line
    endings.
    """
    import csv

    if not save_path:
        return
    save_path = os.path.realpath(save_path)
    if not os.path.exists(os.path.dirname(save_path)):
        os.makedirs(os.path.dirname(save_path))
    # newline='' lets the csv module control line endings itself.
    with open(save_path, mode='w', newline='') as f:
        writer = csv.writer(f, lineterminator='\n')
        for query, execution_time in result:
            writer.writerow([query, execution_time])
# Script entry point: parse CLI args, validate the optional time window,
# pull slow-SQL features from the database and persist them as CSV.
if __name__ == '__main__':
    args = parse_args()
    start_time, finish_time = args.start_time, args.finish_time
    port = args.port
    save_path = args.save_path
    # Reject timestamps that don't match '%Y-%m-%d %H:%M:%S' before querying.
    if start_time and not check_time_legality(start_time):
        print("error time format '{time}', using: {date_format}.".format(time=start_time,
                                                                         date_format='%Y-%m-%d %H:%M:%S'))
        sys.exit(-1)
    if finish_time and not check_time_legality(finish_time):
        print("error time format '{time}', using: {date_format}.".format(time=finish_time,
                                                                         date_format='%Y-%m-%d %H:%M:%S'))
        sys.exit(-1)
    res = wdr_features(start_time, finish_time, port)
    save_csv(res, save_path)
| 40.043478 | 113 | 0.641694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.204488 |
f0b3c832cf8e1ee83bee7a2645bdcc49172d3621 | 344 | py | Python | program_train/hachina1.py | zhujisheng/HAComponent | 29c44a5ea3090d748738830a50e2d25c07e1bac8 | [
"Apache-2.0"
] | 39 | 2017-12-17T13:51:13.000Z | 2022-02-25T02:57:39.000Z | program_train/hachina1.py | vitc-123/HAComponent | 29c44a5ea3090d748738830a50e2d25c07e1bac8 | [
"Apache-2.0"
] | 5 | 2019-05-30T07:09:49.000Z | 2021-07-15T02:53:49.000Z | program_train/hachina1.py | vitc-123/HAComponent | 29c44a5ea3090d748738830a50e2d25c07e1bac8 | [
"Apache-2.0"
] | 28 | 2018-01-05T10:48:28.000Z | 2021-12-07T13:59:22.000Z | """
文件名:hachina.py.
演示程序,三行代码创建一个新设备.
"""
def setup(hass, config):
"""HomeAssistant在配置文件中发现hachina域的配置后,会自动调用hachina.py文件中的setup函数."""
# 设置实体hachina.Hello_World的状态。
# 注意1:实体并不需要被创建,只要设置了实体的状态,实体就自然存在了
# 注意2:实体的状态可以是任何字符串
hass.states.set("hachina.hello_world", "太棒了!")
# 返回True代表初始化成功
return True
| 20.235294 | 72 | 0.671512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 470 | 0.821678 |
f0b5d4652c83e4ec1fe94bf488284b069e276dc7 | 4,263 | py | Python | simple_ddl_parser/dialects/hql.py | swiatek25/simple-ddl-parser | b46f99b4e1838718bc4024cd281a66cd1b78b165 | [
"MIT"
] | null | null | null | simple_ddl_parser/dialects/hql.py | swiatek25/simple-ddl-parser | b46f99b4e1838718bc4024cd281a66cd1b78b165 | [
"MIT"
] | null | null | null | simple_ddl_parser/dialects/hql.py | swiatek25/simple-ddl-parser | b46f99b4e1838718bc4024cd281a66cd1b78b165 | [
"MIT"
] | null | null | null | from simple_ddl_parser.utils import check_spec, remove_par
class HQL:
    """PLY (yacc) production handlers for Hive-QL specific DDL clauses.

    NOTE: each ``p_*`` method's docstring is a grammar rule consumed by the
    parser generator — those docstrings are functional and must not be
    reworded. Each handler folds its parsed clause into the dict carried in
    ``p[0]``.
    """

    def p_expression_location(self, p):
        """expr : expr LOCATION STRING"""
        # Record the table's storage LOCATION path.
        p[0] = p[1]
        p_list = list(p)
        p[0]["location"] = p_list[-1]

    def p_row_format(self, p):
        """row_format : ROW FORMAT SERDE
        | ROW FORMAT
        """
        p_list = list(p)
        # Remember whether a SERDE clause follows the ROW FORMAT keywords.
        p[0] = {"serde": p_list[-1] == "SERDE"}

    def p_expression_row_format(self, p):
        """expr : expr row_format id
        | expr row_format STRING
        """
        p[0] = p[1]
        p_list = list(p)
        if p[2]["serde"]:
            # SERDE form carries the serializer's Java class name.
            format = {"serde": True, "java_class": p_list[-1]}
        else:
            format = check_spec(p_list[-1])
        p[0]["row_format"] = format

    def p_expression_with_serde(self, p):
        """expr : expr WITH SERDEPROPERTIES LP assigment RP"""
        p[0] = p[1]
        p_list = list(p)
        # Merge the SERDEPROPERTIES pair into the existing row_format entry.
        row_format = p[0]["row_format"]
        row_format["properties"] = p_list[-2]
        p[0]["row_format"] = row_format

    def p_expression_tblproperties(self, p):
        """expr : expr TBLPROPERTIES multi_assigments"""
        p[0] = p[1]
        p[0]["tblproperties"] = list(p)[-1]

    def p_multi_assigments(self, p):
        """multi_assigments : LP assigment
        | multi_assigments RP
        | multi_assigments COMMA assigment RP"""
        # Accumulate a parenthesized, comma-separated assignment list
        # into a single dict.
        p_list = remove_par(list(p))
        p[0] = p_list[1]
        p[0].update(p_list[-1])

    def p_assigment(self, p):
        """assigment : id id id
        | STRING id STRING
        | id id STRING
        | STRING id id
        | STRING id"""
        p_list = remove_par(list(p))
        if "state" in self.lexer.__dict__:
            # The lexer kept the raw value aside; resolve it from its state.
            p[0] = {p[1]: self.lexer.state.get(p_list[-1])}
        else:
            if "=" in p_list[-1]:
                # 'key=value' arrived as one token; keep only the value part.
                p_list[-1] = p_list[-1].split("=")[-1]
            p[0] = {p_list[1]: p_list[-1]}

    def p_expression_comment(self, p):
        """expr : expr COMMENT STRING"""
        p[0] = p[1]
        p_list = list(p)
        p[0]["comment"] = check_spec(p_list[-1])

    def p_expression_terminated_by(self, p):
        """expr : expr id TERMINATED BY id
        | expr id TERMINATED BY STRING
        """
        p[0] = p[1]
        p_list = list(p)
        # p[2] names what is terminated (e.g. FIELDS, LINES).
        p[0][f"{p[2].lower()}_terminated_by"] = check_spec(p_list[-1])

    def p_expression_map_keys_terminated_by(self, p):
        """expr : expr MAP KEYS TERMINATED BY id
        | expr MAP KEYS TERMINATED BY STRING
        """
        p[0] = p[1]
        p_list = list(p)
        p[0]["map_keys_terminated_by"] = check_spec(p_list[-1])

    def p_expression_skewed_by(self, p):
        """expr : expr SKEWED BY LP id RP ON LP pid RP"""
        p[0] = p[1]
        p_list = remove_par(list(p))
        # Skew key column plus the ON-values it is skewed on.
        p[0]["skewed_by"] = {"key": p_list[4], "on": p_list[-1]}

    def p_expression_collection_terminated_by(self, p):
        """expr : expr COLLECTION ITEMS TERMINATED BY id
        | expr COLLECTION ITEMS TERMINATED BY STRING
        """
        p[0] = p[1]
        p_list = list(p)
        p[0]["collection_items_terminated_by"] = check_spec(p_list[-1])

    def p_expression_stored_as(self, p):
        """expr : expr STORED AS id
        | expr STORED AS id STRING
        | expr STORED AS id STRING id STRING
        """
        p[0] = p[1]
        p_list = list(p)
        if len(p_list) >= 6:
            # only input or output format
            p[0]["stored_as"] = {p_list[-2].lower(): p_list[-1]}
            if len(p_list) == 8:
                # both input & output
                p[0]["stored_as"].update({p_list[-4].lower(): p_list[-3]})
        else:
            p[0]["stored_as"] = p_list[-1]

    def p_expression_partitioned_by_hql(self, p):
        """expr : expr PARTITIONED BY pid_with_type"""
        p[0] = p[1]
        p_list = list(p)
        p[0]["partitioned_by"] = p_list[-1]

    def p_pid_with_type(self, p):
        """pid_with_type : LP column
        | pid_with_type COMMA column
        | pid_with_type RP
        """
        # Collect the parenthesized column list into a flat Python list.
        p_list = remove_par(list(p))
        if not isinstance(p_list[1], list):
            p[0] = [p_list[1]]
        else:
            p[0] = p_list[1]
        if len(p_list) > 2:
            p[0].append(p_list[-1])
| 31.345588 | 74 | 0.524513 | 4,201 | 0.985456 | 0 | 0 | 0 | 0 | 0 | 0 | 1,498 | 0.351396 |
f0b7d59823b393f5c64dfa9a82c8f3f19e0a14bc | 5,627 | py | Python | pydaemon/tx_sms_service_.py | tmkasun/pysmsgate | 42c06f1e8e3598697844fd9b098f314a24709777 | [
"Apache-2.0"
] | null | null | null | pydaemon/tx_sms_service_.py | tmkasun/pysmsgate | 42c06f1e8e3598697844fd9b098f314a24709777 | [
"Apache-2.0"
] | null | null | null | pydaemon/tx_sms_service_.py | tmkasun/pysmsgate | 42c06f1e8e3598697844fd9b098f314a24709777 | [
"Apache-2.0"
] | null | null | null | import json
import sys
import time
from twisted.internet.task import deferLater
from twisted.web import http
from twisted.web.resource import Resource
from twisted.web.server import Site
from twisted.internet import reactor
from gsmmodem.modem import GsmModem, SentSms
from gsmmodem.exceptions import TimeoutException, PinRequiredError, IncorrectPinError
from config import config
from libs.services import modem_service
from libs import modem_manager
class Sms(Resource):
    """Twisted resource handling /sms/<serviceType> requests (Python 2).

    Only the 'send' service type is implemented: a POST carrying
    `mobile_number` and `message` args queues the message for sending and
    immediately answers with a JSON acknowledgment.
    """

    isLeaf = True

    def __init__(self, serviceType):
        Resource.__init__(self)
        # Sub-path after /sms/, e.g. 'send'.
        self.serviceType = serviceType

    def render_POST(self, request):
        if self.serviceType == 'send':
            print "DEBUG: Got POST a request from {}".format(request.getClientIP())
            # global debugObject
            # reactor.callLater(2,reactor.stop)
            # debugObject = request
            print "DEBUG: ",
            print(request.args)
            # TODO: Return JSON with status and ACK of sending message
            # TODO: Use inline call back ratherthan blocking call
            # Defer the actual send so the HTTP response returns immediately.
            d = deferLater(reactor, 0, lambda: request)
            d.addCallback(self._delayedRender)
            request.responseHeaders.addRawHeader(b"content-type", b"application/json")
            timestamp = int(time.time())
            # NOTE(review): status 'sent' is reported before the send has
            # actually happened; refid is not tracked yet.
            return_value = {
                u'result': u'true',
                u'timestamp': timestamp,
                u'status': u'sent',
                u'refid': u'N/A',
            }
            return json.dumps(return_value)

    def _delayedRender(self, request):
        """Deferred callback: validate the number and dispatch the SMS."""
        mobile_number = request.args['mobile_number'][0]
        if not (self.isMobile(mobile_number)):
            return "Invalid mobile number: {}\nerror code:-1".format(mobile_number)
        message = request.args['message'][0]
        #TODO: find why this class var is not resolved not Service.debug_mode:
        # NOTE(review): the class attribute is spelled `Service.debugMode`
        # (camelCase), which is likely why the intended check was replaced
        # by `if True:` — messages are always sent.
        if True:
            print("DEBUG: Running delayed job")
            sendSms(mobile_number, message)
        else:
            print("[DEBUG_MODE]: Message = {} , \nmobile number = {}".format(mobile_number, message))

    def isMobile(self, number):
        """Return True for a 10-digit numeric string, else False."""
        try:
            int(number)
            if (len(number) != 10):
                return False
            return True
        except ValueError:
            return False
def sendSms(destination, message, deliver=False):
    """Send `message` to `destination` via a randomly chosen modem.

    When deliver=True, blocks waiting for the delivery report and reports
    delivery success/failure; otherwise only confirms submission. Timeouts
    are caught and logged rather than raised.
    """
    if deliver:
        print ('\nSending SMS and waiting for delivery report...')
    else:
        print('\nSending SMS \nmessage ({}) \nto ({})...'.format(message, destination))
    try:
        # Spread load across the attached modems.
        modem = modem_manager.modems.get_random_modem()
        sms = modem.sendSms(destination, message, waitForDeliveryReport=deliver)
    except TimeoutException:
        print('Failed to send message: the send operation timed out')
    else:
        if sms.report:
            print('Message sent{0}'.format(
                ' and delivered OK.' if sms.status == SentSms.DELIVERED else ', but delivery failed.'))
        else:
            print('Message sent.')
class UnknownService(Resource):
    """Catch-all resource returning a JSON 404 describing the bad request."""

    isLeaf = True

    def render(self, request):
        # Same payload regardless of HTTP method.
        return self.error_info(request)

    def error_info(self, request):
        """Mark the response 404/JSON and echo details of the request."""
        request.responseHeaders.addRawHeader(b"content-type", b"application/json")
        request.setResponseCode(http.NOT_FOUND)
        client_info = {
            u'host': request.client.host,
            u'port': request.client.port,
            u'type': request.client.type,
        }
        request_info = {
            u'args': request.args,
            u'client': client_info,
            u'code': request.code,
            u'method': request.method,
            u'path': request.path,
        }
        return json.dumps({
            u'result': u'false',
            u'reason': u'Unknown Service',
            u'request': request_info,
        })
class Service(Resource):
    """Root resource: routes /sms, /modem and /ping; everything else 404s."""
    # isLeaf = True
    # Class-level flag so other handlers can consult the debug setting.
    # NOTE(review): Sms._delayedRender's comment refers to
    # `Service.debug_mode` (snake_case) while the attribute here is
    # `debugMode` (camelCase) — likely why its debug check is disabled.
    debugMode = False

    def __init__(self, debugMode):
        Resource.__init__(self)
        Service.debugMode = debugMode

    def getChild(self, path, request):
        # First URL component selects the sub-service; the following
        # component (request.postpath[0]) selects the action within it.
        if path == "sms":
            return Sms(request.postpath[0]) # Get the next URL component
        elif path == "modem":
            return modem_service.ModemService(request.postpath[0])
        elif path == "ping":
            return Ping()
        else:
            return UnknownService()

    def render_GET(self, request):
        # A bare GET on the root answers with a simple JSON ok.
        request.responseHeaders.addRawHeader(b"content-type", b"application/json")
        return_value = {u'result': u'ok'}
        return json.dumps(return_value)

    def restart(self):
        # Placeholder — restart logic is not implemented yet.
        pass
class Ping(Resource):
    """Liveness endpoint: GET answers with a JSON 'pong' and a timestamp."""

    isLeaf = True

    def render_GET(self, request):
        request.responseHeaders.addRawHeader(b"content-type", b"application/json")
        payload = {
            u'result': u'true',
            u'timestamp': int(time.time()),
            u'status': u'pong',
        }
        return json.dumps(payload)
def main():
    """Configure and run the SMS gateway HTTP service (blocking)."""
    port = config.api['port']
    service_name = config.api['service_name']
    debug_mode = config.api['debug']
    resource = Service(debug_mode)
    root_web = Site(resource)
    # Expose the API under /<service_name> as well as at the root.
    resource.putChild(service_name, Service(debug_mode))
    if not debug_mode:
        # Only talk to real hardware outside of debug mode.
        modem_manager.init()
        print("Connected to modem")
    else:
        print("DEBUG_MODE enabled no message will be sent out from the dongle")
    reactor.listenTCP(port, root_web)
    print "Server running on {} url: localhost:{}/{}".format(port, port, service_name)
    reactor.run()
if __name__ == '__main__':
main() | 30.917582 | 103 | 0.602808 | 3,839 | 0.682246 | 0 | 0 | 0 | 0 | 0 | 0 | 1,268 | 0.225342 |
f0b7dafc2a4cbe01ebdeb4efbbb1605f26910088 | 391 | py | Python | plugins/vad/snr_vad/test_snr_vad.py | kowo-zahl/Naomi | 476cfc42036a1ac88e33451431fc64d5ea1aa49a | [
"MIT"
] | 194 | 2018-07-28T14:54:35.000Z | 2022-03-18T12:40:10.000Z | plugins/vad/snr_vad/test_snr_vad.py | HoltTechnologyCorporation/Naomi | 16d5f6ba03ea96c3fa13ed4e2c1f082041d9de31 | [
"MIT"
] | 239 | 2018-07-13T16:15:25.000Z | 2022-03-31T17:55:01.000Z | plugins/vad/snr_vad/test_snr_vad.py | Longshotpro2/Naomi | 9330c63fe24606dc45194d297c665f37a4ec10f7 | [
"MIT"
] | 64 | 2018-07-26T02:18:33.000Z | 2022-01-07T06:53:01.000Z | # -*- coding: utf-8 -*-
from naomi import testutils
from . import snr_vad
class TestSNR_VADPlugin(testutils.Test_VADPlugin):
    """Exercise the SNR-based VAD plugin through Naomi's shared VAD test base."""

    def setUp(self):
        super(TestSNR_VADPlugin, self).setUp()
        # Instantiate the plugin under test against the base class's input.
        self.plugin = testutils.get_plugin_instance(
            snr_vad.SNRPlugin,
            self._test_input
        )
        # prime by running through one wav file
        self.map_file()
| 24.4375 | 52 | 0.641944 | 314 | 0.803069 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.158568 |
f0b832955eefe7451229c65ceb69048f96000730 | 13,243 | py | Python | dali/test/python/test_operator_rotate.py | L-Net-1992/DALI | 982224d8b53e1156ae092f73f5a7d600982a1eb9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | dali/test/python/test_operator_rotate.py | L-Net-1992/DALI | 982224d8b53e1156ae092f73f5a7d600982a1eb9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | dali/test/python/test_operator_rotate.py | L-Net-1992/DALI | 982224d8b53e1156ae092f73f5a7d600982a1eb9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import math
import os
import random
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from test_utils import compare_pipelines
from sequences_test_utils import ArgData, ArgDesc, get_video_input_cases, ParamsProvider, sequence_suite_helper, ArgCb
test_data_root = os.environ['DALI_EXTRA_PATH']
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
def get_output_size(angle, input_size, parity_correction=True):
    """Return (height, width) of the canvas holding the input rotated by
    ``angle`` radians.

    A small epsilon is subtracted before the ceiling so extents that are
    exact up to rounding error are not bumped up. With parity_correction,
    each output extent is grown by one pixel where needed so its parity
    matches the input extent that dominates it (width for near-0 rotations,
    height for near-90-degree ones).
    """
    cos_a, sin_a = abs(math.cos(angle)), abs(math.sin(angle))
    in_h, in_w = input_size[0], input_size[1]
    eps = 1e-2
    out_w = int(math.ceil(in_w * cos_a + in_h * sin_a - eps))
    out_h = int(math.ceil(in_h * cos_a + in_w * sin_a - eps))
    if parity_correction:
        # Which input extent dominates each output extent flips at 45 deg.
        ref_w, ref_h = (in_w, in_h) if sin_a <= cos_a else (in_h, in_w)
        out_w += (out_w % 2) != (ref_w % 2)
        out_h += (out_h % 2) != (ref_h % 2)
    return (out_h, out_w)
def get_3d_lin_rotation(angle, axis):
    """Return the 3x3 float32 rotation matrix for ``angle`` radians about
    ``axis`` (Rodrigues' rotation formula; mirrors transform.h:rotation3D).

    The axis is normalized internally, so it does not need unit length.
    """
    if not angle:
        # Bug fix: np.eye((3, 3), ...) raised a TypeError — np.eye takes the
        # number of rows as a scalar, not a shape tuple.
        return np.eye(3, dtype=np.float32)
    axis_norm = np.linalg.norm(axis)
    axis = [dim / axis_norm for dim in axis]
    u, v, w = axis
    cosa = math.cos(angle)
    sina = math.sin(angle)
    return np.array([
        [u*u + (v*v+w*w)*cosa, u*v*(1-cosa) - w*sina, u*w*(1-cosa) + v*sina],
        [u*v*(1-cosa) + w*sina, v*v + (u*u+w*w)*cosa, v*w*(1-cosa) - u*sina],
        [u*w*(1-cosa) - v*sina, v*w*(1-cosa) + u*sina, w*w + (u*u+v*v)*cosa],
    ], dtype=np.float32)
def get_3d_output_size(angle, axis, input_size, parity_correction=False):
    """3D analogue of get_output_size: extents of the rotated bounding box.

    ``input_size`` is (depth, height, width)-ordered; internally the math
    runs on the reversed (w, h, d) vector and the result is reversed back.
    """
    # The element-wise |rotation| maps input extents onto output extents.
    transform = np.abs(get_3d_lin_rotation(angle, axis))
    eps = 1e-2
    in_size = np.array(input_size[2::-1], dtype=np.int32)
    out_size = np.int32(np.ceil(np.matmul(transform, in_size) - eps))
    if parity_correction:
        # Match each output extent's parity to the input extent contributing
        # most to it (row-wise argmax of the |rotation| matrix).
        dominant_axis = np.argmax(transform, axis=1)
        out_size += (out_size % 2) ^ (in_size[dominant_axis] % 2)
    return out_size[::-1]
def get_transform(angle, input_size, output_size):
    """Build the 2x3 inverse-map affine for rotating by ``angle`` radians.

    Maps output-canvas coordinates through the rotation back into input
    coordinates: translate the output center to the origin, rotate, then
    translate the origin to the input center.
    """
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    in_h, in_w = input_size[0], input_size[1]
    out_h, out_w = output_size[0], output_size[1]
    to_origin = np.array([[1, 0, -out_w * 0.5],
                          [0, 1, -out_h * 0.5],
                          [0, 0, 1]])
    rotation = np.array([[cos_a, -sin_a, 0],
                         [sin_a, cos_a, 0],
                         [0, 0, 1]])
    recenter = np.array([[1, 0, in_w * 0.5],
                         [0, 1, in_h * 0.5],
                         [0, 0, 1]])
    combined = np.matmul(recenter, np.matmul(rotation, to_origin))
    # Drop the homogeneous row: callers expect a 2x3 affine.
    return combined[0:2, 0:3]
def ToCVMatrix(matrix):
    """Rebase a 2x3 affine's translation onto OpenCV's pixel-center
    convention by mapping the input pixel center (0.5, 0.5) through it."""
    center = np.matmul(matrix, np.array([[0.5], [0.5], [1]]))
    adjusted = matrix.copy()
    adjusted[0][2] = center[0] - 0.5
    adjusted[1][2] = center[1] - 0.5
    return adjusted
def CVRotate(output_type, input_type, fixed_size):
    """Build an OpenCV-based reference rotate matching DALI's Rotate op.

    Returns ``warp_fn(img, angle_degrees)``: output size is `fixed_size`
    when given, otherwise inferred via get_output_size; borders are filled
    with 42 and linear interpolation with the inverse-map matrix from
    get_transform is used.
    """
    def warp_fn(img, angle):
        in_size = img.shape[0:2]
        angle = math.radians(angle)
        out_size = fixed_size if fixed_size is not None else get_output_size(angle, in_size)
        matrix = get_transform(angle, in_size, out_size)
        matrix = ToCVMatrix(matrix)
        # Match DALI's promotion: compute in float if either side is float.
        if output_type == dali.types.FLOAT or input_type == dali.types.FLOAT:
            img = np.float32(img)
        # cv2 expects the size as (width, height).
        out_size_wh = (out_size[1], out_size[0])
        out = cv2.warpAffine(img, matrix, out_size_wh, borderMode = cv2.BORDER_CONSTANT, borderValue = [42,42,42],
                             flags = (cv2.INTER_LINEAR|cv2.WARP_INVERSE_MAP))
        if output_type == dali.types.UINT8 and input_type == dali.types.FLOAT:
            # Saturate back to the uint8 range.
            out = np.uint8(np.clip(out, 0, 255))
        return out
    return warp_fn
class RotatePipeline(Pipeline):
    """DALI pipeline rotating decoded LMDB images by a random angle.

    ``device`` selects where Rotate runs ('cpu'/'gpu'); ``fixed_size``
    optionally pins the output canvas, otherwise the operator infers it.
    """

    def __init__(self, device, batch_size, output_type, input_type, fixed_size=None, num_threads=3, device_id=0, num_gpus=1):
        super(RotatePipeline, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)
        self.name = device
        self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
        self.decode = ops.decoders.Image(device = "cpu", output_type = types.RGB)
        # Optional cast so both uint8 and float inputs are exercised.
        if input_type != dali.types.UINT8:
            self.cast = ops.Cast(device = device, dtype = input_type)
        else:
            self.cast = None
        # Same seed (42) as CVPipeline so both draw identical angles.
        self.uniform = ops.random.Uniform(range = (-180.0, 180.0), seed = 42)
        self.rotate = ops.Rotate(device = device, size=fixed_size, fill_value = 42, dtype = output_type)

    def define_graph(self):
        self.jpegs, self.labels = self.input(name = "Reader")
        images = self.decode(self.jpegs)
        if self.rotate.device == "gpu":
            images = images.gpu()
        if self.cast:
            images = self.cast(images)
        outputs = self.rotate(images, angle = self.uniform())
        return outputs
class CVPipeline(Pipeline):
    """Reference pipeline computing the same rotation with OpenCV."""

    def __init__(self, batch_size, output_type, input_type, fixed_size, num_threads=3, device_id=0, num_gpus=1):
        super(CVPipeline, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)
        self.name = "cv"
        self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
        self.decode = ops.decoders.Image(device = "cpu", output_type = types.RGB)
        # Python-function operator wrapping the OpenCV reference rotate.
        self.rotate = ops.PythonFunction(function=CVRotate(output_type, input_type, fixed_size),
                                         output_layouts="HWC")
        # Same seed (42) as RotatePipeline so both draw identical angles.
        self.uniform = ops.random.Uniform(range = (-180.0, 180.0), seed = 42)
        self.iter = 0

    def define_graph(self):
        self.jpegs, self.labels = self.input(name = "Reader")
        images = self.decode(self.jpegs)
        angles = self.uniform()
        outputs = self.rotate(images, angles)
        return outputs
def compare(pipe1, pipe2, eps):
    """Build both pipelines and compare their outputs within ``eps``."""
    pipe1.build()
    pipe2.build()
    epoch_size = pipe1.epoch_size("Reader")
    batch_size = pipe1.max_batch_size
    # One iteration suffices when a single batch covers the epoch; otherwise
    # run two so a second batch is exercised.
    niter = 1 if batch_size >= epoch_size else 2
    compare_pipelines(pipe1, pipe2, batch_size, niter, eps)
# (input_type, output_type) combinations exercised by the comparison tests.
io_types = [
    (dali.types.UINT8, dali.types.UINT8),
    (dali.types.UINT8, dali.types.FLOAT),
    (dali.types.FLOAT, dali.types.UINT8),
    (dali.types.FLOAT, dali.types.FLOAT)
]
def create_pipeline(backend, *args):
    """Instantiate the OpenCV reference ('cv') or DALI ('cpu'/'gpu') pipeline."""
    if backend != "cv":
        return RotatePipeline(backend, *args)
    return CVPipeline(*args)
def run_cases(backend1, backend2, epsilon):
    """Yield nose-style test tuples over batch sizes, output sizes and
    input/output type combinations, comparing the two backends."""
    for batch_size in [1, 4, 19]:
        for output_size in [None, (160,240)]:
            for (itype, otype) in io_types:
                def run_case(backend1, backend2, *args):
                    pipe1 = create_pipeline(backend1, *args)
                    pipe2 = create_pipeline(backend2, *args)
                    compare(pipe1, pipe2, epsilon)
                yield run_case, backend1, backend2, batch_size, otype, itype, output_size
def test_gpu_vs_cv():
    """Compare the DALI GPU rotate against the OpenCV reference."""
    yield from run_cases("gpu", "cv", 8)
def test_cpu_vs_cv():
    """Compare the DALI CPU rotate against the OpenCV reference."""
    yield from run_cases("cpu", "cv", 8)
def test_gpu_vs_cpu():
    """Compare the DALI GPU rotate against the DALI CPU rotate."""
    yield from run_cases("gpu", "cpu", 1)
def infer_sequence_size(input_shapes, angles, axes=None):
    """Infer the single output frame size for a rotated frame sequence.

    Mirrors the operator's rule for per-frame angles: take the element-wise
    max of the uncorrected per-frame sizes, then adjust each extent's
    parity to the majority parity of the per-frame corrected sizes.
    When ``axes`` is given, the 3D variants are used.
    """
    assert(len(input_shapes) == len(angles))
    assert(axes is None or len(axes) == len(angles))
    if axes is None:
        no_correction_shapes = [
            np.array(get_output_size(math.radians(angle), shape, False), dtype=np.int32)
            for shape, angle in zip(input_shapes, angles)]
        corrected_shapes = [
            np.array(get_output_size(math.radians(angle), shape, True), dtype=np.int32)
            for shape, angle in zip(input_shapes, angles)]
    else:
        no_correction_shapes = [
            np.array(get_3d_output_size(math.radians(angle), axis, shape, False), dtype=np.int32)
            for shape, angle, axis in zip(input_shapes, angles, axes)]
        corrected_shapes = [
            np.array(get_3d_output_size(math.radians(angle), axis, shape, True), dtype=np.int32)
            for shape, angle, axis in zip(input_shapes, angles, axes)]
    max_shape = np.max(no_correction_shapes, axis=0)
    # Majority vote on parity: bump an extent by one when its parity
    # disagrees with the parity most corrected frames agree on.
    parity = np.sum(np.array(corrected_shapes, dtype=np.int32) % 2, axis=0)
    for i in range(len(max_shape)):
        if max_shape[i] % 2 != (2 * parity[i] > len(input_shapes)):
            max_shape[i] += 1
    return max_shape
def sequence_batch_output_size(unfolded_extents, input_batch, angle_batch, axis_batch=None):
    """Compute per-frame output sizes for an unfolded sequence batch.

    ``unfolded_extents`` gives the number of frames per original sequence;
    frames of one sequence share a single inferred output size, which is
    repeated once per frame in the returned list.
    """
    def iter_by_groups():
        # Re-group the flat (unfolded) batch back into its sequences.
        assert(sum(unfolded_extents) == len(input_batch))
        assert(len(input_batch) == len(angle_batch))
        assert(axis_batch is None or len(axis_batch) == len(angle_batch))
        offset = 0
        for group in unfolded_extents:
            yield input_batch[offset:offset + group], angle_batch[offset:offset + group],\
                None if axis_batch is None else axis_batch[offset:offset + group]
            offset += group
    sequence_output_shape = [
        infer_sequence_size([frame.shape for frame in input_frames], angles, axes)
        for input_frames, angles, axes in iter_by_groups()]
    # Repeat each sequence's shape once per frame of that sequence.
    return [
        output_shape for output_shape, num_frames in zip(sequence_output_shape, unfolded_extents)
        for _ in range(num_frames)]
class RotatePerFrameParamsProvider(ParamsProvider):
    """Params provider for video rotate tests with per-frame ``angle`` input.

    On top of the base expansion it injects a ``size`` argument into the
    expanded baseline pipeline, so the baseline accounts for the coalescing
    of the frame sizes inferred from the per-frame angles (and axes).
    """

    def __init__(self, input_params):
        super().__init__(input_params)

    def expand_params(self):
        # Only a single expanded (frame) dimension is supported here.
        assert self.num_expand == 1
        params = super().expand_params()
        by_name = {param.desc.name: param for param in params}
        angles = by_name.get('angle')
        axes = by_name.get('axis')
        # ``size`` must not be specified elsewhere: this provider computes it.
        assert angles is not None and 'size' not in self.fixed_params and 'size' not in by_name
        # Number of frames of each sequence, per batch.
        extents = [
            [sample.shape[0] for sample in batch]
            for batch in self.input_data]
        size_args = [extents, self.unfolded_input, angles.data]
        if axes is not None:
            size_args.append(axes.data)
        inferred_sizes = [
            sequence_batch_output_size(*batch_args)
            for batch_args in zip(*size_args)]
        params.append(ArgData(ArgDesc("size", False, "cpu"), inferred_sizes))
        return params

    def __repr__(self):
        return "{}({})".format(repr(self.__class__), repr(self.input_params))
def test_video():
    """Test fn.rotate on video ("FHWC") inputs against the per-frame baseline.

    Covers: a fixed angle, per-sample tensor angles (small and full range),
    per-frame angles via RotatePerFrameParamsProvider (which also supplies the
    inferred ``size``), and an explicitly requested random output size.
    """
    def small_angle(sample_desc):
        # |angle| < 45 deg keeps the auto-inferred canvas close to input size.
        return np.array(sample_desc.rng.uniform(-44., 44.), dtype=np.float32)

    def random_angle(sample_desc):
        return np.array(sample_desc.rng.uniform(-180., 180.), dtype=np.float32)

    def random_output(sample_desc):
        # Fix: draw BOTH extents from the per-sample RNG. The second extent
        # previously used the suite-level ``rng`` closure, making sample
        # parameters depend on global iteration order instead of being
        # reproducible per sample like every other callback here.
        return np.array([sample_desc.rng.randint(300, 400),
                         sample_desc.rng.randint(300, 400)])

    video_test_cases = [
        (dali.fn.rotate, {'angle': 45.}, []),
        (dali.fn.rotate, {}, [ArgCb("angle", small_angle, False)]),
        (dali.fn.rotate, {}, [ArgCb("angle", random_angle, False)]),
        (dali.fn.rotate, {}, RotatePerFrameParamsProvider([ArgCb("angle", small_angle, True)])),
        (dali.fn.rotate, {}, RotatePerFrameParamsProvider([ArgCb("angle", random_angle, True)])),
        (dali.fn.rotate, {}, [ArgCb("angle", small_angle, True), ArgCb("size", random_output, False)]),
    ]
    rng = random.Random(42)
    video_cases = get_video_input_cases("FHWC", rng, larger_shape=(512, 287))
    input_cases = [("FHWC", input_data) for input_data in video_cases]
    yield from sequence_suite_helper(rng, "F", input_cases, video_test_cases)
def test_3d_sequence():
    """Test fn.rotate on volumetric sequences ("FDHWC" layout).

    Builds random int32 volumes of varying frame count and spatial extents,
    then runs fixed-axis, per-frame-argument, and inferred-size cases through
    the sequence test suite helper.
    """
    rng = random.Random(42)
    num_batches = 4
    max_batch_size = 8
    max_frames_num = 32
    input_layout = "FDHWC"
    np_rng = np.random.default_rng(42)

    def make_sample():
        # NOTE: rng call order (frame count first, then D/H/W) is significant
        # for reproducibility of the generated cases.
        num_frames = rng.randint(1, max_frames_num)
        depth, height, width = tuple(rng.randint(10, 50) for _ in range(3))
        return np.int32(np_rng.uniform(0, 255, (num_frames, depth, height, width, 3)))

    def make_batch():
        return [make_sample() for _ in range(rng.randint(1, max_batch_size))]

    input_cases = [(input_layout, [make_batch() for _ in range(num_batches)])]

    def random_angle(sample_desc):
        return np.array(sample_desc.rng.uniform(-180., 180.), dtype=np.float32)

    def random_axis(sample_desc):
        return np.array([sample_desc.rng.uniform(-1, 1) for _ in range(3)], dtype=np.float32)

    test_cases = [
        (dali.fn.rotate, {'angle': 45., 'axis': np.array([1, 0, 0], dtype=np.float32)}, []),
        (dali.fn.rotate, {'size': (50, 30, 20)},
         [ArgCb("angle", random_angle, True), ArgCb("axis", random_axis, True)]),
        (dali.fn.rotate, {},
         RotatePerFrameParamsProvider([ArgCb("angle", random_angle, True),
                                       ArgCb("axis", random_axis, True)])),
    ]
    yield from sequence_suite_helper(rng, "F", input_cases, test_cases)
| 37.945559 | 131 | 0.671751 | 3,515 | 0.265423 | 3,848 | 0.290569 | 0 | 0 | 0 | 0 | 1,145 | 0.086461 |
f0b8a938a5d0cde1b5aa321a0fcf6ce1d6ace0f2 | 524 | py | Python | graph/templatetags/helpers.py | Soaring-Outliers/news_graph | ae7cde461e49b6ee8fe932fcf6c581f3a5574da4 | [
"MIT"
] | 1 | 2015-04-19T08:26:34.000Z | 2015-04-19T08:26:34.000Z | graph/templatetags/helpers.py | Soaring-Outliers/news_graph | ae7cde461e49b6ee8fe932fcf6c581f3a5574da4 | [
"MIT"
] | 5 | 2015-04-28T07:31:22.000Z | 2015-05-11T12:47:57.000Z | graph/templatetags/helpers.py | Soaring-Outliers/news_graph | ae7cde461e49b6ee8fe932fcf6c581f3a5574da4 | [
"MIT"
] | null | null | null | from django import template
import bleach
# import the logging library
import logging
# Get an instance of a logger
# Module-level logger for this template-tag library.
logger = logging.getLogger(__name__)
# Registry exposing the functions below as Django template filters.
register = template.Library()
@register.filter
def sanitize(text):
    """Template filter: reduce ``text`` to a small whitelist of HTML tags.

    Allows only p/span/h5/h6 with no attributes or inline styles; all other
    markup (and HTML comments) is stripped rather than escaped.
    """
    allowed_tags = ['p', 'span', 'h5', 'h6']
    return bleach.clean(text, tags=allowed_tags, attributes=[],
                        styles=[], strip=True, strip_comments=True)
@register.filter
def log(text):
    """Template filter: log the piped value and pass it through unchanged.

    Fix: the original returned ``None``, so ``{{ value|log }}`` swallowed the
    value and rendered "None"/empty instead of it. A debugging filter should
    be transparent, so the value is returned after logging.
    """
    logger.error(text)
    return text