| max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string) | score (float64) | int_score (int64) |
|---|---|---|---|---|---|---|
depth_upsampling/losses/gradient_loss.py | Levintsky/ARKitScenes | 237 | 12766951 | import torch
import dataset_keys
def div_by_mask_sum(loss: torch.Tensor, mask_sum: torch.Tensor):
return loss / torch.max(mask_sum, torch.ones_like(mask_sum))
class SafeTorchLog(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
input_abs = torch.abs(input) + 1e-9
ctx.save_for_backward(input_abs)
        return torch.log(input_abs) / 2.302585093  # log10(x) = ln(x) / ln(10), matching the backward below
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
(input_abs,) = ctx.saved_tensors
grad_input = grad_output.clone()
        return grad_input * (1.0 / input_abs) / 2.302585093  # d/dx log10(x) = 1 / (x * ln(10))
safe_torch_log = SafeTorchLog.apply
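# Hedged sketch (added for illustration; not in the original file): sanity-check
# the hand-written backward against finite differences. The positive float64
# input is an assumption (the analytic gradient above ignores the sign of
# negative inputs, so the check only holds for x > 0).
def _check_safe_log_gradient():
    x = (torch.rand(8, dtype=torch.float64) + 0.1).requires_grad_(True)
    return torch.autograd.gradcheck(safe_torch_log, (x,), eps=1e-6, atol=1e-4)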
def create_gradient_log_loss(log_prediction_d, mask, log_gt):
# compute log difference
log_d_diff = log_prediction_d - log_gt
log_d_diff = torch.mul(log_d_diff, mask)
# compute vertical gradient
v_gradient = torch.abs(log_d_diff[:, :, 2:, :] - log_d_diff[:, :, :-2, :])
v_mask = torch.mul(mask[:, :, 2:, :], mask[:, :, :-2, :])
v_gradient = torch.mul(v_gradient, v_mask)
# compute horizontal gradient
h_gradient = torch.abs(log_d_diff[:, :, :, 2:] - log_d_diff[:, :, :, :-2])
h_mask = torch.mul(mask[:, :, :, 2:], mask[:, :, :, :-2])
h_gradient = torch.mul(h_gradient, h_mask)
# sum up gradients
grad_loss = torch.sum(h_gradient, dim=[1, 2, 3]) + torch.sum(v_gradient, dim=[1, 2, 3])
num_valid_pixels = torch.sum(mask, dim=[1, 2, 3])
grad_loss = div_by_mask_sum(grad_loss, num_valid_pixels)
return grad_loss
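# Hedged shape note (added for illustration): the inputs are NCHW tensors, e.g.
# log_prediction_d, mask and log_gt of shape (B, 1, H, W); summing over
# dims [1, 2, 3] makes the returned grad_loss a per-sample tensor of shape (B,).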
def create_gradient_log_loss_4_scales(log_prediction, log_ground_truth, mask):
log_prediction_d = log_prediction
log_gt = log_ground_truth
mask = mask
    log_prediction_d_scale_1 = log_prediction_d[:, :, ::2, ::2]
    log_prediction_d_scale_2 = log_prediction_d_scale_1[:, :, ::2, ::2]
    log_prediction_d_scale_3 = log_prediction_d_scale_2[:, :, ::2, ::2]
    mask_scale_1 = mask[:, :, ::2, ::2]
    mask_scale_2 = mask_scale_1[:, :, ::2, ::2]
    mask_scale_3 = mask_scale_2[:, :, ::2, ::2]
    log_gt_scale_1 = log_gt[:, :, ::2, ::2]
    log_gt_scale_2 = log_gt_scale_1[:, :, ::2, ::2]
    log_gt_scale_3 = log_gt_scale_2[:, :, ::2, ::2]
gradient_loss_scale_0 = create_gradient_log_loss(log_prediction_d, mask, log_gt)
gradient_loss_scale_1 = create_gradient_log_loss(
log_prediction_d_scale_1, mask_scale_1, log_gt_scale_1
)
gradient_loss_scale_2 = create_gradient_log_loss(
log_prediction_d_scale_2, mask_scale_2, log_gt_scale_2
)
gradient_loss_scale_3 = create_gradient_log_loss(
log_prediction_d_scale_3, mask_scale_3, log_gt_scale_3
)
gradient_loss_4_scales = (
gradient_loss_scale_0 + gradient_loss_scale_1 + gradient_loss_scale_2 + gradient_loss_scale_3
)
return gradient_loss_4_scales
def gradient_loss(outputs, inputs):
valid_mask = inputs[dataset_keys.VALID_MASK_IMG]
gt_depth = inputs[dataset_keys.HIGH_RES_DEPTH_IMG]
prediction = outputs[dataset_keys.PREDICTION_DEPTH_IMG]
log_prediction = safe_torch_log(prediction)
log_gt = safe_torch_log(gt_depth)
loss = create_gradient_log_loss_4_scales(log_prediction, log_gt, valid_mask)
loss = torch.mean(loss)
return loss
| 2.71875 | 3 |
model/TextCNN/text_cnn_pytorch.py | stanli124/TextClassification-Pytorch | 0 | 12766952 | <filename>model/TextCNN/text_cnn_pytorch.py
'''
AUTHOR :<NAME>
DATE :2021/07/18 10:22
'''
import torch.nn as nn
import torch
import numpy as np
class TextCNN_torch(nn.Module):
def __init__(self, vocab_size=None, embedding_dims=50,
seq_len=400,
                 kernel_size=[3, 4, 5, 6],  # four widths; conv1D_6 and the fc layer expect len == 4
out_channel=128,
device=None):
        super(TextCNN_torch, self).__init__()
self.vocab_size = vocab_size
self.embedding_dims = embedding_dims
self.seq_len = seq_len
self.kernel_sizes = kernel_size
self.class_num = 1
self.out_channel = out_channel
self.device = device
self.embedding = nn.Embedding(self.vocab_size, self.embedding_dims, padding_idx=0)
self.conv1D_3 = nn.Conv1d(self.embedding_dims, self.out_channel, kernel_size[0])
self.conv1D_4 = nn.Conv1d(self.embedding_dims, self.out_channel, kernel_size[1])
self.conv1D_5 = nn.Conv1d(self.embedding_dims, self.out_channel, kernel_size[2])
self.conv1D_6 = nn.Conv1d(self.embedding_dims, self.out_channel, kernel_size[3])
self.global_max_pool_3 = nn.MaxPool1d(seq_len-kernel_size[0]+1, stride=1)
self.global_max_pool_4 = nn.MaxPool1d(seq_len-kernel_size[1]+1, stride=1)
self.global_max_pool_5 = nn.MaxPool1d(seq_len-kernel_size[2]+1, stride=1)
self.global_max_pool_6 = nn.MaxPool1d(seq_len-kernel_size[3]+1, stride=1)
self.fc = nn.Linear(in_features=out_channel*len(self.kernel_sizes), out_features=1)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = x.to(self.device)
x = self.embedding(x)
input = x.transpose(1, 2)
out_3 = self.conv1D_3(input)
out_3 = self.relu(out_3)
out_3 = self.global_max_pool_3(out_3)
out_4 = self.conv1D_4(input)
out_4 = self.relu(out_4)
out_4 = self.global_max_pool_4(out_4)
out_5 = self.conv1D_5(input)
out_5 = self.relu(out_5)
out_5 = self.global_max_pool_5(out_5)
out_6 = self.conv1D_6(input)
out_6 = self.relu(out_6)
out_6 = self.global_max_pool_6(out_6)
out = torch.cat([out_3, out_4, out_5, out_6], dim=1)
        out = out.squeeze(-1)  # drop only the pooled length dim; safe for batch size 1
out = self.fc(out)
out = self.sigmoid(out)
return out
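# Hedged smoke test (added for illustration; not in the original file): the
# vocabulary size, batch size, and CPU device are arbitrary assumptions.
if __name__ == "__main__":
    model = TextCNN_torch(vocab_size=5000, embedding_dims=50, seq_len=400,
                          kernel_size=[3, 4, 5, 6], out_channel=128, device="cpu")
    tokens = torch.randint(1, 5000, (8, 400))  # (batch, seq_len) token ids
    probs = model(tokens)
    print(probs.shape)  # expected: torch.Size([8, 1])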
| 2.4375 | 2 |
analysis/thermistors.py | sanielfishawy/ODrive | 5 | 12766953 | #%%
import matplotlib.pyplot as plt
import numpy as np
Rload = 3300
R_25 = 10000
T_25 = 25 + 273.15 #Kelvin
Beta = 3434
Tmin = 0
Tmax = 140
temps = np.linspace(Tmin, Tmax, 1000)
tempsK = temps + 273.15
# https://en.wikipedia.org/wiki/Thermistor#B_or_%CE%B2_parameter_equation
r_inf = R_25 * np.exp(-Beta/T_25)
R_temps = r_inf * np.exp(Beta/tempsK)
V = Rload / (Rload + R_temps)
fit = np.polyfit(V, temps, 3)
p1 = np.poly1d(fit)
fit_temps = p1(V)
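# Hedged check (added for illustration; not in the original script): report the
# worst-case error of the cubic fit over the modeled temperature range.
max_err = np.max(np.abs(fit_temps - temps))
print('max cubic fit error: %.3f C' % max_err)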
#%%
print(fit)
plt.plot(V, temps, label='actual')
plt.plot(V, fit_temps, label='fit')
plt.xlabel('normalized voltage')
plt.ylabel('Temp [C]')
plt.legend(loc=0)
plt.show() | 2.9375 | 3 |
q2_protein_pca/__init__.py | misialq/q2-protein-pca | 2 | 12766954 | <reponame>misialq/q2-protein-pca
# ----------------------------------------------------------------------------
# Copyright (c) 2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from ._alignment import mafft, map_positions
from ._pca import pca
from ._plot import plot_loadings
from ._ranking import rank_alignment
__version__ = "2020.08"
__all__ = ['mafft', 'map_positions', 'pca', 'plot_loadings', 'rank_alignment']
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 1.203125 | 1 |
PBO_18099/Latihan_3.2.Penugasan.py | viviyanti/PBO | 0 | 12766955 | <reponame>viviyanti/PBO<gh_stars>0
# ASSIGNMENT OPERATORS
# read the initial value
nilai_x = int(input("enter the value of x: "))
# addition assignment
nilai_x += 20
print("sum result: ", nilai_x)
nilai_x = int(input("enter the value of x: "))
# subtraction assignment
nilai_x -= 10
print("difference result:", nilai_x)
nilai_x = int(input("enter the value of x: "))
# multiplication assignment
nilai_x *= 10
print("product result:", nilai_x)
nilai_x = int(input("enter the value of x: "))
# division assignment
nilai_x /= 30
print("quotient result:", nilai_x)
nilai_x = int(input("enter the value of x: "))
# exponentiation assignment
nilai_x **= 5
print("power result:", nilai_x)
nilai_x = int(input("enter the value of x: "))
# modulus assignment
nilai_x %= 35
print("remainder result:", nilai_x)
| 3.578125 | 4 |
poop/hfdp/adapter/ducks/challenge/drone_adapter.py | cassiobotaro/poop | 37 | 12766956 | from poop.hfdp.adapter.ducks.challenge.drone import Drone
class DroneAdapter:
def __init__(self, drone: Drone) -> None:
self.__drone = drone
def quack(self) -> None:
self.__drone.beep()
def fly(self) -> None:
self.__drone.spin_rotors()
self.__drone.take_off()
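# Hedged usage sketch (added for illustration; not in the original file): the
# concrete Drone construction below is an assumption.
# duck_like = DroneAdapter(Drone())
# duck_like.quack()  # delegates to drone.beep()
# duck_like.fly()    # delegates to spin_rotors() then take_off()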
| 2.109375 | 2 |
samples/knn.py | likeand/ml | 1 | 12766957 | # -*- coding: utf-8 -*-
# @Date : 2020/5/24
# @Author: Luokun
# @Email : <EMAIL>
import sys
from os.path import dirname, abspath
import matplotlib.pyplot as plt
import numpy as np
sys.path.append(dirname(dirname(abspath(__file__))))
def test_knn():
from models.knn import KNN
x, y = np.random.randn(3, 200, 2), np.zeros([3, 200])
    x[0] += np.array([2, 2])  # shift right by 2, up by 2
    x[1] += np.array([2, -2])  # shift right by 2, down by 2
y[1] = 1
y[2] = 2
plot_scatter(x, 'Real')
x = x.reshape(-1, 2)
y = y.flatten()
# train
knn = KNN(3)
knn.fit(x, y)
pred = knn.predict(x)
plot_scatter([x[pred == i] for i in [0, 1, 2]], 'Pred')
# print accuracy
acc = np.sum(pred == y) / len(pred)
print(f'Acc = {100 * acc:.2f}%')
def plot_scatter(xys, title):
plt.figure(figsize=(10, 10))
for xy, color in zip(xys, ['r', 'g', 'b']):
plt.scatter(xy[:, 0], xy[:, 1], color=color)
plt.title(title)
plt.show()
if __name__ == '__main__':
test_knn()
| 2.734375 | 3 |
ramachandran/run_phi_angles.py | simonholmes001/ramachandran | 0 | 12766958 | import os
import concurrent.futures
from tqdm import tqdm
from phi_angles import PhiDihedralAngleStatistics
import argparse
parser = argparse.ArgumentParser(description='To set to the path to the data')
parser.add_argument('-i', '--input_directory', help='An input directory for the psi angles must be named', required=True)
parser.add_argument('-a', '--amino_acid_input', help='An input directory for the amino acid tags must be named', required=True)
args = parser.parse_args()
phi_data_path = args.input_directory
amino_acid_data_path = args.amino_acid_input
# psi_data_path = '../dihedral_coordinates'
# amino_acid_data_path = '../../structure_prediction/output/final_features'
def main(phi_data_path, amino_acid_data_path):
for root, dirs, files in os.walk(phi_data_path, topdown=False):
for name in tqdm(files):
if 'phi' in name:
phi = PhiDihedralAngleStatistics(phi_data_path, amino_acid_data_path, name.split('_')[0])
phi.get_amino_acid_array()
phi.encode()
phi.get_phi()
phi.check_length()
phi.combine_amino_acid_phi()
phi.save_phi_angles()
if __name__ == '__main__':
main(phi_data_path, amino_acid_data_path)
| 2.5 | 2 |
inferencia/task/object_tracking/object_tracking/model/object_tracking_result.py | yuya-mochimaru-np/inferencia | 0 | 12766959 | from dataclasses import dataclass
import numpy as np
@dataclass
class ObjectTrackingResult:
frame_index: int
tracking_id: int
class_id: int
class_name: str
xmin: int
ymin: int
xmax: int
ymax: int
confidence: float
is_active: bool
def to_txt(self):
return "{} {} {} {} {} {}".format(self.class_name,
self.confidence,
self.xmin,
self.ymin,
self.xmax,
self.ymax)
def to_dict(self):
return self.__dict__
def to_array(self):
return np.array([self.xmin,
self.ymin,
self.xmax,
self.ymax,
self.confidence,
])
def to_list(self):
return [self.xmin,
self.ymin,
self.xmax,
self.ymax,
self.confidence,
]
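# Hedged sketch (added for illustration; not in the original file): all field
# values below are arbitrary.
if __name__ == "__main__":
    r = ObjectTrackingResult(frame_index=0, tracking_id=1, class_id=0,
                             class_name="person", xmin=10, ymin=20, xmax=110,
                             ymax=220, confidence=0.9, is_active=True)
    print(r.to_txt())    # -> "person 0.9 10 20 110 220"
    print(r.to_array())  # -> array([ 10. ,  20. , 110. , 220. ,   0.9])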
| 2.765625 | 3 |
tf2_models/trainer.py | samiraabnar/DistillingInductiveBias | 10 | 12766960 | import tensorflow as tf
import os
from tf2_models.keras_callbacks import CheckpointCallback, SummaryCallback
from tf2_models.train_utils import RectifiedAdam, ExponentialDecayWithWarmpUp
OPTIMIZER_DIC = {'adam': tf.keras.optimizers.Adam,
'radam': RectifiedAdam,
}
class Trainer(object):
def __init__(self, hparams, strategy, model, task, train_params, log_dir, ckpt_dir):
self.hparams = hparams
self.model = model
self.task = task
self.train_params = train_params
self.strategy = strategy
lr_schedule = self.get_lr_schedule()
self.optimizer = OPTIMIZER_DIC[self.train_params.optimizer](learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0)
self.ckpt = tf.train.Checkpoint(step=tf.Variable(1, name='checkpoint_step'), optimizer=self.optimizer, net=self.model)
self.manager = tf.train.CheckpointManager(self.ckpt, ckpt_dir,
keep_checkpoint_every_n_hours=self.hparams.keep_checkpoint_every_n_hours,
max_to_keep=2)
with self.strategy.scope():
x, y = iter(self.task.valid_dataset).next()
model(x)
model.summary()
model.compile(
optimizer=self.optimizer,
loss=self.task.get_loss_fn(),
metrics=self.task.metrics())#[self.task.get_loss_fn()])
#tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),)
summary_dir = os.path.join(log_dir, 'summaries')
tf.io.gfile.makedirs(log_dir)
self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(summary_dir, 'train'))
tf.compat.v2.summary.experimental.set_step(self.optimizer.iterations)
ckpt_callback = CheckpointCallback(manager=self.manager, ckpt=self.ckpt)
summary_callback = SummaryCallback(summary_writer=self.summary_writer)
self.callbacks = [ckpt_callback, summary_callback]
def get_lr_schedule(self):
if 'crs' in self.train_params.schedule:
initial_learning_rate = self.train_params.learning_rate
lr_schedule = (
tf.keras.experimental.CosineDecayRestarts(
initial_learning_rate,
self.train_params.decay_steps,
t_mul=2.0,
m_mul=0.9,
alpha=0.001,
))
elif self.train_params.optimizer == 'radam':
initial_learning_rate = self.train_params.learning_rate
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=initial_learning_rate,
decay_steps=self.train_params.decay_steps,
hold_base_rate_steps=self.train_params.hold_base_rate_steps,
decay_rate=0.96,
warmup_steps=0.0)
else:
initial_learning_rate = self.train_params.learning_rate
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=initial_learning_rate,
decay_steps=self.train_params.decay_steps,
decay_rate=0.96,
hold_base_rate_steps=self.train_params.hold_base_rate_steps,
warmup_steps=self.train_params.warmup_steps)
return lr_schedule
def restore(self):
with self.strategy.scope():
self.ckpt.restore(self.manager.latest_checkpoint)
if self.manager.latest_checkpoint:
print("Restored from {}".format(self.manager.latest_checkpoint))
else:
print("Initializing from scratch.")
def train(self):
with self.strategy.scope():
with self.summary_writer.as_default():
print("initial learning rate:", self.model.optimizer.learning_rate(self.model.optimizer.iterations))
self.model.fit(self.task.train_dataset,
epochs=self.train_params.num_train_epochs,
steps_per_epoch=self.task.n_train_batches,
validation_steps=self.task.n_valid_batches,
callbacks=self.callbacks,
validation_data=self.task.valid_dataset,
verbose=2
)
| 2.265625 | 2 |
tests/test__text_to_nlp.py | JeremyGibson/xml_parser | 2 | 12766961 | #!/usr/bin/env python3
# import modules.
import sys; sys.path.append("..")
import logging
import math
import plac
import unittest
from tomes_tagger.lib.text_to_nlp import *
# enable logging.
logging.basicConfig(level=logging.DEBUG)
class Test_TextToNLP(unittest.TestCase):
def setUp(self):
# set attributes.
self.host = "http://localhost:-1"
self.t2n = TextToNLP(host=self.host)
def test__failed_annotate(self):
""" Since we can't connect to port -1, is a ConnectionError raised? """
# call CoreNLP instance's annotator.
try:
self.t2n.corenlp.annotate("")
is_connection_error = False
except ConnectionError as err:
is_connection_error = True
# check if result is as expected.
self.assertTrue(is_connection_error)
def test__gets_empty_list(self):
""" If we try and tag an empty string, is an empty list returned? """
results = self.t2n.get_NER("")
self.assertTrue(results == [])
# CLI.
def main(text="North Carolina.", host="http://localhost:9003"):
"Prints list of NER results.\
\nexample: `python3 test__text_to_nlp.py '<NAME>'`"
# print NER results.
t2n = TextToNLP(host=host)
ner = t2n.get_NER(text)
print(ner)
if __name__ == "__main__":
plac.call(main) | 2.921875 | 3 |
plugins/trivia/trivia.py | awesomarcus/haaaaaaaah | 0 | 12766962 | from threading import Thread
import time
from plugins.trivia.questions import QuestionGenerator
# This class will put itself in a pseudo-while loop that is non-blocking
# to the rest of the program.
class Question:
def __init__(self, q, a):
self.text = q
self.ans = a
class Trivia:
def __init__(self, ws, room, kind):
self.ws = ws
self.room = room
self.kind = kind
self.generator = QuestionGenerator()
self.question = None
self.solved = False
self.multiple = False
self.solver = ''
self.endSession = False
# Create the first thread that starts the loop
self.thread = Thread(target = self.customWait,
name = 'customWait',
args = (5,),
daemon = True)
self.thread.start()
def notify(self, msg):
self.ws.send('{room}|{msg}'.format(room = self.room, msg = msg))
def customWait(self, secondsToWait):
        ''' Wait secondsToWait seconds before starting the next question '''
secondsPassed = 0
        while secondsPassed <= secondsToWait and not self.endSession:
time.sleep(1)
secondsPassed += 1
if self.endSession:
# This breaks the pseudo-while loop and kills the Trivia session
return
newQ = self.generator.makeQuestion()
self.question = Question(newQ['q'], newQ['a'])
self.notify('Next question:')
self.notify(self.question.text)
# Create the waiting thread that'll time out after 40 seconds
self.thread = Thread(target = self.wait30Sec,
name = 'longWait',
daemon = True)
self.thread.start()
def wait30Sec(self):
        ''' Each question has a 30 second answering period, ending early if it is solved '''
        secondsPassed = 0
        while secondsPassed <= 30 and not (self.endSession or self.solved):
time.sleep(1)
secondsPassed += 1
if self.solved:
self.notify('{name} was correct{extra}!'.format(name = self.solver,
extra = ' first' if self.multiple else ''))
else:
self.notify('No one got it right')
self.notify('The answer was {ans}.'.format(ans = self.question.ans))
if self.endSession:
return
self.clear()
self.notify('Next round will start soon.')
self.thread = Thread(target = self.customWait,
name = 'customWait',
args = (10,),
daemon = True)
self.thread.start()
def tryAnswer(self, guess):
if guess.lower() == self.question.ans:
self.solved = True
return self.solved
def wasSolved(self, by):
self.solver = by
def clear(self):
self.solved = False
self.multiple = False
self.solver = ''
def status(self):
return self.thread.name
| 3.40625 | 3 |
trustpayments/models/subscription_charge_type.py | TrustPayments/python-sdk | 2 | 12766963 | # coding: utf-8
from enum import Enum, unique
@unique
class SubscriptionChargeType(Enum):
MANUAL = "MANUAL"
AUTOMATIC = "AUTOMATIC"
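# Hedged usage note (added for illustration):
# SubscriptionChargeType("MANUAL") is SubscriptionChargeType.MANUAL  # -> True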
| 2.15625 | 2 |
setupext/sdist.py | theHamsta/jpype | 1 | 12766964 | <filename>setupext/sdist.py
# -*- coding: utf-8 -*-
import os
import sys
import warnings
import distutils
from distutils.dir_util import copy_tree, remove_tree
from setuptools.command.sdist import sdist
from setuptools import Extension
# Customization of the sdist
class BuildSourceDistribution(sdist):
"""
Override some behavior on sdist
Copy the build/lib to native/jars to remove ant/jdk dependency
"""
def run(self):
# We need to build a jar cache for the source distribution
self.run_command("build_java")
self.run_command("test_java")
dest = os.path.join('native','jars')
src = os.path.join('build','lib')
if not os.path.exists(src):
distutils.log.error("Jar source file is missing from build")
raise distutils.errors.DistutilsPlatformError("Error copying jar file")
copy_tree(src, dest)
# Collect the sources
sdist.run(self)
# Clean up the jar cache after sdist
remove_tree(dest)
| 2.15625 | 2 |
src/ezdxf/render/dimension.py | jpsantos-mf/ezdxf | 2 | 12766965 | <gh_stars>1-10
# Created: 28.12.2018
# Copyright (c) 2018-2020, <NAME>
# License: MIT License
from typing import TYPE_CHECKING
from ezdxf.math import UCS
from ezdxf.lldxf.const import DXFValueError
from ezdxf.entities.dimstyleoverride import DimStyleOverride
from .dim_linear import LinearDimension
from .dim_radius import RadiusDimension
from .dim_diameter import DiameterDimension
if TYPE_CHECKING:
from ezdxf.eztypes import Dimension, BaseDimensionRenderer
class DimensionRenderer:
def dispatch(self, override: 'DimStyleOverride', ucs: 'UCS') -> 'BaseDimensionRenderer':
dimension = override.dimension
dim_type = dimension.dimtype
if dim_type in (0, 1):
return self.linear(dimension, ucs, override)
elif dim_type == 2:
return self.angular(dimension, ucs, override)
elif dim_type == 3:
return self.diameter(dimension, ucs, override)
elif dim_type == 4:
return self.radius(dimension, ucs, override)
elif dim_type == 5:
return self.angular3p(dimension, ucs, override)
elif dim_type == 6:
return self.ordinate(dimension, ucs, override)
else:
raise DXFValueError(f'Unknown DIMENSION type: {dim_type}')
def linear(self, dimension: 'Dimension', ucs: 'UCS', override: 'DimStyleOverride' = None):
""" Call renderer for linear dimension lines: horizontal, vertical and rotated """
return LinearDimension(dimension, ucs, override)
def angular(self, dimension: 'Dimension', ucs: 'UCS', override: 'DimStyleOverride' = None):
raise NotImplementedError()
def diameter(self, dimension: 'Dimension', ucs: 'UCS', override: 'DimStyleOverride' = None):
""" Call renderer for diameter dimension """
return DiameterDimension(dimension, ucs, override)
def radius(self, dimension: 'Dimension', ucs: 'UCS', override: 'DimStyleOverride' = None):
""" Call renderer for radius dimension """
return RadiusDimension(dimension, ucs, override)
def angular3p(self, dimension: 'Dimension', ucs: 'UCS', override: 'DimStyleOverride' = None):
raise NotImplementedError()
def ordinate(self, dimension: 'Dimension', ucs: 'UCS', override: 'DimStyleOverride' = None):
raise NotImplementedError()
| 2.140625 | 2 |
scrapy_auto_trans/exceptions.py | jiansongyang/scrapy-auto-translation-middelware | 4 | 12766966 | from scrapy.exceptions import IgnoreRequest
class TranslationResult(IgnoreRequest):
"""A translation response was received"""
def __init__(self, response, *args, **kwargs):
self.response = response
super(TranslationResult, self).__init__(*args, **kwargs)
class TranslationError(Exception):
def __init__(self):
pass
def error(self):
return "Translation Error"
def warn(self):
return self.error()
def details(self):
return self.error()
class TranslationErrorGeneral(TranslationError):
def __init__(self, message):
self.message = message
super(TranslationErrorGeneral, self).__init__()
def warn(self):
return self.message
class TranslationErrorDueToInvalidResponseCode(TranslationError):
def __init__(self, response):
self.response = response
super(TranslationErrorDueToInvalidResponseCode, self).__init__()
def warn(self):
return "translation failed due to response code = %d"%self.response.status
def details(self):
return "translation failed due to response code = %d, request url = '%s'"%(
self.response.status,
self.response.request.url
)
| 2.578125 | 3 |
Graph/Graph/users/models.py | MGijon/TheGraph.es | 0 | 12766967 | """Users Models."""
# Django
from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
"""Profile extended:
Proxy model that extends the base data with other information.
"""
# this links Profile with the User profile, one to one relationship
profile_user = models.OneToOneField(User, on_delete=models.CASCADE)
# Website filed
profile_website = models.URLField(max_length=200, blank=True)
# Biography
profile_biography = models.TextField(blank=True)
# Phone number
profile_phone_number = models.CharField(max_length=20, blank=True)
# Picture
profile_picture = models.ImageField(
upload_to = 'users/pictures',
blank = True,
null = True,)
profile_created_on = models.DateTimeField(auto_now_add = True)
profile_modified_on = models.DateTimeField(auto_now = True)
def __str__(self):
"""TODO: description."""
return self.profile_user.username
| 2.875 | 3 |
flask/templates/app/views.py | telliott99/Pi | 0 | 12766968 | from flask import render_template, url_for
from app import app
script_list = ['demo',
'format_DNA',
'translate',
'extra_sites']
default_choice = 'format_DNA'
def render_index_template():
return render_template(
"index.html",
script_list = script_list,
default=default_choice)
# index shows form with scripts listed
@app.route('/', methods = ['GET'])
@app.route('/index', methods = ['GET'])
def index():
return render_index_template()
| 2.40625 | 2 |
test/integration/ggrc_workflows/notifications/test_data_handler.py | Smotko/ggrc-core | 0 | 12766969 | <gh_stars>0
# coding: utf-8
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: <EMAIL>
# Maintained By: <EMAIL>
from integration.ggrc import TestCase
from ggrc import db
from ggrc.models.revision import Revision
from ggrc_workflows.notification.data_handler import get_cycle_task_dict
from integration.ggrc.models.factories import ContractFactory
from integration.ggrc.models.factories import EventFactory
from integration.ggrc.models.factories import RelationshipFactory
from integration.ggrc_workflows.models.factories import CycleTaskFactory
class TestDataHandler(TestCase):
""" This class test basic functions in the data_handler module """
def test_get_cycle_task_dict(self):
contract = ContractFactory(title=u"Contract1")
cycle_task = CycleTaskFactory(title=u"task1")
relationship = RelationshipFactory(source=contract,
destination=cycle_task)
db.session.delete(relationship)
db.session.commit()
relationship_revision = Revision(obj=relationship,
modified_by_id=None,
action="deleted",
content="{}")
contract_revision = Revision(obj=contract,
modified_by_id=None,
action="deleted",
content='{"display_name": "Contract1"}')
revisions = [relationship_revision, contract_revision]
EventFactory(
modified_by_id=None,
action="DELETE",
resource_id=relationship.id,
resource_type=relationship.type,
context_id=None,
revisions=revisions
)
task_dict = get_cycle_task_dict(cycle_task)
self.assertEqual(task_dict["related_objects"][0],
u"Contract1 [removed from task]")
| 1.804688 | 2 |
text_wrappers.py | 11ajith/pypassman | 0 | 12766970 | <reponame>11ajith/pypassman<gh_stars>0
import sys
class TextWrappers:
def __init__(self):
        if sys.platform == 'win32':
            self.LOCK = ''
            self.ERROR = ''
            self.SUCCESS = ''
            self.INFO = ''
            self.BANNER = ''
            self.PARAM = ''
            self.HEADER = ''
            self.DEFAULT = ''
        else:  # MacOS, Linux, etc.
            self.LOCK = '\U0001F510 '
            self.ERROR = '\033[91m\u2717 '
            self.SUCCESS = '\033[92m\u2713 '
            self.INFO = '\033[96m'
            self.BANNER = '\033[1m'
            self.PARAM = '\x1b[3m'
            self.HEADER = '\033[94m\033[1m'
            self.DEFAULT = '\033[0m\x1b[0m' | 2.5625 | 3 |
dask_sql/physical/rex/core/over.py | goodwanghan/dask-sql | 0 | 12766971 | <reponame>goodwanghan/dask-sql<filename>dask_sql/physical/rex/core/over.py
import logging
from typing import Any, Callable, List, Tuple
import dask.dataframe as dd
import pandas as pd
from dask_sql.datacontainer import ColumnContainer, DataContainer
from dask_sql.java import org
from dask_sql.physical.rex.base import BaseRexPlugin
from dask_sql.physical.rex.convert import RexConverter
from dask_sql.physical.utils.groupby import get_groupby_with_nulls_cols
from dask_sql.physical.utils.map import map_on_partition_index
from dask_sql.physical.utils.sort import sort_partition_func
from dask_sql.utils import LoggableDataFrame, new_temporary_column
logger = logging.getLogger(__name__)
class OverOperation:
def __call__(self, partitioned_group, *args) -> pd.Series:
"""Call the stored function"""
return self.call(partitioned_group, *args)
class ExplodedOperation(OverOperation):
def __init__(self, f):
self.unexploded_f = f
def call(self, partitioned_group, *args):
result = self.unexploded_f(partitioned_group, *args)
return pd.Series(
[result] * len(partitioned_group), index=partitioned_group.index
)
class RowNumberOperation(OverOperation):
def call(self, partitioned_group):
return range(1, len(partitioned_group) + 1)
class FirstValueOperation(OverOperation):
def call(self, partitioned_group, value_col):
return partitioned_group[value_col].iloc[0]
class LastValueOperation(OverOperation):
def call(self, partitioned_group, value_col):
return partitioned_group[value_col].iloc[-1]
class SumOperation(OverOperation):
def call(self, partitioned_group, value_col):
return partitioned_group[value_col].sum()
class CountOperation(OverOperation):
def call(self, partitioned_group, value_col=None):
if value_col is None:
return partitioned_group.iloc[:, 0].count()
else:
return partitioned_group[value_col].count()
class MaxOperation(OverOperation):
def call(self, partitioned_group, value_col):
return partitioned_group[value_col].max()
class MinOperation(OverOperation):
def call(self, partitioned_group, value_col):
return partitioned_group[value_col].min()
class RexOverPlugin(BaseRexPlugin):
"""
A RexOver is an expression, which calculates a given function over the dataframe
    while first optionally partitioning the data and optionally sorting it.
expressions like `F OVER (PARTITION BY x ORDER BY y)` apply f on each
partition separately and sort by y before applying f. The result of this
calculation has however the same length as the input dataframe - it is not an aggregation.
Typical examples include ROW_NUMBER and lagging.
"""
class_name = "org.apache.calcite.rex.RexOver"
OPERATION_MAPPING = {
"row_number": RowNumberOperation(),
"$sum0": ExplodedOperation(SumOperation()),
# Is replaced by a sum and count by calcite: "avg": ExplodedOperation(AvgOperation()),
"count": ExplodedOperation(CountOperation()),
"max": ExplodedOperation(MaxOperation()),
"min": ExplodedOperation(MinOperation()),
"single_value": ExplodedOperation(FirstValueOperation()),
"first_value": ExplodedOperation(FirstValueOperation()),
"last_value": ExplodedOperation(LastValueOperation()),
}
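    # Hedged note (added for illustration; not part of the original file): the
    # per-partition pandas analogue these operations reproduce for e.g.
    # ROW_NUMBER() OVER (PARTITION BY x ORDER BY y) is roughly:
    #   df.sort_values("y").groupby("x").cumcount() + 1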
def convert(
self,
rex: "org.apache.calcite.rex.RexNode",
dc: DataContainer,
context: "dask_sql.Context",
) -> Any:
window = rex.getWindow()
self._assert_simple_window(window)
df = dc.df
cc = dc.column_container
# Store the divisions to apply them later again
known_divisions = df.divisions
# Store the index and sort order to apply them later again
df, partition_col, index_col, sort_col = self._preserve_index_and_sort(df)
dc = DataContainer(df, cc)
# Now extract the groupby and order information
sort_columns, sort_ascending, sort_null_first = self._extract_ordering(
window, cc
)
logger.debug(
"Before applying the function, sorting according to {sort_columns}."
)
df, group_columns = self._extract_groupby(df, window, dc, context)
logger.debug(
f"Before applying the function, partitioning according to {group_columns}."
)
# Finally apply the actual function on each group separately
operator = rex.getOperator()
operator_name = str(operator.getName())
operator_name = operator_name.lower()
try:
operation = self.OPERATION_MAPPING[operator_name]
except KeyError: # pragma: no cover
try:
operation = context.functions[operator_name]
except KeyError: # pragma: no cover
raise NotImplementedError(f"{operator_name} not (yet) implemented")
logger.debug(f"Executing {operator_name} on {str(LoggableDataFrame(df))}")
operands = [
RexConverter.convert(o, dc, context=context) for o in rex.getOperands()
]
df, new_column_name = self._apply_function_over(
df,
operation,
operands,
group_columns,
sort_columns,
sort_ascending,
sort_null_first,
)
# Revert back any sorting and grouping by using the previously stored information
df = self._revert_partition_and_order(
df, partition_col, index_col, sort_col, known_divisions
)
return df[new_column_name]
def _assert_simple_window(self, window: org.apache.calcite.rex.RexWindow):
"""Make sure we can actually handle this window type"""
lower_bound = window.getLowerBound()
RexWindowBounds = org.apache.calcite.rex.RexWindowBounds
assert (
lower_bound == RexWindowBounds.UNBOUNDED_PRECEDING
), f"Lower window bound type {lower_bound} is not implemented"
upper_bound = window.getUpperBound()
assert upper_bound in [
RexWindowBounds.CURRENT_ROW,
RexWindowBounds.UNBOUNDED_FOLLOWING,
], f"Lower window bound type {upper_bound} is not implemented"
def _preserve_index_and_sort(
self, df: dd.DataFrame
) -> Tuple[dd.DataFrame, str, str, str]:
"""Store the partition number, index and sort order separately to make any shuffling reversible"""
partition_col, index_col, sort_col = (
new_temporary_column(df),
new_temporary_column(df),
new_temporary_column(df),
)
def store_index_columns(partition, partition_index):
return partition.assign(
**{
partition_col: partition_index,
index_col: partition.index,
sort_col: range(len(partition)),
}
)
df = map_on_partition_index(df, store_index_columns)
return df, partition_col, index_col, sort_col
def _extract_groupby(
self,
df: dd.DataFrame,
window: org.apache.calcite.rex.RexWindow,
dc: DataContainer,
context: "dask_sql.Context",
) -> Tuple[dd.DataFrame, str]:
"""Prepare grouping columns we can later use while applying the main function"""
partition_keys = list(window.partitionKeys)
if partition_keys:
group_columns = [
RexConverter.convert(o, dc, context=context) for o in partition_keys
]
group_columns = get_groupby_with_nulls_cols(df, group_columns)
group_columns = {
new_temporary_column(df): group_col for group_col in group_columns
}
else:
group_columns = {new_temporary_column(df): 1}
df = df.assign(**group_columns)
group_columns = list(group_columns.keys())
return df, group_columns
def _extract_ordering(
self, window: org.apache.calcite.rex.RexWindow, cc: ColumnContainer
) -> Tuple[str, str, str]:
"""Prepare sorting information we can later use while applying the main function"""
order_keys = list(window.orderKeys)
sort_columns_indices = [int(i.getKey().getIndex()) for i in order_keys]
sort_columns = [
cc.get_backend_by_frontend_index(i) for i in sort_columns_indices
]
ASCENDING = org.apache.calcite.rel.RelFieldCollation.Direction.ASCENDING
FIRST = org.apache.calcite.rel.RelFieldCollation.NullDirection.FIRST
sort_ascending = [x.getDirection() == ASCENDING for x in order_keys]
sort_null_first = [x.getNullDirection() == FIRST for x in order_keys]
return sort_columns, sort_ascending, sort_null_first
def _apply_function_over(
self,
df: dd.DataFrame,
f: Callable,
operands: List[dd.Series],
group_columns: List[str],
sort_columns: List[str],
sort_ascending: List[bool],
sort_null_first: List[bool],
) -> Tuple[dd.DataFrame, str]:
"""Apply the given function over the dataframe, possibly grouped and sorted per group"""
temporary_operand_columns = {
new_temporary_column(df): operand for operand in operands
}
df = df.assign(**temporary_operand_columns)
# Important: move as few bytes as possible to the pickled function,
# which is evaluated on the workers
temporary_operand_columns = temporary_operand_columns.keys()
def map_on_each_group(partitioned_group):
if sort_columns:
partitioned_group = sort_partition_func(
partitioned_group, sort_columns, sort_ascending, sort_null_first
)
column_result = f(partitioned_group, *temporary_operand_columns)
partitioned_group = partitioned_group.assign(
**{new_column_name: column_result}
)
return partitioned_group
new_column_name = new_temporary_column(df)
meta = df._meta_nonempty.assign(**{new_column_name: 0.0})
df = df.groupby(group_columns).apply(map_on_each_group, meta=meta)
return df, new_column_name
def _revert_partition_and_order(
self,
df: dd.DataFrame,
partition_col: str,
index_col: str,
sort_col: str,
known_divisions: Any,
) -> dd.DataFrame:
"""Use the stored information to make revert the shuffling"""
from dask.dataframe.shuffle import set_partition
divisions = tuple(range(len(known_divisions)))
df = set_partition(df, partition_col, divisions)
df = df.map_partitions(
lambda x: x.set_index(index_col, drop=True).sort_values(sort_col),
meta=df._meta.set_index(index_col),
)
df.divisions = known_divisions
return df
| 1.992188 | 2 |
dramkit/find_addends/find_addends_utils.py | Genlovy-Hoo/dramkit | 0 | 12766972 | # -*- coding: utf-8 -*-
import numpy as np
#%%
def tol2side_x_eq_y(x, y, tol_below=0.0, tol_above=0.0):
    '''Check whether x equals y within lower tolerance tol_below and upper tolerance tol_above'''
return y - tol_below <= x <= y + tol_above
def tol_eq(x, y, tol=0.0):
    '''Check whether x and y are equal within absolute tolerance tol'''
return abs(x - y) <= tol
def tol_x_big_y(x, y, tol=0.0):
    '''Check whether x is greater than y by more than absolute tolerance tol'''
return x > y and abs(x - y) > tol
def tol_x_big_eq_y(x, y, tol=0.0):
    '''Check whether x is greater than or equal to y within absolute tolerance tol'''
return tol_x_big_y(x, y, tol) or tol_eq(x, y, tol)
def tol_x_sml_y(x, y, tol=0.0):
    '''Check whether x is less than y by more than absolute tolerance tol'''
return x < y and abs(y - x) > tol
def tol_x_sml_eq_y(x, y, tol=0.0):
    '''Check whether x is less than or equal to y within absolute tolerance tol'''
return tol_x_sml_y(x, y, tol) or tol_eq(x, y, tol)
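# Hedged examples (added for illustration): with tol=0.05,
#   tol_eq(1.00, 1.04, 0.05)      -> True  (|1.00 - 1.04| <= 0.05)
#   tol_x_big_y(1.10, 1.00, 0.05) -> True  (1.10 > 1.00 and the gap exceeds 0.05)
#   tol_x_big_y(1.04, 1.00, 0.05) -> False (the gap is within tolerance)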
#%%
def get_alts_sml(tgt_sum, alts, sort_type='descend', tol=0.0, add_num=None):
'''
    Select feasible candidate addends from alts whose values do not exceed tgt_sum.

    Parameters
    ----------
    tgt_sum : float, int
        Target sum.
    alts : list
        List of candidate addends.
    sort_type : str
        How to sort alts: 'descend' (default, descending), 'ascend' (ascending),
        or None for no sorting.
    tol : float
        Absolute tolerance used when comparing two numbers.
    add_num : int, None
        Number of extra candidates to keep beyond the point where the cumulative
        sum first reaches tgt_sum; by default there is no limit.

    Returns
    -------
    alts : list
        List of feasible candidate addends.
'''
    # candidates must not exceed the target sum
alts = [x for x in alts if tol_x_sml_eq_y(x, tgt_sum, tol)]
if len(alts) == 0:
return []
if sort_type == 'descend':
alts = sorted(alts, reverse=True)
if sort_type == 'ascend':
alts = sorted(alts, reverse=False)
if add_num is None or add_num >= len(alts):
return alts
cumSum = list(np.cumsum(alts))
tmp = [1 if s >= tgt_sum else 0 for s in cumSum]
try:
strt_idx = tmp.index(1)
if strt_idx+add_num+1 <= len(alts):
return alts[:strt_idx+add_num+1]
else:
return alts
except:
return alts
#%%
def backfind_sml1st_index(tgt_sum, alts, tol=0.0, loop_count=None):
'''
    Search alts from back to front and return the index of the first number
    less than or equal to tgt_sum.

    Parameters
    ----------
    tgt_sum : int, float
        Target value.
    alts : list
        List of numbers to compare against.
    tol : float
        Absolute tolerance used when comparing two numbers.
    loop_count : int
        Initial iteration counter, default None; if None, iterations are not
        counted, otherwise counting continues from loop_count.

    Returns
    -------
    idx : int
        Index of the first number in alts less than or equal to tgt_sum,
        searching from the back.
    loop_count : int
        Iteration count when the search ends.
'''
if len(alts) == 0:
return -1, loop_count
idx = len(alts) - 1
if loop_count is None:
while idx >= 1 and tol_x_big_y(alts[idx], tgt_sum, tol):
idx -= 1
return idx, loop_count
else:
while idx >= 1 and tol_x_big_y(alts[idx], tgt_sum, tol):
idx -= 1
loop_count += 1
return idx, loop_count
#%%
if __name__ == '__main__':
tgt_sum = 10
alts = [2, 5, 12, 11, 7, 8, 6, 3, 1, 10, 13]
sort_type = 'descend'
tol = 1.0
add_num = None
alts_new = get_alts_sml(tgt_sum, alts, sort_type=sort_type, tol=tol,
add_num=add_num)
print(alts_new)
alts = sorted(alts, reverse=False)
idx, loop_count = backfind_sml1st_index(tgt_sum, alts, tol=tol,
loop_count=None)
print(alts)
print(idx, loop_count)
| 3.078125 | 3 |
tests/tests/test_02_integration.py | tkw1536/django_selenium_test | 0 | 12766973 | <gh_stars>0
from __future__ import annotations
from unittest import mock
from django_selenium_test import IntegrationTest
class ExampleIntegrationTest(IntegrationTest):
find_element_selector = "main"
def test_element_mixins(self) -> None:
""" Checks that the element mixins work as intended """
self.load_live_url("integration")
test_element = self.find_element("#test")
self.assertIsNotNone(test_element, "Can find the test element")
test_next_element = self.find_element("#test_next")
self.assertIsNotNone(test_next_element, "Can find the test next element")
self.assertEqual(
self.find_next_sibling(test_element),
test_next_element,
"Checks that find_next_sibling works as expected",
)
def test_element_assertions(self) -> None:
""" Checks that the element assertions work as intended """
self.load_live_url("integration")
self.assert_element_exists("#exists")
self.assert_element_not_exists("#not_exists")
self.assert_element_displayed("#displayed")
self.assert_element_not_displayed("#not_displayed")
self.assert_element_not_displayed("#not_exists")
def test_urls(self) -> None:
# check that load_live_url and assert_url_equal work
self.load_live_url(
"integrationparams",
url_kwargs={"parameter": 12},
url_reverse_get_params={"next": "integration"},
)
self.assert_url_equal(
"integrationparams",
kwargs={"parameter": 12},
reverse_get_params={"next": "integration"},
)
# check that the follow function works
self.assert_url_follow("integrationredirect", "integration")
@mock.patch("tests.views.cleaned_data_check", return_value=1)
def test_fill_form(self, cmock: mock.Mock) -> None:
# fill in the form normally
self.submit_form(
"integration",
"input_id_submit",
send_form_keys={"id_a": "Filled in A"},
select_dropdowns={"id_b": "b"},
script_value={"id_c": "Filled in C"},
)
self.assert_url_equal("integrationsubmit")
cmock.assert_has_calls(
[mock.call({"a": "Filled in A", "b": "b", "c": "Filled in C"})]
)
# fill in the form, but set b manually
cmock.reset_mock()
submit = self.fill_out_form(
"integration",
"input_id_submit",
send_form_keys={"id_a": "Filled in A"},
select_dropdowns={"id_b": "a"},
script_value={"id_c": "Filled in C"},
)
self.select_dropdown(self.find_element("#id_b"), "b")
submit.click()
self.assert_url_equal("integrationsubmit")
cmock.assert_has_calls(
[mock.call({"a": "Filled in A", "b": "b", "c": "Filled in C"})]
)
# reset all the things
cmock.reset_mock()
submit = self.fill_out_form(
"integration",
"input_id_submit",
send_form_keys={"id_a": "Filled in A"},
select_dropdowns={"id_b": "b"},
)
# hack away the required fields, and then submit
self.disable_form_requirements()
submit.click()
# check that the submission failed
self.assert_url_equal("integration")
cmock.assert_not_called()
# submitting a download form and getting the content
ok, data = self.get_form_download(self.find_element("#input_id_download"))
self.assertEqual(ok, True)
self.assertEqual(data, b"content of example.txt, but via post")
# downloading a file via url
ok2, data2 = self.get_url_download("integrationdownload")
self.assertEqual(ok2, True)
self.assertEqual(data2, b"content of example.txt, but via get")
# hover an element
self.hover_element("hoverable")
self.assertEqual(
self.find_element("#hoverable").get_attribute("data-hovered"), "true"
)
| 2.4375 | 2 |
invoice/migrations/0016_expensegroup.py | pickleshb/PyInvoice | 2 | 12766974 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-01-03 13:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('invoice', '0015_auto_20180102_0048'),
]
operations = [
migrations.CreateModel(
name='ExpenseGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80)),
('expense', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='group', to='invoice.Invoice')),
],
),
]
| 1.617188 | 2 |
yammler/yammler.py | EttyKitty/OpenXcom_Tools | 0 | 12766975 | # python3 yammler.py ~/Games/openxcom_71_40k/user/mods/ROSIGMA/Ruleset ~/Games/openxcom_71_40k/user/mods/40k/Ruleset/
import sys, os
import yaml
print(sys.argv)
# os.chdir(sys.argv[1])
paths = sys.argv[1:]
fileList = []
DEBUG = False
def debugPrint(debugText):
if DEBUG:
print(debugText)
def addTrailingSlash(path):
if path[-1] != "/":
return (path + "/")
else:
return path
def isFolder(path, fileName):
stMode = os.stat(path + fileName).st_mode
stMode //= 0x4000 # directory
if stMode == 1:
return True
return False
def isRulFile(path, fileName):
stMode = os.stat(path + fileName).st_mode
stMode //= 0x8000 # file
if ".rul" == fileName[-4:] and stMode == 1:
return True
return False
def populateFileList(path): # recursive
for x in os.listdir(path):
path = addTrailingSlash(path)
if isFolder(path, x):
populateFileList(path+x)
elif isRulFile(path, x):
fileList.append(path+x)
print("Searching for Ruleset Files in:")
for path in paths:
print(path)
populateFileList(path)
print("Number of Ruleset Files: " + str(len(fileList)))
def tryYamlSafeLoad(fileHandler):
try:
        return yaml.safe_load(fileHandler)
except yaml.constructor.ConstructorError:
print("Constructor Error; Affected file: " + str(fileHandler.name))
return dict()
except yaml.composer.ComposerError:
print("Composer Error; Affected file: " + str(fileHandler.name))
return dict()
class yamlItemEntry:
def __init__(self, yamlEntry):
itemName = self.safeInsert(yamlEntry, "type")
battleType = self.safeInsert(yamlEntry, "battleType")
tuAuto = self.safeInsert(yamlEntry, "tuAuto")
tuSnap = self.safeInsert(yamlEntry, "tuSnap")
tuAimed = self.safeInsert(yamlEntry, "tuAimed")
rosigmaComment = self.safeInsert(yamlEntry, "rosigmaComment")
def safeInsert(self, yamlEntry, key):
try:
return yamlEntry[key]
except KeyError:
return ""
yamlEntries = []
for filePath in fileList:
yamlFile = open(filePath, 'r')
yamlContent = tryYamlSafeLoad(yamlFile)
debugPrint(filePath)
debugPrint(yamlContent.keys())
if "items" in yamlContent.keys():
print(filePath)
#print(yamlContent["items"])
#print(len(yamlContent["items"]))
#print(type(yamlContent))
#print(yamlContent.keys())
for x in yamlContent["items"]:
if "type" in x.keys():
print(x["type"])
print(x)
yamlEntries.append(yamlItemEntry(x))
break
yamlFile.close()
print(yamlEntries)
| 2.234375 | 2 |
setup.py | whit537/httpy | 0 | 12766976 | from distutils.core import setup
classifiers = [
'Development Status :: 3 - Alpha'
, 'Intended Audience :: Developers'
, 'License :: OSI Approved :: BSD License'
, 'Natural Language :: English'
, 'Operating System :: MacOS :: MacOS X'
, 'Operating System :: Microsoft :: Windows'
, 'Operating System :: POSIX'
, 'Operating System :: Unix'
, 'Programming Language :: Python'
, 'Topic :: Internet :: WWW/HTTP'
, 'Topic :: Internet :: WWW/HTTP :: WSGI'
, 'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware'
, 'Topic :: Software Development :: Libraries :: Python Modules'
]
setup( name='httpy'
, version='~~VERSION~~'
, package_dir = {'':'src'}
, py_modules=['httpy']
, description = "httpy smooths out a few of WSGI's most glaring warts."
, author = '<NAME>'
, author_email = '<EMAIL>'
, url = 'http://www.zetadev.com/software/httpy/'
, classifiers = classifiers
)
| 1.125 | 1 |
utils/time_watcher.py | Johnson145/RapidObjectDetectionUsingCascadedCNNs | 9 | 12766977 | <gh_stars>1-10
from time import time
from datetime import timedelta
from utils import log
class TimeWatcher:
"""A TimeWatcher object can be used to evaluate the runtime of individual code parts."""
def __init__(self, name: str):
"""Create a new TimeWatcher.
:param name: The name of the time watcher. Will be included in logs to identify it.
"""
self._name = name
self.start()
def start(self):
"""Starts counting the time."""
self._start_time = time()
self._end_time = None
self._elapsed_seconds = None
log.log("TimeWatcher Start: {}".format(
self._name
))
def stop(self):
"""Stop the previously-started runtime evaluation."""
self._end_time = time()
self._elapsed_seconds = self._end_time - self._start_time
log.log("TimeWatcher Stop {}: {}".format(
self._name,
self.seconds_to_str(self._elapsed_seconds)
))
@property
def elapsed_seconds(self):
"""Get the elapsed time in seconds.
If called before self.stop() was called the first time, None will be returned.
"""
return self._elapsed_seconds
@staticmethod
def seconds_to_str(time_in_seconds: int) -> str:
"""Get a readable time representation of the given duration in seconds."""
return str(timedelta(seconds=time_in_seconds))
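# Hedged usage sketch (added for illustration; not in the original file):
if __name__ == "__main__":
    tw = TimeWatcher("demo")
    tw.stop()
    print(TimeWatcher.seconds_to_str(3661))  # -> "1:01:01"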
| 3.5625 | 4 |
test/map_optimizer.py | fritzo/kazoo | 3 | 12766978 | <gh_stars>1-10
#!/usr/bin/python
from math import *
from scipy import *
#from matplotlib import pyplot
#from scipy import linalg
import main
#----( commands )-------------------------------------------------------------
@main.command
def project_unif (X = 3, Y = 2):
'projects dJ matrix via inclusion-exclusion, WRT J metric'
J = exp(randn(Y,X))
J /= sum(J)
print 'J = %s' % J
p = dot(ones(Y), J)
q = dot(J, ones(X))
Z = sum(J)
print 'p = %s' % p
print 'q = %s' % q
print 'Z = %s' % Z
print '\nBefore projecting:'
dJ = randn(Y,X)
print 'dJ = %s' % dJ
dp = dot(ones(Y), dJ)
dq = dot(dJ, ones(X))
dZ = sum(dJ)
print 'dp = %s' % dp
print 'dq = %s' % dq
print 'dZ = %s' % dZ
print '\nAfter projecting:'
dJ -= outer(ones(Y) / Y, dp)
dJ -= outer(dq, ones(X) / X)
dJ += dZ / (X * Y)
dp = dot(ones(Y), dJ)
dq = dot(dJ, ones(X))
dZ = sum(dJ)
print 'dp = %s' % dp
print 'dq = %s' % dq
print 'dZ = %s' % dZ
@main.command
def project_log (X = 3, Y = 2):
'projects dJ matrix via inclusion-exclusion WRT log(J) metric (incorrectly)'
J = exp(randn(Y,X))
J /= sum(J)
print 'J = %s' % J
p = dot(ones(Y), J)
q = dot(J, ones(X))
Z = sum(J)
print 'p = %s' % p
print 'q = %s' % q
print 'Z = %s' % Z
print '\nBefore projecting:'
dJ = randn(Y,X)
dJ *= J # convert gradient to descent direction in log(J) metric
print 'dJ = %s' % dJ
dp = dot(ones(Y), dJ)
dq = dot(dJ, ones(X))
dZ = sum(dJ)
print 'dp = %s' % dp
print 'dq = %s' % dq
print 'dZ = %s' % dZ
print '\nAfter projecting:'
dJ -= J * outer(ones(Y), dp / p)
dJ -= J * outer(dq / q, ones(X))
dJ += J * (dZ / Z)
dp = dot(ones(Y), dJ)
dq = dot(dJ, ones(X))
dZ = sum(dJ)
print 'dp = %s' % dp
print 'dq = %s' % dq
print 'dZ = %s' % dZ
@main.command
def project_iter (tol = 1e-12, X = 3, Y = 2):
'projects dJ matrix iteratively WRT log(J) metric'
'''
Each of the projections 1-dp/p, 1-dq/q has eigenvalues in {0,1}.
The sum 1 - (dp/p + dq/q) has eigenvalues in [-1,0] u {1}.
The optimal iterative approximation is 1 - 2/3 * (dp/p + dq/q),
which has eigenvalues in [-1/3,1/3] u {1}.
'''
logging = (X + Y < 8)
from matplotlib import pyplot
J = exp(randn(Y,X))
J /= sum(J)
if logging:
print 'J = %s' % J
p = dot(ones(Y), J)
q = dot(J, ones(X))
Z = sum(J)
if logging:
print 'p = %s' % p
print 'q = %s' % q
print 'Z = %s' % Z
print '\nBefore projecting:'
dJ = randn(Y,X)
dJ *= J # convert gradient to descent direction in log(J) metric
if logging:
print 'dJ = %s' % dJ
scale = 2 / 3.0
iters = []
errors = []
for i in range(100):
dp = scale * dot(ones(Y), dJ)
dq = scale * dot(dJ, ones(X))
dJ -= J * outer(ones(Y), dp / p)
dJ -= J * outer(dq / q, ones(X))
iter = 1 + i
error = sqrt((sum(dq * dq) + sum(dp * dp)) / (X + Y))
iters.append(iter)
errors.append(error)
if error < tol:
print 'projection converged after %i steps (expected %g)' \
% (iter, -log(tol) / log(3.0))
break
print '\nAfter projecting:'
dp = dot(ones(Y), dJ)
dq = dot(dJ, ones(X))
dZ = sum(dJ)
if logging:
print 'dp/tol = %s' % (dp / tol)
print 'dq/tol = %s' % (dq / tol)
print 'dZ/tol = %s' % (dZ / tol)
pyplot.plot(iters, errors, 'ko')
pyplot.yscale('log')
pyplot.xlabel('iteration')
pyplot.ylabel('rms error')
pyplot.show()
if __name__ == '__main__': main.main()
| 2.734375 | 3 |
data_resource/storage/aws_s3.py | brighthive/data-resource-generator | 2 | 12766979 | <reponame>brighthive/data-resource-generator
import os
import logging
import boto3
import json
from botocore.exceptions import ClientError
class S3Manager:
"""S3Manager Manager Class."""
def __init__(self, config):
self.config = config
self.aws_iam_role = self.config.AWS_S3_USE_IAM_ROLE
self.aws_access_key_id = self.config.AWS_ACCESS_KEY_ID
self.aws_secret_access_key = self.config.AWS_SECRET_ACCESS_KEY
self.region_name = self.config.AWS_S3_REGION
        if (
            self.aws_access_key_id is None
            or self.aws_secret_access_key is None
            or self.region_name is None
        ):
            if not self.aws_iam_role:
                raise RuntimeError("Invalid AWS S3 Configuration")
def _required_env_check(self):
if self.config.ENV != "PRODUCTION":
raise RuntimeError(
"AWS S3 storage manager are not allowed in testing environment"
)
def get_s3_client(self):
self._required_env_check()
if self.aws_iam_role:
return boto3.client("s3", region_name=self.region_name)
else:
return boto3.client(
"s3",
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.region_name,
)
def get_s3_resource(self):
self._required_env_check()
if self.aws_iam_role:
return boto3.resource("s3", region_name=self.region_name)
else:
return boto3.resource(
"s3",
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.region_name,
)
    # Upload a file from the local volume to an S3 bucket.
def aws_s3_upload_file(self, file_name, bucket, object_name=None):
if object_name is None:
object_name = file_name
try:
s3_client = self.get_s3_client()
_ = s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
return True
    # Check whether an object exists in S3.
def aws_s3_object_exists(self, bucket, object_name):
try:
s3_resource = self.get_s3_resource()
s3_resource.Object(bucket, object_name).load()
except ClientError as e:
logging.error(e)
return int(e.response["Error"]["Code"]) != 404
return True
    # Get uploaded data from an S3 bucket object (UTF-8 decoded).
def aws_s3_get_data(self, bucket, object_name):
try:
s3 = self.get_s3_resource()
obj = s3.Object(bucket, object_name)
data = obj.get()["Body"].read().decode("UTF-8")
return data
except ClientError as e:
logging.error(e)
return None
    # Upload json_data to a bucket under object_name (UTF-8 encoded).
def aws_s3_put_data(self, json_data, bucket, object_name):
try:
s3 = self.get_s3_resource()
obj = s3.Object(bucket, object_name)
obj.put(Body=(bytes(json.dumps(json_data).encode("UTF-8"))))
return True
except ClientError as e:
logging.error(e)
return False
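# Hedged usage sketch (added for illustration; not in the original file): the
# config object, bucket, and key names are assumptions.
# s3 = S3Manager(config)
# s3.aws_s3_put_data({"status": "ok"}, "my-bucket", "status.json")
# print(s3.aws_s3_get_data("my-bucket", "status.json"))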
| 2.21875 | 2 |
tftk/optuna/__init__.py | kitfactory/tftk | 6 | 12766980 | import optuna
from . optuna_util import Optuna
from . mongo import get_storage_mongo
__all__ = ['Optuna']
optuna.storages.get_storage = get_storage_mongo
| 1.070313 | 1 |
main/dash/layout.py | sdf94/flask-dash-blog | 0 | 12766981 | import os
"""Plotly Dash HTML layout override."""
dir_path = os.getcwd()
with open(os.path.join(dir_path, 'main', 'templates', 'base.html'), 'r') as f:
rows = f.readlines()
rows = [row.strip() for row in rows]
nav_index = rows.index('</nav>')
dash_str = rows[:nav_index+1] + ['{%app_entry%}',
'<footer>'
'{%config%}',
'{%scripts%}',
'{%renderer%}',
'</footer>',
'</body>',
'</html>']
html_layout = "\n".join(dash_str) | 2.671875 | 3 |
helpers/simulation_helpers/scripts/schunk_machine_server.py | GT-RAIL/assistance_arbitration | 0 | 12766982 | #!/usr/bin/python
# Copyright 2019 Fetch Robotics Inc.
# Author(s): <NAME>
# Python
from __future__ import print_function
from datetime import datetime
from datetime import timedelta
# ROS
import rospy
import actionlib
from fetchit_challenge.msg import SchunkMachineAction, SchunkMachineResult, SchunkMachineGoal
# # GPIO
# try:
# import RPi.GPIO as GPIO
# except ImportError:
# print("This script must be run on a Raspberry Pi. (RPi.GPIO import failed)")
# sys.exit(1)
class SchunkMachineServer(object):
"""Class for using Schunk Machine chuck."""
_result = SchunkMachineResult()
def __init__(self):
"""Specifies pin numbering schema and sets up GPIO channels"""
# # Setup GPIO
# GPIO.setwarnings(False)
# mode = GPIO.getmode()
# if mode is None:
# GPIO.setmode(GPIO.BOARD)
# elif mode == GPIO.BCM:
# GPIO.setup([], GPIO.OUT)
# GPIO.cleanup()
# GPIO.setmode(GPIO.BOARD)
# GPIO.setup(37, GPIO.OUT, initial=1)
# GPIO.setup(40, GPIO.OUT, initial=0)
self._current_state = SchunkMachineGoal.OPEN
# Minimum time for chuck to be closed
self._lock_time = 120.0
self.server = actionlib.SimpleActionServer('schunk_machine', SchunkMachineAction, self.callback, False)
self.server.start()
rospy.loginfo("Simulated SCHUNK machine is ready")
def callback(self, goal):
"""Action server callback."""
print("Received goal: " + str(goal))
if goal.state == self._current_state:
self._result.success = True
self._result.message = "Schunk Machine Chuck already in desired state."
self.server.set_succeeded(self._result)
elif goal.state == SchunkMachineGoal.CLOSE:
self._lock_until = datetime.now() + timedelta(seconds=self._lock_time)
self.close()
self._result.success = True
self._result.message = "Schunk Machine Chuck closed."
self.server.set_succeeded(self._result)
self._current_state = SchunkMachineGoal.CLOSE
elif goal.state == SchunkMachineGoal.OPEN:
time_left = self._lock_until - datetime.now()
if time_left.total_seconds() > 0.0:
self._result.success = False
self._result.message = "Schunk Machine Chuck must be closed for at least " + str(self._lock_time) +\
"s. Please wait " + str(time_left.total_seconds()) + "s."
self.server.set_aborted(self._result)
else:
self.open()
self._result.success = True
self._result.message = "Schunk Machine Chuck open."
self.server.set_succeeded(self._result)
self._current_state = SchunkMachineGoal.OPEN
else:
self._result.success = False
self._result.message = "Unknown goal type"
self.server.set_aborted(self._result)
def open(self):
"""Set Pi pins to open chuck."""
# GPIO.output(40, 0)
# GPIO.output(37, 1)
rospy.loginfo("Opening SCHUNK machine")
def close(self):
"""Set Pi pins to close chuck."""
# GPIO.output(40, 1)
# GPIO.output(37, 0)
rospy.loginfo("Closing SCHUNK machine")
if __name__ == "__main__":
rospy.init_node('schunk_machine_server')
machine = SchunkMachineServer()
rospy.spin()
| 2.421875 | 2 |
tests/test_convert.py | vikpe/exex-cli | 2 | 12766983 | <filename>tests/test_convert.py<gh_stars>1-10
from exex_cli import convert
def test_to_string():
assert convert.to_string(None) == ""
assert convert.to_string("None") == "None"
assert convert.to_string(0) == "0"
assert convert.to_string(1) == "1"
assert convert.to_string("a") == "a"
assert convert.to_string(False) == "False"
assert convert.to_string("False") == "False"
assert convert.to_string([]) == ""
def test_to_strings():
assert convert.to_strings(None) == ""
assert convert.to_strings("None") == "None"
assert convert.to_strings(0) == "0"
assert convert.to_strings(1) == "1"
assert convert.to_strings("a") == "a"
assert convert.to_strings(False) == "False"
assert convert.to_strings("False") == "False"
assert convert.to_strings([]) == ""
assert convert.to_strings(["a"]) == ["a"]
assert convert.to_strings([1]) == ["1"]
assert convert.to_strings([["a", 1], ["b", 2]]) == [["a", "1"], ["b", "2"]]
def test_to_csv():
assert convert.to_csv(None) == "\n"
assert convert.to_csv("None") == "None\n"
assert convert.to_csv(0) == "0\n"
assert convert.to_csv(1) == "1\n"
assert convert.to_csv("a") == "a\n"
assert convert.to_csv(False) == "False\n"
assert convert.to_csv("False") == "False\n"
assert convert.to_csv([]) == "\n"
assert convert.to_csv(["a"]) == "a\n"
assert convert.to_csv(["a", "b", 3]) == "a,b,3\n"
assert convert.to_csv([["a", "b", 3]]) == "a,b,3\n"
assert convert.to_csv([1]) == "1\n"
assert convert.to_csv([["a", 1], ["b", 2]]) == "a,1\nb,2\n"
def test_to_json():
def strip_whitespace(val):
import re
pattern = re.compile(r"\s+")
return re.sub(pattern, "", val)
def json_no_whitespace(val):
return strip_whitespace(convert.to_json(val))
assert json_no_whitespace(["a"]) == '["a"]'
assert json_no_whitespace(["a", "b", 3]) == '["a","b",3]'
assert json_no_whitespace([["a", "b", 3]]) == '[["a","b",3]]'
assert json_no_whitespace([1]) == "[1]"
assert json_no_whitespace([["a", 1], ["b", 2]]) == '[["a",1],["b",2]]'
| 2.65625 | 3 |
tests/roots/test-epub-anchor-id/conf.py | samdoran/sphinx | 4,973 | 12766984 | <filename>tests/roots/test-epub-anchor-id/conf.py
def setup(app):
app.add_crossref_type(directivename="setting", rolename="setting")
| 1.1875 | 1 |
Python/Set .add()/Solution.py | chessmastersan/HackerRank | 2 | 12766985 | #author <NAME>
# Enter your code here. Read input from STDIN. Print output to STDOUT
n = int(input())
s = set()
for i in range(0, n) :
s.add(input())
print(len(s))
| 3.484375 | 3 |
Trellis.py | plex1/turbo-code-simulation | 1 | 12766986 | #! /usr/bin/env python
# title : Trellis.py
# description : This class generates a trellis based on a trellis definition class.
# Parameters such as reduction (radix) can be used to construct the trellis.
# author : <NAME>
# python_version : 3.5.2
import utils
class Trellis(object):
def __init__(self, trellisDefinition, reduction=1, merge_parallel=False):
self.tdef = trellisDefinition
self.reduction = reduction
self.merge_parallel = merge_parallel
self.radix = 2 ** reduction # relationship between reduction factor and radix
self.Ns = self.tdef.Ns # number of states
self.Nb = self.tdef.Nb * 2 ** (reduction - 1) # number of branches
self.wc = self.tdef.wc * reduction # number of coded bits
self.wu = self.tdef.wu * reduction # number of data bits
# empty precomputed lists
self.get_dat_pc = []
self.get_enc_bits_pc = []
self.get_next_state_pc = []
self.get_prev_state_pc = []
self.get_next_branches_pc = []
self.get_prev_branches_pc = []
# perform computation of trellis
if reduction == 1 and not self.merge_parallel:
self.pre_calc_reduction1()
else:
self.pre_calculation()
def get_rate(self):
return self.wc / self.wu
def pre_calc_reduction1(self):
"""
Pre calculate the functions of a trellis:
- get_dat, get_enc_bits, get_next_state, get_prev_state
- get_next_branches, get_prev_branches
"""
self.get_dat_pc = [self.tdef.get_dat(x) for x in range(self.Nb)]
self.get_enc_bits_pc = [self.tdef.get_enc_bits(x) for x in range(self.Nb)]
self.get_next_state_pc = [self.tdef.get_next_state(x) for x in range(self.Nb)]
self.get_prev_state_pc = [self.tdef.get_prev_state(x) for x in range(self.Nb)]
self.get_prev_branches_pc = [self.tdef.get_prev_branches(x) for x in range(self.Ns)]
        # for the pre-calculation of 'next branches' we do the same,
        # but additionally sort by the data output generated by each branch.
        # This way the encoder can get the next branch via
# this code -> trellis.get_next_branches_pc[current_state][data_input]
get_next_branches_pc_unsorted = [self.tdef.get_next_branches(x) for x in range(self.Ns)]
self.get_next_branches_pc = []
for b in get_next_branches_pc_unsorted:
dat_b = [self.get_dat_pc[x] for x in b]
dat_d = [utils.bin2dec(x) for x in dat_b]
b_new = [x for _, x in sorted(zip(dat_d, b))]
self.get_next_branches_pc.append(b_new)
def pre_calculation(self):
"""
Pre calculate the functions of a trellis (with options):
- get_dat, get_enc_bits, get_next_state, get_prev_state
- get_next_branches, get_prev_branches
Options are:
- reduction = log2(radix)
- merge parallel branches
"""
all_u, all_c, all_s = [], [], []
for s in range(self.Ns):
u, c, s = self._get_all_paths(self.reduction, s)
all_u = all_u + u
all_c = all_c + c
all_s = all_s + s
# init tables
n_branches_per_state = int(self.Nb / self.Ns)
self.get_dat_pc = []
self.get_enc_bits_pc = []
self.get_next_state_pc = []
self.get_prev_state_pc = []
self.get_next_branches_pc = [[-1] * n_branches_per_state for i in range(self.Ns)]
self.get_prev_branches_pc = [[] for i in range(self.Ns)]
# loop through all paths and generate a new branch for each
for branch_index in range(len(all_u)):
u = all_u[branch_index]
c = all_c[branch_index]
s = all_s[branch_index]
# check if branch already exists
n_states = self.get_next_state_pc
p_states = self.get_prev_state_pc
branch_exists = True in [x == s[0] and y == s[-1] for x, y in zip(p_states, n_states)]
if self.merge_parallel and branch_exists:
pass
else:
dat_int = utils.bin2dec(u)
self.get_dat_pc.append(u)
self.get_enc_bits_pc.append(c)
self.get_next_state_pc.append(s[-1])
self.get_prev_state_pc.append(s[0])
self.get_next_branches_pc[s[0]][dat_int] = branch_index
self.get_prev_branches_pc[s[-1]].append(branch_index)
def _get_all_paths(self, depth, state):
"""
        Recursively get all paths from a start state with depth n
Parameters
----------
depth [int]: depths of recursion
state [int]: start state for paths to be returned
Returns
-------
pathlist_u, pathlist_c, pathlist_s: [list of lists] list of paths
"""
if depth == 0:
return [[]], [[]], [[state]]
else:
            # for each next state (next_state):
# get all paths with depth-1 from next_state,
# then add the path from state to next_state to all these paths
# add new paths to list
pathlist_u, pathlist_c, pathlist_s = [], [], []
# pathlist_* are lists of paths
for b in self.tdef.get_next_branches(state):
next_state = self.tdef.get_next_state(b)
u1, c1, s1 = self._get_all_paths(depth - 1, next_state)
# for all lists in u1 add the new element
pathlist_u += [self.tdef.get_dat(b) + x for x in u1]
pathlist_c += [self.tdef.get_enc_bits(b) + x for x in c1]
pathlist_s += [[state] + x for x in s1]
return pathlist_u, pathlist_c, pathlist_s
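# Illustrative usage (the trellis-definition object is an assumption; it must
# expose the members used above: Ns, Nb, wc, wu, get_dat, get_enc_bits,
# get_next_state, get_prev_state, get_next_branches, get_prev_branches):
# tdef = MyTrellisDefinition()
# trellis = Trellis(tdef, reduction=2, merge_parallel=True)  # radix-4 trellis
# print(trellis.get_rate())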
| 3.5625 | 4 |
postcodeinfo/apps/postcode_api/management/commands/download_and_import_postcode_gss_codes.py | UKHomeOffice/postcodeinfo | 0 | 12766987 | <gh_stars>0
import os
from django.core.management.base import BaseCommand
from postcode_api.utils import ZipExtractor
from postcode_api.downloaders import PostcodeGssCodeDownloader
from postcode_api.importers.postcode_gss_code_importer \
import PostcodeGssCodeImporter
class Command(BaseCommand):
args = '<destination_dir (default /tmp/)>'
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('--destination_dir',
action='store_true',
dest='destination_dir',
default='/tmp/postcode_gss_codes/')
def handle(self, *args, **options):
if not os.path.exists(options['destination_dir']):
os.makedirs(options['destination_dir'])
downloaded_files = self._download(options['destination_dir'])
if downloaded_files:
self._process(downloaded_files)
else:
print 'nothing downloaded - nothing to import'
def _download(self, destination_dir):
print 'downloading'
downloader = PostcodeGssCodeDownloader()
return downloader.download(destination_dir)
def _process(self, filepath):
if isinstance(filepath, list):
filepath = filepath[0]
files = ZipExtractor(filepath).unzip_if_needed('.*NSPL.*\.csv')
for path in files:
print 'importing ' + path
self._import(path)
return True
def _import(self, downloaded_file):
importer = PostcodeGssCodeImporter()
importer.import_postcode_gss_codes(downloaded_file)
| 2.140625 | 2 |
teda/fitting.py | majkelx/teda | 1 | 12766988 | <gh_stars>1-10
import math
import numpy as np
from scipy import optimize
def fit_gauss_1d_zero_c(x, y):
"""
    Fits a * gaussian(mu=0, sig) + constant sky offset c
Returns
-------
model, a, sig, c, rmse
"""
x, y = np.asarray(x), np.asarray(y)
gauss0 = lambda x, a, c, sig2: c + a * np.exp(-x ** 2 / (2 * sig2))
opt, cov = optimize.curve_fit(gauss0, x, y, p0=[1.0, 0.0, 1.0])
res = gauss0(x, *opt) - y
rmse = math.sqrt((res * res).sum() / len(res))
try:
sig = math.sqrt(opt[2])
except ValueError:
sig = 0
return gauss0, opt[0], sig, opt[1], rmse
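# Illustrative sanity check (synthetic, noise-free data; the values are assumptions):
# x = np.linspace(-5, 5, 101)
# y = 2.0 * np.exp(-x ** 2 / (2 * 1.5 ** 2)) + 0.1
# model, a, sig, c, rmse = fit_gauss_1d_zero_c(x, y)  # expect a ~ 2.0, sig ~ 1.5, c ~ 0.1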
def fit_gauss_2d_c(xy, values, initial_mu=None, mu_radius=(np.inf, np.inf)):
"""
Fits a*gaussian(mu_x, mu_y, sig) + sky
Parameters
----------
xy:
List of coordinates, shape: Nx2
Returns
-------
model, a, mu_x, mu_y, sig, c, rmse
"""
if initial_mu is None: # initial guess
initial_mu = xy[0]
xy = np.asarray(xy).T # transpose xy[0]=x xy[1] = y
def gauss(xy, a, c, mu_x, mu_y, sig2):
val = c + a * np.exp(-((mu_x - xy[0])** 2 + (mu_y - xy[1])** 2) / (2 * sig2))
return val
# gauss = lambda xy, a, c, mu_x, mu_y, sig2: c + a * np.exp(-((mu_x - xy[0])** 2 + (mu_y - xy[1])** 2) / (2 * sig2))
minimal = np.nanmin(values)
maximal = np.nanmax(values)
opt, cov = optimize.curve_fit(gauss, xy, values,
p0=[maximal - minimal, minimal, initial_mu[0], initial_mu[1], 1.0],
bounds=(
[0.0, -np.inf, initial_mu[0]-mu_radius[0], initial_mu[1]-mu_radius[1], -np.inf],
[ np.inf, np.inf, initial_mu[0]+mu_radius[0], initial_mu[1]+mu_radius[1], np.inf])
)
res = (gauss(xy, *opt) - values)
    rmse = math.sqrt((res * res).sum() / len(values))
try:
sig = math.sqrt(opt[4])
except ValueError:
sig = 0
return gauss, opt[0], opt[2], opt[3], sig, opt[1], rmse | 2.734375 | 3 |
dataset/waveform_dataset.py | wimmerb/Wave-U-Net-for-Speech-Enhancement | 166 | 12766989 | import os
import librosa
from torch.utils import data
from util.utils import sample_fixed_length_data_aligned
class Dataset(data.Dataset):
def __init__(self,
dataset,
limit=None,
offset=0,
sample_length=16384,
mode="train"):
"""Construct dataset for training and validation.
Args:
dataset (str): *.txt, the path of the dataset list file. See "Notes."
limit (int): Return at most limit files in the list. If None, all files are returned.
offset (int): Return files starting at an offset within the list. Use negative values to offset from the end of the list.
sample_length(int): The model only supports fixed-length input. Use sample_length to specify the feature size of the input.
mode(str): If mode is "train", return fixed-length signals. If mode is "validation", return original-length signals.
Notes:
dataset list file:
<noisy_1_path><space><clean_1_path>
<noisy_2_path><space><clean_2_path>
...
<noisy_n_path><space><clean_n_path>
e.g.
/train/noisy/a.wav /train/clean/a.wav
/train/noisy/b.wav /train/clean/b.wav
...
Return:
(mixture signals, clean signals, filename)
"""
super(Dataset, self).__init__()
dataset_list = [line.rstrip('\n') for line in open(os.path.abspath(os.path.expanduser(dataset)), "r")]
dataset_list = dataset_list[offset:]
if limit:
dataset_list = dataset_list[:limit]
assert mode in ("train", "validation"), "Mode must be one of 'train' or 'validation'."
self.length = len(dataset_list)
self.dataset_list = dataset_list
self.sample_length = sample_length
self.mode = mode
def __len__(self):
return self.length
def __getitem__(self, item):
mixture_path, clean_path = self.dataset_list[item].split(" ")
filename = os.path.splitext(os.path.basename(mixture_path))[0]
mixture, _ = librosa.load(os.path.abspath(os.path.expanduser(mixture_path)), sr=None)
clean, _ = librosa.load(os.path.abspath(os.path.expanduser(clean_path)), sr=None)
if self.mode == "train":
# The input of model should be fixed-length in the training.
mixture, clean = sample_fixed_length_data_aligned(mixture, clean, self.sample_length)
return mixture.reshape(1, -1), clean.reshape(1, -1), filename
else:
return mixture.reshape(1, -1), clean.reshape(1, -1), filename
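# Illustrative usage (the list-file path is an assumption):
# train_set = Dataset("~/datasets/train_list.txt", sample_length=16384, mode="train")
# mixture, clean, name = train_set[0]  # signals come back with shape (1, num_samples)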
| 2.84375 | 3 |
benchmark/utils.py | facebookresearch/fairring | 42 | 12766990 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
from typing import Any, List, Tuple
def recv_from_connections_and_join_processes(
processes_and_connections: List[
Tuple[multiprocessing.Process, multiprocessing.connection.Connection]
],
) -> List[Any]:
"""
Wait for processes to return a value via a connection and then to terminate
Given a list of processes and, for each of them, (the reading end of) a
connection on which the process will send its result, gather the results of
all processes and then join them, with extra care taken to handle any error
(e.g., process crashing without returning) and kill all processes in case.
"""
results = [None] * len(processes_and_connections)
try:
connections = [c for _, c in processes_and_connections]
sentinels = [p.sentinel for p, _ in processes_and_connections]
not_ready = connections + sentinels
while len(not_ready) > 0:
ready = multiprocessing.connection.wait(not_ready)
for obj in ready:
if obj in connections:
idx = connections.index(obj)
try:
val = obj.recv()
except EOFError:
# We won't get any more values out of this connection.
not_ready.remove(obj)
else:
if results[idx] is not None:
raise RuntimeError(
f"Process {idx} returned more than one value"
)
# Wrap in a tuple so we can distinguish a process that
# returned None from one that didn't return yet.
results[idx] = (val,)
elif obj in sentinels:
idx = sentinels.index(obj)
proc, _ = processes_and_connections[idx]
proc.join()
if proc.exitcode != 0:
raise RuntimeError(
f"Process {idx} exited with status {proc.exitcode}"
)
not_ready.remove(obj)
else:
raise RuntimeError(f"Unexpected object: {obj}")
except Exception:
for p, _ in processes_and_connections:
p.kill()
for p, _ in processes_and_connections:
p.join()
raise
for idx, result in enumerate(results):
if result is None:
raise RuntimeError(f"Process {idx} exited without producing a result")
# Unwrap from the tuples.
return [r for r, in results]
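# Illustrative usage sketch (the worker and pipe wiring are assumptions, not
# part of the benchmark). Each worker sends one value over its pipe; the parent
# must close its copy of the writing end so EOF is observed once a child exits.
# def _worker(conn):
#     conn.send(42)
#     conn.close()
#
# pairs = []
# for _ in range(4):
#     recv_end, send_end = multiprocessing.Pipe(duplex=False)
#     proc = multiprocessing.Process(target=_worker, args=(send_end,))
#     proc.start()
#     send_end.close()
#     pairs.append((proc, recv_end))
# recv_from_connections_and_join_processes(pairs)  # -> [42, 42, 42, 42]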
| 2.828125 | 3 |
image_indexer/imageDAO.py | RikEnde/image-indexer | 2 | 12766991 | <gh_stars>1-10
#!/usr/bin/env python
import bson
import pymongo
class ImageDAO(object):
"""
Wraps inserts and queries to the images collection
As well as some aggregations
"""
def __init__(self, connection_string='mongodb://localhost', database='pictures', collection='images', upsert=False):
connection = pymongo.MongoClient(connection_string)
self.database = connection[database]
self.db = self.database[collection]
self.upsert = upsert
self.db.create_index([('path', 1)], unique=True)
self.db.create_index([('hash', 1)], unique=False)
def aggregate(self, query):
"""
Return one aggregated document
"""
return self.db.aggregate(query)
def find_by_hash(self, hashcode):
"""
Return list of image file paths with the given hash code
"""
cursor = self.db.find({'hash': hashcode}, {'path': 1})
ret = []
for doc in cursor:
ret.append(doc['path'])
return ret
def add_data(self, data):
"""
Attempt to insert document called data into the images collection
In case of a duplicate key exception do an update if the upsert argument
has been selected
"""
try:
try:
self.db.insert(data)
except pymongo.errors.DuplicateKeyError as e:
if self.upsert:
_id = data.pop('_id', None)
try:
self.db.update({'_id': _id}, data)
except Exception as e:
print data
raise e
else:
print "File is already indexed", data['path']
return False
except pymongo.errors.OperationFailure as e:
print "Mongo error", e
raise
except bson.errors.InvalidDocument as e:
print "Can't convert your datatype", e, data['path']
print data, data.keys(), data['exif'].keys()
raise
except bson.errors.InvalidStringData as e:
print "Invalid string data", e, data['path']
raise
return True
| 3.015625 | 3 |
saas/backend/apps/subject/views.py | nannan00/bk-iam-saas | 0 | 12766992 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from drf_yasg.openapi import Response as yasg_response
from drf_yasg.utils import swagger_auto_schema
from rest_framework import serializers, status
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from backend.account.permissions import role_perm_class
from backend.apps.policy.serializers import PolicyDeleteSLZ, PolicyPartDeleteSLZ, PolicySLZ, PolicySystemSLZ
from backend.audit.audit import audit_context_setter, view_audit_decorator
from backend.biz.group import GroupBiz
from backend.biz.policy import ConditionBean, PolicyOperationBiz, PolicyQueryBiz
from backend.common.serializers import SystemQuerySLZ
from backend.common.swagger import ResponseSwaggerAutoSchema
from backend.service.constants import PermissionCodeEnum, SubjectRelationType
from backend.service.models import Subject
from .audit import SubjectGroupDeleteAuditProvider, SubjectPolicyDeleteAuditProvider
from .serializers import SubjectGroupSLZ, UserRelationSLZ
permission_logger = logging.getLogger("permission")
class SubjectGroupViewSet(GenericViewSet):
permission_classes = [role_perm_class(PermissionCodeEnum.MANAGE_ORGANIZATION.value)]
    paginator = None  # remove the limit/offset params from swagger
biz = GroupBiz()
@swagger_auto_schema(
operation_description="我的权限-用户组列表",
auto_schema=ResponseSwaggerAutoSchema,
responses={status.HTTP_200_OK: SubjectGroupSLZ(label="用户组", many=True)},
tags=["subject"],
)
def list(self, request, *args, **kwargs):
subject = Subject(type=kwargs["subject_type"], id=kwargs["subject_id"])
relations = self.biz.list_subject_group(subject, is_recursive=True)
return Response([one.dict() for one in relations])
@swagger_auto_schema(
operation_description="我的权限-退出用户组",
auto_schema=ResponseSwaggerAutoSchema,
query_serializer=UserRelationSLZ,
responses={status.HTTP_200_OK: yasg_response({})},
tags=["subject"],
)
@view_audit_decorator(SubjectGroupDeleteAuditProvider)
def destroy(self, request, *args, **kwargs):
subject = Subject(type=kwargs["subject_type"], id=kwargs["subject_id"])
serializer = UserRelationSLZ(data=request.query_params)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
permission_logger.info("subject group delete by user: %s", request.user.username)
        # Currently a user can only be removed from groups joined directly, not from groups joined via department membership
if data["type"] == SubjectRelationType.GROUP.value:
self.biz.remove_members(data["id"], [subject])
        # write the audit context
audit_context_setter(subject=subject, group=Subject.parse_obj(data))
return Response({})
class SubjectSystemViewSet(GenericViewSet):
permission_classes = [role_perm_class(PermissionCodeEnum.MANAGE_ORGANIZATION.value)]
    paginator = None  # remove the limit/offset params from swagger
biz = PolicyQueryBiz()
@swagger_auto_schema(
operation_description="Subject有权限的所有系统列表",
auto_schema=ResponseSwaggerAutoSchema,
query_serializer=None,
responses={status.HTTP_200_OK: PolicySystemSLZ(label="系统", many=True)},
tags=["subject"],
)
def list(self, request, *args, **kwargs):
subject = Subject(type=kwargs["subject_type"], id=kwargs["subject_id"])
data = self.biz.list_system_counter_by_subject(subject)
return Response([one.dict() for one in data])
class SubjectPolicyViewSet(GenericViewSet):
permission_classes = [role_perm_class(PermissionCodeEnum.MANAGE_ORGANIZATION.value)]
    paginator = None  # remove the limit/offset params from swagger
policy_query_biz = PolicyQueryBiz()
policy_operation_biz = PolicyOperationBiz()
@swagger_auto_schema(
operation_description="Subject权限列表",
auto_schema=ResponseSwaggerAutoSchema,
query_serializer=SystemQuerySLZ,
responses={status.HTTP_200_OK: PolicySLZ(label="策略", many=True)},
tags=["subject"],
)
def list(self, request, *args, **kwargs):
subject = Subject(type=kwargs["subject_type"], id=kwargs["subject_id"])
slz = SystemQuerySLZ(data=request.query_params)
slz.is_valid(raise_exception=True)
system_id = slz.validated_data["system_id"]
policies = self.policy_query_biz.list_by_subject(system_id, subject)
return Response([p.dict() for p in policies])
@swagger_auto_schema(
operation_description="删除权限",
auto_schema=ResponseSwaggerAutoSchema,
query_serializer=PolicyDeleteSLZ,
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["subject"],
)
@view_audit_decorator(SubjectPolicyDeleteAuditProvider)
def destroy(self, request, *args, **kwargs):
subject = Subject(type=kwargs["subject_type"], id=kwargs["subject_id"])
slz = PolicyDeleteSLZ(data=request.query_params)
slz.is_valid(raise_exception=True)
system_id = slz.validated_data["system_id"]
ids = slz.validated_data["ids"]
permission_logger.info("subject policy delete by user: %s", request.user.username)
        # query before deleting so the audit log can record the policies
policy_list = self.policy_query_biz.query_policy_list_by_policy_ids(system_id, subject, ids)
        # delete the permissions
self.policy_operation_biz.delete_by_ids(system_id, subject, ids)
        # write the audit context
audit_context_setter(subject=subject, system_id=system_id, policies=policy_list.policies)
return Response()
@swagger_auto_schema(
operation_description="权限更新",
auto_schema=ResponseSwaggerAutoSchema,
request_body=PolicyPartDeleteSLZ(label="条件删除"),
responses={status.HTTP_200_OK: serializers.Serializer()},
tags=["subject"],
)
@view_audit_decorator(SubjectPolicyDeleteAuditProvider)
def update(self, request, *args, **kwargs):
subject = Subject(type=kwargs["subject_type"], id=kwargs["subject_id"])
slz = PolicyPartDeleteSLZ(data=request.data)
slz.is_valid(raise_exception=True)
data = slz.validated_data
policy_id = kwargs["pk"]
system_id = data["system_id"]
resource_type = data["type"]
condition_ids = data["ids"]
condition = data["condition"]
permission_logger.info("subject policy delete partial by user: %s", request.user.username)
delete_policy = self.policy_operation_biz.delete_partial(
subject,
policy_id,
system_id,
resource_type,
condition_ids,
[ConditionBean(attributes=[], **c) for c in condition],
)
        # write the audit context
audit_context_setter(subject=subject, system_id=system_id, policies=[delete_policy])
return Response({})
| 1.351563 | 1 |
aplicaciones_informaticas/backend/migrations/0023_atentionqueue_max_capacity.py | awainer/7539 | 0 | 12766993 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-08 22:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0022_auto_20161208_1740'),
]
operations = [
migrations.AddField(
model_name='atentionqueue',
name='max_capacity',
field=models.PositiveIntegerField(default=10),
),
]
| 1.46875 | 1 |
src/einsteinpy/coordinates/utils.py | Bhavam/einsteinpy | 0 | 12766994 | <gh_stars>0
import numpy as np
from ..ijit import jit
def cartesian_to_spherical_fast(
x, y, z, v_x=None, v_y=None, v_z=None, velocities_provided=False
):
if velocities_provided:
return cartesian_to_spherical(x, y, z, v_x, v_y, v_z)
return cartesian_to_spherical_novel(x, y, z)
@jit
def cartesian_to_spherical(x, y, z, v_x, v_y, v_z):
"""
Utility function (jitted) to convert cartesian to spherical.
This function should eventually result in Coordinate Transformation Graph!
"""
hxy = np.hypot(x, y)
r = np.hypot(hxy, z)
theta = np.arctan2(hxy, z)
phi = np.arctan2(y, x)
n1 = x ** 2 + y ** 2
n2 = n1 + z ** 2
v_r = (x * v_x + y * v_y + z * v_z) / np.sqrt(n2)
v_t = (z * (x * v_x + y * v_y) - n1 * v_z) / (n2 * np.sqrt(n1))
v_p = -1 * (v_x * y - x * v_y) / n1
return r, theta, phi, v_r, v_t, v_p
@jit
def cartesian_to_spherical_novel(x, y, z):
"""
Utility function (jitted) to convert cartesian to spherical.
This function should eventually result in Coordinate Transformation Graph!
"""
hxy = np.hypot(x, y)
r = np.hypot(hxy, z)
theta = np.arctan2(hxy, z)
phi = np.arctan2(y, x)
return r, theta, phi
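# Quick illustrative check (input values are an assumption):
# cartesian_to_spherical_novel(1.0, 1.0, 1.0)
#   -> r ~ 1.7321 (sqrt(3)), theta ~ 0.9553 rad, phi ~ 0.7854 rad (pi/4)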
def cartesian_to_bl_fast(
x, y, z, a, v_x=None, v_y=None, v_z=None, velocities_provided=False
):
if velocities_provided:
return cartesian_to_bl(x, y, z, a, v_x, v_y, v_z)
return cartesian_to_bl_novel(x, y, z, a)
@jit
def cartesian_to_bl(x, y, z, a, v_x, v_y, v_z):
"""
    Utility function (jitted) to convert cartesian to Boyer-Lindquist.
This function should eventually result in Coordinate Transformation Graph!
"""
w = (x ** 2 + y ** 2 + z ** 2) - (a ** 2)
r = np.sqrt(0.5 * (w + np.sqrt((w ** 2) + (4 * (a ** 2) * (z ** 2)))))
theta = np.arccos(z / r)
phi = np.arctan2(y, x)
dw_dt = 2 * (x * v_x + y * v_y + z * v_z)
v_r = (1 / (2 * r)) * (
(dw_dt / 2)
+ (
(w * dw_dt + 4 * (a ** 2) * z * v_z)
/ (2 * np.sqrt((w ** 2) + (4 * (a ** 2) * (z ** 2))))
)
)
v_t = (-1 / np.sqrt(1 - np.square(z / r))) * ((v_z * r - v_r * z) / (r ** 2))
v_p = (1 / (1 + np.square(y / x))) * ((v_y * x - v_x * y) / (x ** 2))
return r, theta, phi, v_r, v_t, v_p, a
@jit
def cartesian_to_bl_novel(x, y, z, a):
"""
    Utility function (jitted) to convert cartesian to Boyer-Lindquist.
This function should eventually result in Coordinate Transformation Graph!
"""
w = (x ** 2 + y ** 2 + z ** 2) - (a ** 2)
r = np.sqrt(0.5 * (w + np.sqrt((w ** 2) + (4 * (a ** 2) * (z ** 2)))))
theta = np.arccos(z / r)
phi = np.arctan2(y, x)
return r, theta, phi, a
def spherical_to_cartesian_fast(
r, t, p, v_r=None, v_t=None, v_p=None, velocities_provided=False
):
if velocities_provided:
return spherical_to_cartesian(r, t, p, v_r, v_t, v_p)
return spherical_to_cartesian_novel(r, t, p)
@jit
def spherical_to_cartesian(r, t, p, v_r, v_t, v_p):
"""
Utility function (jitted) to convert spherical to cartesian.
This function should eventually result in Coordinate Transformation Graph!
"""
x = r * np.cos(p) * np.sin(t)
y = r * np.sin(p) * np.sin(t)
z = r * np.cos(t)
v_x = (
np.sin(t) * np.cos(p) * v_r
- r * np.sin(t) * np.sin(p) * v_p
+ r * np.cos(t) * np.cos(p) * v_t
)
v_y = (
np.sin(t) * np.sin(p) * v_r
+ r * np.cos(t) * np.sin(p) * v_t
+ r * np.sin(t) * np.cos(p) * v_p
)
v_z = np.cos(t) * v_r - r * np.sin(t) * v_t
return x, y, z, v_x, v_y, v_z
@jit
def spherical_to_cartesian_novel(r, t, p):
"""
Utility function (jitted) to convert spherical to cartesian.
This function should eventually result in Coordinate Transformation Graph!
"""
x = r * np.cos(p) * np.sin(t)
y = r * np.sin(p) * np.sin(t)
z = r * np.cos(t)
return x, y, z
def bl_to_cartesian_fast(
r, t, p, a, v_r=None, v_t=None, v_p=None, velocities_provided=False
):
if velocities_provided:
return bl_to_cartesian(r, t, p, a, v_r, v_t, v_p)
return bl_to_cartesian_novel(r, t, p, a)
@jit
def bl_to_cartesian(r, t, p, a, v_r, v_t, v_p):
"""
Utility function (jitted) to convert bl to cartesian.
This function should eventually result in Coordinate Transformation Graph!
"""
xa = np.sqrt(r ** 2 + a ** 2)
sin_norm = xa * np.sin(t)
x = sin_norm * np.cos(p)
y = sin_norm * np.sin(p)
z = r * np.cos(t)
v_x = (
(r * v_r * np.sin(t) * np.cos(p) / xa)
+ (xa * np.cos(t) * np.cos(p) * v_t)
- (xa * np.sin(t) * np.sin(p) * v_p)
)
v_y = (
(r * v_r * np.sin(t) * np.sin(p) / xa)
+ (xa * np.cos(t) * np.sin(p) * v_t)
+ (xa * np.sin(t) * np.cos(p) * v_p)
)
v_z = (v_r * np.cos(t)) - (r * np.sin(t) * v_t)
return x, y, z, v_x, v_y, v_z
@jit
def bl_to_cartesian_novel(r, t, p, a):
"""
Utility function (jitted) to convert bl to cartesian.
This function should eventually result in Coordinate Transformation Graph!
"""
xa = np.sqrt(r ** 2 + a ** 2)
sin_norm = xa * np.sin(t)
x = sin_norm * np.cos(p)
y = sin_norm * np.sin(p)
z = r * np.cos(t)
return x, y, z
| 3 | 3 |
test/conductor/test_conductor_translation.py | onap/optf-osdf | 3 | 12766995 | # -------------------------------------------------------------------------
# Copyright (c) 2017-2018 AT&T Intellectual Property
# Copyright (C) 2020 Wipro Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import unittest
from osdf.adapters.local_data import local_policies
from osdf.adapters.conductor import translation as tr
from osdf.utils.interfaces import json_from_file
class TestConductorTranslation(unittest.TestCase):
def setUp(self):
self.main_dir = ""
self.conductor_api_template = self.main_dir + "osdf/templates/conductor_interface.json"
self.local_config_file = self.main_dir + "config/common_config.yaml"
policy_data_path = self.main_dir + "test/policy-local-files"
valid_policies_list_file = policy_data_path + '/' + 'meta-valid-policies.txt'
valid_policies_files = local_policies.get_policy_names_from_file(valid_policies_list_file)
parameter_data_file = self.main_dir + "test/placement-tests/request.json"
self.request_json = json_from_file(parameter_data_file)
parameter_data_file = self.main_dir + "test/placement-tests/request_vfmod.json"
self.request_vfmod_json = json_from_file(parameter_data_file)
self.policies = [json_from_file(policy_data_path + '/' + name) for name in valid_policies_files]
self.optimization_policies = [json_from_file(policy_data_path + '/'
+ "slice-selection-files/opt_policy_nsi_reuse.json")]
def tearDown(self):
pass
def test_gen_demands(self):
# need to run this only on vnf policies
vnf_policies = [x for x in self.policies if x[list(x.keys())[0]]["type"]
== "onap.policies.optimization.VnfPolicy"]
res = tr.gen_demands(self.request_json['placementInfo']['placementDemands'], vnf_policies)
assert res is not None
def test_gen_vfmod_demands(self):
# need to run this only on vnf policies
vnf_policies = [x for x in self.policies if x[list(x.keys())[0]]["type"]
== "onap.policies.optimization.VnfPolicy"]
res = tr.gen_demands(self.request_vfmod_json['placementInfo']['placementDemands'], vnf_policies)
assert res is not None
def test_gen_optimization_policy(self):
expected = [{
"goal": "minimize",
"operation_function": {
"operator": "sum",
"operands": [
{
"function": "attribute",
"params": {
"attribute": "creation_cost",
"demand": "embb-nst"
}
}
]
}
}]
self.assertEqual(expected,
tr.gen_optimization_policy(self.request_vfmod_json['placementInfo']['placementDemands'],
self.optimization_policies))
if __name__ == "__main__":
unittest.main()
| 1.734375 | 2 |
url200.py | endritqerreti/URL200 | 0 | 12766996 | <filename>url200.py
#!/usr/bin/python
#App Name : URL200
#Author : <NAME>
#Repo : https://github.com/endritqerreti/URL200
#License : MIT License
#Version : 1.0.0
import requests
import csv
import sys
import time
from datetime import date
seconds = 2
path = 'urls.txt'
today = date.today()
f = open(path, 'r')
count = f.readlines()
app_name = """\n
# .##..##.#####..##......####...####...####..
# .##..##.##..##.##.........##.##..##.##..##.
# .##..##.#####..##......####..######.######.
# .##..##.##..##.##.....##.....##..##.##..##.
# ..####..##..##.######.######..####...####..
"""
print(app_name)
appname ="A Simple URL Checker"
version = "#Version : 1.0.0"
license = "#License : MIT License"
coded_by ="#Coded by: <NAME>"
print(appname.center(45),"\n","\n",version,"\n",license,"\n",coded_by,"\n")
if len(count) == 0:
file_status = sys.exit("URLS.txt is empty")
else:
file_status="[OK]"
line = '----'
print(line[0] * 58, )
print ("| URL loaded :",len(count),'|','Date :', today,"| File status:",file_status, "|")
print(line[0] * 58)
options = [
'\nOPTIONS\n',
'[Y] - To start checking URLs',
'[N] - To exit the program',
'',
]
for option in options:
print(option)
set_option = input("Type Y/y or N/n: ").lower().strip()
if set_option == 'y':
print ("starting..")
elif set_option == 'n':
sys.exit("Program closed")
else:
sys.exit("wrong command")
results = csv.writer(open('results.csv', 'w'))
results.writerow(['URL', 'Response', 'Date :', today])
good_urls = csv.writer(open('good_urls.csv', 'w'))
good_urls.writerow(['URL', 'Response', 'Date :', today])
count = 0
good_url = 0
bad_url = 0
with open(path, 'r') as urls:
for adresa in urls:
count += 1
time.sleep(seconds)
print("\n")
adresa = requests.get(adresa.strip())
pergjigja = adresa.status_code
results.writerow([adresa.url, pergjigja])
print([count],"Checking : ", adresa.url ,"\n[-] Response : ",pergjigja)
if pergjigja == 200:
good_url += 1
good_urls.writerow([adresa.url, pergjigja])
else:
bad_url += 1
continue
def statusi():
print("\nGood URLs (200 OK):",[good_url],"\n---", "\nBad URLs :",[bad_url],"\n---", "\nTotal URLs :",[count],"\n")
statusi()
| 2.875 | 3 |
garden/models.py | e-dang/Autogarden | 0 | 12766997 | from datetime import datetime, timedelta
import pytz
from django.conf import settings
from django.contrib.auth.hashers import check_password
from django.db import models
from django.urls import reverse
from rest_framework.request import Request
from garden.formatters import WateringStationFormatter
from .managers import TokenManager
def _default_moisture_threshold():
return 50
def _default_watering_duration():
return timedelta(minutes=1)
def _default_is_connected():
return False
def _default_update_frequency():
return timedelta(minutes=5)
def _default_status():
return True
def _default_garden_image():
return 'default_garden.jpg'
class Garden(models.Model):
OK = 'ok'
LOW = 'lo'
WATER_LEVEL_CHOICES = [
(OK, 'Ok'),
(LOW, 'Low'),
]
owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='gardens', on_delete=models.CASCADE)
name = models.CharField(max_length=255, db_index=True)
image = models.ImageField(default=_default_garden_image)
is_connected = models.BooleanField(default=_default_is_connected)
last_connection_ip = models.GenericIPAddressField(null=True)
last_connection_time = models.DateTimeField(null=True)
update_frequency = models.DurationField(default=_default_update_frequency)
connection_strength = models.SmallIntegerField(null=True)
water_level = models.CharField(choices=WATER_LEVEL_CHOICES, max_length=2, null=True)
class Meta:
unique_together = ['owner', 'name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('garden-detail', kwargs={'pk': self.pk})
def get_watering_stations_url(self):
return reverse('watering-station-list', kwargs={'pk': self.pk})
def get_update_url(self):
return reverse('garden-update', kwargs={'pk': self.pk})
def get_delete_url(self):
return reverse('garden-delete', kwargs={'pk': self.pk})
def calc_time_till_next_update(self):
if self.last_connection_time is None:
return None
factor = 1
next_update = self.last_connection_time + factor * self.update_frequency - datetime.now(pytz.UTC)
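        # advance in whole multiples of update_frequency until the projected
        # update time lies in the future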
while next_update.total_seconds() < 0:
factor += 1
next_update = self.last_connection_time + factor * self.update_frequency - datetime.now(pytz.UTC)
return int(next_update.total_seconds())
def update_connection_status(self, request: Request):
self.is_connected = True
self.last_connection_ip = request.META.get('REMOTE_ADDR')
self.last_connection_time = datetime.now(pytz.UTC)
self.save()
def refresh_connection_status(self):
if self.last_connection_time is None:
return
time_next_update = self.last_connection_time + self.update_frequency - datetime.now(pytz.UTC)
if time_next_update.total_seconds() < 0:
self.is_connected = False
self.connection_strength = None
self.save()
def get_watering_station_formatters(self):
for watering_station in self.watering_stations.all():
yield WateringStationFormatter(watering_station)
def get_watering_station_idx(self, watering_station) -> int:
for i, station in enumerate(self.watering_stations.all()):
if station == watering_station:
return i
def get_watering_station_at_idx(self, idx):
for i, station in enumerate(self.watering_stations.all()):
if i == idx:
return station
def get_active_watering_stations(self):
return self.watering_stations.filter(status=True)
def get_num_active_watering_stations(self):
return self.get_active_watering_stations().count()
@property
def plant_types(self):
return self.watering_stations.exclude(plant_type__exact='').values_list('plant_type', flat=True)
@property
def time_since_last_connection(self):
if self.last_connection_time is None:
return None
return datetime.now(pytz.UTC) - self.last_connection_time
class Token(models.Model):
MAX_HASH_LENGTH = 128
garden = models.OneToOneField(Garden, on_delete=models.CASCADE)
uuid = models.CharField(max_length=MAX_HASH_LENGTH)
created = models.DateTimeField(auto_now_add=True)
objects = TokenManager()
def __str__(self):
return self.created.strftime('%B %-d, %Y %-I:%M %p')
def verify(self, uuid):
return check_password(uuid, self.uuid)
class WateringStation(models.Model):
garden = models.ForeignKey(Garden, related_name='watering_stations', on_delete=models.CASCADE)
image = models.ImageField(null=True, blank=True)
moisture_threshold = models.IntegerField(default=_default_moisture_threshold)
watering_duration = models.DurationField(default=_default_watering_duration)
plant_type = models.CharField(max_length=255, blank=True)
status = models.BooleanField(default=_default_status)
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['created']
def __str__(self):
return f'{str(self.garden)} - {self.idx}'
def get_absolute_url(self):
return reverse('watering-station-detail', kwargs={'garden_pk': self.garden.pk, 'ws_pk': self.pk})
def get_update_url(self):
return reverse('watering-station-update', kwargs={'garden_pk': self.garden.pk, 'ws_pk': self.pk})
def get_delete_url(self):
return reverse('watering-station-delete', kwargs={'garden_pk': self.garden.pk, 'ws_pk': self.pk})
def get_records_url(self):
return reverse('watering-station-record-list', kwargs={'garden_pk': self.garden.pk, 'ws_pk': self.pk})
@property
def idx(self):
return self.garden.get_watering_station_idx(self)
def get_formatter(self):
return WateringStationFormatter(self)
class WateringStationRecord(models.Model):
watering_station = models.ForeignKey(WateringStation, related_name='records', on_delete=models.CASCADE)
moisture_level = models.FloatField()
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['created']
def __str__(self):
return f'{self.watering_station.garden}/{self.watering_station.idx}/{self.created}'
| 2.15625 | 2 |
fast_bird_part_localization/geometry_utils.py | thedekel/fast-bird-part-localization | 25 | 12766998 | import numpy as np
import cv2
from poisson_disk import PoissonDiskSampler
import skimage.morphology
import skimage.measure
import scipy.stats
class Box(object):
"""
This class represents a box in an image. This could be a bounding box of an object or part.
Internally each box is represented by a tuple of 4 integers: (xmin, xmax, ymin, ymax)
"""
    POINT_GENERATION_POLICIES = ['poisson_disk']
def __init__(self, xmin, xmax, ymin, ymax):
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
def __repr__(self):
return "%d - %d - %d - %d" % (self.xmin, self.xmax, self.ymin, self.ymax)
def is_valid(self):
return int(self.xmin) != -1
@staticmethod
def box_from_img(img):
"""
        Creates a box from the image
"""
height, width = img.shape[:2]
return Box(0, height, 0, width)
@staticmethod
def box_from_cendim(cen, dim):
"""
        Create a box from a pair of center and dimension. Each center or dimension is a tuple. For brevity we call this pair the `cendim`
Center: (cenX, cenY)
Dimension: (height, width)
"""
cenX, cenY = cen
height, width = dim
height_2 = height / 2.
width_2 = width / 2.
xmin = int(round(cenX - height_2))
xmax = int(round(cenX + height_2))
ymin = int(round(cenY - width_2))
ymax = int(round(cenY + width_2))
return Box(xmin, xmax, ymin, ymax)
def cendim(self):
"""
Convert the box into cendim format. In cendim format the center and dimension are stored as floating point numbers.
"""
cenX = float(self.xmin + self.xmax) / 2
cenY = float(self.ymin + self.ymax) / 2
height = float(self.xmax - self.xmin)
width = float(self.ymax - self.ymin)
cen = (cenX, cenY)
dim = (height, width)
return cen, dim
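    # Illustrative round trip (values are an assumption, not from the source):
    # Box.box_from_cendim((50, 50), (20, 40)) has xmin=40, xmax=60, ymin=30,
    # ymax=70, and its cendim() returns ((50.0, 50.0), (20.0, 40.0)).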
def trim_to_borders(self, img_shape):
"""
Trims the box with respect to the image provided.
"""
img_h, img_w = img_shape[:2]
self.xmin = max(0, self.xmin)
self.xmax = min(img_h - 1, self.xmax)
self.ymin = max(0, self.ymin)
self.ymax = min(img_w - 1, self.ymax)
return self
def draw_box(self, img, color=(1, 0, 0), width=2):
"""
Annotate the `img` with this Box. This returns a new image with the box annotated on it.
"""
new_img = img.copy()
cv2.rectangle(new_img, (self.ymin, self.xmin), (self.ymax, self.xmax), color, width)
return new_img
def get_sub_image(self, img):
"""
Return a sub-image only containing information inside this Box.
"""
self.trim_to_borders(img.shape)
return img[self.xmin:self.xmax, self.ymin:self.ymax]
@staticmethod
def expand_cendim(cen, dim, alpha):
height, width = dim
height = (2 * alpha) * height
width = (2 * alpha) * width
dim = (height, width)
return cen, dim
def expand(self, alpha=0.666):
cen, dim = self.cendim()
cen, dim = Box.expand_cendim(cen, dim, alpha)
new_box = Box.box_from_cendim(cen, dim)
self.xmin = new_box.xmin
self.xmax = new_box.xmax
self.ymin = new_box.ymin
self.ymax = new_box.ymax
return self
def evalIOU(self, gt_box, source_shape):
# TODO
# making sure not to generate errors further down the line
self.trim_to_borders(source_shape)
gt_box.trim_to_borders(source_shape)
height, width = source_shape[:2]
gt_part = np.zeros((height, width), np.uint8)
gt_part[gt_box.xmin:gt_box.xmax, gt_box.ymin:gt_box.ymax] = 1
sl_part = np.zeros((height, width), np.uint8)
sl_part[self.xmin:self.xmax, self.ymin:self.ymax] = 1
intersection = (gt_part & sl_part).sum()
union = (gt_part | sl_part).sum()
return intersection / float(union)
def evalPCP(self, gt_box, source_shape, thresh=0.5):
iou = self.evalIOU(gt_box, source_shape)
if iou >= thresh:
return 1
else:
return 0
def generate_points_inside(self, policy='poisson_disk', param=None, img=None):
"""
This function generates points inside this rectangle. It uses the poisson disk to do it by default. But there is a policy option that is configurable.
There is an optional `param` parameter that specifies the parameters of the generation policy.
Different Policies:
- `poisson_disk`:
The param is expected to be the radius. The radius is the parameter of the poisson disk sampler.
By default radius is set to be average of 1/10 of width and height of the box.
Each point is a row vector [x, y]. A set of `n` points will be represented as a numpy array of shape (n,2). The dtype is numpy.int.
There can be an optional img option. We can use the image's shape to further prune points that are located outside the boundary of the image.
"""
        assert(policy in self.POINT_GENERATION_POLICIES)
cen, dim = self.cendim()
height, width = dim
if policy == 'poisson_disk':
if param is None:
radius = ((height / 10.) + (width / 10.)) / 2.
else:
radius = param
            # Please note that PoissonDiskSampler uses a flipped version of the axes.
            # Also, the algorithm generates points in the range [0, height] but we want
            # [0, height); that is the reason behind the "-1".
pds = PoissonDiskSampler(height - 1, width - 1, radius)
samples = pds.get_sample()
            points = np.zeros((len(samples), 2), dtype=int)
for i, s in enumerate(samples):
points[i, :] = [int(round(s[0])), int(round(s[1]))]
points += np.array([self.xmin, self.ymin])
return points
def draw_points(points, ax, color=None):
if color is None:
color = 'red'
for p in points:
        # Notice that in plt the axes are different from what we work with;
# namely in plt the horizontal axis is x and vertical axis is y
# whereas in numpy and images that we work with the vertical axis is x
# this is the reason behind the flipping of points here.
ax.plot(p[1], p[0], 'o', color=color)
def filter_points(points, box):
"""
Remove points that lie inside the box from the set.
"""
new_points_ind = []
for i, p in enumerate(points):
if (box.xmin <= p[0] <= box.xmax and box.ymin <= p[1] <= box.ymax):
continue
else:
new_points_ind.append(i)
return points[new_points_ind, :]
def post_process_preds(preds):
preds = skimage.morphology.closing(preds, skimage.morphology.square(10))
preds = skimage.morphology.remove_small_objects(preds, min_size=10, connectivity=1)
return preds
def find_rect_from_preds(preds):
L, N = skimage.measure.label(preds, return_num=True, background=0)
if N > 0:
L_no_bg = L[L != 0].flatten()
vals, counts = scipy.stats.mode(L_no_bg)
part_label = int(vals[0])
indices = np.where(L == part_label)
xmin = indices[0].min()
xmax = indices[0].max()
ymin = indices[1].min()
ymax = indices[1].max()
return Box(xmin, xmax, ymin, ymax)
else:
return Box(-1, -1, -1, -1)
| 3.078125 | 3 |
codility_python_tests/04_frog_jump_across_road.py | sreedhar-venkatesan/codility-python-tests | 0 | 12766999 | <filename>codility_python_tests/04_frog_jump_across_road.py
import math
def solution(X,Y,D):
distance = Y-X
return math.ceil(distance/D)
if __name__ =="__main__":
X=10
Y=85
D=30
ans = solution(X,Y, D)
print (ans) | 3.671875 | 4 |
captioner/utils.py | svaisakh/captioner | 1 | 12767000 | <reponame>svaisakh/captioner
import numpy as np
from contextlib import contextmanager
from collections import namedtuple
from itertools import chain
def _get_data_paths():
from pathlib import Path
DIR_DATA = Path('~/.data/COCO').expanduser()
DIR_CHECKPOINTS = Path(__file__).resolve().parents[1] / 'checkpoints'
for directory in [DIR_DATA, DIR_CHECKPOINTS]: directory.mkdir(exist_ok=True, parents=True)
return DIR_DATA, DIR_CHECKPOINTS
DIR_DATA, DIR_CHECKPOINTS = _get_data_paths()
class BeamSearch:
Branch = namedtuple('Branch', ['content', 'score', 'context'])
def __init__(self, build=None):
"""
A Beam Searcher instance.
:param build: A function, f(content, context) which takes in a content and context object and
returns a (contents, scores, context) tuple.
The definition of what these are is upto the function.
Content represents the things in a particular node.
Context represents the features of a branch of nodes.
A score is the log probability of a particular node.
"""
self.build = build
def __call__(self, beam_size, context, max_len, probabilistic=0):
"""
Perform beam search.
:param beam_size: The size of the beam used.
:param context: The initial context.
:param max_len: Search will be terminated when branches reach this length.
:param probabilistic: If True, retains nodes at each iteration according to their probabilities.
:return: A list of branches found. Each branch has a content and score (relative probability).
"""
return self._search(beam_size, context, max_len, probabilistic)
def _search(self, beam_size, context, max_len, probabilistic):
branches = [self.Branch([], 0, context)]
for _ in range(max_len):
branches = list(chain(*[[new_branch
for new_branch in self._get_branches(branch, beam_size, probabilistic)]
for branch in branches]))
branches = self._prune_branches(branches, beam_size, probabilistic)
branches = [self.Branch(branch.content, np.exp(branch.score), None) for branch in branches]
return branches
def _get_branches(self, branch, beam_size, probabilistic):
contents, scores, context = self.build(branch.content, branch.context)
nodes = [self.Branch([content], score, context)
for content, score in zip(contents, scores)]
if not probabilistic: nodes = self._prune_branches(nodes, beam_size, probabilistic)
return [self._merge(branch, node) for node in nodes]
def _merge(self, b1, b2):
return self.Branch(b1.content + b2.content, b1.score + b2.score, b2.context)
@staticmethod
def _prune_branches(branches, beam_size, probabilistic):
branches = _sort_list(branches, key=lambda branch: np.exp(branch.score), probabilistic=probabilistic)
return branches[:beam_size]
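# Illustrative build function (a toy sketch; the vocabulary and probabilities
# are assumptions, not from the source): always propose two tokens, ignoring
# the context.
# def toy_build(content, context):
#     return ['a', 'b'], [np.log(0.7), np.log(0.3)], context
# searcher = BeamSearch(build=toy_build)
# branches = searcher(beam_size=2, context=None, max_len=3)
# # -> 2 Branch tuples, each with a 3-token content and a relative probability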
def _sort_list(x, key, probabilistic):
if not probabilistic:
x.sort(key=key, reverse=True)
return x
probs = np.array([key(x_i) for x_i in x])
probs /= probs.sum()
ids = np.random.choice(list(range(len(x))), len(x), replace=False, p=probs)
return [x[i] for i in ids]
def launch(fn, defaults=None, default_module=None):
"""
Launches a function as the main entry point to a command line program.
:param fn: The function to launch.
:param defaults: A dictionary of default arguments to the function.
:param default_module: A module with default arguments with the same name.
Any options to click should be given in a dictionary called click_options in this module.
"""
import click
from inspect import signature
args = list(signature(fn).parameters.keys())
click_options = getattr(default_module, 'click_options', {}) if default_module is not None else {}
for k in args[::-1]:
if defaults is not None and k in defaults.keys():
d = defaults[k]
if type(d) in (tuple, list):
fn = click.option('--' + k, show_default=True, default=d[0], help=d[1])(fn)
else:
fn = click.option('--' + k, show_default=True, default=d)(fn)
if hasattr(default_module, k):
kwargs = click_options[k] if k in click_options.keys() else {}
fn = click.option('--' + k, show_default=True, default=getattr(default_module, k), **kwargs)(fn)
fn = click.command()(fn)
return fn()
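# Illustrative usage (the function name and defaults are assumptions):
# def train(epochs, lr):
#     ...
# launch(train, defaults={'epochs': 10, 'lr': (1e-3, 'learning rate')})
# # exposes --epochs and --lr as command-line options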
def show_coco(img, captions):
"""
Show the coco images with the captions as title.
:param img: Images to show.
:param captions: Corresponding captions
"""
import matplotlib.pyplot as plt
from numpy.random import randint
captions = captions[randint(len(captions))]
def show_image(image, title):
image = image.permute(1, 2, 0).numpy().copy()
i_min, i_max = image.min(), image.max()
image = (image - i_min) / (i_max - i_min)
plt.imshow(image)
plt.xticks([]); plt.yticks([]); plt.grid(False)
plt.title(title)
plt.show()
for i, c in zip(img, captions): show_image(i, c)
def loopy(gen):
"""
Returns an iterator with infinite length.
Does not raise the StopException.
:param gen: The generator object to loop.
:return: An infinite iterator.
"""
while True:
for x in iter(gen): yield x
def working_directory(path):
"""
A context manager cum decorator which changes the working directory to the specified path.
If used as a decorator with no arguments, the first path in the arguments of the inner function is used.
:param path: The path to change to/the function to decorate
"""
from inspect import isfunction
if not isfunction(path):
return _working_directory_context_manager(path)
from functools import wraps
@wraps(path)
def new_fn(*args, **kwargs):
from pathlib import PosixPath
working_path = [a for a in args if type(a) is PosixPath]
if len(working_path) != 0: working_path = working_path[0]
else:
working_path = [v for v in kwargs.values() if type(v) is PosixPath]
if len(working_path) != 0: working_path = working_path[0]
else: raise RuntimeError('No suitable paths found')
with _working_directory_context_manager(working_path):
return path(*args, **kwargs)
return new_fn
@contextmanager
def _working_directory_context_manager(path):
import os
# Change to working directory
path_cwd = os.getcwd()
os.chdir(path)
yield
os.chdir(path_cwd) # Change back to working directory
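# Illustrative usage (the paths are assumptions; PosixPath is pathlib's):
# with working_directory('/tmp'):
#     ...  # code here runs with /tmp as the current working directory
#
# @working_directory
# def build(path: PosixPath):
#     ...  # decorated form: the first PosixPath argument sets the cwd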
def get_tqdm():
"""
:return: Returns a flexible tqdm object according to the environment of execution.
"""
import tqdm
try:
get_ipython()
return getattr(tqdm, 'tqdm_notebook')
except:
return getattr(tqdm, 'tqdm')
def get_optimizer(optimizer):
"""
Returns an optimizer according to the passed string.
:param optimizer: The string representation of the optimizer. eg. 'adam' for Adam etc.
:return: The proper nn.optim optimizer.
"""
from torch import optim
from functools import partial
_optim_dict = {'adam': partial(optim.Adam, amsgrad=True)}
return _optim_dict[optimizer] | 2.625 | 3 |
norns/gear/migrations/0007_auto_20180523_2359.py | the-norns/norns | 0 | 12767001 | <gh_stars>0
# Generated by Django 2.0.5 on 2018-05-23 23:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gear', '0006_auto_20180522_2102'),
]
operations = [
migrations.AlterField(
model_name='consumable',
name='ability',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='status.Ability'),
),
migrations.AlterField(
model_name='weapon',
name='ability',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='status.Ability'),
),
]
| 1.460938 | 1 |
month05/Hadoop/day02_course/code/word_count.py | chaofan-zheng/python_learning_code | 0 | 12767002 | <filename>month05/Hadoop/day02_course/code/word_count.py
"""
Count the occurrences of each word in a file.
"""
from mrjob.job import MRJob
class WordCount(MRJob):
    # override the mapper and reducer methods
# def mapper(self, key, value):
def mapper(self, _, line):
        # key: the byte offset of the start of each line
        # value: the content of one line
        # The mapper runs once per line of the input text.
        # For each line, the byte offset is passed as key (_) and the text as value (line).
        # The content of a line is conventionally named `line`.
for word in line.split():
yield word, 1
    # shuffle and sort happen behind the scenes; MapReduce implements this itself
# little 1
# twink 1 1
# you 1
    def reducer(self, word, occurrence):
        # word: a key after shuffle/sort (one of the words above);
        # occurrence: the sequence of counts after shuffle (the 1s above)
        yield word, sum(occurrence)
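# Example invocation (assumed local runner; the input file name is illustrative):
# $ python word_count.py input.txt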
if __name__ == '__main__':
WordCount.run()
| 3.234375 | 3 |
setup.py | TheTechRobo/PyGit | 2 | 12767003 | from setuptools import setup
import os
with open(os.devnull, 'w') as a:
print("If this raises an error, you're using python 2 - not supported.", file=a) #get rid of python 2 users
with open("README.md", "r") as file:
long_desc = file.read()
import sys
if sys.version_info < (3,7):
sys.exit('Sorry, Python < 3.7 is not supported')
setup(
name='snakeGit',
version='0.4.5',
description='the missing Python git module',
long_description=long_desc,
python_requires='>3.7.0',
license='Apache-2.0',
packages=['snakeGit'],
author='TheTechRobo',
author_email='<EMAIL>',
keywords=['git', 'easy', 'thetechrobo'],
url='https://github.com/TheTechRobo/snakegit',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3'
],
project_urls={
'Documentation': 'https://github.com/thetechrobo/snakegit/wiki',
'Source': 'https://github.com/thetechrobo/snakegit',
'Tracker': 'https://github.com/thetechrobo/snakegit/issues',
},
long_description_content_type='text/markdown',
)
| 2.078125 | 2 |
src/ch3/dictionary_code.py | hillaryychan/iccsh | 1 | 12767004 | def find_prefix_entry(message, dictionary):
"""
Find the longest entry in dictionary which is a prefix of the given message
"""
for entry in dictionary[::-1]:
if message.startswith(entry[0]):
return dictionary.index(entry)
return -1
def lz78_encode(message, *args, **kwargs):
dictionary = []
while len(message) > 0:
prefix_entry = find_prefix_entry(message, dictionary)
if prefix_entry == -1:
next_char = message[0]
entry = (next_char, (0, next_char))
message = message[1:]
else:
prefix = dictionary[prefix_entry][0]
next_char = message[len(prefix)]
value = prefix + next_char
entry = (value, (prefix_entry + 1, next_char))
message = message[len(value) :]
dictionary.append(entry)
return dictionary
def lz78_decode(outputs, *args, **kwargs):
dictionary = []
message = ""
for i, c in outputs:
if i == 0:
suffix = c
dictionary.append((c, (i, c)))
else:
entry = dictionary[i - 1]
suffix = entry[0] + c
dictionary.append((entry[0] + c, (i, c)))
message += suffix
return message
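

if __name__ == "__main__":
    # Quick round-trip check (the message is illustrative): encoding "abababa"
    # produces the dictionary [('a', (0, 'a')), ('b', (0, 'b')),
    # ('ab', (1, 'b')), ('aba', (3, 'a'))], and decoding the (index, char)
    # pairs recovers the original message.
    encoded = lz78_encode("abababa")
    outputs = [pair for _, pair in encoded]
    assert lz78_decode(outputs) == "abababa"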
| 3.546875 | 4 |
extras/sm_to_graph.py | Attumm/semantic-model | 0 | 12767005 | import json
import sys
from dsm import dsm_looper
class Node():
DN = {}
head = None
def __init__(self, val, dn):
self.val = val
self.nodes = []
self.dn = dn
Node.DN[dn] = self
def connect(self, node):
self.nodes.append(node)
@classmethod
def get_node(cls, dn):
return cls.DN[dn]
@classmethod
def loop_over(cls):
return cls.head.loop()
def loop(self):
print('-'.join(self.dn), self.val)
for n in self.nodes:
n.loop()
print()
LABELS = {}
COLORS = [
"black",
"red",
"green",
"yellow",
"blue",
"purple",
"pink",
]
DN = []
def get_color(item):
n = len(item)
return COLORS[n % len(COLORS)]
def load_dns(model, dn):
if len(dn) >= 1:
DN.append(dn)
return model
if __name__ == "__main__":
filename = sys.argv[sys.argv.index('-f')+1] if '-f' in sys.argv else None
    if filename is not None:
        with open(filename) as f:
            dsm_model = json.load(f)
else:
data = sys.stdin.read()
#data = input()
dsm_model = json.loads(data)
result = dsm_looper(load_dns, dsm_model)
font = "ubuntu"
print("digraph G {")
print("rankdir=LR;")
print(f'node [shape=rectangle width=3 fontname="{font}"];')
    print(f'graph [fontname = "{font}"]')
    print(f'edge [fontname = "{font}"]')
node = Node("dsm_model", "()")
Node.head = node
for i, item in enumerate(DN, start=2):
node = Node(i, str(item))
parent_dn = "()" if len(item) == 1 else str(item[:-1])
        parent_node = Node.get_node(parent_dn)
parent_node.connect(node)
for i, item in enumerate(DN, start=2):
LABELS[str(item)] = i
label = item[0] if len(item) == 1 else item[-1]
print(f'\t{i} [label="{label}", style=filled color={get_color(item)}];')
SEEN = set()
for i, item in enumerate(DN, start=2):
parent_dn = "()" if len(item) == 1 else str(item[:-1])
if parent_dn in SEEN:
continue
node = Node.get_node(parent_dn)
for n in node.nodes:
print(f"{node.val} -> {n.val} [penwidth=1, arrowhead=none];")
SEEN.add(parent_dn)
print("}")
| 3 | 3 |
pages/02_multipage.py | sebastiandres/stb_chapter_demo_v070 | 0 | 12767006 | import streamlit as st
import streamlit_book as stb
st.title("Multipage")
st.markdown("There are several user cases for having multipages on streamlit. We'll explore each one of those")
st.header("Basic or interactive single page")
st.markdown("""
You use only streamlit (no need can use streamlit_book).
Optionally, if you want to use any of the python function for activities/questions, you can use streamlit_book. No need to initialize the library.
""")
st.header("Book: A single document with multiple connected pages")
st.markdown("""
You only need previous/next buttons.
Use `stb.set_book_config` to set the path and other book configurations.
""")
st.header("Library: several simple or multipaged books")
st.markdown("""
Requires a sidebar menu (like this demo), where each topic required a previous/next buttons.
Use `stb.set_library_config` to set the path and the configuration for the book.
""")
| 3.421875 | 3 |
scripts/find_revdeps.py | clouserw/zamboni | 0 | 12767007 | <gh_stars>0
#!/usr/bin/env python
import pip
import sys
from pip.req import parse_requirements
def rdeps(pkg_name):
return [pkg.project_name
for pkg in pip.get_installed_distributions()
if pkg_name in [requirement.project_name
for requirement in pkg.requires()]]
def main(requirements_path):
apps = sorted([r.name for r in parse_requirements(requirements_path,
session=pip.download.PipSession())])
reverse_requirements = {}
for app in apps:
reverse_requirements[app] = rdeps(app)
for app in sorted(reverse_requirements):
if reverse_requirements.get(app, None):
            print('# %s is required by %s' % (
                app, ', '.join(reverse_requirements[app])))
if __name__ == '__main__':
if len(sys.argv) < 2:
        print('Usage: %s <requirement-file>' % sys.argv[0])
sys.exit(1)
main(sys.argv[1])
| 2.515625 | 3 |
momo/plugins/flask/functions.py | shichao-an/momo | 6 | 12767008 | # template global functions
# make sure not to conflict with built-ins:
# http://jinja.pocoo.org/docs/2.9/templates/#list-of-global-functions
from flask.helpers import url_for as _url_for
from flask_paginate import Pagination
def paginate(page, total, per_page, config):
record_name = config['MOMO_PAGINATION_RECORD_NAME']
display_msg = config['MOMO_PAGINATION_DISPLAY_MSG']
pagination = _paginate(
page=page,
total=total,
per_page=per_page,
record_name=record_name,
display_msg=display_msg,
)
return pagination
def _paginate(page, total, per_page, record_name, display_msg):
pagination = Pagination(
page=page,
total=total,
per_page=per_page,
bs_version=3,
show_single_page=False,
record_name=record_name,
display_msg=display_msg,
)
return pagination
def get_page(request):
return request.args.get('page', default=1, type=int)
def toggle_arg(endpoint, request, arg, value, **kwargs):
"""Toggle request arguments.
:param endpoint: endpoint name.
:param request: request object.
:param arg: request argument name to toggle.
    :param value: initial value for the toggled argument.
:param kwargs: keyword arguments to preserve.
"""
args = request.args.to_dict()
if arg in args:
args.pop(arg)
else:
args[arg] = value
args.update(request.view_args)
args.update(kwargs)
return _url_for(endpoint, **args)
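# Example with hypothetical endpoint/argument names: on /notes?page=2,
# toggle_arg('notes', request, 'desc', 1, page=2) yields /notes?page=2&desc=1;
# calling it again from that URL removes desc and yields /notes?page=2.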
| 2.546875 | 3 |
todos/models/definitions.py | asidlare/todos | 0 | 12767009 | <filename>todos/models/definitions.py
from .base import db, DictMixin, DATETIME_TYPE, BOOLEAN_TYPE, UNSIGNEDSMALLINT_TYPE
from .utils import on_create, on_drop
from sqlalchemy_utils import PasswordType, EmailType, force_auto_coercion
from sqlalchemy.ext.hybrid import hybrid_property
from enum import Enum
from flask_login import UserMixin
force_auto_coercion()
class TodoListStatus(Enum):
active = 1
inactive = 2
class TaskStatus(Enum):
active = 1
done = 2
ready = 3
class Priority(Enum):
veryhigh = 'a'
high = 'b'
medium = 'c'
low = 'd'
verylow = 'e'
class UserTbl(db.Model, UserMixin, DictMixin):
"""
Table with user details.
"""
__tablename__ = 'User'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
user_id = db.Column(db.CHAR(36), primary_key=True)
login = db.Column(db.String(length=80), nullable=False, unique=True)
password = db.Column(PasswordType(schemes=['<PASSWORD>']), nullable=False)
name = db.Column(db.String(length=255), nullable=False)
email = db.Column(EmailType, nullable=False, unique=True)
created = db.Column(DATETIME_TYPE, nullable=False)
# Many to many relationship user <--> todolist with roles
# elements of relation are presorted in required order
todolists = db.relationship('TodoListTbl', lazy='dynamic', secondary='UserTodoList', back_populates='users',
order_by=lambda: (TodoListTbl.priority, TodoListTbl.status, TodoListTbl.label,
TodoListTbl.created_ts))
# One to many relationship user <--> todolists creator
todolistcreator = db.relationship('TodoListCreatorTbl', back_populates='User')
# One to many relationship user <--> todolists statuses
todoliststatuses = db.relationship('TodoListStatusChangeLogTbl', back_populates='User')
# One to many relationship user <--> task statuses
taskstatuses = db.relationship('TaskStatusChangeLogTbl', back_populates='User')
def get_id(self):
"""Return user_id per login"""
return self.user_id
    def is_authenticated(self, login, password):
        """Return True if valid credentials were provided"""
        return self.login == login and self.password == password
def role(self, todolist_id):
role_row = [row.role for row in self.todolists_assoc if row.todolist_id == todolist_id]
return role_row[0] if role_row else None
@property
def owner_todolist_count(self):
todolists = [row.todolist_id for row in self.todolists_assoc if row.role == 'owner']
return len(todolists)
def all_todolists(self, label=None, status=None, priority=None):
todolists = self.todolists
if label:
todolists = todolists.filter_by(label=label)
if status:
todolists = todolists.filter_by(status=getattr(TodoListStatus, status).name)
if priority:
todolists = todolists.filter_by(priority=getattr(Priority, priority).value)
return (row.to_dict() for row in todolists
for user_role in row.users_assoc if user_role.user_id == self.user_id)
def to_dict(self):
out = super().to_dict()
out.pop('password')
return out
class RoleTbl(db.Model, DictMixin):
"""
Table with role permissions.
Expected roles with permissions:
    - owner (change_owner, delete, change_permissions, change_data, read)
    - administrator (change_data, read)
    - reader (read)
"""
__tablename__ = 'Role'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
role = db.Column(db.String(length=50), primary_key=True)
change_owner = db.Column(BOOLEAN_TYPE, nullable=False)
delete = db.Column(BOOLEAN_TYPE, nullable=False)
change_permissions = db.Column(BOOLEAN_TYPE, nullable=False)
change_data = db.Column(BOOLEAN_TYPE, nullable=False)
read = db.Column(BOOLEAN_TYPE, nullable=False)
todolist_count_limit = db.Column(UNSIGNEDSMALLINT_TYPE, nullable=True)
task_count_limit = db.Column(UNSIGNEDSMALLINT_TYPE, nullable=True)
task_depth_limit = db.Column(UNSIGNEDSMALLINT_TYPE, nullable=True)
class TodoListTbl(db.Model, DictMixin):
"""
TodoList table includes roots of lists
"""
__tablename__ = 'TodoList'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
todolist_id = db.Column(db.CHAR(36), primary_key=True)
label = db.Column(db.String(length=255), nullable=False)
description = db.Column(db.String(length=255), nullable=True)
status = db.Column(db.Enum(TodoListStatus), nullable=False)
# inserting values into table to guarantee specific ordering by priority
priority = db.Column(db.Enum(Priority, values_callable=lambda x: [e.value for e in x]), nullable=False)
created_ts = db.Column(DATETIME_TYPE, nullable=False)
# Many to many relationship user <--> todolist with roles
users = db.relationship('UserTbl', secondary='UserTodoList', back_populates='todolists',
order_by=lambda: (UserTbl.name))
# One to many relationship todolist <--> tasks
tasks = db.relationship('TaskTbl', back_populates='TodoList',
order_by=lambda: (TaskTbl.priority, TaskTbl.status, TaskTbl.label, TaskTbl.created_ts))
# One to many relationship todolist <--> logs
statuses = db.relationship('TodoListStatusChangeLogTbl', back_populates='TodoList',
order_by=lambda: (TodoListStatusChangeLogTbl.change_ts.desc()))
# One to one relationship todolist <--> todolist ownership
TodoListCreator = db.relationship("TodoListCreatorTbl", uselist=False, back_populates="TodoList")
# One to one relationship todolist <--> tasks count
TaskCount = db.relationship("TaskCountTbl", uselist=False, back_populates="TodoList")
def role(self, user_id):
role_row = [row.role for row in self.users_assoc if row.user_id == user_id]
return role_row[0] if role_row else None
@property
def all_roles(self):
return [{'login': row.login, 'name': row.name, 'email': row.email, 'role': user_role.role}
for row in self.users
for user_role in row.todolists_assoc if user_role.todolist_id == self.todolist_id]
@property
def children_tasks(self):
return (row for row in self.tasks if row.parent_id is None)
@property
def creator(self):
return self.TodoListCreator.created_by if self.TodoListCreator else None
@property
def status_changes(self):
return [row.to_dict() for row in self.statuses]
def to_dict(self):
out = super().to_dict()
out['status'] = self.status.name
out['priority'] = self.priority.name
out['status_changes'] = self.status_changes
return out
class TodoListStatusChangeLogTbl(db.Model, DictMixin):
"""
Table to hold status changes for todolist
"""
__tablename__ = 'TodoListStatusChangeLog'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
todolist_id = db.Column(db.CHAR(36), db.ForeignKey('TodoList.todolist_id', ondelete="cascade"), primary_key=True)
change_ts = db.Column(DATETIME_TYPE, primary_key=True)
changed_by = db.Column(db.CHAR(36), db.ForeignKey('User.user_id', ondelete="cascade"), nullable=False)
status = db.Column(db.Enum(TodoListStatus), nullable=False)
# many to one todolists statuses <=> users
User = db.relationship("UserTbl", back_populates="todoliststatuses")
# many to one todolists statuses <=> todolist
TodoList = db.relationship("TodoListTbl", back_populates="statuses")
def to_dict(self):
out = super().to_dict()
out.pop('todolist_id')
out['status'] = self.status.name
out['changed_by'] = self.User.name
return out
class TodoListCreatorTbl(db.Model, DictMixin):
"""
Table to hold connection between todolist and its creator
"""
__tablename__ = 'TodoListCreator'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
todolist_id = db.Column(db.CHAR(36), db.ForeignKey('TodoList.todolist_id', ondelete="cascade"), primary_key=True)
created_by = db.Column(db.CHAR(36), db.ForeignKey('User.user_id', ondelete="cascade"), nullable=False)
# many to one todolists creator <=> users
User = db.relationship("UserTbl", back_populates="todolistcreator")
# one to one todolist creator <=> todolist
TodoList = db.relationship("TodoListTbl", back_populates="TodoListCreator")
class UserTodoListTbl(db.Model, DictMixin):
"""
    Table to hold users' roles for todolists.
    Only one user can be the owner.
"""
__tablename__ = 'UserTodoList'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
user_id = db.Column(db.CHAR(36), db.ForeignKey('User.user_id', ondelete="cascade"), primary_key=True)
todolist_id = db.Column(db.CHAR(36), db.ForeignKey('TodoList.todolist_id', ondelete="cascade"), primary_key=True)
role = db.Column(db.String(length=50), db.ForeignKey('Role.role', ondelete="cascade"), nullable=False)
User = db.relationship(UserTbl, backref=db.backref("todolists_assoc"))
TodoList = db.relationship(TodoListTbl, backref=db.backref("users_assoc"))
class TaskTbl(db.Model, DictMixin):
"""
Table to hold detailed information about tasks.
"""
__tablename__ = 'Task'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
task_id = db.Column(db.CHAR(36), primary_key=True)
parent_id = db.Column(db.CHAR(36), db.ForeignKey('Task.task_id', ondelete="cascade"), nullable=True)
label = db.Column(db.String(length=255), nullable=False, index=True)
description = db.Column(db.Text(), nullable=True)
todolist_id = db.Column(db.CHAR(36), db.ForeignKey('TodoList.todolist_id', ondelete="cascade"), nullable=False,
index=True)
status = db.Column(db.Enum(TaskStatus), nullable=False, index=True)
# inserting values into table to guarantee specific ordering by priority
priority = db.Column(db.Enum(Priority, values_callable=lambda x: [e.value for e in x]), nullable=False)
created_ts = db.Column(DATETIME_TYPE, nullable=False)
# relation inside table
children_one_level = db.relationship("TaskTbl", backref=db.backref('parent', remote_side=[task_id]),
order_by=lambda: (TaskTbl.priority, TaskTbl.status, TaskTbl.label,
TaskTbl.created_ts))
# many to one todolists task <=> todolist
TodoList = db.relationship("TodoListTbl", back_populates="tasks")
# One to many relationship task <--> statuses
statuses = db.relationship('TaskStatusChangeLogTbl', back_populates='Task',
order_by=lambda: (TaskStatusChangeLogTbl.change_ts.desc()))
# One to one relationship task <--> task depth
TaskDepth = db.relationship("TaskDepthTbl", uselist=False, back_populates="Task")
@property
def children(self):
return (row.to_dict() for row in self.children_one_level)
@property
def siblings(self):
if self.parent_id:
return (row for row in self.parent.children_one_level)
else:
return (row for row in self.TodoList.children_tasks)
@property
def ancestors(self):
task = self
while task.parent is not None:
task = task.parent
yield task.task_id
@property
def descendants(self):
return (row for row in self.dfs_tree_from_object(visited=list()) if row != self.to_dict())
@property
def dfs_tree(self):
visited = list()
if self.parent is None:
for node in self.siblings:
visited = node.dfs_tree_from_object(visited)
else:
visited = self.dfs_tree_from_object(visited)
return iter(visited)
def dfs_tree_from_object(self, visited):
current = self
visited.append(current.to_dict())
for node in current.children_one_level:
if node not in visited:
node.dfs_tree_from_object(visited)
return visited
@property
def is_leaf(self):
return False if self.children_one_level else True
@property
def status_changes(self):
return [row.to_dict() for row in self.statuses]
@hybrid_property
def depth(self):
return self.TaskDepth.depth
def to_dict(self):
out = super().to_dict()
out['status'] = self.status.name
out['priority'] = self.priority.name
out['depth'] = self.depth
out['is_leaf'] = self.is_leaf
out['status_changes'] = self.status_changes
return out
class TaskStatusChangeLogTbl(db.Model, DictMixin):
"""
Table to hold status changes for task
"""
__tablename__ = 'TaskStatusChangeLog'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
task_id = db.Column(db.CHAR(36), db.ForeignKey('Task.task_id', ondelete="cascade"), primary_key=True)
change_ts = db.Column(DATETIME_TYPE, primary_key=True)
changed_by = db.Column(db.CHAR(36), db.ForeignKey('User.user_id', ondelete="cascade"), nullable=False)
status = db.Column(db.Enum(TaskStatus), nullable=False)
# many to one tasks statuses <=> users
User = db.relationship("UserTbl", back_populates="taskstatuses")
# many to one todolists statuses <=> task
Task = db.relationship("TaskTbl", back_populates="statuses")
def to_dict(self):
out = super().to_dict()
out.pop('task_id')
out['status'] = self.status.name
out['changed_by'] = self.User.name
return out
class TaskCountTbl(db.Model, DictMixin):
"""
Table to hold number of tasks
"""
__tablename__ = 'TaskCount'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
todolist_id = db.Column(db.CHAR(36), db.ForeignKey('TodoList.todolist_id', ondelete="cascade"), primary_key=True)
quantity = db.Column(UNSIGNEDSMALLINT_TYPE, nullable=False)
# one to one todolist <=> task count
TodoList = db.relationship("TodoListTbl", back_populates="TaskCount")
class TaskDepthTbl(db.Model, DictMixin):
"""
Table to hold depth of tasks
"""
__tablename__ = 'TaskDepth'
__str__ = lambda self: str(self.to_dict()) # noqa
__repr__ = lambda self: repr(self.to_dict()) # noqa
task_id = db.Column(db.CHAR(36), db.ForeignKey('Task.task_id', ondelete="cascade"), primary_key=True)
depth = db.Column(UNSIGNEDSMALLINT_TYPE, nullable=False)
# one to one task <=> task depth
Task = db.relationship("TaskTbl", back_populates="TaskDepth")
task_insert = """
CREATE TRIGGER TaskInsert AFTER INSERT ON Task
FOR EACH ROW
BEGIN
UPDATE TaskCount SET quantity = quantity + 1 WHERE todolist_id = NEW.todolist_id;
INSERT INTO TaskDepth (task_id, depth) VALUES (NEW.task_id,
IFNULL((SELECT depth FROM TaskDepth TD WHERE task_id = IFNULL(NEW.parent_id, -1)), -1) + 1);
END;
"""
on_create(TaskTbl, task_insert)
on_drop(TaskTbl, """DROP TRIGGER IF EXISTS TaskInsert;""")
| 2.109375 | 2 |
common/geometry.py | ocozalp/Algorithms | 5 | 12767010 | __author__ = 'orhan'
from math import asin, sqrt, degrees
class Point:
def __init__(self, x, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
def angle_x(self, p2):
dy = self.y - p2.y
dx = self.x - p2.x
h = sqrt(dy ** 2 + dx ** 2)
if h == 0:
return 0
return degrees(asin(dx / h))
    def __cmp__(self, other):
        # Python 3 never calls __cmp__ implicitly; it is only used by the
        # __eq__/__ne__ methods below.
        return (self.x - other.x) or (self.y - other.y) or (self.z - other.z)
def __eq__(self, other):
return self.__cmp__(other) == 0.0
def __ne__(self, other):
return self.__cmp__(other) != 0.0 | 3.734375 | 4 |
data-generation-GAN/solver/make_optimizer.py | lulujianjie/efficient-person-generation-for-reid | 24 | 12767011 | import torch
def make_optimizer(Cfg, model):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
lr = Cfg.SOLVER.BASE_LR
weight_decay = Cfg.SOLVER.WEIGHT_DECAY
if "bias" in key:
lr = Cfg.SOLVER.BASE_LR * Cfg.SOLVER.BIAS_LR_FACTOR
weight_decay = Cfg.SOLVER.WEIGHT_DECAY_BIAS
params += [{"params": [value], "lr": lr, "betas": (0.5, 0.999), "weight_decay": weight_decay}]
optimizer = getattr(torch.optim, Cfg.SOLVER.OPTIMIZER)(params)
return optimizer | 2.453125 | 2 |
src/clusterfuzz/_internal/tests/core/bot/fuzzers/afl/afl_engine_test.py | anonymousandrew/clusterfuzz | 1 | 12767012 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for AFL's engine implementation."""
import os
import shutil
import unittest
from clusterfuzz._internal.bot.fuzzers.afl import engine
from clusterfuzz._internal.bot.fuzzers.afl import launcher
from clusterfuzz._internal.system import environment
from clusterfuzz._internal.tests.core.bot.fuzzers.afl import \
afl_launcher_integration_test
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
# TODO(mbarbella): Break dependency on afl_launcher_integration_test once
# everything has been fully converted to the new pipeline.
TEST_PATH = os.path.abspath(os.path.dirname(__file__))
TEMP_DIRECTORY = os.path.join(TEST_PATH, 'temp')
DATA_DIRECTORY = os.path.join(TEST_PATH, 'data')
CORPUS_DIRECTORY = os.path.join(TEMP_DIRECTORY, 'corpus')
OUTPUT_DIRECTORY = os.path.join(TEMP_DIRECTORY, 'output')
BASE_FUZZ_TIMEOUT = (
launcher.AflRunnerCommon.SIGTERM_WAIT_TIME +
launcher.AflRunnerCommon.AFL_CLEAN_EXIT_TIME)
FUZZ_TIMEOUT = 5 + BASE_FUZZ_TIMEOUT
LONG_FUZZ_TIMEOUT = 90 + BASE_FUZZ_TIMEOUT
def clear_temp_dir():
"""Clear temp directories."""
if os.path.exists(TEMP_DIRECTORY):
shutil.rmtree(TEMP_DIRECTORY)
def create_temp_dir():
"""Create temp directories."""
# Corpus directory will be created when preparing for fuzzing.
os.mkdir(TEMP_DIRECTORY)
os.mkdir(OUTPUT_DIRECTORY)
@unittest.skipIf(not environment.get_value('AFL_INTEGRATION_TESTS'),
'AFL_INTEGRATION_TESTS=1 must be set')
class AFLEngineTest(unittest.TestCase):
"""Tests for AFLEngine."""
def setUp(self):
clear_temp_dir()
create_temp_dir()
test_helpers.patch_environ(self)
afl_launcher_integration_test.dont_use_strategies(self)
def tearDown(self):
clear_temp_dir()
def test_fuzz(self):
"""Test for fuzz."""
engine_impl = engine.AFLEngine()
afl_launcher_integration_test.setup_testcase_and_corpus(
'empty', 'corpus', fuzz=True)
fuzzer_path = os.path.join(DATA_DIRECTORY, 'test_fuzzer')
options = engine_impl.prepare(CORPUS_DIRECTORY, fuzzer_path, DATA_DIRECTORY)
result = engine_impl.fuzz(fuzzer_path, options, OUTPUT_DIRECTORY,
FUZZ_TIMEOUT)
self.assertEqual('{0}/afl-fuzz'.format(DATA_DIRECTORY), result.command[0])
self.assertIn('-i{0}'.format(CORPUS_DIRECTORY), result.command)
# Ensure that we've added something other than the dummy file to the corpus.
self.assertTrue(os.listdir(CORPUS_DIRECTORY))
def test_reproduce(self):
"""Test for reproduce."""
engine_impl = engine.AFLEngine()
target_path = os.path.join(DATA_DIRECTORY, 'test_fuzzer')
testcase_path = afl_launcher_integration_test.setup_testcase_and_corpus(
'crash', 'empty_corpus')
timeout = 5
result = engine_impl.reproduce(target_path, testcase_path, [], timeout)
self.assertIn(
'ERROR: AddressSanitizer: SEGV on unknown address 0x000000000000',
result.output)
def test_fuzz_with_crash(self):
"""Tests that we detect crashes when fuzzing."""
engine_impl = engine.AFLEngine()
afl_launcher_integration_test.setup_testcase_and_corpus(
'empty', 'corpus', fuzz=True)
fuzzer_path = os.path.join(DATA_DIRECTORY, 'easy_crash_fuzzer')
options = engine_impl.prepare(CORPUS_DIRECTORY, fuzzer_path, DATA_DIRECTORY)
result = engine_impl.fuzz(fuzzer_path, options, OUTPUT_DIRECTORY,
LONG_FUZZ_TIMEOUT)
self.assertGreater(len(result.crashes), 0)
crash = result.crashes[0]
self.assertIn('ERROR: AddressSanitizer: heap-use-after-free',
crash.stacktrace)
# Testcase (non-zero size) should've been copied back.
self.assertNotEqual(os.path.getsize(crash.input_path), 0)
def test_startup_crash_not_reported(self):
"""Ensures that we properly handle startup crashes."""
engine_impl = engine.AFLEngine()
afl_launcher_integration_test.setup_testcase_and_corpus(
'empty', 'corpus', fuzz=True)
fuzzer_path = os.path.join(DATA_DIRECTORY, 'always_crash_fuzzer')
options = engine_impl.prepare(CORPUS_DIRECTORY, fuzzer_path, DATA_DIRECTORY)
result = engine_impl.fuzz(fuzzer_path, options, OUTPUT_DIRECTORY,
FUZZ_TIMEOUT)
self.assertFalse(result.crashes)
| 1.78125 | 2 |
fuzzy_dynamic_cluster.py | FlashZoom/Fuzzy-Dynamic-Cluster | 1 | 12767013 | import numpy as np
# direct clustering: lambda-cut clusters are read directly off the similarity matrix
class FCM(object):
def __init__(self, data):
self.lambd = 0
self.data = data
self.cluster = []
self.F_S = []
    def standard(self):
        # normalize each feature by its column maximum
        data_max = np.max(self.data, axis=0)
        num_samples, num_shapes = np.shape(self.data)
        for i in range(num_samples):
            self.data[i, :] = (self.data[i, :])/data_max
            for j in range(num_shapes):
                self.data[i, j] = round(float(self.data[i, j]), 2)
def matrix_alike(self):
num_samples, num_shapes = np.shape(self.data)
data = self.data
r = np.zeros((num_samples, num_samples))
        # max-min method: r[i, j] = sum(min(x_i, x_j)) / sum(max(x_i, x_j))
for i in range(num_samples):
for j in range(num_samples):
r[i, j] = np.sum(self.min(data[i, :], data[j, :]))/np.sum(self.max(data[i, :], data[j, :]))
r[i, j] = round(r[i, j], 2)
return r
def max(self, a, b):
a_or_b = []
for (i, j) in zip(a, b):
if i > j:
a_or_b.append(i)
else:
a_or_b.append(j)
return a_or_b
def min(self, a, b):
a_and_b = []
for (i, j) in zip(a, b):
if i < j:
a_and_b.append(i)
else:
a_and_b.append(j)
return a_and_b
def merge_alike_class(self, a):
b = []
for i in range(len(a)):
temp = []
sign = False
for j in range(len(a[i])):
if len(b) != 0:
for k in range(len(b)):
if a[i][j] in b[k]:
b[k].extend(a[i])
b[k] = list(np.unique(b[k]))
sign = True
break
if sign:
break
temp.append(a[i][j])
if sign:
continue
b.append(temp)
return b
def remove_same_cluster(self):
length = len(self.cluster)
temp = self.cluster.copy()
for i in range(length-1):
if self.cluster[i]['result'] == self.cluster[i+1]['result']:
index = 0
while True:
if temp[index]['lambd'] == self.cluster[i+1]['lambd']:
break
else:
index = index+1
temp.pop(index)
self.cluster = temp
def cluster_t(self, T, lam):
answer = T >= lam
num_i, num_j = answer.shape
x_index, y_index = [], []
for i in range(num_i):
for j in range(num_j):
if answer[i, j]:
x_index.append(i+1)
y_index.append(j+1)
num = list(np.unique(x_index))
result = []
for i in num:
temp = []
for j, k in zip(x_index, y_index):
if i == j:
temp.append(k)
result.append(temp)
result = self.merge_alike_class(result) # merge alike class
return result
    # start clustering
    def fcm(self):
        self.standard()  # data standardization
        r = self.matrix_alike()  # build the fuzzy similarity matrix
        lambd = np.unique(r)  # candidate confidence levels (lambda cuts)
        lambd_length = len(lambd)
        for i in range(lambd_length):
            temp = {}
            temp['lambd'] = round(lambd[lambd_length-i-1], 2)
            temp['result'] = self.cluster_t(r, lambd[lambd_length-i-1])
            self.cluster.append(temp)
        self.remove_same_cluster()
        print('The clustering results are', self.cluster)
        self.select_lambda()
        best = self.F_S.index(min(self.F_S)) + 1  # the F-S index is the validity measure for lambda
        print('The best lambda is', self.cluster[best]['lambd'])
        print('The best clustering result is', self.cluster[best]['result'])
def data_mean(self, data, index):
if len(index) == 1:
return data
else:
return np.mean(data, axis=0)
    def select_lambda(self):
        total_mean = np.mean(self.data, axis=0)
        length = len(self.cluster)
        for option in range(1, length - 1):
            F_S = 0
            for i in self.cluster[option]['result']:
                i = [j - 1 for j in i]  # convert 1-based sample ids to 0-based indices
                vi = self.data_mean(self.data[i, :], i)
                temp = 0
                for j in i:
                    temp = temp + (np.sum(np.square(self.data[j, :] - vi)) - np.sum(np.square(vi - total_mean)))
                F_S = F_S + temp
            self.F_S.append(F_S)
def main():
data = np.array([[80., 10., 6., 2.],
[50., 1., 6., 4.],
[90., 6., 4., 6.],
[40., 5., 7., 3.],
[10., 1., 2., 4.]])
fcm = FCM(data)
fcm.fcm()
if __name__ == '__main__':
main()
| 2.6875 | 3 |
tools.py | Frankkie/Thesis-Project-IF-Game | 1 | 12767014 | """
Some useful functions for file management.
Functions:
copytree(scr, dst, symlinks=False, ignore=None):
Copy all the contents of directory scr to directory dst.
empty_folder(folder):
Empty the directory folder from all subfolders and files.
"""
import os
import shutil
def copytree(src, dst, symlinks=False, ignore=None):
"""
Copy all the contents of directory scr to directory dst.
:param src: String
Source directory.
:param dst: String
Destination directory.
:param symlinks: default False
:param ignore: default None
:return: None
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def empty_folder(folder):
"""
Empty the directory folder from all subfolders and files.
Print a diagnostic message if the directory cannot be emptied for any reason.
:param folder: string
The dir to be emptied.
    :return: None
"""
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e)) | 3.4375 | 3 |
plaso/formatters/android_sms.py | Defense-Cyber-Crime-Center/plaso | 2 | 12767015 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""The Android mmssms.db database event formatter."""
from plaso.formatters import interface
from plaso.formatters import manager
class AndroidSmsFormatter(interface.ConditionalEventFormatter):
"""Formatter for an Android SMS event."""
DATA_TYPE = u'android:messaging:sms'
FORMAT_STRING_PIECES = [
u'Type: {sms_type}',
u'Address: {address}',
u'Status: {sms_read}',
u'Message: {body}']
FORMAT_STRING_SHORT_PIECES = [u'{body}']
SOURCE_LONG = u'Android SMS messages'
SOURCE_SHORT = u'SMS'
manager.FormattersManager.RegisterFormatter(AndroidSmsFormatter)
| 1.945313 | 2 |
apps/categories/urls.py | tuanquanghpvn/bideox | 0 | 12767016 | <filename>apps/categories/urls.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^(?P<pk>[0-9]+)-(?P<slug>[\w-]+)/$',
views.CategoryListView.as_view(), name='list')
]
| 1.835938 | 2 |
paddlespeech/t2s/audio/codec.py | jerryuhoo/PaddleSpeech | 1,379 | 12767017 | <filename>paddlespeech/t2s/audio/codec.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import paddle
# x: [0: 2**bit-1], return: [-1, 1]
def label_2_float(x, bits):
return 2 * x / (2**bits - 1.) - 1.
#x: [-1, 1], return: [0, 2**bits-1]
def float_2_label(x, bits):
assert abs(x).max() <= 1.0
x = (x + 1.) * (2**bits - 1) / 2
return x.clip(0, 2**bits - 1)
# x: [-1, 1], mu: 2**bits, return: [0, 2**bits-1]
# see https://en.wikipedia.org/wiki/%CE%9C-law_algorithm
# be careful the input `mu` here, which is +1 than that of the link above
def encode_mu_law(x, mu):
mu = mu - 1
fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu)
return np.floor((fx + 1) / 2 * mu + 0.5)
# from_labels = True:
# y: [0: 2**bit-1], mu: 2**bits, return: [-1,1]
# from_labels = False:
# y: [-1, 1], return: [-1, 1]
def decode_mu_law(y, mu, from_labels=True):
# TODO: get rid of log2 - makes no sense
if from_labels:
y = label_2_float(y, math.log2(mu))
mu = mu - 1
x = paddle.sign(y) / mu * ((1 + mu)**paddle.abs(y) - 1)
return x
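# A minimal round-trip sketch of the helpers above (not part of the original
# module; 9-bit mu-law is an arbitrary example choice):
if __name__ == "__main__":
    bits = 9
    x = np.linspace(-1.0, 1.0, num=5)
    labels = encode_mu_law(x, mu=2**bits)  # integer labels in [0, 2**bits - 1]
    x_hat = decode_mu_law(paddle.to_tensor(labels), mu=2**bits)
    print(x, x_hat.numpy())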
| 2.609375 | 3 |
tests/_test_flow_output/fairytool/tests/test_version.py | Sillte/fairytemplate- | 0 | 12767018 | """Version's attribute test.
"""
import pytest
import fairytool
def test_version():
assert hasattr(fairytool, "__version__")
if __name__ == "__main__":
pytest.main(["--capture=no"])
| 1.734375 | 2 |
dynamodb_ce/ceparser.py | QuiNovas/dynamodb-conditional-expressions | 0 | 12767019 | __all__ = ["CeParser"]
from copy import deepcopy
from decimal import Decimal
from typing import Callable, Dict, Set, Union
import simplejson as json
from boto3.dynamodb.types import (
BINARY,
BINARY_SET,
BOOLEAN,
LIST,
MAP,
NULL,
NUMBER,
NUMBER_SET,
STRING,
STRING_SET,
Binary,
TypeDeserializer,
TypeSerializer,
)
from sly.yacc import Parser
from .celexer import CeLexer
class CeTypeDeserializer(TypeDeserializer):
def deserialize(self, value):
if value and isinstance(value, dict):
if list(value)[0] in (
BINARY,
BINARY_SET,
BOOLEAN,
LIST,
MAP,
NULL,
NUMBER,
NUMBER_SET,
STRING,
STRING_SET,
):
value = super().deserialize(value)
else:
value = {k: self.deserialize(v) for k, v in value.items()}
return value.value if isinstance(value, Binary) else value
_TYPE_DESERIALIZER = CeTypeDeserializer()
_TYPE_SERIALIZER = TypeSerializer()
Dynamo = Union[
Binary, bool, Decimal, dict, list, None, str, Set[Binary], Set[Decimal], Set[str]
]
ExpressionAttributeNames = Dict[str, str]
ExpressionAttributeValues = DynamoItem = Dict[str, Union[Dynamo, Dict[str, Dynamo]]]
class CeParser(Parser):
_expression_cache: Dict[int, Callable[[DynamoItem], bool]] = dict()
def __init__(
self,
*,
expression_attribute_names: ExpressionAttributeNames = None,
expression_attribute_values: ExpressionAttributeValues = None,
):
self._expression_attribute_names: ExpressionAttributeNames = dict()
self._expression_attribute_values: ExpressionAttributeValues = dict()
self.expression_attribute_names = expression_attribute_names or dict()
self.expression_attribute_values = expression_attribute_values or dict()
self._set_expression_attribute_json()
super().__init__()
def _set_expression_attribute_json(self) -> None:
self._expression_attribute_json = json.dumps(
self._expression_attribute_names, separators=(",", ":"), use_decimal=True
) + json.dumps(
self._expression_attribute_values, separators=(",", ":"), use_decimal=True
)
@property
def expression_attribute_names(self) -> ExpressionAttributeNames:
return deepcopy(self._expression_attribute_names)
@expression_attribute_names.setter
def expression_attribute_names(
self, expression_attribute_names: ExpressionAttributeNames
) -> None:
self._expression_attribute_names = (
deepcopy(expression_attribute_names) or dict()
)
self._set_expression_attribute_json()
@expression_attribute_names.deleter
def expression_attribute_names(self) -> None:
self._expression_attribute_names: ExpressionAttributeNames = dict()
self._set_expression_attribute_json()
@property
def expression_attribute_values(self) -> ExpressionAttributeValues:
return deepcopy(self._expression_attribute_values)
@expression_attribute_values.setter
def expression_attribute_values(
self, expression_attribute_values: ExpressionAttributeValues
) -> None:
self._expression_attribute_values: ExpressionAttributeValues = (
_TYPE_DESERIALIZER.deserialize(expression_attribute_values) or dict()
)
self._set_expression_attribute_json()
@expression_attribute_values.deleter
def expression_attribute_values(self) -> None:
self._expression_attribute_values: ExpressionAttributeValues = dict()
self._set_expression_attribute_json()
def evaluate(self, /, expression: str, item: DynamoItem) -> bool:
return self.parse(expression)(item)
@classmethod
def flush_cache(cls) -> None:
cls._expression_cache: Dict[int, Callable[[DynamoItem], bool]] = dict()
def parse(self, expression: str) -> Callable[[DynamoItem], bool]:
expression_hash = hash(expression + self._expression_attribute_json)
if expression_hash not in self._expression_cache:
compiled_expression: Callable[[DynamoItem], bool] = super().parse(
CeLexer().tokenize(expression)
)
def truthy(item: DynamoItem) -> bool:
item = _TYPE_DESERIALIZER.deserialize(item)
return compiled_expression(item)
self._expression_cache[expression_hash] = lambda m: truthy(m)
return self._expression_cache[expression_hash]
# Get the token list from the lexer (required)
tokens = CeLexer.tokens
precedence = (
("left", OR),
("left", AND),
("right", NOT),
("right", PARENS),
("left", ATTRIBUTE_EXISTS, ATTRIBUTE_NOT_EXISTS, BEGINS_WITH, CONTAINS),
("left", BETWEEN),
("left", IN),
("left", EQ, NE, LT, LTE, GT, GTE),
)
# Grammar rules and actions
@_("operand EQ operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) == operand1(m)
@_("operand NE operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) != operand1(m)
@_("operand GT operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) > operand1(m)
@_("operand GTE operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) >= operand1(m)
@_("operand LT operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) < operand1(m)
@_("operand LTE operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: operand0(m) <= operand1(m)
@_("operand BETWEEN operand AND operand")
def condition(self, p):
operand0 = p.operand0
operand1 = p.operand1
operand2 = p.operand2
return lambda m: operand1(m) <= operand0(m) <= operand2(m)
@_('operand IN "(" in_list ")"')
def condition(self, p):
operand = p.operand
in_list = p.in_list
return lambda m: operand(m) in in_list(m)
@_("function")
def condition(self, p):
function = p.function
return lambda m: function(m)
@_("condition AND condition")
def condition(self, p):
condition0 = p.condition0
condition1 = p.condition1
return lambda m: condition0(m) and condition1(m)
@_("condition OR condition")
def condition(self, p):
condition0 = p.condition0
condition1 = p.condition1
return lambda m: condition0(m) or condition1(m)
@_("NOT condition")
def condition(self, p):
condition = p.condition
return lambda m: not condition(m)
@_('"(" condition ")" %prec PARENS')
def condition(self, p):
condition = p.condition
return lambda m: condition(m)
@_('ATTRIBUTE_EXISTS "(" path ")"')
def function(self, p):
path = p.path
return lambda m: path(m) is not None
@_('ATTRIBUTE_NOT_EXISTS "(" path ")"')
def function(self, p):
path = p.path
return lambda m: path(m) is None
@_('ATTRIBUTE_TYPE "(" path "," operand ")"')
def function(self, p):
path = p.path
operand = p.operand
return lambda m: list(_TYPE_SERIALIZER.serialize(path(m)))[0] == operand(m)
@_('BEGINS_WITH "(" path "," operand ")"')
def function(self, p):
path = p.path
operand = p.operand
return (
lambda m: path(m).startswith(operand(m))
if isinstance(path(m), str)
else False
)
@_('CONTAINS "(" path "," operand ")"')
def function(self, p):
path = p.path
operand = p.operand
return (
lambda m: operand(m) in path(m)
if isinstance(path(m), (str, set))
else False
)
@_('SIZE "(" path ")"')
def operand(self, p):
path = p.path
return (
lambda m: len(path(m))
if isinstance(path(m), (str, set, dict, bytearray, bytes, list))
else -1
)
@_('in_list "," operand')
def in_list(self, p):
in_list = p.in_list
operand = p.operand
return lambda m: [*in_list(m), operand(m)]
@_('operand "," operand')
def in_list(self, p):
operand0 = p.operand0
operand1 = p.operand1
return lambda m: [operand0(m), operand1(m)]
@_("path")
def operand(self, p):
return p.path
@_("VALUE")
def operand(self, p):
VALUE = p.VALUE
expression_attribute_values = self._expression_attribute_values
return lambda m: expression_attribute_values.get(VALUE)
@_('path "." NAME')
def path(self, p):
path = p.path
NAME = p.NAME
return lambda m: path(m).get(NAME) if path(m) else None
@_('path "." NAME_REF')
def path(self, p):
path = p.path
NAME_REF = p.NAME_REF
expression_attribute_names = self._expression_attribute_names
return (
lambda m: path(m).get(expression_attribute_names.get(NAME_REF))
if path(m)
else None
)
@_('path "[" INDEX "]"')
def path(self, p):
path = p.path
INDEX = p.INDEX
return (
lambda m: path(m)[INDEX]
if isinstance(path(m), list) and len(path(m)) > INDEX
else None
)
@_("NAME")
def path(self, p):
NAME = p.NAME
return lambda m: m.get(NAME)
@_("NAME_REF")
def path(self, p):
NAME_REF = p.NAME_REF
expression_attribute_names = self._expression_attribute_names
return lambda m: m.get(expression_attribute_names.get(NAME_REF))
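# A minimal usage sketch (not part of the module; it assumes CeLexer tokenizes
# DynamoDB's condition-expression syntax, e.g. "=", "#name" refs and ":value"
# refs):
if __name__ == "__main__":
    ce_parser = CeParser(
        expression_attribute_names={"#s": "status"},
        expression_attribute_values={":v": {"S": "active"}},
    )
    item = {"status": {"S": "active"}}
    print(ce_parser.evaluate(expression="#s = :v", item=item))  # expected: True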
| 2.015625 | 2 |
tools/Scripts/UploadToFtp.py | rozyczko/easyDiffractionApp | 1 | 12767020 | # SPDX-FileCopyrightText: 2021 easyDiffraction contributors <<EMAIL>>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyDiffraction project <https://github.com/easyScience/easyDiffractionApp>
__author__ = "github.com/AndrewSazonov"
__version__ = '0.0.1'
import os, sys
import ftplib
import pathlib
import Functions, Config
CONFIG = Config.Config()
def connect(ftp, host, port):
try:
message = f'connect to ftp server'
ftp.connect(host, port)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def login(ftp, user, password):
try:
message = f'login to ftp server'
ftp.login(user, password)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def winToLin(path):
return path.replace('\\', '/')
def makeDir(ftp, path):
if pathExists(ftp, path):
Functions.printNeutralMessage(f'Directory exists: {path}')
return
try:
path = winToLin(path)
message = f'create directory {path}'
ftp.mkd(path)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def uploadFile(ftp, source, destination):
try:
destination = winToLin(destination)
message = f'upload file {source} to {destination}'
dir_name = os.path.basename(destination)
dir_names = ftp.nlst(os.path.dirname(destination))
if dir_name not in dir_names:
makeDir(ftp, destination)
destination = f'{destination}/{os.path.basename(source)}'
with open(source, 'rb') as fb:
ftp.storbinary(f'STOR {destination}', fb)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def uploadDir(ftp, source, destination):
try:
message = f'upload dir {source} to {destination}'
root_dir_name = os.path.basename(source)
for dir_path, _, file_names in os.walk(source):
for file_name in file_names:
source_file = os.path.join(dir_path, file_name)
parent_path = os.path.relpath(source_file, source)
parent_dir = os.path.dirname(parent_path)
destination_dir = os.path.join(destination, root_dir_name, parent_dir).rstrip(os.path.sep)
uploadFile(ftp, source_file, destination_dir)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def upload(ftp, source, destination):
try:
message = f'upload {source} to {destination}'
if os.path.isfile(source):
uploadFile(ftp, source, destination)
elif os.path.isdir(source):
uploadDir(ftp, source, destination)
else:
Functions.printFailMessage(message)
sys.exit(1)
except Exception as exception:
Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def pathExists(ftp, path):
try:
message = f'find path {path}'
ftp.nlst(path)
except Exception as exception:
Functions.printFailMessage(message, exception)
return False
else:
Functions.printSuccessMessage(message)
return True
def removeDir(ftp, path):
if not pathExists(ftp, path):
Functions.printNeutralMessage(f"Directory doesn't exists: {path}")
return
try:
path = winToLin(path)
message = f'remove directory {path}'
for (name, properties) in ftp.mlsd(path=path):
if name in ['.', '..']:
continue
elif properties['type'] == 'file':
ftp.delete(f'{path}/{name}')
elif properties['type'] == 'dir':
removeDir(ftp, f'{path}/{name}')
ftp.rmd(path)
except Exception as exception:
        Functions.printFailMessage(message, exception)
sys.exit(1)
else:
Functions.printSuccessMessage(message)
def deploy():
branch = sys.argv[1]
if branch != 'master':
Functions.printNeutralMessage(f'No ftp upload for branch {branch}')
return
password = sys.argv[2]
host = CONFIG['ci']['app']['setup']['ftp']['host']
port = CONFIG['ci']['app']['setup']['ftp']['port']
user = CONFIG['ci']['app']['setup']['ftp']['user']
prefix = CONFIG['ci']['app']['setup']['ftp']['prefix']
repo_subdir = CONFIG['ci']['app']['setup']['ftp']['repo_subdir']
local_repository_dir_name = f'{CONFIG.app_name}{CONFIG.repository_dir_suffix}'
local_repository_dir_path = os.path.join(CONFIG.dist_dir, local_repository_dir_name, CONFIG.setup_os)
online_repository_subdir_path = f'{prefix}/{repo_subdir}'
online_repository_dir_path = f'{online_repository_subdir_path}/{CONFIG.setup_os}'
ftp = ftplib.FTP()
connect(ftp, host, port)
login(ftp, user, password)
removeDir(ftp, online_repository_dir_path)
makeDir(ftp, online_repository_dir_path)
upload(ftp, local_repository_dir_path, online_repository_subdir_path)
ftp.quit()
if __name__ == "__main__":
deploy()
| 2.3125 | 2 |
main.py | nonjosh/tg_bot_webscrap_update | 0 | 12767021 | <gh_stars>0
"""main"""
import time
from typing import List
import yaml
import schedule
from helpers import (
TgHelper,
# CocomanhuaHelper,
WutuxsHelper,
ManhuaguiHelper,
EsjzoneHelper,
SyosetuHelper,
Qiman6Helper,
BaozimhHelper,
)
from utils import get_logger, get_main_domain_name
LIST_YAML_PATH = "config/list.yaml"
logger = get_logger(__name__)
tg_helper = TgHelper()
def my_checker(my_helper, urls: List[str], show_no_update_msg=False):
"""my_checker
Args:
my_helper (Helper): helper
show_no_update_msg (bool, optional): print no update msg. Defaults to False.
"""
has_update = my_helper.check_update()
if has_update:
if hasattr(my_helper, "translate_url"):
url = my_helper.translate_url
else:
url = my_helper.latest_chapter_url
# Print update message
logger.info(
"Update found for %s: %s (%s)",
my_helper.name,
my_helper.latest_chapter_title,
url,
)
content_html_text = get_msg_content(my_helper, urls)
tg_helper.send_msg(content=content_html_text)
else:
if show_no_update_msg:
logger.info(
"No update found for %s %s",
my_helper.media_type,
my_helper.name,
)
def get_msg_content(my_helper, urls: List[str] = None) -> str:
"""Construct html message content from helper and urls
Args:
my_helper (Helper): helper object
urls (List[str], optional): list of urls. Defaults to None.
Returns:
str: message content (html)
"""
content_html_text = f"{my_helper.name} {my_helper.media_type} updated!\n"
urls_texts = [
f"<a href='{url}'>{get_main_domain_name(url)}</a>" for url in urls
]
content_html_text += " | ".join(urls_texts) + "\n"
content_html_text += (
f"latest chapter: <a href='{my_helper.latest_chapter_url}'>"
f"{my_helper.latest_chapter_title}</a>"
)
return content_html_text
def print_latest_chapter(my_helper):
"""Print latest chapter"""
logger.info(
"Current chapter for %s %s: %s (%s)",
my_helper.media_type,
my_helper.name,
my_helper.latest_chapter_title,
my_helper.latest_chapter_url,
)
def add_schedule(helper, urls: List[str] = None):
"""Add task to schedule
Args:
        helper (Helper): helper object to schedule update checks for
        urls (List[str], optional): alternative urls for the item. Defaults to None.
"""
print_latest_chapter(helper)
schedule.every(30).to(60).minutes.do(
my_checker,
my_helper=helper,
urls=urls,
show_no_update_msg=False,
)
def get_helper(item_obj, urls_type: str = "comic_urls"):
"""Define helper based on item_obj
Args:
item_obj (dict): item object contains helper name and urls
        urls_type (str, optional): which url list to check ("comic_urls" or "novel_urls"). Defaults to "comic_urls".
Returns:
helper
"""
# Set helper list for checking (default list: comic)
helper_list = []
if urls_type == "comic_urls":
if "comic_urls" in item_obj:
helper_list = [
# CocomanhuaHelper,
ManhuaguiHelper,
Qiman6Helper,
BaozimhHelper,
]
elif urls_type == "novel_urls":
if "novel_urls" in item_obj:
helper_list = [
WutuxsHelper,
EsjzoneHelper,
SyosetuHelper,
]
else:
raise ValueError(f"Unknown urls_type: {urls_type}")
# Check helper type and return helper
if len(helper_list) > 0:
for helper in helper_list:
if helper.match(item_obj[urls_type][0]):
return helper(
name=item_obj["name"], url=item_obj[urls_type][0]
)
return None
def main():
"""Main logic"""
# logger.info("Check hour range: {}:00:00 - {}:00:00".format(start_hour, end_hour))
with open(LIST_YAML_PATH, encoding="utf8") as list_file:
yml_data = yaml.load(list_file, Loader=yaml.FullLoader)
# Add schedule for each item
for item_obj in yml_data:
# Create helper object and add add to schedule
for urls_type in ["comic_urls", "novel_urls"]:
helper = get_helper(item_obj, urls_type)
if helper:
add_schedule(helper, urls=item_obj[urls_type])
if len(schedule.jobs) > 0:
logger.info("Scheduled %s checker(s).", len(schedule.jobs))
else:
raise ValueError(
"No schedule job found, please check format in list.yaml"
)
# Run the scheduler
while True:
schedule.run_pending()
time.sleep(1)
if __name__ == "__main__":
logger.info("Program Start!")
main()
| 2.3125 | 2 |
To do.py | dotkim/PyToDo | 0 | 12767022 | <gh_stars>0
import pypyodbc, cmd, sys, os, datetime, pathlib
ScriptDir = os.path.dirname(os.path.realpath('__file__')) # Where the scripts should be placed, which is root
def dbconnection():
try:
conString = open('.\Config\ConnectionStrings.config', 'r')
connection = pypyodbc.connect(conString.read())
conString.close()
return connection
except:
print('dberror')
print(sys.exc_info())
#SysL.write(str(datetime.datetime.now()) + " " + str(sys.exc_info()[0]) + '\n')
return None
# End of function
def AuditUser(arg, idarg, funcID):
    LogDir = ScriptDir + '\\Logs'
    pathlib.Path(LogDir).mkdir(parents=True, exist_ok=True)
    AuditLog = LogDir + '\\AuditLog' + str(datetime.date.today()) + '.log'
    SysLog = LogDir + '\\SysLog' + str(datetime.date.today()) + '.log'
    try:
        AL = open(AuditLog, 'a')
        AL.write(str(datetime.datetime.now()) + ' - TaskID: ' + str(idarg) + ' with task: ' + arg + '\n')
        AL.close()
    except FileNotFoundError:
        SysL = open(SysLog, 'a')
        SysL.write(str(datetime.datetime.now()) + 'Failed to create AuditLog file' + '\n')
        SysL.close()
def LogAdd(arg, idarg):
LogDir = ScriptDir + '\Logs'
pathlib.Path(LogDir).mkdir(parents=True, exist_ok=True)
AddLog = LogDir + '\AddLog' + str(datetime.date.today()) + '.log'
SysLog = LogDir + '\SysLog' + str(datetime.date.today()) + '.log'
try:
AddL = open(AddLog, 'a')
AddL.write(str(datetime.datetime.now()) + ' - TASK: ' + arg + ' WITH ID: ' + str(idarg) + '\n')
AddL.close()
except FileNotFoundError:
SysL = open(SysLog, 'a')
SysL.write(str(datetime.datetime.now()) + 'Failed to create AddLog file' + '\n')
SysL.close()
# End of function
StartCleanUp = 1 # Clean CMD and connectio to DB. Then dont do this again.
if StartCleanUp == 1:
print('Connecting to Database...')
connection = dbconnection()
connection.close()
os.system('CLS')
print('Type help for Available commands.')
StartCleanUp = 0
# Start of programm
class TaskForce(cmd.Cmd):
    prompt = 'Choice: '
# All functions are to be listed in help
    def do_help(self, arg):
print('------Availabe commands------')
print('Show') # Show all current tasks
print('Add') # Add a new task
print('Complete') # Complete a task
print('Remove') # Remove tasks
print('Close') # Close the program
# End of function
def do_show(self, arg=''):
os.system('CLS')
connection = dbconnection()
c = connection.cursor()
c.execute('EXEC dbo.GetActiveTasks')
TaskList = c.fetchall()
for row in TaskList:
print(row)
connection.close()
# End of function
def do_add(self, arg=1):
FuncID = 1
if arg == '':
arg = 1
task = []
connection = dbconnection()
for i in range(int(arg)):
task.insert(i, input('Task: '))
c = connection.cursor()
c.execute("""
EXEC dbo.AddTask
@TaskText = '{TaskText}'
""".\
format(TaskText=task[i]))
# This needs cleanup, add a procedure to the logging file.
# c.execute("""
# SELECT TaskID
# FROM dbo.Tasks
# WHERE TaskDesc = '{ID}'
# """.\
# format(ID=task[i]))
# TID = c.fetchall()
# # Begin logging
# AuditUser(task[i], TID[0][0])
connection.commit()
connection.close()
self.do_show()
# End of function
def do_complete(self, arg):
os.system('CLS')
connection = dbconnection()
c = connection.cursor()
c.execute("""
SELECT TaskID, TaskDesc
FROM dbo.Tasks
WHERE IsComplete = 0
""")
TaskList = c.fetchall() # Fetch the current active tasks with IDs
for x in range(0, len(TaskList)):
print(str(x+1) + ' - ' + TaskList[x][1])
# Print a list for all the tasks
Choice = int(input('Choice: '))
c.execute("""
EXEC dbo.CompleteTask
@SelectedID = {cid}
""".\
format(cid=TaskList[Choice-1][0])) # Find chosen task ID via indexing
connection.commit()
connection.close()
self.do_show()
# End of function
def do_remove(self, arg):
os.system('CLS')
connection = dbconnection()
c = connection.cursor()
c.execute("""
SELECT TaskID, TaskDesc
FROM dbo.Tasks
WHERE IsComplete = 0
""")
TaskList = c.fetchall() # Fetch the current active tasks with IDs
for x in range(0, len(TaskList)):
print(str(x+1) + ' - ' + TaskList[x][1])
# Print a list for all the tasks
Choice = int(input('Choice: '))
c.execute("""
EXEC dbo.RemoveTask
@SelectedID = {cid}
""".\
format(cid=TaskList[Choice-1][0])) # Find chosen task ID via indexing
connection.commit()
connection.close()
self.do_show()
def do_close(self, arg):
sys.exit()
# End of function
if __name__ == '__main__':
TaskForce().cmdloop()
| 2.1875 | 2 |
acapi/resources/environmentlist.py | marynaperesypkina/python-acquia-cloud | 0 | 12767023 | <filename>acapi/resources/environmentlist.py
"""Acquia Cloud API server list resource."""
from acapi.resources.acquialist import AcquiaList
from acapi.resources.environment import Environment
class EnvironmentList(AcquiaList):
"""Dict of Acquia Cloud API Environment resources keyed by short name."""
def __init__(self, base_uri, auth, *args, **kwargs):
"""Constructor."""
super(EnvironmentList, self).__init__(base_uri, auth, *args, **kwargs)
self.fetch()
def fetch(self):
"""Fetch and store environment objects."""
envs = self.request(uri=self.uri)
for env in envs:
name = str(env['name'])
env_uri = self.get_resource_uri(name)
self.__setitem__(name, Environment(env_uri, self.auth, data=env))
def get_resource_uri(self, name):
"""Generate the resource URI.
Parameters
----------
name : str
The name of the environment resource.
Returns
-------
str
The resource URI.
"""
return '{base_uri}/{name}'.format(base_uri=self.uri, name=name)
def set_base_uri(self, base_uri):
"""Set the base URI for server resources.
Parameters
----------
base_uri : str
The base URI to use for generating the new URI.
"""
uri = '{}/envs'.format(base_uri)
self.uri = uri
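# Example with a hypothetical base URI and auth object (commented out because
# the constructor performs a live API request):
# envs = EnvironmentList('https://cloudapi.acquia.com/v1/sites/prod:mysite',
#                        auth)
# print(envs['dev'])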
| 3.03125 | 3 |
pygradethis/utils.py | nischalshrestha/pygradethis | 12 | 12767024 | """
Module with useful functions.
"""
from typing import Union, List
import ast
def parse_code(input: Union[str, List[str]]) -> str:
"""Tries to parse code represented as string or list of strings
Parameters
----------
input : Union[str, List[str]]
either a str or a list of str
Returns
-------
str
the formatted string
Raises
------
SyntaxError
if there are any parsing issues
"""
if input is None:
return input
try:
simple = "".join(input)
ast.parse(simple)
return simple
except SyntaxError as e:
if "EOF" in str(e):
return "\n".join(input)
else:
raise SyntaxError("Problem parsing your code!")
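# Example: a list of already-complete lines is joined and returned as a
# single parseable string.
if __name__ == "__main__":
    print(parse_code(["import math\n", "print(math.pi)"]))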
| 3.609375 | 4 |
shifthappens/models/base.py | shift-happens-benchmark/icml-2022 | 1 | 12767025 | <reponame>shift-happens-benchmark/icml-2022
"""Base classes and helper functions for adding models to the benchmark.
To add a new model, implement a new wrapper class inheriting from
:py:class:`shifthappens.models.base.Model`, and from any of the Mixins defined
in :py:mod:`shifthappens.models.mixins`.
Model results should be converted to :py:class:`numpy.ndarray` objects, and
packed into an :py:class:`shifthappens.models.base.ModelResult` instance.
"""
import abc
import dataclasses
from typing import Iterator
import numpy as np
from shifthappens.data.base import DataLoader
from shifthappens.models import mixins
class ModelResult:
"""Emissions of a model after processing a batch of data.
Each model needs to return class labels that are compatible with
    the ILSVRC2012 labels. We use the same convention used by PyTorch
regarding the ordering of labels.
Args:
class_labels: ``(N, k)``, top-k predictions for
each sample in the batch. Choice of ``k`` can be selected by
the user, and potentially influences the type of accuracy
based benchmarks that the model can be run on. For standard
ImageNet, ImageNet-C evaluation, choose at least ``k=5``.
confidences: ``(N, 1000)``, confidences for each class.
Standard PyTorch ImageNet class label order is expected for
this array. Scores can be in the range ``-inf`` to ``inf``.
uncertainties: ``(N, 1000)``, uncertainties for the
different class predictions. Different from the ``confidences``,
            this is a measure of the certainty of the given ``confidences``,
            as is common e.g. in Bayesian deep neural networks.
ood_scores: ``(N,)``, score for interpreting the sample
as an out-of-distribution class, in the range ``-inf`` to ``inf``.
features: ``(N, d)``, where ``d`` can be arbitrary, feature
representation used to arrive at the given predictions.
"""
__slots__ = [
"class_labels",
"confidences",
"uncertainties",
"ood_scores",
"features",
]
def __init__(
self,
class_labels: np.ndarray,
confidences: np.ndarray = None,
uncertainties: np.ndarray = None,
ood_scores: np.ndarray = None,
features: np.ndarray = None,
):
self.class_labels = class_labels
self.confidences = confidences
self.uncertainties = uncertainties
self.ood_scores = ood_scores
self.features = features
@dataclasses.dataclass
class PredictionTargets:
"""Contains boolean flags of which type of targets model is predicting. Note
that at least one flag should be set as ``True`` and model should be inherited
from corresponding ModelMixin.
Args:
class_labels: Set to ``True`` if model returns predicted labels.
confidences: Set to ``True`` if model returns confidences.
uncertainties: Set to ``True`` if model returns uncertainties.
ood_scores: Set to ``True`` if model returns ood scores.
features: Set to ``True`` if model returns features.
"""
class_labels: bool = False
confidences: bool = False
uncertainties: bool = False
ood_scores: bool = False
features: bool = False
def __post_init__(self):
assert any(
getattr(self, field.name) for field in dataclasses.fields(self)
), "At least one prediction target must be set."
class Model(abc.ABC):
"""Model base class.
Override the :py:meth:`_predict` method to define predictions type of your specific model.
If your model uses unsupervised adaptation mechanisms override :py:meth:`prepare`
as well.
Also make sure that your model inherits from the mixins from :py:mod:`shifthappens.models.mixins`
corresponding to your model predictions type (e.g., :py:class:`LabelModelMixin <shifthappens.models.mixins.LabelModelMixin>` for labels
or :py:class:`ConfidenceModelMixin <shifthappens.models.mixins.ConfidenceModelMixin>` for confidences).
"""
def prepare(self, dataloader: DataLoader):
"""If the model uses unsupervised adaptation mechanisms, it will run those.
Args:
dataloader: Dataloader producing batches of data.
"""
pass
def predict(
self, input_dataloader: DataLoader, targets: PredictionTargets
) -> Iterator[ModelResult]:
"""Yield all the predictions of the model for all data samples contained
in the dataloader
Args:
input_dataloader: Dataloader producing batches of data.
targets: Indicates which kinds of targets should
be predicted.
Returns:
Prediction results for the given batch. Depending on the target
arguments this includes the predicted labels, class confidences,
class uncertainties, ood scores, and image features, all as
:py:class:`numpy.ndarray` objects.
"""
if targets.class_labels:
assert issubclass(type(self), mixins.LabelModelMixin)
if targets.confidences:
assert issubclass(type(self), mixins.ConfidenceModelMixin)
if targets.uncertainties:
assert issubclass(type(self), mixins.UncertaintyModelMixin)
if targets.ood_scores:
assert issubclass(type(self), mixins.OODScoreModelMixin)
if targets.features:
assert issubclass(type(self), mixins.FeaturesModelMixin)
return self._predict(input_dataloader, targets)
@abc.abstractmethod
def _predict(
self, input_dataloader: DataLoader, targets: PredictionTargets
) -> Iterator[ModelResult]:
"""
Override this function for the specific model.
Args:
input_dataloader: Dataloader producing batches of data.
targets: Indicates which kinds of targets should be predicted.
Returns:
Yields prediction results for all batches yielded by the dataloader.
Depending on the target arguments the model results may include the
predicted labels, class confidences, class uncertainties, ood scores,
and image features, all as :py:class:`numpy.ndarray` objects.
"""
raise NotImplementedError()
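# A minimal sketch of a concrete model (the class and label values here are
# illustrative, not part of the benchmark):
#
#   class ConstantModel(Model, mixins.LabelModelMixin):
#       def _predict(self, input_dataloader, targets):
#           for batch in input_dataloader:
#               yield ModelResult(class_labels=np.zeros((len(batch), 5), dtype=int))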
| 2.5625 | 3 |
data/stream/electricityTest.py | PseudoAj/MyInsightRepo | 5 | 12767026 |
#!/usr/bin/env python
#title :electricityTest.py
#description :Class to test the electricity logic
#author :<NAME>
#date :02122017
#version :0.1
#==============================================================================
# Libraries
import unittest
from electricity import Electricity
#==============================================================================
class ElectricityTest(unittest.TestCase):
# Setup method
def setUp(self):
# Initiate class
self.thisElec = Electricity()
# Test for the list to csv
def testReadAll(self):
# check for the right execution
self.assertTrue(self.thisElec.readAll())
# Main function
if __name__ == '__main__':
    # initiate and run the unittest
unittest.main()
| 2.71875 | 3 |
components/mpas-seaice/testing_and_setup/testcases/square/1D_velocity_hex/plot_method_comparison.py | Fa-Li/E3SM | 235 | 12767027 |
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
import math
fig, axes = plt.subplots()
subcycleNumber = 7680
operatorMethods = ["wachspress","pwl","weak"]
for operatorMethod in operatorMethods:
# data in
filenameIn = "./output_hex_%s_%i/output.2000.nc" %(operatorMethod,subcycleNumber)
filein = Dataset(filenameIn, "r")
nCells = len(filein.dimensions["nCells"])
nVertices = len(filein.dimensions["nVertices"])
vertexDegree = len(filein.dimensions["vertexDegree"])
nTimes = len(filein.dimensions["Time"])
cellsOnVertex = filein.variables["cellsOnVertex"][:]
cellsOnVertex -= 1
xVertex = filein.variables["xVertex"][:]
yVertex = filein.variables["yVertex"][:]
xCell = filein.variables["xCell"][:]
yCell = filein.variables["yCell"][:]
uVelocity = filein.variables["uVelocity"][-1,:]
vVelocity = filein.variables["vVelocity"][-1,:]
uVelocities = filein.variables["uVelocity"][:,:]
filein.close()
xmin = np.amin(xVertex)
xmax = np.amax(xVertex)
ymin = np.amin(yVertex)
ymax = np.amax(yVertex)
us = []
for iTime in range(0,nTimes):
x = []
u = []
for iVertex in range(0,nVertices):
if (math.fabs(yVertex[iVertex] - 508068.236886871) < 1e-8):
x.append(xVertex[iVertex])
u.append(uVelocities[iTime,iVertex])
x = np.array(x)
u = np.array(u)
sortedIdxs = x.argsort()
x = x[sortedIdxs]
u = u[sortedIdxs]
us.append(math.sqrt(np.sum(np.power(u,2))))
if (iTime == nTimes-1):
axes.plot(x, u, label=operatorMethod)
#axes.plot(x, np.zeros(x.shape[0]), zorder=1, c='k')
uAir = 1.0
rhoair = 1.3
rhow = 1026.0
cocn = 0.00536
cair = 0.0012
Pstar = 2.75e4
Cstar = 20.0
e = 2
alpha = math.sqrt(1.0 + math.pow(1.0 / e, 2))
Lx = 1280000
uu = []
for xx in x:
a = xx / Lx
v = 2.0 * a
dadx = (1.0 / Lx)
dvdx = 2.0 * dadx
oceanStressCoeff = rhow * cocn * a
airStress = rhoair * uAir * uAir * a * cair
P = Pstar * v * math.exp(-Cstar * (1-a))
dPdx = Pstar * math.exp(-Cstar * (1-a)) * (dvdx + v * Cstar * dadx)
print(xx, a, -Cstar * (1-a), P, dPdx)
u = max((airStress - 0.5*(alpha + 1.0) * dPdx) / oceanStressCoeff, 0.0)
uu.append(u)
axes.plot(x, uu, zorder=2, c='r')
axes.set_xlabel("time")
axes.set_ylabel("uVelocity")
axes.legend()
plt.savefig("1D_velocity_operator.png",dpi=300)
| 1.757813 | 2 |
File Corruption/excel.py | box-community/admin-toolkit | 2 | 12767028 |
# Import pandas
from __future__ import print_function
import os
import pandas as pd
import csv
# Assign spreadsheet filename to `file`
xl_file = "..\..\Box File Corruption Investigation\EID 81319 - All Affected File Versions (Fixed info as of " \
"2018-02-27).xlsx "
# Load spreadsheet into two different variables
xl = pd.ExcelFile(xl_file)
xl2 = pd.ExcelFile(xl_file)
# Load the needed columns from sheet 1
items = xl.parse(sheet_name=1)[
    ["file_name", "version_created_time", "uploader_email", "file_id", "parent_folder_id"]].to_numpy()
count = 0
# Load the uploader_email column from sheet 2 to get the list of uploaders
unique_users = xl2.parse(sheet_name=2, header=0)[["uploader_email"]].to_numpy()
for user in unique_users:
current_user = user[0]
completeName = os.path.join("output", current_user + ".csv")
myfields = ["file_name", "link_to_file", "parent_folder_id", "version_created_time", "uploader_email"]
    with open(completeName, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=myfields)
writer.writeheader()
for item in items:
if item[2] == current_user:
try:
writer.writerow(
{"file_name": str(item[0]),
"link_to_file": "=HYPERLINK(\"https://iu.app.box.com/file/" + str(item[3]) + "\")",
"parent_folder_id": "=HYPERLINK(\"https://iu.app.box.com/folder/" + str(item[4]) + "\")",
"version_created_time": str(item[1]),
"uploader_email": str(item[2])
}
)
                except Exception:
count += 1
print("Non alphanumeric characters for: " + str(item[2]))
writer.writerow({"file_name": "*non characters*", ### ADDRESS THIS ###
"link_to_file": "=HYPERLINK(\"https://iu.app.box.com/file/" + str(item[3]) + "\")",
"parent_folder_id": "=HYPERLINK(\"https://iu.app.box.com/folder/" + str(
item[4]) + "\")",
"version_created_time": str(item[1]),
"uploader_email": str(item[2])})
print(str(count) + " Exceptions")
| 2.640625 | 3 |
azure-iot-device/azure/iot/device/iothub/sync_handler_manager.py | danewalton/azure-iot-sdk-python | 0 | 12767029 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""This module contains the manager for handler methods used by the callback client"""
import logging
import threading
import abc
import six
from azure.iot.device.common import handle_exceptions
from azure.iot.device.common.chainable_exception import ChainableException
from azure.iot.device.iothub.sync_inbox import InboxEmpty
import concurrent.futures
logger = logging.getLogger(__name__)
MESSAGE = "_on_message_received"
METHOD = "_on_method_request_received"
TWIN_DP_PATCH = "_on_twin_desired_properties_patch_received"
# TODO: add more for "event"
class HandlerManagerException(ChainableException):
"""An exception raised by a HandlerManager
"""
pass
class HandlerRunnerKillerSentinel(object):
"""An object that functions according to the sentinel design pattern.
Insert into an Inbox in order to indicate that the Handler Runner associated with that
Inbox should be stopped.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class AbstractHandlerManager(object):
"""Partial class that defines handler manager functionality shared between sync/async"""
def __init__(self, inbox_manager):
self._inbox_manager = inbox_manager
self._handler_runners = {
# Inbox handler tasks
MESSAGE: None,
METHOD: None,
TWIN_DP_PATCH: None,
# Other handler tasks
# TODO: add
}
# Inbox handlers
self._on_message_received = None
self._on_method_request_received = None
self._on_twin_desired_properties_patch_received = None
# Other handlers
# TODO: add
def _get_inbox_for_handler(self, handler_name):
"""Retrieve the inbox relevant to the handler"""
if handler_name == METHOD:
return self._inbox_manager.get_method_request_inbox()
elif handler_name == TWIN_DP_PATCH:
return self._inbox_manager.get_twin_patch_inbox()
elif handler_name == MESSAGE:
return self._inbox_manager.get_unified_message_inbox()
else:
return None
@abc.abstractmethod
def _inbox_handler_runner(self, inbox, handler_name):
"""Run infinite loop that waits for an inbox to receive an object from it, then calls
the handler with that object
"""
pass
@abc.abstractmethod
def _event_handler_runner(self, handler_name):
pass
@abc.abstractmethod
def _start_handler_runner(self, handler_name):
"""Create, and store a handler runner
"""
pass
@abc.abstractmethod
def _stop_handler_runner(self, handler_name):
"""Cancel and remove a handler runner"""
pass
def _generic_handler_setter(self, handler_name, new_handler):
"""Set a handler"""
curr_handler = getattr(self, handler_name)
if new_handler is not None and curr_handler is None:
# Create runner, set handler
logger.debug("Creating new handler runner for handler: {}".format(handler_name))
setattr(self, handler_name, new_handler)
self._start_handler_runner(handler_name)
elif new_handler is None and curr_handler is not None:
# Cancel runner, remove handler
logger.debug("Removing handler runner for handler: {}".format(handler_name))
self._stop_handler_runner(handler_name)
setattr(self, handler_name, new_handler)
else:
# Update handler, no need to change runner
logger.debug("Updating set handler: {}".format(handler_name))
setattr(self, handler_name, new_handler)
def stop(self):
"""Stop the process of invoking handlers in response to events.
All pending items will be handled prior to stoppage.
"""
for handler_name in self._handler_runners:
if self._handler_runners[handler_name] is not None:
self._stop_handler_runner(handler_name)
def ensure_running(self):
"""Ensure the process of invoking handlers in response to events is running"""
for handler_name in self._handler_runners:
if (
self._handler_runners[handler_name] is None
and getattr(self, handler_name) is not None
):
self._start_handler_runner(handler_name)
@property
def on_message_received(self):
return self._on_message_received
@on_message_received.setter
def on_message_received(self, value):
self._generic_handler_setter(MESSAGE, value)
@property
def on_method_request_received(self):
return self._on_method_request_received
@on_method_request_received.setter
def on_method_request_received(self, value):
self._generic_handler_setter(METHOD, value)
@property
def on_twin_desired_properties_patch_received(self):
return self._on_twin_desired_properties_patch_received
@on_twin_desired_properties_patch_received.setter
def on_twin_desired_properties_patch_received(self, value):
self._generic_handler_setter(TWIN_DP_PATCH, value)
class SyncHandlerManager(AbstractHandlerManager):
"""Handler manager for use with synchronous clients"""
def _inbox_handler_runner(self, inbox, handler_name):
"""Run infinite loop that waits for an inbox to receive an object from it, then calls
the handler with that object
"""
logger.debug("HANDLER RUNNER ({}): Starting runner".format(handler_name))
# Define a callback that can handle errors in the ThreadPoolExecutor
def _handler_callback(future):
try:
e = future.exception(timeout=0)
except Exception as raised_e:
# This shouldn't happen because cancellation or timeout shouldn't occur...
# But just in case...
new_err = HandlerManagerException(
message="HANDLER ({}): Unable to retrieve exception data from incomplete invocation".format(
handler_name
),
cause=raised_e,
)
handle_exceptions.handle_background_exception(new_err)
else:
if e:
new_err = HandlerManagerException(
message="HANDLER ({}): Error during invocation".format(handler_name),
cause=e,
)
handle_exceptions.handle_background_exception(new_err)
else:
logger.debug(
"HANDLER ({}): Successfully completed invocation".format(handler_name)
)
# Run the handler in a threadpool, so that it cannot block other handlers (from a different task),
# or the main client thread. The number of worker threads forms an upper bound on how many instances
# of the same handler can be running simultaneously.
tpe = concurrent.futures.ThreadPoolExecutor(max_workers=4)
while True:
handler_arg = inbox.get()
if isinstance(handler_arg, HandlerRunnerKillerSentinel):
# Exit the runner when a HandlerRunnerKillerSentinel is found
logger.debug(
"HANDLER RUNNER ({}): HandlerRunnerKillerSentinel found in inbox. Exiting.".format(
handler_name
)
)
tpe.shutdown()
break
# NOTE: we MUST use getattr here using the handler name, as opposed to directly passing
# the handler in order for the handler to be able to be updated without cancelling
            # the running thread created for this runner
handler = getattr(self, handler_name)
logger.debug("HANDLER RUNNER ({}): Invoking handler".format(handler_name))
fut = tpe.submit(handler, handler_arg)
fut.add_done_callback(_handler_callback)
def _event_handler_runner(self, handler_name):
# TODO: implement
logger.error(".event_handler_runner() not yet implemented")
def _start_handler_runner(self, handler_name):
"""Start and store a handler runner thread
"""
if self._handler_runners[handler_name] is not None:
# This branch of code should NOT be reachable due to checks prior to the invocation
# of this method. The branch exists for safety.
raise HandlerManagerException(
"Cannot create thread for handler runner: {}. Runner thread already exists".format(
handler_name
)
)
inbox = self._get_inbox_for_handler(handler_name)
# NOTE: It would be nice to have some kind of mechanism for making sure this thread
# doesn't crash or raise errors, but it would require significant extra infrastructure
# and an exception in here isn't supposed to happen anyway. Perhaps it could be added
# later if truly necessary
if inbox:
thread = threading.Thread(target=self._inbox_handler_runner, args=[inbox, handler_name])
else:
thread = threading.Thread(target=self._event_handler_runner, args=[handler_name])
thread.daemon = True # Don't block program exit
# Store the thread
self._handler_runners[handler_name] = thread
thread.start()
def _stop_handler_runner(self, handler_name):
"""Stop and remove a handler runner task.
All pending items in the corresponding inbox will be handled by the handler before stoppage.
"""
# Add a Handler Runner Killer Sentinel to the relevant inbox
logger.debug(
"Adding HandlerRunnerKillerSentinel to inbox corresponding to {} handler runner".format(
handler_name
)
)
inbox = self._get_inbox_for_handler(handler_name)
inbox._put(HandlerRunnerKillerSentinel())
# Wait for Handler Runner to end due to the sentinel
logger.debug("Waiting for {} handler runner to exit...".format(handler_name))
thread = self._handler_runners[handler_name]
thread.join()
self._handler_runners[handler_name] = None
logger.debug("Handler runner for {} has been stopped".format(handler_name))
| 2.015625 | 2 |
miner/__init__.py | chrisedebo/nice-py-switcher | 0 | 12767030 |
#Miner plugin definition
| 0.941406 | 1 |
src/relstorage/adapters/drivers.py | mamico/relstorage | 0 | 12767031 | ##############################################################################
#
# Copyright (c) 2016 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Helpers for drivers
"""
from __future__ import print_function
import importlib
import sys
import os
from zope.interface import directlyProvides
from zope.interface import implementer
from .._compat import PYPY
from .._compat import PY3
from .._compat import casefold
from .._util import positive_integer
from .._util import consume
from .interfaces import IDBDriver
from .interfaces import IDBDriverFactory
from .interfaces import IDBDriverOptions
from .interfaces import DriverNotAvailableError
from .interfaces import NoDriversAvailableError
from .interfaces import ReplicaClosedException
from .interfaces import UnknownDriverError
logger = __import__('logging').getLogger(__name__)
def _select_driver(options, driver_options):
driver = _select_driver_by_name(options.driver, driver_options)
driver.configure_from_options(options)
return driver
def _select_driver_by_name(driver_name, driver_options):
driver_name = driver_name or 'auto'
driver_name = casefold(driver_name)
accept_any_driver = driver_name == 'auto'
# XXX: For testing, we'd like to be able to prohibit the use of auto.
for factory in driver_options.known_driver_factories():
exact_match = casefold(factory.driver_name) == driver_name
if accept_any_driver or exact_match:
try:
return factory()
except DriverNotAvailableError as e:
if not accept_any_driver:
e.driver_options = driver_options
raise
# Well snap, no driver. Either we would take any driver,
# and none were available, or we needed an exact driver that
# wasn't found
error = NoDriversAvailableError if accept_any_driver else UnknownDriverError
raise error(driver_name, driver_options)
class DriverNotImportableError(DriverNotAvailableError,
ImportError):
"When the module can't be imported."
class AbstractModuleDriver(object):
"""
Base implementation of a driver, based on a module, as used in DBAPI.
Subclasses must provide:
- ``MODULE_NAME`` property.
- ``__name__`` property
- Implementation of ``get_driver_module``; this should import the
module at runtime.
"""
#: The name of the DB-API module to import.
MODULE_NAME = None
#: The name written in config files
__name__ = None
#: Can this module be used on PyPy?
AVAILABLE_ON_PYPY = True
#: Set this to false if your subclass can do static checks
#: at import time to determine it should not be used.
#: Helpful for things like Python version detection.
STATIC_AVAILABLE = True
#: Priority of this driver, when available. Lower is better.
#: (That is, first choice should have value 1, and second choice value
#: 2, and so on.)
PRIORITY = 100
#: Priority of this driver when running on PyPy. Lower is better.
PRIORITY_PYPY = 100
#: Class attribute. If set to a true value (not the default),
#: ask the underlying driver to work in as strict a mode as possible
#: when it comes to detecting programming errors.
#:
#: Typically set by tests. Most drivers do not have a stricter mode
#: that can be enabled.
STRICT = False
# Can this driver work with gevent?
_GEVENT_CAPABLE = False
# Does this driver need the socket module patched?
# Only checked if _GEVENT_CAPABLE is set to True.
_GEVENT_NEEDS_SOCKET_PATCH = True
    #: The ``arraysize`` we set on cursors returned from our :meth:`cursor`
    #: method, used by ``fetchmany`` and (hopefully) iteration (which is a
    #: DB-API extension). We default to 1024, but the environment variable
#: RS_CURSOR_ARRAYSIZE can be set to an int to change this default.
#: Individual drivers *might* choose a different default.
cursor_arraysize = positive_integer(
os.environ.get('RS_CURSOR_ARRAYSIZE', '1024')
)
DriverNotAvailableError = DriverNotAvailableError
# Can the driver support the full range of a 64-bit unsigned ID for
# OID and TID parameters?
supports_64bit_unsigned_id = True
def __init__(self):
if PYPY and not self.AVAILABLE_ON_PYPY:
raise self.DriverNotAvailableError(self.__name__)
if not self.STATIC_AVAILABLE:
raise self.DriverNotAvailableError(self.__name__)
try:
self.driver_module = mod = self.get_driver_module()
except ImportError:
logger.debug("Unable to import driver", exc_info=True)
raise DriverNotImportableError(self.__name__)
self.disconnected_exceptions = (mod.OperationalError,
mod.InterfaceError,
ReplicaClosedException)
self.close_exceptions = self.disconnected_exceptions + (mod.ProgrammingError,)
self.lock_exceptions = (mod.DatabaseError,)
# If we try to do something very wrong, a bug in our code,
# we *should* get a ProgrammingError. Unfortunately, some drivers
# raise ProgrammingError for other things, such as failing to get a lock.
self.illegal_operation_exceptions = (mod.ProgrammingError,)
self.use_replica_exceptions = (mod.OperationalError,)
self.Binary = mod.Binary
self._connect = mod.connect
self.priority = self.PRIORITY if not PYPY else self.PRIORITY_PYPY
def connect(self, *args, **kwargs):
return self._connect(*args, **kwargs)
def get_driver_module(self):
"""Import and return the driver module."""
return importlib.import_module(self.MODULE_NAME)
def gevent_cooperative(self):
# Return whether this driver is cooperative with gevent.
# This takes into account whether the system is
# and/or needs to be monkey-patched
if not self._GEVENT_CAPABLE:
return False
if self._GEVENT_NEEDS_SOCKET_PATCH:
return self._sockets_gevent_monkey_patched()
return True
def configure_from_options(self, options): # pylint:disable=unused-argument
"""Default implementation; does nothing."""
def _sockets_gevent_monkey_patched(self):
# Return whether the socket module has been monkey-patched
# by gevent
try:
from gevent import monkey
except ImportError: # pragma: no cover
return False
else:
# some versions of gevent have a bug where if we're monkey-patched
# on the command line using python -m gevent.monkey /path/to/testrunner ...
# it doesn't report being monkey-patched.
import socket
return monkey.is_module_patched('socket') or 'gevent' in repr(socket.socket)
    # Common compatibility shims, overridden as needed.
def set_autocommit(self, conn, value):
conn.autocommit(value)
def cursor(self, conn, server_side=False): # pylint:disable=unused-argument
cur = conn.cursor()
cur.arraysize = self.cursor_arraysize
return cur
def debug_connection(self, conn, *extra): # pragma: no cover
print(conn, *extra)
def get_messages(self, conn): # pragma: no cover pylint:disable=unused-argument
return ()
def __transaction_boundary(self, conn, meth):
meth()
messages = self.get_messages(conn)
for msg in messages:
logger.debug(msg.strip())
def commit(self, conn, cursor=None): # pylint:disable=unused-argument
self.__transaction_boundary(conn, conn.commit)
def rollback(self, conn):
self.__transaction_boundary(conn, conn.rollback)
def connection_may_need_rollback(self, conn): # pylint:disable=unused-argument
return True
connection_may_need_commit = connection_may_need_rollback
def synchronize_cursor_for_rollback(self, cursor):
"""Exceptions here are ignored, we don't know what state the cursor is in."""
# psycopg2 raises ProgrammingError if we rollback when no results
# are present on the cursor. mysql-connector-python raises
# InterfaceError. OTOH, mysqlclient raises nothing and even wants
# it in certain circumstances.
if cursor is not None:
try:
consume(cursor)
except Exception: # pylint:disable=broad-except
pass
# Things that can be recognized as a pickled state,
# passed to an io.BytesIO reader, and unpickled.
# Py MySQL Connector/Python returns a bytearray, whereas
# C MySQL Connector/Python returns bytes.
# sqlite uses buffer on Py2 and memoryview on Py3.
# Keep these ordered with the most common at the front;
# Python does a linear traversal of type checks.
state_types = (bytes, bytearray)
def binary_column_as_state_type(self, data):
if isinstance(data, self.state_types) or data is None:
return data
__traceback_info__ = type(data), data
raise TypeError("Unknown binary state column")
def binary_column_as_bytes(self, data):
# Take the same inputs as `as_state_type`, but turn them into
        # actual bytes. This includes None and empty bytes, which become
        # the literal b''.
# XXX: TODO: We don't need all these checks up here. Just the common ones,
# move everything else to specific drivers.
if data is None or not data:
return b''
if isinstance(data, bytes):
return data
if isinstance(data, memoryview):
return data.tobytes()
        # Everything left we convert with the bytes() constructor.
# That would be buffer and bytearray
__traceback_info__ = data, type(data)
return bytes(data)
def enter_critical_phase_until_transaction_end(self, connection, cursor):
"""Default implementation; does nothing."""
def is_in_critical_phase(self, connection, cursor):
"""Default implementation; returns a false value."""
def exit_critical_phase(self, connection, cursor):
"Default implementation; does nothing."
class MemoryViewBlobDriverMixin(object):
# psycopg2 is smart enough to return memoryview or buffer on
# Py3/Py2, respectively, for BYTEa columns. sqlite3 does exactly
# the same for BLOB columns (on Python 2; on Python 3 it returns
# bytes instead of buffer), and defines ``Binary`` that way as
# well.
# memoryview can't be passed to bytes() on Py2 or Py3, but it can
# be passed to cStringIO.StringIO() or io.BytesIO() ---
# unfortunately, memoryviews, at least, don't like going to
# io.BytesIO() on Python 3, and that's how we unpickle states. So
# while ideally we'd like to keep it that way, to save a copy, we
# are forced to make the copy. Plus there are tests that like to
# directly compare bytes.
if PY3:
def binary_column_as_state_type(self, data):
if data:
# Calling 'bytes()' on a memoryview in Python 3 does
# nothing useful.
data = data.tobytes()
return data
else:
def binary_column_as_state_type(self, data):
if data:
data = bytes(data)
return data
@implementer(IDBDriverFactory)
class _ClassDriverFactory(object):
def __init__(self, driver_type):
self.driver_type = driver_type
# Getting the name is tricky, the class wants to shadow it.
self.driver_name = driver_type.__dict__.get('__name__') or driver_type.__name__
def check_availability(self):
try:
self.driver_type()
except DriverNotAvailableError:
return False
return True
def __call__(self):
return self.driver_type()
def __eq__(self, other):
return (casefold(self.driver_name), self.driver_type) == (
casefold(other.driver_name), other.driver_type)
def __hash__(self):
return hash((casefold(self.driver_name), self.driver_type))
def __getattr__(self, name):
return getattr(self.driver_type, name)
def implement_db_driver_options(name, *driver_modules):
"""
Helper function to be called at a module scope to
make it implement ``IDBDriverOptions``.
:param str name: The value of ``__name__``.
:param driver_modules: Each of these names a module that has
one or more implementations of ``IDBDriver`` in it,
as named in their ``__all__`` attribute.
"""
module = sys.modules[name]
driver_factories = set()
for driver_module in driver_modules:
driver_module = importlib.import_module('.' + driver_module,
name)
for factory in driver_module.__all__:
factory = getattr(driver_module, factory)
if IDBDriver.implementedBy(factory): # pylint:disable=no-value-for-parameter
driver_factories.add(_ClassDriverFactory(factory))
module.known_driver_factories = lambda: sorted(
driver_factories,
key=lambda factory: factory.PRIORITY if not PYPY else factory.PRIORITY_PYPY,
)
directlyProvides(module, IDBDriverOptions)
module.select_driver = lambda driver_name=None: _select_driver_by_name(driver_name,
sys.modules[name])
class _NoGeventDriverMixin(object):
import time as gevent
def get_driver_module(self):
raise ImportError("Could not import gevent")
class _NoGeventConnectionMixin(object):
gevent_hub = None
gevent_read_watcher = None
gevent_write_watcher = None
gevent_sleep = None
try:
import gevent
except ImportError:
GeventDriverMixin = _NoGeventDriverMixin
GeventConnectionMixin = _NoGeventConnectionMixin
else:
import select
from gevent.socket import wait
get_hub = gevent.get_hub
class GeventDriverMixin(object):
gevent = gevent
class GeventConnectionMixin(_NoGeventConnectionMixin):
"""
Helper for a connection that waits using gevent.
Subclasses must provide a ``fileno()`` method. The usual
pattern for executing a query would then be something like
this::
query = format_query_to_bytes(...)
self.gevent_wait_write()
self.send_query()
self.gevent_wait_read()
self.read_results()
It is important that ``send_query`` do nothing but put bytes
on the wire. It must not include any attempt to wait for a
response from the database, especially if that response could
take an arbitrary amount of time or block. (Of course, if
``send_query`` and ``read_results`` can arrange to use gevent
waiting functions too, you'll have finer control. This example
is all-or-nothing. Sometimes its easy to handle
``read_results`` in a looping function using a server-side
cursor.)
The ``gevent_wait_read`` and ``gevent_wait_write`` functions
are implemented using :func:`gevent.socket.wait`. That
function always takes a full iteration of the event loop to
determine whether a file descriptor is ready; it always yields
control to other greenlets immediately. gevent's own sockets
don't work that way; instead they try to read/write and catch
the resulting EAGAIN exception. Only after that do they yield
to the event loop. This is for good reason: eliminating
unnecessary switches can lead to higher throughput.
Here, a pass through the event loop can be risky. If we send a
request that establishes database locks that will require
further action from the greenlet to relinquish, those will
come into being (potentially blocking other greenlets in the
same or different processes) sometime between when
``send_query`` is entered and when ``gevent_wait_read`` exits.
If, for any reason, a different greenlet runs while we have
yielded to the event loop and blocks on a resource we own that
is not gevent cooperative (a non-monkey-patched lock, a
different database) we'll never regain control. And thus we'll
never be able to make forward progress and release those
locks. Since they're shared locks, that could harm arbitrary
machines in the cluster.
Thus, we perform a similar optimization as gevent sockets: we
first check to see if the file descriptor is ready and only
yield to the event loop if it isn't. The cost is an extra
system call to ``select``. For write requests, we could be
able to assume that they are always ready (depending on the
nature of the protocol); if that's so, override
:meth:`gevent_check_write`. The same goes for
:meth:`gevent_check_read`. This doesn't eliminate the problem,
but it should substantially reduce the chances of it
happening.
"""
gevent_sleep = staticmethod(gevent.sleep)
def close(self):
self.__close_watchers()
super(GeventConnectionMixin, self).close()
def __check_watchers(self):
# We can be used from more than one thread in a sequential
# fashion.
hub = get_hub()
if hub is not self.gevent_hub:
self.__close_watchers()
fileno = self.fileno()
hub = self.gevent_hub = get_hub()
self.gevent_read_watcher = hub.loop.io(fileno, 1)
self.gevent_write_watcher = hub.loop.io(fileno, 2)
def __close_watchers(self):
if self.gevent_read_watcher is not None:
self.gevent_read_watcher.close()
self.gevent_write_watcher.close()
self.gevent_hub = None
        def gevent_check_read(self):
if select.select([self], (), (), 0)[0]:
return True
return False
def gevent_wait_read(self):
if not self.gevent_check_read():
self.__check_watchers()
wait(self.gevent_read_watcher,
hub=self.gevent_hub)
def gevent_check_write(self):
if select.select((), [self], (), 0)[1]:
return True
return False
def gevent_wait_write(self):
if not self.gevent_check_write():
self.__check_watchers()
wait(self.gevent_write_watcher,
hub=self.gevent_hub)
| 1.78125 | 2 |
skyportal/handlers/api/source_exists.py | jadalilleboe/skyportal | 1 | 12767032 |
import conesearch_alchemy as ca
from baselayer.app.access import auth_or_token
from ..base import BaseHandler
from ...models import (
Obj,
)
class SourceExistsHandler(BaseHandler):
@auth_or_token
def get(self, obj_id=None):
"""
---
single:
          description: Check whether a source with the given name exists
tags:
- sources
parameters:
- in: path
name: obj_id
required: false
schema:
type: string
multiple:
          description: Check whether any sources exist near a given position
tags:
- sources
parameters:
- in: query
name: ra
nullable: true
schema:
type: number
description: RA for spatial filtering (in decimal degrees)
- in: query
name: dec
nullable: true
schema:
type: number
description: Declination for spatial filtering (in decimal degrees)
- in: query
name: radius
nullable: true
schema:
type: number
description: Radius for spatial filtering if ra & dec are provided (in decimal degrees)
"""
ra = self.get_query_argument('ra', None)
dec = self.get_query_argument('dec', None)
radius = self.get_query_argument('radius', None)
if obj_id is not None:
s = Obj.get_if_accessible_by(obj_id, self.current_user)
if s is not None:
return self.success("A source of that name already exists.")
obj_query = Obj.query_records_accessible_by(self.current_user)
if any([ra, dec, radius]):
if not all([ra, dec, radius]):
return self.error(
"If any of 'ra', 'dec' or 'radius' are "
"provided, all three are required."
)
try:
ra = float(ra)
dec = float(dec)
radius = float(radius)
except ValueError:
return self.error(
"Invalid values for ra, dec or radius - could not convert to float"
)
other = ca.Point(ra=ra, dec=dec)
obj_query = obj_query.filter(Obj.within(other, radius))
objs = obj_query.all()
if len(objs) == 1:
return self.success(
f"A source at that location already exists: {objs[0].id}."
)
elif len(objs) > 1:
return self.success(
f"Sources at that location already exist: {','.join([obj.id for obj in objs])}."
)
return self.success("A source of that name does not exist.")
| 2.1875 | 2 |
einsum_pc.py | ByzanTine/AutoHOOT | 13 | 12767033 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# This maintains a global state.
# Two expressions are considered the same einsum expression only if they
# have the same subscripts and operand shapes.
class _EinsumPathCached:
def __init__(self):
self.path = {}
def __call__(self, *args, **kwargs):
subscript = args[0]
operands = args[1:]
key = subscript
key += '|'
for operand in operands:
key += '-'.join([str(dim) for dim in operand.shape])
key += '|'
if key not in self.path:
self.path[key] = np.einsum_path(*args,
**kwargs,
optimize='optimal')[0]
kwargs['optimize'] = self.path[key]
return np.einsum(*args, **kwargs)
einsum_pc = _EinsumPathCached()
# import time
# N = 10
# C = np.random.rand(N, N)
# I = np.random.rand(N, N, N, N)
# begin = time.time()
# for i in range(10):
# einsum_pc('pi,qj,ijkl,rk,sl->pqrs', C, C, I, C, C)
# einsum_pc('pi,qj,ijko,rk,so->pqrs', C, C, I, C, C)
# end = time.time()
# print(einsum_pc.path)
# print(f'{end - begin}')
# begin = time.time()
# for i in range(10):
# np.einsum('pi,qj,ijkl,rk,sl->pqrs', C, C, I, C, C, optimize='optimal')
# end = time.time()
# print(f'{end - begin}')
| 2.234375 | 2 |
aaem_summaries/components/ashp_non_res/summary.py | gina-alaska/alaska_affordable_energy_model | 1 | 12767034 |
"""
Air Source Heat Pump Non-residential Outputs
--------------------------------------------
output functions for Air Source Heat Pump Non-residential component
"""
import os.path
from pandas import DataFrame
import aaem.constants as constants
from aaem.components import comp_order
import aaem_summaries.web_lib as wl
COMPONENT_NAME = "Non-Residential ASHP"
DESCRIPTION = """
This component calculates the potential change in heating oil usage from the installation of new air source heat pumps in non-residential buildings (assumed to heat 30% of non-residential square footage).
"""
def generate_web_summary (web_object, community):
"""generate HTML summary for a community.
generates web_object.directory/community/ashp_non_residential.html and
associated csv files.
Parameters
----------
web_object: WebSummary
a WebSummary object
community: str
community name
See also
--------
aaem.web :
WebSummary object definition
"""
## get the template
template = web_object.component_html
## get the component (the modeled one)
modeled = web_object.results[community][COMPONENT_NAME]
start_year = modeled.start_year
end_year = modeled.actual_end_year
    ## inputs for the table-making functions
projects = {'Modeled ' + COMPONENT_NAME: modeled}
## get forecast stuff (consumption, generation, etc)
#~ fuel_consumed = nr_comp.baseline_HF_consumption
nr_comp = web_object.results[community]["Non-residential Energy Efficiency"]
fuel_consumed = DataFrame(
nr_comp.baseline_HF_consumption,
columns=['fuel consumed'],
index = range(nr_comp.start_year, nr_comp.end_year+1)
)['fuel consumed'].ix[start_year:end_year]
fuel_consumed = fuel_consumed * constants.mmbtu_to_gal_HF
## get the diesel prices
diesel_price = \
modeled.cd['diesel prices'] + modeled.cd['heating fuel premium']
diesel_price = \
diesel_price[diesel_price.columns[0]].ix[start_year:end_year]
## get diesel generator efficiency
eff = modeled.cd['diesel generation efficiency']
## get generation fuel costs per year (modeled)
base_cost = fuel_consumed * diesel_price
base_cost.name = 'Base Cost'
table1 = wl.make_costs_table(community, COMPONENT_NAME, projects, base_cost,
web_object.directory)
## get generation fuel used (modeled)
base_con = (base_cost - base_cost) + fuel_consumed
base_con.name = 'Base Consumption'
table2 = wl.make_consumption_table(community, COMPONENT_NAME,
projects, base_con,
web_object.directory,
'get_fuel_total_saved()')
## info for modeled
info = create_project_details_list (modeled)
## info table (list to send to template)
info_for_projects = [{'name':'Modeled non-residential air source heat pump project',
'info':info}]
## create list of charts
charts = [
{'name':'costs', 'data': str(table1).replace('nan','null'),
'title': 'Estimated Heating Costs for non-residential sector',
'type': "'$'",'plot': True,},
{'name':'consumption', 'data': str(table2).replace('nan','null'),
'title':'Heating Fuel Consumed by non-residential sector',
'type': "'other'",'plot': True,}
]
    ## generate html
msg = None
if community in web_object.bad_data_coms:
msg = web_object.bad_data_msg
pth = os.path.join(web_object.directory, community.replace("'",''),
COMPONENT_NAME.replace(' ','_').replace('(','').replace(')','').lower() + '.html')
with open(pth, 'w') as html:
html.write(template.render( info = info_for_projects,
type = COMPONENT_NAME,
com = community.replace("'",'') ,
charts = charts,
summary_pages = ['Summary'] + comp_order ,
sections = web_object.get_summary_pages(),
description = DESCRIPTION,
metadata = web_object.metadata,
message = msg
))
def create_project_details_list (project):
"""makes a projects details section for the html
Parameters
----------
project: HeatRecovery
A HeatRecovery object thats run function has been called
Returns
-------
A dictionary with values used by summary
"""
try:
costs = '${:,.0f}'.format(project.get_NPV_costs())
except ValueError:
costs = project.get_NPV_costs()
try:
benefits = '${:,.0f}'.format(project.get_NPV_benefits())
except ValueError:
benefits = project.get_NPV_benefits()
try:
net_benefits = '${:,.0f}'.format(project.get_NPV_net_benefit())
except ValueError:
net_benefits = project.get_NPV_net_benefit()
try:
BC = '{:,.1f}'.format(project.get_BC_ratio())
except ValueError:
BC = project.get_BC_ratio()
try:
sqft = '{:,.0f}'.format(project.heat_displaced_sqft)
except ValueError:
        sqft = project.heat_displaced_sqft
return [
{'words':'Capital cost ($)',
'value': costs},
{'words':'Lifetime energy cost savings ($)',
'value': benefits},
{'words':'Net lifetime savings ($)',
'value': net_benefits},
{'words':'Benefit-cost ratio',
'value': BC},
{'words': 'Estimated square feet heated by ASHP systems',
'value': sqft},
#~ {'words': 'Average proposed capacity per residence (Btu/hr)',
#~ 'value': int(project.peak_monthly_btu_hr_hh)},
{'words': 'Excess generation capacity needed (kW)',
'value': '{:,.0f} kW'.format(
project.monthly_value_table['kWh consumed'].max()/(24 * 31))},
{'words': 'Expected average coefficient of performance (COP)',
'value': '{:,.2f}'.format(project.average_cop)},
#~ {'words':"btu/hrs",
#~ 'value': project.comp_specs['btu/hrs'] },
#~ {'words':"Cost per btu/hrs",
#~ 'value': project.comp_specs['cost per btu/hrs'] },
]
| 2.8125 | 3 |
test/utility/test_read_file_by_name.py | SoorajModi/PrOwl | 0 | 12767035 |
from prowl.utility.file import read_file_by_line
def test_read_file_by_line_returns_string():
expected: list = ["This", "is", "a", "test!"]
received: list = read_file_by_line('test/utility/test_read_file.txt')
assert expected == received
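# The assertion above implies test/utility/test_read_file.txt contains the four
# lines "This", "is", "a", "test!" and that read_file_by_line strips newlines.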
| 2.9375 | 3 |
onisite/urls_example.py | johnscancella/open-oni | 0 | 12767036 | # Copy this to urls.py. Most sites can leave this as-is. If you have custom
# apps which need routing, modify this file to include those urlconfs.
from django.conf.urls import url, include
urlpatterns = [
url('', include("core.urls")),
# If you were to add a plugin app that handles its own URLs, you might do
# something like this:
#
# url(r'^map/', include("onisite.plugins.map.urls")),
]
| 1.445313 | 1 |
test/test_pwm.py | randomsamples/rgb_mixer | 1 | 12767037 | import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles
import random
async def reset(dut):
dut.reset <= 1
await ClockCycles(dut.clk, 5)
dut.reset <= 0;
@cocotb.test()
async def test_pwm(dut):
clock = Clock(dut.clk, 10, units="us")
cocotb.fork(clock.start())
# test a range of values
for i in range(10, 255, 20):
# set pwm to this level
dut.level <= i
await reset(dut)
# wait pwm level clock steps
await ClockCycles(dut.clk, i)
# assert still high
assert(dut.out)
# wait for next rising clk edge
await RisingEdge(dut.clk)
# assert pwm goes low
assert(dut.out == 0)
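# Typically run through a cocotb Makefile, e.g. `make MODULE=test_pwm` with
# TOPLEVEL pointing at the pwm module (exact targets depend on the simulator
# setup for this repository).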
| 2.59375 | 3 |
python/algorism/sort/selection_sort.py | mts-uw/trainning-repository | 0 | 12767038 | import random
from typing import List
def selection_sort(numbers: List[int]) -> List[int]:
len_numbers = len(numbers)
for i in range(len_numbers):
min_idx = i
for j in range(i + 1, len_numbers):
if numbers[min_idx] > numbers[j]:
min_idx = j
numbers[i], numbers[min_idx] = numbers[min_idx], numbers[i]
return numbers
nums = [random.randint(0, 100) for _ in range(10)]
#nums = [2, 5, 1, 8, 7, 3]
print(selection_sort(nums))
| 3.96875 | 4 |
lib/ui/__init__.py | mattermccrea/expensive-skeleton-free | 0 | 12767039 | # this is init file
| 1.117188 | 1 |
PANDAS/PetalProject.py | abhinavmish96/LEARN_DATA_SCIENCE | 1 | 12767040 | import codecademylib
import pandas as pd
inventory = pd.read_csv('inventory.csv')
print(inventory.head(10))
staten_island = inventory.head(10)
product_request = staten_island.product_description
seed_request = inventory[(inventory.location == 'Brooklyn') & (inventory.product_type == 'seeds')]
inventory['in_stock'] = inventory.quantity.apply(lambda row: True if row > 0 else False)
inventory['total_value'] = inventory.price*inventory.quantity
combine_lambda = lambda row:'{}-{}'.format(row.product_type,row.product_description)
inventory['full_description'] = inventory.apply(combine_lambda,axis=1)
print(inventory) | 3.453125 | 3 |
src/randomExpression/ExpressionBranch.py | rossweinstein/Evolutionary-Computing-Python | 0 | 12767041 | from random import randint
from src.randomExpression.RandomOperand import RandomOperand
from src.randomExpression.RandomOperator import RandomOperator
class ExpressionBranch:
def __init__(self, size):
"""
        This class creates a random expression of a given length. The operands
will only be 0-9 or x and the operators will only be '+', '-', '/', '*'.
:param size: Determines how long the expression can be
"""
self._branch_expression = ""
self._branch_length = 1 if size is None else size
self.operator = RandomOperator()
self.operand = RandomOperand(9)
self.create_branch()
def __str__(self):
"""
:return: String the most recent constructed expression with a title
"""
return "Expression: " + self._branch_expression
def get_branch(self):
"""
        :return: String: the most recently constructed expression
"""
return self._branch_expression
def create_branch(self):
"""
Creates a new math expression within the provided length.
The expression can contain operands 0-9 and operators +, -, *, /
:return: A random mathematical expression
"""
constructed_expression = ""
for i in range(self._ensure_odd_length()):
if i % 2 == 0:
constructed_expression += self.operand.generate_operand()
else:
constructed_expression += self.operator.generate_operator()
self._branch_expression = constructed_expression
return self._branch_expression
def _ensure_odd_length(self):
"""
An equation must have an odd length (i.e. 1 + 2). If we
end up with an even number, we will have an invalid equation
so this ensures that our equation will be of the correct length.
1 is added to handle if the random number selected is 0
:return: An odd number within a given range
"""
eq_length = randint(0, self._branch_length - 1)
return eq_length + 1 if eq_length % 2 == 0 else eq_length
def is_valid_branch(self, branch):
"""
Loops through the supplied equation and determines if the equation
is of an odd length and alternates operands and operators.
:return: Whether the entire equation is valid or not
"""
if len(branch) % 2 == 0:
return False
for i in range(len(branch)):
if i % 2 == 0:
if not self.operand.valid_operand(branch[i]):
return False
else:
if not self.operator.valid_operator(branch[i]):
return False
return True
if __name__ == '__main__':
exp = ExpressionBranch(10)
print(exp)
print(exp.is_valid_branch(exp.get_branch()))
| 4.0625 | 4 |
astropy/coordinates/builtin_frames/fk5.py | REMeyer/astropy | 3 | 12767042 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ..baseframe import frame_transform_graph
from ..attributes import TimeAttribute
from ..transformations import DynamicMatrixTransform
from .. import earth_orientation as earth
from .baseradec import _base_radec_docstring, BaseRADecFrame
from .utils import EQUINOX_J2000
class FK5(BaseRADecFrame):
"""
A coordinate or frame in the FK5 system.
Note that this is a barycentric version of FK5 - that is, the origin for
this frame is the Solar System Barycenter, *not* the Earth geocenter.
The frame attributes are listed under **Other Parameters**.
{params}
Other parameters
----------------
equinox : `~astropy.time.Time`
The equinox of this frame.
"""
equinox = TimeAttribute(default=EQUINOX_J2000)
@staticmethod
def _precession_matrix(oldequinox, newequinox):
"""
Compute and return the precession matrix for FK5 based on Capitaine et
al. 2003/IAU2006. Used inside some of the transformation functions.
Parameters
----------
oldequinox : `~astropy.time.Time`
The equinox to precess from.
newequinox : `~astropy.time.Time`
The equinox to precess to.
Returns
-------
newcoord : array
The precession matrix to transform to the new equinox
"""
return earth.precession_matrix_Capitaine(oldequinox, newequinox)
FK5.__doc__ = FK5.__doc__.format(params=_base_radec_docstring)
# This is the "self-transform". Defined at module level because the decorator
# needs a reference to the FK5 class
@frame_transform_graph.transform(DynamicMatrixTransform, FK5, FK5)
def fk5_to_fk5(fk5coord1, fk5frame2):
return fk5coord1._precession_matrix(fk5coord1.equinox, fk5frame2.equinox)
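# A brief usage sketch (the coordinates are arbitrary examples):
#
#   from astropy import units as u
#   from astropy.coordinates import SkyCoord
#   c = SkyCoord(10.68 * u.deg, 41.27 * u.deg, frame=FK5(equinox='J1975'))
#   c_j2000 = c.transform_to(FK5(equinox='J2000'))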
| 2.25 | 2 |
test2.py | lyx003288/python_test | 0 | 12767043 | # import os
import sys
from multiprocessing import Pool
# import time
# from concurrent import futures
import test4
print("test2 run")
class MyLocker:
def __init__(self):
print("mylocker.__init__() called.")
@staticmethod
def acquire():
print("mylocker.acquire() called.")
@staticmethod
def unlock():
print(" mylocker.unlock() called.")
class Lockerex(MyLocker):
@staticmethod
def acquire():
print("lockerex.acquire() called.")
@staticmethod
def unlock():
print(" lockerex.unlock() called.")
def lockhelper(cls):
"""
    cls must implement the acquire and unlock static methods
:param cls:
:return:
"""
def _deco(func):
def __deco(*args, **kwargs):
print("before %s called." % func.__name__)
cls.acquire()
try:
return func(*args, **kwargs)
finally:
cls.unlock()
return __deco
return _deco
abc = 1
import logging
def test(value):
pid = 123
global abc
abc += 1
info = "Value=%s, Pid=%s, abc=%s" % (value, pid, abc)
logging.info(info)
return info, 5
if __name__ == '__main__':
print("test2")
| 3.109375 | 3 |
ingine/examples/seabattle_engine.py | sqarrt/Ingine | 0 | 12767044 | import random as rnd
EMPTY = ' '
DEAD = 'X'
HIT = '+'
MISSED = '-'
SHIP = 'O'
LETTERKEYS = [
'A',
'B',
'C',
'D',
'E',
'F',
'G',
'H',
'I',
'J'
]
def digit(key):
if key in LETTERKEYS:
return LETTERKEYS.index(key) + 1
elif 1 <= key <= 10:
return key
else:
raise ValueError
def letter(key):
if key in LETTERKEYS:
return key
elif 1 <= key <= 10:
return LETTERKEYS[key-1]
else:
raise ValueError
def area(posx, posy):
res = []
for i in range(3):
for j in range(3):
res.append((posx - 1 + i, posy - 1 + j))
return res
def around(posx, posy):
return [a for a in area(posx, posy) if
not (a[0] == posx and a[1] == posy) and (1 <= a[0] <= 10 and 1 <= a[1] <= 10)]
def cross_around(posx, posy):
res = []
for i in range(2):
res.append((posx, posy - 1 + i * 2))
res.append((posx - 1 + i * 2, posy))
return [a for a in res if (1 <= a[0] <= 10 and 1 <= a[1] <= 10)]
# Parent class for a ship
class SBGameShip:
def __init__(self, field, angle = None, length = None, posx = None, posy = None):
self.angle = 0 if angle is None else angle
self.length = 0 if length is None else length
self.posx = 0 if not posx else posx
self.posy = 0 if not posy else posy
self.field = field
self.cells = list()
def __randomcoords(self):
posx = 0
posy = 0
self.angle = rnd.randint(0, 3)
if self.angle == 0:
posx = rnd.randint(self.length, self.field.xlength)
posy = rnd.randint(1, self.field.ylength)
elif self.angle == 1:
posx = rnd.randint(1, self.field.xlength)
posy = rnd.randint(1, self.field.ylength - self.length)
elif self.angle == 2:
posx = rnd.randint(1, self.field.xlength - self.length)
posy = rnd.randint(1, self.field.ylength)
elif self.angle == 3:
posx = rnd.randint(1, self.field.xlength)
posy = rnd.randint(self.length, self.field.ylength)
self.posx = posx
self.posy = posy
self.gen_cells()
def gen_cells(self):
if self.angle == 0:
self.cells = [[self.posx - cell, self.posy, SHIP] for cell in range(self.length)]
elif self.angle == 1:
self.cells = [[self.posx, self.posy + cell, SHIP] for cell in range(self.length)]
elif self.angle == 2:
self.cells = [[self.posx + cell, self.posy, SHIP] for cell in range(self.length)]
elif self.angle == 3:
self.cells = [[self.posx, self.posy - cell, SHIP] for cell in range(self.length)]
def randompos(self):
ship_not_put = True
while (ship_not_put):
try:
self.__randomcoords()
self.field.put_ship(self)
ship_not_put = False
except ValueError:
pass
def isdead(self):
is_d = False not in [a[2] == HIT for a in self.cells]
if is_d:
for i, cell in enumerate(self.cells):
self.cells[i][2] = DEAD
return is_d
def around(self):
res = set()
own_cells = [(cell[0], cell[1]) for cell in self.cells]
for cell in own_cells:
for a in around(cell[0], cell[1]):
if a not in own_cells:
res.add(a)
res = list(res)
res = [a for a in res if 1 <= a[0] <= 10 and 1 <= a[1] <= 10]
return res
def __str__(self):
return str(self.cells)
class SBGameField:
def __init__(self):
self.ships = list()
self.hitten = list()
self.ship_hitten = list()
self.xlength = self.ylength = 10
def clean(self):
self.ships = list()
self.hitten = list()
self.ship_hitten = list()
self.xlength = self.ylength = 10
def get_all_ship_cells(self):
cells = []
for ship in self.ships:
for cell in ship.cells:
cells.append((cell[0], cell[1]))
return cells
def __getitem__(self, key):
key = digit(key)
rowitems = [None, ] + [EMPTY for cell in range(10)]
for cell in self.hitten:
if cell[0] == key:
rowitems[cell[1]] = cell[2]
for ship in self.ships:
for cell in ship.cells:
if cell[0] == key:
rowitems[cell[1]] = cell[2]
return rowitems
def field(self):
return {key: self[key] for key in LETTERKEYS}
def opfield(self):
return {key: [(cell if cell != SHIP else EMPTY) for cell in self[key]] for key in LETTERKEYS}
def __str__(self):
field = self.field()
header = ' | ' + ' | '.join(key for key in field) + ' |'
border = '-' * len(header)
content = '\n'.join(
['{:<2}'.format(str(i)) + ' |' + '|'.join(
'{:^5}'.format(field[key][i]) for key in field.keys()) + '|\n' + border for i in range(1, 11)])
return header + '\n' + border + '\n' + content
def oneline(self):
field = self.field()
content = ''.join([''.join(field[key][i] for key in field.keys()) for i in range(1, 11)])
        content = ''.join(map(lambda a: a if a != ' ' else '_', content))
return content
def op_oneline(self):
field = self.opfield()
content = ''.join([''.join(field[key][i] for key in field.keys()) for i in range(1, 11)])
        content = ''.join(map(lambda a: a if a != ' ' else '_', content))
return content
def as_opposite(self):
field = self.opfield()
header = ' | ' + ' | '.join(key for key in field) + ' |'
border = '-' * len(header)
content = '\n'.join(
['{:<2}'.format(str(i)) + ' |' + '|'.join(
'{:^5}'.format(field[key][i]) for key in field.keys()) + '|\n' + border for i in range(1, 11)])
return header + '\n' + border + '\n' + content
def can_ship(self, posx, posy):
all_cells = self.get_all_ship_cells()
banned_cells = []
for cell in all_cells:
banned_cells += area(cell[0], cell[1])
return (posx, posy) not in banned_cells
def put_ship(self, ship):
        placeable = False not in [self.can_ship(cell[0], cell[1]) for cell in ship.cells]
        if not placeable:
            raise ValueError('A ship cannot be placed here')
self.ships.append(ship)
def get_insulted(self):
return [a for a in self.ship_hitten if a[2] == HIT]
def hit(self, posx, posy):
success = False
res = None
for ship in self.ships:
for cell in ship.cells:
if (cell[0], cell[1]) == (posx, posy):
cell[2] = HIT
res = HIT
self.hitten.append([posx, posy, HIT])
success = True
                    if ship.isdead():
                        # a sunk ship reveals its halo as guaranteed misses
                        for a in ship.around():
                            self.hitten.append([a[0], a[1], MISSED])
                        res = DEAD
                        # sync previously recorded hits on this ship to DEAD
                        # (isdead() has already marked the ship's own cells)
                        for cell in self.hitten:
                            for c in ship.cells:
                                if (cell[0], cell[1]) == (c[0], c[1]):
                                    cell[2] = DEAD
return res
def ai_hit(self):
field = self.opfield()
        hc = {(a[0], a[1]) for a in self.hitten}  # a map() iterator here would be exhausted by the first membership test
insulted = self.get_insulted()
if len(insulted) == 0:
target_chosen = False
while not target_chosen:
target = (rnd.randint(1, 10), rnd.randint(1, 10))
target_chosen = target not in hc
elif len(insulted) == 1:
target = rnd.choice(cross_around(insulted[0][0], insulted[0][1]))
        elif len(insulted) > 1:
            # two or more hits on one ship: continue shooting along its axis
            xses = [cell[0] for cell in insulted]
            yses = [cell[1] for cell in insulted]
            last = next(a for a in reversed(self.ship_hitten) if a[2] == HIT)
            ca = cross_around(last[0], last[1])
            if len(set(xses)) == 1:
                # vertical ship: keep the column fixed
                ca = [a for a in ca if a[0] == xses[0]]
                ca = [a for a in ca if field[letter(a[0])][a[1]] == EMPTY]
                if not ca:
                    # dead end past the newest hit; retry from the oldest one
                    last = next(a for a in self.ship_hitten if a[2] == HIT)
                    ca = cross_around(last[0], last[1])
                    ca = [a for a in ca if a[0] == xses[0]]
                    ca = [a for a in ca if field[letter(a[0])][a[1]] == EMPTY]
            elif len(set(yses)) == 1:
                # horizontal ship: keep the row fixed
                ca = [a for a in ca if a[1] == yses[0]]
                ca = [a for a in ca if field[letter(a[0])][a[1]] == EMPTY]
                if not ca:
                    last = next(a for a in self.ship_hitten if a[2] == HIT)
                    ca = cross_around(last[0], last[1])
                    ca = [a for a in ca if a[1] == yses[0]]
                    ca = [a for a in ca if field[letter(a[0])][a[1]] == EMPTY]
            try:
                target = ca[0]
            except IndexError:
                # no candidate survived the filters; fall back to a random
                # untried cell so that target is always defined
                target = (rnd.randint(1, 10), rnd.randint(1, 10))
                while target in hc:
                    target = (rnd.randint(1, 10), rnd.randint(1, 10))
res = self.hit(*target)
return res
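# A standalone sketch (not part of SBGameField) of the hunt/target pattern
# that ai_hit implements: fire at random untried cells until something is
# hit, then probe the orthogonal neighbours of the most recent hit. All
# names below are local to this illustration.
import random

def hunt_target_demo(hits, shots, size=10):
    """hits: cells of the currently wounded ship; shots: cells already tried."""
    if not hits:  # hunt mode: any untried cell will do
        candidates = [(x, y) for x in range(1, size + 1)
                      for y in range(1, size + 1) if (x, y) not in shots]
    else:  # target mode: orthogonal neighbours of the last hit, on the board
        x, y = hits[-1]
        candidates = [(x + dx, y + dy)
                      for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1))
                      if 1 <= x + dx <= size and 1 <= y + dy <= size
                      and (x + dx, y + dy) not in shots]
    return random.choice(candidates)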
| 3.421875 | 3 |
application/KitchenMagician/kitchen_magician/groups/db/init_groups.py | AsuPaul19/Kitchen-Magician | 0 | 12767045 | groups = [
{
"img": "groups/images/vegan.png",
"name": "Vegan Group",
},
{
"img": "groups/images/ketogenic-diet.png",
"name": "Keto Group",
},
{
"img": "groups/images/vegetables.png",
"name": "Vegetarian Group",
},
{
"img": "groups/images/gluten-free.png",
"name": "Gluten Free Group",
},
{
"img": "groups/images/sushi.png",
"name": "Raw Diet",
},
{
"img": "groups/images/sardine.png",
"name": "Pescatarian Group",
},
{
"img": "groups/images/fruits.png",
"name": "Paleo Group",
},
{
"img": "groups/images/low-carb-diet.png",
"name": "Low Carb Group",
}
]
def initialize_groups():
from groups.models import Group
for group in groups:
new_group = Group(img_path=group["img"], name=group["name"])
new_group.save()
print(f'Added group - {new_group.name}') | 2.546875 | 3 |
notes/algo-ds-practice/problems/dp/egg_dropping_puzzle/egg_dropping1.py | Anmol-Singh-Jaggi/interview-notes | 6 | 12767046 |
import math
import sys
from functools import lru_cache
@lru_cache(maxsize=None)
def min_trials(num_eggs, num_floors):
    '''
    min_trials(eggs, floors) =
        1 + min over floor in 1..floors of
                max(min_trials(eggs - 1, floor - 1),   # egg breaks
                    min_trials(eggs, floors - floor))  # egg survives
    That is, try every floor as the first drop and take the best choice,
    where each floor's cost is the worse of its two outcomes
    (max() because we must plan for the worst case).
    Complexity -> O(num_eggs * num_floors^2)
    '''
# CAREFUL: Note the base case!
if num_eggs == 1 or num_floors <= 1:
return num_floors
ans = math.inf
for floor in range(1, num_floors + 1):
# Egg does not break on this floor.
sub_ans1 = min_trials(num_eggs, num_floors - floor)
# Egg breaks on this floor.
sub_ans2 = min_trials(num_eggs - 1, floor - 1)
sub_ans = 1 + max(sub_ans1, sub_ans2)
ans = min(ans, sub_ans)
return ans
def main():
sys.setrecursionlimit(1000000)
ans = min_trials(8, 16)
print(ans)
main()
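# A faster alternative (a sketch, not part of the original file): instead of
# minimizing trials directly, count how many floors `t` trials and `e` eggs
# can distinguish, f(t, e) = sum_{i=1..e} C(t, i), and return the smallest t
# with f(t, e) >= num_floors. This runs in O(num_eggs * answer) rather than
# O(num_eggs * num_floors^2).
def min_trials_binomial(num_eggs, num_floors):
    def floors_covered(trials):
        total, term = 0, 1  # term tracks C(trials, i), starting at C(trials, 0)
        for i in range(1, num_eggs + 1):
            term = term * (trials - i + 1) // i
            total += term
            if total >= num_floors:
                break
        return total

    trials = 0
    while floors_covered(trials) < num_floors:
        trials += 1
    return trials


# Both approaches should agree, e.g. on the case exercised by main():
assert min_trials_binomial(8, 16) == min_trials(8, 16)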
| 3.09375 | 3 |
xls/dslx/free_variables.py | julianviera99/xls | 0 | 12767047 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tracking of free variables references."""
import pprint
from typing import Any, Sequence, Union, List, Tuple, Optional, Dict, Text, Callable, Set
from xls.dslx.concrete_type import ConcreteType
# pylint: disable=invalid-name
NameRef = Any
NameDef = Any
BuiltinNameDef = Any
NodeToType = Any
AnyNameDef = Union[NameDef, BuiltinNameDef]
# pylint: enable=invalid-name
class FreeVariables(object):
"""Data structure that holds free variable reference data.
Has a union() operator so that expressions can build up their free variable
references as part of lexical analysis.
"""
def __init__(self, values: Optional[Dict[Text, NameRef]] = None):
self._values = values or {} # type: Dict[Text, List[NameRef]]
def __repr__(self) -> Text:
return 'FreeVariables(values={!r})'.format(self._values)
def __len__(self) -> int:
"""Returns the number of free variable names."""
return len(self._values)
def __str__(self) -> Text:
return pprint.pformat(self._values)
def _refs_to_def(self, refs: Sequence[NameRef]) -> AnyNameDef:
return next(iter(refs)).name_def
def keys(self) -> Set[Text]:
return set(self._values.keys())
def drop_defs(self, should_drop: Callable[[NameDef],
bool]) -> 'FreeVariables':
return FreeVariables({
name: refs
for name, refs in self._values.items()
if not should_drop(self._refs_to_def(refs))
})
def get_name_def_tups(self) -> List[Tuple[Text, AnyNameDef]]:
"""Returns a list of (name, name_def) tuples."""
return [(name, self._refs_to_def(refs))
for (name, refs) in sorted(self._values.items())]
def get_name_defs(self) -> List[AnyNameDef]:
return [
self._refs_to_def(refs) for (_, refs) in sorted(self._values.items())
]
def get_name_type_pairs(self, node_to_type: NodeToType
) -> List[Tuple[Text, ConcreteType]]:
return [(name, node_to_type[name_def])
for (name, name_def) in self.get_name_def_tups()]
def union(self, other: 'FreeVariables') -> 'FreeVariables':
"""Returns the union of the references in self with those in other."""
# PyLint doesn't realize we're accessing private members of the same class.
# pylint: disable=protected-access
result = FreeVariables(dict((k, list(v)) for k, v in self._values.items()))
for key, refs in other._values.items():
if key in result._values:
result._values[key] += refs
else:
result._values[key] = list(refs)
return result
# pylint: enable=protected-access
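# A minimal usage sketch (not part of the original module): how union()
# accumulates references per name. _FakeRef stands in for a real NameRef.
if __name__ == '__main__':
  class _FakeRef(object):

    def __init__(self, name_def):
      self.name_def = name_def

  a = FreeVariables({'x': [_FakeRef('def_x')]})
  b = FreeVariables({'x': [_FakeRef('def_x')], 'y': [_FakeRef('def_y')]})
  merged = a.union(b)
  assert merged.keys() == {'x', 'y'}
  assert merged.get_name_def_tups() == [('x', 'def_x'), ('y', 'def_y')]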
| 2.390625 | 2 |
tests/test_expect_column_values_to_not_be_null.py | dm-drogeriemarkt/pyspark-expectations | 0 | 12767048 |
import pytest
from pyspark.sql.functions import lit
from pyspark_expectations import expectations  # noqa: F401 - presumably attaches the expect_* methods used on df below
def test_when_some_values_are_null(df):
res = df.expect_column_values_to_not_be_null("height")
assert not res["success"]
def test_when_allowed_33_percent_null_values_but_there_is_more_null_values(df):
    # 33.33...% of the values are null, 66.67...% are not
res = df.expect_column_values_to_not_be_null("height", 0.33)
assert not res["success"]
def test_when_allowed_34_percent_null_values_and_there_is_less_null_values(df):
    # 33.33...% of the values are null, 66.67...% are not
res = df.expect_column_values_to_not_be_null("height", 0.34)
assert res["success"]
def test_when_all_values_not_null(df):
df_all_not_null = df.withColumn("height", lit("bla"))
res = df_all_not_null.expect_column_values_to_not_be_null("height")
assert res["success"]
def test_value_error_when_unexpected_percent_has_false_format(df):
with pytest.raises(ValueError):
df.expect_column_values_to_not_be_null("height", unexpected_percent="bla")
with pytest.raises(ValueError):
df.expect_column_values_to_not_be_null("height", unexpected_percent=-5)
with pytest.raises(ValueError):
df.expect_column_values_to_not_be_null("height", unexpected_percent=5)
def test_when_dataframe_is_empty(df):
df_empty = df.where("height == 3.3")
assert df_empty.expect_column_values_to_not_be_null(
"height", unexpected_percent=0.5
)["success"]
| 2.75 | 3 |
biot_1.5/Datos.py | javacasm/biot | 0 | 12767049 | # Datos
# OBJECT 'SERIALIZATION' (to handle saving data)
try:
import cPickle as pickle
except ImportError:
import pickle
# mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm
# DATA FILE CONTROL AND MANAGEMENT FUNCTIONS
# mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm
def salvar_Backup_datos(informacion_para_salvar, nombre_con_ruta):
    ''' Saves the data and automatically creates a copy under the same name but with a .bak extension '''
    try:
        ficheroDatos = open(nombre_con_ruta, "wb")
        pickle.dump(informacion_para_salvar, ficheroDatos, protocol=-1)  # -1: automatically selects the highest protocol available
        ficheroDatos.close()
        # AUTOMATIC CREATION OF .bak COPIES:
        # split off the extension of the received file name and swap it for "bak"
        longitud_extension = len(nombre_con_ruta.split(".")[-1])
        nombre_con_ruta_backup = nombre_con_ruta[:-longitud_extension] + "bak"
        ficheroDatos_backup = open(nombre_con_ruta_backup, "wb")
        pickle.dump(informacion_para_salvar, ficheroDatos_backup, protocol=-1)
        ficheroDatos_backup.close()  # fixed: close the backup handle, not the already-closed main file
        return True
    except Exception:
        print("---------------------------")
        print("Error saving backup >> ", nombre_con_ruta)
        return False
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
def cargar_datos_desde_fichero(nombre_con_ruta):
    ''' Restores the backup data from file (used when restarting) '''
    datos = []
    try:
        ficheroDatos = open(nombre_con_ruta, "rb")
        datos = pickle.load(ficheroDatos)
        ficheroDatos.close()
        return True, datos
    except Exception:
        print("---------------------------")
        print("error loading the backup records")
        return False, []
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
def ListaUnica(lista, destino):
    ''' Recursively flattens a nested list into destino and returns it '''
    for n in range(len(lista)):
        if isinstance(lista[n], list):
            ListaUnica(lista[n], destino)
        else:
            destino.append(lista[n])
    return destino
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
def convertir_Datos_to_TXT(datos, nombreDatosFile, cabecera=""):
    '''
    RECEIVES A LIST or A BUNDLE (a list of lists) and the name under which to save the TXT.
    Converts the data from a series of lists into a plain-text format
    separated into columns.
    Optionally, a text header can be supplied for the data.
    This is the information sent by email to the subscribers.
    '''
    # datos = a simple list, or a list of lists
    nombreFileSalida = nombreDatosFile
    outfile = open(nombreFileSalida, 'w')  # 'w' opens the file for writing
    if cabecera != "":  # if there is header information, write it before the data so that line is not numbered
        outfile.write(cabecera)
        outfile.write("\n\n")
        # and leave the file open to keep writing the data itself
    if datos == []:  # an empty list (which can happen) would cause errors below,
        # so write a line reporting it, close the file and return
        outfile.write("\nNo information available\n")
        outfile.close()
        return True
    try:
        for x in range(len(datos)):
            indice = ("00000" + str(x))[-5:]  # zero-pad the row index to 5 digits
            linea = indice + "\t"
            lista_unica = ListaUnica(datos[x], [])
            for elemento in lista_unica:
                if isinstance(elemento, (int, float)):
                    linea += "%.2f" % float(elemento) + "\t"
                else:
                    linea += str(elemento) + "\t"
            linea += "\n"
            outfile.write(linea)
        outfile.close()
        return True
    except Exception:
        print("---------------------------")
        outfile.close()  # close in case it was left open
        # reopen in text mode ('w', not 'wb': we are writing str) and write an error message
        outfile = open(nombreFileSalida, 'w')
        linea = ("\n\nThere was an error converting the data\n\n"
                 "Contact EXPERIMENTO BIO on Telegram with the command /DATA_ERROR_"
                 + nombreDatosFile[:-4]
                 + " and request the data in RAW format if you wish\n")
        outfile.write(linea)
        outfile.close()
        return False
#-----------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------
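# A minimal usage sketch (not part of the original module): a save/load
# round trip through the functions above. The file names are only examples.
if __name__ == "__main__":
    registros = [["2024-01-01", 21.5], ["2024-01-02", 22.1]]
    if salvar_Backup_datos(registros, "demo.dat"):
        ok, recuperados = cargar_datos_desde_fichero("demo.dat")
        assert ok and recuperados == registros
        convertir_Datos_to_TXT(recuperados, "demo.txt", cabecera="Demo data")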
| 2.703125 | 3 |
tests/unit/market_data/fx_rate.py | amaas-fintech/amaas-core-sdk-python | 0 | 12767050 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import unittest
from amaascore.market_data.fx_rate import FXRate
from amaascore.tools.generate_market_data import generate_fx_rate
class FXRateTest(unittest.TestCase):
def setUp(self):
self.longMessage = True # Print complete error message on failure
self.fx_rate = generate_fx_rate()
self.asset_id = self.fx_rate.asset_id
def tearDown(self):
pass
def test_FXRate(self):
self.assertEqual(type(self.fx_rate), FXRate)
def test_FXRateToDict(self):
fx_rate_dict = self.fx_rate.__dict__
self.assertEqual(type(fx_rate_dict), dict)
self.assertEqual(fx_rate_dict.get('asset_id'), self.asset_id)
def test_FXRateToJSON(self):
fx_rate_json = self.fx_rate.to_json()
self.assertEqual(fx_rate_json.get('asset_id'), self.asset_id)
        # If fx_rate_json is valid JSON, this will run without serialisation errors
json_asset_id = json.loads(json.dumps(fx_rate_json, ensure_ascii=False)).get('asset_id')
self.assertEqual(json_asset_id, self.asset_id)
if __name__ == '__main__':
unittest.main()
| 2.671875 | 3 |