max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
input_interpretation.py | RaverJay/fastcov | 8 | 12759251 | <reponame>RaverJay/fastcov
import argparse
import os
from typing import NamedTuple
from custom_logging import error, log
class BamFileChunk(NamedTuple):
    """A genomic interval within one reference sequence of a BAM file.

    Coordinates are 0-based and half-open (pythonic): ``start`` is inclusive,
    ``end`` is exclusive, and ``length == end - start`` (see the conversion at
    the end of parse_or_infer_reference_and_position).
    """
    reference: str  # reference (contig) name, e.g. "chr1"
    start: int  # 0-based inclusive start position
    end: int  # exclusive end position
    length: int  # number of positions covered (end - start)
def parse_args():
    """Build the command line interface and parse sys.argv.

    Returns:
        argparse.Namespace: the parsed arguments (bamfile list, optional
        position string, plot/csv output options).
    """
    parser = argparse.ArgumentParser(
        description='Plot the coverage based on some bam files.')
    parser.add_argument("bamfile",
                        nargs='+',
                        help="Alignment files to include in the coverage plot.")
    parser.add_argument("-p", "--position",
                        help="Specify a genomic position to plot exclusively. Format: <ref_name>[:<start>-<stop>]\n"
                        "Coordinates are 1-based and inclusive. "
                        "Start and/or stop are optional with fallbacks 1 and <length_of_ref> respectively "
                        "(i.e. 'chr1', 'chr1:-200', 'chr1:100-' and 'chr1:100-200 are legal)")
    parser.add_argument("-l", "--logscale", action='store_true',
                        help="Use logarithmic scale on y-axis.")
    parser.add_argument("-o", "--output_file",
                        help="Specify plot output filename. File extension defines the format "
                        "(default: fastcov_output.pdf)")
    parser.add_argument("-c", "--csv_out",
                        help="Specify csv data output filename. Use '-' to write to stdout. Will disable plot output by default, "
                        "specify --output_file to re-enable plot output.")
    parser.add_argument("--csv_no_header", action='store_true',
                        help="Suppress column names in csv output.")
    args = parser.parse_args()
    return args
def check_input(args):
    """Validate the bam file arguments.

    Ensures each given path is an existing file, skips duplicate entries,
    and builds a missing ``.bai`` index via ``samtools index`` when needed.

    Returns:
        tuple: (bam_files, num_bam_files) - the de-duplicated file list and
        its length. Calls ``error()`` (which aborts) for a missing file or a
        failing samtools invocation.
    """
    import shlex  # local import: only needed to safely quote shell arguments

    bam_files = []
    for bam_file in args.bamfile:
        if not os.path.isfile(bam_file):
            error(f'Not a file: {bam_file}')
        if bam_file in bam_files:
            log(f'WARNING: Skipping duplicate bam file input: {bam_file}')
            continue
        if not os.path.isfile(bam_file + '.bai'):
            log(
                f'Bam index missing for file: {bam_file}. Trying "samtools index {bam_file}" ...')
            # Quote the path so filenames containing spaces or shell
            # metacharacters cannot break (or inject into) the command.
            ret = os.system(f'samtools index {shlex.quote(bam_file)}')
            if ret != 0:
                error(
                    f'ERROR: samtools index returned exit code {ret}')
        # checked
        bam_files.append(bam_file)
    num_bam_files = len(bam_files)
    log(f'Number of .bam files: {num_bam_files}')
    return bam_files, num_bam_files
def parse_or_infer_reference_and_position(args, bam_files, pysam_bam_file) -> BamFileChunk:
    """Determine the genomic region to plot.

    Uses ``args.position`` (format ``<ref_name>[:<start>-<stop>]``, 1-based
    inclusive) when given; otherwise falls back to the whole first reference
    of the first alignment. Returns a BamFileChunk in 0-based half-open
    (pythonic) coordinates.
    """
    # positions are 1-based (inclusive) in this block
    if args.position:
        if ':' in args.position:
            # with coords
            ref_name, start_stop_str = args.position.split(':')
            if '-' not in start_stop_str:
                error('Please provide a start and/or stop position. '
                      'When providing only a start or stop position, '
                      'indicate the intervals side to use by pre- or postfixing the value with "-" '
                      '(e.g. "100-" or "-200"). '
                      'The other side will be inferred from the given reference')
            pos_start_str, pos_end_str = start_stop_str.split('-')
            # empty start string falls back to 1 (reference begin)
            pos_start = convert_to_int_or_fallback(pos_start_str,
                                                   fallback=1)
            # empty stop string falls back to the full reference length.
            # NOTE(review): ref_name is not checked against
            # pysam_bam_file.references in this branch, so an unknown name
            # surfaces as a raw ValueError from .index() - confirm intended.
            pos_end = convert_to_int_or_fallback(pos_end_str,
                                                 fallback=pysam_bam_file.lengths[pysam_bam_file.references.index(ref_name)])
            if pos_start < 1:
                error(f'Illegal start position: {pos_start}')
            if pos_start > pos_end:
                error(
                    f'Start position is greater than end position: {pos_start} vs {pos_end}')
        else:
            # no coords: whole reference requested by name
            ref_name = args.position
            log(f'No coordinates given for reference {ref_name}, assuming whole reference. '
                f'Inferring length from first alignment: {bam_files[0]}')
            if ref_name not in pysam_bam_file.references:
                error(
                    f'Reference {ref_name} not found in alignment {bam_files[0]}')
            # get start/stop
            pos_start = 1
            pos_end = pysam_bam_file.lengths[pysam_bam_file.references.index(
                ref_name)]
    else:
        # no position: default to the entire first reference
        log(
            f'No position given, assuming whole reference. Taking first reference name from first alignment: {bam_files[0]}')
        ref_name = pysam_bam_file.references[0]
        pos_start = 1
        pos_end = pysam_bam_file.lengths[pysam_bam_file.references.index(
            ref_name)]
    # convert from 1-based inclusive (genomic) to 0-based half open interval (pythonic)
    pos_start -= 1
    pos_len = pos_end - pos_start
    return BamFileChunk(end=pos_end, start=pos_start, reference=ref_name, length=pos_len)
def convert_to_int_or_fallback(string: str, fallback: int):
    """Return ``int(string)``, or *fallback* when the string is not a valid integer."""
    try:
        result = int(string)
    except ValueError:
        result = fallback
    return result
| 2.6875 | 3 |
0x07-python-test_driven_development/6-main.py | malu17/alx-higher_level_programming | 1 | 12759252 | <gh_stars>1-10
#!/usr/bin/python3
# Demo driver for the 6-max_integer task. __import__ is used because the
# module filename starts with a digit, which a normal import statement
# cannot express.
max_integer = __import__('6-max_integer').max_integer

print(max_integer([1, 2, 3, 4]))  # max at the end of the list
print(max_integer([1, 3, 4, 2]))  # max in the middle of the list
| 2.71875 | 3 |
training/train.py | CMSAachen3B/Machine-Learning | 3 | 12759253 | #!/usr/bin/env python
import numpy as np
np.random.seed(1234)
from collections import Counter
import argparse
import yaml
import os
import sys
from sklearn import model_selection
base = os.path.normpath(os.path.join(os.path.abspath(__file__), "../.."))
sys.path.append(base)
from utils.model import KerasModel, Config
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau
def multiclassNeuralNetwork():
"""
Argument Parser for the training step. Mainly modifying hyperparameter.
"""
parser = argparse.ArgumentParser(description="Perform multiclassification NN training with Keras.",
fromfile_prefix_chars="@", conflict_handler="resolve")
parser.add_argument("--fold", type=int, default=0, choices=[0, 1],
help="Training fold. [Default: %(default)s]")
parser.add_argument("--epochs", default=200,
help="Number of training epochs. [Default: %(default)s]")
parser.add_argument("--learning-rate", default=0.000005,
help="Learning rate of NN. [Default: %(default)s]")
parser.add_argument("--batch-size", default=1000,
help="Batch size for training. [Default: %(default)s]")
parser.add_argument("--early-stopping", default=False, action='store_true',
help="Stop training if loss increases again. [Default: %(default)s]")
args = parser.parse_args()
config = Config()
folder = base + '/NumpyConversion/'
# load trainings data and weights
data = np.load(folder + 'data_fold0.npz'.format(args.fold))
x = data['x']
y = data['y']
w = data['w']
w = w * config.load["global_weight"]
# Split data in training and testing
x_train, x_test, y_train, y_test, w_train, w_test = model_selection.train_test_split(
x, y, w, test_size=1.0 - config.load["train_test_split"], random_state=1234)
folder_result = base + '/results/'
if not os.path.exists(folder_result):
os.makedirs(folder_result)
np.save(folder_result + 'x_fold{}_test.npy'.format(args.fold), x_test)
np.save(folder_result + 'y_fold{}_test.npy'.format(args.fold), y_test)
def get_class_weights(y):
counter = Counter(y)
majority = 1. # max(counter.values())
return {cls: float(majority / count) for cls, count in counter.items()}
# Add callbacks
callbacks = []
# callbacks.append(TensorBoard(log_dir='/home/mf278754/master/logs',
# histogram_freq=1, write_graph=True, write_images=True))
callbacks.append(
ModelCheckpoint(filepath=base + "/test_fold{}_multiclass_model.h5".format(args.fold), save_best_only=True, verbose=1))
if args.early_stopping:
callbacks.append(EarlyStopping(monitor='val_loss',
min_delta=0,
patience=2,
verbose=0, mode='auto'))
callbacks.append(ReduceLROnPlateau(
monitor='val_loss', factor=0.1, patience=5))
# preprocessing
import pickle
from sklearn import preprocessing
scaler = preprocessing.StandardScaler().fit(x_train)
x_train_scaled = scaler.transform(x_train)
x_test_scaled = scaler.transform(x_test)
path_preprocessing = os.path.join(
base, "fold{}_keras_preprocessing.pickle".format(
args.fold))
pickle.dump(scaler, open(path_preprocessing, 'wb'))
# create KerasModel instance
model = KerasModel()
print "\033[1;33mSetting up model...\033[1;m"
# call setter of basic model parameters
model.lr = args.learning_rate
model.modelname = str(args.fold)
model.plot_model = False
# print model parameter
print "Number of training features is: ", model.nfeatures
print "Number of target classes is: ", model.nclasses
print "Learning rate is set to: ", model.lr
print "Fully trained model name is set to: ", model.modelname
print "Model plotting is set to: ", model.plot_model
# setup model with new model attributes
keras_model = model.multiclass_MSSM_HWW_model()
print "\033[1;42mModel setup was successful!\033[1;m"
# call keras fit function to start the training
fit = keras_model.fit(
x_train_scaled,
y_train,
sample_weight=w_train,
validation_data=(x_test_scaled, y_test, w_test),
batch_size=args.batch_size,
epochs=args.epochs,
shuffle=True,
callbacks=callbacks,
verbose=2)
# dump loss and accuracy to numpy arrays
np.save(folder_result + 'loss.npy', fit.history["loss"])
np.save(folder_result + 'val_loss.npy', fit.history["val_loss"])
np.save(folder_result + 'acc.npy', fit.history["acc"])
np.save(folder_result + 'val_acc.npy', fit.history["val_acc"])
if __name__ == "__main__":
    # Entry point: run the multiclass NN training when executed as a script.
    multiclassNeuralNetwork()
| 2.3125 | 2 |
markdown/django/account_alert/web/accounts/middleware.py | hdknr/annotated-django | 0 | 12759254 | # -*- coding: utf-8 -*-
from django.http import HttpResponseRedirect
from django.utils.timezone import now
class AccountAlertMiddleware(object):
    """Redirect authenticated users to their first pending forced account alert."""

    def process_request(self, request):
        """Return a redirect to the alert URL, or None to continue normally."""
        user = getattr(request, 'user', None)
        if not (user and user.is_authenticated()):
            return
        # Forced, not-yet-executed, overdue alerts - except for the alert's
        # own page (otherwise we would redirect in an endless loop).
        pending = user.accountalert_set.filter(
            force=True,
            executed_at=None,
            due_on__lt=now()).exclude(url=request.path)
        if pending.count() > 0:
            return HttpResponseRedirect(pending[0].url)
| 2.125 | 2 |
swarmlib/__init__.py | Geetha-github-cloud/swarmlib | 0 | 12759255 | <gh_stars>0
# ------------------------------------------------------------------------------------------------------
# Copyright (c) <NAME>. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
from .aco4tsp.aco_problem import ACOProblem
from .fireflyalgorithm.firefly_problem import FireflyProblem
from .cuckoosearch.cuckoo_problem import CuckooProblem
from .pso.pso_problem import PSOProblem
from .abc.abc_problem import ABCProblem
from .util.functions import FUNCTIONS
| 1.398438 | 1 |
modules/Logger.py | snroptimus/BitfinexLendingBot | 0 | 12759256 | # coding=utf-8
import atexit
import datetime
import io
import json
import sys
import time
import ConsoleUtils
import modules.Configuration as Config
from RingBuffer import RingBuffer
from Notify import send_notification
class ConsoleOutput(object):
    """Write bot output to stderr, keeping a sticky one-line status at the bottom."""

    def __init__(self):
        self._status = ''
        atexit.register(self._exit)

    def _exit(self):
        # Pad in case the shell printed a ^C over our status, then clear it.
        self._status += ' '
        self.status('')

    def status(self, msg, time='', days_remaining_msg=''):
        """Replace the sticky status line with *msg*, truncated to terminal width."""
        text = str(msg)
        cols = ConsoleUtils.get_terminal_size()[0]
        if msg != '' and len(text) > cols:
            # truncate so the terminal does not wrap and bloat the console
            text = str(msg)[:cols - 4] + '...'
        pad = len(self._status) - len(text)
        sys.stderr.write('\r' + text + ' ' * pad + '\b' * pad)
        self._status = text

    def printline(self, line):
        """Print *line* above the sticky status line, then redraw the status."""
        pad = ' ' * (len(self._status) - len(line))
        sys.stderr.write('\r' + line + pad + '\n' + self._status)
class JsonOutput(object):
    """Collect bot status, per-coin values and a bounded log into a JSON file."""
    def __init__(self, file, logLimit, exchange=''):
        self.jsonOutputFile = file  # destination path of the JSON dump
        self.jsonOutput = {}
        self.clearStatusValues()
        # keep only the most recent logLimit log lines
        self.jsonOutputLog = RingBuffer(logLimit)
        self.jsonOutput['exchange'] = exchange
        self.jsonOutput['label'] = Config.get("BOT", "label", "Lending Bot")
    def status(self, status, time, days_remaining_msg):
        """Record the latest status line and its timestamp."""
        self.jsonOutput["last_update"] = time + days_remaining_msg
        self.jsonOutput["last_status"] = status
    def printline(self, line):
        """Append a log line (newlines flattened) to the ring buffer."""
        line = line.replace("\n", ' | ')
        self.jsonOutputLog.append(line)
    def writeJsonFile(self):
        """Serialize the collected state to the configured JSON file."""
        with io.open(self.jsonOutputFile, 'w', encoding='utf-8') as f:
            self.jsonOutput["log"] = self.jsonOutputLog.get()
            # Python 2: io.open requires a unicode payload, hence unicode(...)
            f.write(unicode(json.dumps(self.jsonOutput, ensure_ascii=False, sort_keys=True)))
            f.close()  # redundant: the with-block already closes f
    def addSectionLog(self, section, key, value):
        """Store *value* under jsonOutput[section][key], creating levels as needed."""
        if section not in self.jsonOutput:
            self.jsonOutput[section] = {}
        if key not in self.jsonOutput[section]:
            self.jsonOutput[section][key] = {}
        self.jsonOutput[section][key] = value
    def statusValue(self, coin, key, value):
        """Store a per-coin raw-data value (stringified)."""
        if coin not in self.jsonOutputCoins:
            self.jsonOutputCoins[coin] = {}
        self.jsonOutputCoins[coin][key] = str(value)
    def clearStatusValues(self):
        """Reset the per-coin and output-currency sections."""
        self.jsonOutputCoins = {}
        self.jsonOutput["raw_data"] = self.jsonOutputCoins
        self.jsonOutputCurrency = {}
        self.jsonOutput["outputCurrency"] = self.jsonOutputCurrency
    def outputCurrency(self, key, value):
        """Store an output-currency value (stringified)."""
        self.jsonOutputCurrency[key] = str(value)
class Logger(object):
    """Front-end logger that writes either to the console or to a JSON file.

    JSON output is chosen when both a file path and a log size are configured;
    otherwise output goes to the interactive ConsoleOutput.
    """
    def __init__(self, json_file='', json_log_size=-1, exchange=''):
        self._lent = ''
        self._daysRemaining = ''
        if json_file != '' and json_log_size != -1:
            self.output = JsonOutput(json_file, json_log_size, exchange)
        else:
            self.output = ConsoleOutput()
        self.refreshStatus()
    @staticmethod
    def timestamp():
        """Current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
        ts = time.time()
        return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    def log(self, msg):
        """Write a timestamped log line and redraw the status."""
        log_message = "{0} {1}".format(self.timestamp(), msg)
        self.output.printline(log_message)
        self.refreshStatus()
    def log_error(self, msg):
        """Write a timestamped error line; in JSON mode also echo to stdout."""
        log_message = "{0} Error {1}".format(self.timestamp(), msg)
        self.output.printline(log_message)
        if isinstance(self.output, JsonOutput):
            print log_message
        self.refreshStatus()
    def offer(self, amt, cur, rate, days, msg):
        """Log the placement of a loan offer, including the API response."""
        line = self.timestamp() + ' Placing ' + str(amt) + ' ' + str(cur) + ' at ' + str(
            float(rate) * 100) + '% for ' + days + ' days... ' + self.digestApiMsg(msg)
        self.output.printline(line)
        self.refreshStatus()
    def cancelOrder(self, cur, msg):
        """Log the cancellation of an order, including the API response."""
        line = self.timestamp() + ' Canceling ' + str(cur) + ' order... ' + self.digestApiMsg(msg)
        self.output.printline(line)
        self.refreshStatus()
    def refreshStatus(self, lent='', days_remaining=''):
        """Redraw the status line, remembering the last non-empty values."""
        if lent != '':
            self._lent = lent
        if days_remaining != '':
            self._daysRemaining = days_remaining
        self.output.status(self._lent, self.timestamp(), self._daysRemaining)
    def addSectionLog(self, section, key, value):
        # Forward only when the active output supports it (JSON mode).
        if hasattr(self.output, 'addSectionLog'):
            self.output.addSectionLog(section, key, value)
    def updateStatusValue(self, coin, key, value):
        # Forward only when the active output supports it (JSON mode).
        if hasattr(self.output, 'statusValue'):
            self.output.statusValue(coin, key, value)
    def updateOutputCurrency(self, key, value):
        # Forward only when the active output supports it (JSON mode).
        if hasattr(self.output, 'outputCurrency'):
            self.output.outputCurrency(key, value)
    def persistStatus(self):
        """Flush the JSON state to disk and reset per-cycle values (JSON mode only)."""
        if hasattr(self.output, 'writeJsonFile'):
            self.output.writeJsonFile()
        if hasattr(self.output, 'clearStatusValues'):
            self.output.clearStatusValues()
    @staticmethod
    def digestApiMsg(msg):
        """Extract a human-readable 'message' or 'error' field from an API response."""
        m = ""
        try:
            m = (msg['message'])
        except KeyError:
            pass
        try:
            m = (msg['error'])
        except KeyError:
            pass
        return m
    @staticmethod
    def notify(msg, notify_conf):
        """Send *msg* through the notification channels when enabled."""
        if notify_conf['enable_notifications']:
            send_notification(msg, notify_conf)
| 2.484375 | 2 |
applications/popart/bert/utils/iteration.py | kew96/GraphcoreExamples | 0 | 12759257 | <gh_stars>0
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import logging
from functools import partial
from collections import deque
import numpy as np
from .distributed import average_distributed_deques
logger = logging.getLogger('BERT')
def reduce_metric(args, anchors, metrics, mean=False):
    """Average one or more anchored metrics over the accumulated batches.

    Args:
        args: namespace providing gradient_accumulation_factor and batches_per_step.
        anchors: mapping from anchor name to its value array.
        metrics: list of anchor names; multiple entries are summed element-wise.
        mean: if True, additionally divide by the number of metrics.

    Returns:
        The scalar mean of the (summed) metric divided by the accumulation count.
    """
    accumulated_stats = args.gradient_accumulation_factor * args.batches_per_step
    if len(metrics) > 1:
        # Sum element-wise across all metrics. The previous np.add(*arrays)
        # only worked for exactly two metrics: np.add's third positional
        # argument is `out`, and four or more raise a TypeError.
        metric = np.sum([anchors[m] for m in metrics], axis=0)
        if mean:
            accumulated_stats *= len(metrics)
    else:
        metric = anchors[metrics[0]]
    return np.mean(metric / accumulated_stats)
def output_stats(args, anchors, losses, accuracies):
    """Return the per-step (mean loss, mean accuracy) reduced from the anchors."""
    loss = reduce_metric(args, anchors, losses)
    accuracy = reduce_metric(args, anchors, accuracies, mean=True)
    return loss, accuracy
class Iteration:
    """Track training/inference progress: step counts, throughput and metrics.

    Keeps ring buffers (deques) of the most recent steps' durations, cycle
    counts, losses and accuracies, and mirrors them to a TensorBoard-style
    writer when one is supplied.
    """
    def __init__(self, args, steps_per_epoch, writer, recording_steps=None):
        self.epoch = args.continue_training_from_epoch
        self.count = self.epoch * steps_per_epoch
        self.micro_batch_size = args.micro_batch_size
        self.batches_per_step = args.batches_per_step
        self.gradient_accumulation_factor = args.gradient_accumulation_factor
        self.replication_factor = args.replication_factor
        self.training = not args.inference
        self.epochs = args.epochs if self.training else args.epochs_inference
        self.training_steps = args.training_steps
        if self.epochs is None:
            if args.training_steps is None:
                # Bugfix: the exception was previously constructed but never
                # raised, letting execution fall through to a confusing
                # TypeError in math.ceil(None / steps_per_epoch).
                raise RuntimeError("Either epochs or training_steps need to be specified.")
            self.epochs = math.ceil(args.training_steps / steps_per_epoch)
        self.epochs_per_save = args.epochs_per_save
        self.steps_per_log = args.steps_per_log
        self.steps_per_epoch = steps_per_epoch
        self.recording_steps = self.steps_per_epoch if recording_steps is None else recording_steps
        self.total_steps = self.steps_per_epoch * self.epochs
        self.writer = writer
        self.use_packed_sequence_format = args.use_packed_sequence_format
        self.use_popdist = args.use_popdist
        self.popdist_size = args.popdist_size
        # This should get overridden but will ensure we can always write a scalar to TB.
        self.learning_rate = 0
        self.total_sequences_so_far = 0
        self.sequences_per_step = deque(maxlen=self.recording_steps)
        self.durations = deque(maxlen=self.recording_steps)
        self.cycles = deque(maxlen=self.recording_steps)
        self.losses = deque(maxlen=self.recording_steps)
        self.accuracies = deque(maxlen=self.recording_steps)
        self.stats_fn = output_stats
        if args.use_popdist:
            self.distributed = True
            self.steps_per_distributed_reduce = 1
        else:
            self.distributed = False
    @property
    def throughput(self):
        """Sequences per second for each recorded step."""
        return np.divide(self.sequences_per_step, self.durations)
    def add_scalar(self, name, scalar):
        """Write a scalar to the writer, keyed by sequences or step count."""
        if self.writer is not None:
            if self.use_packed_sequence_format:
                self.writer.add_scalar(name, scalar, self.total_sequences_so_far)
            else:
                self.writer.add_scalar(name, scalar, self.count)
    def add_stats(self, duration, hw_cycles, data, *args):
        """Record one step's duration, sequence count, cycles and metrics."""
        self.durations.append(duration)
        if self.use_packed_sequence_format:
            # To count the number of samples in each batch first
            # expand the micro-batch dimension (flattened on device)
            input_mask = data["input_mask"]
            new_shape = list(input_mask.shape[:-1]) + [self.micro_batch_size, -1]
            input_mask = input_mask.reshape(new_shape)
            sequences_per_microbatch = input_mask.max(-1).sum(-1)
            sequences_in_step = int(sequences_per_microbatch.sum())
            args = sequences_per_microbatch, sequences_in_step, *args
        else:
            sequences_in_step = self.batches_per_step * self.gradient_accumulation_factor * \
                self.replication_factor * self.micro_batch_size  # noqa
            if self.use_popdist:
                sequences_in_step = sequences_in_step * self.popdist_size
        self.total_sequences_so_far += sequences_in_step
        self.sequences_per_step.append(sequences_in_step)
        if hw_cycles:
            self.cycles.append(hw_cycles)
        if self.training or self.use_packed_sequence_format:
            self.add_training_stats(*args)
            if self.distributed and (self.count % self.steps_per_distributed_reduce) == 0:
                self.average_distributed_stats()
            self.add_scalar("defaultLearningRate", self.learning_rate)
            self.add_scalar("throughput", np.average(self.throughput))
            if self.use_packed_sequence_format:
                self.add_scalar("update_steps", self.count)
            self.write_training_stats()
        else:
            self.add_inference_stats(*args)
    def add_training_stats(self, *args):
        """Reduce and buffer the step's loss and accuracy."""
        loss, accuracy = self.stats_fn(*args)
        self.losses.append(loss)
        self.accuracies.append(accuracy)
    def write_training_stats(self):
        """Write buffered loss/accuracy averages to the writer."""
        self.add_scalar("loss", np.average(self.losses))
        self.add_scalar("accuracy", np.average(self.accuracies))
    def add_inference_stats(self, *args):
        # No-op in the base class; subclasses record inference metrics.
        pass
    def epoch_string(self):
        """Progress prefix: step/epoch counters (and sequences for packed data)."""
        if self.training_steps is not None:
            status_string = f"Iteration: {self.count:5}/{int(self.training_steps)} "
        else:
            status_string = \
                f"Iteration: {self.count:6} " \
                f"Epoch: {self.count/self.steps_per_epoch:6.2f}/{self.epochs} "
        if self.use_packed_sequence_format:
            status_string += \
                f"Sequences processed: {self.total_sequences_so_far/1000.0:6.1f}k "
        return status_string
    def training_metrics_string(self):
        """Average loss/accuracy over the recording window."""
        avg = np.average
        status_string = \
            f"Loss: {avg(self.losses):5.3f} " \
            f"Accuracy: {avg(self.accuracies):5.3f} "
        return status_string
    def optimizer_string(self):
        return f"Learning Rate: {self.learning_rate:.5f} "
    def throughput_string(self):
        """Average duration, throughput and (if recorded) hardware cycles."""
        avg = np.average
        status_string = \
            f"Duration: {avg(self.durations):6.4f} s " \
            f"Throughput: {avg(self.throughput):6.1f} sequences/s "
        if self.cycles:
            status_string += f"Cycles: {int(avg(self.cycles))} "
        return status_string
    def average_distributed_stats(self):
        """Average the most recent buffered stats across popdist replicas."""
        replica_avg = partial(average_distributed_deques, N=self.steps_per_distributed_reduce)
        self.durations = replica_avg(self.durations)
        if self.cycles:
            self.cycles = replica_avg(self.cycles)
        self.losses = replica_avg(self.losses)
        self.accuracies = replica_avg(self.accuracies)
    def report_stats(self):
        """Log a single training status line."""
        status_string = self.epoch_string()
        status_string += self.training_metrics_string()
        status_string += self.optimizer_string()
        status_string += self.throughput_string()
        logger.info(status_string)
    def inference_metrics_string(self):
        # Base class has no inference metrics; subclasses override.
        return ""
    def report_inference_stats(self, mean_latency, min_latency, max_latency, p99_latency, p999_latency):
        """Log a single inference status line, including latencies when known."""
        avg = np.average
        status_string = f"Iteration: {self.count:6} "
        status_string += self.inference_metrics_string()
        status_string += self.throughput_string()
        if mean_latency is not None:
            status_string += f"Per-sample Latency: {mean_latency} {min_latency} {max_latency} {p99_latency} {p999_latency} seconds (mean min max p99 p99.9) "
        logger.info(status_string)
def pretraining_stats(args, anchors, losses, accuracies):
    """Reduce each loss and accuracy anchor individually; return two tuples."""
    reduced_losses = tuple(reduce_metric(args, anchors, [loss]) for loss in losses)
    reduced_accuracies = tuple(reduce_metric(args, anchors, [acc]) for acc in accuracies)
    return reduced_losses, reduced_accuracies
def packed_pretraining_stats(sequences_per_microbatch, sequences_in_step, args, anchors, losses, accuracies):
    """
    Per-step averaging of losses and accuracies for the packed data format.

    Steps can contain different numbers of sequences, so the MLM accuracy is
    weighted by each micro-batch's sequence count; every other metric is
    averaged with equal weight per step as usual.
    """
    # MLM: loss averaged uniformly, accuracy weighted by sequences per micro-batch.
    mlm_loss = reduce_metric(args, anchors, [losses[0]])
    mlm_accuracy = sum(anchors[accuracies[0]] * sequences_per_microbatch) / sequences_in_step
    # NSP: both loss and accuracy averaged with equal weight per step.
    nsp_loss = reduce_metric(args, anchors, [losses[1]])
    nsp_accuracy = reduce_metric(args, anchors, [accuracies[1]])
    return (mlm_loss, nsp_loss), (mlm_accuracy, nsp_accuracy)
def pretraining_inference_stats(args, anchors, losses, accuracies):
    """Reduce inference accuracies; the LM loss only when perplexity reporting is on."""
    loss = reduce_metric(args, anchors, [losses[0]]) if args.inference_lm_perplexity else None
    reduced_accuracies = tuple(reduce_metric(args, anchors, [acc]) for acc in accuracies)
    return loss, reduced_accuracies
class PretrainingIteration(Iteration):
    """Iteration specialization tracking separate MLM and NSP loss/accuracy."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # args[0] is the argparse namespace passed through to Iteration
        self.calculate_perplexity = args[0].inference_lm_perplexity
        # index 0 holds MLM values, index 1 holds NSP values
        self.losses = [deque(maxlen=self.recording_steps), deque(maxlen=self.recording_steps)]
        self.accuracies = [deque(maxlen=self.recording_steps), deque(maxlen=self.recording_steps)]
        if self.use_packed_sequence_format:
            self.stats_fn = packed_pretraining_stats
        else:
            if self.training:
                self.stats_fn = pretraining_stats
            else:
                self.stats_fn = pretraining_inference_stats
    def average_distributed_stats(self):
        """Average the buffered MLM/NSP stats across popdist replicas."""
        replica_avg = partial(average_distributed_deques, N=self.steps_per_distributed_reduce)
        self.durations = replica_avg(self.durations)
        if self.cycles:
            self.cycles = replica_avg(self.cycles)
        self.losses = [replica_avg(self.losses[0]), replica_avg(self.losses[1])]
        self.accuracies = [replica_avg(self.accuracies[0]), replica_avg(self.accuracies[1])]
    def add_training_stats(self, *args):
        """Buffer the step's (MLM, NSP) losses and accuracies."""
        loss, accuracy = self.stats_fn(*args)
        self.losses[0].append(loss[0])
        self.losses[1].append(loss[1])
        self.accuracies[0].append(accuracy[0])
        self.accuracies[1].append(accuracy[1])
    def write_training_stats(self):
        """Write MLM/NSP averages to the writer."""
        self.add_scalar("loss/MLM", np.average(self.losses[0]))
        self.add_scalar("loss/NSP", np.average(self.losses[1]))
        self.add_scalar("accuracy/MLM", np.average(self.accuracies[0]))
        self.add_scalar("accuracy/NSP", np.average(self.accuracies[1]))
    def training_metrics_string(self):
        avg = np.average
        status_string = \
            f"Loss (MLM NSP): {avg(self.losses[0]):5.3f} {avg(self.losses[1]):5.3f} " \
            f"Accuracy (MLM NSP): {avg(self.accuracies[0]):5.3f} {avg(self.accuracies[1]):5.3f} "
        return status_string
    def add_inference_stats(self, *args):
        """Buffer inference accuracies; MLM loss only when perplexity is enabled."""
        loss, accuracy = self.stats_fn(*args)
        self.accuracies[0].append(accuracy[0])
        self.accuracies[1].append(accuracy[1])
        if loss is not None:
            self.losses[0].append(loss)
    def inference_metrics_string(self):
        avg = np.average
        status_string = \
            f"Accuracy (MLM NSP): {avg(self.accuracies[0]):5.3f} {avg(self.accuracies[1]):5.3f} "
        if self.calculate_perplexity:
            # perplexity = exp(mean cross-entropy loss)
            status_string += \
                f"LM Perplexity: {np.exp(avg(self.losses[0])):5.3f} "
        return status_string
| 1.757813 | 2 |
AdaptivePELE/validator/validatorBlockNames.py | cescgina/AdaptivePELE | 13 | 12759258 | <gh_stars>10-100
try:
    # Check if the basestring type is available; this will fail in python3
    basestring
except NameError:
    # Python 3: alias basestring to str so the eval'd type names below work.
    basestring = str
class ControlFileParams:
    """Names of the four top-level blocks expected in a control file."""
    generalParams = "GeneralParams"
    spawningBlockname = "SpawningParams"
    simulationBlockname = "SimulationParams"
    clusteringBlockname = "clusteringTypes"
class GeneralParams:
    """Expected keys and value-type names for the "GeneralParams" block."""
    # keys that must always be present
    mandatory = {
        "restart": "bool",
        "outputPath": "basestring",
        "initialStructures": "list"
    }
    # all recognised keys
    params = {
        "restart": "bool",
        "outputPath": "basestring",
        "initialStructures": "list",
        "debug": "bool",
        "writeAllClusteringStructures": "bool",
        "nativeStructure": "basestring"
    }
class SpawningParams:
    """Expected keys and value-type names for the "SpawningParams" block.

    ``params`` lists every recognised key, ``types`` the mandatory keys per
    spawning type, and ``density`` the density-calculator sub-block schema.
    """
    params = {
        "epsilon": "numbers.Real",
        "T": "numbers.Real",
        "reportFilename": "basestring",
        "metricColumnInReport": "numbers.Real",
        "varEpsilonType": "basestring",
        "maxEpsilon": "numbers.Real",
        "minEpsilon": "numbers.Real",
        "variationWindow": "numbers.Real",
        "maxEpsilonWindow": "numbers.Real",
        "period": "numbers.Real",
        "alpha": "numbers.Real",
        "metricWeights": "basestring",
        "metricsInd": "list",
        "condition": "basestring",
        "n": "numbers.Real",
        "lagtime": "numbers.Real",
        "minPos": "list",
        "SASA_column": "int",
        "filterByMetric": "bool",
        "filter_value": "numbers.Real",
        "filter_col": "int"
    }
    # mandatory keys for each spawning type
    types = {
        "sameWeight": {
            "reportFilename": "basestring"
        },
        "independent": {
            "reportFilename": "basestring"
        },
        "independentMetric": {
            "metricColumnInReport": "numbers.Real",
            "reportFilename": "basestring"
        },
        "inverselyProportional": {
            "reportFilename": "basestring"
        },
        "null": {
            "reportFilename": "basestring"
        },
        "epsilon": {
            "epsilon": "numbers.Real",
            "reportFilename": "basestring",
            "metricColumnInReport": "numbers.Real",
        },
        "FAST": {
            "epsilon": "numbers.Real",
            "reportFilename": "basestring",
            "metricColumnInReport": "numbers.Real",
        },
        "variableEpsilon": {
            "epsilon": "numbers.Real",
            "reportFilename": "basestring",
            "metricColumnInReport": "numbers.Real",
            "varEpsilonType": "basestring",
            "maxEpsilon": "numbers.Real"
        },
        "UCB": {
            "reportFilename": "basestring",
            "metricColumnInReport": "numbers.Real"
        },
        "REAP": {
            "reportFilename": "basestring",
            "metricColumnInReport": "numbers.Real"
        },
        "ProbabilityMSM": {
            "lagtime": "numbers.Real"
        },
        "MetastabilityMSM": {
            "lagtime": "numbers.Real"
        },
        "UncertaintyMSM": {
            "lagtime": "numbers.Real"
        },
        "IndependentMSM": {
            "lagtime": "numbers.Real"
        }
    }
    # schema of the optional density-calculator sub-block
    density = {
        "types": {
            "heaviside": "basestring",
            "null": "basestring",
            "constant": "basestring",
            "exitContinuous": "basestring",
            "continuous": "basestring"
        },
        "params": {
            "heaviside": "basestring",
            "null": "basestring",
            "constant": "basestring",
            "values": "list",
            "conditions": "list",
            "exitContinuous": "basestring",
            "continuous": "basestring"
        }
    }
class SimulationParams:
    """Expected keys and value-type names for the "SimulationParams" block.

    ``types`` holds the mandatory keys per simulation kind (pele/test/md),
    ``params`` every recognised key, and ``exitCondition`` the schema of the
    optional exit-condition sub-block.
    """
    types = {
        "pele": {
            "processors": "numbers.Real",
            "controlFile": "basestring",
            "seed": "numbers.Real",
            "peleSteps": "numbers.Real",
            "iterations": "numbers.Real"
        },
        "test": {
            "destination": "basestring",
            "origin": "basestring",
            "processors": "numbers.Real",
            "seed": "numbers.Real",
            "peleSteps": "numbers.Real",
            "iterations": "numbers.Real"
        },
        "md": {
            "processors": "numbers.Real",
            "seed": "numbers.Real",
            "productionLength": "numbers.Real",
            "iterations": "numbers.Real",
            "numReplicas": "numbers.Real"
        }}
    params = {
        "executable": "basestring",
        "data": "basestring",
        "documents": "basestring",
        "destination": "basestring",
        "origin": "basestring",
        "time": "numbers.Real",
        "processors": "numbers.Real",
        "controlFile": "basestring",
        "seed": "numbers.Real",
        "peleSteps": "numbers.Real",
        "iterations": "numbers.Real",
        "modeMovingBox": "basestring",
        "boxCenter": "list",
        "boxRadius": "numbers.Real",
        "runEquilibration": "bool",
        "equilibrationMode": "basestring",
        "equilibrationLength": "numbers.Real",
        "equilibrationBoxRadius": "numbers.Real",
        "equilibrationTranslationRange": "numbers.Real",
        "equilibrationRotationRange": "numbers.Real",
        "numberEquilibrationStructures": "numbers.Real",
        "useSrun": "bool",
        "srunParameters": "basestring",
        "mpiParameters": "basestring",
        "exitCondition": "dict",
        "trajectoryName": "basestring",
        "ligandCharge": "list|numbers.Real",
        "ligandName": "list|basestring",
        "cofactors": "list",
        "ligandsToRestrict": "list",
        "nonBondedCutoff": "numbers.Real",
        "timeStep": "numbers.Real",
        "temperature": "numbers.Real",
        "runningPlatform": "basestring",
        "minimizationIterations": "numbers.Real",
        "reporterFrequency": "numbers.Real",
        "productionLength": "numbers.Real",
        "WaterBoxSize": "numbers.Real",
        "forcefield": "basestring",
        "trajectoriesPerReplica": "numbers.Real",
        "equilibrationLengthNVT": "numbers.Real",
        "equilibrationLengthNPT": "numbers.Real",
        "devicesPerTrajectory": "int",
        "constraintsMinimization": "numbers.Real",
        "constraintsNVT": "numbers.Real",
        "constraintsNPT": "numbers.Real",
        "customparamspath": "basestring",
        "numReplicas": "numbers.Real",
        "maxDevicesPerReplica": "numbers.Real",
        "format": "basestring",
        "constraints": "list",
        "boxType": "basestring",
        "postprocessing": "bool",
        "cylinderBases": "list"
    }
    # schema of the optional exit-condition sub-block
    exitCondition = {
        "types": {
            "metric": "basestring",
            "clustering": "basestring",
            "metricMultipleTrajectories": "basestring"
        },
        "params": {
            "metricCol": "numbers.Real",
            "exitValue": "numbers.Real",
            "condition": "basestring",
            "numTrajs": "numbers.Real"
        }
    }
class clusteringTypes:
    """Expected keys and value-type names for the "clusteringTypes" block.

    ``types`` holds the mandatory keys per clustering kind, ``params`` every
    recognised key, and ``thresholdCalculator`` the schema of the optional
    threshold-calculator sub-block.
    """
    types = {
        "rmsd": {},
        "contactMap": {
            "similarityEvaluator": "basestring",
            "ligandResname": "basestring"
        },
        "lastSnapshot": {
            "ligandResname": "basestring"
        },
        "null": {},
        "MSM": {
            "ligandResname": "basestring",
            "nclusters": "numbers.Real"
        }
    }
    params = {
        "rmsd": "basestring",
        "contactMap": "basestring",
        "lastSnapshot": "basestring",
        "null": "basestring",
        "contactThresholdDistance": "numbers.Real",
        "ligandResname": "basestring",
        "ligandResnum": "numbers.Real",
        "ligandChain": "basestring",
        "similarityEvaluator": "basestring",
        "symmetries": "list",
        "alternativeStructure": "bool",
        "nclusters": "numbers.Real",
        "tica": "bool",
        "atom_Ids": "list",
        "writeCA": "bool",
        "sidechains": "bool",
        "tica_lagtime": "numbers.Real",
        "tica_nICs": "numbers.Real",
        "tica_kinetic_map": "bool",
        "tica_commute_map": "bool"
    }
    # schema of the optional threshold-calculator sub-block
    thresholdCalculator = {
        "types": {
            "heaviside": "basestring",
            "constant": "basestring"
        },
        "params": {
            "conditions": "list",
            "values": "list",
            "value": "numbers.Real",
            "heaviside": "basestring",
            "constant": "basestring"
        }
    }
| 2.421875 | 2 |
SimpleConditions/05.numbersToTenText.py | SpleefDinamix/SoftuniPythonProgrammingBasics | 0 | 12759259 | <filename>SimpleConditions/05.numbersToTenText.py
num = int(input())
if num == 0:
print("zero")
elif num == 1:
print("one")
elif num == 2:
print("two")
elif num == 3:
print("three")
elif num == 4:
print("four")
elif num == 5:
print("five")
elif num == 6:
print("six")
elif num == 7:
print("seven")
elif num == 8:
print("eight")
elif num == 9:
print("nine")
else:
print("number too big") | 3.53125 | 4 |
CybORG/CybORG/Shared/ActionHandler.py | rafvasq/cage-challenge-1 | 18 | 12759260 | <reponame>rafvasq/cage-challenge-1
# Copyright DST Group. Licensed under the MIT license.
from CybORG.Shared.Actions.Action import Action
from CybORG.Shared.Observation import Observation
class ActionHandler:
    """Abstract base for handlers that execute an Action and return an Observation."""

    def __init__(self) -> None:
        pass

    def perform(self, action: Action) -> Observation:
        """Execute *action* and return the resulting observation (subclass hook)."""
        raise NotImplementedError
| 2 | 2 |
radical_translations/core/migrations/0032_delete_instance.py | kingsdigitallab/radical_translations | 3 | 12759261 | # Generated by Django 2.2.10 on 2020-05-18 10:48
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the Instance model from the core app.
    # Depends on the events app so the FK cleanup there runs first.

    dependencies = [
        ('events', '0008_alter_field_classification_on_event'),
        ('core', '0031_delete_item'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Instance',
        ),
    ]
| 1.3125 | 1 |
linkml_model_enrichment/utils/schemautils.py | noelmcloughlin/linkml-model-enrichment | 0 | 12759262 | <filename>linkml_model_enrichment/utils/schemautils.py
import copy
import logging
from typing import Union
from deprecated.classic import deprecated
import yaml
from linkml_runtime.dumpers import yaml_dumper
from linkml_runtime.linkml_model import SchemaDefinition
@deprecated("Replaced by linkml.runtime.utils.schema_as_dict")
def minify_schema(obj: Union[dict, SchemaDefinition]) -> dict:
    """Recursively strip redundant 'name' keys from a schema structure.

    A SchemaDefinition is first round-tripped through YAML to obtain a plain
    dict.  Any nested dict entry whose 'name' merely repeats its containing
    key has that 'name' removed.  Mutates and returns *obj*.
    """
    # TODO prefixes
    if isinstance(obj, SchemaDefinition):
        # Round-trip through YAML to get a plain dict representation.
        yd = yaml_dumper.dumps(obj)
        obj = yaml.safe_load(yd)
    if isinstance(obj, dict):
        for k, v in obj.items():
            # Drop 'name' entries that just repeat the containing key.
            if isinstance(v, dict) and 'name' in v and v['name'] == k:
                del v['name']
            minify_schema(v)
    elif isinstance(obj, list):
        for v in obj:
            minify_schema(v)
    # Scalars need no minification (the original had a no-op `else: None`).
    return obj
# TODO: replace with schemaview
def merge_schemas(schemas, nomerge_enums_for=[]):
    """Merge a list of schema dicts into a single schema.

    The first schema is deep-copied as the base; classes, slots, types and
    enums from the remaining schemas are folded in.  For slots present in
    both, a non-"string" range wins over "string"; conflicting non-string
    ranges are logged as errors and the existing range kept.

    NOTE(review): ``nomerge_enums_for`` is accepted but currently unused
    (enum merging is still a TODO); it also uses a mutable default argument —
    kept as-is for interface compatibility.
    """
    schema = copy.deepcopy(schemas[0])
    for s in schemas:
        for n, x in s['classes'].items():
            if n not in schema['classes']:
                schema['classes'][n] = x
        for n, x in s['slots'].items():
            if n not in schema['slots']:
                schema['slots'][n] = x
            else:
                cur_slot = schema['slots'][n]
                if 'range' in cur_slot and 'range' in x:
                    if cur_slot['range'] != x['range']:
                        logging.error(f'Inconsistent ranges: {cur_slot} vs {x}')
                        # "string" is the weakest declaration; let the more
                        # specific range from the other schema win.
                        if cur_slot['range'] == 'string':
                            cur_slot['range'] = x['range']
                elif 'range' not in cur_slot and 'range' in x:
                    # Guard on 'range' in x: previously this raised KeyError
                    # when neither slot declared a range.
                    cur_slot['range'] = x['range']
        for n, x in s['types'].items():
            if n not in schema['types']:
                schema['types'][n] = x
            else:
                pass  # TODO: merge type definitions
        for n, x in s['enums'].items():
            if n not in schema['enums']:
                schema['enums'][n] = x
            else:
                pass  # TODO: merge enum definitions (honouring nomerge_enums_for)
    return schema
examples/automlbechmark/run_experiments/bike/bike_lr.py | jianzhnie/AutoTabular | 48 | 12759263 | <reponame>jianzhnie/AutoTabular<filename>examples/automlbechmark/run_experiments/bike/bike_lr.py<gh_stars>10-100
import numpy as np
import pandas as pd
from autofe.get_feature import *
from sklearn.linear_model import Ridge
if __name__ == '__main__':
    # Load the pre-split bike-sharing train/test CSVs and concatenate them so
    # feature generation sees the full data; len_train marks the split point.
    root_path = './data/bike/'
    train_data = pd.read_csv(root_path + 'data_train.csv')
    len_train = len(train_data)
    test_data = pd.read_csv(root_path + 'data_test.csv')
    total_data = pd.concat([train_data, test_data]).reset_index(drop=True)

    target_name = 'count'
    classifier = Ridge(random_state=0)

    # Ridge-regression baseline on the raw features.
    # Previously observed r2_score: 0.9999999999999997.  (The old code spelled
    # that as a bare annotation statement, which was a silent no-op.)
    total_data_base = get_baseline_total_data(total_data)
    score = train_and_evaluate(total_data_base, target_name, len_train,
                               classifier, task_type='regression')
scripts/generate.py | wateryan/fixp | 0 | 12759264 | <filename>scripts/generate.py<gh_stars>0
""" Simple script to generate Rust source code based on a provided XML Fix message specification """
import os
import re
import xml.etree.ElementTree
from string import Template
ROOT_DIR = "spec"  # directory containing the XML FIX specification files

# Rust source templates for the generated fields module.
F_IMPORTS = "use field::Field;"
F_TRAIT = "trait Field {fn tag(&self) -> u16;}"
F_IMPL_TEMPLATE = Template(
    "impl Field for $fieldName {fn tag(&self) -> u16 {return $tagNumber;}}")
F_ENUM_TEMPLATE = Template(
    "#[derive(Debug)]pub enum $fieldName { $enumValues }")
F_STRUCT_TEMPLATE = Template(
    "pub struct $fieldName {value: $type,}")

# Rust source templates for the generated messages module; the NREQ variant
# wraps optional fields in Option<T>.
M_IMPORTS_TEMPLATE = Template("use $fixVersion::fields::*;")
M_STRUCT_TEMPLATE = Template("struct $messageName { $structFields }")
M_F_REQ_TEMPLATE = Template("$fieldName: $fieldType,")
M_F_NREQ_TEMPLATE = Template("$fieldName: Option<$fieldType>,")
# TODO Figure out actual values for this, they're a best guess currently
# Maps FIX field type names (as they appear in the XML spec) to the Rust
# types used in the generated structs.
TYPES = {
    "AMT": "f32",
    "BOOLEAN": "bool",
    "CHAR": "char",
    "COUNTRY": "String",
    "CURRENCY": "f32",
    "DATA": "[u8; 1024]",
    "DATE": "String",
    "DAYOFMONTH": "u8",
    "EXCHANGE": "String",
    "FLOAT": "f32",
    "INT": "u16",
    "LANGUAGE": "String",
    "LENGTH": "usize",
    "LOCALMKTDATE": "u64",
    "MONTHYEAR": "u8",
    "MULTIPLECHARVALUE": "String",
    "NUMINGROUP": "u16",
    "PERCENTAGE": "f32",
    "PRICE": "f32",
    "PRICEOFFSET": "i8",
    "QTY": "f32",
    "QUANTITY": "f64",
    "SEQNUM": "u64",
    "STRING": "String",
    "TIME": "u64",
    "TZTIMEONLY": "u64",
    "TZTIMESTAMP": "u64",
    "UTCDATE": "String",
    "UTCDATEONLY": "u16",
    "UTCTIMEONLY": "String",
    "UTCTIMESTAMP": "u64",
    "XMLDATA": "String",
}
def create_fields(elements):
    """Create the fields module (fields.rs) for the file's FIX spec.

    For every <field> in the spec an `impl Field` block is emitted; fields
    with enumerated values become Rust enums, all others become structs
    wrapping the mapped Rust type from TYPES.
    """
    fields = F_IMPORTS + "\n"
    fixv = get_fix_version(elements)
    for field in elements.findall("fields/field"):
        fields += "\n" + F_IMPL_TEMPLATE.substitute(
            fieldName=field.get("name"), tagNumber=field.get("number"))
        if field.findall("value"):
            # Some of the spec has values such as 5yr which can't be valid
            # enums, so prepend _
            enumset = ",".join("_" + v.get("description").replace("_", "").title()
                               for v in field.findall("value"))
            fields += "\n" + F_ENUM_TEMPLATE.substitute(
                fieldName=field.get("name"), enumValues=enumset)
        else:
            field_type = TYPES[field.get("type")]
            fields += "\n" + F_STRUCT_TEMPLATE.substitute(
                fieldName=field.get("name"), type=field_type)
    file_dir = get_dir(fixv)
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    # Context manager guarantees the handle is flushed and closed even on
    # error (the original leaked the open file handle).
    with open(file_dir + "/fields.rs", "w") as fields_file:
        fields_file.write(fields)
def create_messages(elements):
    """Create the message structs module (messages.rs) for the FIX spec.

    Each <message> becomes a Rust struct named "<Name>Message"; required
    fields use the plain type, optional ones are wrapped in Option<T>.
    """
    fixv = get_import(get_fix_version(elements))
    messages = M_IMPORTS_TEMPLATE.substitute(fixVersion=fixv) + "\n"
    for message in elements.findall("messages/message"):
        m_name = message.get("name") + "Message"
        m_fields = ""
        for field in message.findall("field"):
            field_type = field.get("name")
            # Field name is the snake_case form of the (lowercased) type name.
            field_name = to_snake(field_type[0].lower() + field_type[1:])
            if field.get("required") == 'Y':
                m_fields += M_F_REQ_TEMPLATE.substitute(
                    fieldName=field_name, fieldType=field_type)
            else:
                m_fields += M_F_NREQ_TEMPLATE.substitute(
                    fieldName=field_name, fieldType=field_type)
        messages += M_STRUCT_TEMPLATE.substitute(
            messageName=m_name, structFields=m_fields) + "\n"
    file_dir = get_dir(fixv)
    if not os.path.exists(file_dir):
        os.makedirs(file_dir)
    # Context manager closes the handle; the original leaked it.
    with open(file_dir + "/messages.rs", "w") as messages_file:
        messages_file.write(messages)
def parse_file(file_name):
    """Return the root element of the parsed XML spec file under ROOT_DIR."""
    file_path = ROOT_DIR + "/" + file_name
    tree = xml.etree.ElementTree.parse(file_path)
    return tree.getroot()
def get_fix_version(elements):
    """Return the dotted FIX version string, e.g. "FIX.4.4.0"."""
    parts = (
        elements.get("type"),
        elements.get("major"),
        elements.get("minor"),
        elements.get("servicepack"),
    )
    return ".".join(str(part) for part in parts)
def get_import(fix_version):
    """Return the Rust module-path form of a FIX version (dots -> underscores, lowercase)."""
    return fix_version.replace(".", "_").lower()
def get_dir(fix_version):
    """Return the output directory for generated Rust source of this FIX version."""
    return f"src/{get_import(fix_version)}"
def to_snake(name):
    """Convert a CamelCase identifier to snake_case."""
    # First pass breaks before Capitalized words, second before any upper
    # letter that follows a lowercase letter or digit.
    with_breaks = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    with_breaks = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', with_breaks)
    return with_breaks.lower()
def export_mods(elements):
    """Create the module export file (mod.rs) for this FIX version."""
    fixv = get_fix_version(elements)
    file_dir = get_dir(fixv)
    # Context manager closes the handle; the original leaked it.
    with open(file_dir + "/mod.rs", "w") as mod_file:
        mod_file.write(
            "pub mod fields;\npub mod messages;"
        )
# Generate fields.rs, messages.rs and mod.rs for every spec file found.
for filename in os.listdir(ROOT_DIR):
    # print() call form runs under both Python 2 and 3; the original bare
    # `print "..."` statement is a SyntaxError on Python 3.
    print("Generating code for " + filename)
    e = parse_file(filename)
    create_fields(e)
    create_messages(e)
    export_mods(e)
| 2.375 | 2 |
sample.py | zbingwen/qqbot | 0 | 12759265 | <filename>sample.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from qqbot import QQBot
class MyQQBot(QQBot):
    """QQ bot answering two chat commands: '-hello' greets, '-stop' shuts down."""

    def onPollComplete(self, msgType, from_uin, buddy_uin, message):
        # Reply is sent back to the original sender via the same channel type.
        if message == '-hello':
            self.send(msgType, from_uin, '你好,我是QQ机器人')
        elif message == '-stop':
            # Setting stopped ends the poll loop after the farewell message.
            self.stopped = True
            self.send(msgType, from_uin, 'QQ机器人已关闭')

# Log in and block, polling for messages until '-stop' is received.
myqqbot = MyQQBot()
myqqbot.Login()
myqqbot.PollForever()
| 2.828125 | 3 |
chat_app/consumers.py | chunky2808/Hire-Me | 0 | 12759266 | from channels import Group
def ws_add(message):
    # New WebSocket connection: accept the handshake and join the shared
    # 'chat' group so the client receives broadcasts.
    print('Connection Recieved')
    message.reply_channel.send({'accept':True})
    Group('chat').add(message.reply_channel)

def ws_message(message):
    # Broadcast every incoming text frame to all members of the 'chat' group.
    print('Message received:{}'.format(message.content['text']))
    Group('chat').send({'text': message.content['text']})

def ws_disconnect(message):
    # Remove the closing socket from the group to stop further broadcasts.
    print('Connection closed')
    Group('chat').discard(message.reply_channel)
src/main/kaudio/app/widgets/main_widget.py | Martmists-GH/kaudio-python | 0 | 12759267 | <reponame>Martmists-GH/kaudio-python<gh_stars>0
from NodeGraphQt import NodeGraph
from PySide2.QtWidgets import QWidget, QHBoxLayout
from kaudio.app.widgets.sidebar_widget import SidebarWidget
class MainWidget(QWidget):
    """Top-level widget: sidebar on the left, node-graph canvas on the right."""

    def __init__(self, graph: NodeGraph):
        super().__init__()
        layout = QHBoxLayout()
        # Stretch factors 4:10 keep the sidebar narrower than the graph view.
        layout.addWidget(SidebarWidget(), 4)
        layout.addWidget(graph.widget, 10)
        self.setLayout(layout)
| 2.09375 | 2 |
tests/tools/test_dump_user_routes.py | plastr/extrasolar-game | 0 | 12759268 | # Copyright (c) 2010-2011 Lazy 8 Studios, LLC.
# All rights reserved.
from front.lib import db, utils
from front.tools import dump_user_routes, replay_game
from front.tests import base
from front.tests.base import points
class TestDumpUserRoutes(base.TestCase):
    """Round-trip test: dump a user's targets as a route, replay it, and
    verify the replayed targets keep the original start/arrival times."""

    def setUp(self):
        super(TestDumpUserRoutes, self).setUp()
        self.create_user('<EMAIL>', 'pw')

    def test_dump_user_routes(self):
        user = self.get_logged_in_user()
        # First move immediately; second after a 4h delay with 10h travel time.
        chip_result = self.create_target_and_move(**points.FIRST_MOVE)
        target_one = self.last_chip_value_for_path(['user', 'rovers', '*', 'targets', '*'], chip_result)
        start_delay = utils.in_seconds(hours=4)
        arrival_delta = utils.in_seconds(hours=10)
        self.advance_now(seconds=start_delay)
        chip_result = self.create_target(arrival_delta=arrival_delta, **points.SECOND_MOVE)
        target_two = self.last_chip_value_for_path(['user', 'rovers', '*', 'targets', '*'], chip_result)

        with db.commit_or_rollback(self.get_ctx()) as ctx:
            with db.conn(ctx) as ctx:
                routes_by_rover, all_targets = dump_user_routes.targets_as_route_for_user_id(ctx, user.user_id)
        # Only one rover so far.
        self.assertEqual(len(routes_by_rover), 1)
        route = routes_by_rover[0][1]
        # Only two user created targets.
        self.assertEqual(route.num_points(), 2)
        points_iter = route.iterpoints()
        # The first points arrival_delta is going to be strange, since we create the initial
        # lander points and then some amount of time goes by before the user can create their
        # first target.
        point = points_iter.next()
        # However, the second point's arrival_delta should equal the amount we delayed before
        # creating it plus how long its travel time was.
        point = points_iter.next()
        self.assertEqual(point.arrival_delta, arrival_delta)
        self.assertEqual(point.start_delay, start_delay)

        # Pass the dumped route through replay_game and see if the data looks correct.
        with db.commit_or_rollback(self.get_ctx()) as ctx:
            with db.conn(ctx) as ctx:
                tool = replay_game.ReplayGame(ctx, '<EMAIL>',
                    route_structs=[route.to_struct()], verbose=False)
                tool.run()

        user = self.get_user_by_email('<EMAIL>')
        rover = user.rovers.active()[0]
        # Verify that the start_time and arrival_time fields made the round trip intact.
        last_two_targets = rover.targets.by_arrival_time()[-2:]
        replay_target_one, replay_target_two = last_two_targets[0], last_two_targets[1]
        self._assert_targets_same_times(target_one, replay_target_one)
        self._assert_targets_same_times(target_two, replay_target_two)

    def _assert_targets_same_times(self, original, replay):
        # NOTE: Ideally we could verify lat==lat,lng==lng but loss of precision prevents this currently.
        self.assertEqual(original['start_time'], replay.start_time)
        self.assertEqual(original['arrival_time'], replay.arrival_time)
| 2.265625 | 2 |
python/hello.py | ykoon-git/aws | 0 | 12759269 | <gh_stars>0
import boto3
# Let's use Amazon S3
s3 = boto3.resource('s3')
for bucket in s3.buckets.all():
print(bucket.name)
| 2.546875 | 3 |
commands/help.py | AndreyCortez/Telegram-Bot | 0 | 12759270 | <reponame>AndreyCortez/Telegram-Bot
from telegram import Update
from telegram.ext import CallbackContext
from commands.general import send_message
# Maps each bot command name to its help text (Portuguese, using the HTML
# formatting tags supported by Telegram messages).
available_commands = {
    "help": ("Lista informações a respeito dos comandos disponíveis"),
    "planilha": (
        "Envia o link da planilha de comandos do Tupão\n"
        "Para cadastrar um novo comando, basta inserir este e a resposta "
        "de texto esperada na ultima linha.\n"
        "Para formatações específicas no texto de resposta, use tags <i>HTML</i>\n\n"
        "Após inserção do comando na planilha, execute o comando <code>/refresh</code>"
        "para atualizar os comandos internos do Bot."
    ),
    "list": (
        "Lista todas as tarefas não concluídas de um subsistema, com base na planilha "
        "de atividades deste.\n\n"
        "As tarefas são listadas separadas por projeto\n\n"
        "É possível executar <code>/list <subsistema></code> para "
        "obter informações de um subsistema imediatamente. O subsistema deve ser fornecido "
        "através de sua abreviação (i.e. <code>/list sw</code>).\n\n"
        "Por outro lado, pode-se utilizar <code>/list</code> sem argumentos para receber "
        "uma lista com os sistemas e subsistemas disponíveis."
    ),
    "add": (
        "Adiciona uma nova tarefa na planilha de atividades do sistema\n\n"
        "Ao selecionar o subsistema, o bot responderá com a lista de projetos ativos.\n"
        "É possível então selecionar um dos projetos já existentes através de seu número ou "
        "criar um projeto totalmente novo, fornecendo seu nome.\n"
        "Em seguida, serão realizadas perguntas a respeito da atividade a ser incluída.\n\n"
        "Ao finalizar a conversa com o bot, a nova atividade será adicionada imediatamente na planilha "
        "de atividades do sistema.\n\n"
        "É possível executar <code>/add <subsistema></code> para "
        "selecionar um subsistema imediatamente. O subsistema deve ser fornecido "
        "através de sua abreviação (i.e. <code>/add sw</code>).\n\n"
        "Por outro lado, pode-se utilizar <code>/add</code> sem argumentos para receber "
        "uma lista com os sistemas e subsistemas disponíveis."
    ),
    "start": (
        "Muda o status de uma tarefa da planilha de atividades do sistema para Fazendo\n\n"
        "Ao selecionar o subsistema, o bot responderá com a lista de tarefas ativas. Ao selecionar "
        "a desejada, esta terá seu status atualizado automaticamente na planilha.\n\n"
        "É possível executar <code>/start <subsistema></code> para "
        "obter as tarefas de um subsistema imediatamente. O subsistema deve ser fornecido "
        "através de sua abreviação (i.e. <code>/start sw</code>).\n\n"
        "Por outro lado, pode-se utilizar <code>/start</code> sem argumentos para receber "
        "uma lista com os sistemas e subsistemas disponíveis."
    ),
    "end": (
        "Muda o status de uma tarefa da planilha de atividades do sistema para Concluído\n\n"
        "Ao selecionar o subsistema, o bot responderá com a lista de tarefas ativas. Ao selecionar "
        "a desejada, serão realizadas algumas perguntas a respeito do desenvolvimento desta, que "
        "serão automaticamente adicionadas na planilha.\n\n"
        "É possível executar <code>/end <subsistema></code> para "
        "obter as tarefas de um subsistema imediatamente. O subsistema deve ser fornecido "
        "através de sua abreviação (i.e. <code>/end sw</code>).\n\n"
        "Por outro lado, pode-se utilizar <code>/end</code> sem argumentos para receber "
        "uma lista com os sistemas e subsistemas disponíveis."
    ),
}
def get_default_description() -> str:
    """Return the generic help text listing every available command."""
    command_names = "</code>, <code>".join(available_commands.keys())
    return (
        "<b>Comandos disponíveis</b>\n"
        f"<code>{command_names}</code>.\n\n"
        "Utilize <code>/help <comando></code> para obter ajuda para um comando específico"
    )
def get_personalized_description(command: str) -> str:
    """Return the help text for one specific command from the dictionary."""
    description = available_commands[command]
    return f"<b>Comando {command}</b>\n\n<u>Descrição</u>\n{description}"
def help_command(update: Update, ctx: CallbackContext) -> None:
    """Handle the /help command.

    With a known command name as first argument, replies with that command's
    help; otherwise replies with the list of available commands.
    """
    args = ctx.args
    if args and args[0] in available_commands:
        reply = get_personalized_description(args[0])
    else:
        reply = get_default_description()
    send_message(update, ctx, reply)
| 2.765625 | 3 |
tcfcli/common/scf_client/scf_report_client.py | Juliiii/scfcli | 1 | 12759271 | # -*- coding: utf-8 -*-
from tcfcli.cmds.cli import __version__
from . import ScfBaseClient
class ScfReportClient(ScfBaseClient):
    """Client that sends anonymous CLI usage info to the SCF backend."""

    # Request timeout (seconds) for the report call.
    CLOUD_API_REQ_TIMEOUT = 3

    def __init__(self):
        # Reporting endpoint is pinned to the ap-guangzhou region.
        super(ScfReportClient, self).__init__("ap-guangzhou")

    def report(self):
        """Send a best-effort download/usage ping; failures are ignored."""
        try:
            params = {'Downloads': 1, 'Source': 'cli', 'SourceVersion': __version__}
            self._client.call("ReportCliInfos", params)
        except Exception as err:
            # Telemetry is best-effort: never let reporting break the CLI.
            pass
| 2.09375 | 2 |
widget/steps.py | asyatrina/selenium_kurs | 0 | 12759272 | <filename>widget/steps.py
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
class UsedSteps:
    """Reusable Selenium interaction helpers bound to one WebDriver."""

    def __init__(self, driver: WebDriver):
        # Shared driver used by every helper method.
        self.driver = driver

    def find_and_click_on_element(self, selector: str) -> None:
        """Click the first element matching the CSS selector."""
        self.driver.find_element_by_css_selector(selector).click()

    def wait_until_element_presence(self, method, selector) -> None:
        """Block up to 10 s until an element located by (method, selector) exists."""
        WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((method, selector)))

    def find_and_fill_input(self, selector: str, text: str) -> None:
        """Type *text* into the first element matching the CSS selector."""
        self.driver.find_element_by_css_selector(selector).send_keys(text)
| 2.28125 | 2 |
tests/conftest.py | evhart/nasty | 49 | 12759273 | <reponame>evhart/nasty
#
# Copyright 2019-2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Iterator
import pytest
from _pytest.config import Config
from _pytest.fixtures import FixtureRequest
from _pytest.monkeypatch import MonkeyPatch
from nasty_utils import LoggingSettings
from nasty._settings import NastySettings
from .util.requests_cache import RequestsCache
def pytest_configure(config: Config) -> None:
    """Global pytest hook: wire up logging and register requests-cache markers."""
    LoggingSettings.setup_pytest_logging(config)
    _configure_requests_cache(config)


def _configure_requests_cache(config: Config) -> None:
    # Register the custom markers so pytest does not warn about unknown marks.
    config.addinivalue_line(
        "markers", "requests_cache_disabled: Disable caching of requests."
    )
    config.addinivalue_line(
        "markers", "requests_cache_regenerate: Regenerate requested cached requests."
    )


@pytest.fixture(scope="session")
def requests_cache() -> Iterator[RequestsCache]:
    # One shared request cache for the entire test session.
    with RequestsCache() as requests_cache:
        yield requests_cache


@pytest.fixture(scope="session")
def settings() -> Iterator[NastySettings]:
    # Settings are loaded once per session from the settings file on disk.
    settings = NastySettings.find_and_load_from_settings_file()
    yield settings


@pytest.fixture(autouse=True)
def activate_requests_cache(
    request: FixtureRequest, monkeypatch: MonkeyPatch, requests_cache: RequestsCache
) -> None:
    # The cache is active unless the test opted out via the marker; the
    # regenerate marker forces refetching of the cached responses.
    if not request.node.get_closest_marker("requests_cache_disabled"):
        requests_cache.activate(
            monkeypatch,
            bool(request.node.get_closest_marker("requests_cache_regenerate")),
        )


@pytest.fixture(autouse=True)
def disrespect_robotstxt(monkeypatch: MonkeyPatch) -> None:
    # Tests hit endpoints regardless of robots.txt rules.
    monkeypatch.setenv("NASTY_DISRESPECT_ROBOTSTXT", "1")
| 1.671875 | 2 |
orc8r/gateway/python/magma/magmad/check/network_check/traceroute.py | saurabhsoni88/magma | 2 | 12759274 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Module for executing `traceroute` commands via subprocess
"""
from collections import namedtuple
import asyncio
from magma.magmad.check import subprocess_workflow
# Defaults applied when the caller leaves max_hops / bytes_per_packet unset.
DEFAULT_TTL = 30
DEFAULT_BYTES_PER_PACKET = 60

# Input parameters for one `traceroute` invocation.
TracerouteParams = namedtuple('TracerouteParams',
                              ['host_or_ip', 'max_hops', 'bytes_per_packet'])
# Outcome of one invocation: `error` is a message or None; `stats` holds hops.
TracerouteResult = namedtuple('TracerouteResult',
                              ['error', 'host_or_ip', 'stats'])
TracerouteStats = namedtuple('TracerouteStats', ['hops'])
# One hop line: its index plus the parsed probe results.
TracerouteHop = namedtuple('TracerouteHop', ['idx', 'probes'])
TracerouteProbe = namedtuple('TracerouteProbe',
                             ['hostname', 'ip_addr', 'rtt_ms'])
def traceroute(params):
    """
    Execute some `traceroute` commands via subprocess.

    Args:
        params ([TracerouteParams]): params for the `traceroute` commands

    Returns:
        [TracerouteResult]: stats from the executed `traceroute` commands
    """
    # Argument construction and output parsing are delegated to the shared
    # subprocess workflow helper.
    return subprocess_workflow.exec_and_parse_subprocesses(
        params,
        _get_traceroute_command_args_list,
        parse_traceroute_output,
    )
async def traceroute_async(params, loop=None):
    """
    Execute some `traceroute` commands asynchronously and return results.

    Args:
        params ([TracerouteParams]): params for the `traceroute` commands
        loop: event loop to run in (optional)

    Returns:
        [TracerouteResult]: stats from the executed `traceroute` commands
    """
    # `async def` replaces the generator-based `@asyncio.coroutine` decorator,
    # which was deprecated in Python 3.8 and removed in 3.11.  Awaiting this
    # coroutine yields the same value as the previous implementation.
    return subprocess_workflow.exec_and_parse_subprocesses_async(
        params,
        _get_traceroute_command_args_list,
        parse_traceroute_output,
        loop=loop,
    )
def _get_traceroute_command_args_list(param):
return [
'traceroute',
'-m', str(param.max_hops or DEFAULT_TTL),
param.host_or_ip,
str(param.bytes_per_packet or DEFAULT_BYTES_PER_PACKET),
]
def parse_traceroute_output(stdout, stderr, param):
    """Convert one finished traceroute subprocess into a TracerouteResult.

    Any stderr output, or a parse failure of stdout, yields an error result
    with stats=None; otherwise stats holds the parsed TracerouteStats.
    """
    def create_error_result(error_msg):
        # Error results keep the host so callers can attribute the failure.
        return TracerouteResult(
            error=error_msg,
            host_or_ip=param.host_or_ip,
            stats=None,
        )

    if stderr:
        return create_error_result(stderr)
    else:
        try:
            stats = TracerouteParser().parse(stdout)
            return TracerouteResult(
                error=None,
                host_or_ip=param.host_or_ip,
                stats=stats,
            )
        except ValueError as e:
            # Raised e.g. on non-numeric hop/RTT tokens.
            msg = 'Error while parsing output. ' \
                  'Original exception message:\n{}'.format(str(e.args[0]))
            return create_error_result(msg)
        except IndexError as e:
            # Raised when a hop line ends mid-probe (tokens exhausted).
            msg = 'Error while parsing output - an incomplete line ' \
                  'was encountered. Original exception message:\n{}' \
                .format(str(e.args[0]))
            return create_error_result(msg)
class TracerouteParser(object):
    """Parses raw `traceroute` stdout bytes into TracerouteStats."""

    HostnameAndIP = namedtuple('HostnameAndIP', ['hostname', 'ip'])
    DEFAULT_ENDPOINT = HostnameAndIP(hostname=None, ip=None)

    def __init__(self):
        # Most recently seen probe endpoint; '*' timeouts and bare RTT tokens
        # inherit it.
        self._probe_endpoint = self.DEFAULT_ENDPOINT

    def parse(self, output):
        """
        Parse traceroute output bytes into TracerouteStats.

        Raises:
            ValueError, IndexError
        """
        output_lines = output.decode('ascii').strip().split('\n')
        output_lines.pop(0)  # strip header line
        hops = []
        for line in output_lines:
            # Endpoint context does not carry across hop lines.
            self._probe_endpoint = self.DEFAULT_ENDPOINT
            hops.append(self._parse_hop(line))
        return TracerouteStats(hops)

    def _parse_hop(self, line):
        # A hop line is "<idx> <probe tokens...>"; consume tokens until empty.
        hop_split = line.split()
        hop_idx = int(hop_split.pop(0))
        probes = []
        while hop_split:
            probe = self._parse_next_probe(hop_split)
            if probe:
                probes.append(probe)
        return TracerouteHop(idx=hop_idx, probes=probes)

    def _parse_next_probe(self, tokens):
        # Consumes 1-2 tokens from the front.  Three shapes are handled:
        #   '*'            -> timed-out probe (rtt 0, current endpoint)
        #   '<rtt> ms'     -> RTT probe for the current endpoint
        #   '<host> (<ip>)'-> sets a new endpoint, produces no probe yet
        head_token = tokens.pop(0)
        if head_token == '*':
            return TracerouteProbe(hostname=self._probe_endpoint.hostname,
                                   ip_addr=self._probe_endpoint.ip,
                                   rtt_ms=0)
        lookahead_token = tokens.pop(0)
        if lookahead_token == 'ms':
            return TracerouteProbe(hostname=self._probe_endpoint.hostname,
                                   ip_addr=self._probe_endpoint.ip,
                                   rtt_ms=float(head_token))
        else:
            # lookahead is '(<ip>)' -- strip the surrounding parentheses.
            ip_addr = lookahead_token[1:-1]
            self._probe_endpoint = self.HostnameAndIP(hostname=head_token,
                                                      ip=ip_addr)
            return None
| 2.328125 | 2 |
models/Deep-learning/models/lstm_guide_only_nolin_hp2_model.py | jingyi7777/CasRx_guide_efficiency | 0 | 12759275 | import tensorflow as tf
from tensorflow import keras
from models.layers import recurrent_dense
def lstm_guide_only_nolin_hp2_model(args,lstm_units=64, dense_units=16, recurrent_layers=1, dropout=0.0):
    """Build a guide-sequence-only model: a bidirectional LSTM encoder
    followed by stacked recurrent_dense blocks and one linear output unit.

    Args:
        args: unused here; kept for a uniform model-factory signature.
        lstm_units: hidden units per LSTM direction.
        dense_units: width of the dense / recurrent_dense layers.
        recurrent_layers: number of recurrent_dense blocks to stack.
        dropout: dropout rate applied inside the LSTM.

    Returns:
        An uncompiled keras.Model mapping the sequence input to one scalar.
    """
    seq = keras.Input(shape=(None, 4)) # variable-length input, 4 channels (presumably one-hot bases — TODO confirm)
    x = keras.layers.Bidirectional(keras.layers.LSTM(lstm_units, dropout=dropout))(seq)
    x = keras.layers.Dense(dense_units)(x)
    for _ in range(recurrent_layers):
        x = recurrent_dense(x, dense_units)
    outputs = keras.layers.Dense(1)(x)
    return keras.Model(inputs=[seq], outputs=outputs)
| 2.890625 | 3 |
AntShares/Exceptions.py | OTCGO/sync_antshares | 10 | 12759276 | # -*- coding:utf-8 -*-
"""
Description:
Exceptions
Usage:
from AntShares.Exceptions import *
"""
class WorkIdError(Exception):
    """Raised when a work id is invalid."""
    def __init__(self, info):
        # Initialize through the normal MRO; the original `super(Exception,
        # self)` skipped Exception and invoked BaseException directly.
        super(WorkIdError, self).__init__(info)
        self.error_code = 0x0002


class OutputError(Exception):
    """Raised when a transaction output is invalid."""
    def __init__(self, info):
        super(OutputError, self).__init__(info)
        self.error_code = 0x0003


class RegisterNameError(Exception):
    """Raised when a register-transaction name is invalid."""
    def __init__(self, info):
        super(RegisterNameError, self).__init__(info)
        self.error_code = 0x0004
| 2.5 | 2 |
lib/oeqa/runtime/cases/rubygems_rubygems_rubyntlm.py | tuxable-ltd/meta-rubygems | 0 | 12759277 | <filename>lib/oeqa/runtime/cases/rubygems_rubygems_rubyntlm.py
from rubygems_utils import RubyGemsTestUtils
class RubyGemsTestrubygems_rubyntlm(RubyGemsTestUtils):
    """Runtime checks for the `rubyntlm` gem on the target image."""

    def test_gem_list_rubygems_rubyntlm(self):
        # Gem appears in the installed gem list.
        self.gem_is_installed("rubyntlm")

    def test_load_rubyntlm(self):
        # Gem can actually be required at runtime.
        self.gem_is_loadable("rubyntlm")
| 1.648438 | 2 |
publish.py | sirmammingtonham/futureNEWS | 0 | 12759278 | <reponame>sirmammingtonham/futureNEWS
import re
import os
import sys
import random
import gpt_2_simple as gpt2
import tensorflow as tf
import numpy as np
from random_word import RandomWords
import requests
import giphy_client
from unsplash.api import Api
from unsplash.auth import Auth
from medium import Client
def clean(story):
    """Normalize a raw generated story: swap placeholder tokens and branding,
    then drop the 16-character start-of-text prefix."""
    replacements = (
        ('<|url|>', 'https://github.com/sirmammingtonham/futureMAG'),
        ('OneZero', 'FutureMAG'),
        ('onezero', 'FutureMAG'),
    )
    for old, new in replacements:
        story = story.replace(old, new)
    return story[16:]
def split_story(story, run_name):
    """Split a cleaned story into [title, subtitle, body, image_creds, run_name].

    Title and subtitle are pulled from the markdown '# ' / '## ' header lines
    the model emits; the subtitle is tagged as AI generated both in the
    returned subtitle and inside the body.  The fourth slot is a placeholder
    later filled with image credits by retrieve_images().
    """
    story = clean(story)
    # Split on sentence boundaries, keeping the '.' separators, and drop the
    # trailing fragment after the last period.  (Raw string avoids the
    # invalid-escape warning the old '(\.)' literal produced.)
    split = re.split(r'(\.)', story)[0:-1]
    metadata = split[0]
    title = metadata[metadata.find('# ')+2:metadata.find('## ')].strip('\n')
    subtitle = metadata[metadata.find('## ')+3:metadata.find('\n', metadata.find('## '))].strip('\n')
    split[0] = split[0].replace(subtitle, f"{subtitle} | AI generated article*")
    return [title, f"{subtitle} | AI generated article*", ''.join(split), None, run_name]
def retrieve_images(story):
    """Replace <|image|> placeholders in the story body with Unsplash photos.

    *story* is the [title, subtitle, body, creds, run_name] list produced by
    split_story(); story[3] is filled with (author_url, author_name) credit
    tuples.  Mutates and returns the list; on any failure the story is
    returned as-is (best-effort illustration).
    """
    # story[0] = title, story[2] = markdown body
    matches = [(m.group(), m.start(0)) for m in re.finditer(r"(<\|image\|>)", story[2])]
    image_creds = []
    try:
        # HACK/security: API credentials hard-coded in source control — move
        # them to environment variables / a secrets store and rotate the keys.
        client_id = "b9a6edaadf1b5ec49cf05f10aab79d5d2ea1fe66431605d12ec0f7ec22bc7289"
        client_secret = "f00e14688a25656c07f07d85e17b4ebd94e93fcf9bf0fd1859f7713ea1d94c16"
        redirect_uri = "urn:ietf:wg:oauth:2.0:oob"
        auth = Auth(client_id, client_secret, redirect_uri)
        api = Api(auth)
        # Use the first five words of the title as the image search query.
        q = story[0].split(' ')[:5]
        for match, idx in matches:
            pic = api.photo.random(query=q)[0]
            img = pic.urls.raw
            image_creds.append((f'https://unsplash.com/@{pic.user.username}', pic.user.name))
            # Mark the caption after the placeholder, then substitute the
            # 9-character '<|image|>' token with the photo URL.
            cap_idx = story[2].find('*', idx+11)
            story[2] = story[2][:cap_idx] + '**' + story[2][cap_idx:]
            story[2] = story[2][:idx] + img + story[2][idx+9:]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; any API or parse failure falls back to the plain story.
        return story
    story[3] = image_creds
    return story
def publish(title, sub, article, creds, run_name):
    """Post a generated article as a draft to the Medium publication.

    Args:
        title: article title; a title identical to the subtitle is skipped.
        sub: article subtitle.
        article: markdown body (the credit footer is appended here).
        creds: list of (author_url, author_name) image credits; falsy -> skip.
        run_name: model run name used to select the tag list.

    Returns:
        The request payload dict, or None when the post was skipped.
    """
    import json  # stdlib; was missing at module level, so json.loads raised NameError

    if title == sub:
        return
    # Base tags plus the longest word of the title as an extra tag.
    # NOTE(review): `tags` is defined inside the __main__ guard below, so this
    # function only works when the module runs as a script.
    tag = tags['onezero'] + [max(re.sub(r'[^\w\s]', '', title).split(), key=len).capitalize()]
    # HACK/security: hard-coded API token in source control — load it from an
    # environment variable or a secrets store instead, and rotate the token.
    access_token = '<KEY>'
    headers = {
        'Authorization': "Bearer " + access_token,
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
    }
    base_url = "https://api.medium.com/v1/"

    # The /me call doubles as a token check: indexing ['data']['id'] raises
    # if the token was rejected.  (Unused user_url/posts_url locals removed.)
    me_response = requests.request("GET", base_url + 'me', headers=headers).text
    json_me_response = json.loads(me_response)
    user_id = json_me_response['data']['id']
    pub_url = base_url + 'publications/424c42caa624/posts/'

    if not creds:
        return
    else:
        # Build the "X, Y on Unsplash" markdown credit fragment.
        img_creds = ""
        for auth_url, author in creds:
            img_creds += f"[{author}]({auth_url})"
            img_creds += ', ' if len(creds) > 1 else ' '
        img_creds += "on [Unsplash](https://unsplash.com/)"

    article += "\n\n*This article was written by a [GPT-2 neural network](https://openai.com/blog/better-language-models). All information in this story is most likely false, and all opinions expressed are fake. Weird to think about…\n\n"
    article += f"**This caption was artificially generated. Image downloaded automatically from {img_creds}.\n\n"
    article += "All links in this article are placeholders generated by the neural network, signifying that an actual link should have been generated there. These placeholders were later replaced by a link to the github project page.\n\n"
    article += "**futureMAG** is an experiment in automated storytelling/journalism. This story was created and published without human intervention.\n\n"
    article += "Code for this project available on github: "
    article += "**[sirmammingtonham/futureMAG](https://github.com/sirmammingtonham/futureMAG)**"

    # NOTE(review): run_name values like 'onezero_m' are not keys of `tags`,
    # so the else-branch lookup below raises KeyError — confirm intent.
    payload = {
        'title': title,
        'contentFormat': 'markdown',
        'tags': tag if run_name == 'onezero' else tags[run_name],
        'publishStatus': 'draft',
        'content': article
    }
    response = requests.request('POST', pub_url, data=payload, headers=headers)
    print(response.text)
    return payload
if __name__ == "__main__":
    # Tag sets per model run; only the 'onezero' run is currently enabled.
    tags = {#'montag': ['Fake News', 'Opinion', 'Artificial Intelligence', 'NLP', 'Future'],
            'onezero': ['Artificial Intelligence', 'Technology', 'NLP', 'Future'],
            #'futura': ['Sci Fi Fantasy', 'Artificial Intelligence', 'NLP', 'Future', 'Storytelling']
    }
    run_name = 'onezero_m'

    # Load the fine-tuned GPT-2 checkpoint and sample one story bounded by
    # the start/end-of-text tokens.
    sess = gpt2.start_tf_sess()
    gpt2.load_gpt2(sess, run_name)
    stories = gpt2.generate(
        sess, run_name, return_as_list=True,
        truncate="<|endoftext|>", prefix="<|startoftext|>",
        nsamples=1, batch_size=1, length=8000,
        temperature=1,
        top_p=0.9,
        split_context=0.5,
    )

    articles = []
    for story in stories:
        articles.append(split_story(story, run_name))

    # retrieve_images mutates each article list in place, so rebinding
    # `article` in the loop is harmless; only the first article is published.
    for article in articles:
        article = retrieve_images(article)
    publish(*articles[0])
cPull.py | HenryBlairG/CanvasClient | 2 | 12759279 | #! /usr/bin/python3
'''
Python script to pull through terminal the fileTree
'''
import sys
from src.CanvasBackend import semester_board as bk
if __name__ == '__main__':
    # First CLI argument is forwarded to the Canvas backend profile loader.
    bk.Profile(sys.argv[1])
training.py | ayivima/face_landmarks | 1 | 12759280 | <filename>training.py<gh_stars>1-10
"""Implements a function for training model."""
from math import inf
import time
import torch
__author__ = "<NAME> <<EMAIL>>"
def fit(
    model,
    criterion,
    optimizer,
    train_loader,
    test_loader,
    epochs=1,
    dynamic_lr=False,
    model_save=False,
    save_name="bestmodela.pt"
):
    """Trains a neural network and returns the lists of training
    and validation losses.

    Arguments
    ---------
    :model: Model to be trained
    :criterion: The Loss function
    :optimizer: The optimizer to be used for gradient descent
    :train_loader: A generator for loading training data as
        (images, keypoints) batches
    :test_loader: A generator for loading testing data as
        (images, keypoints) batches
    :epochs: The number of complete passes through training data
    :dynamic_lr: Specifies whether learning rate gets changed
        dynamically during training (stepped down at fixed loss
        thresholds)
    :model_save: Specifies whether best model should be saved,
        based on the lowest validation loss
    :save_name: Specifies the name to be used to save best model

    Returns
    -------
    (train_losses, test_losses): one average loss per epoch each.
    """
    rate_switch = 0
    train_losses, test_losses = [], []

    # Set up for GPU use if applicable; tensors are converted with
    # `flt_tensor` so they end up on the same device as the model.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    flt_tensor = (
        torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    )

    # BUGFIX: these must exist even when model_save is off — the final
    # status message used to read `save_epoch` unconditionally and raised
    # a NameError with the default model_save=False.
    min_val_loss = inf
    save_epoch = 0

    model.to(device)
    print("Started Training")

    for epoch in range(1, epochs + 1):
        starttime = time.time()
        running_loss = 0.0

        model.train()
        for batch_i, data in enumerate(train_loader):
            # Get and prepare one batch: flatten keypoints to
            # (batch, n_points * 2) and move everything to the device.
            images, key_pts = data
            images = images.to(device)
            key_pts = key_pts.to(device)
            key_pts = key_pts.view(key_pts.size(0), -1)
            key_pts = key_pts.type(flt_tensor)
            images = images.type(flt_tensor)

            # Forward pass
            output_pts = model(images)

            # Backpropagation
            loss = criterion(output_pts, key_pts)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Track and report the running average training loss.
            running_loss += loss.item()
            batch_num = batch_i + 1
            avg_running_loss = running_loss / batch_num
            print('Epoch: {}/{}, Batch Count: {}, Avg. Training Loss: {}'.format(
                epoch, epochs, batch_num, avg_running_loss
            ))

            # Optionally step the learning rate down at fixed loss
            # thresholds; `rate_switch` makes each step fire only once.
            if dynamic_lr:
                if avg_running_loss < 0.04 and rate_switch == 0:
                    optimizer.param_groups[0]['lr'] = 1e-4
                    rate_switch = 1
                elif avg_running_loss < 0.035 and rate_switch < 2:
                    optimizer.param_groups[0]['lr'] = 1e-5
                    rate_switch = 2
                elif avg_running_loss < 0.030 and rate_switch < 3:
                    optimizer.param_groups[0]['lr'] = 1e-10
                    rate_switch = 3
                print("Learning Rate:", optimizer.param_groups[0]['lr'])

        train_losses.append(avg_running_loss)

        # =============================================
        # Get Average Loss on a subset of the test data
        # =============================================
        model.eval()
        total_batches = 0
        total_test_loss = 0
        # no_grad: validation needs no autograd graph (saves memory,
        # identical loss values).
        with torch.no_grad():
            for images, key_pts in test_loader:
                total_batches += 1
                images = images.to(device)
                key_pts = key_pts.view(key_pts.size(0), -1)
                key_pts = key_pts.type(flt_tensor)
                images = images.type(flt_tensor)

                # Forward pass only; no optimizer step during validation.
                output_pts = model(images)
                loss = criterion(output_pts, key_pts)
                total_test_loss += loss

                # Evaluate on at most 200 batches to keep epochs fast.
                if total_batches == 200:
                    break

        avg_val_loss = total_test_loss / total_batches
        print('\t Average Validation Loss: {}'.format(avg_val_loss))
        avg_val_loss_item = avg_val_loss.item()
        test_losses.append(avg_val_loss_item)

        # Checkpoint whenever validation loss improves.
        if model_save:
            if avg_val_loss_item < min_val_loss:
                min_val_loss = avg_val_loss_item
                save_epoch = epoch
                torch.save(model.state_dict(), save_name)

        # Rough ETA from this epoch's wall-clock duration.
        # BUGFIX: minutes were previously computed as seconds % 60.
        est_comp = (time.time() - starttime) * (epochs - epoch)
        est_comp_hrs = int(est_comp / 3600)
        est_comp_mins = int((est_comp - (est_comp_hrs * 3600)) / 60)
        print(
            "\t Estimated Completion Time: {} hours, {} minutes".format(
                est_comp_hrs, est_comp_mins
            )
        )

    if model_save:
        print('Finished Training. Best model saved at Epoch {}'.format(save_epoch))
    else:
        print('Finished Training.')
    return train_losses, test_losses
| 3.140625 | 3 |
demo.py | GioCurnis/FCGF | 0 | 12759281 | import logging
import os
import numpy as np
import argparse
import open3d as o3d
from urllib.request import urlretrieve
from util.visualization import get_colored_point_cloud_feature
from util.misc import extract_features
from model.resunet import ResUNetBN2C
import torch
# One-time setup at import: fetch the pretrained FCGF weights and a sample
# point cloud if they are not already present in the working directory.
# NOTE(review): this performs network I/O at import time — confirm that is
# acceptable wherever this module is imported.
if not os.path.isfile('ResUNetBN2C-16feat-3conv.pth'):
    print('Downloading weights...')
    urlretrieve(
        "https://node1.chrischoy.org/data/publications/fcgf/2019-09-18_14-15-59.pth",
        'ResUNetBN2C-16feat-3conv.pth')
if not os.path.isfile('redkitchen-20.ply'):
    print('Downloading a mesh...')
    urlretrieve("https://node1.chrischoy.org/data/publications/fcgf/redkitchen-20.ply",
                'redkitchen-20.ply')
def demo(config):
    """Extract FCGF features for one point cloud and colour it by feature.

    Loads the checkpoint named in ``config.model``, runs feature extraction
    on the cloud in ``config.input`` at ``config.voxel_size``, and builds a
    feature-coloured point cloud. The visualisation/export calls are kept
    disabled, exactly as in the original.
    """
    run_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Restore the pretrained ResUNet and prepare it for inference.
    state = torch.load(config.model)
    net = ResUNetBN2C(1, 16, normalize_feature=True, conv1_kernel_size=3, D=3)
    net.load_state_dict(state['state_dict'])
    net.eval()
    net = net.to(run_device)

    cloud = o3d.io.read_point_cloud(config.input)
    coords, feats = extract_features(
        net,
        xyz=np.array(cloud.points),
        voxel_size=config.voxel_size,
        device=run_device,
        skip_check=True)

    # Rebuild a point cloud from the downsampled coordinates and colour it
    # from the extracted feature vectors.
    colored = o3d.geometry.PointCloud()
    colored.points = o3d.utility.Vector3dVector(coords)
    colored = get_colored_point_cloud_feature(
        colored,
        feats.detach().cpu().numpy(),
        config.voxel_size)
    #o3d.visualization.draw_geometries([colored])
    #o3d.io.write_triangle_mesh('/home/curnis/result/fcgf/mesh.obj', colored)
    print(type(colored))
if __name__ == '__main__':
    # Command-line front end: pointcloud path, checkpoint path, voxel size.
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '-i',
        '--input',
        default='redkitchen-20.ply',
        type=str,
        help='path to a pointcloud file')
    cli.add_argument(
        '-m',
        '--model',
        default='ResUNetBN2C-16feat-3conv.pth',
        type=str,
        help='path to latest checkpoint (default: None)')
    cli.add_argument(
        '--voxel_size',
        default=0.025,
        type=float,
        help='voxel size to preprocess point cloud')
    demo(cli.parse_args())
| 2.3125 | 2 |
ui/gcode_display.py | kyapp69/GCodeViewer | 3 | 12759282 | <reponame>kyapp69/GCodeViewer
import wx
import wx.lib.newevent
#try:
# from glcanvas import GLCanvas
#except ImportError:
# print(ex)
# from ui.glcanvas import GLCanvas
from ui.glcanvas import GLCanvas
import logging
class GcodeDisplayPanel(wx.Panel):
    """Panel pairing a draw-skip slider with the G-code OpenGL canvas."""

    def __init__(self, parent):
        """Create the child widgets, lay them out and wire the update event."""
        self.parent = parent
        wx.Panel.__init__(self, self.parent, -1, style=wx.RAISED_BORDER)
        logging.debug("Starting api")
        self.status = ""

        # Child widgets: slider on the left, GL canvas taking the rest.
        self.skip_slider = wx.Slider(self, value=50, minValue=1, maxValue=200)
        self.canvas = GLCanvas(self)

        layout = wx.BoxSizer(wx.HORIZONTAL)
        layout.Add(self.skip_slider, 1, wx.ALL | wx.EXPAND, 5)
        layout.Add(self.canvas, 4, wx.ALL | wx.EXPAND, 5)

        # Custom event other components can post to request a redraw.
        self.UpdateEvent, EVT_UPDATE = wx.lib.newevent.NewEvent()
        self.Bind(EVT_UPDATE, self.updateDisplay)

        self.SetAutoLayout(True)
        self.SetSizer(layout)
        self.SetFocus()

    def load_file(self, afile):
        """Ask the backend to load *afile*, reporting progress via callbacks."""
        # NOTE(review): self.api is never assigned in this class — verify it
        # is injected elsewhere before load_file is called.
        self.status = "Loading"
        logging.info("Loading file")
        self.api.load_gcode(afile, self._gcode_load_call_back, self._gcode_complete_call_back)

    def updateDisplay(self, message):
        """Handler for the custom update event (currently just logs)."""
        logging.info('Updating display')

    def shutdown(self):
        """No teardown required yet."""
        pass
| 2.140625 | 2 |
test.py | DragonYong/GenerateAncientPoetryCodeLiterature | 0 | 12759283 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 4/12/21-11:00
# @Author : TuringEmmy
# @Email : <EMAIL>
# @WeChat : superior_god
# @File : test.py
# @Project : 00PythonProjects
import tensorflow as tf
# Load the Jay Chou lyrics corpus, drop the 8-character file preamble and
# dump the remaining text to stdout.
with open("data/jay.csv") as corpus_file:
    lyrics = corpus_file.read()
print(lyrics[8:])
| 2.515625 | 3 |
test/testMain.py | yakumo-saki/zabbix-getdata | 0 | 12759284 | <filename>test/testMain.py<gh_stars>0
import unittest
import sys
sys.path.append("../")
class TestMain(unittest.TestCase):
    """Test class of mqtt_zabbix_gateway.py."""

    def test_parse_value(self):
        """Test method for mqtt_zabbix_gateway.py (cases currently disabled)."""
        # self.assertEqual(1234.567, parse_value("b'1234.567'"))
        # self.assertEqual(1234, parse_value("b'1234'"))
        # self.assertEqual("abcd", parse_value("b'abcd'"))
# Allow running this module directly; unittest discovers TestMain's methods.
if __name__ == "__main__":
    unittest.main()
| 2.78125 | 3 |
TP1.py | Ismail-Maj/Large-interaction-networks | 0 | 12759285 | import numpy as np
import sys
import os, psutil
from collections import deque
def mem():
    """Print this process's resident set size, in megabytes, to stderr."""
    rss_bytes = psutil.Process(os.getpid()).memory_info().rss
    print(rss_bytes / 1000000, "Mb", file=sys.stderr)
class Graph:
    """Static undirected graph stored in a flat (CSR-like) adjacency layout.

    Nodes are integers in ``[0, number_nodes)``. All adjacency lists are
    packed back-to-back into ``neighbors_array``; ``index[v]`` is the
    offset of node v's first neighbour and ``deg[v]`` its degree, so the
    total memory is proportional to the sum of degrees (2 * nb_edges).
    """

    def __init__(self, left, right, number_nodes):
        """Build the graph from two parallel edge-endpoint arrays.

        :param left: array-like of edge source node ids
        :param right: array-like of edge target node ids (same length)
        :param number_nodes: number of nodes, i.e. max node id + 1

        BUGFIX: the original read the module globals ``l1``/``l2``/``maxIdx``
        here instead of the constructor arguments (and allocated ``deg``
        twice), so the class only worked inside that one script. It now
        uses its parameters, which is identical in that script (where
        ``left is l1`` and ``number_nodes == maxIdx + 1``).
        """
        self.nb_nodes = number_nodes
        self.nb_edges = len(left)

        # Degree of every node: count both endpoints of every edge.
        self.deg = np.zeros(self.nb_nodes, dtype=np.int32)
        uniques, counts = np.unique(np.concatenate((left, right)), return_counts=True)
        self.deg[uniques] = counts

        # Prefix sums of the degrees give each node's slice offset.
        self.index = np.zeros(self.nb_nodes, dtype=np.int32)
        self.index[1:] = np.cumsum(self.deg[:-1])

        # Fill the packed adjacency array; write_pos tracks the next free
        # slot inside each node's slice.
        write_pos = np.copy(self.index)
        total_slots = self.index[self.nb_nodes - 1] + self.deg[self.nb_nodes - 1]
        self.neighbors_array = np.zeros(total_slots, dtype=np.int32)
        for a, b in zip(left, right):
            self.neighbors_array[write_pos[a]] = b
            self.neighbors_array[write_pos[b]] = a
            write_pos[a] += 1
            write_pos[b] += 1

    def neighbors(self, node):
        """Return node's neighbour ids as a zero-copy numpy view."""
        start = self.index[node]
        return self.neighbors_array[start:start + self.deg[node]]
if __name__ == "__main__":
    # argv[0]: edge-list file, argv[1]: upper bound on the number of edges,
    # argv[2]/argv[3]: the two nodes whose distance is requested.
    argv = sys.argv[1:]
    estimNbAretes = int(argv[1])
    # Read the file and build the two parallel edge-endpoint arrays.
    l1 = np.zeros(estimNbAretes, dtype=np.int32)
    l2 = np.zeros(estimNbAretes, dtype=np.int32)
    with open(argv[0], 'r') as f:
        count = 0
        for line in f:
            if line[0] != '#':  # skip comment lines in the edge list
                newline = line.split()
                a = int(newline[0], 10)
                b = int(newline[1], 10)
                l1[count] = a
                l2[count] = b
                count += 1
    # Largest node id seen (unused slots are zero, which cannot exceed it).
    maxIdx = max(np.max(l1), np.max(l2))
    # Trim the over-allocated arrays to the number of edges actually read.
    l1 = l1[:count]
    l2 = l2[:count]
    # NOTE(review): Graph.__init__ as written reads the globals l1/l2/maxIdx
    # rather than its parameters, so these exact names must exist here —
    # verify before renaming them.
    G = Graph(l1, l2, maxIdx + 1)
    del l1
    del l2
    mem()
    # Report the number of nodes and edges.
    print("n=" + str(G.nb_nodes))
    print("m=" + str(G.nb_edges))
    # Compute and report the maximum degree.
    degMax = np.max(G.deg)
    print("degmax=" + str(degMax))
    # Compute and report the distance between u and v.
    u = int(argv[2])
    v = int(argv[3])
    res = -1
    # Breadth-first search from u: `to_visit` is the frontier queue, `seen`
    # marks visited nodes and `dist` their distance from u.
    if u == v:
        res = 0
    else:
        seen = np.zeros(maxIdx + 1, dtype=np.int32)
        dist = np.zeros(maxIdx + 1, dtype=np.int32)
        seen[u] = 1
        to_visit = deque([])
        for w in G.neighbors(u):
            seen[w] = 1
            dist[w] = 1
            to_visit.append(w)
        while to_visit:
            w = to_visit.popleft()
            if w == v:
                res = dist[w]
                break
            else:
                for z in G.neighbors(w):
                    if not seen[z]:
                        to_visit.append(z)
                        seen[z] = 1
                        dist[z] = dist[w] + 1
    mem()
    # res == -1 means v was never reached: infinite distance.
    if res == -1:
        print("dist=" + str(float('inf')))
    else:
        print("dist=" + str(res))
| 2.96875 | 3 |
questions/api/urls.py | alexyvassili/otuspy-hasker | 0 | 12759286 | <filename>questions/api/urls.py
from django.urls import path
from questions.api import api_schema
from questions.api import views
# ### DRF URL's ###
# Question API routes. Both the bare prefix and /v1.0/ serve the schema
# view; the remaining routes are the versioned question endpoints.
urlpatterns = [
    path('', api_schema.schema_view),
    path('v1.0/', api_schema.schema_view),
    path('v1.0/questions/', views.questions_list),
    path('v1.0/questions/<int:uid>/', views.question_detail),
    path('v1.0/questions/<int:uid>/answers/', views.question_answers),
    path('v1.0/trending/', views.trending),
    path('v1.0/search/', views.search),
]
| 2.046875 | 2 |
weight/tests/maintest.py | mfa/weight-app | 0 | 12759287 | #!/bin/env python
""" Part of weight_app
:copyright: (c) 2012 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
# testsuite
# run: python main.py
import unittest
import os
import sys
def suite():
    """Assemble and return the weight_app test suite.

    Test modules are imported lazily so the path/environment tweaks done
    by the __main__ block happen first.
    """
    from base import BaseTest
    from test_importer import ImportTest
    from test_forms import FormTest

    collected = unittest.TestSuite()
    # Same registration order as before: base, importer, forms.
    for test_case in (BaseTest, ImportTest, FormTest):
        collected.addTest(unittest.makeSuite(test_case))
    return collected
if __name__ == '__main__':
    # this_file=os.path.join(os.path.dirname(__file__),'../../env_weight/bin/activate_this.py')
    # Put the package root on the import path and flag test mode for the
    # application before running the suite defined above.
    sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
    os.environ['TEST'] = 'yes'
    unittest.main(defaultTest='suite')
| 2.046875 | 2 |
zmqrpc/client/__init__.py | brgirgis/pyzmqrpc3 | 0 | 12759288 | <reponame>brgirgis/pyzmqrpc3<gh_stars>0
from .ZmqRpcClient import ZmqRpcClient
| 1.078125 | 1 |
day-01/part-2/bebert.py | Corendos/adventofcode-2021 | 6 | 12759289 | from tool.runners.python import SubmissionPy
class BebertSubmission(SubmissionPy):
    """AoC 2021 day 1 part 2: count increases of a 3-measurement window sum."""

    def run(self, s):
        """Return how many 3-wide sliding-window sums exceed the previous one.

        Consecutive windows share two terms, so window i+1 > window i
        exactly when the value entering the window exceeds the one leaving
        it. The sentinels replicate the original's start-up behaviour
        (assumes real depths stay below 100000, as the original did).
        """
        window = [100000, 100000, 100000]
        increases = 0
        for raw_line in s.splitlines():
            depth = int(raw_line)
            if depth > window[0]:
                increases += 1
            window = [window[1], window[2], depth]
        return increases
def test_bebert():
    """
    Run `python -m pytest ./day-01/part-2/bebert.py` to test the submission.
    """
    # The AoC sample: 5 of the three-measurement sliding-window sums increase.
    assert BebertSubmission().run("""
199
200
208
210
200
207
240
269
260
263
""".strip()) == 5
| 2.6875 | 3 |
MessageConverters/MCF.py | meberli/mqtt-relais | 0 | 12759290 | #!/usr/bin/python3
from MessageConverters.MessageConverter import MessageConverter
import logging
from datetime import datetime
from datetime import timezone
class MCF(MessageConverter):
    """Decoder for MCF-LW (MCF88/enginko-style) LoRa sensor payloads.

    The first payload byte selects a message type (see ``msg_types``); the
    matching ``parse_<type>`` method consumes ``self.payload`` — a list of
    ints popped front to back, with multi-byte fields assembled
    little-endian — and returns a dict carrying a ``fields`` mapping ready
    for publishing.

    NOTE(review): field layouts below are as implemented; verify byte
    offsets and scaling against the vendor payload specification.
    """

    # Message-type byte -> suffix of the parse_* method dispatched in _convert.
    msg_types = {
        0x01: "time_sync_request",
        0x04: "t_p_rh",
        0x05: "uart",
        0x09: "power",
        0x0A: "io",
        0x0B: "report_data",
        0x0C: "t_p_rh_lux_voc",
        0x0D: "analog_data",
        0x0E: "t_p_rh_lux_voc_co2",
        0x0F: "special_data",
        0x10: "digital_data",
        0x11: "length_error"
    }

    def __init__(self, devicename=None):
        """Create a converter bound to *devicename* (tagged onto entries)."""
        super().__init__(devicename)

    def __parse_time(self):
        """Pop 4 bytes of bit-packed device time and return a unix timestamp.

        The 32-bit little-endian value packs, from the most significant
        bits down: year (offset 2000), month, day, hours, minutes, seconds.
        """
        a = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        value = list(a.to_bytes(4, byteorder='little'))
        self.logger.info(f"time payload: {value}")
        # Unpack the bit fields from the four raw bytes.
        year = 2000 + (value[3] >> 1)
        month = ((value[3] & 0x01) << 3) | (value[2] >> 5)
        day = value[2] & 0x1f
        hours = value[1] >> 3
        minutes = ((value[1] & 0x7) << 3) | (value[0] >> 5)
        seconds = value[0] & 0x1f
        self.logger.info(
            f'year : {year}, '
            f'month : {month}, '
            f'day : {day}, '
            f'hours : {hours}, '
            f'minutes : {minutes}, '
            f'seconds : {seconds}')
        # datetime(year, month, day, hour, minute, second, microsecond)
        # NOTE(review): the timestamp is taken in the host's local zone —
        # confirm the device clock's zone matches.
        date_time_obj = datetime(year, month, day, hours, minutes, seconds)
        return int(datetime.timestamp(date_time_obj))

    def parse_time_sync_request(self):
        """Decode a 0x01 time-sync request (sync id/version, app type, option)."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        fields = {}
        # sync id (4 bytes, little-endian)
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        fields['sync_id'] = value
        # sync version (3 bytes)
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16)
        fields['sync version'] = value
        # application type (2 bytes)
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8)
        fields["app_type"] = value
        # option (1 byte)
        value = self.payload.pop(0)
        fields["option"] = value
        entry['fields'] = fields
        return entry

    def parse_t_p_rh(self):
        """Decode a 0x04 temperature/humidity/pressure message."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        fields = {}
        # device timestamp
        fields['time'] = self.__parse_time()
        # temperature: 2 bytes in hundredths of a degree
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8) / 100
        fields['temperature'] = value
        # humidity: 1 byte in half-percent steps
        value = self.payload.pop(0) / 2
        fields["humidity"] = value
        # pressure: 3 bytes
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16)
        fields["pressure"] = value
        entry['fields'] = fields
        return entry

    def parse_uart(self):
        """0x05 UART data — decoding not implemented yet."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        self.logger.warn("parse function not implemented - skipping.")
        return entry

    def parse_power(self):
        """Decode a 0x09 power/metering message.

        TODO: there is a 2nd payload version with more data.
        """
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        fields = {}
        # device timestamp
        fields['time'] = self.__parse_time()
        # NOTE(review): 'active_energy' is parsed twice below and the second
        # 4-byte read overwrites the first — the first block is likely a
        # different field; verify against the vendor payload spec.
        # active energy
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        fields['active_energy'] = value
        # active energy
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        fields['active_energy'] = value
        # reactive energy
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        fields['reactive_energy'] = value
        # apparent energy
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        fields['apparent_energy'] = value
        # running_time
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        fields['running_time'] = value
        entry['fields'] = fields
        return entry

    def parse_io(self):
        """Decode a 0x0A digital I/O message (inputs/outputs/events bitmaps)."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        fields = {}
        # device timestamp
        fields['time'] = self.__parse_time()
        # inputs bitmap, published as a binary string
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        fields['inputs'] = bin(value)[2:]
        # outputs bitmap
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        fields['outputs'] = bin(value)[2:]
        # events bitmap
        value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8 |
            self.payload.pop(0) << 16 |
            self.payload.pop(0) << 24)
        fields['events'] = bin(value)[2:]
        entry['fields'] = fields
        return entry

    def parse_report_data(self):
        """0x0B report data — decoding not implemented yet."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        self.logger.warn("parse function not implemented - skipping.")
        return entry

    def parse_t_p_rh_lux_voc(self):
        """Decode a 0x0C message: t/p/rh plus illuminance and VOC."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        fields = {}
        # first part is identical with t_p_rh
        fields.update(self.parse_t_p_rh()['fields'])
        # illuminance (2 bytes)
        value = value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8)
        fields['illuminance'] = value
        # voc (2 bytes)
        value = value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8)
        fields['voc'] = value
        entry['fields'] = fields
        return entry

    def parse_analog_data(self):
        """0x0D analog data — decoding not implemented yet."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        self.logger.warn("parse function not implemented - skipping.")
        return entry

    def parse_t_p_rh_lux_voc_co2(self):
        """Decode a 0x0E message: t/p/rh/lux/voc plus CO2."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        fields = {}
        # first part is identical with t_p_rh_lux_voc
        fields.update(self.parse_t_p_rh_lux_voc()['fields'])
        # co2 (2 bytes)
        value = value = (
            self.payload.pop(0) |
            self.payload.pop(0) << 8)
        fields['co2'] = value
        entry['fields'] = fields
        return entry

    def parse_special_data(self):
        """0x0F special data — decoding not implemented yet."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        self.logger.warn("parse function not implemented - skipping.")
        return entry

    def parse_digital_data(self):
        """Decode a 0x10 digital-data message.

        A leading sub-type byte selects the layout: 0 = up to 16 two-byte
        inputs; 1 = timestamp + frequency (+ optional battery); 2 = up to
        five (timestamp, input) pairs (+ optional battery).
        """
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        fields = {}
        # sub-type selector
        value = self.payload.pop(0)
        fields['type'] = value
        if value == 0:
            # Up to 16 two-byte input readings, as many as remain.
            for num in range(16):
                if (self.payload):
                    value = value = (
                        self.payload.pop(0) |
                        self.payload.pop(0) << 8)
                    fields[f'input_{num}'] = value
        elif (value == 1):
            # device timestamp
            fields['time'] = self.__parse_time()
            # frequency in tenths of Hz
            value = (
                self.payload.pop(0) |
                self.payload.pop(0) << 8)
            fields['frequency'] = value/10
            # battery percentage (optional trailing byte)
            if (self.payload):
                value = self.payload.pop(0)
                fields['battery_percentage'] = value
        elif (value == 2):
            # Up to five (timestamp, 2-byte input) pairs.
            for num in range(5):
                if (self.payload):
                    fields[f'time_{num}'] = self.__parse_time()
                    value = value = (
                        self.payload.pop(0) |
                        self.payload.pop(0) << 8)
                    fields[f'input_{num}'] = value
            # battery percentage (optional trailing byte)
            if (self.payload):
                value = self.payload.pop(0)
                fields['battery_percentage'] = value
        else:
            self.logger.warn(f'unknown type "{value}" - skipping.')
        entry['fields'] = fields
        return entry

    def parse_serial_data(self):
        """Serial data — not implemented; note: no msg_types entry dispatches here."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        self.logger.warn("parse function not implemented - skipping.")
        return entry

    def parse_length_error(self):
        """Decode a 0x11 length-error report (battery level, hw/fw version)."""
        entry = {}
        if (len(self.payload) == 0):
            self.logger.warn("message has no content - skipping.")
            return entry
        fields = {}
        # ignore seq no (2 bytes popped and discarded)
        self.payload.pop(0) | self.payload.pop(0) << 8
        # battery level
        value = self.payload.pop(0)
        fields['batt_level'] = str(value)
        # hw & fw version
        value = self.payload.pop(0)
        fields['hwfw'] = str(value)
        entry['fields'] = fields
        return entry

    def _hasDownlinkMessage(self):
        # NOTE(review): self.downlinkMessage is never assigned in this class —
        # presumably set by the MessageConverter base; verify.
        return self.downlinkMessage is not None

    def _getDownlinkMessage(self):
        return self.downlinkMessage

    def _convert(self, payload, port):
        '''
        Decode *payload* into a list of publishable entries.

        ldc publish format:
        [
            {
                "measurement": "abc",
                "tags": {
                    "tag_a": "irgendwas",
                    "tag_b": "irgendwasanderes"
                },
                "time": "2021-02-01",
                "fields": {
                    "field_a": "value_a",
                    "field_n": "value_n"
                }
            }
        ]
        '''
        publ_array = []
        # Stamp entries with the server's receive time (UTC).
        dt = datetime.utcnow()
        self.current_ts = int(dt.replace(tzinfo=timezone.utc).timestamp())
        self.current_time = dt.strftime('%Y-%m-%dT%H:%M:%SZ')
        self.payload = list(bytearray(payload))
        self.logger.debug(
            "decoding payload {}. servertime is {} (ts: {})".format(
                payload,
                self.current_time,
                self.current_ts))
        try:
            # NOTE(review): self.cursor is set but never used.
            self.cursor = 0
            while len(self.payload) > 0:
                # header: first byte selects the message type
                messagetype_byte = self.payload.pop(0)
                self.logger.debug("message type: {}".format(
                    hex(messagetype_byte)))
                messagetype = self.msg_types.get(messagetype_byte, None)
                if messagetype:
                    method_name = "parse_" + messagetype
                    method = getattr(self, method_name, lambda: None)
                    if method:
                        entry = method()
                        # Discard any unconsumed bytes: at most one message
                        # is decoded per call (this also ends the loop).
                        self.payload = []
                        if entry:
                            # add common tags and fields
                            entry["ts"] = self.current_time
                            if "tags" not in entry:
                                entry["tags"] = {}
                            entry["tags"]["devicename"] = self.devicename
                            entry["tags"]["messagetype"] = messagetype
                            self.logger.debug(
                                "method_name: {}, result:{}".format(
                                    method_name,
                                    entry))
                            publ_array.extend([entry])
                    else:
                        self.logger.exception(
                            "Method for {} nor implemented".format(
                                method_name))
                else:
                    self.logger.exception(
                        "Unknown Message Type: {}".format(messagetype_byte))
        except Exception:
            # Best-effort decoding: log and return whatever was decoded.
            self.logger.exception("Error while trying to decode payload..")
        return publ_array
| 2.796875 | 3 |
make_stations_json.py | nmercer/Ask-The-L-Train-Alexa-Backend | 1 | 12759291 | <filename>make_stations_json.py
# Given stations.csv, creates a stations.json of stop groupings where each group's lat/lon is the average of its member stops.
import argparse, csv, json, sys
def main():
    """Read a stations CSV and write grouped-station JSON to stdout.

    Expected input columns: parent_id, stop_id, name, lat, lon. Stops
    sharing a parent_id form one station whose location is the average of
    its member stops and whose name joins the distinct member names.
    """
    parser = argparse.ArgumentParser(description='Generate stations JSON file for MtaSanitize server.')
    parser.add_argument('stations_file', default='stations.json')
    args = parser.parse_args()

    stations = {}
    # BUGFIX: the csv module needs a text-mode file with newline='' on
    # Python 3; the original opened in 'rb', which only worked on Python 2.
    with open(args.stations_file, 'r', newline='') as f:
        reader = csv.DictReader(f)
        for row in reader:
            try:
                station = stations[row['parent_id']]
                station['stops'][row['stop_id']] = [float(row['lat']), float(row['lon'])]
                station['name'].add(row['name'])
            except KeyError:
                # First stop seen for this parent: create the group.
                stations[row['parent_id']] = {
                    'name': set([row['name']]),
                    'stops': {
                        row['stop_id']: [float(row['lat']), float(row['lon'])]
                    }
                }

    for station in stations.values():
        # Sort before joining so the output is deterministic (set iteration
        # order is not).
        station['name'] = ' / '.join(sorted(station['name']))
        station['location'] = [
            sum(v[0] for v in station['stops'].values()) / float(len(station['stops'])),
            sum(v[1] for v in station['stops'].values()) / float(len(station['stops']))
        ]

    # BUGFIX: dict views are not JSON-serializable on Python 3; wrap in list().
    json.dump(list(stations.values()), sys.stdout, sort_keys=True, indent=4, separators=(',', ': '))
# Script entry point.
if __name__ == '__main__':
    main()
| 3.25 | 3 |
wagtail_wordpress_import/test/tests/test_linebreaks_wp.py | fabienheureux/wagtail-wordpress-import | 22 | 12759292 | <reponame>fabienheureux/wagtail-wordpress-import
import os
from bs4 import BeautifulSoup
from django.test import TestCase
from wagtail_wordpress_import.prefilters.linebreaks_wp_filter import (
filter_linebreaks_wp,
)
# Absolute path of the test package and the fixtures directory beside it.
BASE_PATH = os.path.dirname(os.path.dirname(__file__))
FIXTURES_PATH = BASE_PATH + "/fixtures"
class TestLinebreaks(TestCase):
    """Tests for the WordPress linebreak pre-filter."""

    def setUp(self):
        """Load the raw fixture text and run it through the filter once."""
        self.raw_html_file = open(f"{FIXTURES_PATH}/raw_text.txt", "r")
        # BUGFIX: the original never closed this handle; close it when the
        # test finishes while keeping the attribute available to tests.
        self.addCleanup(self.raw_html_file.close)
        self.stream = self.raw_html_file.read()
        self.soup = BeautifulSoup(filter_linebreaks_wp(self.stream), "html.parser")

    def test_linebreaks_wp(self):
        """The fixture text is split into the expected number of <p> tags."""
        p_tags = self.soup.findAll("p")
        self.assertEqual(len(p_tags), 7)

    def test_simple_string(self):
        """Each input line becomes its own paragraph, ending in its number."""
        raw = """line 1
line 2
line 3"""
        soup = BeautifulSoup(filter_linebreaks_wp(raw), "html.parser")
        paragraphs = soup.findAll("p")
        self.assertEqual(len(paragraphs), 3)
        for i in range(len(paragraphs)):
            self.assertEqual(paragraphs[i].text.strip()[-1], str(i + 1))
| 2.546875 | 3 |
detect_trt.py | hitfeelee/rtm3d | 2 | 12759293 | <reponame>hitfeelee/rtm3d
import tensorrt as trt
import argparse
import os
from utils import trt_utils
from models.configs.detault import CONFIGS as config
import torch
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
from datasets.dataset_reader import DatasetReader
from preprocess.data_preprocess import TestTransform
from models import model_factory
import tqdm
import numpy as np
import time
import cv2
from utils import ParamList
from utils import model_utils
from utils import visual_utils
def get_engine(engine_file_path):
    """Deserialize and return a TensorRT engine from *engine_file_path*.

    Returns None when no serialized engine exists on disk, matching the
    original's implicit fall-through.
    """
    if not os.path.exists(engine_file_path):
        return None
    # If a serialized engine exists, use it instead of building an engine.
    print("Reading engine from file {}".format(engine_file_path))
    with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
        return runtime.deserialize_cuda_engine(f.read())
def setup(args):
    """Build the config, model and test dataset from the CLI namespace.

    Returns (model, dataset, cfg); the model is moved to the configured
    device and switched to eval mode.
    """
    cfg = config.clone()
    if len(args.model_config) > 0:
        cfg.merge_from_file(args.model_config)

    run_device = torch.device(cfg.DEVICE) if torch.cuda.is_available() else torch.device('cpu')
    cfg.update({'DEVICE': run_device})

    net = model_factory.create_model(cfg)
    data = DatasetReader(cfg.DATASET.PATH, cfg,
                         augment=TestTransform(cfg.INPUT_SIZE[0]), is_training=False, split='test')
    net.to(run_device)
    net.eval()
    return net, data, cfg
def detect(model, dataset, cfg):
    """Run RTM3D TensorRT inference over *dataset* and display detections.

    NOTE(review): reads the module-level `args` namespace (created in the
    __main__ block) for the engine path — this function cannot be reused
    without it; consider passing the path explicitly.
    """
    with get_engine(args.engine_file_path) as engine, engine.create_execution_context() as context:
        inputs, outputs, bindings, stream = trt_utils.allocate_buffers(engine)
        # Do inference
        nb = len(dataset)
        pbar = tqdm.tqdm(dataset, total=nb)  # progress bar
        print(('\n' + '%10s' * 3) % ('mem', 'targets', 'time'))
        # videowriter = cv2.VideoWriter('rtm3d_detect.mp4', cv2.VideoWriter.fourcc(*'mp4v'), 1, (848, 624),True)
        num_classes = len(cfg.DATASET.OBJs)
        for imgs, targets, paths, _, _ in pbar:
            # Undo the dataset normalisation to recover a displayable
            # uint8 BGR image for visualisation.
            src = imgs.clone().permute(1, 2, 0).contiguous().cpu().numpy()
            src = (src * dataset._norm_params['std_rgb'] + dataset._norm_params['mean_rgb']) * 255
            src = src.astype(np.uint8)
            imgs = np.ascontiguousarray(imgs.unsqueeze(dim=0).numpy())
            h, w = imgs.shape[2:]
            # Collect one camera intrinsics matrix K per image in the batch.
            img_ids = targets.get_field('img_id')
            Ks = targets.get_field('K')
            Bs = imgs.shape[0]
            NKs = [None] * Bs
            for i in range(Bs):
                NKs[i] = Ks[img_ids == i][0:1, :]
            NKs = torch.cat(NKs, dim=0).to(cfg.DEVICE)
            invKs = NKs.view(-1, 3, 3).inverse()
            # Set host input to the image. The trt_utils.do_inference function will copy the input to the GPU before executing.
            nh, nw = int(h // cfg.MODEL.DOWN_SAMPLE), int(w // cfg.MODEL.DOWN_SAMPLE)
            output_shapes = [[1, num_classes, nh, nw], [1, 8, nh, nw]]
            inputs[0].host = imgs
            t1 = time.time()
            trt_outputs = trt_utils.do_inference_v2(context, bindings=bindings, inputs=inputs, outputs=outputs, stream=stream)
            # Reshape the flat TensorRT outputs into (heatmap, regression)
            # tensors and decode them into 3D box predictions.
            trt_outputs = [torch.from_numpy(output.reshape(shape)).to(cfg.DEVICE) for output, shape in zip(trt_outputs, output_shapes)]
            preds = model.inference(trt_outputs, invKs)
            t2 = time.time()
            # Progress-bar status: GPU memory, target count, inference time.
            mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
            s = ('%10s' + '%10.4g' * 2) % (mem, targets.get_field('mask').shape[0], t2 - t1)
            pbar.set_description(s)
            Ks = NKs
            # Blank square canvas for the bird's-eye view beside the image.
            H, W, _ = src.shape
            bird_view = np.zeros((H, H, 3), dtype=np.uint8)
            src_bv = np.copy(bird_view)
            # src_optim = np.copy(src)
            # src_optim_bv = np.copy(bird_view) + np.array([50, 50, 50], dtype=np.uint8)
            # src_vertex_reg = np.copy(src)
            # src_vertex_reg_bv = np.copy(bird_view) + np.array([100, 100, 100], dtype=np.uint8)
            if preds[0] is not None:
                # Scale the intrinsics back to full resolution and unpack
                # the per-object prediction columns into a ParamList.
                K = Ks[0].cpu().numpy()
                K[:6] *= cfg.MODEL.DOWN_SAMPLE
                pred = preds[0].cpu().numpy()
                pred_out = ParamList.ParamList((0, 0))
                pred_out.add_field('class', pred[:, 0].astype(np.int32))
                pred_out.add_field('alpha', pred[:, 1])
                # pred_out.add_field('bbox', pred[:, 2:6])
                pred_out.add_field('dimension', pred[:, 2:5])
                pred_out.add_field('location', pred[:, 5:8])
                pred_out.add_field('Ry', pred[:, 8])
                pred_out.add_field('score', pred[:, 9])
                # pred_out.add_field('vertex', pred[:, 14:].reshape(-1, 8, 2))
                pred_out.add_field('K', K.reshape(1, 9).repeat((pred.shape[0]), axis=0))
                # Ground truth for comparison: drop masked/noisy targets.
                targ = ParamList.ParamList(targets.size, is_training=False)
                targ.copy_field(targets, ['mask', 'class', 'noise_mask',
                                          'dimension', 'location', 'Ry', 'alpha'])
                m_mask = targ.get_field('mask').bool()
                noise_mask = targ.get_field('noise_mask')
                m_mask &= noise_mask.bool().bitwise_not()
                targ.update_field('mask', m_mask)
                N = m_mask.float().sum()
                targ.delete_by_mask()
                targ = targ.numpy()
                targ.update_field('K', K.reshape(1, 9).repeat((N,), axis=0))
                # optim_out = model_utils.optim_decode_bbox3d(pred_out, K)
                # Draw predictions on the image; ground truth (red) and
                # predictions (green) on the bird's-eye view.
                visual_utils.cv_draw_bboxes_3d_kitti(src, pred_out,
                                                     label_map=cfg.DATASET.OBJs)
                visual_utils.cv_draw_bbox3d_birdview(src_bv, targ, color=(0, 0, 255))
                visual_utils.cv_draw_bbox3d_birdview(src_bv, pred_out, color=(0, 255, 0))
            # Show image and bird's-eye view side by side at half size;
            # 'q' quits. NOTE(review): runs even when no detections were
            # produced for the frame — confirm intended.
            kf = np.concatenate([src, src_bv], axis=1)
            kf = cv2.resize(kf, (kf.shape[1] // 2, kf.shape[0] // 2))
            cv2.imshow('rtm3d_detect', kf)
            # videowriter.write(kf)
            key = cv2.waitKey(100)
            if key & 0xff == ord('q'):
                break
        cv2.destroyAllWindows()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="RTM3D Detecting")
    parser.add_argument("--model-config", default="", help="specific model config path")
    parser.add_argument("--engine_file_path", default="", help="specific trt engine path")
    # `args` is also read as a module-level global by detect().
    args = parser.parse_args()
    model, dataset, cfg = setup(args)
    # NOTE(review): the trailing "| 2.0625 | 2" is dataset metadata fused
    # onto this line during extraction, not valid Python — restore the
    # bare call from the original source.
    detect(model, dataset, cfg) | 2.0625 | 2
redbot/webui/captcha.py | gusdleon/redbot | 167 | 12759294 | import hmac
from http import cookies
import json
from typing import Callable, TYPE_CHECKING
from urllib.parse import urlencode
import thor
from thor.http import HttpClient, get_header
from thor.http.error import HttpError
from redbot.resource import HttpResource
from redbot.type import RawHeaderListType
# Shared HTTP client for hCaptcha verification calls: short timeouts and a
# bounded connection pool so a slow captcha backend cannot pile up sockets.
token_client = HttpClient()
token_client.idle_timeout = 30
token_client.connect_timeout = 10
token_client.read_timeout = 10
token_client.max_server_conn = 30
if TYPE_CHECKING:
from redbot.webui import RedWebUi # pylint: disable=cyclic-import,unused-import
class CaptchaHandler:
    """Verifies that a REDbot client is human.

    Verification is two-tier: a short-lived HMAC-signed cookie pair
    (``human_time``/``human_hmac``) is accepted first; failing that, an
    hCaptcha token supplied by the client is checked against the hCaptcha
    verification service. On success a fresh cookie pair is issued so the
    captcha does not repeat for ``token_lifetime`` seconds.
    """

    def __init__(
        self,
        webui: "RedWebUi",
        client_id: str,
        continue_test: Callable,
        error_response: Callable,
    ) -> None:
        """
        :param webui: web UI instance supplying config, headers and body args
        :param client_id: client address, forwarded to hCaptcha as remoteip
        :param continue_test: called once the client is verified; may receive
            extra response headers (the freshly issued human cookies)
        :param error_response: called as (status, phrase, public_msg, log_msg)
        """
        self.webui = webui
        self.client_id = client_id
        self.continue_test = continue_test
        self.error_response = error_response
        self.secret = webui.config.get("hcaptcha_secret", "").encode("utf-8")
        self.token_lifetime = webui.config.getint("token_lifetime", fallback=300)

    def run(self) -> None:
        """Entry point: try the human cookies, then fall back to the captcha."""
        captcha_token = self.webui.body_args.get("captcha_token", [None])[0]
        cookie_str = b", ".join(get_header(self.webui.req_headers, b"cookie"))
        try:
            cookiejar = cookies.SimpleCookie(
                cookie_str.decode("utf-8", "replace")
            )  # type: cookies.SimpleCookie
        except cookies.CookieError:
            self.error_response(
                b"400",
                b"Bad Request",
                "Sorry, your cookies appear corrupted. Please try again.",
                f"Cookie Parse Error: {cookie_str.decode('utf-8', 'replace')}",
            )
            return
        human_time = cookiejar.get("human_time", None)
        human_hmac = cookiejar.get("human_hmac", None)
        if human_time and human_time.value.isdigit() and human_hmac:
            # Cookie pair present and well-formed: validate the HMAC.
            if self.verify_human(int(human_time.value), human_hmac.value):
                self.continue_test()
            else:
                self.error_response(
                    b"403",
                    b"Forbidden",
                    "I need to double-check that you're human; please resubmit.",
                    "Invalid human token",
                )
        elif captcha_token:
            self.verify_captcha(captcha_token)
        else:
            self.error_response(
                b"403",
                b"Forbidden",
                "I need to double-check that you're human; please resubmit.",
                "Invalid captcha.",
            )

    def verify_captcha(self, presented_token: str) -> None:
        """Check *presented_token* against the hCaptcha verification API.

        Runs asynchronously on thor's event loop: on success the test is
        continued with freshly issued human cookies, otherwise an error
        response is sent.
        """
        exchange = token_client.exchange()

        @thor.events.on(exchange)
        def error(err_msg: HttpError) -> None:
            # Transport-level failure talking to the hCaptcha server.
            self.error_response(
                b"403",
                b"Forbidden",
                "There was a problem with the Captcha server; please try again soon.",
                f"Captcha error: {err_msg}.",
            )

        @thor.events.on(exchange)
        def response_start(
            status: bytes, phrase: bytes, headers: RawHeaderListType
        ) -> None:
            # Stash the status and accumulate the body on the exchange.
            exchange.tmp_status = status
            exchange.tmp_res_body = b""

        @thor.events.on(exchange)
        def response_body(chunk: bytes) -> None:
            exchange.tmp_res_body += chunk

        @thor.events.on(exchange)
        def response_done(trailers: RawHeaderListType) -> None:
            try:
                results = json.loads(exchange.tmp_res_body)
            except ValueError:
                if exchange.tmp_status != b"200":
                    e_str = f"Captcha server returned {exchange.tmp_status.decode('utf-8')} status code"
                else:
                    e_str = f"Captcha server response error"
                self.error_response(
                    b"500",
                    b"Internal Server Error",
                    e_str,
                    e_str,
                )
                return
            if results["success"]:
                # Verified: continue the test and set the human cookies.
                self.continue_test(self.issue_human())
            else:
                e_str = f"Captcha errors: {', '.join(results.get('error-codes', ['unknown error']))}"
                self.error_response(
                    b"403",
                    b"Forbidden",
                    e_str,
                    e_str,
                )

        request_form = {
            "secret": self.secret,
            "response": presented_token,
            "remoteip": self.client_id,
        }
        exchange.request_start(
            b"POST",
            b"https://hcaptcha.com/siteverify",
            [[b"content-type", b"application/x-www-form-urlencoded"]],
        )
        exchange.request_body(urlencode(request_form).encode("utf-8", "replace"))
        exchange.request_done({})

    def issue_human(self) -> RawHeaderListType:
        """
        Return cookie headers for later verification that this is a human.

        The expiry timestamp is HMAC-signed with the hCaptcha secret so
        clients cannot mint or extend their own tokens.
        """
        human_time = str(int(thor.time()) + self.token_lifetime)
        human_hmac = hmac.new(
            self.secret, bytes(human_time, "ascii"), "sha512"
        ).hexdigest()
        return [
            (
                b"Set-Cookie",
                f"human_time={human_time}; Max-Age={self.token_lifetime}; SameSite=Strict".encode(
                    "ascii"
                ),
            ),
            (
                b"Set-Cookie",
                f"human_hmac={human_hmac}; Max-Age={self.token_lifetime}; SameSite=Strict".encode(
                    "ascii"
                ),
            ),
        ]

    def verify_human(self, human_time: int, human_hmac: str) -> bool:
        """
        Check the user's human HMAC; valid only if untampered and unexpired.
        """
        computed_hmac = hmac.new(self.secret, bytes(str(human_time), "ascii"), "sha512")
        # SECURITY FIX: use a constant-time comparison — the original's
        # `==` leaks timing information about the expected digest.
        try:
            is_valid = hmac.compare_digest(computed_hmac.hexdigest(), human_hmac)
        except TypeError:
            # compare_digest rejects non-ASCII str input (attacker-supplied
            # cookie value); treat it as invalid rather than erroring out.
            is_valid = False
        if is_valid and human_time >= thor.time():
            return True
        else:
            return False
| 2.375 | 2 |
src/devpi_pr/client.py | devpi/devpi-pr | 1 | 12759295 | from contextlib import contextmanager
from devpi_common.metadata import parse_requirement
from operator import itemgetter
from pluggy import HookimplMarker
from tempfile import NamedTemporaryFile
from subprocess import call
import appdirs
import attr
import json
import os
import textwrap
import traceback
# Marker for devpi client plugin hook implementations.
client_hookimpl = HookimplMarker("devpiclient")
# Per-user data directory holding local review state (reviews.json + lock).
devpi_pr_data_dir = appdirs.user_data_dir("devpi-pr", "devpi")
def get_message_from_file(f):
    """Decode the binary file object *f* and return its text with
    '#'-comment lines dropped and surrounding whitespace stripped."""
    kept = []
    for raw_line in f.read().decode('utf-8').splitlines():
        if raw_line.strip().startswith('#'):
            continue
        kept.append(raw_line)
    return '\n'.join(kept).strip()
def get_message(hub, msg):
    """Return a non-empty message: use *msg* if given, otherwise open $EDITOR
    on a template file and read the result back."""
    if msg and msg.strip():
        return msg
    editor = os.environ.get("EDITOR")
    if not editor:
        hub.fatal("No EDITOR environment variable set.")
    with NamedTemporaryFile(prefix="devpi-pr-", suffix=".txt") as tf:
        # Pre-fill the file with instructions; '#' lines are stripped later.
        tf.write(textwrap.dedent("""\n
            # Please enter the message for your pull request.
            # Lines starting with '#' will be ignored.
            # An empty message aborts the current command.""").encode('utf-8'))
        tf.flush()
        try:
            result = call([editor, tf.name])
        except Exception as e:
            hub.fatal(''.join(traceback.format_exception(e.__class__, e, None)))
        if result != 0:
            hub.fatal("Error (%s) calling editor %s" % (result, editor))
        tf.seek(0)
        msg = get_message_from_file(tf)
        if not msg:
            # try to reopen the file. vim seems to replace it.
            with open(tf.name, 'rb') as f:
                msg = get_message_from_file(f)
    if msg:
        return msg
    hub.fatal("A message is required.")
@contextmanager
def devpi_pr_review_lock(hub):
    """Hold a filesystem lock around access to the local review data.

    The lock is the mere existence of ``reviews.lock`` in the devpi-pr data
    directory; creating it with mode "x" is atomic. Previously the lock file
    was only removed on a clean exit, so any exception inside the guarded
    block left a stale lock behind — it is now removed in a ``finally``.
    """
    if not os.path.exists(devpi_pr_data_dir):
        os.mkdir(devpi_pr_data_dir)
    lock_fn = os.path.join(devpi_pr_data_dir, "reviews.lock")
    try:
        # "x" fails atomically if the file already exists.
        with open(lock_fn, "x"):
            pass
    except FileExistsError:
        hub.fatal(
            "There is an existing lock at %s\n"
            "This can happen if a previous devpi-pr command crashed. "
            "If you are sure there is no other devpi-pr command still running, "
            "you can remove the file." % lock_fn)
        return
    try:
        yield
    finally:
        # Always release the lock, even if the guarded block raised.
        if os.path.exists(lock_fn):
            os.remove(lock_fn)
@contextmanager
def devpi_pr_review_data(hub):
    """Yield the local review-state dict (index name -> serial) under the
    review lock, persisting it back to reviews.json only when modified."""
    with devpi_pr_review_lock(hub):
        fn = os.path.join(devpi_pr_data_dir, "reviews.json")
        raw = ""
        if os.path.exists(fn):
            with open(fn, "rb") as fp:
                raw = fp.read().decode("utf-8")
        if raw:
            original = json.loads(raw)
            info = dict(original)
        else:
            original = None
            info = {}
        yield info
        # Only touch the file when the caller actually changed something.
        if info != original:
            with open(fn, "wb") as fp:
                fp.write(json.dumps(info).encode("utf-8"))
def full_indexname(hub, prname):
    """Qualify *prname* as ``user/name``, defaulting to the logged-in user."""
    if '/' not in prname:
        user = hub.current.get_auth_user()
        if user is None:
            hub.fatal("not logged in")
    else:
        try:
            (user, prname) = prname.split('/')
        except ValueError:
            hub.fatal("Invalid index name")
    return "%s/%s" % (user, prname)
@attr.s
class PRIndexInfos:
    """Container describing a pr index as seen by the server."""
    user = attr.ib(type=str)       # owner of the pr index
    index = attr.ib(type=str)      # short index name (without the user part)
    indexname = attr.ib(type=str)  # fully qualified "user/index" name
    url = attr.ib(type=str)        # API URL of the index
    ixconfig = attr.ib(type=dict)  # index configuration returned by the server
def require_pr_index(hub, name):
    """Resolve *name* to a PRIndexInfos, failing fatally unless it exists
    and is an index of type 'pr'."""
    hub.requires_login()
    current = hub.require_valid_current_with_index()
    indexname = full_indexname(hub, name)
    user, index = indexname.split('/')
    url = current.get_index_url(indexname, slash=False)
    reply = hub.http_api("get", url, fatal=False)
    if reply.reason != 'OK':
        hub.fatal("Couldn't access pr index '%s': %s" % (
            name, reply.reason))
    ixconfig = reply.result
    if ixconfig['type'] != 'pr':
        hub.fatal("The index '%s' is not a pr index" % name)
    return PRIndexInfos(user, index, indexname, url, ixconfig)
def new_pr_arguments(parser):
    """ Create a new pull request.
    """
    add = parser.add_argument
    add("name", metavar="NAME", type=str, nargs=1, action="store",
        help="pull request name")
    add("target", metavar="TARGETSPEC", type=str, nargs=1, action="store",
        help="target index of form 'USER/NAME'")
    add("pkgspec", metavar="PKGSPEC", type=str, nargs="*", action="store",
        default=None,
        help="releases in format 'name==version' which are added to "
             "this pull request.")
def new_pr(hub, args):
    """Create a new pr index on the server and push the given releases to it."""
    name = args.name[0]
    target = args.target[0]
    requirements = []
    for spec in args.pkgspec:
        requirement = parse_requirement(spec)
        if len(requirement.specs) != 1 or requirement.specs[0][0] != '==':
            hub.fatal(
                "The release specification needs to be of this form: name==version")
        requirements.append(requirement)
    indexname = full_indexname(hub, name)
    url = hub.current.get_index_url(indexname, slash=False)
    # Create the pr index itself ...
    hub.http_api("put", url, dict(
        type="pr", bases=target,
        states=["new"], messages=["New pull request"]))
    # ... then push each requested release from the current index onto it.
    for requirement in requirements:
        push_info = dict(
            name=requirement.project_name,
            version="%s" % requirement.specs[0][1],
            targetindex=indexname)
        hub.http_api("push", hub.current.index, kvdict=push_info, fatal=True)
def abort_pr_review_arguments(parser):
    """ Abort review of pull request.
    """
    add = parser.add_argument
    add("name", type=str, nargs=1, action="store", help="pull request name")
def abort_pr_review(hub, args):
    """Forget any locally recorded review state for the given pr index."""
    (name,) = args.name
    infos = require_pr_index(hub, name)
    with devpi_pr_review_data(hub) as review_data:
        if infos.indexname not in review_data:
            hub.error("No review of '%s' active" % infos.indexname)
        else:
            hub.info("Aborted review of '%s'" % infos.indexname)
            del review_data[infos.indexname]
def approve_pr_arguments(parser):
    """ Approve reviewed pull request.
    """
    add = parser.add_argument
    add("name", type=str, nargs=1, action="store", help="pull request name")
    add("-s", "--serial", type=str, action="store",
        help="pull request serial, only required if not using 'review-pr' first")
    add("-m", "--message", action="store",
        help="Message to add on submit.")
    add("-k", "--keep-index", action="store_true",
        help="Keep the pr index instead of deleting it after approval.")
def approve_pr(hub, args):
    """Approve a pr: patch it into the 'approved' state and, unless kept,
    delete the pr index and the local review record."""
    (name,) = args.name
    indexinfos = require_pr_index(hub, name)
    serial = args.serial
    if serial is None:
        # Fall back to the serial recorded by an earlier "review-pr" run.
        with devpi_pr_review_data(hub) as review_data:
            if indexinfos.indexname not in review_data:
                hub.fatal(
                    "No review data found for '%s', "
                    "it looks like you did not use review-pr or "
                    "you forgot the --serial option." % indexinfos.indexname)
            serial = "%s" % review_data[indexinfos.indexname]
    message = get_message(hub, args.message)
    # The serial header lets the server reject approval if the pr changed
    # since it was reviewed.
    hub.http_api(
        "patch", indexinfos.url, [
            "states+=approved",
            "messages+=%s" % message],
        headers={'X-Devpi-PR-Serial': serial})
    if not args.keep_index:
        hub.http_api("delete", indexinfos.url)
    # Drop the local review record either way.
    with devpi_pr_review_data(hub) as review_data:
        review_data.pop(indexinfos.indexname, None)
def list_prs_arguments(parser):
    """ List pull requests.
    """
    add = parser.add_argument
    add("indexname", type=str, nargs="?", action="store",
        help="index name, specified as NAME or USER/NAME. If no index "
             "is specified use the current index")
    add("-a", "--all-states", action="store_true",
        help="Output normally hidden states.")
    add("-m", "--messages", action="store_true",
        help="Include state change messages in output.")
def merge_pr_data(data1, data2):
    """Merge two state->user->pr-list mappings, deduplicating pr entries.

    Each pr dict is frozen into a hashable tuple of items (list values
    become tuples) so equal entries collapse in a set union.
    """
    def freeze(prs):
        # hashable representation of each pr dict, preserving item order
        return set(
            tuple(
                (k, tuple(v) if isinstance(v, list) else v)
                for k, v in pr.items())
            for pr in prs)

    merged = {}
    for state in set(data1).union(data2):
        per_user = {}
        users1 = data1.get(state, {})
        users2 = data2.get(state, {})
        for user in set(users1).union(users2):
            combined = freeze(users1.get(user, [])) | freeze(users2.get(user, []))
            per_user[user] = [dict(entry) for entry in combined]
        merged[state] = per_user
    return merged
def get_prs(users_prs):
    """Flatten a user->prs mapping into one list with user-qualified names,
    sorted by (name, base, last_serial)."""
    flat = [
        dict(pr, name="%s/%s" % (user, pr['name']))
        for user, prs in users_prs.items()
        for pr in prs
    ]
    flat.sort(key=itemgetter("name", "base", "last_serial"))
    return flat
def create_pr_list_output(users_prs, review_data, include_messages):
    """Render the prs of one state as aligned text lines; prs present in
    *review_data* are marked as currently under review."""
    prs = get_prs(users_prs)
    name_width = max(len(pr["name"]) for pr in prs)
    base_width = max(len(pr["base"]) for pr in prs)
    serial_width = max(len("%d" % pr["last_serial"]) for pr in prs)
    fmt = "{0:<%d} -> {1:<%d} at serial {2:>%d}{3}" % (name_width, base_width, serial_width)
    out = []
    for pr in prs:
        marker = " (reviewing)" if pr["name"] in review_data else ""
        out.append(fmt.format(pr["name"], pr["base"], pr["last_serial"], marker))
        if include_messages:
            for state, by, message in zip(pr['states'], pr['by'], pr['messages']):
                out.append(" %s by %s:\n%s" % (
                    state, by, textwrap.indent(message, " ")))
            out.append("")
    return "\n".join(out)
def list_prs(hub, args):
    """List prs visible on the given (or current) index, grouped by state."""
    indexname = args.indexname
    current = hub.require_valid_current_with_index()
    index_url = current.get_index_url(indexname, slash=False)
    r = hub.http_api("get", index_url, fatal=False, type="indexconfig")
    ixconfig = r.result or {}
    hidden_states = set()
    if not args.all_states:
        # approved prs are noise in the default listing
        hidden_states.add("approved")
    pull_requests_allowed = ixconfig.get("pull_requests_allowed", False)
    is_pr_index = ixconfig["type"] == "pr"
    if pull_requests_allowed or is_pr_index:
        list_url = index_url.asdir().joinpath("+pr-list")
        r = hub.http_api("get", list_url, type="pr-list")
        index_data = r.result
    else:
        index_data = {}
    if not is_pr_index and not args.all_states:
        hidden_states.add("new")
    user = current.get_auth_user()
    if user:
        login_status = "logged in as %s" % user
    else:
        login_status = "not logged in"
    hub.info("current devpi index: %s (%s)" % (current.index, login_status))
    if user:
        # also merge in the prs owned by the logged-in user
        user_url = current.get_user_url(indexname)
        list_url = user_url.asdir().joinpath("+pr-list")
        r = hub.http_api("get", list_url, type="pr-list")
        user_data = r.result
        if is_pr_index and not args.all_states:
            user_data.pop("new", None)
    else:
        user_data = {}
    pr_data = merge_pr_data(index_data, user_data)
    if not pr_data:
        hub.line("no pull requests")
        return
    for state in sorted(pr_data):
        if state in hidden_states:
            continue
        with devpi_pr_review_data(hub) as review_data:
            out = create_pr_list_output(
                pr_data[state], review_data, args.messages)
        hub.line("%s pull requests" % state)
        hub.line(textwrap.indent(out, " "))
def reject_pr_arguments(parser):
    """ Reject pull request.
    """
    add = parser.add_argument
    add("name", type=str, nargs=1, action="store", help="pull request name")
    add("-m", "--message", action="store",
        help="Message to add on reject.")
def reject_pr(hub, args):
    """Move the pr into the 'rejected' state with an accompanying message."""
    (name,) = args.name
    infos = require_pr_index(hub, name)
    message = get_message(hub, args.message)
    patch_ops = [
        "states+=rejected",
        "messages+=%s" % message]
    hub.http_api("patch", infos.url, patch_ops)
def review_pr_arguments(parser):
    """ Start reviewing a submitted pull request.
    """
    add = parser.add_argument
    add("name", type=str, nargs=1, action="store", help="pull request name")
    add("-u", "--update", action="store_true",
        help="Update the serial of the review.")
def review_pr(hub, args):
    """Start (or update) a local review of a pending pr by recording the
    pr's current serial in the local review data."""
    (name,) = args.name
    indexinfos = require_pr_index(hub, name)
    (targetindex,) = indexinfos.ixconfig['bases']
    targeturl = hub.current.get_index_url(targetindex)
    r = hub.http_api("get", targeturl.asdir().joinpath("+pr-list"), type="pr-list")
    pending_prs = r.result.get("pending")
    if not pending_prs:
        hub.fatal("There are no pending PRs.")
    # BUG FIX: default to an empty list when this user has no pending prs,
    # so we reach the "Could not find PR" message instead of raising a
    # TypeError by iterating over None.
    users_prs = pending_prs.get(indexinfos.user, [])
    for prs in users_prs:
        if prs["name"] == indexinfos.index:
            last_serial = prs["last_serial"]
            break
    else:
        hub.fatal("Could not find PR '%s'." % indexinfos.indexname)
    with devpi_pr_review_data(hub) as review_data:
        if indexinfos.indexname in review_data:
            if args.update:
                hub.info("Updated review of '%s' to serial %s" % (
                    indexinfos.indexname, last_serial))
            else:
                # already reviewing and no --update: keep the recorded serial
                hub.warn("Already reviewing '%s' at serial %s" % (
                    indexinfos.indexname, review_data[indexinfos.indexname]))
                return
        else:
            hub.info(
                "Started review of '%s' at serial %s" % (
                    indexinfos.indexname, last_serial))
        review_data[indexinfos.indexname] = last_serial
def submit_pr_arguments(parser):
    """ Submit an existing pull request for review.
    """
    add = parser.add_argument
    add("name", type=str, nargs=1, action="store", help="pull request name")
    add("-m", "--message", action="store",
        help="Message to add on submit.")
def submit_pr(hub, args):
    """Move the pr into the 'pending' state so it can be reviewed."""
    (name,) = args.name
    infos = require_pr_index(hub, name)
    message = get_message(hub, args.message)
    patch_ops = [
        "states+=pending",
        "messages+=%s" % message]
    hub.http_api("patch", infos.url, patch_ops)
def cancel_pr_arguments(parser):
    """ Cancel submitted state of pull request by submitter.
    """
    add = parser.add_argument
    add("name", type=str, nargs=1, action="store", help="pull request name")
    add("-m", "--message", action="store",
        help="Message to add on cancel.")
def cancel_pr(hub, args):
    """Return the pr to the 'new' state, withdrawing the submission."""
    (name,) = args.name
    infos = require_pr_index(hub, name)
    message = get_message(hub, args.message)
    patch_ops = [
        "states+=new",
        "messages+=%s" % message]
    hub.http_api("patch", infos.url, patch_ops)
def delete_pr_arguments(parser):
    """ Completely remove a pull request including any uploaded packages.
    """
    add = parser.add_argument
    add("name", type=str, nargs=1, action="store", help="pull request name")
def delete_pr(hub, args):
    """Remove the pr index (and everything uploaded to it) on the server."""
    (name,) = args.name
    infos = require_pr_index(hub, name)
    hub.http_api("delete", infos.url)
@client_hookimpl
def devpiclient_subcommands():
    """Register the devpi-pr subcommands with the devpi client.

    Each entry is (argument-setup function, command name, entry point path).
    """
    return [
        (new_pr_arguments, "new-pr", "devpi_pr.client:new_pr"),
        (submit_pr_arguments, "submit-pr", "devpi_pr.client:submit_pr"),
        (list_prs_arguments, "list-prs", "devpi_pr.client:list_prs"),
        (review_pr_arguments, "review-pr", "devpi_pr.client:review_pr"),
        (abort_pr_review_arguments, "abort-pr-review", "devpi_pr.client:abort_pr_review"),
        (approve_pr_arguments, "approve-pr", "devpi_pr.client:approve_pr"),
        (reject_pr_arguments, "reject-pr", "devpi_pr.client:reject_pr"),
        (cancel_pr_arguments, "cancel-pr", "devpi_pr.client:cancel_pr"),
        (delete_pr_arguments, "delete-pr", "devpi_pr.client:delete_pr")]
| 2.15625 | 2 |
src/impurity/ctqmc/cix_compress.py | dmft-wien2k/dmft-wien2k-v2 | 7 | 12759296 | <reponame>dmft-wien2k/dmft-wien2k-v2
#!/usr/bin/env python
from scipy import *
import sys, glob
import operator
def cmp_small_large(wlarge_small):
    """Build the forward/backward maps between the compressed ("small") and
    original ("large") superstate lists.

    Returns (small_large, large_small) where small_large is a list of
    (original_index, members) for every non-empty entry, and large_small
    maps each original index to its compressed position (-1 if dropped).
    """
    n = len(wlarge_small)
    large_small = [-1] * n
    small_large = []
    kept = 0
    for idx in range(n):
        members = wlarge_small[idx]
        if len(members) > 0:
            large_small[idx] = kept
            kept += 1
            small_large.append((idx, members))
    return (small_large, large_small)
def union(data):
    """Return the elements of *data* with duplicates removed, preserving
    first-occurrence order; works for unhashable elements too."""
    unique = []
    for element in data:
        if element not in unique:
            unique.append(element)
    return unique
# NOTE: Python 2 code (print statements).
def PrintHeader(small_large, large_small, start, sindex, Ene, Spin):
    # Print the compressed superstate header: for each kept superstate its
    # renumbered id, original quantum-number columns, kept dimension, the
    # remapped bath indices, and the retained energies/spins.
    for i in range(len(small_large)):
        ii = small_large[i][0]
        print "%3s %3s %3s %4s" % (i+1, start[ii][1], start[ii][2], start[ii][3]),
        print "%3d " % len(small_large[i][1]), #sdim[ii],
        for j in sindex[ii]:
            # bath targets are remapped to compressed numbering; -1 stays 0
            if j>=0: print "%3d " % (large_small[j]+1),
            else: print "%3d " % (j+1),
        for j in small_large[i][1]: print Ene[ii][j],
        for j in small_large[i][1]: print Spin[ii][j],
        print
# NOTE: Python 2 code (print statements).
def PrintHeader1(small_large, large_small, start, sindex, findex, Ene, Spin):
    # Same as PrintHeader but for the HB1 section, which carries an extra
    # findex column linking each HB1 state back to the dynamic state list.
    for i in range(len(small_large)):
        ii = small_large[i][0]
        print "%3s %3s %3s %3s %4s" % (i+1, findex[i]+1, start[ii][2], start[ii][3], start[ii][4]),
        print "%3d " % len(small_large[i][1]), #sdim[ii],
        for j in sindex[ii]:
            if j>=0: print "%3d " % (large_small[j]+1),
            else: print "%3d " % (j+1),
        for j in small_large[i][1]: print Ene[ii][j],
        for j in small_large[i][1]: print Spin[ii][j],
        print
if __name__ == '__main__':
    # Defaults: probability cutoff for keeping states, and a cap on the
    # number of states kept per superstate.
    cutof = 1e-6
    maxDim = 1000
    # Split argv into existing files (cix file, probability file) and
    # plain parameters (optional cutoff override).
    files=[]
    par=[]
    for a in sys.argv[1:]:
        if len(glob.glob(a))>0:
            files.append(a)
        else:
            par.append(a)
    if len(files)<2:
        print 'Need input parameters: <cix file> <probability>'
        sys.exit(0)
    if len(par)>0:
        cutof = float(par[0])
    fcix = files[0]
    fProb = files[1]
    # Read the whole cix file; "count" is the running line cursor.
    fc = open(fcix, 'r')
    lines = fc.readlines()
    count = 2
    (Nm, nsize, N_ifl, max_size) = map(int, lines[count].split())
    count += 2
    # Total number of baths is the sum of the per-block bath dimensions.
    Nbaths=0
    for b in range(N_ifl):
        bath = map(int, lines[count].split())
        if bath[0] != b : print 'something wrong in reading cix file ib!=ibath'
        dimbath = bath[1]
        Nbaths += dimbath
        count +=1
    #print Nbaths
    count += 3
    bath_header = count
    # Parse the superstate header lines: quantum numbers, per-state bath
    # target indices (converted to 0-based), energies and spins.
    state=[]
    start=[]
    Ene=[]
    Spin=[]
    sdim=[]
    sindex=[]
    for i in range(nsize):
        state.append(lines[count])
        slines = lines[count].split()
        start.append(slines[0:5])
        wsdim = int(slines[4])
        wsindex = map(int, slines[5:5+Nbaths])
        sdim.append(wsdim)
        #print 'b=', wsindex
        for iw in range(len(wsindex)): wsindex[iw]-=1
        #print 'a=', wsindex
        sindex.append(wsindex)
        #print wsdim, sindex
        Ene.append(slines[5+Nbaths:5+Nbaths+wsdim])
        Spin.append(slines[5+Nbaths+wsdim:5+Nbaths+2*wsdim])
        #print Ene
        #print Spin
        count += 1
    count += 1
    # Read the F^+ matrix elements: one (superstate, bath) line each, with
    # consistency checks against the header indices and dimensions.
    FM=[]
    for i in range(nsize):
        bFM=[]
        for b in range(Nbaths):
            (ii, il, s1, s2) = map(int, lines[count].split()[0:4])
            #print lines[count], len(sdim)
            if (ii!=i+1): print 'something wrong reading cix two'
            if (il!=sindex[i][b]+1): print 'something wrong reading cix three', il, sindex[i][b]
            if il>0:
                if (s1!=sdim[i]): print 'something wrong reading cix four', s1, sdim[i]
                if (s2!=sdim[il-1]): print 'something wrong reading cix five', s2, sdim[il-1]
                clines = lines[count].split()[4:]
                qFM = zeros((s1,s2), dtype=float)
                for i0 in range(s1):
                    for i1 in range(s2):
                        qFM[i0,i1] = float(clines[i0*s2 + i1])
                bFM.append(qFM)
            count += 1
        FM.append(bFM)
    # HERE WE NEED TO CHANGE IF OPERATORS ARE PRESENT
    count += 4
    # Reading the HB1 part from the cix file
    (Nm, wnsize, wN_ifl, wmax_size) = map(int, lines[count].split())
    count += 2
    # Same layout as the main section, plus a per-state wrindex column
    # linking HB1 states back to the main superstate list (0-based, -1 none).
    wstate=[]
    wstart=[]
    wEne=[]
    wSpin=[]
    wsdim=[]
    wsindex=[]
    wrindex=[]
    for i in range(wnsize):
        wstate.append(lines[count])
        slines = lines[count].split()
        wstart.append(slines[0:6])
        wrindex.append(int(slines[1])-1)
        twsdim = int(slines[5])
        twsindex = map(int, slines[6:6+Nbaths])
        wsdim.append(twsdim)
        for iw in range(len(twsindex)): twsindex[iw]-=1
        wsindex.append(twsindex)
        wEne.append(slines[6+Nbaths:6+Nbaths+twsdim])
        wSpin.append(slines[6+Nbaths+twsdim:6+Nbaths+2*twsdim])
        count += 1
    count += 1
    # HB1 matrix elements, mirroring the main FM read loop.
    wFM=[]
    for i in range(wnsize):
        bFM=[]
        for b in range(Nbaths):
            (ii, il, s1, s2) = map(int, lines[count].split()[0:4])
            if (ii!=i+1): print 'something wrong reading cix two 2'
            if (il!=wsindex[i][b]+1): print 'something wrong reading cix three 2', il, wsindex[i][b]
            if il>0:
                if (s1!=wsdim[i]): print 'something wrong reading cix four 2', s1, wsdim[i]
                if (s2!=wsdim[il-1]): print 'something wrong reading cix five 2', s2, wsdim[il-1]
                clines = lines[count].split()[4:]
                qFM = zeros((s1,s2), dtype=float)
                for i0 in range(s1):
                    for i1 in range(s2):
                        qFM[i0,i1] = float(clines[i0*s2 + i1])
                bFM.append(qFM)
            count += 1
        wFM.append(bFM)
    # Creating inverse index between the short and long list of states in cix file
    inv_wrindex=zeros(nsize,dtype=int); inv_wrindex -= 1
    for i in range(wnsize):
        if wrindex[i]>=0:
            inv_wrindex[wrindex[i]]=i
    #print inv_wrindex
    # DEBUGGING HERE
    # Reading probability
    # One probability line per (superstate, inner state); cross-checked
    # against the indices recorded in the cix header.
    fp = open(fProb, 'r')
    plines = fp.readlines()
    pcount = 1
    Prob = []
    for i in range(nsize):
        inw = inv_wrindex[i]
        #print 'i=', i, sdim[i]
        for j in range(sdim[i]):
            splines = plines[pcount].split()
            (ii, ij) = map(int, splines[:2])
            P = float(splines[2])
            if i+1 != ii : print 'something wrong reading probability 1'
            if j != ij : print 'something wrong reading probability 2'
            Prob.append([P,i,j])
            pcount += 1
    # Sort by decreasing |probability| so the most important states win.
    Prob.sort(lambda x, y: cmp(abs(y[0]),abs(x[0])))
    # Creates wlarge for dynamic treatment
    # Keep a state only if its probability exceeds the cutoff and the
    # superstate has not yet reached maxDim kept states.
    wlarge_small=[]
    for i in range(nsize): wlarge_small.append([])
    for ip,pr in enumerate(Prob):
        if (pr[0]>cutof):
            if len(wlarge_small[pr[1]])<maxDim:
                wlarge_small[pr[1]].append(pr[2])
    #print 'wlarge_small', wlarge_small
    # creates small_large and large_small index
    (small_large, large_small) = cmp_small_large(wlarge_small)
    #print 'small_large=', small_large
    #for i in range(len(small_large)):
    #    ii = small_large[i][0]
    #    for j in small_large[i][1]:
    #        print 'ii+1=', ii+1, 'j=', j, 'len(Ene[ii])=', len(Ene[ii]), 'True=', j<len(Ene[ii]), ' ', Ene[ii]
    max_size_new = max(map(lambda x: len(x[1]), small_large))
    # Start printing !!!
    # Emit the compressed cix file to stdout: original preamble, updated
    # counts, remapped headers, then the truncated matrix elements.
    for l in lines[0:2]: print l,
    print Nm, len(small_large), N_ifl, max_size_new
    for l in lines[3:bath_header]: print l,
    PrintHeader(small_large, large_small, start, sindex, Ene, Spin)
    print "# matrix elements"
    for i in range(len(small_large)):
        ii = small_large[i][0]
        for b in range(Nbaths):
            ifi = sindex[ii][b]
            if ifi>=0 and large_small[ifi]>=0:
                sifi = large_small[ifi]
                ws0 = small_large[i][1]
                ws1 = small_large[sifi][1]
                print "%3s %3s %3s %3s " % (i+1, sifi+1, len(ws0), len(ws1)),
                for i0 in ws0:
                    for i1 in ws1:
                        print FM[ii][b][i0,i1],
                print
            else:
                print "%3s %3s %3s %3s " % (i+1, 0, 0, 0)
    # Creates wlarge for static treatment
    # First creates static list of states
    # The static list contains every dynamic state plus all states reachable
    # from (or reaching) a dynamic state through a bath operator.
    dynamic=[]
    for sm in small_large: dynamic.append(inv_wrindex[sm[0]])
    static=dynamic[:]
    for i in dynamic:
        for j in wsindex[i]:
            if (j>=0): static.append(j)
    for i in range(wnsize):
        for j in wsindex[i]:
            if j in dynamic:
                static.append(i)
    static = union(static)
    static.sort()
    # Finally creates wlarge index
    # Static states are kept in full (no probability truncation).
    wlarge_small=[]
    for i in range(wnsize): wlarge_small.append([])
    for i in range(len(static)):
        ii = static[i]
        wlarge_small[ii] = range(wsdim[ii])
    # creates small_large and large_small index
    (qsmall_large, qlarge_small) = cmp_small_large(wlarge_small)
    # findex links each kept HB1 state to its compressed dynamic counterpart.
    findex = zeros(len(qsmall_large),dtype=int)
    findex -= 1
    for i in range(len(qsmall_large)):
        #print qsmall_large[i][0] , wrindex[qsmall_large[i][0]]
        findex[i] = large_small[wrindex[qsmall_large[i][0]]]
    #print findex
    # Emit the HB1 section of the compressed cix file.
    print "HB1"
    print "# number of operators needed"
    print "0"
    print "# Data for HB1"
    max_size_new1 = max(map(lambda x: len(x[1]), qsmall_large))
    print Nm, len(qsmall_large), N_ifl, max_size_new1
    print "# ind N K Jz size"
    PrintHeader1(qsmall_large, qlarge_small, wstart, wsindex, findex, wEne, wSpin)
    print "# matrix elements"
    for i in range(len(qsmall_large)):
        ii = qsmall_large[i][0]
        for b in range(Nbaths):
            ifi = wsindex[ii][b]
            if ifi>=0 and qlarge_small[ifi]>=0:
                sifi = qlarge_small[ifi]
                ws0 = qsmall_large[i][1]
                ws1 = qsmall_large[sifi][1]
                print "%3s %3s %3s %3s " % (i+1, sifi+1, len(ws0), len(ws1)),
                for i0 in ws0:
                    for i1 in ws1:
                        print wFM[ii][b][i0,i1],
                print
            else:
                print "%3s %3s %3s %3s " % (i+1, 0, 0, 0)
| 2.421875 | 2 |
Aula 7/Aula 7.py | Katakhan/TrabalhosPython2 | 0 | 12759297 | <gh_stars>0
# Class 7, 2019-11-14
# Dictionaries
lista = []
dicionario = { 'Nome': 'Antonio' , 'Sobrenome' : 'Gastaldi' }
print(dicionario)
print(dicionario['Sobrenome'])
nome = 'Antônio'
lista_notas = [10,20,50,70]
# Average of the grades.
media = sum(lista_notas)/ len(lista_notas)
# Default to failed; passing requires an average of at least 7.
# NOTE(review): the grades look 0-100 scale while the threshold is 7 —
# confirm the intended grading scale.
situacao = 'Reprovado'
if media >= 7 :
    situacao = 'Aprovado'
# Aggregate the student's data into a single dictionary and report it.
dicionario_alunos = {'nome' :nome, 'lista_Notas': lista_notas, 'Media' : media, 'Situacao': situacao }
print(f"{dicionario_alunos['nome']} - {dicionario_alunos['Situacao']}")
federate_learning/device/__init__.py | deib-polimi/FederatedLearningFramework | 0 | 12759298 | <gh_stars>0
from .device import Device
from .device_app import DeviceApp
from .results_dispatcher import ResultsDispatcher
# Public API of the device package.
__all__ = [
    "Device",
    "DeviceApp",
    "ResultsDispatcher"
]
| 1.109375 | 1 |
tests/conftest.py | konstantin-stepanov/aiodirector | 0 | 12759299 | import gc
import time
import logging
import aiohttp
import asyncio
import socket
import pytest
from aiohttp.test_utils import TestServer
import aioamqp
import aioamqp.channel
import aioamqp.protocol
import aiohttp.web
import asyncpg
from docker.client import DockerClient
from docker.utils import kwargs_from_env
from async_generator import yield_, async_generator
from aioapp.app import Application
# Disable error logs so they do not clutter the test output.
# logging.basicConfig(level=logging.CRITICAL)
logging.basicConfig(
    format='%(asctime)-15s %(message)s %(filename)s %(lineno)s %(funcName)s')
aioamqp.channel.logger.level = logging.CRITICAL
aioamqp.protocol.logger.level = logging.CRITICAL
@pytest.fixture(scope='session')
def event_loop():
    # Session-wide event loop shared by all async fixtures and tests.
    asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
    loop = asyncio.get_event_loop_policy().new_event_loop()
    yield loop
    # Collect garbage first so lingering transports are finalized before close.
    gc.collect()
    loop.close()
@pytest.fixture(scope='session')
def loop(event_loop):
    # Shorter alias for the session-wide event loop fixture.
    return event_loop
def get_free_port():
    """Ask the OS for an ephemeral free TCP port and return its number."""
    with socket.socket() as sock:
        sock.bind(('', 0))
        return sock.getsockname()[1]
@pytest.fixture(scope='session')
async def postgres(loop):
    """Session fixture: start a throwaway postgres docker container, wait
    until it accepts connections, and yield its (host, port)."""
    tag = 'latest'
    image = 'postgres'
    host = '127.0.0.1'
    timeout = 60
    unused_tcp_port = get_free_port()
    client = DockerClient(version='auto', **kwargs_from_env())
    client.images.pull(image, tag=tag)
    print('Stating %s:%s on %s:%s' % (image, tag, host, unused_tcp_port))
    cont = client.containers.run('%s:%s' % (image, tag), detach=True,
                                 ports={'5432/tcp': ('0.0.0.0',
                                                     unused_tcp_port)})
    try:
        # Poll until the server accepts a connection or the timeout expires.
        # NOTE(review): time.sleep blocks the event loop during setup —
        # acceptable for fixture startup, but worth confirming.
        start_time = time.time()
        conn = None
        while conn is None:
            if start_time + timeout < time.time():
                raise Exception("Initialization timeout, failed to "
                                "initialize postgresql container")
            try:
                conn = await asyncpg.connect(
                    'postgresql://postgres@%s:%s/postgres'
                    '' % (host, unused_tcp_port),
                    loop=loop)
            except Exception as e:
                time.sleep(.1)
        # The connection was only a readiness probe; close it again.
        await conn.close()
        yield (host, unused_tcp_port)
    finally:
        cont.kill()
        cont.remove()
@pytest.fixture(scope='session')
async def rabbit(loop, rabbit_override_addr):
    """Session fixture yielding (host, port) of a RabbitMQ broker.

    Uses ``rabbit_override_addr`` ("host:port") when provided; otherwise
    starts a throwaway rabbitmq docker container and polls until it accepts
    AMQP connections.
    """
    if rabbit_override_addr:
        yield rabbit_override_addr.split(':')
        return
    tag = '3.7.1'
    image = 'rabbitmq:{}'.format(tag)
    host = '0.0.0.0'
    timeout = 60
    unused_tcp_port = get_free_port()
    client = DockerClient(version='auto', **kwargs_from_env())
    print('Stating rabbitmq %s on %s:%s' % (image, host, unused_tcp_port))
    cont = client.containers.run(image, detach=True,
                                 ports={'5672/tcp': ('0.0.0.0',
                                                     unused_tcp_port)})
    try:
        start_time = time.time()
        conn = transport = None
        while conn is None:
            if start_time + timeout < time.time():
                # fixed typo in the error message ("failed t o")
                raise Exception("Initialization timeout, failed to "
                                "initialize rabbitmq container")
            try:
                transport, conn = await aioamqp.connect(host, unused_tcp_port,
                                                        loop=loop)
            except Exception:
                time.sleep(.1)
        # The connection was only a readiness probe; close it again.
        await conn.close()
        transport.close()
        yield (host, unused_tcp_port)
    finally:
        cont.kill()
        cont.remove()
@pytest.fixture
@async_generator
async def client(loop):
    # Per-test aiohttp client session, closed automatically afterwards.
    async with aiohttp.ClientSession(loop=loop) as client:
        await yield_(client)
@pytest.fixture(scope='session')
def tracer_server(loop):
    """Factory to create a TestServer instance, given an app.
    test_server(app, **kwargs)
    """
    servers = []
    async def go(**kwargs):
        # Minimal zipkin-like endpoint that accepts span posts with 201.
        def tracer_handle(request):
            return aiohttp.web.Response(text='', status=201)
        app = aiohttp.web.Application()
        app.router.add_post('/api/v2/spans', tracer_handle)
        server = TestServer(app, port=None)
        await server.start_server(loop=loop, **kwargs)
        servers.append(server)
        return server
    yield go
    # Tear down every server created through the factory.
    async def finalize():
        while servers:
            await servers.pop().close()
    loop.run_until_complete(finalize())
@pytest.fixture
async def app(tracer_server, loop):
    # Application under test, wired to a fake zipkin tracer endpoint.
    tracer_host = '127.0.0.1'
    tracer_port = (await tracer_server()).port
    tracer_addr = 'http://%s:%s/' % (tracer_host, tracer_port)
    app = Application(loop=loop)
    app.setup_logging(tracer_driver='zipkin', tracer_addr=tracer_addr,
                      tracer_name='test')
    yield app
    await app.run_shutdown()
backend/charscript.py | Naenoh/FG-Vault | 1 | 12759300 | <filename>backend/charscript.py
import sys
# Convert a game/character listing into SQL INSERT statements:
# lines starting with '#' open a new game; other lines are characters.
if len(sys.argv) != 3:
    print("Too few or too many arguments")
    print("Usage : python charscript.py inputfile outputfile")
else:
    with open(sys.argv[1], encoding="utf-8") as infile:
        with open(sys.argv[2], "w", encoding="utf-8") as outfile:
            outfile.write("BEGIN;")
            game = ""
            for line in infile:
                # Escape single quotes for SQL string literals.
                line = line.strip().replace("\'", "\'\'")
                if line.startswith("#"):
                    # Close the previous game's character INSERT before
                    # starting a new game section.
                    if(game != ""):
                        outfile.write(";\n\n")
                    game = line[1:]
                    outfile.write("INSERT INTO games (name) VALUES ('{}');\n".format(game))
                    outfile.write("INSERT INTO chars (name, game_id) VALUES")
                    outfile.write("('General', (SELECT id from games where name = '{}'))\n".format(game))
                else:
                    # NOTE(review): blank input lines become empty character
                    # rows — confirm the input never contains blank lines.
                    outfile.write(",('{}', (SELECT id from games where name = '{}'))\n".format(line,game))
            outfile.write(";\n")
            outfile.write("COMMIT;")
profetch/profetch/pipelines.py | doge-search/webdoge | 0 | 12759301 | <gh_stars>0
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import urllib
import xml.dom.minidom as minidom
# NOTE: Python 2 code (file(), print statement).
class writePipeline(object):
    """Scrapy pipeline that accumulates professor items into one XML
    document and downloads each profile picture to a local directory."""
    def __init__(self):
        self.fout_xml = file('test.xml','w')
        self.doc = minidom.Document()
        self.institution = self.doc.createElement("institution")
        self.doc.appendChild(self.institution)
        self.cnt = 0  # number of processed items
        if(not os.path.exists('d:/PYPJ/images')):
            os.mkdir('d:/PYPJ/images')
    def process_item(self, item, spider):
        # Append one <professor> element with the scraped fields.
        professor = self.doc.createElement("professor")
        self.institution.appendChild(professor)
        namenode = self.doc.createElement("name")
        namenode.appendChild(self.doc.createTextNode(item['name'].encode('utf-8')))
        professor.appendChild(namenode)
        titlenode = self.doc.createElement("title")
        titlenode.appendChild(self.doc.createTextNode(item['title'].encode('utf-8')))
        professor.appendChild(titlenode)
        websitenode = self.doc.createElement("website")
        websitenode.appendChild(self.doc.createTextNode(item['website'].encode('utf-8')))
        professor.appendChild(websitenode)
        emailnode = self.doc.createElement("email")
        emailnode.appendChild(self.doc.createTextNode(item['email'].encode('utf-8')))
        professor.appendChild(emailnode)
        phonenode = self.doc.createElement("phone")
        phonenode.appendChild(self.doc.createTextNode(item['phone'].encode('utf-8')))
        professor.appendChild(phonenode)
        officenode = self.doc.createElement("office")
        officenode.appendChild(self.doc.createTextNode(item['office'].encode('utf-8')))
        professor.appendChild(officenode)
        if(item['picture']!=''):
            # Download the picture and reference it by relative path.
            piclocal = "d:/PYPJ/images/" + item['name'].encode('utf-8') + ".jpg"
            urllib.urlretrieve(item['picture'], piclocal)
            picnode = self.doc.createElement("image")
            picnode.appendChild(self.doc.createTextNode("./images/" + item['name'].encode('utf-8') + ".jpg"))
            professor.appendChild(picnode)
        self.cnt += 1
    def close_spider(self, spider):
        # Flush the accumulated XML document on spider shutdown.
        print self.cnt
        self.doc.writexml(self.fout_xml, "\t", "\t", "\n")
        self.fout_xml.close()
| 2.40625 | 2 |
affiliates/base/views.py | glogiotatidis/affiliates | 15 | 12759302 | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import render
from django.views.decorators.http import require_POST
from django.views.defaults import page_not_found, server_error
import basket
import commonware
from commonware.response.decorators import xframe_allow
from django_browserid.views import Verify
from tower import ugettext as _
from affiliates.base.forms import NewsletterSubscriptionForm
from affiliates.base.http import JSONResponse
from affiliates.base.utils import redirect
from affiliates.facebook.utils import in_facebook_app
from affiliates.links.models import Link
log = commonware.log.getLogger('a.facebook')
def home(request):
    """Landing page: redirect authenticated users to their dashboard,
    otherwise render the public home page with site-wide stats."""
    if not request.user.is_authenticated():
        stats = {
            'affiliate_count': User.objects.count(),
            'link_count': Link.objects.count(),
            'click_count': Link.objects.total_link_clicks(),
        }
        return render(request, 'base/home.html', stats)
    return redirect('base.dashboard')
def about(request):
    """Render the static About page."""
    template_name = 'base/about.html'
    return render(request, template_name)
def terms(request):
    """Render the static Terms of Service page."""
    template_name = 'base/terms.html'
    return render(request, template_name)
@login_required
def dashboard(request):
    """Render the user dashboard listing the user's links sorted by creation time."""
    # Replace request.user and prefetch related items that we need.
    request.user = (User.objects
                    .prefetch_related('link_set__datapoint_set',
                                      'link_set__banner_variation')
                    .get(pk=request.user.pk))

    # Sort links in Python (using the prefetched data). A key function replaces
    # the original Py2 cmp-comparator form of sorted(), which is slower and was
    # removed in Python 3; the resulting order is identical.
    links = sorted(request.user.link_set.all(), key=lambda link: link.created)
    return render(request, 'base/dashboard.html', {
        'links': links,
    })
@require_POST
def newsletter_subscribe(request):
    """Subscribe the posted email address to the 'affiliates' basket newsletter.

    Returns JSON: success on subscription, basket_error (500) on upstream
    failure, invalid_form (400) when validation fails.
    """
    form = NewsletterSubscriptionForm(request.POST)
    if not form.is_valid():
        # Bug fix: the original fell through to the success response even when
        # validation failed, silently dropping the subscription.
        return JSONResponse({'error': 'invalid_form'}, status=400)

    data = form.cleaned_data
    try:
        basket.subscribe(data['email'], 'affiliates',
                         format=data['format'], country=data['country'],
                         source_url=request.build_absolute_uri())
    except basket.BasketException as e:
        log.error('Error subscribing email {0} to mailing list: {1}'.format(data['email'], e))
        return JSONResponse({'error': 'basket_error'}, status=500)
    return JSONResponse({'success': 'success'})
@xframe_allow
def handler404(request):
    """404 handler: Facebook-app styled error page inside the FB iframe,
    the stock Django page otherwise."""
    if not in_facebook_app(request):
        return page_not_found(request)
    return render(request, 'facebook/error.html', status=404)
@xframe_allow
def handler500(request):
    """500 handler: Facebook-app styled error page inside the FB iframe,
    the stock Django page otherwise."""
    if not in_facebook_app(request):
        return server_error(request)
    return render(request, 'facebook/error.html', status=500)
def strings(request):
    """Render the page exposing localizable strings."""
    template_name = 'base/strings.html'
    return render(request, template_name)
class BrowserIDVerify(Verify):
    """BrowserID verification view that reports login failures as JSON."""

    def login_failure(self, msg=None):
        """Flash an error message and return a JSON redirect to the failure URL."""
        message = msg or _('Login failed. Firefox Affiliates has stopped accepting new users.')
        messages.error(self.request, message)
        return JSONResponse({'redirect': self.failure_url})
| 1.90625 | 2 |
Exercicios/mundo1-exercicios-01-35/ex003.py | rafaelbarretomg/Curso-Python-3 | 0 | 12759303 | <reponame>rafaelbarretomg/Curso-Python-3
# Exercise 3: read two numbers and show their sum.
first_number = int(input('Digite o primeiro numero: '))
second_number = int(input('Digite o segundo numero: '))
total = first_number + second_number
print('A soma de \033[31m{}\033[m e \033[34m{}\033[m eh de \033[32m{}\033[m '.format(first_number, second_number, total))
| 3.875 | 4 |
crud/admin.py | OuroborosD/03-PiggoV2 | 0 | 12759304 | <reponame>OuroborosD/03-PiggoV2
from django.contrib import admin
# Register your models here.
from .models import Receita,Despesa,Emprestimo
# Expose the finance models (income, expense, loan) in the Django admin site.
admin.site.register(Receita)
admin.site.register(Despesa)
admin.site.register(Emprestimo) | 1.46875 | 1 |
venv/lib/python3.7/site-packages/rinoh_typeface_texgyreheros/__init__.py | rodrez/jobpy | 3 | 12759305 | from os import path
from rinoh.font import Typeface
from rinoh.font.style import REGULAR, BOLD, ITALIC, CONDENSED
from rinoh.font.opentype import OpenTypeFont
__all__ = ['typeface']
def otf(style, variant=''):
    """Return the absolute path of the bundled TeX Gyre Heros OTF file
    for the given style ('regular', 'bold', ...) and width variant
    ('' for normal, 'cn' for condensed)."""
    font_file = 'texgyreheros{}-{}.otf'.format(variant, style)
    return path.join(path.dirname(__file__), font_file)
# The TeX Gyre Heros typeface: four normal-width and four condensed faces.
typeface = Typeface(
    'TeX Gyre Heros',
    # Normal width.
    OpenTypeFont(otf('regular'), weight=REGULAR),
    OpenTypeFont(otf('italic'), weight=REGULAR, slant=ITALIC),
    OpenTypeFont(otf('bold'), weight=BOLD),
    OpenTypeFont(otf('bolditalic'), weight=BOLD, slant=ITALIC),
    # Condensed width.
    OpenTypeFont(otf('regular', 'cn'), width=CONDENSED, weight=REGULAR),
    OpenTypeFont(otf('italic', 'cn'), width=CONDENSED, weight=REGULAR, slant=ITALIC),
    OpenTypeFont(otf('bold', 'cn'), width=CONDENSED, weight=BOLD),
    OpenTypeFont(otf('bolditalic', 'cn'), width=CONDENSED, weight=BOLD, slant=ITALIC),
)
| 2.34375 | 2 |
layouts/window_history.py | TkfleBR/PyManager | 0 | 12759306 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'window_history.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow
class History(QMainWindow):
    """'History' window generated by pyuic5 from window_history.ui:
    a filter bar (label, date picker, two buttons) above a read-only log view.

    WARNING: regenerating the .ui file will overwrite manual edits here.
    """

    def __init__(self):
        # Widget construction order matters for parenting and layout.
        super().__init__()
        self.setObjectName("MainWindow")
        self.resize(758, 473)
        self.centralwidget = QtWidgets.QWidget(self)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 741, 451))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.hverticalleyout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.hverticalleyout.setContentsMargins(0, 0, 0, 0)
        self.hverticalleyout.setObjectName("hverticalleyout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        # "Filter" label, 18pt, centered.
        self.hlabel = QtWidgets.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.hlabel.setFont(font)
        self.hlabel.setStyleSheet("")
        self.hlabel.setAlignment(QtCore.Qt.AlignCenter)
        self.hlabel.setObjectName("hlabel")
        self.horizontalLayout.addWidget(self.hlabel)
        # Date picker plus "Filter"/"No Filter" buttons.
        self.hdataedit = QtWidgets.QDateEdit(self.verticalLayoutWidget)
        self.hdataedit.setObjectName("hdataedit")
        self.horizontalLayout.addWidget(self.hdataedit)
        self.hbtn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.hbtn.setObjectName("hbtn")
        self.horizontalLayout.addWidget(self.hbtn)
        self.hbtn1 = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.hbtn1.setObjectName("hbtn1")
        self.horizontalLayout.addWidget(self.hbtn1)
        self.hverticalleyout.addLayout(self.horizontalLayout)
        # Read-only text area showing the history entries.
        self.plainTextEdit = QtWidgets.QPlainTextEdit(
            self.verticalLayoutWidget)
        self.plainTextEdit.setReadOnly(True)
        self.plainTextEdit.setObjectName("plainTextEdit")
        self.hverticalleyout.addWidget(self.plainTextEdit)
        self.setCentralWidget(self.centralwidget)
        self.retranslateUi()
        QtCore.QMetaObject.connectSlotsByName(self)
        self.show()

    def retranslateUi(self):
        # Apply translatable UI strings (pyuic5 convention).
        _translate = QtCore.QCoreApplication.translate
        self.setWindowTitle(_translate("MainWindow", "History"))
        self.hlabel.setText(_translate("MainWindow", "Filter"))
        self.hbtn.setText(_translate("MainWindow", "Filter"))
        self.hbtn1.setText(_translate("MainWindow", "No Filter"))
| 2.03125 | 2 |
__main__.py | Olddude/scripts | 0 | 12759307 | <gh_stars>0
#!/usr/bin/env python3
from sqlite3 import connect
from os import environ
# Dump the process environment (runs at import time; useful for debugging the
# container/launcher configuration).
for name, value in environ.items():
    print('{}: {}'.format(name, value))
def main():
    """Open the SQLite database named by $DATABASE and print every
    open-data-portal row (portal_title, api, url).

    Raises KeyError if DATABASE is unset; sqlite3 errors propagate. The
    connection is always closed, even on error (the original leaked it on
    exceptions and issued a pointless commit after a read-only SELECT).
    """
    connection = connect(environ["DATABASE"])
    try:
        cursor = connection.cursor()
        for row in cursor.execute("""
        select odp.portal_title, odp.api, odp.url
        from opendataportals as odp
        """):
            print(row)
    finally:
        connection.close()


if __name__ == "__main__":
    main()
| 3.40625 | 3 |
service-workers/service-worker/resources/update-fetch-worker.py | meyerweb/wpt | 14,668 | 12759308 | <reponame>meyerweb/wpt
import random
import time
def main(request, response):
    """wptserve handler: return a unique, uncacheable service-worker script.

    Each response embeds the current time and a random number in a comment so
    every update check sees a byte-different script.
    Returns (headers, body) per the wptserve handler convention.
    """
    # no-cache itself to ensure the user agent finds a new version for each update.
    # (The original also initialized content_type/extra_body to empty values
    # that were immediately overwritten; the dead assignments are removed.)
    headers = [(b'Cache-Control', b'no-cache, must-revalidate'),
               (b'Pragma', b'no-cache'),
               (b'Content-Type', b'application/javascript')]

    extra_body = u"self.onfetch = (event) => { event.respondWith(fetch(event.request)); };"

    # Return a different script for each access.
    return headers, u'/* %s %s */ %s' % (time.time(), random.random(), extra_body)
| 2.234375 | 2 |
aplikace/views.py | it1925/docker-django | 0 | 12759309 | <reponame>it1925/docker-django
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.template.loader import render_to_string, get_template
from django.template import engines
# Module-level demo of building a template from a string.
# NOTE(review): this `template` appears unused — the local variable of the
# same name in test() shadows it; confirm it can be removed.
django_engine = engines['django']
template = django_engine.from_string("Hello {{ name }}!")
def index(request):
    """Placeholder index view returning a plain-text greeting."""
    return HttpResponse("Hello, world. You're at the polls index.")
def test(request):
    """Render the test.html template."""
    tpl = get_template('test.html')
    return HttpResponse(tpl.render(request))
| 2.359375 | 2 |
notebooks/thunderdome/eth2spec/test/phase0/epoch_processing/test_process_historical_roots_update.py | casparschwa/beaconrunner | 2,161 | 12759310 | from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
run_epoch_processing_with
)
def run_process_historical_roots_update(spec, state):
    """Run epoch processing through the 'process_historical_roots_update'
    sub-transition, delegating the test-vector generator."""
    yield from run_epoch_processing_with(spec, state, 'process_historical_roots_update')
@with_all_phases
@spec_state_test
def test_historical_root_accumulator(spec, state):
    """One entry is appended to historical_roots at the period boundary."""
    # skip ahead to near the end of the historical roots period (excl block before epoch processing)
    state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
    roots_before = len(state.historical_roots)
    yield from run_process_historical_roots_update(spec, state)
    assert len(state.historical_roots) == roots_before + 1
| 2.140625 | 2 |
tests/items/steps/get_last_timestamp_test.py | umd-lib/caia | 0 | 12759311 | import pytest
from caia.items.items_job_config import ItemsJobConfig
from caia.items.steps.get_last_timestamp import GetLastTimestamp
def test_get_last_timestamp_default_timestamp():
    """A valid last-success file yields the recorded timestamp."""
    job_config = ItemsJobConfig(
        {'storage_dir': '/tmp',
         'last_success_lookup': 'tests/storage/items/items_last_success.txt'},
        'test')

    step_result = GetLastTimestamp(job_config).execute()

    assert step_result.was_successful() is True
    assert step_result.get_result() == "202007021300"
def test_get_last_timestamp_no_timestamp_in_file():
    """A response file missing the timestamp makes the step fail with an error."""
    job_config = ItemsJobConfig(
        {'storage_dir': '/tmp',
         'last_success_lookup': 'tests/storage/items/items_last_success.txt'},
        'test')

    # Point the step at a response that lacks a timestamp field.
    last_success_filepath = 'tests/resources/items/no_timestamp_src_response.json'
    job_config['last_success_filepath'] = last_success_filepath

    step_result = GetLastTimestamp(job_config).execute()

    assert step_result.was_successful() is False
    assert f"Could not find timestamp in {last_success_filepath}" in step_result.get_errors()
def test_get_last_timestamp_bad_file():
    """A nonexistent response file raises FileNotFoundError."""
    job_config = ItemsJobConfig(
        {'storage_dir': '/tmp',
         'last_success_lookup': 'tests/storage/items/items_last_success.txt'},
        'test')
    job_config['last_success_filepath'] = 'tests/resources/items/non_existent_response.json'

    with pytest.raises(FileNotFoundError):
        GetLastTimestamp(job_config).execute()
| 2.0625 | 2 |
Homework_4/Part I/Q1/consumer.py | jaiswalpuru/CS_6350-BigDataAnalyticsAndManagement- | 0 | 12759312 | <gh_stars>0
import os
import sys
import json
from elasticsearch import Elasticsearch
from pyspark import SparkContext, SparkConf
from pyspark.shell import spark
from pyspark.sql import SparkSession
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
es = Elasticsearch("http://localhost:9200")
def get_sentiment_val(text):
    """Classify text as 'positive'/'negative'/'neutral' via VADER's compound score."""
    compound = SentimentIntensityAnalyzer().polarity_scores(text)["compound"]
    if compound > 0:
        return "positive"
    if compound < 0:
        return "negative"
    return "neutral"
def get_hash_tag(text):
    """Map a tweet's text to one of the tracked hashtag buckets.

    Checked in priority order: blacklivesmatter, covid19, corona; anything
    else falls into 'others'. (The original lower-cased the text once per
    branch; it is now computed a single time.)
    """
    lowered = text.lower()
    if "blacklivesmatter" in lowered:
        return "#blacklivesmatter"
    if "covid19" in lowered:
        return "#covid19"
    if "corona" in lowered:
        # NOTE(review): no '#' prefix here, unlike the other buckets —
        # preserved as-is; confirm whether that is intentional.
        return "corona"
    return "others"
def analyze(time, rdd):
    """Index every record of this streaming micro-batch into Elasticsearch."""
    for record in rdd.collect():
        es.index(index="hash_tags_sentiment_analysis", document=record)
if __name__ == '__main__':
    # Spark session plus a 20-second micro-batch streaming context.
    spark = SparkSession.builder.appName("Sentiment_Analysis").config("spark.eventLog.enabled", "false").getOrCreate()
    sc = SparkContext.getOrCreate()
    topics = 'tweets_sentiment'
    ssc = StreamingContext(sc, 20)
    # Direct Kafka stream from the local broker.
    consumer = KafkaUtils.createDirectStream(
        ssc, [topics], {"metadata.broker.list":'localhost:9092'})
    # Pipeline: message value -> (text, sentiment, hashtag) -> dict for ES.
    rdds = consumer.map(lambda x : str(x[1].encode('ascii','ignore'))).map(
        lambda x: (x, get_sentiment_val(x), get_hash_tag(x))).map(
        lambda x: {"tweet": x[0], "sentiment": x[1], "hash_tag": x[2]})
    rdds.foreachRDD(analyze)
    # Blocks until the streaming context is stopped externally.
    ssc.start()
    ssc.awaitTermination() | 2.46875 | 2 |
code/models/xvae.py | CancerAI-CL/IntegrativeVAEs | 17 | 12759313 | from keras import backend as K
from keras import optimizers
from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda,Dropout
from keras.models import Model
from models.common import sse, bce, mmd, sampling, kl_regu
from keras.losses import mean_squared_error,binary_crossentropy
import numpy as np
from tensorflow import set_random_seed
class XVAE:
    """X-shaped variational autoencoder fusing two data sources (s1, s2)
    into one latent space; hyperparameters come from ``args``."""

    def __init__(self, args):
        # args: namespace with s1_input_size, s2_input_size, ds, ls, act,
        # dropout, integration, distance, beta, bs, epochs, save_model.
        self.args = args
        self.vae = None
        self.encoder = None

    def build_model(self):
        """Construct encoder, decoder and the combined VAE, then compile.

        Layer-creation order defines the Keras graph — do not reorder.
        """
        # Fixed seeds for reproducible weight init / sampling.
        np.random.seed(42)
        set_random_seed(42)
        # Build the encoder network
        # ------------ Input -----------------
        s1_inp = Input(shape=(self.args.s1_input_size,))
        s2_inp = Input(shape=(self.args.s2_input_size,))
        inputs = [s1_inp, s2_inp]
        # ------------ Concat Layer -----------------
        x1 = Dense(self.args.ds, activation=self.args.act)(s1_inp)
        x1 = BN()(x1)
        x2 = Dense(self.args.ds, activation=self.args.act)(s2_inp)
        x2 = BN()(x2)
        x = Concatenate(axis=-1)([x1, x2])
        x = Dense(self.args.ds, activation=self.args.act)(x)
        x = BN()(x)
        # ------------ Embedding Layer --------------
        z_mean = Dense(self.args.ls, name='z_mean')(x)
        z_log_sigma = Dense(self.args.ls, name='z_log_sigma', kernel_initializer='zeros')(x)
        z = Lambda(sampling, output_shape=(self.args.ls,), name='z')([z_mean, z_log_sigma])
        self.encoder = Model(inputs, [z_mean, z_log_sigma, z], name='encoder')
        self.encoder.summary()
        # Build the decoder network
        # ------------ Dense out -----------------
        latent_inputs = Input(shape=(self.args.ls,), name='z_sampling')
        x = latent_inputs
        x = Dense(self.args.ds, activation=self.args.act)(x)
        x = BN()(x)
        x=Dropout(self.args.dropout)(x)
        # ------------ Dense branches ------------
        x1 = Dense(self.args.ds, activation=self.args.act)(x)
        x1 = BN()(x1)
        x2 = Dense(self.args.ds, activation=self.args.act)(x)
        x2 = BN()(x2)
        # ------------ Out -----------------------
        s1_out = Dense(self.args.s1_input_size, activation='sigmoid')(x1)
        # s2 output is sigmoid for binary CNA data, linear otherwise.
        if self.args.integration == 'Clin+CNA':
            s2_out = Dense(self.args.s2_input_size,activation='sigmoid')(x2)
        else:
            s2_out = Dense(self.args.s2_input_size)(x2)
        decoder = Model(latent_inputs, [s1_out, s2_out], name='decoder')
        decoder.summary()
        outputs = decoder(self.encoder(inputs)[2])
        self.vae = Model(inputs, outputs, name='vae_x')
        # Latent regularizer: MMD or KL, selected by args.distance.
        # NOTE(review): any other value leaves `distance` unbound -> NameError.
        if self.args.distance == "mmd":
            true_samples = K.random_normal(K.stack([self.args.bs, self.args.ls]))
            distance = mmd(true_samples, z)
        if self.args.distance == "kl":
            distance = kl_regu(z_mean,z_log_sigma)
        s1_loss= binary_crossentropy(inputs[0], outputs[0])
        if self.args.integration == 'Clin+CNA':
            s2_loss =binary_crossentropy(inputs[1], outputs[1])
        else:
            s2_loss =mean_squared_error(inputs[1], outputs[1])
        reconstruction_loss = s1_loss+s2_loss
        # Total loss = reconstruction + beta-weighted latent distance,
        # attached via add_loss (hence no loss= in compile below).
        vae_loss = K.mean(reconstruction_loss + self.args.beta * distance)
        self.vae.add_loss(vae_loss)
        adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, amsgrad=False, decay=0.001)
        self.vae.compile(optimizer=adam, metrics=[mean_squared_error, mean_squared_error])
        self.vae.summary()

    def train(self, s1_train, s2_train, s1_test, s2_test):
        """Fit the VAE on the two training sources, validating on the test split."""
        self.vae.fit([s1_train, s2_train], epochs=self.args.epochs, batch_size=self.args.bs, shuffle=True,
                     validation_data=([s1_test, s2_test], None))
        if self.args.save_model:
            self.vae.save_weights('./models/vae_xvae.h5')

    def predict(self, s1_data, s2_data):
        """Return the latent means (z_mean) for the given paired inputs."""
        return self.encoder.predict([s1_data, s2_data], batch_size=self.args.bs)[0]
| 2.421875 | 2 |
code_scanner/tests/test_data_model.py | stargrep/rmm-utils | 0 | 12759314 | from pathlib import Path
import pytest
from code_scanner.enums import FileType
from code_scanner.file_info import FileInfo
@pytest.fixture
def file_info():
    """A sample python source-code FileInfo."""
    return FileInfo(Path('/tmp/test.py'), FileType.SOURCE_CODE)
@pytest.fixture
def another_file_info():
    """A second, value-equal python source-code FileInfo."""
    return FileInfo(Path('/tmp/test.py'), FileType.SOURCE_CODE)
def test_file_info_funcs(file_info, another_file_info):
    """Value-equal FileInfo objects share str/repr/hash/equality but not identity."""
    assert file_info.full_name == Path('/tmp/test.py')
    assert str(file_info) == 'SOURCE_CODE-/tmp/test.py'
    assert file_info == another_file_info
    assert hash(file_info) == hash(another_file_info)
    assert file_info is file_info  # 'is' test only identical objects
    assert repr(file_info) == repr(another_file_info)
    assert file_info in [another_file_info]
    # Distinct objects, so identity (id) must differ.
    with pytest.raises(AssertionError):
        assert id(file_info) == id(another_file_info)
@pytest.mark.parametrize("updated_name,expected_full_name", [
    ('/tmp/new.py', 'SOURCE_CODE-/tmp/new.py'),
    ('/tmp/tests.py', 'SOURCE_CODE-/tmp/tests.py')
])
def test_update_file_name(file_info, updated_name, expected_full_name):
    """Reassigning full_name is reflected in the string form."""
    file_info.full_name = updated_name
    assert str(file_info) == expected_full_name
| 2.65625 | 3 |
FISTALearner.py | emdodds/DictLearner | 0 | 12759315 | # -*- coding: utf-8 -*-
import numpy as np
from DictLearner import DictLearner
import scipy.sparse.linalg
"""The inference code was adapted from <NAME>'s sparsenet implementation,
available on github."""
class FISTALearner(DictLearner):
    """Dictionary learner whose sparse codes are inferred with FISTA on the
    lasso (l1) problem. Inference adapted from Jesse Livezey's sparsenet."""

    def __init__(self, data, learnrate, nunits, lam=0.4, niter=100, **kwargs):
        """lam: sparsity penalty; niter: default number of FISTA iterations."""
        # Bug fix: this was hard-coded to 0.4, silently ignoring the `lam`
        # argument; the default value is unchanged.
        self.lam = lam
        self.niter = niter
        super().__init__(data, learnrate, nunits, **kwargs)

    def infer(self, data, max_iterations=None, display=False):
        """FISTA inference for the lasso (l1) problem.

        data: batch of data (dim x batch)
        max_iterations: number of FISTA steps (defaults to self.niter)
        display: if truthy, print the final l1 objective
        Returns (coefficients, 0, 0) to match the DictLearner.infer interface.
        """
        lambdav = self.lam

        def proxOp(x, t):
            """L1 proximal operator (soft thresholding)."""
            return np.fmax(x - t, 0) + np.fmin(x + t, 0)

        x = np.zeros((self.Q.shape[0], data.shape[1]))
        c = self.Q.dot(self.Q.T)
        b = -2 * self.Q.dot(data)

        # Step size 1/L, where L is the Lipschitz constant of the gradient:
        # the largest eigenvalue of 2*Q*Q^T.
        L = scipy.sparse.linalg.eigsh(2 * c, 1, which='LM')[0]
        invL = 1 / float(L)

        y = x
        t = 1

        max_iterations = max_iterations or self.niter
        for i in range(max_iterations):
            g = 2 * c.dot(y) + b                       # gradient of the smooth term
            x2 = proxOp(y - invL * g, invL * lambdav)  # proximal gradient step
            t2 = (1 + np.sqrt(1 + 4 * (t ** 2))) / 2.0
            y = x2 + ((t - 1) / t2) * (x2 - x)         # Nesterov momentum
            x = x2
            t = t2
        if display:
            print("L1 Objective " + str(np.sum((data - self.Q.T.dot(x2)) ** 2)
                                        + lambdav * np.sum(np.abs(x2))))
        return x2, 0, 0
gan_ae.py | shenberg/neural_compressor | 0 | 12759316 | import os, sys, time
sys.path.append(os.getcwd())
import torch
import torchvision
from torch import nn
from torch import autograd
from torch import optim
import torch.nn.functional as F
#torch.set_default_tensor_type('torch.cuda.HalfTensor')
import time
import tflib as lib
import tflib.save_images
import tflib.mnist
import tflib.cifar10
import tflib.plot
#import tflib.inception_score
import numpy as np
from tqdm import tqdm
from layers import LayerNorm
from models import Generator, Comparator, AutoEncoder, Picker, Critic, PickerMirrored, MultiscalePicker
from utils import DatasetSubset, mix_samples, _mix_samples, load_batches
import orthoreg
from pytorch_ssim import SSIM
# Download CIFAR-10 (Python version) at
# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the
# extracted files here!
# ---- Run configuration (hard-coded paths and hyperparameters) ----
DATA_DIR = '/home/shenberg/Documents/compression/'
BATCHES_PATH = '/mnt/7FC1A7CD7234342C/compression-results/dataset_bs_128_size_64_half_2k/'
OUTPUT_BASE_DIR = '/mnt/7FC1A7CD7234342C/compression-results/'
# Each run writes plots/samples/checkpoints into a timestamped directory.
RUN_PATH = '{}{}/'.format(OUTPUT_BASE_DIR, time.strftime('%Y_%m_%d_%H_%M_%S')) #TODO: generate by settings
if not os.path.exists(RUN_PATH):
    os.mkdir(RUN_PATH)
#TODO:hack
tflib.plot.log_dir = RUN_PATH
if len(DATA_DIR) == 0:
    raise Exception('Please specify path to data directory in gan_cifar.py!')
MODE = 'wgan-ae' # Valid options are dcgan, wgan, or wgan-gp
DIM = 64 # This overfits substantially; you're probably better off with 64
CRITIC_DIM = 32 # ambition
INPUT_DIM = 256 # generator input dimension (latent variable dimension)
LAMBDA = 10 # Gradient penalty lambda hyperparameter
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 64 # Batch size
IMAGE_SIZE = 64 # size of side of image
ITERS = 100000 # How many generator iterations to train for
KERNEL_SIZE = 4
LR = 5e-5
TRAIN_RATIO=0.9
GENERATOR_INSTANCE_NORM = LayerNorm
ENCODER_LAYER_NORM = True
ORTHOREG_LOSS = True
ONE_SIDED = False
GENERATOR_L2_LOSS = True
GENERATOR_L2_LAMBDA = 8
GENERATOR_SSIM_LOSS = False
GENERATOR_SSIM_LAMBDA = 8
NET_D = 'multiscale-picker'
# Snapshot of the settings above, serialized alongside the run's outputs.
params = dict(
        MODE = 'wgan-ae', # Valid options are dcgan, wgan, or wgan-gp
        DIM = DIM, # This overfits substantially; you're probably better off with 64
        INPUT_DIM = INPUT_DIM, # generator input dimension (latent variable dimension)
        LAMBDA = LAMBDA, # Gradient penalty lambda hyperparameter
        CRITIC_ITERS = CRITIC_ITERS, # How many critic iterations per generator iteration
        BATCH_SIZE = BATCH_SIZE, # Batch size
        ITERS = ITERS, # How many generator iterations to train for
        KERNEL_SIZE = KERNEL_SIZE,
        GENERATOR_INSTANCE_NORM = GENERATOR_INSTANCE_NORM.__name__ if GENERATOR_INSTANCE_NORM else 'None',
        # GENERATOR_GP = GENERATOR_GP,
        ENCODER_LAYER_NORM = ENCODER_LAYER_NORM,
        LR=LR,
        ONE_SIDED=ONE_SIDED,
        CRITIC_DIM=CRITIC_DIM,
        ORTHOREG_LOSS=ORTHOREG_LOSS,
        GENERATOR_L2_LOSS = GENERATOR_L2_LOSS,
        GENERATOR_L2_LAMBDA = GENERATOR_L2_LAMBDA,
        GENERATOR_SSIM_LOSS = GENERATOR_SSIM_LOSS,
        GENERATOR_SSIM_LAMBDA = GENERATOR_SSIM_LAMBDA,
        NET_D = NET_D,
    )

with open(RUN_PATH + '/algo_params.txt','w') as f:
    import json
    json.dump(params, f, indent=2)
def critic_schedule():
    """Yield the number of critic iterations per generator step:
    a warm-up of 100 for the first 30 steps, then CRITIC_ITERS forever."""
    for _ in range(30):
        yield 100
    while True:
        for _ in range(99):
            yield CRITIC_ITERS
        #yield 100 # 100 iters every 100 iters
def gen_schedule():
    """Yield the number of generator iterations per outer step.

    Currently every phase yields 1; the staged loops are kept as tuning
    scaffolding for experimenting with different per-phase counts."""
    for _ in range(10):
        yield 1
    for _ in range(100):
        yield 1
    for _ in range(7000):
        yield 1
    while True:
        yield 1
# custom weights initialization called on netG and netD
# custom weights initialization called on netG and netD
def weights_init(m):
    """Per-layer init applied via net.apply(): conv weights ~ N(0, 0.02),
    norm weights ~ N(1, 0.02), linear weights ~ N(0, 0.01); biases zeroed.

    Uses substring membership instead of the original `.find(...) != -1`
    (same matching, clearer idiom).
    """
    classname = m.__class__.__name__
    if 'Conv' in classname:
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif 'Norm' in classname:
        # Affine norm layers may have weight=None when affine=False.
        if m.weight is not None:
            m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif 'Linear' in classname:
        m.weight.data.normal_(0.0, 0.01)
        m.bias.data.fill_(0)
# ---- Model construction, data pipeline, and optimizers ----
#netG = Generator(DIM, INPUT_DIM, IMAGE_SIZE, GENERATOR_INSTANCE_NORM)
netG = AutoEncoder(DIM, INPUT_DIM, IMAGE_SIZE, GENERATOR_INSTANCE_NORM is not None)

# Discriminator variant selected by the NET_D config string.
if NET_D == 'comparator':
    netD = Comparator(CRITIC_DIM, INPUT_DIM, IMAGE_SIZE, ENCODER_LAYER_NORM)
elif NET_D == 'picker':
    netD = Picker(CRITIC_DIM, IMAGE_SIZE, ENCODER_LAYER_NORM)
elif NET_D == 'picker-mirrored':
    netD = PickerMirrored(CRITIC_DIM, IMAGE_SIZE, ENCODER_LAYER_NORM)
elif NET_D == 'critic':
    netD = Critic(CRITIC_DIM, IMAGE_SIZE, ENCODER_LAYER_NORM)
elif NET_D == 'multiscale-picker':
    netD = MultiscalePicker(CRITIC_DIM, ENCODER_LAYER_NORM)
else:
    raise Exception('no choice of net D')
netG.apply(weights_init)
netD.apply(weights_init)
print(netG)
print(netD)

use_cuda = torch.cuda.is_available()

ssim_loss = SSIM()
mse_loss = torch.nn.MSELoss()
if use_cuda:
    gpu = 0
    # makes things slower?!
    torch.backends.cudnn.benchmark = True
    netD = netD.cuda(gpu)
    netG = netG.cuda(gpu)
    mse_loss = mse_loss.cuda(gpu)
    ssim_loss = ssim_loss.cuda(gpu)

#TODO: hack
# pre-processing transform
# augmentation goes here, e.g. RandomResizedCrop instead of regular random crop
transform = torchvision.transforms.Compose([
    #torchvision.transforms.RandomCrop(IMAGE_SIZE),
    torchvision.transforms.RandomResizedCrop(IMAGE_SIZE),
    #torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Lambda(lambda x: (x - 0.5) * 2) # convert pixel values from 0..1 to -1..1
    ])

# Dataset iterator
#images_dataset = torchvision.datasets.ImageFolder(DATA_DIR, transform=transform)
images_dataset = load_batches(BATCHES_PATH)
# Train/dev split by TRAIN_RATIO over the pre-batched dataset.
train_dataset = DatasetSubset(images_dataset, 0, TRAIN_RATIO)
dev_dataset = DatasetSubset(images_dataset, TRAIN_RATIO, 1.0)
train_gen = torch.utils.data.DataLoader(train_dataset, BATCH_SIZE, shuffle=False,
                                        pin_memory=use_cuda, num_workers=6)
dev_gen = torch.utils.data.DataLoader(dev_dataset, BATCH_SIZE, shuffle=False,
                                      pin_memory=use_cuda, num_workers=5)
#TODO: end of hack

#if use_cuda:
#    torch.set_default_tensor_type('torch.cuda.HalfTensor')

# Reusable accumulators for the orthogonality regularization losses.
ortho_loss_g = torch.zeros(1)
ortho_loss_d = torch.zeros(1)
if use_cuda:
    ortho_loss_g = ortho_loss_g.cuda()
    ortho_loss_d = ortho_loss_d.cuda()

#for mod in [netG, netD]:
#    mod.half()
optimizerD = optim.Adam(netD.parameters(), lr=LR, betas=(0.5, 0.9))
optimizerG = optim.Adam(netG.parameters(), lr=LR, betas=(0.5, 0.9))

netG.train()
netD.train()
def calc_gradient_penalty(netD, real_data, fake_data):
    """WGAN-GP style penalty for the pairwise 'picker' critic.

    Interpolates between real and fake batches, feeds a (real, interpolate)
    mix through netD, and penalizes gradient norms (w.r.t. BOTH inputs)
    that deviate from 1 (one-sided if ONE_SIDED). Commented-out lines are
    retained experiments. Uses module globals: use_cuda, gpu, IMAGE_SIZE,
    ONE_SIDED, _mix_samples.
    """
    # print "real_data: ", real_data.size(), fake_data.size()
    alpha = torch.rand(real_data.size(0), 1)
    #alpha = alpha.expand_as(real_data.data)
    # unnecessary with broadcasting
    alpha = alpha.expand(real_data.size(0), real_data.nelement()//real_data.size(0)).contiguous().view(real_data.size(0), 3, IMAGE_SIZE, IMAGE_SIZE)
    alpha = alpha.cuda(gpu) if use_cuda else alpha

    # Random point on the line between each real and fake sample.
    interpolates = alpha * real_data.data + ((1 - alpha) * fake_data.data)
    #interpolates = real_data + (alpha * real_data_grad.data * torch.norm(real_data_grad.data, p=2, dim=1).unsqueeze(1))

    if use_cuda:
        interpolates = interpolates.cuda(gpu)
    interpolates = autograd.Variable(interpolates, requires_grad=True)

    #scores = netD.encoder(interpolates)
    g1, g2, g1_is_real = _mix_samples(autograd.Variable(real_data.data, requires_grad=True), interpolates, use_cuda)
    scores = netD(g1, g2)

    #gradients = autograd.grad(outputs=scores, inputs=[g1, g2],
    gradients_list = autograd.grad(outputs=scores, inputs=[g1, g2],
                              grad_outputs=torch.ones(scores.size()).cuda(gpu) if use_cuda else torch.ones(scores.size()),
                              create_graph=True, retain_graph=True, only_inputs=True)

    # Get gradient relative to interpolates
    #grad1, grad2 = gradients_list
    #gradients = grad1.clone() # assume interpolate in g1
    # if real was in g1, copy from same row in g2
    #real_indices = g1_is_real.nonzero()
    #if len(real_indices) > 0:
    #    real_indices = real_indices.squeeze()
    #    gradients[real_indices] = grad2[real_indices]

    #gradients = gradients.contiguous().view(gradients.size(0), -1)
    #if ONE_SIDED:
    #    gradient_penalty = (F.relu(gradients.norm(2, dim=1) - 1, inplace=True) ** 2).mean()
    #else:
    #    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()

    # Gradients relative to all inputs
    gradient_penalty = 0
    for gradients in gradients_list:
        gradients = gradients.contiguous().view(gradients.size(0), -1)
        if ONE_SIDED:
            gradient_penalty += (F.relu(gradients.norm(2, dim=1) - 1, inplace=True) ** 2).mean()
        else:
            gradient_penalty += ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
def save_images(images_tensor, output_path):
    """Denormalize from [-1, 1] back to [0, 1] and write an image grid to disk."""
    pixels = images_tensor.mul(0.5).add(0.5).cpu().data.numpy()
    lib.save_images.save_images(pixels, output_path)
def inf_train_gen():
    """Yield training image batches forever by endlessly cycling the DataLoader."""
    while True:
        for batch, _ in train_gen:
            # yield images.astype('float32').reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)
            yield batch
# ---- Main training loop: alternate critic and generator updates ----
gen = inf_train_gen()
#torch.set_default_tensor_type('torch.HalfTensor')
CRITIC_GEN = critic_schedule()
GEN_ITERS = gen_schedule()
for iteration in tqdm(range(ITERS)):
    start_time = time.time()
    ############################
    # (1) Update D network
    ###########################
    for p in netD.parameters(): # reset requires_grad
        p.requires_grad = True # they are set to False below in netG update
    #for i in range(CRITIC_ITERS):
    netG.eval()
    netD.train()
    for i in range(next(CRITIC_GEN)):
        _data = next(gen)
        netD.zero_grad()
        # train with real
        real_data, real_target = _data # preprocess(_data)#torch.stack([preprocess(item) for item in _data])
        if use_cuda:
            real_data = real_data.cuda(gpu, async=True)
            real_target = real_target.cuda(gpu, async=True)
        real_data_v = autograd.Variable(real_data)
        real_target_v = autograd.Variable(real_target)

        # import torchvision
        # filename = os.path.join("test_train_data", str(iteration) + str(i) + ".jpg")
        # torchvision.utils.save_image(real_data, filename)

        # train with fake
        #gen_input = netD.encoder(real_data_v)
        #fake = netG(gen_input)
        fake = netG(real_data_v)

        # The 'picker' critic scores randomly-ordered (real, fake) pairs.
        g1, g2, real_is_first = mix_samples(real_target_v, fake, use_cuda)
        # positive score = first item is real
        # negative score = second item is real
        # so if critic guesses positive where 1st item is real, scale score by -1
        # to drive loss down
        # and so forth
        # TODO: preallocate all this stuff
        error_scaling = (1 - 2*real_is_first.float())
        if use_cuda:
            error_scaling = error_scaling.cuda()
        # make right dimensions for broadcasting
        error_scaling_v = autograd.Variable(error_scaling.unsqueeze(1))

        loss = (netD(g1, g2) * error_scaling_v).mean()

        if ORTHOREG_LOSS:
            ortho_loss_d[0] = 0
            ortho_loss_v = autograd.Variable(ortho_loss_d)
            orthoreg.orthoreg_loss(netD, ortho_loss_v)
            loss += ortho_loss_v

        # train with gradient penalty
        gradient_penalty = calc_gradient_penalty(netD, real_target_v, fake)
        #gradient_penalty = autograd.Variable(torch.cuda.FloatTensor(1).fill_(0))

        #torch.nn.utils.clip_grad_norm(netD.parameters(), 2, 1)
        loss += gradient_penalty * LAMBDA
        loss.backward()

        optimizerD.step()

    # Recovered diagnostics (W-distance excludes the penalty/ortho terms).
    Wasserstein_D = -(loss.data - gradient_penalty.data * LAMBDA - ortho_loss_v.data)
    D_cost = loss.data

    ############################
    # (2) Update G network
    ###########################
    netG.train()
    netD.eval()
    for p in netD.parameters():
        p.requires_grad = False # to avoid computation
    for i in range(next(GEN_ITERS)):
        netG.zero_grad()

        _data = next(gen)
        # train with real
        real_data, real_target = _data # preprocess(_data)#torch.stack([preprocess(item) for item in _data])
        if use_cuda:
            real_data = real_data.cuda(gpu, async=True)
            real_target = real_target.cuda(gpu, async=True)
        real_data_v = autograd.Variable(real_data)
        real_target_v = autograd.Variable(real_target)

        #gen_input = netD.encoder(real_data_v)
        #fake = netG(gen_input)
        fake = netG(real_data_v)

        g1, g2, real_is_first = mix_samples(real_target_v, fake, use_cuda)
        # positive score = first item is real
        # negative score = second item is real
        # so if critic guesses positive where 1st item is real, scale score by -1
        # to drive loss down
        # and so forth
        # TODO: preallocate all this stuff
        error_scaling = (1 - 2*real_is_first.float())
        if use_cuda:
            error_scaling = error_scaling.cuda()
        error_scaling_v = autograd.Variable(error_scaling.unsqueeze(1))

        # minus discriminator loss is generator loss
        loss = -(netD(g1, g2) * error_scaling_v).mean()

        if ORTHOREG_LOSS:
            ortho_loss_g[0] = 0
            ortho_loss_v = autograd.Variable(ortho_loss_g)
            orthoreg.orthoreg_loss(netG, ortho_loss_v)
            loss += ortho_loss_v

        # Optional reconstruction terms (L2 / SSIM) on top of the GAN loss.
        if GENERATOR_L2_LOSS:
            l2_loss = mse_loss(fake, real_target_v)
            loss += l2_loss*GENERATOR_L2_LAMBDA

        if GENERATOR_SSIM_LOSS:
            ssim_penalty = ssim_loss(fake*0.5 + 0.5, real_target_v *0.5 + 0.5)
            loss += ssim_penalty*GENERATOR_SSIM_LAMBDA

        # no GP
        loss.backward()
        G_cost = loss.data - ortho_loss_v.data
        optimizerG.step()

    # Write logs and save samples
    lib.plot.plot(RUN_PATH + 'train disc cost', D_cost.cpu().numpy())
    lib.plot.plot(RUN_PATH + 'time', time.time() - start_time)
    lib.plot.plot(RUN_PATH + 'train gen cost', G_cost.cpu().numpy())
    lib.plot.plot(RUN_PATH + 'wasserstein distance', Wasserstein_D.cpu().numpy())
    if ORTHOREG_LOSS:
        lib.plot.plot(RUN_PATH + 'ortho loss G', ortho_loss_g.cpu().numpy())
        lib.plot.plot(RUN_PATH + 'ortho loss D', ortho_loss_d.cpu().numpy())

    # Calculate dev loss and generate samples every 100 iters
    if iteration % 100 == 99:
        #dev_disc_costs = []
        #netD.eval()
        #for images, _ in dev_gen:
        #    images = images.view((-1, 3, 128, 128))
        #    imgs = images#preprocess(images)
        #
        #    #imgs = preprocess(images)
        #    if use_cuda:
        #        imgs = imgs.cuda(gpu)
        #    imgs_v = autograd.Variable(imgs, volatile=True)
        #
        #    D, encoded = netD(imgs_v)
        #    _dev_disc_cost = -D.mean().cpu().data.numpy()
        #    dev_disc_costs.append(_dev_disc_cost)
        #netD.train()
        #lib.plot.plot(RUN_PATH + 'dev disc cost', np.mean(dev_disc_costs))

        #fixed_noise_128 = torch.randn(128, INPUT_DIM)
        #if use_cuda:
        #    fixed_noise_128 = fixed_noise_128.cuda(gpu)
        #generate_image(iteration, netG, fixed_noise_128)
        #generate_image("{}_reconstruct".format(iteration), netG, encoded.data, True)
        save_images(real_data_v, RUN_PATH + 'samples_{}_original.jpg'.format(iteration))
        save_images(fake, RUN_PATH + 'samples_{}_reconstruct.jpg'.format(iteration))
        #print(encoded)
        #print(fixed_noise_128)

    # Save logs every 200 iters
    if (iteration < 5) or (iteration % 100 == 99):
        lib.plot.flush()

    lib.plot.tick()

    # Periodic checkpoint of both nets and optimizer state.
    if iteration % 1000 == 999:
        state_dict = {
                    'iters': iteration + 1,
                    'algo_params': params,
                    'gen_state_dict': netG.state_dict(),
                    'critic_state_dict': netD.state_dict(),
                    'optimizerG' : optimizerG.state_dict(),
                    'optimizerD' : optimizerD.state_dict(),
                }

        torch.save(state_dict, RUN_PATH + 'state_{}.pth.tar'.format(iteration+1)) | 2.015625 | 2 |
poky-dunfell/bitbake/lib/toaster/tests/browser/selenium_helpers.py | lacie-life/YoctoPi | 14 | 12759317 | <gh_stars>10-100
#! /usr/bin/env python3
#
# BitBake Toaster Implementation
#
# Copyright (C) 2013-2016 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
# The Wait class and some of SeleniumDriverHelper and SeleniumTestCase are
# modified from Patchwork, released under the same licence terms as Toaster:
# https://github.com/dlespiau/patchwork/blob/master/patchwork/tests.browser.py
"""
Helper methods for creating Toaster Selenium tests which run within
the context of Django unit tests.
"""
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from tests.browser.selenium_helpers_base import SeleniumTestCaseBase
class SeleniumTestCase(SeleniumTestCaseBase, StaticLiveServerTestCase):
    """Concrete Selenium test case for Toaster.

    Combines the Selenium helper base class with Django's
    ``StaticLiveServerTestCase`` so browser-driven tests run against a live
    test server that also serves static files.
    """
    pass
| 1.890625 | 2 |
worker/LinkedIn/service1/Extractor.py | LamriAli/remote-extraction-proxy-and-worker | 0 | 12759318 | <filename>worker/LinkedIn/service1/Extractor.py
from asyncio.windows_events import NULL
from selenium import webdriver
from bs4 import BeautifulSoup
import time
import pandas as pd
from bot_studio import *
import re as re
from selenium.webdriver.common.by import By
import win32clipboard
import os
from bs4 import BeautifulSoup as bs
from connect import connect
from context import Context
import networkx as nx
from get_data import get_post_url
from Add_data import Add_comment_user,Add_Posts_nodes
import json
from networkx.readwrite import json_graph
#from API_ExtractionService.Network_Extractor import Network_Extractor
from Zos_context import Zos_Context
class Extractor():  # NetworkExtractor
    """Scrapes a LinkedIn page and builds a graph of its posts/comments/users.

    Credentials and extraction limits are read from the environment-backed
    ``Zos_Context``; the scraped network is held in a ``networkx.DiGraph``
    and serialised to ``file_graphe.json``.
    """

    # Class-level defaults, shadowed by the instance attributes set in
    # __init__.  (The original also assigned ``context = None`` twice; the
    # duplicate has been removed.)
    # NOTE(review): ``NULL`` comes from ``asyncio.windows_events`` (it is the
    # integer 0 and Windows-only); a plain ``None`` sentinel would be more
    # portable — kept here for behavioural parity.
    context = None
    Schema = []
    graphe = NULL
    linkedin = NULL

    def __init__(self, Schema):
        """Log in to LinkedIn and immediately run the extraction for *Schema*."""
        print("Extractors")
        # Read credentials and extraction limits from the ZOS context.
        zos_context = Zos_Context()
        keys = {'username': zos_context.get("LI_USERNAME"), 'password': zos_context.get("LI_PASSWORD")}
        self.context = Context(zos_context.get("LI_ACCOUNT"), keys, zos_context.get("LIMIT_POSTS"), zos_context.get("LIMIT_FRIENDS"), zos_context.get("LIMIT_COMMENTS"), zos_context.get("USER_COMMENT"), zos_context.get("ADD_USER"), zos_context.get("ADD_COM"), zos_context.get("POST"))
        self.graphe = nx.DiGraph()
        # LinkedIn automation session (bot_studio).
        self.linkedin = bot_studio.linkedin()
        self.linkedin.login(zos_context.get("LI_USERNAME"), zos_context.get("LI_PASSWORD"))
        self.create_graphe(Schema)

    def get_graph(self):
        """Return the current graph instance."""
        return self.graphe

    def set_graph(self, g):
        """Replace the current graph instance with *g*."""
        self.graphe = g

    def save_json(self, filename, graph):
        """Serialise *graph* to *filename* in networkx node-link JSON format."""
        g_json = json_graph.node_link_data(graph)
        json.dump(g_json, open(filename, 'w'), indent=2)

    def Scraper_page(self, username, password, page_url):
        """Scrape name, location, follower line and description of a page.

        Returns a dict with keys ``name``, ``localisation``, ``abonnee`` and
        ``description``.  Will raise AttributeError if LinkedIn's markup
        (the CSS classes below) changes.
        """
        driver = connect().connection(username, password)
        data = {}
        driver.get(page_url)
        time.sleep(3)  # give the page time to render before grabbing the DOM
        src = driver.page_source
        soup = BeautifulSoup(src, 'lxml')
        time.sleep(5)
        intro = soup.find('div', {'class': 'block mt2'})
        name_loc = intro.find("h1")
        data["name"] = name_loc.get_text().strip()
        works_at_loc = intro.find("div", {'class': 'inline-block'}).find_all('div')
        loc = works_at_loc[0].get_text().strip()
        data['localisation'] = loc
        abonnee = works_at_loc[1].get_text().strip()
        data['abonnee'] = abonnee
        description = soup.find('div', {'class': 't-14 t-black--light full-width break-words ember-view'})
        data['description'] = description.get_text().strip()
        driver.close()
        return data

    # NOTE(review): a stray "<EMAIL>" token (data-anonymisation residue) stood
    # on its own line here in the original and made the module unparseable;
    # it has been removed.
    def create_graphe(self, Schema):
        """Walk unvisited page nodes, attach their posts (and optionally the
        comments/users) to the graph, then persist the graph as JSON."""
        print("-----create graphe--------")
        context = self.context
        username = context.keys['username']
        password = context.keys['password']
        page = context.account
        limit_comment = context.limit_comments
        Graphe = self.graphe
        if Graphe.number_of_nodes() == 0:
            # Seed the graph with the root page node.
            Graphe.add_nodes_from([(page, {'id': page,
                                           'checked': 0,
                                           'type': 'page'})])
            page_inf = self.Scraper_page(username, password, page)
            # NOTE(review): set_node_attributes with a scalar value applies the
            # attribute to every node; at this point only the page node exists.
            for attr in Schema['page']:
                nx.set_node_attributes(Graphe, name=attr, values=str(page_inf[attr]))
        try:
            Nodeslist = [v for v in Graphe.nodes()]
            for v in Nodeslist:
                if Graphe.nodes[v]['checked'] == 0:
                    Graphe.nodes[v]['checked'] = 1
                    if Graphe.nodes[v]['type'] == 'page':
                        # Collect post URLs for this page (bounded by the limit).
                        limit_posts = context.limit_posts
                        list_url = get_post_url(username, password, context.limit_posts, v)
                        time.sleep(4)
                        if len(list_url) == 0:
                            print("no url selected")
                            break
                        Add_Posts_nodes(Graphe, context, Schema, list_url, v)
                        # Optionally attach comments and the commenting users.
                        user_comment = context.user_comment
                        if (user_comment == 'True'):
                            add_comm = context.add_comm
                            add_user = context.add_user
                            Add_comment_user(self.linkedin, Graphe, context, username, password, list_url, limit_comment, Schema, add_user, add_comm)
            Nodeslist = [v for v in Graphe.nodes()]
            print("Extraction complete.")
            # Adopt the graph accumulated on the context and persist it.
            self.set_graph(context.graph)
            final_graph = self.get_graph()
            self.save_json("file_graphe.json", final_graph)
            # BUG FIX: the original called json.loads("file_graphe.json"),
            # i.e. tried to parse the *filename* as a JSON document, which
            # always raises.  Load the file's content instead.
            with open("file_graphe.json") as graph_file:
                loaded_json = json.load(graph_file)
            print("dateeien")
            print(loaded_json)
            payload = loaded_json
            payload["road_map"] = []
            # delivering payload
            # locator.getPublisher().publish("Twitter",json.dumps(payload))
        except Exception as ex:
            # Best effort: persist whatever was collected so far, then report.
            self.save_json("file_graphe.json", context.graph)
            print(ex)
| 2.34375 | 2 |
hummingbot/strategy/pure_market_making/__init__.py | csdenboer/hummingbot | 5 | 12759319 | #!/usr/bin/env python
from .pure_market_making import PureMarketMakingStrategy
from .asset_price_delegate import AssetPriceDelegate
from .order_book_asset_price_delegate import OrderBookAssetPriceDelegate
from .api_asset_price_delegate import APIAssetPriceDelegate
# Public API of the pure market making strategy package.
# BUG FIX: ``__all__`` must contain *strings*.  Listing the objects
# themselves makes ``from ... import *`` raise
# ``TypeError: attribute name must be string`` on Python 3.
__all__ = [
    "PureMarketMakingStrategy",
    "AssetPriceDelegate",
    "OrderBookAssetPriceDelegate",
    "APIAssetPriceDelegate",
]
| 0.996094 | 1 |
src/python/nimbusml/internal/entrypoints/transforms_columnselector.py | michaelgsharp/NimbusML | 134 | 12759320 | <reponame>michaelgsharp/NimbusML<gh_stars>100-1000
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
Transforms.ColumnSelector
"""
from ..utils.entrypoints import EntryPoint
from ..utils.utils import try_set, unlist
def transforms_columnselector(
        data,
        output_data=None,
        model=None,
        keep_columns=None,
        drop_columns=None,
        keep_hidden=False,
        ignore_missing=False,
        **params):
    """
    **Description**
        Selects a set of columns, dropping all others

    :param keep_columns: List of columns to keep. (inputs).
    :param data: Input dataset (inputs).
    :param drop_columns: List of columns to drop. (inputs).
    :param keep_hidden: Specifies whether to keep or remove hidden
        columns. (inputs).
    :param ignore_missing: Specifies whether to ignore columns that
        are missing from the input. (inputs).
    :param output_data: Transformed dataset (outputs).
    :param model: Transform model (outputs).
    """

    entrypoint_name = 'Transforms.ColumnSelector'
    inputs = {}
    outputs = {}

    # (key, value, try_set keyword arguments) — insertion order mirrors the
    # generated original so the serialised entry-point graph is identical.
    input_spec = (
        ('KeepColumns', keep_columns,
         dict(none_acceptable=True, is_of_type=list, is_column=True)),
        ('Data', data,
         dict(none_acceptable=False, is_of_type=str)),
        ('DropColumns', drop_columns,
         dict(none_acceptable=True, is_of_type=list, is_column=True)),
        ('KeepHidden', keep_hidden,
         dict(none_acceptable=True, is_of_type=bool)),
        ('IgnoreMissing', ignore_missing,
         dict(none_acceptable=True, is_of_type=bool)),
    )
    for key, value, try_set_kwargs in input_spec:
        if value is not None:
            inputs[key] = try_set(obj=value, **try_set_kwargs)

    for key, value in (('OutputData', output_data), ('Model', model)):
        if value is not None:
            outputs[key] = try_set(
                obj=value, none_acceptable=False, is_of_type=str)

    input_variables = {
        x for x in unlist(inputs.values())
        if isinstance(x, str) and x.startswith("$")}
    output_variables = {
        x for x in unlist(outputs.values())
        if isinstance(x, str) and x.startswith("$")}

    return EntryPoint(
        name=entrypoint_name, inputs=inputs, outputs=outputs,
        input_variables=input_variables,
        output_variables=output_variables)
| 2.015625 | 2 |
api/serializers.py | safrimus/malikburhani | 0 | 12759321 | from rest_framework import serializers
from drf_queryfields import QueryFieldsMixin
from django.db.models import Sum, F, DecimalField
import decimal
import database.models as models
# Customer
class CustomerSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    """Serializer exposing every Customer field; QueryFieldsMixin lets API
    clients narrow the returned fields via query parameters."""
    class Meta:
        model = models.Customer
        fields = '__all__'
# Source
class SourceSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    """Source serializer with an extra read-only ``total_value`` decimal.

    NOTE(review): ``total_value`` is not a model field — presumably it is an
    annotation added by the view's queryset; confirm against the views.
    """
    total_value = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    class Meta:
        model = models.Source
        fields = '__all__'
# Category
class CategorySerializer(QueryFieldsMixin, serializers.ModelSerializer):
    """Category serializer with an extra read-only ``total_value`` decimal.

    NOTE(review): like SourceSerializer, ``total_value`` must come from a
    queryset annotation; confirm against the views.
    """
    total_value = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    class Meta:
        model = models.Category
        fields = '__all__'
# Product
class ProductSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    """Full-field Product serializer with query-parameter field selection."""
    class Meta:
        model = models.Product
        fields = '__all__'
# Supplier
class SupplierSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    """Full-field Supplier serializer with query-parameter field selection."""
    class Meta:
        model = models.Supplier
        fields = '__all__'
# Credit Payments
class InvoiceCreditPaymentSerializer(serializers.ModelSerializer):
    """Serializer for partial payments applied against credit invoices."""
    class Meta:
        model = models.InvoiceCreditPayment
        fields = ('invoice', 'payment', 'date_of_payment',)
    def validate(self, data):
        """Reject payments on non-credit or fully-paid invoices and cap the
        payment at the invoice's outstanding balance."""
        invoice = data["invoice"]
        # A None aggregate means "no payments yet" — treat it as zero.
        current_payments = invoice.payments_total if invoice.payments_total else decimal.Decimal(0.0)
        if not invoice.credit:
            raise serializers.ValidationError("Invoice {0} is not a credit invoice.".format(invoice.id))
        if current_payments >= invoice.invoice_total:
            raise serializers.ValidationError("Invoice {0} is already fully paid.".format(invoice.id))
        # Outstanding balance is the hard upper bound for this payment.
        max_payment = invoice.invoice_total - current_payments
        if decimal.Decimal(data["payment"]) > max_payment:
            raise serializers.ValidationError("Payment must be less than or equal to {0}.".format(max_payment))
        return data
# Invoice
class InvoiceProductSerializer(serializers.ModelSerializer):
    """Nested line-item serializer; ``invoice`` and ``id`` are excluded
    because lines are always serialised inside their parent invoice."""
    class Meta:
        model = models.InvoiceProduct
        exclude = ('invoice', 'id',)
class InvoiceSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    """Invoice serializer with nested product lines.

    ``create`` deducts sold quantities from product stock; ``update`` only
    applies changes to each line's ``returned_quantity`` and restocks the
    returned difference.
    """
    products = InvoiceProductSerializer(many=True)
    # Read-only aggregates.  NOTE(review): these are not model fields here —
    # presumably queryset annotations; verify the views annotate them.
    invoice_total = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    payments_total = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    class Meta:
        model = models.Invoice
        fields = '__all__'
    @staticmethod
    def setup_eager_loading(queryset):
        """Prefetch product lines to avoid N+1 queries when listing invoices."""
        queryset = queryset.prefetch_related('products')
        return queryset
    def validate(self, data):
        """Require at least one line; check positive price/quantity and that
        returns never exceed the quantity sold."""
        products = data["products"]
        if not products:
            raise serializers.ValidationError("no products in invoice.")
        for product in products:
            # KeyError is tolerated on purpose: partial payloads may omit
            # fields (ValidationError is not caught — it propagates).
            try:
                if product["sell_price"] <= 0.0 or product["quantity"] <= 0:
                    raise serializers.ValidationError("Sell price and quantity must be greater than 0.")
            except KeyError:
                pass
            try:
                if product["returned_quantity"] > product["quantity"]:
                    raise serializers.ValidationError("Return quantity must be less than quantity sold.")
            except KeyError:
                pass
        return data
    def update(self, instance, validated_data):
        """Apply returned quantities and put the returned units back in stock.

        NOTE(review): other invoice fields in ``validated_data`` are ignored —
        confirm updates are intentionally limited to returns.
        """
        products = validated_data.pop('products')
        for product in products:
            invoice_product = models.InvoiceProduct.objects.get(invoice=instance.id, product=product["product"])
            original_returned_quantity = invoice_product.returned_quantity
            invoice_product.returned_quantity = product["returned_quantity"]
            invoice_product.save(update_fields=["returned_quantity"])
            # Restock only the delta between the new and old returned counts.
            product_object = models.Product.objects.get(id=product["product"].id)
            product_object.stock += (product["returned_quantity"] - original_returned_quantity)
            product_object.save(update_fields=["stock"])
        return instance
    def create(self, validated_data):
        """Create the invoice plus its lines, snapshotting each product's
        current cost price and decrementing stock by the sold quantity."""
        products = validated_data.pop('products')
        invoice = models.Invoice.objects.create(**validated_data)
        for product in products:
            product_object = models.Product.objects.get(id=product["product"].id)
            models.InvoiceProduct.objects.create(invoice=invoice, cost_price=product_object.cost_price, **product)
            product_object.stock -= product["quantity"]
            product_object.save(update_fields=["stock"])
        return invoice
# Sales total
class SalesTotalSerializer(serializers.Serializer):
    """Read-only sales/profit aggregate, optionally bucketed by date parts."""
    year = serializers.IntegerField(read_only=True, required=False)
    month = serializers.IntegerField(read_only=True, required=False)
    day = serializers.IntegerField(read_only=True, required=False)
    sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
# Category and source sales
class SalesCategorySourceSerializer(serializers.Serializer):
    """Read-only sales aggregate keyed by a category or source id
    (``requested_type``), optionally split by customer and date parts."""
    requested_type = serializers.IntegerField(read_only=True)
    sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    units = serializers.IntegerField(read_only=True)
    customer = serializers.IntegerField(read_only=True, required=False)
    year = serializers.IntegerField(read_only=True, required=False)
    month = serializers.IntegerField(read_only=True, required=False)
    day = serializers.IntegerField(read_only=True, required=False)
# Product sales total
class SalesProductsSerializer(serializers.Serializer):
    """Read-only per-product sales aggregate, optionally split by customer
    and date parts."""
    sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    units = serializers.IntegerField(read_only=True)
    product = serializers.IntegerField(read_only=True, required=False)
    customer = serializers.IntegerField(read_only=True, required=False)
    year = serializers.IntegerField(read_only=True, required=False)
    month = serializers.IntegerField(read_only=True, required=False)
    day = serializers.IntegerField(read_only=True, required=False)
# Supplier sales total
class SalesSuppliersSerializer(serializers.Serializer):
    """Read-only per-supplier sales aggregate, optionally split by customer
    and date parts."""
    sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    units = serializers.IntegerField(read_only=True)
    supplier = serializers.IntegerField(read_only=True, required=False)
    customer = serializers.IntegerField(read_only=True, required=False)
    year = serializers.IntegerField(read_only=True, required=False)
    month = serializers.IntegerField(read_only=True, required=False)
    day = serializers.IntegerField(read_only=True, required=False)
# Customer sales total
class SalesCustomersSerializer(serializers.Serializer):
    """Read-only per-customer sales aggregate, optionally split by date parts."""
    sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    units = serializers.IntegerField(read_only=True)
    customer = serializers.IntegerField(read_only=True, required=False)
    year = serializers.IntegerField(read_only=True, required=False)
    month = serializers.IntegerField(read_only=True, required=False)
    day = serializers.IntegerField(read_only=True, required=False)
# Cashflow total
class CashflowTotalSerializer(serializers.Serializer):
    """Read-only cash total per movement ``type``, optionally split by
    date parts."""
    type = serializers.CharField()
    year = serializers.IntegerField(read_only=True, required=False)
    month = serializers.IntegerField(read_only=True, required=False)
    day = serializers.IntegerField(read_only=True, required=False)
    cash = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
# Stock history
class StockSoldTotalSerializer(serializers.Serializer):
    """Read-only quantity of a product sold in a given month."""
    product = serializers.IntegerField(read_only=True)
    month = serializers.IntegerField(read_only=True)
    quantity = serializers.IntegerField(read_only=True)
| 2.171875 | 2 |
var/spack/repos/builtin/packages/orfm/package.py | kkauder/spack | 2,360 | 12759322 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Orfm(AutotoolsPackage):
    """A simple and not slow open reading frame (ORF) caller. No bells or
    whistles like frameshift detection, just a straightforward goal of
    returning a FASTA file of open reading frames over a certain length
    from a FASTA/Q file of nucleotide sequences."""

    homepage = "https://github.com/wwood/OrfM"
    url = "https://github.com/wwood/OrfM/releases/download/v0.7.1/orfm-0.7.1.tar.gz"

    version('0.7.1', sha256='19f39c72bcc48127b757613c5eef4abae95ee6c82dccf96b041db527b27f319a')

    # Link-type dependency only: zlib is needed at build/link time.
    depends_on('zlib', type='link')
| 1.648438 | 2 |
pydmfet/libgen/__init__.py | fishjojo/pydmfe | 3 | 12759323 | <reponame>fishjojo/pydmfe
from pydmfet.libgen.ops import *
| 0.84375 | 1 |
ensyuu/program/ensyuu23.py | tbor8080/pyprog | 0 | 12759324 | <gh_stars>0
import random  # NOTE(review): imported but unused — the secret number is fixed below.

# Number guessing game: the player keeps guessing until they find ``num``
# or run out of attempts.
MAX_TRIES = 10
counter = 0
num = 77
while True:
    counter += 1
    line = int(input("(整数の入力をしてください>>)"))
    if num == line:
        print("WIN!")
        break
    elif num > line:
        print("LOW")
    else:
        print("HIGH")
    # BUG FIX: the original checked ``counter == 10`` as the last branch of
    # the ==/>/< elif chain, but those three branches already cover every
    # integer, so "GAME OVER" was unreachable.  Check the attempt limit
    # separately after each wrong guess.
    if counter == MAX_TRIES:
        print("GAME OVER")
        break
attrkid/exceptions.py | danfairs/attrkid | 4 | 12759325 | <reponame>danfairs/attrkid
# TODO(dan): Sort out how this displays in tracebacks, it's terrible
class ValidationError(Exception):
    """Aggregates field-level validation errors, optionally with a cause.

    Args:
        errors: iterable of error objects/messages.
        exc: optional underlying exception that triggered the failure.
    """

    def __init__(self, errors, exc=None):
        self.errors = errors
        self.exc = exc
        # BUG FIX: the original expression
        #     str(exc) if exc is not None else '' + ', '.join(...)
        # bound the ``+`` inside the else branch, so the collected ``errors``
        # were silently dropped from the message whenever ``exc`` was given.
        parts = ([str(exc)] if exc is not None else []) + [str(e) for e in errors]
        self._str = ', '.join(parts)

    def __str__(self):
        return self._str
| 2.34375 | 2 |
shortio/builtins_wrapper.py | byshyk/shortio | 0 | 12759326 | <filename>shortio/builtins_wrapper.py
"""Contains wrappers for builtins.
Each wrapper is used to avoid context manager boilerplate.
Attributes:
read_json: Wrapped ``json.load``.
write_json: Wrapped ``json.dump``.
read_pickle: Wrapped ``pickle.load``.
write_pickle: Wrapped ``pickle.dump``.
"""
import json
import pickle
from .utils import read_wrapper, write_wrapper
def read(file, mode='r', **kwargs):
    """Open *file*, read its entire contents, and return them.

    Args:
        file: Path-like object naming the file to open.
        mode: Mode string passed straight to ``open``. Default: 'r'.
        **kwargs: Extra keyword arguments forwarded to ``open``.

    Returns:
        The full file content (``str`` or ``bytes``, depending on *mode*).
    """
    with open(file, mode, **kwargs) as handle:
        return handle.read()
def write(file, s, mode='w', **kwargs):
    """Write *s* to the file and return the amount written.

    (BUG FIX: the previous docstring was a copy-paste of :func:`read` and
    wrongly claimed this function reads and returns file content.)

    Args:
        file: Path-like object giving the pathname of the file to be opened.
        s: Content to be written.
        mode: String giving the mode of the file to be opened. Default: 'w'.
        **kwargs: Optional arguments that ``open`` takes.

    Returns:
        The number of characters/bytes (according to mode) written.
    """
    with open(file, mode, **kwargs) as f:
        return f.write(s)
# Partially-applied wrappers: each opens the file in the mode given and
# delegates (de)serialisation to the wrapped json/pickle function, so
# callers avoid the ``with open(...)`` boilerplate (see module docstring).
read_json = read_wrapper(json.load, mode='r')
write_json = write_wrapper(json.dump, mode='w')
read_pickle = read_wrapper(pickle.load, mode='rb')
write_pickle = write_wrapper(pickle.dump, mode='wb')
# Explicit public API of this module.
__all__ = ['read', 'write',
           'read_json', 'write_json',
           'read_pickle', 'write_pickle']
| 2.671875 | 3 |
src/djanban/apps/dev_times/views.py | diegojromerolopez/djanban | 33 | 12759327 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import calendar
import datetime
import re
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from django.contrib.auth.decorators import login_required
from django.db.models import Sum, Q
from django.http.response import HttpResponse, Http404, JsonResponse
from django.shortcuts import render
from django.template import loader
from djanban.apps.base.auth import get_user_boards, user_is_member
from djanban.apps.boards.models import Label, Board
from djanban.apps.dev_times.models import DailySpentTime
from djanban.apps.members.models import Member
from django.template.loader import get_template
from djanban.apps.multiboards.models import Multiboard
# View spent time report
@login_required
def view_daily_spent_times(request):
    """Render the spent-time report; uses the by-board template when a
    board filter is active, the generic list otherwise."""
    try:
        replacements = _get_daily_spent_times_replacements(request)["replacements"]
    except (Multiboard.DoesNotExist, Board.DoesNotExist):
        raise Http404
    template = (
        "daily_spent_times/list_by_board.html"
        if replacements.get("board")
        else "daily_spent_times/list.html"
    )
    return render(request, template, replacements)
# Export daily spent report in CSV format
@login_required
def export_daily_spent_times(request):
    """Return the filtered daily spent times as a CSV file attachment."""
    spent_times = _get_daily_spent_times_from_request(request)

    # Interval covered by the exported spent times (used in the filename).
    start_date = spent_times["start_date"]
    end_date = spent_times["end_date"]

    # Prefix the filename with the multiboard/board name when filtered.
    name_str = ""
    if spent_times.get("multiboard"):
        name_str = (u"mb-{0}-".format(spent_times["multiboard"].name)).lower()
    if spent_times.get("board"):
        name_str = (u"{0}-".format(spent_times["board"].name)).lower()

    # Build the CSV download response.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="{0}export-daily-spent-times-from-{1}-to-{2}.csv"'.format(
        name_str, start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d")
    )
    csv_template = loader.get_template('daily_spent_times/csv.txt')
    response.write(csv_template.render({'spent_times': spent_times["all"]}))
    return response
# Email the daily spent times report (txt + html + CSV attachment) to an
# address given in the POST body.  (Original comment wrongly said "Export
# daily spent report in CSV format" — copy-paste from the view above.)
@login_required
def send_daily_spent_times(request):
    """Filter spent times per POST parameters and email the report.

    POST params: email, start_date, end_date, week, multiboard, label,
    member.  Responds with JSON when ``?ajax`` is set, HTML otherwise.
    """
    if request.method != "POST":
        raise Http404
    current_user = request.user
    current_user_boards = get_user_boards(current_user)
    # NOTE(review): if "email" is absent, re.match(None) raises TypeError —
    # confirm the form always posts the field.
    recipient_email = request.POST.get("email")
    if not re.match(r"[^@]+@[^@]+", recipient_email):
        return JsonResponse({"message": "Invalid email"})
    daily_spent_times_filter = {}
    # Start date
    start_date_str = request.POST.get("start_date")
    start_date = None
    if start_date_str:
        try:
            start_date = datetime.datetime.strptime(start_date_str, "%Y-%m-%d")
            daily_spent_times_filter["date__gte"] = start_date
        except ValueError:
            start_date = None
    # End date
    end_date_str = request.POST.get("end_date")
    end_date = None
    if end_date_str:
        try:
            end_date = datetime.datetime.strptime(end_date_str, "%Y-%m-%d")
            daily_spent_times_filter["date__lte"] = end_date
        except ValueError:
            end_date = None
    # Week
    # NOTE(review): POST values are strings, so ``> 0`` compares str to int —
    # a TypeError on Python 3; verify the intended Python version / fix.
    week = request.POST.get('week') if request.POST.get('week') and request.POST.get('week') > 0 else None
    if week:
        # NOTE(review): other code in this module filters on "week_of_year";
        # confirm which field name DailySpentTime actually defines.
        daily_spent_times_filter["week"] = week
    # Default filter is None
    multiboard = None
    label = None
    board = None
    # Filter spent time by multiboard
    multiboard_str = request.POST.get("multiboard")
    if multiboard_str and hasattr(current_user, "member") and\
            current_user.member.multiboards.filter(id=multiboard_str).exists():
        multiboard = current_user.member.multiboards.get(id=multiboard_str)
        daily_spent_times_filter["board__multiboards"] = multiboard
    # Filter spent time by label o board
    else:
        # Label: a value like "all_from_board_<id>" means "whole board".
        # NOTE(review): if "label" is absent, re.match(None) raises TypeError.
        label_str = request.POST.get("label")
        matches = re.match(r"all_from_board_(?P<board_id>\d+)", label_str)
        if matches and current_user_boards.filter(id=matches.group("board_id")).exists():
            label = None
            board = current_user_boards.get(id=matches.group("board_id"))
            daily_spent_times_filter["board"] = board
        elif Label.objects.filter(id=label_str).exists():
            label = Label.objects.get(id=label_str)
            board = label.board
            daily_spent_times_filter["board"] = board
            daily_spent_times_filter["card__labels"] = label
    # Member: restrict to members visible to the current user.
    member = None
    if user_is_member(current_user):
        current_user_members = Member.objects.filter(Q(boards__in=current_user_boards)|Q(id=current_user.member.id)).distinct()
    else:
        current_user_members = Member.objects.filter(boards__in=current_user_boards).distinct()
    if request.POST.get("member") and current_user_members.filter(id=request.POST.get("member")).exists():
        member = current_user_members.get(id=request.POST.get("member"))
        daily_spent_times_filter["member"] = member
    daily_spent_times = DailySpentTime.objects.filter(**daily_spent_times_filter)
    replacements = {
        "email": recipient_email,
        "daily_spent_times": daily_spent_times,
        "week": week,
        "start_date": start_date,
        "end_date": end_date,
        "label": label,
        "board": board,
        "multiboard": multiboard,
        "member": member
    }
    # Render the subject, plain-text and HTML bodies, plus the CSV payload.
    report_subject = get_template('daily_spent_times/emails/send_daily_spent_times_subject.txt').render(replacements)
    txt_message = get_template("daily_spent_times/emails/send_daily_spent_times.txt").render(replacements)
    html_message = get_template("daily_spent_times/emails/send_daily_spent_times.html").render(replacements)
    csv_report = get_template('daily_spent_times/csv.txt').render({"spent_times": daily_spent_times})
    csv_file_name = "custom_report_for_{0}.csv".format(recipient_email)
    try:
        message = EmailMultiAlternatives(report_subject, txt_message, settings.EMAIL_HOST_USER, [recipient_email])
        message.attach_alternative(html_message, "text/html")
        message.attach(csv_file_name, csv_report, 'text/csv')
        message.send()
        if request.GET.get("ajax"):
            return JsonResponse({"message": "Spent times sent successfully"})
        return render(request, "daily_spent_times/send_daily_spent_times_ok.html", replacements)
    except Exception:
        if request.GET.get("ajax"):
            return JsonResponse({"message": "Error when sending data"}, status=500)
        return render(request, "daily_spent_times/send_daily_spent_times_error.html", replacements)
# Return the filtered queryset and the replacements given the GET parameters
def _get_daily_spent_times_replacements(request):
    """Build the template context for the spent-times report views.

    Reads member/date/multiboard/label/board filters from request.GET and
    returns ``{"queryset": <spent times>, "replacements": <context dict>}``.
    May raise Multiboard.DoesNotExist / Board.DoesNotExist (callers 404).
    """
    selected_member_id = request.GET.get("member_id")
    selected_member = None
    if selected_member_id:
        selected_member = Member.objects.get(id=selected_member_id)
    spent_times = _get_daily_spent_times_from_request(request)
    replacements = {
        "multiboards": request.user.member.multiboards.all() if user_is_member(request.user) else None,
        "member": request.user.member if user_is_member(request.user) else None,
        "boards": get_user_boards(request.user),
        "members": Member.objects.all()
    }
    # Start date
    start_date_str = request.GET.get("start_date")
    if start_date_str:
        try:
            start_date = datetime.datetime.strptime(start_date_str, "%Y-%m-%d")
            replacements["start_date"] = start_date
            # Default the interval end to today; overwritten below if given.
            replacements["date_interval"] = [start_date, timezone.now().date()]
        except ValueError:
            start_date = None
    # End date
    end_date_str = request.GET.get("end_date")
    if end_date_str:
        try:
            end_date = datetime.datetime.strptime(end_date_str, "%Y-%m-%d")
            replacements["end_date"] = end_date
            # NOTE(review): raises KeyError if end_date is given without a
            # valid start_date ("date_interval" was never created) — confirm.
            replacements["date_interval"][1] = end_date
        except ValueError:
            end_date = None
    replacements["selected_member"] = selected_member
    multiboard_id = request.GET.get("multiboard_id", request.GET.get("multiboard"))
    multiboard = None
    if multiboard_id:
        multiboard = request.user.member.multiboards.get(id=multiboard_id)
        replacements["multiboard"] = multiboard
        replacements["selected_multiboard"] = multiboard
        replacements["board__multiboard"] = multiboard
    else:
        # If we are filtering by board, filter by board_id
        label_id = request.GET.get("label_id", request.GET.get("label"))
        label = None
        board = None
        if label_id:
            # "all_from_board_<id>" selects a whole board instead of a label.
            matches = re.match(r"all_from_board_(?P<board_id>\d+)", label_id)
            if matches:
                board = get_user_boards(request.user).get(id=matches.group("board_id"))
                label = None
                replacements["selected_label"] = label
                replacements["label"] = label
                replacements["selected_board"] = board
                replacements["board"] = board
            else:
                boards = get_user_boards(request.user)
                label = Label.objects.get(board__in=boards, id=label_id)
                replacements["selected_label"] = label
                replacements["label"] = label
                replacements["selected_board"] = label.board
                replacements["board"] = label.board
        board_id = request.GET.get("board_id", request.GET.get("board"))
        if not label_id and board_id:
            board = get_user_boards(request.user).get(id=board_id)
            label = None
            replacements["selected_label"] = label
            replacements["label"] = label
            replacements["selected_board"] = board
            replacements["board"] = board
    daily_spent_times = spent_times["all"]
    # NOTE(review): GET values are strings, so ``> 0`` compares str to int —
    # a TypeError on Python 3; verify intended Python version / fix.
    replacements["week"] = request.GET.get('week') if request.GET.get('week') and request.GET.get('week') > 0 else None
    replacements["months"] = spent_times["per_month"]
    return {"queryset": daily_spent_times, "replacements": replacements}
# Return the daily spent times from a request
def _get_daily_spent_times_from_request(request):
    """Translate the request's GET filters into a spent-times data set."""
    params = request.GET
    selected_member = (
        Member.objects.get(id=params.get("member_id"))
        if params.get("member_id") else None
    )
    multiboard_id = params.get("multiboard_id") or None
    # A bare board filter is expressed as the pseudo-label "all_from_board_<id>".
    if params.get("label_id"):
        label_id = params.get("label_id")
    elif params.get("board_id"):
        label_id = "all_from_board_{0}".format(params.get("board_id"))
    else:
        label_id = None
    return _get_daily_spent_times_queryset(
        request.user, selected_member,
        params.get("start_date"), params.get("end_date"), params.get('week'),
        label_id=label_id, multiboard_id=multiboard_id
    )
# Return the filtered queryset and the replacements given the GET parameters
def _get_daily_spent_times_queryset(current_user, selected_member, start_date_, end_date_, week, multiboard_id, label_id):
    """Filter DailySpentTime rows and aggregate them per calendar month.

    Returns a dict with keys ``all`` (the filtered queryset, newest first),
    ``per_month`` (one summary dict per month in the covered interval),
    ``start_date``/``end_date`` (the effective interval) and the selected
    ``board``/``multiboard`` (or None).
    """
    daily_spent_time_filter = {}
    # Member filter
    if selected_member:
        daily_spent_time_filter["member_id"] = selected_member.id
    # Start date (invalid strings are silently ignored)
    start_date = None
    if start_date_:
        try:
            start_date = datetime.datetime.strptime(start_date_, "%Y-%m-%d").date()
            daily_spent_time_filter["date__gte"] = start_date
        except ValueError:
            start_date = None
    # End date (invalid strings are silently ignored)
    end_date = None
    if end_date_:
        try:
            end_date = datetime.datetime.strptime(end_date_, "%Y-%m-%d").date()
            daily_spent_time_filter["date__lte"] = end_date
        except ValueError:
            end_date = None
    # Week
    if week and int(week) > 0:
        daily_spent_time_filter["week_of_year"] = week
    board = None
    multiboard = None
    if multiboard_id and hasattr(current_user, "member"):
        multiboard = current_user.member.multiboards.get(id=multiboard_id)
        daily_spent_time_filter["board__multiboards"] = multiboard
    else:
        # Label: "all_from_board_<id>" selects a whole board instead.
        label = None
        board = None
        current_user_boards = get_user_boards(current_user)
        if label_id:
            matches = re.match(r"all_from_board_(?P<board_id>\d+)", label_id)
            if matches:
                if current_user_boards.filter(id=matches.group("board_id")).exists():
                    label = None
                    board = current_user_boards.get(id=matches.group("board_id"))
                    daily_spent_time_filter["board"] = board
            else:
                if Label.objects.filter(id=label_id, board__in=current_user_boards).exists():
                    label = Label.objects.get(id=label_id)
                    board = label.board
                    daily_spent_time_filter["board"] = board
                    daily_spent_time_filter["card__labels"] = label
    # Daily Spent Times
    daily_spent_times = DailySpentTime.objects.filter(**daily_spent_time_filter).order_by("-date")
    months = []
    # Grouped by months: walk month by month from start_date to end_date,
    # defaulting the interval to the oldest/newest rows when not given.
    if daily_spent_times.exists():
        if start_date is None:
            start_date = daily_spent_times.order_by("date")[0].date
        if end_date is None:
            end_date = daily_spent_times[0].date
        date_i = datetime.date(start_date.year, start_date.month, 1)
        while date_i <= end_date:
            month_index = date_i.month
            year = date_i.year
            month_name = calendar.month_name[month_index]
            daily_spent_times_in_month_i = daily_spent_times.filter(date__year=year, date__month=month_index).order_by(
                "date")
            first_weekday, number_of_days_in_month = calendar.monthrange(year, month_index)
            # Per-month aggregates (None when the month has no rows).
            rate_amount_sum = daily_spent_times_in_month_i.aggregate(sum=Sum("rate_amount"))["sum"]
            adjusted_amount_sum = _adjusted_amount_sum(daily_spent_times_in_month_i)
            spent_time_sum = daily_spent_times_in_month_i.aggregate(sum=Sum("spent_time"))["sum"]
            adjusted_spent_time_sum = _adjusted_spent_time_sum(daily_spent_times_in_month_i)
            estimated_time_sum = daily_spent_times_in_month_i.aggregate(sum=Sum("estimated_time"))["sum"]
            diff_time_sum = daily_spent_times_in_month_i.aggregate(sum=Sum("diff_time"))["sum"]
            month = {
                "daily_spent_times": daily_spent_times_in_month_i,
                "values": {
                    "first_day": datetime.date(year, month_index, 1).isoformat(),
                    "last_day": datetime.date(year, month_index, number_of_days_in_month).isoformat(),
                    "name": month_name,
                    "number": month_index,
                    "year": year,
                    "i": month_index,
                    "rate_amount_sum": float(rate_amount_sum) if rate_amount_sum else None,
                    "adjusted_amount_sum": float(adjusted_amount_sum) if adjusted_amount_sum else None,
                    "spent_time_sum": float(spent_time_sum) if spent_time_sum else None,
                    'adjusted_spent_time_sum': float(adjusted_spent_time_sum) if adjusted_spent_time_sum else None,
                    "estimated_time_sum": float(estimated_time_sum) if estimated_time_sum else None,
                    "diff_time_sum": float(diff_time_sum) if diff_time_sum else None
                }
            }
            months.append(month)
            date_i = (date_i + relativedelta(months=1))
    replacements = {
        "all": daily_spent_times, "per_month": months,
        "start_date": start_date, "end_date": end_date,
        "board": board, "multiboard": multiboard
    }
    return replacements
# Computes the adjusted amount according to the factor each member has
def _adjusted_amount_sum(daily_spent_times):
    """Sum of ``rate_amount`` values after each member's adjustment factor."""
    return _adjusted_daily_spent_time_attribute_sum(daily_spent_times, "rate_amount")
def _adjusted_spent_time_sum(daily_spent_times):
    """Member-factor-adjusted sum of the ``spent_time`` attribute."""
    return _adjusted_daily_spent_time_attribute_sum(
        daily_spent_times, attribute="spent_time")
# Computes the adjusted spent time according to the factor each member has
def _adjusted_daily_spent_time_attribute_sum(daily_spent_times, attribute="spent_time"):
adjusted_value_sum = 0
member_dict = {}
for daily_spent_time in daily_spent_times:
if not daily_spent_time.member_id in member_dict:
member_dict[daily_spent_time.member_id] = daily_spent_time.member
member = member_dict[daily_spent_time.member_id]
adjusted_value_sum += member.adjust_daily_spent_time(daily_spent_time, attribute)
return adjusted_value_sum
| 2.046875 | 2 |
armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_32_bit/thumb_store_single_data_item/strh_register_t2.py | matan1008/armulator | 16 | 12759328 | from armulator.armv6.opcodes.abstract_opcodes.strh_register import StrhRegister
from armulator.armv6.opcodes.opcode import Opcode
from armulator.armv6.arm_exceptions import UndefinedInstructionException
from armulator.armv6.shift import SRType
class StrhRegisterT2(StrhRegister, Opcode):
    """STRH (register), Thumb-2 32-bit T2 encoding.

    Decodes the register-offset form (with an optional LSL shift encoded
    in imm2) into the generic StrhRegister operation.
    """

    def __init__(self, instruction, add, wback, index, m, t, n, shift_t, shift_n):
        Opcode.__init__(self, instruction)
        StrhRegister.__init__(self, add, wback, index, m, t, n, shift_t, shift_n)

    def is_pc_changing_opcode(self):
        # A store never writes the PC.
        return False

    @staticmethod
    def from_bitarray(instr, processor):
        # Field extraction for the T2 encoding.
        rm = instr[28:32]    # offset register
        imm2 = instr[26:28]  # LSL shift amount
        rt = instr[16:20]    # source register
        rn = instr[12:16]    # base register
        # T2 is always pre-indexed, offset addition, no writeback.
        index = True
        add = True
        wback = False
        if rn == "0b1111":
            raise UndefinedInstructionException()
        elif rt.uint in (13, 15) or rm.uint in (13, 15):
            # SP/PC as Rt or Rm is UNPREDICTABLE.  Bug fix: this used the
            # Python-2-only ``print`` statement, a SyntaxError under
            # Python 3; the call form behaves identically on both.
            # NOTE(review): this branch implicitly returns None, matching
            # the original behavior.
            print("unpredictable")
        else:
            return StrhRegisterT2(
                instr, **{"add": add, "wback": wback, "index": index,
                          "m": rm.uint, "t": rt.uint, "n": rn.uint,
                          "shift_t": SRType.SRType_LSL, "shift_n": imm2.uint})
| 2.390625 | 2 |
plugins/mc_server/__init__.py | Orilx/Niko-py | 4 | 12759329 | from nonebot import on_command
from nonebot.matcher import Matcher
from nonebot.adapters.onebot.v11 import GroupMessageEvent
from utils.utils import get_json
from utils.config_util import ConfigManager
# Entry point: users trigger this plugin with the "status" command.
status = on_command('status')

# Configuration skeleton; real values are loaded by ConfigManager.
# The fields match a server-panel API (appears to be MCSManager —
# TODO confirm): an instance uuid, a daemon ("remote") uuid and an API key.
data = {
    "server_url": "",
    "uuid": "",
    "remote_uuid": "",
    "apikey": ""
}

conf = ConfigManager.register("mc_status", data)
@status.handle()
async def mc_server_status(matcher: Matcher):
    """Query the panel API for the Minecraft server state and reply.

    Sends an error message and stops if the API request fails; otherwise
    reports stopped state, full running details, or "starting".
    """
    query = {
        "uuid": conf["uuid"],
        "remote_uuid": conf["remote_uuid"],
        "apikey": conf["apikey"],
    }
    headers = {
        "accept": "application/json"
    }
    js = await get_json(conf["server_url"], query, headers)
    msg = ''
    if not js:
        msg += 'エラー発生'
        await matcher.send(msg)
        # Bug fix: the original fell through here and crashed on
        # ``js['data']`` with js being None/empty.
        return
    # Renamed from ``data`` to avoid shadowing the module-level config dict.
    info = js['data']
    if info["status"] == 0:
        msg += f'服务器当前状态:关闭\n上次启动时间:{info["config"]["lastDatetime"]}'
    elif info["status"] == 3:
        if info["info"]["version"]:
            # ``elapsed`` is in milliseconds; convert to d/h/m/s.
            time = int(info["processInfo"]["elapsed"]) / 1000
            d = int(time / (24 * 3600))
            h = int((time / 3600) % 24)
            m = int((time / 60) % 60)
            s = int(time % 60)
            msg += f'服务器名称:{info["config"]["nickname"]}\n当前状态:开启\n' \
                   f'启动时间:{info["config"]["lastDatetime"]}\n服务端版本:{info["info"]["version"]}\n' \
                   f'在线人数:{info["info"]["currentPlayers"]}/{info["info"]["maxPlayers"]}\n已运行:{d}天{h}时{m}分{s}秒'
        else:
            msg += '服务器正在启动...'
    await matcher.send(msg)
| 2.0625 | 2 |
HumiditySensor.py | majorpeter/puha-manager | 0 | 12759330 | from datetime import timedelta
from LoggedSensor import LoggedSensor
class HumiditySensor(LoggedSensor):
    """Logged sensor recording humidity readings pushed by a node."""

    def __init__(self, node):
        super().__init__(type_id=2, max_measurements=500,
                         holdoff_time=timedelta(minutes=1))
        # React whenever the node reports a new humidity value.
        node.Humidity.subscribe_to_changes(self.on_property_changed)

    def on_property_changed(self, name, value):
        """Store the latest humidity and log it as a measurement."""
        humidity = float(value)
        self.humidity = humidity
        self.add_measurement(humidity)
| 2.6875 | 3 |
aiohttp_swagger3/index_templates.py | scotthaleen/aiohttp-swagger3 | 56 | 12759331 | import string
SWAGGER_UI_TEMPLATE = string.Template(
"""
<!-- HTML for static distribution bundle build -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Swagger UI</title>
<link rel="stylesheet" type="text/css" href="./swagger_ui_static/swagger-ui.css" >
<link rel="icon" type="image/png" href="./swagger_ui_static/favicon-32x32.png" sizes="32x32" />
<link rel="icon" type="image/png" href="./swagger_ui_static/favicon-16x16.png" sizes="16x16" />
<style>
html
{
box-sizing: border-box;
overflow: -moz-scrollbars-vertical;
overflow-y: scroll;
}
*,
*:before,
*:after
{
box-sizing: inherit;
}
body
{
margin:0;
background: #fafafa;
}
</style>
</head>
<body>
<div id="swagger-ui"></div>
<script src="./swagger_ui_static/swagger-ui-bundle.js"> </script>
<script src="./swagger_ui_static/swagger-ui-standalone-preset.js"> </script>
<script>
window.onload = function() {
// Begin Swagger UI call region
const ui = SwaggerUIBundle({...{
url: "./swagger.json",
dom_id: '#swagger-ui',
deepLinking: true,
presets: [
SwaggerUIBundle.presets.apis,
SwaggerUIStandalonePreset
],
plugins: [
SwaggerUIBundle.plugins.DownloadUrl
],
layout: "StandaloneLayout"
}, ...${settings}});
// End Swagger UI call region
window.ui = ui
}
</script>
</body>
</html>
"""
)
REDOC_UI_TEMPLATE = string.Template(
"""
<!DOCTYPE html>
<html>
<head>
<title>ReDoc</title>
<!-- needed for adaptive design -->
<meta charset="utf-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" type="text/css" href="./redoc_ui_static/google-fonts.css" >
<link rel="shortcut icon" href="./redoc_ui_static/favicon.ico"/>
<link rel="icon" type="image/png" sizes="16x16" href="./redoc_ui_static/favicon-16x16.png"/>
<link rel="icon" type="image/png" sizes="32x32" href="./redoc_ui_static/favicon-32x32.png"/>
<!--
ReDoc doesn't change outer page styles
-->
<style>
body {
margin: 0;
padding: 0;
}
</style>
</head>
<body>
<div id='redoc-ui'></div>
<script src="./redoc_ui_static/redoc.standalone.js"> </script>
<script>
Redoc.init('./swagger.json', ${settings}, document.getElementById('redoc-ui'))
</script>
</body>
</html>
"""
)
RAPIDOC_UI_TEMPLATE = string.Template(
"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" type="text/css" href="./rapidoc_ui_static/fonts.css" >
<script type="module" src="./rapidoc_ui_static/rapidoc-min.js"></script>
</head>
<body>
<rapi-doc
id='rapidoc-ui'
spec-url='./swagger.json'
regular-font='Rapidoc Regular'
mono-font='Roboto Mono'
> </rapi-doc>
<script>
const docEl = document.getElementById('rapidoc-ui');
const settings = ${settings}
for (const key in settings) {
docEl.setAttribute(key, settings[key]);
}
</script>
</body>
</html>
"""
)
| 2 | 2 |
5. SelectingFeatures/3. forward_backward_selection.py | michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning | 7 | 12759332 | <reponame>michaelbwalker/Data-Cleaning-and-Exploration-with-Machine-Learning
# import pandas, numpy, and matplotlib
import pandas as pd
from feature_engine.encoding import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from mlxtend.feature_selection import SequentialFeatureSelector
# Pandas display tweaks so wide frames print readably in a console.
pd.set_option('display.width', 75)
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_rows', 100)
pd.options.display.float_format = '{:,.0f}'.format

# load the NLS data
nls97compba = pd.read_csv("data/nls97compba.csv")
feature_cols = ['satverbal','satmath','gpascience',
  'gpaenglish','gpamath','gpaoverall','gender','motherhighgrade',
  'fatherhighgrade','parentincome']

# separate NLS data into train and test datasets (target: completedba)
X_train, X_test, y_train, y_test =  \
  train_test_split(nls97compba[feature_cols],\
  nls97compba[['completedba']], test_size=0.3, random_state=0)

# encode the data: one-hot the gender column, then standardize all
# numeric features; the gender dummy is excluded from scaling and
# re-joined afterwards.
ohe = OneHotEncoder(drop_last=True, variables=['gender'])
X_train_enc = ohe.fit_transform(X_train)
scaler = StandardScaler()
standcols = X_train_enc.iloc[:,:-1].columns
X_train_enc = \
  pd.DataFrame(scaler.fit_transform(X_train_enc[standcols]),
  columns=standcols, index=X_train_enc.index).\
  join(X_train_enc[['gender_Female']])

# Build RF classifier to use in feature selection
rfc = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=0)

# Build step forward feature selection (forward=True): grow from the
# empty set to 5 features, scoring by 5-fold CV accuracy.
sfs = SequentialFeatureSelector(rfc, k_features=5,
  forward=True, floating=False, verbose=2,
  scoring='accuracy', cv=5)

# Perform SFFS
sfs.fit(X_train_enc, y_train.values.ravel())
selcols = X_train_enc.columns[list(sfs.k_feature_idx_)]
selcols

# Build step *backward* feature selection (forward=False): shrink from
# the full set down to 5 features.  (The original comment wrongly said
# "forward" here.)
rfc = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=0)
sfs = SequentialFeatureSelector(rfc, k_features=5,
  forward=False, floating=False, verbose=2,
  scoring='accuracy', cv=5)

# Perform selection and report the surviving columns
sfs.fit(X_train_enc, y_train.values.ravel())
selcols = X_train_enc.columns[list(sfs.k_feature_idx_)]
selcols
| 3.453125 | 3 |
devilry/devilry_admin/tests/subject_for_period_admin/test_overview_for_periodadmin.py | devilry/devilry-django | 29 | 12759333 | <reponame>devilry/devilry-django<filename>devilry/devilry_admin/tests/subject_for_period_admin/test_overview_for_periodadmin.py<gh_stars>10-100
from django.conf import settings
from django.test import TestCase
from cradmin_legacy import cradmin_testhelpers
from cradmin_legacy import crinstance
from model_bakery import baker
from devilry.apps.core.baker_recipes import ACTIVE_PERIOD_START, ACTIVE_PERIOD_END
from devilry.devilry_admin.views.subject_for_period_admin import overview_for_periodadmin
from devilry.utils import datetimeutils
class TestOverview(TestCase, cradmin_testhelpers.TestCaseMixin):
    """Tests for the subject overview page as seen by a *period* admin.

    Verifies the title/heading, that the period list contains only the
    periods the requesting user administers (and only within the current
    subject), and the rendering of each item (name, link URL, start/end
    times) plus the descending-start-time ordering.
    """
    viewclass = overview_for_periodadmin.Overview

    def test_title(self):
        # <title> is the subject's short_name.
        testsubject = baker.make('core.Subject',
                                 short_name='testsubject')
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
        self.assertEqual('testsubject',
                         mockresponse.selector.one('title').alltext_normalized)

    def test_h1(self):
        # <h1> is the subject's long_name.
        testsubject = baker.make('core.Subject',
                                 long_name='Test Subject')
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
        self.assertEqual('Test Subject',
                         mockresponse.selector.one('h1').alltext_normalized)

    def test_period_list_one_period_where_user_is_period_admin(self):
        testperiod = baker.make('core.Period', long_name='Test period')
        periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup', period=testperiod)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testperiod.parentnode,
            requestuser=testuser
        )
        self.assertEqual('Test period',
                         mockresponse.selector.one(
                             '.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_period_list_should_only_see_periods_where_user_is_period_admin(self):
        # The user administers period 1 and 2, but not period 3.
        testperiod = baker.make('core.Period', long_name='Test period')
        testperiod2 = baker.make('core.Period', long_name='Test period 2', parentnode=testperiod.parentnode)
        testperiod3 = baker.make('core.Period', long_name='Test period 3', parentnode=testperiod.parentnode)
        periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup',
                                           period=testperiod)
        periodpermissiongroup2 = baker.make('devilry_account.PeriodPermissionGroup',
                                            period=testperiod2)
        baker.make('devilry_account.PeriodPermissionGroup', period=testperiod3)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup2.permissiongroup)
        mockresponse = self.mock_http200_getrequest_htmls(
            cradmin_role=testperiod.parentnode,
            requestuser=testuser
        )
        periodlist = [x.alltext_normalized for x in
                      mockresponse.selector.list('.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
        self.assertEqual(['Test period 2', 'Test period'], periodlist)

    def test_periodlist_no_periods(self):
        # No periods at all: the list container is not rendered.
        testsubject = baker.make('core.Subject')
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject)
        self.assertFalse(mockresponse.selector.exists('#devilry_admin_period_overview_periodlist'))

    def test_periodlist_itemrendering_name(self):
        testsubject = baker.make('core.Subject')
        testperiod = baker.make_recipe('devilry.apps.core.period_active',
                                       parentnode=testsubject,
                                       long_name='Test Period')
        testuser = baker.make(settings.AUTH_USER_MODEL)
        periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup', period=testperiod)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject, requestuser=testuser)
        self.assertEqual('Test Period',
                         mockresponse.selector.one(
                             '.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_periodlist_itemrendering_url(self):
        # Each item links to the period-admin overview for that period.
        testsubject = baker.make('core.Subject')
        testperiod = baker.make_recipe('devilry.apps.core.period_active',
                                       parentnode=testsubject,
                                       long_name='Test Period')
        testuser = baker.make(settings.AUTH_USER_MODEL)
        periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup', period=testperiod)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject, requestuser=testuser)
        self.assertEqual(crinstance.reverse_cradmin_url(instanceid='devilry_admin_periodadmin',
                                                        appname='overview',
                                                        roleid=testperiod.id),
                         mockresponse.selector.one(
                             '.devilry-admin-period-overview-perioditemframe')['href'])

    def test_periodlist_itemrendering_start_time(self):
        # Pin the datetime format so the assertion is locale-independent.
        testsubject = baker.make('core.Subject')
        testperiod = baker.make_recipe('devilry.apps.core.period_active', parentnode=testsubject)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup',
                                           period=testperiod)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
        with self.settings(DATETIME_FORMAT=datetimeutils.ISODATETIME_DJANGOFORMAT, USE_L10N=False):
            mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject, requestuser=testuser)
        self.assertEqual(datetimeutils.isoformat_noseconds(ACTIVE_PERIOD_START),
                         mockresponse.selector.one(
                             '.devilry-cradmin-perioditemvalue-start-time-value').alltext_normalized)

    def test_periodlist_itemrendering_end_time(self):
        testsubject = baker.make('core.Subject')
        testperiod = baker.make_recipe('devilry.apps.core.period_active',
                                       parentnode=testsubject)
        testuser = baker.make(settings.AUTH_USER_MODEL)
        periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup', period=testperiod)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
        with self.settings(DATETIME_FORMAT=datetimeutils.ISODATETIME_DJANGOFORMAT, USE_L10N=False):
            mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject, requestuser=testuser)
        self.assertEqual(datetimeutils.isoformat_noseconds(ACTIVE_PERIOD_END),
                         mockresponse.selector.one(
                             '.devilry-cradmin-perioditemvalue-end-time-value').alltext_normalized)

    def test_periodlist_ordering(self):
        # Future period first, then active, then old (descending start).
        testsubject = baker.make('core.Subject')
        testperiod1 = baker.make_recipe('devilry.apps.core.period_active',
                                        parentnode=testsubject,
                                        long_name='Period 2')
        testperiod2 = baker.make_recipe('devilry.apps.core.period_old',
                                        parentnode=testsubject,
                                        long_name='Period 1')
        testperiod3 = baker.make_recipe('devilry.apps.core.period_future',
                                        parentnode=testsubject,
                                        long_name='Period 3')
        testuser = baker.make(settings.AUTH_USER_MODEL)
        periodpermissiongroup1 = baker.make('devilry_account.PeriodPermissionGroup', period=testperiod1)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup1.permissiongroup)
        periodpermissiongroup2 = baker.make('devilry_account.PeriodPermissionGroup', period=testperiod2)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup2.permissiongroup)
        periodpermissiongroup3 = baker.make('devilry_account.PeriodPermissionGroup', period=testperiod3)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup3.permissiongroup)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject, requestuser=testuser)
        periodnames = [
            element.alltext_normalized
            for element in mockresponse.selector.list(
                '.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
        self.assertEqual([
            'Period 3',
            'Period 2',
            'Period 1',
        ], periodnames)

    def test_periodlist_only_periods_in_subject(self):
        # Periods from other subjects must not leak into the list, even
        # if the user administers them.
        testsubject = baker.make('core.Subject')
        othersubject = baker.make('core.Subject')
        testperiod1 = baker.make_recipe('devilry.apps.core.period_active',
                                        parentnode=testsubject,
                                        long_name='Testsubject Period 1')
        testperiod2 = baker.make_recipe('devilry.apps.core.period_active',
                                        parentnode=othersubject,
                                        long_name='Othersubject Period 1')
        testuser = baker.make(settings.AUTH_USER_MODEL)
        periodpermissiongroup = baker.make('devilry_account.PeriodPermissionGroup',
                                           period=testperiod1)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup.permissiongroup)
        periodpermissiongroup2 = baker.make('devilry_account.PeriodPermissionGroup',
                                            period=testperiod2)
        baker.make('devilry_account.PermissionGroupUser',
                   user=testuser, permissiongroup=periodpermissiongroup2.permissiongroup)
        mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testsubject, requestuser=testuser)
        self.assertEqual(
            1,
            mockresponse.selector.count('.cradmin-legacy-listbuilder-itemvalue-titledescription-title')
        )
        self.assertEqual(
            'Testsubject Period 1',
            mockresponse.selector.one(
                '.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized
        )
| 2.03125 | 2 |
Visualization/EndGameScreen.py | frosthamster/Tower-defense | 1 | 12759334 | <reponame>frosthamster/Tower-defense<filename>Visualization/EndGameScreen.py
from kivy.properties import StringProperty, ObjectProperty
from kivy.uix.screenmanager import Screen
class EndGameScreen(Screen):
    """Kivy screen shown when a game ends.

    Properties are bound from kv/callers; this class only declares them.
    """
    # Path of the image announcing the result (win/lose); set by the caller.
    message_image = StringProperty('')
    # Background picture for this screen.
    background = StringProperty('res/end_game_background.jpg')
    # Reference to the game screen instance — presumably used to return
    # to or restart the game; TODO confirm against the kv file.
    game_screen = ObjectProperty(None)
| 1.679688 | 2 |
pypeln/task/api/concat_task_test.py | quarckster/pypeln | 1,281 | 12759335 | <gh_stars>1000+
import sys
import time
import typing as tp
from unittest import TestCase
import hypothesis as hp
from hypothesis import strategies as st
import pypeln as pl
# Cap hypothesis example generation so the suite stays fast.
MAX_EXAMPLES = 10

# Generic type variable (not referenced in the visible tests).
T = tp.TypeVar("T")
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
def test_concat_basic(nums: tp.List[int]):
    """concat merges two branches of one pipeline (synchronous read)."""
    # Expected values computed eagerly with plain Python.
    nums_py = list(map(lambda x: x + 1, nums))
    nums_py1 = list(map(lambda x: x ** 2, nums_py))
    nums_py2 = list(map(lambda x: -x, nums_py))
    nums_py = nums_py1 + nums_py2

    # Same dataflow as a pypeln task pipeline: one shared stage fanned
    # out into two map branches, then concatenated.
    nums_pl = pl.task.map(lambda x: x + 1, nums)
    nums_pl1 = pl.task.map(lambda x: x ** 2, nums_pl)
    nums_pl2 = pl.task.map(lambda x: -x, nums_pl)
    nums_pl = pl.task.concat([nums_pl1, nums_pl2])

    # Cross-branch ordering is nondeterministic, so compare sorted.
    assert sorted(nums_pl) == sorted(nums_py)
@hp.given(nums=st.lists(st.integers()))
@hp.settings(max_examples=MAX_EXAMPLES)
@pl.task.utils.run_test_async
async def test_concat_basic_2(nums: tp.List[int]):
    """Same as test_concat_basic, but awaiting the concat result."""
    nums_py = list(map(lambda x: x + 1, nums))
    nums_py1 = list(map(lambda x: x ** 2, nums_py))
    nums_py2 = list(map(lambda x: -x, nums_py))
    nums_py = nums_py1 + nums_py2

    nums_pl = pl.task.map(lambda x: x + 1, nums)
    nums_pl1 = pl.task.map(lambda x: x ** 2, nums_pl)
    nums_pl2 = pl.task.map(lambda x: -x, nums_pl)
    # Awaiting the stage collects its output asynchronously.
    nums_pl = await pl.task.concat([nums_pl1, nums_pl2])

    assert sorted(nums_pl) == sorted(nums_py)
# @hp.given(nums=st.lists(st.integers()))
# @hp.settings(max_examples=MAX_EXAMPLES)
def test_concat_multiple(nums: tp.List[int] = [1, 2, 3]):
    """Chained concats accumulate stages (synchronous read).

    NOTE(review): the hypothesis decorators are commented out and a
    mutable default argument serves as a fixed fixture; the list is
    never mutated, so this is harmless here.
    """
    nums_py = [x + 1 for x in nums]
    nums_py1 = nums_py + nums_py
    nums_py2 = nums_py1 + nums_py

    nums_pl = pl.task.map(lambda x: x + 1, nums)
    nums_pl1 = pl.task.concat([nums_pl, nums_pl])
    nums_pl2 = pl.task.concat([nums_pl1, nums_pl])

    # assert sorted(nums_py1) == sorted(list(nums_pl1))
    assert sorted(nums_py2) == sorted(list(nums_pl2))
@pl.task.utils.run_test_async
async def test_concat_multiple_2(nums: tp.List[int] = [1, 2, 3]):
    """Same as test_concat_multiple, but awaiting the final concat.

    NOTE(review): mutable default argument used as a fixed fixture;
    never mutated, so harmless here.
    """
    nums_py = [x + 1 for x in nums]
    nums_py1 = nums_py + nums_py
    nums_py2 = nums_py1 + nums_py

    nums_pl = pl.task.map(lambda x: x + 1, nums)
    nums_pl1 = pl.task.concat([nums_pl, nums_pl])
    nums_pl2 = await pl.task.concat([nums_pl1, nums_pl])

    # assert sorted(nums_py1) == sorted(list(nums_pl1))
    assert sorted(nums_py2) == sorted(list(nums_pl2))
| 2.609375 | 3 |
edb/server/pgsql/deltadbops.py | mcaramma/edgedb | 2 | 12759336 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Abstractions for low-level database DDL and DML operations."""
from edb.lang.schema import delta as sd
from edb.lang.schema import name as sn
from edb.lang.schema import objects as s_obj
from edb.lang.common import adapter
from edb.lang import edgeql
from edb.server.pgsql import common
from edb.server.pgsql import dbops
from edb.server.pgsql import metaschema
from edb.server.pgsql.dbops import catalogs as pg_catalogs
class SchemaDBObjectMeta(adapter.Adapter, type(s_obj.Object)):
    """Metaclass combining the adapter registry with the schema Object
    metaclass, so DB adapters of schema objects are registered as
    adapters while remaining proper schema Object subclasses."""

    def __init__(cls, name, bases, dct, *, adapts=None):
        # Run both metaclass initializers explicitly (no cooperative
        # super() chain between the two hierarchies).
        adapter.Adapter.__init__(cls, name, bases, dct, adapts=adapts)
        type(s_obj.Object).__init__(cls, name, bases, dct)
class SchemaDBObject(metaclass=SchemaDBObjectMeta):
    """Mixin base for database-level adapters of schema objects."""

    @classmethod
    def adapt(cls, obj):
        # Adapting is implemented as copying the object into this class.
        return cls.copy(obj)

    @classmethod
    def get_canonical_class(cls):
        """Return the first schema Object base that is not itself a
        SchemaDBObject, falling back to *cls*."""
        candidates = (
            base for base in cls.__bases__
            if issubclass(base, s_obj.Object)
            and not issubclass(base, SchemaDBObject)
        )
        return next(candidates, cls)
class CallDeltaHook(dbops.Command):
    """DDL command that invokes a named delta hook when executed.

    Missing hooks are silently ignored, so optional hooks do not break
    delta application.
    """

    def __init__(
            self, *, hook, stage, op, conditions=None, neg_conditions=None,
            priority=0):
        super().__init__(
            conditions=conditions, neg_conditions=neg_conditions,
            priority=priority)
        self.hook = hook    # name of the hook to invoke
        self.stage = stage  # delta stage the hook belongs to
        self.op = op        # delta operation providing call_hook()

    async def execute(self, context):
        try:
            self.op.call_hook(
                context.session, stage=self.stage, hook=self.hook)
        except sd.DeltaHookNotFoundError:
            # Hooks are optional; absence is not an error.
            pass
class ConstraintCommon:
    """Shared helpers for schema-constraint adapters (domain and table)."""

    def constraint_name(self, quote=True):
        """PG-mangled constraint name, quoted as an identifier by default."""
        pg_name = common.edgedb_name_to_pg_name(self.raw_constraint_name())
        if quote:
            return common.quote_ident(pg_name)
        return pg_name

    def schema_constraint_name(self):
        """Name of the schema-level constraint this adapter wraps."""
        return self._constraint.name

    def raw_constraint_name(self):
        """Schema constraint name tagged with the 'schemaconstr' marker."""
        return '{};{}'.format(self._constraint.name, 'schemaconstr')

    async def extra(self, context):
        # Persist the raw name as a COMMENT so the schema constraint can
        # be recovered from the database later.
        return [dbops.Comment(object=self, text=self.raw_constraint_name())]

    def rename_extra(self, context, new_constraint):
        # Refresh the COMMENT to track the renamed constraint.
        return [dbops.Comment(object=new_constraint,
                              text=new_constraint.raw_constraint_name())]

    @property
    def is_abstract(self):
        return self._constraint.is_abstract
class SchemaConstraintDomainConstraint(
        ConstraintCommon, dbops.DomainConstraint):
    """A schema constraint materialized as a PostgreSQL *domain* constraint."""

    def __init__(self, domain_name, constraint, exprdata):
        super().__init__(domain_name)
        self._exprdata = exprdata      # list of compiled expression records
        self._constraint = constraint  # the schema-level constraint object

    async def extra(self, context):
        # There seems to be no direct way to COMMENT on a domain constraint.
        # See http://postgr.es/m/5310157.yWWCtg2qIU@klinga.prans.org
        # Work this around by updating pg_description directly.
        #
        # text = self.raw_constraint_name()
        # cmd = dbops.Comment(object=self, text=text)
        # return [cmd]

        table = pg_catalogs.PgDescriptionTable()
        rec = table.record()

        objoid = dbops.Query(
            '(SELECT oid FROM pg_constraint WHERE conname = $1)',
            [self.constraint_name(quote=False)], type='oid')

        classoid = dbops.Query(
            '''(SELECT c.oid
                FROM
                    pg_class c INNER JOIN pg_namespace ns
                        ON c.relnamespace = ns.oid
                WHERE
                    c.relname = 'pg_constraint' AND
                    ns.nspname = 'pg_catalog')
            ''', [], type='oid')

        rec.objoid = objoid
        rec.classoid = classoid
        rec.description = self.raw_constraint_name()
        rec.objsubid = 0

        cond = [('objoid', objoid), ('classoid', classoid)]
        cmd = dbops.Merge(table=table, record=rec, condition=cond)

        return [cmd]

    async def constraint_code(self, context):
        """Render the CHECK (...) clause for this domain constraint."""
        if len(self._exprdata) == 1:
            expr = self._exprdata[0]['exprdata']['plain']
        else:
            # Bug fix: ``_exprdata`` is a *list* of records, each holding
            # an 'exprdata' mapping.  The previous code indexed the list
            # with a string key (``self._exprdata['exprdata']``), which
            # raised TypeError for any multi-expression constraint.
            # This now mirrors SchemaConstraintTableConstraint.
            exprs = [e['exprdata']['plain'] for e in self._exprdata]
            expr = '(' + ') AND ('.join(exprs) + ')'

        return 'CHECK ({})'.format(expr)

    def __repr__(self):
        # Bug fix: the previous implementation applied the ``%`` operator
        # to a str.format-style template ('<{}.{} "{}" "%r">' % ...),
        # which raised TypeError whenever repr() was taken.
        return '<{}.{} {!r} {!r}>'.format(
            self.__class__.__module__, self.__class__.__name__,
            self.domain_name, self._constraint)
class SchemaConstraintTableConstraint(ConstraintCommon, dbops.TableConstraint):
    """A schema constraint materialized as PostgreSQL *table* constraint(s).

    Depending on ``scope``/``type`` this renders either a single CHECK
    clause or one-or-more UNIQUE/EXCLUDE clauses, optionally enforced
    across an inheritance hierarchy by the trigger machinery below.
    """

    def __init__(self, table_name, *, constraint, exprdata, scope, type):
        super().__init__(table_name, None)
        self._constraint = constraint  # the schema-level constraint object
        self._exprdata = exprdata      # list of compiled expression records
        self._scope = scope            # 'row' for CHECK-style constraints
        self._type = type              # e.g. 'check' or 'unique'

    async def constraint_code(self, context):
        """Return the constraint clause(s).

        A single string for row-scope (CHECK) constraints; a list of
        clauses for unique constraints (one per expression record).
        Raises ValueError for a non-row scope with an unexpected type.
        """
        if self._scope == 'row':
            if len(self._exprdata) == 1:
                expr = self._exprdata[0]['exprdata']['plain']
            else:
                exprs = [e['exprdata']['plain'] for e in self._exprdata]
                expr = '(' + ') AND ('.join(exprs) + ')'

            expr = 'CHECK ({})'.format(expr)

        else:
            if self._type != 'unique':
                raise ValueError(
                    'unexpected constraint type: {}'.format(self._type))

            constr_exprs = []

            for expr in self._exprdata:
                if expr['is_trivial']:
                    # A constraint that contains one or more
                    # references to columns, and no expressions.
                    #
                    expr = ', '.join(expr['exprdata']['plain_chunks'])
                    expr = 'UNIQUE ({})'.format(expr)
                else:
                    # Complex constraint with arbitrary expressions
                    # needs to use EXCLUDE.
                    #
                    chunks = expr['exprdata']['plain_chunks']
                    expr = ', '.join(
                        "{} WITH =".format(chunk) for chunk in chunks)
                    expr = 'EXCLUDE ({})'.format(expr)

                constr_exprs.append(expr)

            expr = constr_exprs

        return expr

    def numbered_constraint_name(self, i, quote=True):
        """Name of the *i*-th sub-constraint of a multiconstraint."""
        raw_name = self.raw_constraint_name()
        name = common.edgedb_name_to_pg_name('{}#{}'.format(raw_name, i))
        return common.quote_ident(name) if quote else name

    def get_trigger_procname(self):
        """Return (schema, name) of the enforcing trigger procedure."""
        schema = common.edgedb_module_name_to_schema_name(
            self.schema_constraint_name().module)
        proc_name = common.edgedb_name_to_pg_name(
            self.raw_constraint_name() + '_trigproc')
        return schema, proc_name

    def get_trigger_condition(self):
        """WHEN condition for the UPDATE trigger: fire only when one of
        the constrained expressions actually changed."""
        chunks = []

        for expr in self._exprdata:
            condition = '{old_expr} IS DISTINCT FROM {new_expr}'.format(
                old_expr=expr['exprdata']['old'],
                new_expr=expr['exprdata']['new'])
            chunks.append(condition)

        if len(chunks) == 1:
            return chunks[0]
        else:
            return '(' + ') OR ('.join(chunks) + ')'

    def get_trigger_proc_text(self):
        """PL/pgSQL body of the trigger procedure that emulates a UNIQUE
        constraint (raises unique_violation on duplicates)."""
        chunks = []

        if self.is_multiconstraint():
            constr_name = self.numbered_constraint_name(0)
            raw_constr_name = self.numbered_constraint_name(0, quote=False)
        else:
            constr_name = self.constraint_name()
            raw_constr_name = self.constraint_name(quote=False)

        errmsg = 'duplicate key value violates unique ' \
                 'constraint {constr}'.format(constr=constr_name)

        subject_table = self.get_subject_name()

        # One duplicate-check block per constrained expression.
        for expr in self._exprdata:
            exprdata = expr['exprdata']

            text = '''
            PERFORM
                TRUE
            FROM
                {table}
            WHERE
                {plain_expr} = {new_expr};
            IF FOUND THEN
                RAISE unique_violation
                    USING
                        TABLE = '{table[1]}',
                        SCHEMA = '{table[0]}',
                        CONSTRAINT = '{constr}',
                        MESSAGE = '{errmsg}',
                        DETAIL = 'Key ({plain_expr}) already exists.';
            END IF;
            '''.format(
                plain_expr=exprdata['plain'], new_expr=exprdata['new'],
                table=subject_table, constr=raw_constr_name, errmsg=errmsg)

            chunks.append(text)

        text = 'BEGIN\n' + '\n\n'.join(chunks) + '\nRETURN NEW;\nEND;'

        return text

    def is_multiconstraint(self):
        """Determine if multiple database constraints are needed."""
        return self._scope != 'row' and len(self._exprdata) > 1

    def is_natively_inherited(self):
        """Determine if this constraint can be inherited natively."""
        return self._type == 'check'

    def __repr__(self):
        return '<{}.{} {!r}>'.format(
            self.__class__.__module__, self.__class__.__name__,
            self._constraint)
class MultiConstraintItem:
    """One numbered member of a multiconstraint, addressable for COMMENT."""

    def __init__(self, constraint, index):
        self.constraint = constraint
        self.index = index

    def get_type(self):
        return self.constraint.get_type()

    def get_id(self):
        """Identifier of this member: '"<name>#<i>" ON <type> <subject>'."""
        raw = '{}#{}'.format(
            self.constraint.raw_constraint_name(), self.index)
        quoted = common.quote_ident(common.edgedb_name_to_pg_name(raw))
        return '{} ON {} {}'.format(
            quoted, self.constraint.get_subject_type(),
            self.constraint.get_subject_name())
class AlterTableAddMultiConstraint(dbops.AlterTableAddConstraint):
    """ADD CONSTRAINT that may expand into several numbered clauses."""

    async def code(self, context):
        exprs = await self.constraint.constraint_code(context)

        if isinstance(exprs, list) and len(exprs) > 1:
            # Multiconstraint: one ADD CONSTRAINT per numbered member.
            chunks = []

            for i, expr in enumerate(exprs):
                name = self.constraint.numbered_constraint_name(i)
                chunk = 'ADD CONSTRAINT {} {}'.format(name, expr)
                chunks.append(chunk)

            code = ', '.join(chunks)
        else:
            # A single-clause constraint may still come back as a
            # one-element list; unwrap it.
            if isinstance(exprs, list):
                exprs = exprs[0]

            name = self.constraint.constraint_name()
            code = 'ADD CONSTRAINT {} {}'.format(name, exprs)

        return code

    async def extra(self, context, alter_table):
        # Attach the raw constraint name as a COMMENT to each generated
        # (sub-)constraint so it can be mapped back to the schema object.
        comments = []

        exprs = await self.constraint.constraint_code(context)
        constr_name = self.constraint.raw_constraint_name()

        if isinstance(exprs, list) and len(exprs) > 1:
            for i, expr in enumerate(exprs):
                constraint = MultiConstraintItem(self.constraint, i)

                comment = dbops.Comment(constraint, constr_name)
                comments.append(comment)
        else:
            comment = dbops.Comment(self.constraint, constr_name)
            comments.append(comment)

        return comments
class AlterTableRenameMultiConstraint(
        dbops.AlterTableBaseMixin, dbops.CommandGroup):
    """Rename a (possibly multi-part) table constraint.

    Expands into one RENAME CONSTRAINT per numbered member when the
    constraint is backed by several database constraints.
    """

    def __init__(
            self, name, *, constraint, new_constraint, contained=False,
            conditions=None, neg_conditions=None, priority=0):
        dbops.CommandGroup.__init__(
            self, conditions=conditions, neg_conditions=neg_conditions,
            priority=priority)

        dbops.AlterTableBaseMixin.__init__(
            self, name=name, contained=contained)

        self.constraint = constraint          # constraint being renamed
        self.new_constraint = new_constraint  # constraint carrying the new name

    async def execute(self, context):
        c = self.constraint
        nc = self.new_constraint

        exprs = await self.constraint.constraint_code(context)

        if isinstance(exprs, list) and len(exprs) > 1:
            # Rename each numbered member individually.
            for i, expr in enumerate(exprs):
                old_name = c.numbered_constraint_name(i, quote=False)
                new_name = nc.numbered_constraint_name(i, quote=False)

                ac = dbops.AlterTableRenameConstraintSimple(
                    name=self.name, old_name=old_name, new_name=new_name)

                self.add_command(ac)
        else:
            old_name = c.constraint_name(quote=False)
            new_name = nc.constraint_name(quote=False)

            ac = dbops.AlterTableRenameConstraintSimple(
                name=self.name, old_name=old_name, new_name=new_name)

            self.add_command(ac)

        return await super().execute(context)

    async def extra(self, context):
        # Refresh the COMMENTs so they carry the new raw constraint name.
        comments = []

        exprs = await self.new_constraint.constraint_code(context)
        constr_name = self.new_constraint.raw_constraint_name()

        if isinstance(exprs, list) and len(exprs) > 1:
            for i, expr in enumerate(exprs):
                constraint = MultiConstraintItem(self.new_constraint, i)

                comment = dbops.Comment(constraint, constr_name)
                comments.append(comment)
        else:
            comment = dbops.Comment(self.new_constraint, constr_name)
            comments.append(comment)

        return comments
class AlterTableDropMultiConstraint(dbops.AlterTableDropConstraint):
    """DROP CONSTRAINT that expands to one clause per numbered member."""

    async def code(self, context):
        exprs = await self.constraint.constraint_code(context)

        if isinstance(exprs, list) and len(exprs) > 1:
            # One DROP clause per numbered sub-constraint.
            return ', '.join(
                'DROP CONSTRAINT {}'.format(
                    self.constraint.numbered_constraint_name(i))
                for i in range(len(exprs)))

        return 'DROP CONSTRAINT {}'.format(self.constraint.constraint_name())
class AlterTableInheritableConstraintBase(
        dbops.AlterTableBaseMixin, dbops.CommandGroup):
    """Base command group for constraints that must apply to descendant tables.

    Natively-inheritable constraints are managed with plain ALTER TABLE
    commands.  Non-native ones are emulated via inherited INSERT/UPDATE
    constraint triggers backed by a generated plpgsql checker function.
    """
    def __init__(
            self, name, *, constraint, contained=False, conditions=None,
            neg_conditions=None, priority=0):
        # NOTE(review): the class bases list dbops.CommandGroup, but this
        # explicitly initializes dbops.CompositeCommandGroup (the sibling
        # class at L6269 initializes CommandGroup) -- confirm intentional.
        dbops.CompositeCommandGroup.__init__(
            self, conditions=conditions, neg_conditions=neg_conditions,
            priority=priority)
        dbops.AlterTableBaseMixin.__init__(
            self, name=name, contained=contained)
        self._constraint = constraint
    def create_constr_trigger(self, table_name, constraint, proc_name):
        """Build (disabled, inheritable) INSERT and UPDATE constraint
        triggers on *table_name* that invoke *proc_name*."""
        cmds = []
        cname = constraint.raw_constraint_name()
        ins_trigger_name = common.edgedb_name_to_pg_name(cname + '_instrigger')
        ins_trigger = dbops.Trigger(
            name=ins_trigger_name, table_name=table_name, events=('insert', ),
            procedure=proc_name, is_constraint=True, inherit=True)
        cr_ins_trigger = dbops.CreateTrigger(ins_trigger)
        cmds.append(cr_ins_trigger)
        # Disabled on this table only; descendants inherit it enabled.
        disable_ins_trigger = dbops.DisableTrigger(ins_trigger, self_only=True)
        cmds.append(disable_ins_trigger)
        upd_trigger_name = common.edgedb_name_to_pg_name(cname + '_updtrigger')
        condition = constraint.get_trigger_condition()
        upd_trigger = dbops.Trigger(
            name=upd_trigger_name, table_name=table_name, events=('update', ),
            procedure=proc_name, condition=condition, is_constraint=True,
            inherit=True)
        cr_upd_trigger = dbops.CreateTrigger(upd_trigger)
        cmds.append(cr_upd_trigger)
        disable_upd_trigger = dbops.DisableTrigger(upd_trigger, self_only=True)
        cmds.append(disable_upd_trigger)
        return cmds
    def rename_constr_trigger(self, table_name):
        """Return rename commands moving both emulation triggers from the
        old constraint name to the new one.

        Relies on self._new_constraint, which is set by the Rename/Alter
        subclasses below.
        """
        constraint = self._constraint
        new_constr = self._new_constraint
        cname = constraint.raw_constraint_name()
        ncname = new_constr.raw_constraint_name()
        ins_trigger_name = common.edgedb_name_to_pg_name(cname + '_instrigger')
        new_ins_trg_name = common.edgedb_name_to_pg_name(
            ncname + '_instrigger')
        # procedure='null' -- only the name/table identify the trigger here.
        ins_trigger = dbops.Trigger(
            name=ins_trigger_name, table_name=table_name, events=('insert', ),
            procedure='null', is_constraint=True, inherit=True)
        rn_ins_trigger = dbops.AlterTriggerRenameTo(
            ins_trigger, new_name=new_ins_trg_name)
        upd_trigger_name = common.edgedb_name_to_pg_name(cname + '_updtrigger')
        new_upd_trg_name = common.edgedb_name_to_pg_name(
            ncname + '_updtrigger')
        upd_trigger = dbops.Trigger(
            name=upd_trigger_name, table_name=table_name, events=('update', ),
            procedure='null', is_constraint=True, inherit=True)
        rn_upd_trigger = dbops.AlterTriggerRenameTo(
            upd_trigger, new_name=new_upd_trg_name)
        return (rn_ins_trigger, rn_upd_trigger)
    def drop_constr_trigger(self, table_name, constraint):
        """Return drop commands for both emulation triggers of *constraint*."""
        cname = constraint.raw_constraint_name()
        ins_trigger_name = common.edgedb_name_to_pg_name(cname + '_instrigger')
        ins_trigger = dbops.Trigger(
            name=ins_trigger_name, table_name=table_name, events=('insert', ),
            procedure='null', is_constraint=True, inherit=True)
        drop_ins_trigger = dbops.DropTrigger(ins_trigger)
        upd_trigger_name = common.edgedb_name_to_pg_name(cname + '_updtrigger')
        upd_trigger = dbops.Trigger(
            name=upd_trigger_name, table_name=table_name, events=('update', ),
            procedure='null', is_constraint=True, inherit=True)
        drop_upd_trigger = dbops.DropTrigger(upd_trigger)
        return [drop_ins_trigger, drop_upd_trigger]
    def drop_constr_trigger_function(self, proc_name):
        """Return the command dropping the trigger checker function."""
        return [dbops.DropFunction(name=proc_name, args=())]
    def create_constraint(self, constraint):
        """Queue commands creating the constraint (plus trigger emulation
        for non-natively-inherited constraints)."""
        # Add the constraint normally to our table
        #
        my_alter = dbops.AlterTable(self.name)
        add_constr = AlterTableAddMultiConstraint(constraint=constraint)
        my_alter.add_command(add_constr)
        self.add_command(my_alter)
        if not constraint.is_natively_inherited():
            # The constraint is not inherited by descendant tables natively,
            # use triggers to emulate inheritance.
            #
            # Create trigger function
            #
            proc_name = constraint.get_trigger_procname()
            proc_text = constraint.get_trigger_proc_text()
            proc = dbops.CreateFunction(
                dbops.Function(
                    name=proc_name, text=proc_text, volatility='stable',
                    returns='trigger', language='plpgsql'))
            self.add_command(proc)
            # Add a (disabled) inheritable trigger on self.
            # Trigger inheritance will propagate and maintain
            # the trigger on current and future descendants.
            #
            cr_trigger = self.create_constr_trigger(
                self.name, constraint, proc_name)
            self.add_commands(cr_trigger)
    def rename_constraint(self, old_constraint, new_constraint):
        """Queue commands renaming the constraint and, for emulated
        constraints, its checker function and triggers."""
        # Rename the native constraint(s) normally
        #
        rename_constr = AlterTableRenameMultiConstraint(
            name=self.name, constraint=old_constraint,
            new_constraint=new_constraint)
        self.add_command(rename_constr)
        if not old_constraint.is_natively_inherited():
            # Alter trigger function
            #
            old_proc_name = old_constraint.get_trigger_procname()
            new_proc_name = new_constraint.get_trigger_procname()
            rename_proc = dbops.RenameFunction(
                name=old_proc_name, args=(), new_name=new_proc_name)
            self.add_command(rename_proc)
            # The function body embeds the constraint name, so replace it too.
            new_proc_text = new_constraint.get_trigger_proc_text()
            alter_text = dbops.AlterFunctionReplaceText(
                name=new_proc_name, args=(), new_text=new_proc_text)
            self.add_command(alter_text)
            mv_trigger = self.rename_constr_trigger(self.name)
            self.add_commands(mv_trigger)
    def alter_constraint(self, old_constraint, new_constraint):
        """Queue the commands needed to transition between two constraint
        states (abstract <-> concrete, or generic modification)."""
        if old_constraint.is_abstract and not new_constraint.is_abstract:
            # No longer abstract, create db structures
            self.create_constraint(new_constraint)
        elif not old_constraint.is_abstract and new_constraint.is_abstract:
            # Now abstract, drop db structures
            self.drop_constraint(new_constraint)
        else:
            # Some other modification, drop/create
            self.drop_constraint(new_constraint)
            self.create_constraint(new_constraint)
    def drop_constraint(self, constraint):
        """Queue commands dropping the constraint (and trigger emulation
        machinery for non-natively-inherited constraints)."""
        if not constraint.is_natively_inherited():
            self.add_commands(self.drop_constr_trigger(self.name, constraint))
            # Drop trigger function
            #
            # NOTE(review): the checker function name is reconstructed here
            # as (<schema>, <raw_name> + '_trigproc'), while creation used
            # constraint.get_trigger_procname() -- confirm these stay in sync.
            proc_name = constraint.raw_constraint_name() + '_trigproc'
            proc_name = self.name[0], common.edgedb_name_to_pg_name(proc_name)
            self.add_commands(self.drop_constr_trigger_function(proc_name))
        # Drop the constraint normally from our table
        #
        my_alter = dbops.AlterTable(self.name)
        drop_constr = AlterTableDropMultiConstraint(constraint=constraint)
        my_alter.add_command(drop_constr)
        self.add_command(my_alter)
class AlterTableAddInheritableConstraint(AlterTableInheritableConstraintBase):
    """ADD an inheritable constraint; abstract constraints queue nothing."""

    def __repr__(self):
        cls = type(self)
        return '<{}.{} {!r}>'.format(
            cls.__module__, cls.__name__, self._constraint)

    async def _execute(self, context, code, vars):
        # Concrete constraints get their DB structures queued just before
        # the group executes.
        if not self._constraint.is_abstract:
            self.create_constraint(self._constraint)
        await super()._execute(context, code, vars)
class AlterTableRenameInheritableConstraint(AlterTableInheritableConstraintBase):
    """RENAME an inheritable constraint (and its emulation triggers)."""

    def __init__(self, name, *, constraint, new_constraint, **kwargs):
        super().__init__(name, constraint=constraint, **kwargs)
        self._new_constraint = new_constraint

    def __repr__(self):
        cls = type(self)
        return '<{}.{} {!r}>'.format(
            cls.__module__, cls.__name__, self._constraint)

    async def execute(self, context):
        if not self._constraint.is_abstract:
            self.rename_constraint(self._constraint, self._new_constraint)
        await super().execute(context)
class AlterTableAlterInheritableConstraint(AlterTableInheritableConstraintBase):
    """ALTER an inheritable constraint from one definition to another."""

    def __init__(self, name, *, constraint, new_constraint, **kwargs):
        super().__init__(name, constraint=constraint, **kwargs)
        self._new_constraint = new_constraint

    def __repr__(self):
        cls = type(self)
        return '<{}.{} {!r}>'.format(
            cls.__module__, cls.__name__, self._constraint)

    async def execute(self, context):
        self.alter_constraint(self._constraint, self._new_constraint)
        await super().execute(context)
class AlterTableDropInheritableConstraint(AlterTableInheritableConstraintBase):
    """DROP an inheritable constraint; abstract constraints queue nothing."""

    def __repr__(self):
        cls = type(self)
        return '<{}.{} {!r}>'.format(
            cls.__module__, cls.__name__, self._constraint)

    async def execute(self, context):
        if not self._constraint.is_abstract:
            self.drop_constraint(self._constraint)
        await super().execute(context)
class MappingIndex(dbops.Index):
    """Partial UNIQUE index over link rows, scoped to a set of link classes.

    The index name and WHERE predicate are derived at creation time from
    the class-id mapping supplied by the execution context.
    """

    def __init__(self, name_prefix, cardinality, link_names, table_name):
        super().__init__(None, table_name, True)
        self.link_names = link_names
        self.name_prefix = name_prefix
        self.cardinality = cardinality

    async def creation_code(self, context):
        """Return the CREATE UNIQUE INDEX statement for this mapping index."""
        link_map = await context.get_class_map()
        ids = tuple(sorted(link_map[n] for n in self.link_names))
        id_str = '_'.join(str(i) for i in ids)
        name = '%s_%s_%s_cardinality_idx' % (
            self.name_prefix, id_str, self.cardinality)
        name = common.edgedb_name_to_pg_name(name)
        predicate = 'ptr_item_id IN (%s)' % ', '.join(str(id) for id in ids)
        # NOTE(review): the literal "s" after {table} pluralizes the quoted
        # table name in the generated DDL -- looks suspicious; confirm
        # against the expected table naming before removing it.
        code = '''
            CREATE {unique} INDEX {name} ON {table}s ({cols}) {predicate}
        '''.format(unique='UNIQUE',
                   name=common.qname(name),
                   table=common.qname(*self.table_name),
                   cols=', '.join(common.quote_ident(c) for c in self.columns),
                   predicate=('WHERE {}'.format(predicate)))
        return code

    def __repr__(self):
        name = '%s_%s_%s_cardinality_idx' % (
            self.name_prefix, '<HASH>', self.cardinality)
        predicate = 'ptr_item_id IN (%s)' % ', '.join(
            str(n) for n in self.link_names)
        # Bug fix: the original format string was '<{mod.{cls} ...', an
        # invalid field name that raised at runtime; '{mod}.{cls}' is the
        # intended form.
        return \
            '<{mod}.{cls} name="{name}" cols=({cols}) unique={uniq} ' \
            'predicate={pred}>'.format(
                mod=self.__class__.__module__,
                cls=self.__class__.__name__,
                name=name,
                cols=','.join(self.columns),
                uniq=self.unique,
                pred=predicate)
class MangleExprObjectRefs(dbops.Command):
    """Rewrite class references inside a stored expression to __class__ ids
    and persist the result into the metaschema table row for *scls*."""

    def __init__(self, *, scls, field, expr,
                 conditions=None, neg_conditions=None, priority=0):
        super().__init__(
            conditions=conditions, neg_conditions=neg_conditions,
            priority=priority)
        self.name = scls.name
        self.table = metaschema.get_metaclass_table(scls.__class__)
        self.field = common.edgedb_name_to_pg_name(field)
        self.expr = expr

    async def execute(self, context):
        # NOTE(review): uses the private context._get_class_map(), while
        # MappingIndex above calls context.get_class_map() -- confirm which
        # spelling the context actually provides.
        class_map = await context._get_class_map()

        def resolve(name):
            clsid = class_map.get(name)
            if not clsid:
                # Unknown names pass through unchanged.
                return name
            return sn.Name(module='__class__', name=str(clsid))

        record = self.table.record()
        setattr(record, self.field, edgeql.rewrite_refs(self.expr, resolve))
        update = dbops.Update(
            table=self.table, record=record,
            condition=[('name', str(self.name))])
        await update.execute(context)
| 1.90625 | 2 |
codeScanner/views.py | code-goodbuy/Goodbuy | 2 | 12759337 | <gh_stars>1-10
from django.shortcuts import render, redirect
from goodbuyDatabase.models import Product, Rating
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
@login_required
def scanCode(request):
    """Render the barcode-scanner page (authenticated users only)."""
    template = "codeScanner/code_scanner.html"
    return render(request, template)
| 1.515625 | 2 |
wall_following_master/labs/wall_following/scripts/pid_error.py | Yuze-HE/F1_tenth | 0 | 12759338 | #!/usr/bin/env python
import rospy
from math import cos, sin, atan, pi
import numpy as np
import yaml
import sys
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Float64
import pdb
# Publisher for the computed wall-following error, consumed downstream.
pub = rospy.Publisher('pid_error', Float64, queue_size=10)
# You can define constants in Python as uppercase global names like these.
# NOTE(review): these bounds are not referenced anywhere in this file --
# presumably documentation of the lidar's usable range; confirm.
MIN_DISTANCE = 0.1
MAX_DISTANCE = 30.0
MIN_ANGLE = -45.0
MAX_ANGLE = 225.0
# Module-level caches of the most recent lidar readings, written by the
# follow* helpers below (declared `global` there).
a = 0.0
b = 0.0
al = 0.0
bl = 0.0
ar = 0.0
br = 0.0
# data: single message from topic /scan
# angle: between -45 to 225 degrees, where 0 degrees is directly to the right
# Outputs length in meters to object with angle in lidar scan field of view
def getRange(data, angle):
    """Return the lidar range (meters) measured at `angle` degrees.

    data: a sensor_msgs/LaserScan message (only .ranges is used).
    angle: degrees in [-45, 225], where 0 degrees is directly to the right.

    The Hokuyo UST-10LX has 0.25-degree angular resolution, so the beam
    index is (angle + 45) * 4, with index 0 at -45 degrees.
    """
    ranges = np.asarray(data.ranges)
    # int(round(...)) keeps integer inputs unchanged while generalizing
    # the original integer-only arithmetic to float angles (numpy rejects
    # non-integer indices).
    angle_index = int(round((angle + 45) * 4))
    return ranges[angle_index]
# data: single message from topic /scan
# desired_distance: desired distance to the left wall [meters]
# Outputs the PID error required to make the car follow the left wall.
def followLeft(data, desired_distance):
    """Return the PID error for following the LEFT wall at desired_distance
    meters (0 deg is directly right per getRange's convention, 180 is left)."""
    global a, b
    lookahead = 0.015  # old: 0.025
    theta = 45 * pi / 180
    a = getRange(data, 135)  # beam 45 deg off the left-perpendicular
    b = getRange(data, 180)  # beam directly to the left
    alpha = atan((a * cos(theta) - b) / (a * sin(theta)))
    current_dist = b * cos(alpha)
    # Negated (projected distance after the lookahead minus the target).
    return -(current_dist - desired_distance + lookahead * sin(alpha))
# data: single message from topic /scan
# desired_distance: desired distance to the right wall [meters]
# Outputs the PID error required to make the car follow the right wall.
def followRight(data, desired_distance):
    """Return the PID error for following the RIGHT wall at desired_distance
    meters (0 deg is directly right per getRange's convention)."""
    global a, b
    lookahead = 0.025
    theta = 45 * pi / 180
    a = getRange(data, 45)  # beam 45 deg off the right-perpendicular
    b = getRange(data, 0)   # beam directly to the right
    alpha = atan((a * cos(theta) - b) / (a * sin(theta)))
    current_dist = b * cos(alpha)
    # Negated (projected distance after the lookahead minus the target).
    return -(current_dist - desired_distance + lookahead * sin(alpha))
# data: single message from topic /scan
# Outputs the PID error required to make the car drive in the middle
# of the hallway.
def followCenter(data):
    """Return the PID error that steers the car toward the hallway midline."""
    global al, bl, ar, br
    lookahead = 0.025
    theta = 45 * pi / 180
    al = getRange(data, 135)  # left diagonal beam
    bl = getRange(data, 180)  # directly left
    ar = getRange(data, 0)    # directly right
    br = getRange(data, 45)   # right diagonal beam
    alpha_l = atan((al * cos(theta) - bl) / (al * sin(theta)))
    alpha_r = atan((ar * cos(theta) - br) / (ar * sin(theta)))
    left_dist = bl * cos(alpha_l)
    right_dist = br * cos(alpha_r)
    # The target distance is the midpoint of the two wall distances.
    midline = (left_dist + right_dist) / 2.0
    return -(right_dist - midline + lookahead * sin(alpha_r))
# Callback for receiving LIDAR data on the /scan topic.
# data: the LIDAR data, published as a list of distances to the wall.
def scan_callback(data):
    # Compute the wall-following error from the latest lidar scan and
    # publish it on /pid_error for the downstream controller.
    error = followCenter(data) # TODO: replace with followLeft, followRight, or followCenter
    msg = Float64()
    msg.data = error
    pub.publish(msg)
# Boilerplate code to start this ROS node.
# DO NOT MODIFY!
if __name__ == '__main__':
    # Node setup: subscribe to /scan and hand control to the ROS event loop.
    rospy.init_node('pid_error_node', anonymous = True)
    rospy.Subscriber("scan", LaserScan, scan_callback)
    rospy.spin()
| 2.828125 | 3 |
util/utilities.py | umbertov/SpicyArbs | 0 | 12759339 | """
Utility functions go here.
SpiceBucks
"""
# ------------------------------------------------------------------
import sys
import numpy as np
from util.message import message
# ------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# system functions
# ----------------------------------------z-------------------------------------------------
def exit(code):
    """
    Exit the program, 0 is failure, 1 is success.

    Any non-integer code is reported and treated as a failure.  This
    function never returns -- it always ends in sys.exit().

    Note: intentionally shadows the builtin ``exit`` (existing callers
    depend on this name).
    """
    if not isinstance(code, int):
        # Typo fixed ("interger" -> "integer").  The original recursed via
        # exit(0) here; log the failure path directly instead -- the
        # emitted log sequence is identical.
        message.logError("Exit code must be an integer.")
        message.logError("Exiting program with failure status.")
        sys.exit()
    if code == 0:
        message.logError("Exiting program with failure status.")
    elif code == 1:
        message.logDebug("Exiting program with success status.")
    else:
        message.logError(
            "Exiting program with unknown error status (" + str(code) + ")"
        )
    sys.exit()
| 2.90625 | 3 |
tests/test_static.py | schnitzelbub/bocadillo | 0 | 12759340 | <reponame>schnitzelbub/bocadillo
import pytest
from bocadillo import API, static
FILE_DIR = "js"
FILE_NAME = "foo.js"
FILE_CONTENTS = "console.log('foo!');"
def _create_asset(static_dir):
    """Write js/foo.js with known contents under *static_dir*; return the file."""
    js_dir = static_dir.mkdir(FILE_DIR)
    asset_file = js_dir.join(FILE_NAME)
    asset_file.write(FILE_CONTENTS)
    return asset_file
def test_assets_are_served_at_static_by_default(tmpdir_factory):
    """Assets inside static_dir are reachable under /static by default."""
    asset_dir = tmpdir_factory.mktemp("static")
    _create_asset(asset_dir)
    api = API(static_dir=str(asset_dir))
    served = api.client.get(f"/static/{FILE_DIR}/{FILE_NAME}")
    assert served.status_code == 200
    assert served.text == FILE_CONTENTS
def test_if_asset_does_not_exist_then_404(api: API):
    """Requesting a static asset that was never created yields a 404."""
    response = api.client.get(f"/static/{FILE_DIR}/{FILE_NAME}")
    assert response.status_code == 404
def test_customize_static_root(tmpdir_factory):
    """static_root moves the mount point away from the default /static."""
    asset_dir = tmpdir_factory.mktemp("static")
    _create_asset(asset_dir)
    api = API(static_dir=str(asset_dir), static_root="assets")
    # The default mount must be gone...
    assert api.client.get(f"/static/{FILE_DIR}/{FILE_NAME}").status_code == 404
    # ...and the asset served from the custom root instead.
    served = api.client.get(f"/assets/{FILE_DIR}/{FILE_NAME}")
    assert served.status_code == 200
    assert served.text == FILE_CONTENTS
def test_if_static_dir_is_none_then_no_assets_served(tmpdir_factory):
    """static_dir=None disables the default static file app entirely."""
    asset_dir = tmpdir_factory.mktemp("static")
    _create_asset(asset_dir)
    api = API(static_dir=None)
    assert api.client.get(f"/static/{FILE_DIR}/{FILE_NAME}").status_code == 404
def test_static_root_defaults_to_static_dir(tmpdir_factory):
    """With static_root=None, the mount path falls back to static_dir itself."""
    asset_dir = tmpdir_factory.mktemp("foo")
    _create_asset(asset_dir)
    api = API(static_dir=str(asset_dir), static_root=None)
    url = f"{asset_dir}/{FILE_DIR}/{FILE_NAME}"
    assert api.client.get(url).status_code == 200
def test_mount_extra_static_files_dirs(tmpdir_factory):
    """Extra static apps can be mounted manually via api.mount()."""
    asset_dir = tmpdir_factory.mktemp("staticfiles")
    _create_asset(asset_dir)
    api = API(static_dir=None)
    api.mount("assets", static(str(asset_dir)))
    served = api.client.get(f"/assets/{FILE_DIR}/{FILE_NAME}")
    assert served.status_code == 200
    assert served.text == FILE_CONTENTS
def test_if_static_dir_does_not_exist_then_no_files_mounted():
    """Constructing an API with a bogus static_dir emits no warnings."""
    with pytest.warns(None) as captured:
        API(static_dir="foo")
    assert len(captured) == 0
| 2.375 | 2 |
timeflow/tests/tests.py | trimailov/timeflow | 18 | 12759341 | <filename>timeflow/tests/tests.py<gh_stars>10-100
import datetime
import os
import pytest
import timeflow.utils
from timeflow import cli
FAKE_TIME = datetime.datetime(2015, 1, 1, 23, 59, 59)
@pytest.fixture
def patch_datetime_now(monkeypatch):
    """Freeze datetime.datetime.now() at FAKE_TIME for the duration of a test."""
    class FrozenDatetime(datetime.datetime):
        @classmethod
        def now(cls):
            return FAKE_TIME

    monkeypatch.setattr(datetime, 'datetime', FrozenDatetime)
def test_patch_datetime(patch_datetime_now):
    """Sanity check: the fixture makes now() return the frozen timestamp."""
    frozen = datetime.datetime.now()
    assert frozen == FAKE_TIME
def test_log(patch_datetime_now, tmpdir, capsys):
    """`log <message>` appends one timestamped line to the log file."""
    log_path = tmpdir.join("test_log.txt").strpath
    timeflow.utils.LOG_FILE = log_path
    cli_args = cli.create_parser().parse_args(['log', 'message'])
    cli_args.func(cli_args)
    with open(log_path, 'r') as f:
        lines = f.readlines()
    # Exactly one line, timestamped with the frozen clock.
    assert lines == ['2015-01-01 23:59: message\n']
def test_edit(patch_datetime_now, tmpdir, capsys):
    """Smoke-test the `edit` subcommand against the fixture log."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['edit'])
    cli_args.func(cli_args)
def test_stats_now(patch_datetime_now, capsys):
    """Bare `stats` reports today's totals plus time worked so far."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 2 hours 50 min\n"
                "Slack: 1 hour 10 min\n"
                "\n"
                "Today working for: 15 hours 59 min\n")
    assert captured.out == expected
def test_stats_yesterday(patch_datetime_now, capsys):
    """`stats --yesterday` reports totals for the day before FAKE_TIME."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--yesterday'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 2 hours 50 min\n"
                "Slack: 1 hour 10 min\n")
    assert captured.out == expected
def test_stats_day(patch_datetime_now, capsys):
    """`stats --day <date>` reports totals for that calendar day."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--day', '2015-01-01'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 2 hours 50 min\n"
                "Slack: 1 hour 10 min\n")
    assert captured.out == expected
def test_stats_this_week(patch_datetime_now, capsys):
    """`stats --this-week` aggregates the week containing FAKE_TIME."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--this-week'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 8 hours 50 min\n"
                "Slack: 3 hours 50 min\n")
    assert captured.out == expected
def test_stats_last_week(patch_datetime_now, capsys):
    """`stats --last-week` aggregates the week before FAKE_TIME's week."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--last-week'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 2 hours 50 min\n"
                "Slack: 1 hour 10 min\n")
    assert captured.out == expected
def test_stats_week(patch_datetime_now, capsys):
    """`stats --week <date>` aggregates the week containing that date."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--week', '2015-01-01'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 8 hours 50 min\n"
                "Slack: 3 hours 50 min\n")
    assert captured.out == expected
def test_stats_last_month(patch_datetime_now, capsys):
    """`stats --last-month` aggregates the month before FAKE_TIME's month."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--last-month'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 5 hours 40 min\n"
                "Slack: 2 hours 20 min\n")
    assert captured.out == expected
def test_stats_this_month(patch_datetime_now, capsys):
    """`stats --this-month` aggregates the month containing FAKE_TIME."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--this-month'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 2 hours 50 min\n"
                "Slack: 1 hour 10 min\n")
    assert captured.out == expected
def test_stats_month(patch_datetime_now, capsys):
    """`stats --month <n>` aggregates the n-th month of the current year."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--month', '1'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 6 hours\n"
                "Slack: 2 hours 40 min\n")
    assert captured.out == expected
def test_stats_from(patch_datetime_now, capsys):
    """`stats --from <date>` aggregates from that date onward."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--from', '2014-12-28'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 5 hours 40 min\n"
                "Slack: 2 hours 20 min\n")
    assert captured.out == expected
def test_stats_from_to(patch_datetime_now, capsys):
    """`stats --from <a> --to <b>` aggregates an explicit date range."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(
        ['stats', '--from', '2014-12-24', '--to', '2015-01-01'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = ("Work: 8 hours 30 min\n"
                "Slack: 3 hours 30 min\n")
    assert captured.out == expected
def test_stats_now_report(patch_datetime_now, capsys):
    """`stats --report` groups today's entries by project with totals."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--report'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = (
        "------------------------------ WORK -------------------------------\n"
        "Django:\n"
        " 1 hour 35 min: read documentation\n"
        " Total: 1 hour 35 min\n"
        "\n"
        "Timeflow:\n"
        " 1 hour 15 min: start project\n"
        " Total: 1 hour 15 min\n"
        "------------------------------ SLACK ------------------------------\n"
        "Breakfast:\n"
        " 0 hours 45 min: Breakfast\n"
        " Total: 0 hours 45 min\n"
        "\n"
        "Slack:\n"
        " 0 hours 25 min: watch YouTube\n"
        " Total: 0 hours 25 min\n"
    )
    assert captured.out == expected
def test_stats_now_report_as_gtimelog(patch_datetime_now, capsys):
    """`stats --report-as-gtimelog` emits the gtimelog-style summary."""
    fixture_dir = os.path.dirname(os.path.realpath(__file__))
    timeflow.utils.LOG_FILE = fixture_dir + '/fake_log.txt'
    cli_args = cli.create_parser().parse_args(['stats', '--report-as-gtimelog'])
    cli_args.func(cli_args)  # stats prints its report to stdout
    captured = capsys.readouterr()
    expected = (
        " time\n"
        "Django: read documentation 1 hour 35 min\n"
        "Timeflow: start project 1 hour 15 min"
        "\n"
        "\n"
        "Total work done: 2 hours 50 min"
        "\n"
        "\n"
        "By category:"
        "\n"
        "\n"
        "Django 1 hour 35 min\n"
        "Timeflow 1 hour 15 min\n\n"
    )
    assert captured.out == expected
| 2.328125 | 2 |
cuhk03/model.py | cwpeng-cn/TorchReID | 0 | 12759342 | <gh_stars>0
from . import init_env
from torch import nn
from torch.nn import functional as F
from torch.nn import init
import torchvision
import torch
import zipfile
from resnet_ibn_b import *
from reid.utils.model_save_restore import *
class STN(nn.Module):
    """Spatial transformer over a 1024-channel feature map.

    Predicts a 2x3 affine transform from globally-pooled features and
    resamples the input feature map with it.  Attribute names
    (localization, fc_loc) are part of the serialized checkpoint
    interface -- do not rename.
    """
    def __init__(self):
        super(STN, self).__init__()
        # Localization head: conv + pooled features used to regress theta.
        self.localization = nn.Sequential(
            nn.Conv2d(1024, 2048, kernel_size=3),
            nn.BatchNorm2d(2048),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        # Regressor producing the 6 affine parameters.
        self.fc_loc = nn.Sequential(
            nn.Linear(2048, 512),
            nn.ReLU(True),
            nn.Linear(512, 2 * 3),
        )
        # Initialize the weights/bias with identity transformation
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))
    # Spatial transformer network forward function
    def forward(self, x):
        # x: (N, 1024, H, W) feature map.
        xs = self.localization(x)
        xs = F.adaptive_avg_pool2d(xs, (1, 1))
        xs = xs.view(xs.size(0), -1)  # N,4096
        theta = self.fc_loc(xs)  # N,6
        theta = theta.view(-1, 2, 3)  # N,2,3
        # Build a sampling grid from theta and warp x with it.
        grid = F.affine_grid(theta, x.size())
        x = F.grid_sample(x, grid)
        return x
class ResNet(nn.Module):
    """Two-branch re-id backbone: a resnet50-IBN-b trunk plus an STN-warped
    copy of layer4.

    Training forward returns (feat, logits, feat_stn, logits_stn); eval
    forward returns the L2-normalized concatenation of both embeddings
    (N, 2 * num_features).  Attribute names are part of the serialized
    checkpoint interface -- do not rename.
    """
    # NOTE(review): this factory is unused in this file -- presumably kept
    # from the generic upstream implementation; the trunk below is always
    # resnet50_ibn_b regardless of `depth`.
    __factory = {
        18: torchvision.models.resnet18,
        34: torchvision.models.resnet34,
        50: torchvision.models.resnet50,
        101: torchvision.models.resnet101,
        152: torchvision.models.resnet152,
    }
    def __init__(self, depth=50, pretrained=True, cut_at_pooling=False,
                 num_features=1024, dropout=0.5, num_classes=0):
        """num_features > 0 adds Linear+BN embedding heads; num_classes > 0
        adds classification heads; cut_at_pooling returns raw layer4 maps."""
        super(ResNet, self).__init__()
        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling
        # Construct base (pretrained) resnet
        base = resnet50_ibn_b(pretrained=pretrained)
        base_stn = resnet50_ibn_b(pretrained=pretrained)
        self.stn = STN()
        self.conv1 = base.conv1
        self.bn1 = base.bn1
        self.relu = base.relu
        self.maxpool = base.maxpool
        self.layer1 = base.layer1
        self.layer2 = base.layer2
        self.layer3 = base.layer3
        self.layer4 = base.layer4
        self.layer4_stn = base_stn.layer4
        # Set layer4's first-block conv strides to 1 in both branches to
        # keep a larger spatial resolution before pooling.
        for mo in self.layer4[0].modules():
            if isinstance(mo, nn.Conv2d):
                mo.stride = (1, 1)
        for mo in self.layer4_stn[0].modules():
            if isinstance(mo, nn.Conv2d):
                mo.stride = (1, 1)
        self.mmaxpool = nn.AdaptiveMaxPool2d((1, 1))
        if not self.cut_at_pooling:
            self.num_features = num_features
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes
            out_planes = base.fc.in_features
            # Append new layers
            if self.has_embedding:
                # Embedding head for the plain branch.
                feat = nn.Linear(out_planes, self.num_features)
                feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal_(feat.weight, mode='fan_out')
                init.constant_(feat.bias, 0)
                init.normal_(feat_bn.weight, 1, 0.02)
                init.constant_(feat_bn.bias, 0.0)
                embed_layer = [feat, feat_bn]
                self.embed_layer = nn.Sequential(*embed_layer)
                # Separate embedding head for the STN branch.
                feat = nn.Linear(out_planes, self.num_features)
                feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal_(feat.weight, mode='fan_out')
                init.constant_(feat.bias, 0)
                init.normal_(feat_bn.weight, 1, 0.02)
                init.constant_(feat_bn.bias, 0.0)
                embed_layer = [feat, feat_bn]
                self.embed_layer_stn = nn.Sequential(*embed_layer)
            else:
                # Change the num_features to CNN output channels
                self.num_features = out_planes
            if self.dropout > 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_classes > 0:
                # Per-branch classification heads.
                self.last_fc = nn.Linear(self.num_features, self.num_classes)
                init.normal_(self.last_fc.weight, std=0.001)
                init.constant_(self.last_fc.bias, 0.0)
                self.last_fc_stn = nn.Linear(self.num_features, self.num_classes)
                init.normal_(self.last_fc_stn.weight, std=0.001)
                init.constant_(self.last_fc_stn.bias, 0.0)
        if not self.pretrained:
            self.reset_params()
    def forward(self, x):
        # Shared trunk up to layer3, then the two layer4 branches.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x_stn = self.stn(x)
        x = self.layer4(x)
        x_stn = self.layer4_stn(x_stn)
        # If testing, use this (masked-pooling) result as the feature
        # (disabled alternative eval path kept for reference):
        # if not self.training:
        #     result_sum=torch.sum(x,1) # N,H,W
        #     result_mean=torch.mean(result_sum,1).mean(1) # N
        #     result_mean=result_mean.view(-1,1,1)
        #     mask=result_sum>result_mean #N,H,W
        #     mask=mask.unsqueeze(1).repeat(1,2048,1,1).float() #N,C,H,W
        #     x2=x*mask #N,C,H,W
        #     x2=self.mmaxpool(x2) #N,C,1,1
        #     x2 = x2.view(x2.size(0), x2.size(1)) #N,C
        #     triplet_out = self.embed_layer(x2)
        #     result_sum_stn=torch.sum(x_stn,1) # N,H,W
        #     result_mean_stn=torch.mean(result_sum_stn,1).mean(1) # N
        #     result_mean_stn=result_mean_stn.view(-1,1,1)
        #     mask_stn=result_sum_stn>result_mean_stn #N,H,W
        #     mask_stn=mask_stn.unsqueeze(1).repeat(1,2048,1,1).float() #N,C,H,W
        #     x2_stn=x_stn*mask_stn #N,C,H,W
        #     x2_stn=self.mmaxpool(x2_stn) #N,C,1,1
        #     x2_stn = x2_stn.view(x2_stn.size(0), x2_stn.size(1)) #N,C
        #     triplet_out_stn = self.embed_layer_stn(x2_stn)
        #     triplet_out=self.normalize(triplet_out)
        #     triplet_out_stn=self.normalize(triplet_out_stn)
        #     return torch.cat((triplet_out,triplet_out_stn),1) #N,2C
        if self.cut_at_pooling:
            return x
        # Global max-pool each branch to (N, C).
        x = F.max_pool2d(x, x.size()[2:]).view(x.size()[:2])
        x_stn = F.max_pool2d(x_stn, x_stn.size()[2:]).view(x_stn.size()[:2])
        if self.has_embedding:
            triplet_out = self.embed_layer(x)
            triplet_out_stn = self.embed_layer_stn(x_stn)
        if not self.training:
            # Eval: return normalized, concatenated embeddings.
            triplet_out = self.normalize(triplet_out)
            triplet_out_stn = self.normalize(triplet_out_stn)
            return torch.cat((triplet_out, triplet_out_stn), 1)  # N,2C
        if self.num_classes > 0:
            x = self.last_fc(triplet_out)
            x_stn = self.last_fc_stn(triplet_out_stn)
        return triplet_out, x, triplet_out_stn, x_stn
    def reset_params(self):
        """Kaiming/normal initialization for all submodules (used when
        pretrained=False)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
    def normalize(self, x, axis=-1):
        """Normalizing to unit length along the specified dimension.
        Args:
          x: pytorch Variable
        Returns:
          x: pytorch Variable, same shape as input
        """
        x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
        return x
class MNet(nn.Module):
    """Wrapper that rebuilds the two-branch re-id model around an existing
    trained `net` (a ResNet instance from above), reusing its trunk, STN,
    and embedding heads but creating fresh classification heads.

    Forward contract matches ResNet: (feat, logits, feat_stn, logits_stn)
    in training, normalized concatenated embeddings (N, 2C) in eval.
    """
    def __init__(self, net, depth=50, pretrained=True, cut_at_pooling=False,
                 num_features=512, dropout=0.5, num_classes=0):
        super(MNet, self).__init__()
        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling
        # Construct base (pretrained) resnet
        base = net
        # Share all trunk modules with the donor network.
        self.conv1 = base.conv1
        self.bn1 = base.bn1
        self.relu = base.relu
        self.maxpool = base.maxpool
        self.layer1 = base.layer1
        self.layer2 = base.layer2
        self.layer3 = base.layer3
        self.layer4 = base.layer4
        self.layer4_stn = base.layer4_stn
        self.stn = base.stn
        self.mmaxpool = base.mmaxpool
        if not self.cut_at_pooling:
            self.num_features = num_features
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes
            out_planes = 2048
            # Append new layers
            if self.has_embedding:
                # Reuse the donor's embedding heads (weights shared).
                self.embed_layer = base.embed_layer
                self.embed_layer_stn = base.embed_layer_stn
            else:
                # Change the num_features to CNN output channels
                self.num_features = 2048
            if self.dropout > 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_classes > 0:
                # Fresh classification heads for the new label space.
                self.last_fc = nn.Linear(self.num_features, self.num_classes)
                init.normal_(self.last_fc.weight, std=0.001)
                init.constant_(self.last_fc.bias, 0.0)
                self.last_fc_stn = nn.Linear(self.num_features, self.num_classes)
                init.normal_(self.last_fc_stn.weight, std=0.001)
                init.constant_(self.last_fc_stn.bias, 0.0)
        if not self.pretrained:
            self.reset_params()
    def forward(self, x):
        # Shared trunk, then plain and STN-warped layer4 branches.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x_stn = self.stn(x)
        x = self.layer4(x)
        x_stn = self.layer4_stn(x_stn)
        if self.cut_at_pooling:
            return x
        x = F.max_pool2d(x, x.size()[2:]).view(x.size()[:2])
        x_stn = F.max_pool2d(x_stn, x_stn.size()[2:]).view(x_stn.size()[:2])
        if self.has_embedding:
            triplet_out = self.embed_layer(x)
            triplet_out_stn = self.embed_layer_stn(x_stn)
        if not self.training:
            # Eval: return normalized, concatenated embeddings.
            triplet_out = self.normalize(triplet_out)
            triplet_out_stn = self.normalize(triplet_out_stn)
            return torch.cat((triplet_out, triplet_out_stn), 1)  # N,2C
        if self.num_classes > 0:
            x = self.last_fc(triplet_out)
            x_stn = self.last_fc_stn(triplet_out_stn)
        return triplet_out, x, triplet_out_stn, x_stn
    def reset_params(self):
        """Kaiming/normal initialization for all submodules (used when
        pretrained=False)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
    def normalize(self, x, axis=-1):
        """Normalizing to unit length along the specified dimension.
        Args:
          x: pytorch Variable
        Returns:
          x: pytorch Variable, same shape as input
        """
        x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
        return x
def get_model():
    """Instantiate the ResNet, restore checkpoint epoch 149 from './', and move it to the GPU."""
    backbone = ResNet(num_classes=4101, num_features=1024)
    return restore_network("./", 149, backbone).cuda()
| 1.992188 | 2 |
seqcluster/libs/seqviz.py | kkarolis/seqcluster | 33 | 12759343 | JSVIZ1='''<script type="text/javascript">
function go() {
var zoomCanvas = document.getElementById('canvas');
origZoomChart = new Scribl(zoomCanvas, 100);
//origZoomChart.scale.min = 0;
// origZoomChart.scale.max = 12000;
'''
# Tail of the inline Scribl setup script: enables scrolling and renders the chart.
JSVIZ2='''
origZoomChart.scrollable = true;
origZoomChart.scrollValues = [10, 250];
origZoomChart.draw();
}
</script>
'''
# HTML canvas container that the Scribl chart draws into.
CANVAS=''' <div id="container">
<canvas id="canvas" width="940px" height="400px" style="margin-left:auto; margin-right:auto"></canvas>
</div>
'''
# Template for one Scribl sequence feature: filled with (position, length, sequence).
SEQ='''origZoomChart.addFeature( new Seq('human', %s, %s, "%s") );'''
def addseq(pos, len, seq):
    """Render the Scribl `addFeature` JS line for a sequence at `pos` with length `len`."""
    # NOTE: `len` shadows the builtin inside this function; the parameter name is
    # kept for interface compatibility with existing callers.
    return SEQ % (pos, len, seq)
| 2.6875 | 3 |
dispenser-server.py | nineclicks/treat-dispenser | 0 | 12759344 | import RPi.GPIO as GPIO
import time
from flask import Flask, render_template
import threading
# Flask app serving the treat-dispenser UI/API.
# NOTE(review): 'dispener-server' contains a typo; kept as-is since it is a runtime string.
app = Flask('dispener-server')
# Serializes motor access so concurrent /step requests cannot interleave coil writes.
lock = threading.Lock()
# Timestamp of the last dispense; used together with TREAT_MIN_SECONDS to rate-limit.
last_treat = 0
# Minimum number of seconds between two dispenses.
TREAT_MIN_SECONDS = 2

# Physical (board) pin numbering; the four stepper coil control pins, driven low initially.
GPIO.setmode(GPIO.BOARD)
control_pins = [7,11,13,15]
for pin in control_pins:
    GPIO.setup(pin, GPIO.OUT)
    GPIO.output(pin, 0)
# Eight half-step coil patterns (one electrical cycle) for the 4-wire stepper.
halfstep_seq = [
    [0,1,0,1],
    [0,0,0,1],
    [1,0,0,1],
    [1,0,0,0],
    [1,0,1,0],
    [0,0,1,0],
    [0,1,1,0],
    [0,1,0,0],
]

def get_step(n):
    """Return the coil pattern for absolute half-step index `n` (wraps modulo the cycle length)."""
    return halfstep_seq[n % len(halfstep_seq)]
# Current absolute half-step position of the motor (advanced by step()).
pos = 0

def stop():
    """Drive all four control pins high, de-energizing the stepper coils."""
    # NOTE(review): this assumes the coil driver is active-low (see the 1-s[pin]
    # inversion in step()) — confirm against the actual wiring.
    for pin in range(4):
        GPIO.output(control_pins[pin], 1)
def step():
    """Dispense one treat: advance the stepper 33 half-steps, rate-limited.

    Thread-safe via the module-level `lock`; silently returns if called again
    within TREAT_MIN_SECONDS of the previous dispense.
    """
    with lock:
        global pos
        global last_treat
        # Debounce: ignore requests arriving too soon after the last treat.
        if time.time() - last_treat < TREAT_MIN_SECONDS:
            return
        last_treat = time.time()
        # Advance 33 half-steps from the current absolute position.
        for i in range(pos, pos+33):
            s = get_step(i)
            # Pins are driven inverted (1 - s) relative to the pattern table.
            for pin in range(len(control_pins)):
                GPIO.output(control_pins[pin], 1-s[pin])
            time.sleep(.005)
        time.sleep(.1)
        stop()
        pos = pos + 33
        # NOTE(review): the purpose of skipping positions that end in 99 is
        # unclear — presumably a mechanical alignment tweak; confirm on hardware.
        if pos % 100 == 99:
            pos += 1
@app.route('/')
def index():
    """Serve the dispenser's control page."""
    return render_template('index.html')
@app.route('/step', methods=['POST'])
def call_step():
    """HTTP trigger: dispense one treat (no-op when rate-limited) and acknowledge."""
    step()
    return 'ok'
| 2.765625 | 3 |
keywords.py | ChinaKevinLi/PixivURLs | 0 | 12759345 | <reponame>ChinaKevinLi/PixivURLs<filename>keywords.py<gh_stars>0
import sys
from pixivpy3 import AppPixivAPI
# SECURITY: hard-coded credentials — these should come from environment
# variables or a config file that is kept out of version control.
username="pixiv username"
password="<PASSWORD>"
def get_imageUrls(aapi, keyword, minbook):
    """Yield illusts matching `keyword` with at least `minbook` bookmarks.

    As a side effect, appends each qualifying illust's original image URL(s)
    to the file '<keyword>.txt'. Pages through search results until there is
    no next page or pagination fails.

    :param aapi: logged-in AppPixivAPI client.
    :param keyword: search term.
    :param minbook: minimum bookmark count (string or int; converted with int()).
    """
    json_result = aapi.search_illust(keyword)
    while 1:
        try:
            # Probe that this page actually carries an `illusts` list.
            # NOTE(review): `nLen` is otherwise unused; on failure this only
            # logs and the for-loop below will raise again — confirm intended.
            nLen = len(json_result.illusts)
        except Exception as e:
            print(e)
        for illust in json_result.illusts:
            #yield illust
            if illust.total_bookmarks >= int(minbook):
                yield illust
                # Single-page static illustration: one original URL.
                if illust.page_count == 1 and illust.type != 'ugoira':
                    print(illust.meta_single_page.original_image_url)
                    print('Bookmarks:' + str(illust.total_bookmarks))
                    with open(keyword + '.txt','a+') as f:
                        f.write(illust.meta_single_page.original_image_url+'\n')
                    pass
                # Multi-page work: record every page's original URL.
                elif illust.page_count > 1:
                    image_urls = [
                        page.image_urls.original
                        for page in illust.meta_pages
                    ]
                    print(image_urls)
                    print('Bookmarks:' + str(illust.total_bookmarks))
                    for url in image_urls:
                        with open(keyword + '.txt','a+') as f:
                            f.write(url+'\n')
                else:
                    # Single-page ugoira (animation) or other: nothing to record.
                    image_urls = []
        try:
            # Follow pagination; stop when there is no next page.
            next_qs = aapi.parse_qs(json_result.next_url)
            if next_qs is None:
                break
            json_result = aapi.search_illust(**next_qs)
        except Exception as e:
            print(e)
            break
    return 1
def main():
    """CLI entry point: log in, search argv[1], and report how many unique
    illusts have at least argv[2] bookmarks (URLs are written by the generator)."""
    api = AppPixivAPI()
    api.login(username, password)
    seen_ids = set()
    for illust in get_imageUrls(api, sys.argv[1], sys.argv[2]):
        seen_ids.add(illust.id)
    print('Images Found:' + str(len(seen_ids)))

if __name__ == '__main__':
    main()
| 2.421875 | 2 |
client.py | YIYANGCAI/Remote-Image-Processing-Platform | 1 | 12759346 | <reponame>YIYANGCAI/Remote-Image-Processing-Platform
# client.py
import numpy as np
import sys
import pickle, struct
from socket import *
import cv2
def send_from(arr, dest):
    """Send the raw bytes of buffer `arr` over socket-like `dest`, looping until fully sent."""
    remaining = memoryview(arr).cast('B')
    while len(remaining):
        sent = dest.send(remaining)
        remaining = remaining[sent:]
def recv_into(arr, source):
    """Fill buffer `arr` completely with bytes received from socket-like `source`."""
    unfilled = memoryview(arr).cast('B')
    while len(unfilled):
        received = source.recv_into(unfilled)
        unfilled = unfilled[received:]
# Connect to the remote image-processing server.
c = socket(AF_INET, SOCK_STREAM)
c.connect(('10.0.11.46', 25000))
flag = True

# Load the test image and normalize it to a fixed 300x300 float array in [0, 1].
frame = cv2.imread("0.jpg")
frame = cv2.resize(frame, (300, 300))
frame = frame/255.0
#cv2.imshow("before", client_memory)
#cv2.waitKey(0)

while flag:
    print("--------------------------------------------------")
    # client_memory = frame
    # allocate an area
    # each epoch the area must be re-allocated, or the
    # previously received result would be reused (original comment truncated).
    client_memory = np.zeros(shape = (300, 300, 3), dtype = float)
    cv2.imshow("before", frame)
    #cv2.waitKey(0)
    # Round trip: send the frame, then receive the processed image in place.
    send_from(frame, c)
    recv_into(client_memory, c)
    cv2.imshow("after", client_memory)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    #print(client_memory)
    # Ask the user whether to continue; the answer is also forwarded to the server.
    msg = input("Would you like to shutdown the connection?[y/n]").strip()
    if msg == 'y':
        print("Connection closed")
        c.send(msg.encode())
        flag = False
    else:
        c.send(msg.encode())
c.close()
| 2.921875 | 3 |
anasymod/sources.py | SubjeBilisim/anasymod | 0 | 12759347 | import os
from anasymod.generators.codegen import CodeGenerator
from anasymod.util import back2fwd, expand_searchpaths
from typing import Union
class ConfigFileObj(CodeGenerator):
    """Base container that tracks a list of source-file paths declared in a config file."""

    def __init__(self, files, config_path, name):
        """
        :param files: path or list of paths to sources; may be relative/absolute
            and may contain wildcards.
        :param config_path: path to the config file the sources were declared in
            (used as reference when resolving relative paths).
        :param name: identifier for this source collection.
        :raises TypeError: if `files` is neither a str nor a list.
        """
        super().__init__()
        # Paths to source files, normalized to a list of strings.
        self.files = None
        if isinstance(files, list):
            self.files = files
        elif isinstance(files, str):
            self.files = [files]
        else:
            # BUGFIX: the old message blamed a `config_paths` variable and the
            # SubConfig class; this check validates `files` in ConfigFileObj.
            raise TypeError(f"Type of files variable provided to ConfigFileObj class is neither str nor list, is:{type(files)} instead.")
        self.config_path = config_path
        self.name = name

    def expand_paths(self):
        """
        Expand environment variables in provided list of paths.
        Check if path is absolute or relative, in case of a relative path, it will be expanded to an absolute path,
        whereas the folder of the config_file will be used to complete the path.
        """
        self.files = expand_searchpaths(paths=self.files, rel_path_reference=os.path.dirname(self.config_path))
class SubConfig(ConfigFileObj):
    """Reference to nested config file(s) that should also be loaded."""
    def __init__(self, files: Union[list, str], name, config_path=None):
        super().__init__(files=files, config_path=config_path, name=name)
class Sources(ConfigFileObj):
    """ConfigFileObj that is additionally bound to a tool fileset."""

    def __init__(self, files: list, fileset, config_path, name):
        super().__init__(files=files, config_path=config_path, name=name)
        # Fileset the source shall be associated with.
        self.fileset = fileset

    def generate(self):
        """No-op by default; subclasses emit their output here."""
        pass

    def set_property(self, name, value, objects):
        """Emit a `set_property` TCL command for the given objects."""
        self.writeln(f"set_property -name {name} -value {value} -objects {objects}")
class VerilogSource(Sources):
    """
    Container for source of type Verilog/SystemVerilog.

    :param files: Path to source file, could be relative/absolute and contain wildcards
    :type files: str
    """
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None, version=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)

        self.version = version
        """ type(str) : Verilog version, that shall be used when compiling sources. """

    def generate(self):
        # Emit the tracked file list through the CodeGenerator text buffer.
        self.text = self.files
        return self.dump()
class VerilogHeader(Sources):
    """Verilog header files; can tag them with the 'Verilog Header' file type."""

    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)

    def set_header_files(self):
        """Mark all tracked files as Verilog headers via a `set_property` command."""
        quoted = ['"' + back2fwd(path) + '"' for path in self.files]
        file_list = '{ ' + ' '.join(quoted) + ' }'
        self.set_property('file_type', '{Verilog Header}', f'[get_files {file_list}]')

    def generate(self):
        self.dump()
class VHDLSource(Sources):
    """VHDL source file(s), optionally bound to a library and language version."""
    def __init__(self, files: Union[list, str], name, library=None, fileset=r"default", config_path=None, version=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)

        self.library = library
        """ type(str) : Library, the source shall be associated with when compiling. """

        self.version = version
        """ type(str) : VHDL version, that shall be used when compiling sources. """

    def generate(self):
        self.dump()
class EDIFFile(Sources):
    """Netlist source in EDIF format."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)

class FirmwareFile(Sources):
    """Firmware file associated with the design."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)

class XCIFile(Sources):
    """Xilinx IP core description (.xci) file."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)

class TCLFile(Sources):
    """TCL script source."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)

class XDCFile(Sources):
    """Xilinx design constraints (.xdc) file."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)

# The following containers take a single path (wrapped into a list for the base class).

class MEMFile(Sources):
    """Memory initialization (.mem) file."""
    def __init__(self, files: str, name, fileset=r"default", config_path=None):
        super().__init__(files=[files], fileset=fileset, config_path=config_path, name=name)

class BDFile(Sources):
    """Block design (.bd) file."""
    def __init__(self, files: str, name, fileset=r"default", config_path=None):
        super().__init__(files=[files], fileset=fileset, config_path=config_path, name=name)

class IPRepo(Sources):
    """Path to an IP repository."""
    def __init__(self, files: str, name, fileset=r"default", config_path=None):
        super().__init__(files=[files], fileset=fileset, config_path=config_path, name=name)
class FunctionalModel(Sources):
    """Functional model source; also tracks the HDL files generated for it."""
    def __init__(self, files: str, name, fileset=r"default", config_path=None):
        super().__init__(files=[files], fileset=fileset, config_path=config_path, name=name)
        # Wildcard paths to the generated HDL sources; set by set_gen_files_path().
        self.gen_files = None

    def set_gen_files_path(self, hdl_dir_root):
        """
        Set the result HDL path, where generated files can be found after generation was conducted.

        :param hdl_dir_root: Root directory for gen_files, this is usually set in emu config.
        """
        # TODO: Have the model generator declare what files should be included in "gen_files"
        # It is possible that not everything in the hdl_dir_root is an HDL source (e.g.,
        # temporary files generated during processing, memory files that are included, etc.)
        self.gen_files = [os.path.join(hdl_dir_root, self.fileset, self.name, '*.*v')]

    def expand_gen_files_path(self):
        """
        Expand environment variables in provided list of paths.
        Check if path is absolute or relative, in case of a relative path, it will be expanded to an absolute path,
        whereas the folder of the config_file will be used to complete the path.
        """
        self.gen_files = expand_searchpaths(paths=self.gen_files, rel_path_reference=os.path.dirname(self.config_path))
manifold_embedding/tsne_rossant.py | laputian/dml | 0 | 12759348 | <filename>manifold_embedding/tsne_rossant.py
# see https://www.oreilly.com/learning/an-illustrated-introduction-to-the-t-sne-algorithm
# The original code does not work due to https://github.com/scikit-learn/scikit-learn/issues/6450 and a inaccessible websites
# for writing files.
# Fixed here.
# That's an impressive list of imports.
import numpy as np
from numpy import linalg
from numpy.linalg import norm
from scipy.spatial.distance import squareform, pdist
# We import sklearn.
import sklearn
from sklearn.manifold import TSNE
from sklearn.datasets import load_digits
# We'll hack a bit with the t-SNE code in sklearn 0.15.2.
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold.t_sne import (_joint_probabilities)
# Random state.
# Random state.
RS = 20150101

# We'll use matplotlib for graphics.
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects

# We import seaborn to make nice plots.
import seaborn as sns
sns.set_style('darkgrid')
sns.set_palette('muted')
sns.set_context("notebook", font_scale=1.5,
                rc={"lines.linewidth": 2.5})

# Output directory for all generated figures and animations.
imgpath='../data/pics/'

# We'll generate an animation with matplotlib and moviepy.
from moviepy.video.io.bindings import mplfig_to_npimage
import moviepy.editor as mpy

digits = load_digits()
print(digits['DESCR'])
# Preview the first ten digit images in a 2x5 grid.
nrows, ncols = 2, 5
plt.figure(figsize=(6,3))
plt.gray()
for i in range(ncols * nrows):
    ax = plt.subplot(nrows, ncols, i + 1)
    ax.matshow(digits.images[i,...])
    plt.xticks([]); plt.yticks([])
    plt.title(digits.target[i])
plt.savefig(imgpath + 'digits-generated.png', dpi=150)

# We first reorder the data points according to the handwritten numbers.
X = np.vstack([digits.data[digits.target==i]
               for i in range(10)])
y = np.hstack([digits.target[digits.target==i]
               for i in range(10)])

# 2-D t-SNE projection of the digits.
digits_proj = TSNE(random_state=RS).fit_transform(X)
def scatter(x, colors):
    """Scatter-plot the 2-D embedding `x`, colored by the digit labels `colors`.

    Returns the figure, axes, scatter artist, and the list of digit-label texts
    (the artist and texts are reused later to animate the embedding).
    """
    # We choose a color palette with seaborn.
    palette = np.array(sns.color_palette("hls", 10))

    # We create a scatter plot.
    f = plt.figure(figsize=(8, 8))
    ax = plt.subplot(aspect='equal')
    # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40,
                    c=palette[colors.astype(int)])
    plt.xlim(-25, 25)
    plt.ylim(-25, 25)
    ax.axis('off')
    ax.axis('tight')

    # We add the labels for each digit.
    txts = []
    for i in range(10):
        # Position of each label.
        xtext, ytext = np.median(x[colors == i, :], axis=0)
        txt = ax.text(xtext, ytext, str(i), fontsize=24)
        # White stroke behind the text keeps labels readable over the points.
        txt.set_path_effects([
            PathEffects.Stroke(linewidth=5, foreground="w"),
            PathEffects.Normal()])
        txts.append(txt)

    return f, ax, sc, txts
def _joint_probabilities_constant_sigma(D, sigma):
P = np.exp(-D**2/2 * sigma**2)
P /= np.sum(P, axis=1)
return P
scatter(digits_proj, y)
plt.savefig(imgpath + 'digits_tsne-generated.png', dpi=120)

# Pairwise distances between all data points.
D = pairwise_distances(X, squared=True)
# Similarity with constant sigma.
P_constant = _joint_probabilities_constant_sigma(D, .002)
# Similarity with variable sigma.
P_binary = _joint_probabilities(D, 30., False)
# The output of this function needs to be reshaped to a square matrix.
P_binary_s = squareform(P_binary)

# Side-by-side heatmaps (every 10th point) of raw distances vs. both affinity variants.
plt.figure(figsize=(12, 4))
pal = sns.light_palette("blue", as_cmap=True)
plt.subplot(131)
plt.imshow(D[::10, ::10], interpolation='none', cmap=pal)
plt.axis('off')
plt.title("Distance matrix", fontdict={'fontsize': 16})
plt.subplot(132)
plt.imshow(P_constant[::10, ::10], interpolation='none', cmap=pal)
plt.axis('off')
plt.title("$p_{j|i}$ (constant $\sigma$)", fontdict={'fontsize': 16})
plt.subplot(133)
plt.imshow(P_binary_s[::10, ::10], interpolation='none', cmap=pal)
plt.axis('off')
plt.title("$p_{j|i}$ (variable $\sigma$)", fontdict={'fontsize': 16})
plt.savefig(imgpath + 'similarity-generated.png', dpi=120)
# This list will contain the positions of the map points at every iteration.
positions = []
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
positions.append(p.copy())
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
# Monkey-patch sklearn's internal gradient descent so every iteration's
# positions are recorded for the animation.
# NOTE(review): the private path `sklearn.manifold.t_sne` matches the old
# sklearn this script targets; newer releases renamed the module `_t_sne`.
sklearn.manifold.t_sne._gradient_descent = _gradient_descent

X_proj = TSNE(random_state=RS).fit_transform(X)

# BUGFIX: np.dstack requires a proper sequence — passing a bare generator was
# deprecated in NumPy 1.16 and rejected later; materialize the list first.
X_iter = np.dstack([position.reshape(-1, 2)
                    for position in positions])

f, ax, sc, txts = scatter(X_iter[..., -1], y)
def make_frame_mpl(t):
    """moviepy frame callback: render the embedding at time `t` (40 iterations/sec)."""
    i = int(t*40)
    x = X_iter[..., i]
    # Move the scatter points and each digit label to the iteration-i positions.
    sc.set_offsets(x)
    for j, txt in zip(range(10), txts):
        xtext, ytext = np.median(x[y == j, :], axis=0)
        txt.set_x(xtext)
        txt.set_y(ytext)
    return mplfig_to_npimage(f)

animation = mpy.VideoClip(make_frame_mpl,
                          duration=X_iter.shape[2]/40.)
animation.write_gif(imgpath + "animation-94a2c1ff.gif", fps=20)
# Student-t similarities of the final embedding, displayed as a heatmap.
n = 1. / (pdist(X_iter[..., -1], "sqeuclidean") + 1)
Q = n / (2.0 * np.sum(n))
Q = squareform(Q)

f = plt.figure(figsize=(6, 6))
ax = plt.subplot(aspect='equal')
im = ax.imshow(Q, interpolation='none', cmap=pal)
plt.axis('tight')
plt.axis('off')
def make_frame_mpl(t):
    """moviepy frame callback (redefined): similarity-matrix heatmap at time `t`."""
    i = int(t*40)
    # Recompute the Student-t similarity matrix for iteration i.
    n = 1. / (pdist(X_iter[..., i], "sqeuclidean") + 1)
    Q = n / (2.0 * np.sum(n))
    Q = squareform(Q)
    im.set_data(Q)
    return mplfig_to_npimage(f)

animation = mpy.VideoClip(make_frame_mpl,
                          duration=X_iter.shape[2]/40.)
animation.write_gif(imgpath + "animation_matrix-da2d5f1b.gif", fps=20)
# Curse-of-dimensionality demo: for points drawn uniformly inside a D-ball,
# distances from the origin concentrate near the surface as D grows.
# NOTE: the loop variable `D` shadows the distance matrix defined earlier.
npoints = 1000
plt.figure(figsize=(15, 4))
for i, D in enumerate((2, 5, 10)):
    # Normally distributed points.
    u = np.random.randn(npoints, D)
    # Now on the sphere.
    u /= norm(u, axis=1)[:, None]
    # Uniform radius.
    r = np.random.rand(npoints, 1)
    # Uniformly within the ball.
    points = u * r**(1./D)

    # Plot.
    ax = plt.subplot(1, 3, i+1)
    ax.set_xlabel('Ball radius')
    if i == 0:
        ax.set_ylabel('Distance from origin')
    ax.hist(norm(points, axis=1),
            bins=np.linspace(0., 1., 50))
    ax.set_title('D=%d' % D, loc='left')
plt.savefig(imgpath + "spheres-generated.png", dpi=100, bbox_inches="tight")
# Compare the Gaussian (high-dimensional similarities) with the heavy-tailed
# Cauchy/Student-t (embedding similarities) used by t-SNE.
z = np.linspace(0., 5., 1000)
gauss = np.exp(-z**2)
cauchy = 1/(1+z**2)
plt.plot(z, gauss, label='Gaussian distribution')
plt.plot(z, cauchy, label='Cauchy distribution')
plt.legend()
plt.savefig(imgpath + 'distributions-generated.png', dpi=100)
tests/import/gen_context.py | learnforpractice/micropython-cpp | 692 | 12759349 | <gh_stars>100-1000
import gen_context2
# Module-level name that the generator below looks up when resumed.
GLOBAL = "GLOBAL"

def gen():
    # Regression check: a generator resolves globals from its *defining* module,
    # even when the frame is advanced from another module (gen_context2.call).
    print(GLOBAL)
    yield 1

gen_context2.call(gen())
| 1.515625 | 2 |
pub_data_visualization/auctions/plot/subplot/__init__.py | l-leo/pub-data-visualization | 1 | 12759350 |
"""
Module for the subplots of auctions data.
"""
from .price import * | 0.96875 | 1 |