from pathlib import Path
import geopandas as gp
import pygeos as pg
from analysis.constants import M2_ACRES
from analysis.lib.pygeos_util import intersection
ownership_filename = "data/inputs/boundaries/ownership.feather"
results_dir = Path("data/results/huc12")
ownership_results_filename = results_dir / "ownership.feather"
protection_results_filename = results_dir / "protection.feather"
def summarize_by_huc12(units_df):
print("Calculating overlap with land ownership and protection")
ownership = gp.read_feather(
ownership_filename, columns=["geometry", "FEE_ORGTYP", "GAP_STATUS"]
)
index_name = units_df.index.name
df = intersection(units_df, ownership)
if not len(df):
return
df["acres"] = pg.area(df.geometry_right.values.data) * M2_ACRES
# drop areas that touch but have no overlap
df = df.loc[df.acres > 0].copy()
by_owner = (
df[["FEE_ORGTYP", "acres"]]
.groupby([index_name, "FEE_ORGTYP"])
.acres.sum()
.astype("float32")
.round()
.reset_index()
)
by_protection = (
df[["GAP_STATUS", "acres"]]
.groupby([index_name, "GAP_STATUS"])
.acres.sum()
.astype("float32")
.round()
.reset_index()
)
by_owner.to_feather(ownership_results_filename)
by_protection.to_feather(protection_results_filename)
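# Hedged usage sketch (added here for illustration; not part of the original module). The
# summary-units path and the "id" index column are assumptions and may differ in the actual
# pipeline; the point is only to show how summarize_by_huc12 is meant to be invoked.
if __name__ == "__main__":
    units_df = gp.read_feather(
        "data/inputs/summary_units/huc12.feather", columns=["id", "geometry"]
    ).set_index("id")
    results_dir.mkdir(parents=True, exist_ok=True)
    summarize_by_huc12(units_df)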
|
# proxy module
from __future__ import absolute_import
from chaco.function_image_data import *
|
from lcarmq.rmq.abs_rmq import AbstractRmq
from lcarmq.rmq.rmq import Rmq
from lcarmq.rmq.bfc2000 import Bfc2000
|
import torch
import torch.nn as nn
import math
import librosa
def normalize_batch(x, seq_len, normalize_type: str):
if normalize_type == "per_feature":
assert not torch.isnan(x).any(), x
x_mean = torch.zeros(
(seq_len.shape[0], x.shape[1]), dtype=x.dtype, device=x.device)
x_std = torch.zeros(
(seq_len.shape[0], x.shape[1]), dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)
x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)
elif normalize_type == "all_features":
x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
x_mean[i] = x[i, :, :int(seq_len[i])].mean()
x_std[i] = x[i, :, :int(seq_len[i])].std()
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)
else:
return x
class FilterbankFeatures(nn.Module):
# For JIT. See
# https://pytorch.org/docs/stable/jit.html#python-defined-constants
__constants__ = ["dither", "preemph", "n_fft", "hop_length", "win_length",
"center", "log", "frame_splicing", "window", "normalize",
"pad_to", "max_duration", "max_length"]
def __init__(self,
sample_rate=16000,
win_length=320,
hop_length=160,
n_fft=512,
window="hann",
normalize="none",
log=True,
dither=1e-5,
pad_to=0,
max_duration=16.7,
preemph=0.97,
n_filt=64,
f_min=0,
f_max=None):
super(FilterbankFeatures, self).__init__()
torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
self.win_length = win_length # frame size
self.hop_length = hop_length
self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))
self.normalize = normalize
self.log = log
# TORCHSCRIPT: Check whether or not we need this
self.dither = dither
self.n_filt = n_filt
self.preemph = preemph
self.pad_to = pad_to
f_max = f_max or sample_rate / 2
window_fn = torch_windows.get(window, None)
window_tensor = window_fn(self.win_length,
periodic=False) if window_fn else None
filterbanks = torch.tensor(
librosa.filters.mel(
sample_rate, self.n_fft, n_mels=n_filt,
fmin=f_min, fmax=f_max),
dtype=torch.float).unsqueeze(0)
# self.fb = filterbanks
# self.window = window_tensor
self.register_buffer("fb", filterbanks)
self.register_buffer("window", window_tensor)
# Calculate maximum sequence length (# frames)
max_length = 1 + math.ceil(
(max_duration * sample_rate - self.win_length) / self.hop_length
)
max_pad = 16 - (max_length % 16)
self.max_length = max_length + max_pad
def get_seq_len(self, seq_len):
return torch.ceil(seq_len.float() / self.hop_length).int()
# do stft
# TORCHSCRIPT: center removed due to bug
def stft(self, x):
return torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
win_length=self.win_length,
window=self.window.to(dtype=torch.float))
def forward(self, x):
# dtype = x.dtype
seq_len = self.get_seq_len(torch.tensor([x.shape[1]]))
# dither
if self.dither > 0:
x += self.dither * torch.randn_like(x)
# do preemphasis
if self.preemph is not None:
x = torch.cat(
[x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]],
dim=1)
x = self.stft(x)
# get power spectrum
x = x.pow(2).sum(-1)
# dot with filterbank energies
x = torch.matmul(self.fb.to(x.dtype), x)
# log features if required
if self.log:
x = torch.log(x + 1e-20)
# normalize if required
x = normalize_batch(x, seq_len, normalize_type=self.normalize)
# mask to zero any values beyond seq_len in batch, pad to multiple of
# `pad_to` (for efficiency)
max_len = x.size(-1)
mask = torch.arange(max_len, dtype=seq_len.dtype).to(x.device).expand(
x.size(0), max_len) >= seq_len.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1), 0)
# TORCHSCRIPT: Is this del important? It breaks scripting
# del mask
# TORCHSCRIPT: Can't have mixed types. Using pad_to < 0 for "max"
if self.pad_to < 0:
x = nn.functional.pad(x, (0, self.max_length - x.size(-1)))
elif self.pad_to > 0:
pad_amt = x.size(-1) % self.pad_to
# if pad_amt != 0:
x = nn.functional.pad(x, (0, self.pad_to - pad_amt))
return x
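# Hedged usage sketch (added for illustration; not part of the original file). It assumes an
# older torch (<1.8, where torch.stft may be called without return_complex) and an older
# librosa whose filters.mel still accepts positional arguments, matching the class above.
if __name__ == "__main__":
    featurizer = FilterbankFeatures()   # defaults: 16 kHz input, 64 mel filters, no normalization
    waveform = torch.randn(1, 16000)    # one second of fake 16 kHz audio, shape (batch, samples)
    features = featurizer(waveform)     # -> (batch, n_filt, n_frames)
    print(features.shape)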
|
# -*- coding: utf-8 -*-
"""
pyeasyga module
"""
import random
import copy
from operator import attrgetter
import numpy as np
from time import gmtime, strftime
import matplotlib.pyplot as plt
from libs.get_random import get_rollet_wheel as rollet
from six.moves import range
class GeneticAlgorithm(object):
"""Genetic Algorithm class.
This is the main class that controls the functionality of the Genetic
Algorithm over 2-dimensional matrices.
"""
def __init__(self,
seed_data,
meta_data,
population_size=50,
generations=100,
crossover_probability=0.8,
mutation_probability=0.2,
elitism=True,
by_parent=False,
maximise_fitness=True,
initial_elit_prob=0.5,
initial_random_prob=0.5):
self.seed_data = seed_data
self.meta_data = meta_data
self.population_size = population_size
self.generations = generations
self.crossover_probability = crossover_probability
self.mutation_probability = mutation_probability
self.elitism = elitism
self.by_parent = by_parent
self.maximise_fitness = maximise_fitness
self.single_count = 0
self.double_count = 0
self.uniform_count = 0
self.mutate_count = 0
self.initial_elit_prob = initial_elit_prob
self.initial_random_prob = initial_random_prob
self.current_generation = []
def single_crossover(parent_1, parent_2):
child_1, child_2 = parent_1, parent_2
row, col = parent_1.shape
for r in range(row):
crossover_index = (random.randrange(1, col - 1))
colt = crossover_index
child_1.iloc[r] = np.append(parent_1.iloc[r, :colt],
parent_2.iloc[r, colt:])
child_2.iloc[r] = np.append(parent_1.iloc[r, colt:],
                            parent_2.iloc[r, :colt])
return child_1, child_2
def double_crossover(parent_1, parent_2):
child_1, child_2 = parent_1, parent_2
row, col = parent_1.shape
for r in range(row):
colt1 = (random.randrange(1, col - 1))
colt2 = (random.randrange(colt1, col - 1))
s1 = parent_2.iloc[r, :colt1]
s1 = np.append(s1, parent_1.iloc[r, colt1:colt2])
s1 = np.append(s1, parent_2.iloc[r, colt2:])
child_1.iloc[r] = s1
s2 = parent_1.iloc[r, :colt1]
s2 = np.append(s2, parent_2.iloc[r, colt1:colt2])
s2 = np.append(s2, parent_1.iloc[r, colt2:])
child_2.iloc[r] = s2
return child_1, child_2
def uniform_crossover(parent_1, parent_2):
child_1, child_2 = parent_1, parent_2
row, col = parent_1.shape
for r in range(row):
colt1 = (random.randrange(1, col - 1))
colt2 = (random.randrange(colt1, col - 1))
colt3 = (random.randrange(colt2, col - 1))
colt4 = (random.randrange(colt3, col - 1))
colt5 = (random.randrange(colt4, col - 1))
colt6 = (random.randrange(colt5, col - 1))
colt7 = (random.randrange(colt6, col - 1))
colt8 = (random.randrange(colt7, col - 1))
colt9 = (random.randrange(colt8, col - 1))
s1= parent_2.iloc[r, :colt1]
s1 = np.append(s1, parent_1.iloc[r, colt1:colt2])
s1 = np.append(s1, parent_2.iloc[r, colt2:colt3])
s1 = np.append(s1, parent_1.iloc[r, colt3:colt4])
s1 = np.append(s1, parent_2.iloc[r, colt4:colt5])
s1 = np.append(s1, parent_1.iloc[r, colt5:colt6])
s1 = np.append(s1, parent_2.iloc[r, colt6:colt7])
s1 = np.append(s1, parent_1.iloc[r, colt7:colt8])
s1 = np.append(s1, parent_2.iloc[r, colt8:colt9])
s1 = np.append(s1, parent_1.iloc[r, colt9:])
child_1.iloc[r] = s1
s2= parent_1.iloc[r, :colt1]
s2 = np.append(s2, parent_2.iloc[r, colt1:colt2])
s2 = np.append(s2, parent_1.iloc[r, colt2:colt3])
s2 = np.append(s2, parent_2.iloc[r, colt3:colt4])
s2 = np.append(s2, parent_1.iloc[r, colt4:colt5])
s2 = np.append(s2, parent_2.iloc[r, colt5:colt6])
s2 = np.append(s2, parent_1.iloc[r, colt6:colt7])
s2 = np.append(s2, parent_2.iloc[r, colt7:colt8])
s2 = np.append(s2, parent_1.iloc[r, colt8:colt9])
s2 = np.append(s2, parent_2.iloc[r, colt9:])
child_2.iloc[r] = s2
return child_1, child_2
def mutate(individual):
parent = individual
row , col = parent.shape
shift_list = np.flip(meta_data.index.values.tolist())
for r in range(row):
mutate_index1 = random.randrange(1, col)
mutate_index2 = random.randrange(1, col)
parent.iloc[r][mutate_index1] = np.random.choice(shift_list,
p=[0.0,0.0,0.0,
0.05,0.15,0.35,0.45],
size=1)
parent.iloc[r][mutate_index2] = np.random.choice(shift_list,
p=[0.0,0.0,0.0,
0.05,0.15,0.35,0.45],
size=1)
def create_individual(data,meta_data):
individual = data[:]
shift_list = meta_data.index.values.tolist()
for col in individual.columns :
individual[col] = np.random.choice(shift_list,
size=len(individual))
return individual
def create_individual_local_search(data,meta_data):
individual = data[:]
p = random.random()
if p < 0.25:
individual, _ = single_crossover(individual, individual)
elif p < 0.5:
individual, _ = double_crossover(individual, individual)
elif p < 0.75:
individual, _ = uniform_crossover(individual, individual)
else:
mutate(individual)
return individual
def random_selection(population):
"""Select and return a random member of the population."""
return random.choice(population)
def weighted_random_choice(population):
total_fitness = sum(chromosome.fitness for chromosome in population)
pick = random.uniform(0, total_fitness)
current = 0
for chromosome in population:
current += chromosome.fitness
if current > pick:
return chromosome
def tournament_selection(population):
"""Select a random number of individuals from the population and
return the fittest member of them all.
"""
if self.tournament_size == 0:
self.tournament_size = 2
members = random.sample(population, self.tournament_size)
members.sort(
key=attrgetter('fitness'), reverse=self.maximise_fitness)
return members[0]
self.fitness_function = None
self.tournament_selection = tournament_selection
self.tournament_size = self.population_size // 10
self.random_selection = random_selection
self.create_individual = create_individual
self.single_crossover_function = single_crossover
self.double_crossover_function = double_crossover
self.uniform_crossover_function = uniform_crossover
self.mutate_function = mutate
self.selection_function = self.tournament_selection
def create_initial_population(self):
"""Create members of the first population randomly.
"""
initial_population = []
individual = Chromosome(self.seed_data)
parent = copy.deepcopy(individual)
for i in range(self.population_size):
genes = self.create_individual(self.seed_data,self.meta_data)
individual = Chromosome(genes)
individual.life_cycle = 1
self.single_count += 1
initial_population.append(individual)
if self.by_parent:
initial_population[0] = parent
self.current_generation = initial_population
def calculate_population_fitness(self):
"""Calculate the fitness of every member of the given population using
the supplied fitness_function.
"""
for individual in self.current_generation:
individual.set_fitness(self.fitness_function(individual.genes,
self.meta_data)
)
def rank_population(self):
"""Sort the population by fitness according to the order defined by
maximise_fitness.
"""
self.current_generation.sort(
key=attrgetter('fitness'), reverse=self.maximise_fitness)
def create_new_population(self):
"""Create a new population using the genetic operators (selection,
crossover, and mutation) supplied.
"""
new_population = []
elite = copy.deepcopy(self.current_generation[0])
selection = self.selection_function
while len(new_population) < self.population_size:
parent_1 = copy.deepcopy(selection(self.current_generation))
parent_2 = copy.deepcopy(selection(self.current_generation))
child_1, child_2 = parent_1, parent_2
child_1.parent_fitness, child_2.parent_fitness = (parent_1.fitness,
parent_2.fitness)
#-------------------- use tabu search ----------------------------#
''' If parent_1 or parent_2 used any operator, then those operators
should not be used to create child_1 and child_2.
<< Tabu Search by the last state of the search operation >>
'''
parent_single_cross_count = max(parent_1.single_cross_count,
parent_2.single_cross_count)
parent_double_cross_count = max(parent_1.double_cross_count,
parent_2.double_cross_count)
parent_uniform_cross_count = max(parent_1.uniform_cross_count,
parent_2.uniform_cross_count)
parent_mutate_count = max(parent_1.mutate_count,
parent_2.mutate_count)
prob_single_cross = int(parent_single_cross_count == 0)
prob_double_cross = int(parent_double_cross_count == 0)
prob_uniform_cross = int(parent_uniform_cross_count == 0)
prob_mutate = int(parent_mutate_count == 0)
sum_all_prob = (prob_single_cross+prob_double_cross+
prob_uniform_cross+prob_mutate)
# sum_all_prob = 0.00001 if sum_all_prob==0 else sum_all_prob
prob_single_cross = prob_single_cross/sum_all_prob
prob_double_cross = prob_double_cross/sum_all_prob
prob_uniform_cross = prob_uniform_cross/sum_all_prob
prob_mutate = prob_mutate/sum_all_prob
#------------- rollet wheel -----------------#
p = random.random()
cdf_prob_single_cross = prob_single_cross
cdf_prob_double_cross = (prob_single_cross +
prob_double_cross
if prob_double_cross else 0)
cdf_prob_uniform_cross = (prob_single_cross +
prob_double_cross +
prob_uniform_cross
if prob_uniform_cross else 0)
cdf_prob_mutate = (prob_single_cross +
prob_double_cross +
prob_uniform_cross+
prob_mutate
if prob_mutate else 0)
if p < cdf_prob_single_cross:
child_1.genes, child_2.genes = self.single_crossover_function(
parent_1.genes, parent_2.genes)
child_1.set_init_count()
child_2.set_init_count()
child_1.single_cross_count, child_2.single_cross_count = 1, 1
self.single_count += 1
# print('single_crossover_function')
elif p < cdf_prob_double_cross:
child_1.genes, child_2.genes = self.double_crossover_function(
parent_1.genes, parent_2.genes)
child_1.set_init_count()
child_2.set_init_count()
child_1.double_cross_count, child_2.double_cross_count = 1, 1
self.double_count += 1
# print('double_crossover_function')
elif p < cdf_prob_uniform_cross:
child_1.genes, child_2.genes = self.uniform_crossover_function(
parent_1.genes, parent_2.genes)
child_1.set_init_count()
child_2.set_init_count()
child_1.uniform_cross_count, child_2.uniform_cross_count = 1, 1
self.uniform_count += 1
# print('uniform_crossover_function')
else:
self.mutate_function(child_1.genes)
self.mutate_function(child_2.genes)
child_1.set_init_count()
child_2.set_init_count()
child_1.mutate_count, child_2.mutate_count = 1, 1
self.mutate_count += 1
# print('mutate_function')
#------------- ------------- -----------------#
new_population.append(child_1)
if len(new_population) < self.population_size:
new_population.append(child_2)
if self.elitism:
new_population[0] = elite
self.current_generation = new_population
def create_first_generation(self):
"""Create the first population, calculate the population's fitness and
rank the population by fitness according to the order specified.
"""
self.create_initial_population()
self.calculate_population_fitness()
self.rank_population()
def create_next_generation(self):
"""Create subsequent populations, calculate the population fitness and
rank the population by fitness in the order specified.
"""
self.create_new_population()
self.calculate_population_fitness()
self.rank_population()
def run(self):
"""Run (solve) the Genetic Algorithm."""
print('start: ' + strftime("%Y-%m-%d %H:%M:%S", gmtime()))
self.create_first_generation()
for g in range(1, self.generations):
print('---------- Start ---------------')
print('generation-' +str(g) + ' -> start: ')
self.create_next_generation()
print('best cost: ' + str(self.current_generation[0].fitness))
print('single_count:' +str(self.single_count))
print('double_count:' +str(self.double_count))
print('uniform_count:' +str(self.uniform_count))
print('mutate_count:' +str(self.mutate_count))
print('----------- End ----------------')
print('end: ' + strftime("%Y-%m-%d %H:%M:%S", gmtime()))
def best_individual(self):
"""Return the individual with the best fitness in the current
generation.
"""
best = self.current_generation[0]
return (best.fitness, best.genes)
def last_generation(self):
"""Return members of the last generation as a generator function."""
return ((member.fitness, member.genes) for member
in self.current_generation)
class Chromosome(object):
""" Chromosome class that encapsulates an individual's fitness and solution
representation.
"""
def __init__(self, genes):
"""Initialise the Chromosome."""
self.genes = genes
self.fitness = 0
self.parent_fitness = 0
self.life_cycle = 0
self.fitness_const_count = 0
self.single_cross_count = 0
self.double_cross_count = 0
self.uniform_cross_count = 0
self.mutate_count = 0
self.elit = 0
def __repr__(self):
"""Return initialised Chromosome representation in human readable form.
"""
return repr((self.fitness, self.genes))
def set_fitness(self, fitness):
self.life_cycle += 1
#print('life_cycle:' + str(self.life_cycle))
self.fitness = fitness
if self.parent_fitness == self.fitness :
self.fitness_const_count += 1
#print('fitness_const_count:' + str(self.fitness_const_count))
def set_init_count(self):
self.single_cross_count = 0
self.double_cross_count = 0
self.uniform_cross_count = 0
self.mutate_count = 0
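# Hedged usage sketch (added for illustration; not part of the original module). The GA above
# operates on pandas DataFrames, so pandas is assumed here; the shift names, the cost column
# and the fitness function are illustrative placeholders only.
if __name__ == '__main__':
    import pandas as pd
    shifts = ['OFF', 'M1', 'M2', 'A1', 'A2', 'N1', 'N2']   # 7 shifts, matching mutate()'s p-vector
    meta_data = pd.DataFrame({'cost': range(len(shifts))}, index=shifts)
    seed_data = pd.DataFrame('OFF', index=range(5), columns=range(7))   # 5 staff x 7 days
    ga = GeneticAlgorithm(seed_data, meta_data, population_size=10, generations=2)
    ga.fitness_function = lambda genes, meta: int((genes.values == 'OFF').sum())
    ga.create_first_generation()   # ga.run() would drive the full evolutionary loop
    best_fitness, best_genes = ga.best_individual()
    print(best_fitness)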
|
from .models import Member
from rest_framework import serializers, viewsets
class MemberSerializer(serializers.ModelSerializer):
class Meta:
model = Member
fields = '__all__'
class MemberViewSet(viewsets.ModelViewSet):
queryset = Member.objects.all()
serializer_class = MemberSerializer
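# Hedged wiring sketch (added for illustration; not part of the original module). This would
# normally live in the project's urls.py; the "members" URL prefix is an assumption.
from django.urls import include, path
from rest_framework import routers

router = routers.DefaultRouter()
router.register(r'members', MemberViewSet)

urlpatterns = [
    path('', include(router.urls)),
]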
|
from baseic import FunDemo
FunDemo.message("JJGG", 25)
|
#!/usr/bin/env python3
import argparse
import os
import sys
import uuid
import util
import service_app_id as app_id
import service_cos as cos
import service_cloudant as nosql
import service_event_streams as es
CREDENTIALS_FILE = './.credentials'
COVSAFE_VIEW = 'covsafe-view'
ES_TOPICS = 'covsafe'
APPID_REGISTERED_APP = 'covsafe'
APPID_REGISTERED_USER = 'user@fake.email:JamesSmith:password'
UI_COMPONENTS_BUCKET = 'UI_COMPONENTS_BUCKET'
CLOUDANT_DB = 'assets,assets_staff,view-config,log_risk_calculation,log_risk_notifier,a_notification_template,ads,shops'
SERVICES = {
'app_id': 'app-id',
'cos': 'cos',
'cloudant': 'cloudant',
'event_streams': 'event-streams'
}
# get arguments
def parse_args(args):
parser = argparse.ArgumentParser(description="""
deploy covsafe solution to IBM Cloud.
This requires an environment variable ${APIKEY} as your IAM API key.
""")
parser.add_argument('-o', '--operation', default='create', help='create|delete the solution')
parser.add_argument('-p', '--project', default='covsafe', help='Project Name')
parser.add_argument(
'-t', '--tenant', default='c4c', help='''
Tenant ID to select the data set. This should be one of the directory names under /path/to/project/data/
'''
)
parser.add_argument('-r', '--region', default='jp-tok', help='Region Name')
parser.add_argument('-g', '--resource-group', default='c4c-covid-19', help='Resource Group Name')
parser.add_argument('-l', '--plan', default='lite',
help='service plan. Event Streams is always created as standard and App ID as lite, regardless of this value'
)
return parser.parse_args(args)
def create(args):
init()
args = parse_args(args)
util.login(args.region, args.resource_group)
# create UI namespace for app ID
util.create_functions_namespace(COVSAFE_VIEW)
view_ns = util.get_functions_namespace_id(COVSAFE_VIEW)
view_api = 'https://{}.functions.appdomain.cloud/api/v1/web/{}/covsafe/view/callback'.format(
args.region, view_ns.strip()
)
# create IBM Event Streams
# note that this creates a paid (standard) plan!
es.create([
'-r', args.region, '-g', args.resource_group, '-p', 'standard', '-n', SERVICES['event_streams'],
'-k', 'event-streams-key', '-c', CREDENTIALS_FILE, '-t', ES_TOPICS
])
# create IBM Cloud Cloudant
# FIXME: might need to create index to avoid the query error
data = [
'../data/common/cloudant/notification-template.json;a_notification_template',
'../data/common/cloudant/view-config.json;view-config',
'../data/tenants/{}/cloudant/assets.json;assets'.format(args.tenant),
'../data/tenants/{}/cloudant/assets_staff.json;assets_staff'.format(args.tenant),
'../data/tenants/{}/cloudant/shops.json;shops'.format(args.tenant)
]
nosql.create([
'-r', args.region, '-g', args.resource_group, '-p', args.plan, '-n', SERVICES['cloudant'],
'-k', 'cloudant-key', '-c', CREDENTIALS_FILE, '-b', CLOUDANT_DB,
'-d', ','.join(data)
])
# create IBM Cloud Object Storage
bucket = util.get_credentials_value(CREDENTIALS_FILE, UI_COMPONENTS_BUCKET)
cosdir = '../data/tenants/{}/cos'.format(args.tenant)
files = ['{}/{}'.format(cosdir, f) for f in os.listdir(cosdir) if os.path.isfile(os.path.join(cosdir, f))]
data = ','.join(['{};{}'.format(x, bucket) for x in files])
cos.create([
'-r', args.region, '-g', args.resource_group, '-p', args.plan, '-n', SERVICES['cos'],
'-k', 'cos-hmac', '-c', CREDENTIALS_FILE, '-b', bucket, '-d', data
])
# create IBM App ID
# must run after the UI deployment, because it needs the redirect URL
app_id.create([
'-r', args.region, '-g', args.resource_group, '-p', 'lite', '-n', SERVICES['app_id'],
'-e', 'OFF', '-u', view_api, '-a', APPID_REGISTERED_APP,
'-s', APPID_REGISTERED_USER
])
post_create()
def delete(args):
args = parse_args(args)
app_id.delete(['-n', SERVICES['app_id'], '-g', args.resource_group])
bucket = util.get_credentials_value(CREDENTIALS_FILE, UI_COMPONENTS_BUCKET)
cosdir = '../data/tenants/{}/cos'.format(args.tenant)
files = [f for f in os.listdir(cosdir) if os.path.isfile(os.path.join(cosdir, f))]
data = ','.join(['{};{}'.format(x, bucket) for x in files])
cos.delete([
'-n', SERVICES['cos'], '-g', args.resource_group, '-r', args.region,
'-b', bucket, '-d', data
])
nosql.delete(['-n', SERVICES['cloudant'], '-g', args.resource_group])
es.delete(['-n', SERVICES['event_streams'], '-g', args.resource_group])
util.delete_functions_namespace(COVSAFE_VIEW)
post_delete()
def init():
with open(CREDENTIALS_FILE, 'w') as f:
f.write('{}={}\n'.format(UI_COMPONENTS_BUCKET, str(uuid.uuid4())))
def post_create():
print('something new')
def post_delete():
os.remove(CREDENTIALS_FILE)
if __name__ == '__main__':
args = parse_args(sys.argv[1:])
if args.operation == 'create':
create(sys.argv[1:])
elif args.operation == 'delete':
delete(sys.argv[1:])
else:
print(util.bcolors.WARNING + 'no option. please check usage of this script.' + util.bcolors.ENDC)
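# Hedged usage note (added for illustration; not part of the original script). It expects an
# ${APIKEY} environment variable for IBM Cloud IAM; the script file name below is assumed:
#   APIKEY=xxxx python3 deploy.py -o create -t c4c -r jp-tok -g c4c-covid-19
#   APIKEY=xxxx python3 deploy.py -o delete -t c4c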
|
from _functools import partial
def f(a, b, c=0, d=0):
return (a, b, c, d)
f7 = partial(f, b=7, c=1)
them = f7(10)
assert them == (10, 7, 1, 0)
|
import shutil
from typing import Any, Dict, Optional, Tuple
from faker import Faker
from nig.endpoints import INPUT_ROOT
from restapi.services.authentication import Role
from restapi.tests import API_URI, BaseTests, FlaskClient
def create_test_env(client: FlaskClient, faker: Faker, study: bool = False) -> Any:
# create a group with one user
uuid_group_A, _ = BaseTests.create_group(client)
user_A1_uuid, data = BaseTests.create_user(
client, data={"group": uuid_group_A}, roles=[Role.USER]
)
user_A1_headers, _ = BaseTests.do_login(
client, data.get("email"), data.get("password")
)
# create a second group with two users
uuid_group_B, _ = BaseTests.create_group(client)
user_B1_uuid, data = BaseTests.create_user(
client, data={"group": uuid_group_B}, roles=[Role.USER]
)
user_B1_headers, _ = BaseTests.do_login(
client, data.get("email"), data.get("password")
)
# create a second user for the group 2
user_B2_uuid, data = BaseTests.create_user(
client, data={"group": uuid_group_B}, roles=[Role.USER]
)
user_B2_headers, _ = BaseTests.do_login(
client, data.get("email"), data.get("password")
)
study1_uuid = None
study2_uuid = None
if study:
# create a study in group B
study1 = {"name": faker.pystr(), "description": faker.pystr()}
r = client.post(f"{API_URI}/study", headers=user_B1_headers, data=study1)
assert r.status_code == 200
study1_uuid = BaseTests.get_content(r)
# create a study in group A
study2 = {"name": faker.pystr(), "description": faker.pystr()}
r = client.post(f"{API_URI}/study", headers=user_A1_headers, data=study2)
assert r.status_code == 200
study2_uuid = BaseTests.get_content(r)
admin_headers, _ = BaseTests.do_login(client, None, None)
return (
admin_headers,
uuid_group_A,
user_A1_uuid,
user_A1_headers,
uuid_group_B,
user_B1_uuid,
user_B1_headers,
user_B2_uuid,
user_B2_headers,
study1_uuid,
study2_uuid,
)
def delete_test_env(
client: FlaskClient,
user_A1_headers: Tuple[Optional[Dict[str, str]], str],
user_B1_headers: Tuple[Optional[Dict[str, str]], str],
user_B1_uuid: str,
user_B2_uuid: str,
user_A1_uuid: str,
uuid_group_A: str,
uuid_group_B: str,
study1_uuid: Optional[str] = None,
study2_uuid: Optional[str] = None,
) -> None:
admin_headers, _ = BaseTests.do_login(client, None, None)
# delete all the elements used by the test
if study1_uuid:
r = client.delete(f"{API_URI}/study/{study1_uuid}", headers=user_B1_headers)
assert r.status_code == 204
if study2_uuid:
r = client.delete(f"{API_URI}/study/{study2_uuid}", headers=user_A1_headers)
assert r.status_code == 204
# first user
r = client.delete(f"{API_URI}/admin/users/{user_B1_uuid}", headers=admin_headers)
assert r.status_code == 204
# second user
r = client.delete(f"{API_URI}/admin/users/{user_B2_uuid}", headers=admin_headers)
assert r.status_code == 204
# other user
r = client.delete(f"{API_URI}/admin/users/{user_A1_uuid}", headers=admin_headers)
assert r.status_code == 204
# group A directory
INPUT_ROOT_path = INPUT_ROOT.joinpath(uuid_group_A)
shutil.rmtree(INPUT_ROOT_path)
# group A
r = client.delete(f"{API_URI}/admin/groups/{uuid_group_A}", headers=admin_headers)
assert r.status_code == 204
# group B directory
INPUT_ROOT_path = INPUT_ROOT.joinpath(uuid_group_B)
shutil.rmtree(INPUT_ROOT_path)
# group B
r = client.delete(f"{API_URI}/admin/groups/{uuid_group_B}", headers=admin_headers)
assert r.status_code == 204
|
from controller import Robot
def calculate_motor(signal):
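    # Map a 0-100 percent speed command onto the e-puck wheel velocity range (max ~6.28 rad/s).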
return (signal/100)*6.28
def run_robot(robot):
#timestep = int(robot.getBasicTimeStep())
timestep = 64
#define sensor
sensor = []
sensor_name =['ps0', 'ps1', 'ps2', 'ps3', 'ps4', 'ps5', 'ps6', 'ps7']
for i in range(8):
sensor.append(robot.getDevice(sensor_name[i]))
sensor[i].enable(timestep)
#define motor
wheel = []
wheel_name =['left wheel motor', 'right wheel motor']
for i in range(2):
wheel.append(robot.getDevice(wheel_name[i]))
wheel[i].setPosition(float('inf'))
wheel[i].setVelocity(0.0)
#parameter
#pid_parameter = [0.55, 0.00004, 2.6]
pid_parameter = [0.35, 0.00001, 2.2] #Kp Ki Kd
error = [0, 0, 0]
set_point = 140
control = [0, 0, 0]
pid_control = 0
sensor_val = [0, 0, 0, 0, 0, 0, 0, 0]
while robot.step(timestep) != -1:
normal_speed = 80
fast_speed = 100
for i in range(8):
sensor_val[i] = sensor[i].getValue()
#proportional control
error[0] = set_point - sensor_val[2]
control[0] = error[0]*pid_parameter[0]
#integral control
error[1] = error[1] + error[0]
if error[1] > 150:
error[1] = 150
if error[1] <= -150:
error[1] = -150
control[1] = error[1]*pid_parameter[1]
#derivative control
control[2] = (error[0]-error[2])*pid_parameter[2]
error[2] = error[0]
pid_control = control[0]+control[1]+control[2]
if pid_control >= (fast_speed-normal_speed-1):
pid_control = (fast_speed-normal_speed-1)
if pid_control <= -(fast_speed-normal_speed-1):
pid_control = -(fast_speed-normal_speed-1)
max_speed = calculate_motor(fast_speed)
#check for a wall in front
if sensor_val[0] > 80:
#print("Front")
left = -max_speed
right = max_speed
else:
#small error: boost straight ahead at full speed
if error[0] >= -5 and error[0] <= 5:
left = max_speed
right = max_speed
else:
if sensor_val[2] > 80:
#print("Wall")
speed = calculate_motor(normal_speed)
pid_control = calculate_motor(pid_control)
left = speed+pid_control
right = speed-pid_control
else:
#print("No Wall")
left = max_speed
right = max_speed/6
wheel[0].setVelocity(left)
wheel[1].setVelocity(right)
#round_error = [f"{num:.2f}" for num in error]
#round_control = [f"{num:.2f}" for num in control]
#print("Error: {} Control: {}".format(round_error, round_control))
#print("Error: {:.2f} Control: {:.2f}".format(error[0], pid_control))
if __name__ == "__main__":
my_robot = Robot()
run_robot(my_robot)
|
'''Train function.
'''
import logging
from os import path
import time
import lasagne
import numpy as np
from progressbar import Bar, ProgressBar, Percentage, Timer
import theano
from theano import tensor as T
from utils import update_dict_of_lists
from viz import save_images, save_movie
logger = logging.getLogger('BGAN.train')
floatX_ = lasagne.utils.floatX
floatX = theano.config.floatX
ETA = None
GENERATOR = None
DISCRIMINATOR = None
OPTIONS = dict()
def summarize(summary, gen_fn, dim_z, prefix='', image_dir=None):
if len(prefix) > 0: prefix = prefix + '_'
logger.info(summary)
samples = gen_fn(floatX_(np.random.rand(5000, dim_z)))[0:64]
if image_dir is not None:
logger.debug('Saving images to {}'.format(image_dir))
out_path = path.join(image_dir, '{}gen.png'.format(prefix))
save_images(samples, 8, 8, out_file=out_path)
def make_gif(gen_fn, z=None, samples=[], prefix='', image_dir=None):
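    # Note: the mutable default `samples=[]` persists between calls, so generated frames
    # accumulate across epochs and the saved GIF grows as training progresses.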
if len(prefix) > 0: prefix = prefix + '_'
samples.append(gen_fn(floatX_(z))[0:64])
out_path = path.join(image_dir, '{}movie.gif'.format(prefix))
save_movie(samples, 8, 8, out_file=out_path)
def save(results, prefix='', binary_dir=None):
if len(prefix) > 0: prefix = prefix + '_'
np.savez(path.join(binary_dir, '{}gen_params.npz'.format(prefix)),
*lasagne.layers.get_all_param_values(GENERATOR))
np.savez(path.join(binary_dir, '{}disc_params.npz'.format(prefix)),
*lasagne.layers.get_all_param_values(DISCRIMINATOR))
np.savez(path.join(binary_dir, '{}results.npz'.format(prefix)),
**results)
def setup(input_var, noise_var, log_Z,
generator, discriminator, g_results, d_results, discrete=False,
log_Z_gamma=None, clip=None, optimizer=None,
learning_rate=None, lr_decay=None, min_lr=None, decay_at_epoch=None,
optimizer_options=None):
global GENERATOR, DISCRIMINATOR, OPTIONS, ETA
GENERATOR = generator
DISCRIMINATOR = discriminator
decay_at_epoch = decay_at_epoch or 0
min_lr = min_lr or 0.
OPTIONS.update(lr_decay=lr_decay, min_lr=min_lr,
decay_at_epoch=decay_at_epoch)
generator_loss = g_results.get('g loss', None)
if generator_loss is None:
raise ValueError('Generator loss not found in results.')
discriminator_loss = d_results.get('d loss', None)
if discriminator_loss is None:
raise ValueError('Discriminator loss not found in results.')
generator_params = lasagne.layers.get_all_params(
GENERATOR, trainable=True)
discriminator_params = lasagne.layers.get_all_params(
DISCRIMINATOR, trainable=True)
logger.info('Training with {} and optimizer options {}'.format(
optimizer, optimizer_options))
if callable(optimizer):
op = optimizer
elif optimizer == 'adam':
op = lasagne.updates.adam
elif optimizer == 'rmsprop':
op = lasagne.updates.rmsprop
else:
raise NotImplementedError('Optimizer not supported `{}`'.format(
optimizer))
ETA = theano.shared(floatX_(learning_rate))
generator_updates = op(
generator_loss, generator_params, learning_rate=ETA,
**optimizer_options)
discriminator_updates = op(
discriminator_loss, discriminator_params, learning_rate=ETA,
**optimizer_options)
if clip is not None:
logger.info('Clipping weights with clip of {}'.format(clip))
for k in discriminator_updates.keys():
if k.name == 'W':
discriminator_updates[k] = T.clip(
discriminator_updates[k], -clip, clip)
if 'log Z (est)' in g_results.keys():
logger.info('Updating log Z estimate with gamma {}'.format(log_Z_gamma))
generator_updates.update(
[(log_Z, log_Z_gamma * log_Z
+ (1. - log_Z_gamma) * g_results['log Z (est)'])])
# COMPILE
logger.info('Compiling functions.')
train_discriminator = theano.function([noise_var, input_var], d_results, allow_input_downcast=True, updates=discriminator_updates)
train_generator = theano.function([noise_var], g_results, allow_input_downcast=True, updates=generator_updates)
gen_out = lasagne.layers.get_output(generator, deterministic=True)
if discrete:
if generator.output_shape[1] == 1:
gen_out = T.nnet.sigmoid(gen_out)
gen_fn = theano.function(
[noise_var], gen_out)
return train_discriminator, train_generator, gen_fn
def train(train_d, train_g, gen, stream,
summary_updates=None, epochs=None, training_samples=None,
num_iter_gen=None, num_iter_disc=None, batch_size=None, dim_z=None,
image_dir=None, binary_dir=None, archive_every=None):
'''
Main train function.
'''
# TRAIN
logger.info('Starting training of GAN...')
total_results = {}
exp_name = binary_dir.split('/')[-2]
rep_samples = floatX_(np.random.rand(5000, dim_z))
train_sample = next(stream.get_epoch_iterator(as_dict=True))['features'][:64]
logger.debug('Saving images to {}'.format(image_dir))
out_path = path.join(image_dir, 'training_example.png')
save_images(train_sample, 8, 8, out_file=out_path, title='generated MNIST')
for epoch in range(epochs):
u = 0
start_time = time.time()
results = {}
widgets = ['Epoch {} ({}), '.format(epoch, exp_name), Timer(), Bar()]
pbar = ProgressBar(
widgets=widgets,
maxval=(training_samples // batch_size)).start()
for batch in stream.get_epoch_iterator(as_dict=True):
inputs = batch['features']
if inputs.shape[0] == batch_size:
for i in range(num_iter_disc):
random_disc = np.random.rand(len(inputs), dim_z)
noise = floatX_(random_disc)
d_outs = train_d(noise, inputs)
d_outs = dict((k, np.asarray(v))
for k, v in d_outs.items())
update_dict_of_lists(results, **d_outs)
for i in range(num_iter_gen):
random_gen = np.random.rand(len(inputs), dim_z)
noise = floatX_(random_gen)
g_outs = train_g(noise)
g_outs = dict((k, np.asarray(v)) for k, v in g_outs.items())
update_dict_of_lists(results, **g_outs)
u += 1
pbar.update(u)
if summary_updates is not None and u % summary_updates == 0:
summary = dict((k, np.mean(v)) for k, v in results.items())
summarize(summary, gen, dim_z, prefix=exp_name, image_dir=image_dir)
logger.info('Total Epoch {} of {} took {:.3f}s'.format(
epoch + 1, epochs, time.time() - start_time))
if archive_every is not None and epoch % archive_every == 0:
prefix = '{}({})'.format(exp_name, epoch)
else:
prefix = exp_name
result_summary = dict((k, np.mean(v)) for k, v in results.items())
update_dict_of_lists(total_results, **result_summary)
summarize(result_summary, gen, dim_z, prefix=prefix,
image_dir=image_dir)
make_gif(gen, z=rep_samples, prefix=exp_name, image_dir=image_dir)
save(total_results, prefix=prefix, binary_dir=binary_dir)
if (OPTIONS['lr_decay'] is not None
and epoch >= OPTIONS['decay_at_epoch']):
old_eta = ETA.get_value()
if old_eta > OPTIONS['min_lr']:
new_eta = old_eta * OPTIONS['lr_decay']
new_eta = max(new_eta, OPTIONS['min_lr'])
logger.debug('Setting learning rate to {}'.format(new_eta))
ETA.set_value(floatX_(new_eta))
|
flu_url = 'https://data.cityofchicago.org/api/views/rfdj-hdmf/rows.csv?accessType=DOWNLOAD'
flu_path = 'Flu_Shot_Clinic_Locations_-_2013.csv'
flu_shot_meta = {
'dataset_name': 'flu_shot_clinics',
'human_name': 'Flu Shot Clinic Locations',
'attribution': 'foo',
'description': 'bar',
'url': flu_url,
'update_freq': 'yearly',
'business_key': 'event',
'observed_date': 'date',
'latitude': 'latitude',
'longitude': 'longitude',
'location': None,
'contributor_name': 'Carlos',
'contributor_organization': 'StrexCorp',
'contributor_email': 'foo@bar.edu',
'contributed_data_types': None,
'approved_status': 'true',
'is_socrata_source': False,
'column_names': {"date": "DATE", "start_time": "VARCHAR", "end_time": "VARCHAR", "facility_name": "VARCHAR",
"facility_type": "VARCHAR", "street_1": "VARCHAR", "city": "VARCHAR", "state": "VARCHAR",
"zip": "INTEGER", "phone": "VARCHAR", "latitude": "DOUBLE PRECISION",
"longitude": "DOUBLE PRECISION", "day": "VARCHAR", "event": "VARCHAR", "event_type": "VARCHAR",
"ward": "INTEGER", "location": "VARCHAR"}
}
landmarks_url = 'https://data.cityofchicago.org/api/views/tdab-kixi/rows.csv?accessType=DOWNLOAD'
landmarks_path = 'Individual_Landmarks.csv'
landmarks_meta = {
'dataset_name': 'landmarks',
'human_name': 'Chicago Landmark Locations',
'attribution': 'foo',
'description': 'bar',
'url': landmarks_url,
'update_freq': 'yearly',
'business_key': 'id',
'observed_date': 'landmark_designation_date',
'latitude': 'latitude',
'longitude': 'longitude',
'location': 'location',
'contributor_name': 'Cecil Palmer',
'contributor_organization': 'StrexCorp',
'contributor_email': 'foo@bar.edu',
'contributed_data_types': None,
'approved_status': 'true',
'is_socrata_source': False,
'column_names': {"foo": "bar"}
}
crime_url = 'http://data.cityofchicago.org/api/views/ijzp-q8t2/rows.csv?accessType=DOWNLOAD'
crime_path = 'crime_sample.csv'
crime_meta = {
'dataset_name': 'crimes',
'human_name': 'Crimes',
'attribution': 'foo',
'description': 'bar',
'url': crime_url,
'update_freq': 'yearly',
'business_key': 'id',
'observed_date': 'date',
'latitude': 'latitude',
'longitude': 'longitude',
'location': 'location',
'contributor_name': 'Dana Cardinal',
'contributor_organization': 'City of Nightvale',
'contributor_email': 'foo@bar.edu',
'contributed_data_types': None,
'approved_status': 'true',
'is_socrata_source': False,
'column_names': {"foo": "bar"}
}
|
"""empty message
Revision ID: f636a63c48d6
Revises: 10aadf62ab2a
Create Date: 2021-11-16 19:45:56.961730
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f636a63c48d6'
down_revision = '10aadf62ab2a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('hospitalizace_jip_ockovani_vek',
sa.Column('tyden', sa.Unicode(), nullable=False),
sa.Column('tyden_od', sa.Date(), nullable=False),
sa.Column('tyden_do', sa.Date(), nullable=False),
sa.Column('vekova_skupina', sa.Unicode(), nullable=False),
sa.Column('hospitalizace_jip_celkem', sa.Integer(), nullable=True),
sa.Column('hospitalizace_jip_bez', sa.Integer(), nullable=True),
sa.Column('hospitalizace_jip_castecne', sa.Integer(), nullable=True),
sa.Column('hospitalizace_jip_plne', sa.Integer(), nullable=True),
sa.Column('hospitalizace_jip_posilujici', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('tyden', 'vekova_skupina')
)
op.create_table('hospitalizace_ockovani_vek',
sa.Column('tyden', sa.Unicode(), nullable=False),
sa.Column('tyden_od', sa.Date(), nullable=False),
sa.Column('tyden_do', sa.Date(), nullable=False),
sa.Column('vekova_skupina', sa.Unicode(), nullable=False),
sa.Column('hospitalizace_celkem', sa.Integer(), nullable=True),
sa.Column('hospitalizace_bez', sa.Integer(), nullable=True),
sa.Column('hospitalizace_castecne', sa.Integer(), nullable=True),
sa.Column('hospitalizace_plne', sa.Integer(), nullable=True),
sa.Column('hospitalizace_posilujici', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('tyden', 'vekova_skupina')
)
op.create_table('nakazeni_ockovani_vek',
sa.Column('tyden', sa.Unicode(), nullable=False),
sa.Column('tyden_od', sa.Date(), nullable=False),
sa.Column('tyden_do', sa.Date(), nullable=False),
sa.Column('vekova_skupina', sa.Unicode(), nullable=False),
sa.Column('nakazeni_celkem', sa.Integer(), nullable=True),
sa.Column('nakazeni_bez', sa.Integer(), nullable=True),
sa.Column('nakazeni_castecne', sa.Integer(), nullable=True),
sa.Column('nakazeni_plne', sa.Integer(), nullable=True),
sa.Column('nakazeni_posilujici', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('tyden', 'vekova_skupina')
)
op.drop_table('srovnani_ockovani')
connection = op.get_bind()
connection.execute("truncate table ockovani_lide")
connection.execute("update vakciny set vakcina='SPIKEVAX' where vyrobce='Moderna'")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('srovnani_ockovani',
sa.Column('tyden', sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column('od', sa.DATE(), autoincrement=False, nullable=False),
sa.Column('do', sa.DATE(), autoincrement=False, nullable=False),
sa.Column('vekova_skupina', sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column('nakazeni_celkem', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('nakazeni_bez', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('nakazeni_castecne', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('nakazeni_plne', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('nakazeni_posilujici', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_celkem', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_bez', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_castecne', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_plne', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_posilujici', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_jip_celkem', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_jip_bez', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_jip_castecne', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_jip_plne', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('hospitalizace_jip_posilujici', sa.INTEGER(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('tyden', 'vekova_skupina', name='srovnani_ockovani_pkey')
)
op.drop_table('nakazeni_ockovani_vek')
op.drop_table('hospitalizace_ockovani_vek')
op.drop_table('hospitalizace_jip_ockovani_vek')
# ### end Alembic commands ###
|
from functools import lru_cache, partial
from operator import attrgetter
from django.conf import settings
from MyCapytain.common.constants import RDF_NAMESPACES
from MyCapytain.resources.collections.cts import XmlCtsTextInventoryMetadata
from MyCapytain.resources.prototypes.cts import inventory as cts
from .capitains import default_resolver
from .passage import Passage
from .reference import URN
from .toc import RefTree
from .typing import CtsCollectionMetadata
@lru_cache(maxsize=1)
def load_text_inventory_metadata() -> cts.CtsTextInventoryMetadata:
resolver_type = settings.CTS_RESOLVER["type"]
resolver = default_resolver()
if resolver_type == "api":
if getattr(settings, "CTS_LOCAL_TEXT_INVENTORY", None) is not None:
with open(settings.CTS_LOCAL_TEXT_INVENTORY, "r") as fp:
ti_xml = fp.read()
else:
ti_xml = resolver.endpoint.getCapabilities()
return XmlCtsTextInventoryMetadata.parse(ti_xml)
elif resolver_type == "local":
return resolver.getMetadata()["default"]
class TextInventory:
@classmethod
def load(cls):
return cls(load_text_inventory_metadata())
def __init__(self, metadata: cts.CtsTextInventoryMetadata):
self.metadata = metadata
def __repr__(self):
return f"<cts.TextInventory at {hex(id(self))}>"
def text_groups(self):
for urn in sorted(self.metadata.textgroups.keys()):
text_group = TextGroup(urn, self.metadata.textgroups[urn])
if next(text_group.works(), None) is None:
continue
yield text_group
class Collection:
def __init__(self, urn: URN, metadata: CtsCollectionMetadata):
self.urn = urn
self.metadata = metadata
def __repr__(self):
return f"<cts.Collection {self.urn} at {hex(id(self))}>"
def __eq__(self, other):
if type(other) is type(self):
return self.urn == other.urn
return NotImplemented
def __hash__(self):
return hash(str(self.urn))
@property
def label(self):
return self.metadata.get_label(lang="eng")
def ancestors(self):
for metadata in list(reversed(self.metadata.parents))[1:]:
cls = resolve_collection(metadata.TYPE_URI)
# the local resolver returns the text inventory from parents
# this isn't a proper ancestor here, so we'll ignore it.
if issubclass(cls, TextInventory):
continue
yield cls(metadata.urn, metadata)
class TextGroup(Collection):
def __repr__(self):
return f"<cts.TextGroup {self.urn} at {hex(id(self))}>"
def works(self):
children = self.metadata.works
for urn in sorted(children.keys()):
work = Work(urn, children[urn])
if next(work.texts(), None) is None:
continue
yield work
def as_json(self) -> dict:
return {
"urn": str(self.urn),
"label": str(self.label),
"works": [
{
"urn": str(work.urn),
"texts": [
{
"urn": str(text.urn),
}
for text in work.texts()
],
}
for work in self.works()
],
}
class Work(Collection):
def __repr__(self):
return f"<cts.Work {self.urn} at {hex(id(self))}>"
def texts(self):
children = self.metadata.texts
texts = []
for urn in children.keys():
metadata = children[urn]
if metadata.citation is None:
continue
texts.append(resolve_collection(metadata.TYPE_URI)(urn, metadata))
yield from sorted(texts, key=attrgetter("kind", "label"))
def as_json(self) -> dict:
return {
"urn": str(self.urn),
"label": str(self.label),
"texts": [
dict(urn=str(text.urn))
for text in self.texts()
],
}
class Text(Collection):
def __init__(self, kind, *args, **kwargs):
super().__init__(*args, **kwargs)
self.kind = kind
def __repr__(self):
return f"<cts.Text {self.urn} kind={self.kind} at {hex(id(self))}>"
@property
def description(self):
return self.metadata.get_description(lang="eng")
@property
def lang(self):
return self.metadata.lang
@property
def human_lang(self):
lang = self.metadata.lang
return {
"grc": "Greek",
"lat": "Latin",
"heb": "Hebrew",
"fa": "Farsi",
"eng": "English",
"ger": "German",
"fre": "French",
}.get(lang, lang)
@property
def rtl(self):
return self.lang in {"heb", "fa"}
def versions(self):
for edition in self.metadata.editions():
yield Text("edition", edition.urn, edition)
for translation in self.metadata.translations():
yield Text("translation", translation.urn, translation)
@lru_cache()
def toc(self):
citation = self.metadata.citation
depth = len(citation)
tree = RefTree(self.urn, citation)
for reff in default_resolver().getReffs(self.urn, level=depth):
tree.add(reff)
return tree
def first_passage(self):
chunk = next(self.toc().chunks(), None)
if chunk is not None:
return Passage(self, URN(chunk.urn).reference)
def as_json(self, with_toc=True) -> dict:
payload = {
"urn": str(self.urn),
"label": str(self.label),
"description": str(self.description),
"kind": self.kind,
"lang": self.lang,
"rtl": self.rtl,
"human_lang": self.human_lang,
}
if with_toc:
toc = self.toc()
payload.update({
"first_passage": dict(urn=str(self.first_passage().urn)),
"ancestors": [
{
"urn": str(ancestor.urn),
"label": ancestor.label,
}
for ancestor in self.ancestors()
],
"toc": [
{
"urn": next(toc.chunks(ref_node), None).urn,
"label": ref_node.label.title(),
"num": ref_node.num,
}
for ref_node in toc.num_resolver.glob(toc.root, "*")
],
})
return payload
def resolve_collection(type_uri):
return {
RDF_NAMESPACES.CTS.term("TextInventory"): TextInventory,
RDF_NAMESPACES.CTS.term("textgroup"): TextGroup,
RDF_NAMESPACES.CTS.term("work"): Work,
RDF_NAMESPACES.CTS.term("edition"): partial(Text, "edition"),
RDF_NAMESPACES.CTS.term("translation"): partial(Text, "translation"),
RDF_NAMESPACES.CTS.term("commentary"): partial(Text, "commentary"),
}[type_uri]
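# Hedged usage sketch (added for illustration; not part of the original module). It assumes a
# configured Django settings module with CTS_RESOLVER, so treat it as illustrative only.
def _debug_print_inventory():  # hypothetical helper, not used elsewhere
    inventory = TextInventory.load()
    for text_group in inventory.text_groups():
        print(text_group.urn, text_group.label)
        for work in text_group.works():
            print("  ", work.urn, [str(text.urn) for text in work.texts()])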
|
__title__ = "gulpio2"
__version__ = "0.0.4"
__copyright__ = 'Copyright 2021 Will Price & TwentyBN'
__description__ = "Binary storage format for deep learning on videos."
__author__ = ", ".join("""\
Eren Golge (eren.golge@twentybn.com)
Raghav Goyal (raghav.goyal@twentybn.com)
Susanne Westphal (susanne.westphal@twentybn.com)
Heuna Kim (heuna.kim@twentybn.com)
Guillaume Berger (guillaume.berger@twentybn.com)
Joanna Materzyńska (joanna.materzynska@twentybn.com)
Florian Letsch (florian.letsch@twentybn.com)
Valentin Haenel (valentin.haenel@twentybn.com)
Will Price (will.price94+dev@gmail.com)
""".split("\n"))
__author_email__ = "will.price94+gulpio2@gmail.com"
__url__ = "https://github.com/willprice/GulpIO2"
|
# -*- python -*-
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
#
# (c) 2013-2020 parasim inc
# (c) 2010-2020 california institute of technology
# all rights reserved
#
# get the package
import altar
# get the protocol
from . import distribution
# and my base class
from .Base import Base as base
# the declaration
class Gaussian(base, family="altar.distributions.gaussian"):
"""
The Gaussian probability distribution
"""
# user configurable state
mean = altar.properties.float(default=0)
mean.doc = "the mean value of the distribution"
sigma = altar.properties.float(default=1)
sigma.doc = "the standard deviation of the distribution"
# protocol obligations
@altar.export
def initialize(self, rng):
"""
Initialize with the given random number generator
"""
# set up my pdf
self.pdf = altar.pdf.gaussian(rng=rng.rng, mean=self.mean, sigma=self.sigma)
# all done
return self
@altar.export
def verify(self, theta, mask):
"""
Check whether my portion of the samples in {theta} are consistent with my constraints, and
update {mask}, a vector with zeroes for valid samples and non-zero for invalid ones
"""
# all samples are valid, so there is nothing to do
return mask
# end of file
|
from dataclasses import dataclass
from .t_artifact import TArtifact
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class Artifact(TArtifact):
class Meta:
name = "artifact"
namespace = "http://www.omg.org/spec/BPMN/20100524/MODEL"
|
import re
dic = {"1#": "#", "2#": "##", "3#": "###", "4#": "####", "5#": "#####",
"6#": "######", "*": "**", "/": "*", "~": "~~", "_": "__", "--": "----"} # {"Better Markdown": "Markdown"}
def replaceText(text, dic):
for i, j in dic.items():
text = text.replace(i, j)
print(text)
fileName = input("Enter name of file to read (include extension: \'.txt\'): ")
readFile = open(fileName, "r")
string = readFile.read()
splitString = string.split("\n")
for line in splitString:
    replaceText(line, dic)
readFile.close()
end = input("Press enter to end the program: ")
|
from django.apps import apps
from django.contrib import admin
from comment.tests.base import BaseCommentTest
from comment.models import FlagInstance, ReactionInstance
class TestCommentAdmin(BaseCommentTest):
def test_all_models_are_registered(self):
app = apps.get_app_config('comment')
models = app.get_models()
for model in models:
try:
self.assertIs(
True,
admin.site.is_registered(model),
msg=f'Did you forget to register the "{model.__name__}" in the django-admin?')
except AssertionError as exception:
# these models have been registered as inlines in the admin.
if model in [ReactionInstance, FlagInstance]:
continue
else:
raise exception
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
from __future__ import unicode_literals
import six
import zipfile
import zlib
# Pip package imports
# Internal package imports
from flask_mm import files
DEFAULT_STORAGE = 'local'
class BaseStorage(object):
root = None
DEFAULT_MIME = 'application/octet-stream'
def __init__(self, *args, **kwargs):
self.public_view = kwargs.get('public_view', True)
@property
def has_url(self):
return False
@property
def base_url(self):
return None
def exists(self, filename):
raise NotImplementedError('Existence checking is not implemented')
def open(self, filename, *args, **kwargs):
raise NotImplementedError('Open operation is not implemented')
def read(self, filename):
raise NotImplementedError('Read operation is not implemented')
def write(self, filename, content):
raise NotImplementedError('Write operation is not implemented')
def delete(self, filename):
raise NotImplementedError('Delete operation is not implemented')
def copy(self, filename, target):
raise NotImplementedError('Copy operation is not implemented')
def move(self, filename, target):
self.copy(filename, target)
self.delete(filename)
def save(self, file_or_wfs, filename, **kwargs):
self.write(filename, file_or_wfs.read())
return filename
def metadata(self, filename):
meta = self.get_metadata(filename)
# Fix backend mime misdetection
meta['mime'] = meta.get('mime') or files.mime(filename, self.DEFAULT_MIME)
return meta
def archive_files(self, out_filename, filenames, *args, **kwargs):
if not isinstance(filenames, (tuple, list)):
filenames = [filenames]
with zipfile.ZipFile(self.path(out_filename), 'w', zipfile.ZIP_DEFLATED) as zipper:
for filename in filenames:
zipper.write(self.path(filename), filename)
return out_filename
def get_metadata(self, filename):
raise NotImplementedError('get_metadata operation is not implemented')
def serve(self, filename):
raise NotImplementedError('serve operation is not implemented')
def path(self, filename):
raise NotImplementedError('path operation is not implemented')
def as_binary(self, content, encoding='utf8'):
if hasattr(content, 'read'):
return content.read()
elif isinstance(content, six.text_type):
return content.encode(encoding)
else:
return content
def list_file(self):
raise NotImplementedError('list_files operation is not implemented')
def as_unicode(s):
if isinstance(s, bytes):
return s.decode('utf-8')
return str(s)
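# Hedged sketch (added for illustration; not part of the original module): a minimal in-memory
# backend showing which BaseStorage hooks a concrete backend is expected to override. Only the
# core read/write hooks are shown; path(), serve() and metadata support are omitted.
class MemoryStorage(BaseStorage):  # hypothetical example backend
    def __init__(self, *args, **kwargs):
        super(MemoryStorage, self).__init__(*args, **kwargs)
        self._files = {}
    def exists(self, filename):
        return filename in self._files
    def read(self, filename):
        return self._files[filename]
    def write(self, filename, content):
        self._files[filename] = self.as_binary(content)
    def delete(self, filename):
        self._files.pop(filename, None)
    def copy(self, filename, target):
        self._files[target] = self._files[filename]
    def list_file(self):
        return list(self._files)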
|
import grpc
from pygcdm.netcdf_encode import netCDF_Encode
from pygcdm.protogen import gcdm_server_pb2_grpc as grpc_server
from concurrent import futures
class Responder(grpc_server.GcdmServicer):
def __init__(self):
self.encoder = netCDF_Encode()
def GetNetcdfHeader(self, request, context):
print('Header Requested')
return self.encoder.generate_header_from_request(request)
def GetNetcdfData(self, request, context):
print('Data Requested')
# stream the data response
data_response = [self.encoder.generate_data_from_request(request)]
for data in data_response:
yield(data)
def server():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
grpc_server.add_GcdmServicer_to_server(Responder(), server)
server.add_insecure_port('[::]:1234')
print('starting server...')
server.start()
server.wait_for_termination()
if __name__ == '__main__':
server()
|
# CHALLENGE: CLASSES AND METHODS
'''Create a Carro (car) class with at least 3 properties
and at least 3 methods for it.'''
from time import sleep
cores = {'limpar': '\033[m',
'verde': '\033[32m',
'vermelho': '\033[31m',
'pretob': '\033[7;30m'
}
class Carro:
def __init__(self, marca, ano, cor, combustivel):
self.marca = marca
self.ano = ano
self.cor = cor
self.combustivel = combustivel
def Ligar(self):
print('{}Ligando carro...{}'.format(cores['pretob'], cores['limpar']))
sleep(2)
print('Carro ligado!!')
sleep(3)
def Andar(self):
print('O freio de mão foi puxado')
print('O carro começa a andar')
sleep(3)
def Acelerar(self):
print('{}O acelerador é acionado{}'.format(cores['verde'], cores['limpar']))
print('{}O carro começa a acelerar{}'.format(cores['verde'], cores['limpar']))
sleep(3)
def Frear(self):
print('{}os freios são acionados{}'.format(cores['vermelho'], cores['limpar']))
print('{}O carro começa a frear{}'.format(cores['vermelho'], cores['limpar']))
sleep(3)
def Desligar(self):
print('{}Desligando o motor{}'.format(cores['pretob'], cores['limpar']))
sleep(2)
print('Carro Desligado!')
carro1 = Carro('Ford', '2015', 'preto', 'flex')
carro1.Ligar()
carro1.Andar()
carro1.Acelerar()
carro1.Frear()
carro1.Desligar()
|
#!/usr/bin/python3
import numpy as np
import itertools
first = {}
def hasVoid(B, gramatica):
if B in gramatica.keys():
if 'ε' in gramatica[B]:
return True
return False
def concatDicts(test_dict1, test_dict2):
for key in test_dict1:
if key in test_dict2:
for val in test_dict1[key]:
if val not in test_dict2[key]:
test_dict2[key].append(val)
else:
test_dict2[key] = test_dict1[key][:]
return test_dict2
def flattenArray(arrayL):
flatten = itertools.chain.from_iterable
tmp = list(flatten(arrayL))
for rm in tmp: # avoid nested lists produced by the recursion
if type(rm) == list:
tmp.remove(rm)
return list(set(tmp))
def firstSet(gramatica):
global first
# FIRST rules 0, 1 and 2
for a,b in reversed(gramatica.items()):
if not a in first.keys():
first[a] = []
for i in b:
if i == 'ε':
first[a].append(i)
elif i[0].islower():
first[a].append(i[0])
# FIRST rule 3
for a,b in reversed(gramatica.items()):
for i in b:
if i[0].isupper():
if len(i) > 1:
if not hasVoid(i[0], gramatica):
first[a].append(first[i[0]])
first[a] = flattenArray(first[a])
else:
B = first[i[0]].copy()
B.remove('ε')
first[a].append(B)
first[a] = flattenArray(first[a])
alpha = i[1:]
tmpfirst = concatDicts(first, firstSet({a: alpha}))
first = tmpfirst.copy()
elif len(i) == 1:
first[a].append(first[i])
first[a] = flattenArray(first[a])
return first
def run(arg):
gramatica = arg
    # Run twice to make sure the sets are filled in correctly,
    # even if a non-terminal has changed
for i in range(2):
firstSet(gramatica)
for a,b in first.items():
first[a] = flattenArray(b)
return dict(reversed(list(first.items())))
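if __name__ == '__main__':
    # Usage sketch. Assumption inferred from the code above: a grammar is a
    # dict mapping a non-terminal to a list of production strings, where
    # lowercase letters are terminals, uppercase letters are non-terminals and
    # 'ε' marks the empty production.
    exemplo = {'S': ['aB', 'B'], 'B': ['b', 'ε']}
    print(run(exemplo))  # e.g. {'S': ['a', 'b', 'ε'], 'B': ['b', 'ε']} (order may vary)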
|
# Author: Chengjia Lei
import gym
from RL_brain import DQNPrioritizedReplay
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
gym.envs.register(
id='CartPole_long_pole-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
tags={'wrapper_config.TimeLimit.max_episode_steps': 500.0},
reward_threshold=475.0,
kwargs={'change_len': 1.0},
)
env = gym.make('CartPole_long_pole-v0')
gym.envs.register(
id='CartPole_evaluate-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
tags={'wrapper_config.TimeLimit.max_episode_steps': 10000.0},
reward_threshold=10000.0,
kwargs={'change_len': 1.5},
)
env_e = gym.make('CartPole_evaluate-v0')
MEMORY_SIZE = 10000
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
with tf.variable_scope('DQN'):
RL_natural = DQNPrioritizedReplay(
n_actions=env.action_space.n, n_features=np.shape(env.observation_space)[0], memory_size=MEMORY_SIZE,
e_greedy_increment=0.0001, sess=sess, prioritized=False,
)
sess.run(tf.global_variables_initializer())
def train(RL, steps_limit):
# env.render()
steps_num = 0
solved = False
sumreward = 1
account = 0
sess.run(tf.global_variables_initializer())
RL.initiate_common_par()
observation = env.reset()
episodes_reward = []
episodes = []
done = False
action = 0
true_end = 0
for _ in range(MEMORY_SIZE):
if done:
observation = env.reset()
# env.render()
# if _ % 1 == 0:
action = env.action_space.sample()
observation_, reward, done, info = env.step(action)
if done:
reward=0
else:
reward=1
RL.store_transition(observation, action, reward, observation_)
# if done:
# print("fff")
# print(account)
# for i_episode in range(15):
done = True
# env._max_episode_steps = 200
while not solved:
if done:
observation = env.reset()
sumreward = 1
done = False
true_end = 0
# env.render()
action = RL.choose_action(observation, initial = False)
observation_, reward, done, info = env.step(action)
steps_num += 1
true_end += 1
if done:
if true_end == 500:
reward = 1
else:
reward=0
else:
reward=1
if reward == 0:
sumreward += 1
else:
sumreward+= reward
RL.store_transition(observation, action, reward, observation_)
RL.learn()
observation = observation_
if done:
account+=1
# print('episode ', i_episode, ' episode steps', episode_steps, 'total steps', np.sum(steps))
episodes_reward.append(sumreward)
print('times:', account, 'sumreward:', sumreward)
if steps_num == steps_limit:
solved = True
print('Model trained completely!')
# env_e.close()
# env._max_episode_steps = 10000
done = True
gather=[]
print('Evaluate!')
for pole_len in range(100, 1620, 20):
pole_len = pole_len / 1000.0
gym.envs.register(
id='CartPole_r-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
tags={'wrapper_config.TimeLimit.max_episode_steps': 1000},
reward_threshold=975.0,
kwargs={'change_len': pole_len},
)
env_r = gym.make('CartPole_r-v0')
sumreward = 0
episodes_reward = []
for _ in range(100):
while True:
if done:
observation = env_r.reset()
sumreward = 0
# if _ % 1 == 0:
# env_r.render()
action = RL.choose_action(observation, initial = False)
observation_, reward, done, info = env_r.step(action)
sumreward += reward
if done:
# print('episode ', i_episode, ' episode steps', episode_steps, 'total steps', np.sum(steps))
episodes_reward.append(sumreward)
print('pole len', pole_len, 'times:', _+1, 'sumreward:', sumreward)
break
observation = observation_
if pole_len == 0.1:
gather = [pole_len, np.mean(episodes_reward)]
else:
gather = np.vstack((gather, [pole_len, np.mean(episodes_reward)]))
# env_r.close()
return gather
log = []
inx_log = []
for _ in range(0, 20):
if _ == 0:
log = train(RL_natural, 50000)
else:
log = np.c_[log, train(RL_natural, 50000)[:, 1]]
print(np.shape(log))
# log = train(RL_natural, 100000)
x_axis = log[:, 0]
index = np.mean(log[:, 1:], 0)
# print(np.shape(np.reshape(index, [1, 10])))
# print(np.shape(log[:, 1:]))
index = np.reshape(index, [1, 20])
y_data = np.r_[index, log[:, 1:]]
y_data = y_data.T[np.lexsort(-y_data[::-1,:])].T
y_mean = np.mean(y_data[1:, 0:10], 1)
# y_std = log[:, 2]
y_std = np.std(y_data[1:, 0:10], 1)
y_max = np.max(y_data[1:, 0:10], 1)
y_min = np.min(y_data[1:, 0:10], 1)
np.save('x_axis.npy', x_axis)
np.save('y_mean.npy', y_mean)
np.save('y_std.npy', y_std)
np.save('y_max.npy', y_max)
np.save('y_min.npy', y_min)
plt.plot(x_axis, y_mean, c='tomato', label='DQN_baseline')
# plt.fill_between(x_axis, y_mean + y_std/2, y_mean - y_std/2, facecolor='tomato', alpha=0.3)
plt.fill_between(x_axis, y_max, y_min, facecolor='tomato', alpha=0.3)
plt.legend(loc='best')
plt.ylabel('Rewards')
plt.xlabel('Pole Length')
plt.title('CartPole')
plt.grid()
plt.show()
|
from .models import *
from django.core.exceptions import ObjectDoesNotExist
def context(request):
if request.user.is_authenticated:
try:
my_profile = UserProfile.objects.get(user=request.user)
my_posts = Post.objects.filter(post_holders=request.user)
interviews = Nomination.objects.filter(interview_panel=request.user).exclude(status = "Work done")
length = len(my_posts)
senate = False
for post in my_posts:
if post.perms == 'can ratify the post':
senate = True
return {'my_posts': my_posts, 'my_profile': my_profile, 'senate': senate, 'length': length,
'interviews': interviews}
except ObjectDoesNotExist:
return {'my_posts': 0, 'my_profile': 0}
else:
return {'my_posts': 0, 'my_profile': 0}
|
__all__ = ["plainramploader", "steppingramploader", "fullramploader"]
|
from captcha.fields import CaptchaField
from django import forms
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS
from relais import helpers, constants
from relais.constants import RANGE_INDIVIDUAL, RANGE_TEAM
from relais.models import (
CATEGORY_CHOICES,
CONFIG_CHOICES,
GENDER_CHOICES,
METHOD_PAYMENT_CHOICES,
People,
Runner,
TSHIRT_CHOICES,
)
#------------------------------------------------------------------------------
class RulesForm(forms.Form):
"""
Validate rules at the beginning of the event.
"""
checkbox = forms.BooleanField(label="check", widget=forms.CheckboxInput(),
error_messages={'required': "Vous devez accepter " \
"la décharge et le règlement"})
#------------------------------------------------------------------------------
class ConfigForm(forms.Form):
"""
Configuration choice (Individual vs Team)
"""
choice = forms.ChoiceField(label='Choix', choices=CONFIG_CHOICES)
#------------------------------------------------------------------------------
class SubscriptionForm(forms.Form):
required_css_class = 'required'
email = forms.EmailField(label='Email')
category = forms.ChoiceField(label='Catégorie', choices=CATEGORY_CHOICES)
school = forms.CharField(label='Ecole', required=False)
company = forms.CharField(label='Entreprise', required=False)
club = forms.CharField(label='Club', required=False)
def __init__(self, *args, **kwargs):
self.is_a_team = kwargs.pop('is_a_team', False)
self.onsite = kwargs.pop('onsite', True)
super(SubscriptionForm, self).__init__(*args, **kwargs)
if self.is_a_team:
self.nb = 3
self.fields['name'] = forms.CharField(label='Nom')
else:
self.nb = 1
for i in range(self.nb):
self.fields['first_name_%d' % i] = forms.CharField(label='Prénom',
max_length=30)
self.fields['last_name_%d' % i] = forms.CharField(label='Nom',
max_length=30)
self.fields['gender_%d' % i] = forms.ChoiceField(label='Sexe',
choices=GENDER_CHOICES)
self.fields['birthday_%d' % i] = forms.DateField(label='Date de naissance')
self.fields['license_%d' % i] = forms.CharField(label='Numéro de licence',
max_length=30, required=False)
self.fields['federation_%d' % i] = forms.CharField(label='Fédération',
required=False)
self.fields['club_%d' % i] = forms.CharField(label='Club',
required=False)
self.fields['tshirt_%d' % i] = forms.ChoiceField(label='Taille t-shirt', choices=TSHIRT_CHOICES,
                                                         help_text='Les 150 premiers inscrits ont droit à un '
'tshirt technique offert',
required=False)
if self.onsite:
self.fields['num_%d' % i] = forms.IntegerField(label='Numéro de dossard')
if not self.onsite:
self.fields['captcha'] = CaptchaField()
def clean(self):
"""
Clean incoming data (after POST request for example) and check
validation.
"""
cleaned_data = super(SubscriptionForm, self).clean() # call default method
# force initialization
r = [None] * 3
# each People must be unique
for i in range(self.nb):
first_name = self.cleaned_data.get('first_name_%d' % i)
last_name = self.cleaned_data.get('last_name_%d' % i)
birthday = self.cleaned_data.get('birthday_%d' % i)
gender = self.cleaned_data.get('gender_%d' % i)
num = self.cleaned_data.get('num_%d' % i, None)
if not None in (first_name, last_name, birthday, gender):
r[i] = People(first_name=first_name,
last_name=last_name,
birthday=birthday,
gender=gender,
num=num)
if not num:
if self.is_a_team:
r[i].update_num(RANGE_TEAM[i])
else:
r[i].update_num(RANGE_INDIVIDUAL)
self.cleaned_data['num_%d' % i] = r[i].num
r[i].clean()
try:
r[i].validate_unique()
except ValidationError as e:
if dict(e).get('num'):
raise ValidationError(
{
NON_FIELD_ERRORS: [
'Numéro de dossard %s déjà pris' % num
],
}
)
raise ValidationError(
{
NON_FIELD_ERRORS: [
'Le coureur %s %s existe déjà' % (first_name, last_name)
],
}
)
# TODO: improve this (return People object ?)
self.cleaned_data['legal_status_%d' % i] = r[i].is_adult()
# convert name -> id (ForeignKey)
self.cleaned_data['club_%d' % i] = helpers.add_get_club(self.cleaned_data.get('club_%d' % i))
self.cleaned_data['federation_%d' % i] = helpers.add_get_fede(self.cleaned_data.get('federation_%d' % i))
run = Runner(runner_1=r[0], runner_2=r[1], runner_3=r[2],
team=self.cleaned_data.get('name', None),
email=self.cleaned_data.get('email'))
run.clean()
run.validate_unique()
# convert name -> id (ForeignKey)
self.cleaned_data['school'] = helpers.add_get_school(self.cleaned_data.get('school'))
self.cleaned_data['company'] = helpers.add_get_company(self.cleaned_data.get('company'))
return cleaned_data
#------------------------------------------------------------------------------
class PaymentForm(forms.Form):
    # exclude the UNKNOWN payment method from the user-facing form
choices = ((i, name) for i, name in METHOD_PAYMENT_CHOICES if i != constants.UNKNOWN)
method = forms.ChoiceField(label='Méthode', choices=choices)
|
import sys

import numpy as np
class Operators:
weights = -1
def propagate_backward(self, target, gammar, lrate= 0.1, momentum = 0.1):
error = target - self.ret
return error
def reset(self):
return 0
def set_init_weight_scale(self,x):
return
class OpTempo(Operators):
def __init__(self):
return
def propagate_forward(self,*data):
# self.ret = [data] # Warning ! It's a tuple
# return self.ret
s = np.shape(data)
self.ret = np.zeros(s[1])
for i in range(s[0]):
for j in range(s[1]):
self.ret[j] = data[i][j]
return self.ret
class OpPlus(Operators):
def __init__(self):
return
def propagate_forward(self, *data):
s = np.shape(data)
self.ret = np.zeros(s[1])
for i in range(s[0]):
for j in range(s[1]):
self.ret[j] += data[i][j]
return self.ret
class OpAvg(Operators):
def __init__(self):
return
def propagate_forward(self, *data):
s = np.shape(data)
self.ret = np.zeros(s[1])
for i in range(s[0]):
for j in range(s[1]):
self.ret[j] += data[i][j]
self.ret /= s[0]
return self.ret
class OpMinus(Operators):
def __init__(self):
return
def propagate_forward(self, *data):
s = np.shape(data)
self.ret = np.zeros(s[1])
for j in range(s[1]):
self.ret[j] = data[0][j] - data[1][j]
return self.ret
class OpDivide(Operators):
def __init__(self):
return
def propagate_forward(self, *data):
s = np.shape(data)
self.ret = np.zeros(s[1])
for j in range(s[1]):
            if data[1][j] == 0:
                print("division by zero")
                sys.exit()
            self.ret[j] = data[0][j] / data[1][j]  # to be verified
return self.ret
class OpMult(Operators):
def __init__(self):
return
def propagate_forward(self, *data):
s = np.shape(data)
self.ret = np.zeros(s[1])
self.ret += 1
for i in range(s[0]):
for j in range(s[1]):
                self.ret[j] *= data[i][j]  # to be verified
return self.ret
class OpRecomp(Operators):
def __init__(self):
return
def propagate_forward(self, *data):
s = np.shape(data)
self.ret = np.zeros(s[1]*s[0])
for i in range(s[0]):
for j in range(s[1]):
self.ret[i*s[1]+j] = data[i][j]
return self.ret
class OpDecomp(Operators):
def __init__(self,*args):
self.sizes = args
return
def propagate_forward(self, *data):
data = data[0]
current_deepness = 0
self.ret = []
for i in range(len(self.sizes)):
retinter = []
for j in range(self.sizes[i]):
retinter.append(data[current_deepness])
current_deepness += 1
self.ret.append(retinter)
return self.ret
#test
# maliste3 = [[1,2,3,4],[3,4,1,2],[0,5,0,5]]
# maliste2 = [[1,2,3,4],[1,2,3,4]]
# maliste1 = [[1,2,3,4]]
# monplus = OpPlus()
# print "mon plus " ,monplus.propagate_forward(*maliste3)
# monavg = OpAvg()
# print "mon Avg " ,monavg.propagate_forward(*maliste3)
# monmoins = OpMinus()
# print "mon minus " ,monmoins.propagate_forward(*maliste2)
# monfois = OpMult()
# print "mon fois" ,monfois.propagate_forward(*maliste3)
# mondiv = OpDivide()
# print "mon div" ,mondiv.propagate_forward(*maliste2)
# monrecomp = OpRecomp()
# print "mon recomp" ,monrecomp.propagate_forward(*maliste3)
# mondecomp = OpDecomp(2,1,1)
# print "mon decomp", mondecomp.propagate_forward(*maliste1)
|
from boa3.builtin import NeoMetadata, metadata
def Main() -> int:
return 5
@metadata
def standards_manifest() -> NeoMetadata:
meta = NeoMetadata()
meta.supported_standards = ['NEP-17'] # for nep17, boa checks if the standard is implemented
return meta
|
import os
import os.path
import logging
import json
from gii.core import *
from gii.moai.MOAIRuntime \
import \
MOAIRuntime, MOAILuaDelegate, LuaTableProxy, _G, _LuaTable, _LuaObject
signals.register ( 'mock.init' )
##----------------------------------------------------------------##
_MOCK = LuaTableProxy( None )
_MOCK_EDIT = LuaTableProxy( None )
_MOCK_GAME_CONFIG_NAME = 'game_config.json'
def isMockInstance( obj, name ):
if isinstance( obj, _LuaObject ):
clas = _MOCK[name]
assert clas
return _MOCK.isInstance( obj, clas )
else:
return False
def getMockClassName( obj ):
if isinstance( obj, _LuaTable ):
clas = obj.__class
if clas: return clas.__name
return None
##----------------------------------------------------------------##
class MockRuntime( EditorModule ):
def getDependency(self):
return [ 'moai', 'game_preview', 'script_library' ]
def getName(self):
return 'mock'
def onLoad(self):
self.affirmConfigFile()
self.runtime = self.getManager().affirmModule( 'moai' )
self.setupLuaModule()
signals.connect( 'project.load', self.onProjectLoaded )
signals.connect( 'moai.reset', self.onMoaiReset )
signals.connect( 'moai.ready', self.onMoaiReady )
signals.connect( 'project.post_deploy', self.postDeploy )
signals.connect( 'project.save', self.onProjectSave )
self.initMock()
def affirmConfigFile( self ):
proj = self.getProject()
self.configPath = proj.getConfigPath( _MOCK_GAME_CONFIG_NAME )
asetIndexPath = proj.getRelativePath( self.getAssetLibrary().assetIndexPath )
if os.path.exists( self.configPath ):
data = jsonHelper.loadJSON( self.configPath )
#fix invalid field
if data.get( 'asset_library', None ) != asetIndexPath: #fix assetlibrary path
data['asset_library'] = asetIndexPath
jsonHelper.trySaveJSON( data, self.configPath)
return
#create default config
defaultConfigData = {
"asset_library": asetIndexPath ,
"texture_library": "env/config/texture_library.json",
"layers" : [
{ "name" : "default",
"sort" : "priority_ascending",
"clear": False
},
]
}
jsonHelper.trySaveJSON( defaultConfigData, self.configPath )
def onAppReady( self ):
self.postInitMock()
# self.getModule( 'game_preview' ).updateView()
def postDeploy( self, context ):
configPath = context.getPath( 'game_config' )
game = _MOCK.game
data = json.loads( game.saveConfigToString( game ) )
data[ 'asset_library' ] = 'asset/asset_index'
data[ 'texture_library' ] = context.meta.get( 'mock_texture_library', False )
data[ 'script_library' ] = context.meta.get( 'mock_script_library', False )
jsonHelper.trySaveJSON( data, configPath, 'deploy game info' )
def setupLuaModule( self ):
self.runtime.requireModule( 'mock_edit' )
_MOCK._setTarget( _G['mock'] )
_MOCK_EDIT._setTarget( _G['mock_edit'] )
_MOCK.setBasePaths( self.getProject().getPath(), self.getProject().getAssetPath() )
def syncAssetLibrary(self): #TODO:
pass
def initMock( self ):
try:
self.runtime.changeRenderContext( 'game', 100, 100 )
_MOCK.init( self.configPath, True )
except Exception, e:
raise e
def postInitMock( self ):
try:
game = _MOCK.game
game.initCommonDataFromEditor( game )
signals.emit( 'mock.init' )
except Exception, e:
raise e
def onProjectLoaded(self,prj):
self.syncAssetLibrary()
def onProjectSave( self, prj ):
game = _MOCK.game
game.saveConfigToFile( game, self.configPath )
def onMoaiReset(self):
self.setupLuaModule()
def onMoaiReady( self ):
self.initMock()
def getMockEnv( self ):
return _MOCK
def getMockEditEnv( self ):
return _MOCK_EDIT
def getLuaEnv( self ):
return _G
def getComponentTypeList( self ):
pass
def getEntityTypeList( self ):
pass
##----------------------------------------------------------------##
MockRuntime().register()
|
from django.db import models
class Article(models.Model):
title = models.CharField(max_length=50)
slug = models.SlugField()
content = models.TextField()
is_published = models.BooleanField(default=False)
|
import rest_framework.permissions as drf_permissions
from rest_framework import generics
import data
from interfaces import views, permissions
from interfaces.api import serializers
class PublicationList(views.APIListView, views.APICreateView, generics.GenericAPIView):
"""
ProTReND database REST API.
Open programmatic access for the Publications available at ProTReND. Consult here the current list of all publications in ProTReND.
    A publication consists of a manuscript published in a scientific journal, a chapter of a scientific book, or similar. Most publications are associated with regulators, genes, and regulatory interactions, thus supporting regulatory phenomena with experimental evidence.
Note that, we only provide the main details of each publication. The publication can then be consulted using the DOI or PMID.
"""
serializer_class = serializers.PublicationListSerializer
permission_classes = [drf_permissions.IsAuthenticatedOrReadOnly, permissions.SuperUserOrReadOnly]
model = data.Publication
fields = ['protrend_id', 'pmid', 'doi', 'title', 'author', 'year']
class PublicationDetail(views.APIRetrieveView, views.APIUpdateDestroyView, generics.GenericAPIView):
"""
ProTReND database REST API.
Open programmatic access for the Publications available at ProTReND. Consult here all information available over this publication.
    A publication consists of a manuscript published in a scientific journal, a chapter of a scientific book, or similar. Most publications are associated with regulators, genes, and regulatory interactions, thus supporting regulatory phenomena with experimental evidence.
Note that, we only provide the main details of each publication. The publication can then be consulted using the DOI or PMID.
"""
serializer_class = serializers.PublicationDetailSerializer
permission_classes = [drf_permissions.IsAuthenticatedOrReadOnly, permissions.SuperUserOrReadOnly]
model = data.Publication
fields = ['protrend_id', 'pmid', 'doi', 'title', 'author', 'year']
targets = {'tfbs': ['protrend_id'],
'regulatory_interaction': ['protrend_id']}
relationships = {}
|
from bleu_score import compute_bleu
def remove_tags(s):
"""
    Removes tags that should be ignored when computing similarities
:param s: Sentence as a list of words
:return: Same sentence without the tags
"""
# tags = set(['<START>', '<END>', '<UNK>', 0, 1, 2, 3])
tags = set(['<START>', '<END>', '<UNK>'])
filtered_words = []
for word in s:
if word not in tags:
filtered_words.append(word)
return filtered_words
def unigram_overlap(s, t):
"""
Computes the unigram overlap between s and t.
Note: this is not necessarily symmetric as s and t might have different lengths.
Note: ignores multiplicity of words
:param s: sentence 1 as a list of words
:param t: sentence 2 as a list of words
    :return: The unigram overlap
"""
s = remove_tags(s)
t = remove_tags(t)
if len(s) == 0 or len(t) == 0:
return 0
# Don't take into account multiplicity
unique_words = set(s)
t = set(t)
overlap = 0
for w in unique_words:
overlap += 1 if w in t else 0
return overlap / len(unique_words)
def unigram_precision(s, t):
"""
Computes the unigram precision between s and t.
Note: this is not necessarily symmetric as s and t might have different lengths.
:param s: sentence 1 as a list of words
:param t: sentence 2 as a list of words
:return: The unigram precision
"""
s = remove_tags(s)
t = remove_tags(t)
if len(s) == 0 or len(t) == 0:
return 0
# Don't take into account multiplicity
t = set(t)
overlap = 0
for w in s:
overlap += 1 if w in t else 0
return overlap / len(s)
def bigram_overlap(s, t):
s = remove_tags(s)
t = remove_tags(t)
if len(s) == 0 or len(t) == 0:
return 0
bigrams_s = list(zip(s[:-1], s[1:]))
bigrams_t = list(zip(t[:-1], t[1:]))
bigrams_s = set(bigrams_s)
if len(bigrams_s) == 0:
return 0
bigrams_t = set(bigrams_t)
overlap = 0
for bigram in bigrams_s:
overlap += 1 if bigram in bigrams_t else 0
return overlap / len(bigrams_s)
def bigram_precision(s, t):
s = remove_tags(s)
t = remove_tags(t)
if len(s) == 0 or len(t) == 0:
return 0
bigrams_s = list(zip(s[:-1], s[1:]))
bigram_t = list(zip(t[:-1], t[1:]))
if len(bigrams_s) == 0:
return 0
overlap = 0
for bigram in bigrams_s:
overlap += 1 if bigram in bigram_t else 0
return overlap / len(bigrams_s)
def bleu_similarity(s, t):
return compute_bleu([[s]], [t])[0]
if __name__ == "__main__":
print("Test")
s = [1, 4, 140, 36, 6, 4, 31, 28, 4, 163, 2]
t = [1, 4, 140, 36, 6, 4, 31, 28, 4, 3, 2]
print(bleu_similarity(s, t))
print(compute_bleu([[s]], [t])[0])
print("Bigrams")
print(bigram_overlap(s, t))
|
# This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Classes to assist in XML Parsing.
'''
import os
import re
from io import BytesIO, StringIO
from lxml import etree, html
from typing import List, Dict, Tuple
from arjuna.tpi.tracker import track
from arjuna.tpi.helper.arjtype import CIStringDict
def _process_tags(tagsvalue):
tag_list = None
if type(tagsvalue) is str:
tag_list = tagsvalue.strip().split()
else:
tag_list = tagsvalue
return [t.lower()=='any' and '*' or t for t in tag_list]
@track("trace")
class NodeLocator:
'''
Locator for finding an XML Node in an **XmlNode**.
Keyword Arguments:
tags: (Optional) Descendant tags for the node. Can be a string of single or multiple tags or a list/tuple of tags.
text: Partial text content.
attrs: Arbitrary attributes as a dictionary. Use this when the attr names are not valid Python names.
        **attr_kwargs: Arbitrary number of key value pairs representing attribute name and value. The values here will override those in attrs if there is an overlap of name(s).
Raises:
Exception: If neither tag nor an attribute is provided.
Note:
You can use tag and attributes in combination.
Supports nested node finding.
'''
def __init__(self, *, tags: 'strOrSequence'=None, text=None, attrs={}, **attr_kwargs):
if tags is None and text is None and not attrs and not attr_kwargs:
raise Exception("You must provided tags and/or attributes for finding nodes.")
attr_conditions = []
if text:
attr_conditions.append("contains(text(), '{}')".format(text))
lattrs = {}
lattrs.update(attr_kwargs)
if lattrs:
for attr, value in lattrs.items():
if value is None:
attr_conditions.append("@{}".format(attr))
else:
attr_conditions.append("contains(@{}, '{}')".format(attr, value))
attr_str = ""
if attr_conditions:
attr_str = "[{}]".format("and ".join(attr_conditions))
tags = tags and "//".join(_process_tags(tags)) or "*"
prefix = ".//"
self.__xpath = "{}{}{}".format(prefix, tags, attr_str)
def search_node(self, node: 'XmlNode') -> tuple:
'''
Search `XmlNode` objects that match this locator in the provided `XmlNode` object.
'''
return (XmlNode(n) for n in node.xpath(self.__xpath))
def __str__(self):
return "XPath: {}".format(self.__xpath)
def _process_child_html(in_str):
processed = "\n".join([l for l in in_str.splitlines() if l.strip()])
return "\t" + processed
def _remove_empty_lines_from_string(in_str):
return '\n'.join(
[l.strip() for l in in_str.splitlines() if l.strip()]
)
def _empty_or_none(in_str):
if type(in_str) is str and not in_str.strip():
return True
else:
return in_str is None
@track("trace")
class XmlNode:
'''
Represents a single node in a parsed XML.
Arguments:
node: **lxml** Element object.
'''
def __init__(self, node):
self.__node = node
self.__attrs = CIStringDict(self.node.attrib)
@property
def node(self):
'''
Wrapped **lxml** Element
Not supposed to be used directly.
'''
return self.__node
def get_text(self, normalize: bool=False) -> str:
'''
Text of this node.
Keyword Arguments:
normalize: If True, all extra space is trimmed to a single space.
'''
texts = self.texts
if normalize:
text = "".join([l for l in texts if l !="\n"]).strip()
text = " ".join(text.split())
return text
else:
return "".join(texts).strip()
@property
def normalized_text(self) -> str:
'''
Text of this node with all extra space trimmed to a single space.
'''
return self.get_text(normalize=True)
@property
def text(self) -> str:
'''
Unaltered text of the node.
'''
text = self.get_text()
if text is None:
return ""
else:
return text
@property
def links(self) -> tuple:
'''
All links in the XML.
'''
from arjuna.tpi.parser.text import Text
return Text(self.as_str()).links
@property
def unique_links(self) -> tuple:
'''
All unique links in the XML.
'''
from arjuna.tpi.parser.text import Text
return Text(self.as_str()).unique_links
def find_links(self, *, unique=True, contain=""):
from arjuna.tpi.parser.text import Text
return Text(self.as_str()).find_links(unique=unique, contain=contain)
@property
def texts(self) -> list:
'''
List of Texts of the node.
Note:
Multiple texts are stored separately.
'''
return self.node.xpath(".//text()")
def get_inner_xml(self, normalize=False) -> str:
'''
Inner XML of this node.
Keyword Arguments:
normalize: If True, empty lines are removed between children nodes.
'''
def same(i):
return i
processor = normalize and _process_child_html or same
out = [
processor(etree.tostring(c, encoding='unicode'))
for c in list(self.__node.iterchildren())
]
return "\n".join(out).strip()
@property
def inner_xml(self) -> str:
'''
Unaltered inner XML of this node
'''
return self.get_inner_xml()
@property
def normalized_inner_xml(self) -> str:
'''
Normalized inner XML of this node, with empty lines removed between children nodes.
'''
return self.get_inner_xml(normalize=True)
def remove_all_children(self) -> None:
'''
Remove all children nodes from this node.
'''
for child in list(self.__node): self.__node.remove(child)
def as_str(self, normalize=False) -> str:
'''
String representation of this node.
        normalize: If True, all new lines are removed and more than one consecutive space is converted to a single space.
'''
true_source = etree.tostring(self.node, encoding='unicode')
if not normalize:
return true_source
else:
ret_source = ' '.join(true_source.splitlines())
return re.sub(r"\s+", " ", ret_source)
@property
def source(self) -> str:
'''
        Unaltered string representation of this node.
'''
return self.as_str()
@property
def normalized_source(self) -> str:
'''
        String representation of this node with all new lines removed and more than one consecutive space converted to a single space.
'''
return self.as_str(normalize=True)
@property
def tag(self) -> str:
'''
Tag of the node.
'''
return self.node.tag
@property
def children(self) -> Tuple['XmlNode']:
'''
All Children of this node as a Tuple of XmlNodes
'''
return (XmlNode(c) for c in list(self.node))
@property
def parent(self) -> 'XmlNode':
'''
Parent XmlNode
'''
return XmlNode(self.node.getparent())
@property
def preceding_sibling(self) -> 'XmlNode':
'''
        The XmlNode before this node at the same hierarchical level.
'''
return XmlNode(self.node.getprevious())
@property
def following_sibling(self) -> 'XmlNode':
'''
        The XmlNode after this node at the same hierarchical level.
'''
return XmlNode(self.node.getnext())
@property
def attrs(self) -> CIStringDict:
'''
All Attributes of this node as a dictionary.
'''
return self.__attrs
def attr(self, name) -> str:
'''
Value of an attribute of this node.
'''
return self.__attrs[name]
@property
def value(self) -> str:
'''
        Value of the 'value' attribute of this node.
'''
return self.attr("value")
def has_attr(self, name):
'''
Check if an attribute is present.
'''
return name in self.__attrs
def __xpath(self, xpath):
if not xpath.startswith("."):
return "." + xpath
else:
return xpath
def findall_with_xpath(self, xpath) -> List['XmlNode']:
'''
Find all XmlNodes that match an XPath.
'''
return [XmlNode(n) for n in self.node.xpath(self.__xpath(xpath))]
def find_with_xpath(self, xpath, position=1):
'''
Find nth XmlNode that matches an XPath.
Args:
xpath: XPath string
position: XPath index. Default is 1.
'''
try:
all = self.findall_with_xpath(xpath)
return all[position-1]
except IndexError as e:
raise Exception(f"No node match at position >>{position}<< for xpath >>{xpath}<< in xml >>{self}<<")
def findall(self, *node_locators, stop_when_matched: bool=False) -> List['XmlNode']:
'''
Find all XmlNodes that match one of more `NodeLocator` s.
Args:
*node_locators: One or more `NodeLocator` s
Keyword Arguments:
stop_when_matched: If True, the call returns nodes found by the first `NodeLocator` that locates one or more nodes. Default is False.
Returns:
List of `XmlNode` s. In case of no match, empty list is returned.
'''
out = []
for locator in node_locators:
try:
nodes = list(locator.search_node(self.node))
except:
continue
else:
out.extend(nodes)
if stop_when_matched:
if nodes:
break
return out
def find(self, *node_locators, strict: bool=False) -> 'XmlNode':
'''
Find first `XmlNode` that match one of more `NodeLocator` s.
Args:
*node_locators: One or more `NodeLocator` s
Keyword Arguments:
strict: If True, the call raises an exception if element is not found, else returns None
'''
matches = self.findall(*node_locators, stop_when_matched=True)
if matches:
return matches[0]
else:
if strict:
raise Exception("Element could not be found with Node Locators: >>{}<<".format([str(n) for n in node_locators]))
else:
return None
def find_keyvalue_texts(self, key_locator, value_locator) -> Tuple[str, str]:
'''
Returns texts of first XmlNodes for a pair of `NodeLocator` s
Args:
key_locator: `NodeLocator` (for key)
            value_locator: `NodeLocator` (for value)
Returns:
2-element tuple containing the text strings.
'''
key = self.find(key_locator).text
value = self.find(value_locator).text
return key,value
def __str__(self):
return self.as_str()
def clone(self) -> 'XmlNode':
'''
Create a clone of this XmlNode object.
'''
return Xml.from_str(str(self))
class Xml:
'''
Helper class to create XmlNode objects.
'''
@classmethod
def from_str(cls, xml_str):
'''
Create an `XmlNode` from a string.
'''
lenient_parser = etree.XMLParser(encoding='utf-8', recover=True)
return XmlNode(etree.parse(BytesIO(xml_str.encode('utf-8')), lenient_parser).getroot())
@classmethod
def from_file(cls, file_path: str) -> XmlNode:
'''
Creates an `XmlNode` from file.
Arguments:
            file_path: Absolute path of the XML file.
Returns:
Arjuna's `XmlNode` object
'''
with open(file_path, 'r') as f:
return cls.from_str(f.read())
@classmethod
def from_lxml_element(cls, element, clone=False) -> XmlNode:
'''
Create an `XmlNode` from an `lxml` element.
Arguments:
element: `lxml` element
'''
        if clone:
            return XmlNode(element).clone()
        else:
            return XmlNode(element)
@classmethod
def node_locator(cls, *, tags: 'strOrSequence'=None, text=None, attrs={}, **attr_kwargs):
'''
Create a locator for finding an XML Node in an **XmlNode**.
Keyword Arguments:
tags: (Optional) Descendant tags for the node. Can be a string of single or multiple tags or a list/tuple of tags.
text: Partial text content.
attrs: Arbitrary attributes as a dictionary. Use this when the attr names are not valid Python names.
        **attr_kwargs: Arbitrary number of key value pairs representing attribute name and value. The values here will override those in attrs if there is an overlap of name(s).
Raises:
Exception: If neither tag nor an attribute is provided.
Note:
You can use tag and attributes in combination.
Supports nested node finding.
'''
return NodeLocator(tags=tags, text=text, attrs=attrs, **attr_kwargs)
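# Usage sketch (illustrative only, built on the classes defined above):
#
#     node = Xml.from_str("<items><item id='1'>First</item><item id='2'>Second</item></items>")
#     locator = Xml.node_locator(tags='item', id='2')
#     match = node.find(locator)
#     print(match.text)        # Second
#     print(match.attr('id'))  # 2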
|
import torch
import numpy as np
from pyannote.audio.features import RawAudio
from pyannote.core import SlidingWindowFeature
from graphs.models.base import BaseModel
class SincNetPyAnnote(BaseModel):
def __init__(self, device, weights='emb_voxceleb', sample_rate=16000, **kwargs):
super(SincNetPyAnnote, self).__init__(device)
self.model = torch.hub.load('pyannote/pyannote-audio', weights)
self.model.device = self.device
self.sample_rate = sample_rate
def forward(self, x):
# embeddings = torch.zeros(
# x.shape[0], 512).float().to(self.device)
for i in range(0, x.shape[0]):
# x = x[i].cpu().detach().numpy()
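            # NOTE: x is overwritten below, so this loop effectively assumes a
            # batch size of 1; the commented-out lines sketch a batched variant.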
x = x.squeeze(0).cpu().detach().numpy()
feature_extraction_ = RawAudio(sample_rate=self.sample_rate)
features = SlidingWindowFeature(
feature_extraction_.get_features(x, self.sample_rate),
feature_extraction_.sliding_window,
)
x = self.model.model_.slide(
features,
self.model.chunks_,
batch_size=self.model.batch_size,
device=self.model.device,
return_intermediate=self.model.return_intermediate,
progress_hook=self.model.progress_hook,
).data
x = np.mean(x, axis=0)
x = torch.FloatTensor(x).unsqueeze(0)
# embeddings[i] = x
embeddings = x
return embeddings
|
import inspect
from ...utils import call_main
def add_parser(sub_parser, raw):
desc = """
Display the current git branch name.
$ m git branch
master
"""
sub_parser.add_parser(
'branch',
help='display the current git branch',
formatter_class=raw,
description=inspect.cleandoc(desc),
)
def run(_):
# pylint: disable=import-outside-toplevel
from .... import git
return call_main(git.get_branch, [], print_raw=True)
|
# TestSwiftExpressionObjCContext.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2018 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftExpressionObjCContext(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
@skipUnlessDarwin
@swiftTest
def test(self):
self.build()
target, _, _, _ = lldbutil.run_to_source_breakpoint(
self, "break here", lldb.SBFileSpec('main.m'),
extra_images=['Foo'])
# This is expected to fail because we can't yet import ObjC
# modules into a Swift context.
self.expect("expr -lang Swift -- Bar()", "failure",
substrs=["cannot find 'Bar'"],
error=True)
self.expect("expr -lang Swift -- (1, 2, 3)",
"context-less swift expression works",
substrs=["(Int, Int, Int)"])
|
"""``cubi-tk sea-snap``: tools for supporting the Sea-snap pipeline.
Available Commands
------------------
``check-irods``
Check consistency of sample info, blueprint and files on SODAR.
``itransfer-raw-data``
Transfer raw data from ``work/input_links`` directory of ``ngs_mapping``.
``itransfer-results``
Transfer results and logs from ``output`` directory.
``write-sample-info``
Pull information from SODAR, parse and write sample info.
More Information
----------------
- Also see ``cubi-tk sea-snap`` :ref:`CLI documentation <cli_main>` and ``cubi-tk sea-snap --help`` for more information.
"""
import argparse
from ..common import run_nocmd
from .itransfer_raw_data import setup_argparse as setup_argparse_itransfer_raw_data
from .itransfer_results import setup_argparse as setup_argparse_itransfer_mapping_results
# from .pull_isa import setup_argparse as setup_argparse_pull_isa
from .working_dir import setup_argparse as setup_argparse_working_dir
from .write_sample_info import setup_argparse as setup_argparse_write_sample_info
from .check_irods import setup_argparse as setup_argparse_check_irods
def setup_argparse(parser: argparse.ArgumentParser) -> None:
"""Main entry point for isa-tpl command."""
subparsers = parser.add_subparsers(dest="sea_snap_cmd")
setup_argparse_itransfer_raw_data(
subparsers.add_parser("itransfer-raw-data", help="Transfer FASTQs into iRODS landing zone")
)
setup_argparse_itransfer_mapping_results(
subparsers.add_parser(
"itransfer-results", help="Transfer mapping results into iRODS landing zone"
)
)
setup_argparse_working_dir(
subparsers.add_parser("working-dir", help="Create working directory")
)
setup_argparse_write_sample_info(
subparsers.add_parser("write-sample-info", help="Generate sample info file")
)
setup_argparse_check_irods(
subparsers.add_parser(
"check-irods", help="Check consistency of sample info, blueprint and files on SODAR"
)
)
def run(args, parser, subparser):
"""Main entry point for sea-snap command."""
if not args.sea_snap_cmd: # pragma: nocover
return run_nocmd(args, parser, subparser)
else:
return args.sea_snap_cmd(args, parser, subparser)
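# Example invocations, using the sub commands registered above (all further
# arguments omitted; see the ``--help`` of each sub command for details):
#
#     cubi-tk sea-snap working-dir --help
#     cubi-tk sea-snap write-sample-info --help
#     cubi-tk sea-snap check-irods --help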
|
import os
import csv
import threading
from .classes import AccessNetwork
from .path import single_source_shortest_path
from .consts import MAX_LABEL_COST, MIN_TIME_BUDGET, \
BUDGET_TIME_INTVL, MAX_TIME_BUDGET
__all__ = ['evaluate_accessibility']
def _get_interval_id(t):
""" return interval id in predefined time budget intervals
[0, MIN_TIME_BUDGET],
(MIN_TIME_BUDGET + (i-1)*BUDGET_TIME_INTVL, MIN_TIME_BUDGET + i*BUDGET_TIME_INTVL]
where, i is integer and i>=1
"""
if t < MIN_TIME_BUDGET:
return 0
    if ((t-MIN_TIME_BUDGET) % BUDGET_TIME_INTVL) == 0:
        return int((t-MIN_TIME_BUDGET) // BUDGET_TIME_INTVL)
    return int((t-MIN_TIME_BUDGET) // BUDGET_TIME_INTVL) + 1
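# Example mapping, assuming MIN_TIME_BUDGET = 10 and BUDGET_TIME_INTVL = 5 in
# consts (values are an assumption, not checked here):
#     t = 8 -> 0, t = 15 -> 1, t = 16 -> 2, t = 20 -> 2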
def _update_min_travel_time(an, at, min_travel_times, time_dependent, demand_period_id):
an.update_generalized_link_cost(at, time_dependent, demand_period_id)
at_str = at.get_type_str()
max_min = 0
for c in an.get_centroids():
node_id = c.get_node_id()
zone_id = c.get_zone_id()
single_source_shortest_path(an, node_id)
for c_ in an.get_centroids():
if c_ == c:
continue
node_no = c_.get_node_no()
to_zone_id = c_.get_zone_id()
min_tt = an.get_node_label_cost(node_no)
# this function will dramatically slow down the whole process
min_dist = an.get_sp_distance(node_no)
min_travel_times[(zone_id, to_zone_id, at_str)] = min_tt, min_dist
if min_tt < MAX_LABEL_COST and max_min < min_tt:
max_min = min_tt
return max_min
def _output_accessibility(min_travel_times, zones, mode='p', output_dir='.'):
""" output accessibility for each OD pair (i.e., travel time) """
with open(output_dir+'/accessibility.csv', 'w', newline='') as f:
headers = ['o_zone_id', 'o_zone_name',
'd_zone_id', 'd_zone_name',
'accessibility', 'distance',
'geometry']
writer = csv.writer(f)
writer.writerow(headers)
# for multimodal case, find the minimum travel time
# under mode 'p' (i.e., auto)
for k, v in min_travel_times.items():
# k = (from_zone_id, to_zone_id, at_type_str)
if k[2] != mode:
continue
            # output accessibility
# no exception handlings here as min_travel_times is constructed
# directly using an.get_centroids()
coord_oz = zones[k[0]]
coord_dz = zones[k[1]]
geo = 'LINESTRING (' + coord_oz + ', ' + coord_dz + ')'
line = [k[0], '', k[1], '', v[0], v[1], geo]
writer.writerow(line)
if output_dir == '.':
print('\ncheck accessibility.csv in '
+os.getcwd()+' for accessibility matrix')
else:
print('\ncheck accessibility.csv in '
+os.path.join(os.getcwd(), output_dir)
+' for accessibility matrix')
def _output_accessibility_aggregated(min_travel_times, interval_num,
zones, ats, output_dir='.'):
""" output aggregated accessibility matrix for each agent type """
with open(output_dir+'/accessibility_aggregated.csv', 'w', newline='') as f:
time_budgets = [
'TT_'+str(MIN_TIME_BUDGET+BUDGET_TIME_INTVL*i) for i in range(interval_num)
]
headers = ['zone_id', 'geometry', 'mode']
headers.extend(time_budgets)
writer = csv.writer(f)
writer.writerow(headers)
# calculate accessibility
for oz, coord in zones.items():
if oz == -1:
continue
for atype in ats:
at_str = atype.get_type_str()
# number of accessible zones from oz for each agent type
counts = [0] * interval_num
for dz in zones.keys():
if (oz, dz, at_str) not in min_travel_times.keys():
continue
min_tt = min_travel_times[(oz, dz, at_str)][0]
if min_tt >= MAX_LABEL_COST:
continue
id = _get_interval_id(min_tt)
while id < interval_num:
counts[id] += 1
id += 1
                # output accessibility
geo = 'POINT (' + coord + ')'
line = [oz, geo, atype.get_type_str()]
line.extend(counts)
writer.writerow(line)
if output_dir == '.':
print('\ncheck accessibility_aggregated.csv in '
+os.getcwd()+' for aggregated accessibility matrix')
else:
print('\ncheck accessibility_aggregated.csv in '
+os.path.join(os.getcwd(), output_dir)
+' for aggregated accessibility matrix')
def evaluate_accessibility(ui,
multimodal=True,
mode='p',
time_dependent=False,
demand_period_id=0,
output_dir='.'):
""" perform accessibility evaluation for a target mode or more
Parameters
----------
ui
network object generated by pg.read_network()
multimodal
True or False. Its default value is True. It will only affect the
output to accessibility_aggregated.csv.
        If True, the accessibility evaluation will be conducted
for all the modes defined in settings.yml. The number of accessible
zones from each zone under each defined mode given a budget time (up
to 240 minutes) will be outputted to accessibility_aggregated.csv.
        If False, the accessibility evaluation will only be conducted against the
target mode. The number of accessible zones from each zone under the
target mode given a budget time (up to 240 minutes) will be outputted
to accessibility_aggregated.csv.
mode
target mode with its default value as 'p' (i.e., mode auto). It can be
either agent type or its name. For example, 'w' and 'walk' are
equivalent inputs.
time_dependent
True or False. Its default value is False.
If True, the accessibility will be evaluated using the period link
free-flow travel time (i.e., VDF_fftt). In other words, the
accessibility is time-dependent.
If False, the accessibility will be evaluated using the link length and
the free flow travel speed of each mode.
demand_period_id
The sequence number of demand period listed in demand_periods in
settings.yml. demand_period_id of the first demand_period is 0.
Use it with time_dependent when there are multiple demand periods. Its
default value is 0.
output_dir
The directory path where accessibility_aggregated.csv and
        accessibility.csv are output. The default is the current working
        directory (CWD).
Outputs
-------
accessibility_aggregated.csv
aggregated accessibility as the number of accessible zones from each
zone for a target mode or any mode defined in settings.yml given a
budget time (up to 240 minutes).
accessibility.csv:
accessibility between each OD pair in terms of free flow travel time.
"""
base = ui._base_assignment
an = AccessNetwork(base.network)
ats = None
# map zone id to zone centroid coordinate
zones = {}
for c in an.get_centroids():
zones[c.get_zone_id()] = c.get_coordinate()
max_min = 0
min_travel_times = {}
if multimodal:
ats = base.get_agent_types()
for at in ats:
an.set_target_mode(at.get_name())
max_min_ = _update_min_travel_time(an,
at,
min_travel_times,
time_dependent,
demand_period_id)
if max_min_ > max_min:
max_min = max_min_
else:
at_name, at_str = base._convert_mode(mode)
an.set_target_mode(at_name)
at = base.get_agent_type(at_str)
max_min = _update_min_travel_time(an,
at,
min_travel_times,
time_dependent,
demand_period_id)
ats = [at]
interval_num = _get_interval_id(min(max_min, MAX_TIME_BUDGET)) + 1
t = threading.Thread(
target=_output_accessibility,
args=(min_travel_times, zones, mode, output_dir))
t.start()
t = threading.Thread(
target=_output_accessibility_aggregated,
args=(min_travel_times, interval_num, zones, ats, output_dir)
)
t.start()
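# ----------------------------------------------------------------------------
# Usage sketch (assumption: this module ships inside the path4gmns package and
# the top-level API exposes read_network(), as referenced in the docstring of
# evaluate_accessibility above):
#
#     import path4gmns as pg
#
#     ui = pg.read_network()
#     pg.evaluate_accessibility(ui, multimodal=False, mode='w')
#
# This writes accessibility.csv and accessibility_aggregated.csv to the current
# working directory.
# ----------------------------------------------------------------------------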
|
"""
Modern, PEP 517 compliant build backend for building Python packages with
extensions built using CMake.
"""
__version__ = '0.0.11a0'
|
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import torch
from torch import tensor
from madminer.utils.ml.models.ratio import DenseSingleParameterizedRatioModel, DenseDoublyParameterizedRatioModel
logger = logging.getLogger(__name__)
def evaluate_flow_model(model, thetas=None, xs=None, evaluate_score=False, run_on_gpu=True, double_precision=False):
# CPU or GPU?
run_on_gpu = run_on_gpu and torch.cuda.is_available()
device = torch.device("cuda" if run_on_gpu else "cpu")
dtype = torch.double if double_precision else torch.float
# Balance theta0 and theta1
n_thetas = len(thetas)
# Prepare data
n_xs = len(xs)
thetas = torch.stack([tensor(thetas[i % n_thetas], requires_grad=True) for i in range(n_xs)])
xs = torch.stack([tensor(i) for i in xs])
model = model.to(device, dtype)
thetas = thetas.to(device, dtype)
xs = xs.to(device, dtype)
# Evaluate estimator with score:
if evaluate_score:
model.eval()
_, log_p_hat, t_hat = model.log_likelihood_and_score(thetas, xs)
# Copy back tensors to CPU
if run_on_gpu:
log_p_hat = log_p_hat.cpu()
t_hat = t_hat.cpu()
log_p_hat = log_p_hat.detach().numpy().flatten()
t_hat = t_hat.detach().numpy().flatten()
# Evaluate estimator without score:
else:
with torch.no_grad():
model.eval()
_, log_p_hat = model.log_likelihood(thetas, xs)
# Copy back tensors to CPU
if run_on_gpu:
log_p_hat = log_p_hat.cpu()
log_p_hat = log_p_hat.detach().numpy().flatten()
t_hat = None
return log_p_hat, t_hat
def evaluate_ratio_model(
model,
method_type=None,
theta0s=None,
theta1s=None,
xs=None,
evaluate_score=False,
run_on_gpu=True,
double_precision=False,
return_grad_x=False,
):
# CPU or GPU?
run_on_gpu = run_on_gpu and torch.cuda.is_available()
device = torch.device("cuda" if run_on_gpu else "cpu")
dtype = torch.double if double_precision else torch.float
# Figure out method type
if method_type is None:
        if isinstance(model, DenseSingleParameterizedRatioModel):
            method_type = "parameterized_ratio"
        elif isinstance(model, DenseDoublyParameterizedRatioModel):
            method_type = "double_parameterized_ratio"
else:
raise RuntimeError("Cannot infer method type automatically")
# Balance theta0 and theta1
if theta1s is None:
n_thetas = len(theta0s)
else:
n_thetas = max(len(theta0s), len(theta1s))
if len(theta0s) > len(theta1s):
theta1s = np.array([theta1s[i % len(theta1s)] for i in range(len(theta0s))])
elif len(theta0s) < len(theta1s):
theta0s = np.array([theta0s[i % len(theta0s)] for i in range(len(theta1s))])
# Prepare data
n_xs = len(xs)
theta0s = torch.stack([tensor(theta0s[i % n_thetas], requires_grad=evaluate_score) for i in range(n_xs)])
if theta1s is not None:
theta1s = torch.stack([tensor(theta1s[i % n_thetas], requires_grad=evaluate_score) for i in range(n_xs)])
xs = torch.stack([tensor(i) for i in xs])
model = model.to(device, dtype)
theta0s = theta0s.to(device, dtype)
if theta1s is not None:
theta1s = theta1s.to(device, dtype)
xs = xs.to(device, dtype)
# Evaluate ratio estimator with score or x gradients:
if evaluate_score or return_grad_x:
model.eval()
if method_type == "parameterized_ratio":
if return_grad_x:
s_hat, log_r_hat, t_hat0, x_gradients = model(
theta0s, xs, return_grad_x=True, track_score=evaluate_score, create_gradient_graph=False
)
else:
s_hat, log_r_hat, t_hat0 = model(theta0s, xs, track_score=evaluate_score, create_gradient_graph=False)
x_gradients = None
t_hat1 = None
elif method_type == "double_parameterized_ratio":
if return_grad_x:
s_hat, log_r_hat, t_hat0, t_hat1, x_gradients = model(
theta0s, theta1s, xs, return_grad_x=True, track_score=evaluate_score, create_gradient_graph=False
)
else:
s_hat, log_r_hat, t_hat0, t_hat1 = model(
theta0s, theta1s, xs, track_score=evaluate_score, create_gradient_graph=False
)
x_gradients = None
else:
raise ValueError("Unknown method type %s", method_type)
# Copy back tensors to CPU
if run_on_gpu:
s_hat = s_hat.cpu()
log_r_hat = log_r_hat.cpu()
if t_hat0 is not None:
t_hat0 = t_hat0.cpu()
if t_hat1 is not None:
t_hat1 = t_hat1.cpu()
# Get data and return
s_hat = s_hat.detach().numpy().flatten()
log_r_hat = log_r_hat.detach().numpy().flatten()
if t_hat0 is not None:
t_hat0 = t_hat0.detach().numpy()
if t_hat1 is not None:
t_hat1 = t_hat1.detach().numpy()
# Evaluate ratio estimator without score:
else:
with torch.no_grad():
model.eval()
if method_type == "parameterized_ratio":
s_hat, log_r_hat, _ = model(theta0s, xs, track_score=False, create_gradient_graph=False)
elif method_type == "double_parameterized_ratio":
s_hat, log_r_hat, _, _ = model(theta0s, theta1s, xs, track_score=False, create_gradient_graph=False)
else:
raise ValueError("Unknown method type %s", method_type)
# Copy back tensors to CPU
if run_on_gpu:
s_hat = s_hat.cpu()
log_r_hat = log_r_hat.cpu()
# Get data and return
s_hat = s_hat.detach().numpy().flatten()
log_r_hat = log_r_hat.detach().numpy().flatten()
t_hat0, t_hat1 = None, None
if return_grad_x:
return s_hat, log_r_hat, t_hat0, t_hat1, x_gradients
return s_hat, log_r_hat, t_hat0, t_hat1
def evaluate_local_score_model(model, xs=None, run_on_gpu=True, double_precision=False, return_grad_x=False):
# CPU or GPU?
run_on_gpu = run_on_gpu and torch.cuda.is_available()
device = torch.device("cuda" if run_on_gpu else "cpu")
dtype = torch.double if double_precision else torch.float
# Prepare data
xs = torch.stack([tensor(i) for i in xs])
model = model.to(device, dtype)
xs = xs.to(device, dtype)
# Evaluate networks
if return_grad_x:
model.eval()
t_hat, x_gradients = model(xs, return_grad_x=True)
else:
with torch.no_grad():
model.eval()
t_hat = model(xs)
x_gradients = None
# Copy back tensors to CPU
if run_on_gpu:
t_hat = t_hat.cpu()
if x_gradients is not None:
x_gradients = x_gradients.cpu()
# Get data and return
t_hat = t_hat.detach().numpy()
if return_grad_x:
x_gradients = x_gradients.detach().numpy()
return t_hat, x_gradients
return t_hat
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pasta
import pytest
from sagemaker.cli.compatibility.v2.modifiers import training_input
from tests.unit.sagemaker.cli.compatibility.v2.modifiers.ast_converter import ast_call, ast_import
@pytest.fixture
def constructors():
return (
"sagemaker.session.s3_input(s3_data='s3://a')",
"sagemaker.inputs.s3_input(s3_data='s3://a')",
"sagemaker.s3_input(s3_data='s3://a')",
"session.s3_input(s3_data='s3://a')",
"inputs.s3_input(s3_data='s3://a')",
"s3_input(s3_data='s3://a')",
)
@pytest.fixture
def import_statements():
return (
"from sagemaker.session import s3_input",
"from sagemaker.inputs import s3_input",
"from sagemaker import s3_input",
)
def test_constructor_node_should_be_modified(constructors):
modifier = training_input.TrainingInputConstructorRefactor()
for constructor in constructors:
node = ast_call(constructor)
assert modifier.node_should_be_modified(node)
def test_constructor_node_should_be_modified_random_call():
modifier = training_input.TrainingInputConstructorRefactor()
node = ast_call("FileSystemInput()")
assert not modifier.node_should_be_modified(node)
def test_constructor_modify_node():
modifier = training_input.TrainingInputConstructorRefactor()
node = ast_call("s3_input(s3_data='s3://a')")
modifier.modify_node(node)
assert "TrainingInput(s3_data='s3://a')" == pasta.dump(node)
node = ast_call("sagemaker.s3_input(s3_data='s3://a')")
modifier.modify_node(node)
assert "sagemaker.TrainingInput(s3_data='s3://a')" == pasta.dump(node)
node = ast_call("session.s3_input(s3_data='s3://a')")
modifier.modify_node(node)
assert "inputs.TrainingInput(s3_data='s3://a')" == pasta.dump(node)
node = ast_call("inputs.s3_input(s3_data='s3://a')")
modifier.modify_node(node)
assert "inputs.TrainingInput(s3_data='s3://a')" == pasta.dump(node)
node = ast_call("sagemaker.inputs.s3_input(s3_data='s3://a')")
modifier.modify_node(node)
assert "sagemaker.inputs.TrainingInput(s3_data='s3://a')" == pasta.dump(node)
node = ast_call("sagemaker.session.s3_input(s3_data='s3://a')")
modifier.modify_node(node)
assert "sagemaker.inputs.TrainingInput(s3_data='s3://a')" == pasta.dump(node)
def test_import_from_node_should_be_modified_training_input(import_statements):
modifier = training_input.TrainingInputImportFromRenamer()
for statement in import_statements:
node = ast_import(statement)
assert modifier.node_should_be_modified(node)
def test_import_from_node_should_be_modified_random_import():
modifier = training_input.TrainingInputImportFromRenamer()
node = ast_import("from sagemaker.session import Session")
assert not modifier.node_should_be_modified(node)
def test_import_from_modify_node():
modifier = training_input.TrainingInputImportFromRenamer()
node = ast_import("from sagemaker import s3_input")
modifier.modify_node(node)
expected_result = "from sagemaker import TrainingInput"
assert expected_result == pasta.dump(node)
node = ast_import("from sagemaker.inputs import s3_input as training_input")
modifier.modify_node(node)
expected_result = "from sagemaker.inputs import TrainingInput as training_input"
assert expected_result == pasta.dump(node)
node = ast_import("from sagemaker.session import s3_input as training_input")
modifier.modify_node(node)
expected_result = "from sagemaker.inputs import TrainingInput as training_input"
assert expected_result == pasta.dump(node)
|
def read_to_string(filename):
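    """Return the contents of ``filename`` as a double-quoted string literal.
    Every chunk of 7 input bytes (56 bits, zero-padded at the end of the file)
    is re-packed into 8 seven-bit characters; backslashes, quotes, newlines and
    carriage returns are escaped so the result remains a valid quoted literal.
    """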
chars = ['"']
with open(filename, "rb") as file:
while True:
data = file.read(7)
if not data:
break
if len(data) < 7:
data = data + b"\0" * (7 - len(data))
i = int.from_bytes(data, "big")
# print("i:", *("{:08b}".format(d) for d in data))
sevens = []
for _ in range(8):
sevens.append(i & 127)
i >>= 7
sevens.reverse()
# print("s:", *("{:07b}".format(s) for s in sevens))
char_chunk = ("".join(chr(s) for s in sevens)
.replace("\\", "\\\\")
.replace("\n", "\\n")
.replace("\r", "\\r")
.replace('"', '\\"'))
chars.append(char_chunk)
chars.append('"')
return "".join(chars)
if __name__ == '__main__':
import sys
with open(sys.argv[2], "w") as file:
print(sys.argv[3], "=", read_to_string(sys.argv[1]), ";",
file=file, sep="", end="")
|
from pywatson.question.evidence_request import EvidenceRequest
from pywatson.question.filter import Filter
from pywatson.question.watson_question import WatsonQuestion
class TestQuestion(object):
"""Unit tests for the WatsonQuestion class"""
def test___init___basic(self, questions):
"""Question is constructed properly with just question_text"""
question = WatsonQuestion(questions[0]['questionText'])
assert question.question_text == questions[0]['questionText']
def test___init___complete(self, questions):
"""Question is constructed properly with all parameters provided"""
q = questions[1]
er = q['evidenceRequest']
evidence_request = EvidenceRequest(er['items'], er['profile'])
filters = tuple(Filter(f['filterType'], f['filterName'], f['values']) for f in q['filters'])
question = WatsonQuestion(
question_text=q['questionText'],
answer_assertion=q['answerAssertion'],
category=q['category'],
context=q['context'],
evidence_request=evidence_request,
filters=filters,
formatted_answer=q['formattedAnswer'],
items=q['items'],
lat=q['lat'],
passthru=q['passthru'],
synonym_list=q['synonyms'])
assert question.question_text == q['questionText']
assert question.answer_assertion == q['answerAssertion']
assert question.category == q['category']
assert question.context == q['context']
assert question.evidence_request == evidence_request
assert question.filters == filters
|
from KratosMultiphysics import *
from KratosMultiphysics.sympy_fe_utilities import *
from sympy import *
import pprint
def computeTau(params):
print("\nCompute Stabilization Matrix\n")
dim = params["dim"] # Spatial dimensions
Tau = zeros(dim+2,dim+2) # Stabilization Matrix
tau1 = Symbol('tau1')
tau2 = Symbol('tau2')
tau3 = Symbol('tau3')
Tau[0,0] = tau1
for i in range (0,dim):
Tau[i+1,i+1] = tau2
Tau[dim+1,dim+1] = tau3
return(Tau)
def computeTauOnGaussPoint(params, U_gauss):
print("\t- Compute Stabilization Matrix on Gauss pt.")
# Calculate auxiliary values
rho_g = U_gauss[0]
e_t_g = U_gauss[params["dim"] + 1]
norm_v_squared = 0.0
for d in range(params["dim"]):
norm_v_squared += (U_gauss[d + 1] * U_gauss[d + 1]) / (rho_g * rho_g)
norm_v = sqrt(norm_v_squared)
nu = params["mu"] / rho_g
alpha = params["lambda"] / (rho_g * params["gamma"] * params["c_v"])
# Calculate sound speed
c = sqrt(params["gamma"] * (params["gamma"] -1) * ((e_t_g / rho_g) - ((1.0 / 2.0) * norm_v_squared)))
# Calculate stabilization constants
tau1_inv = (params["stab_c2"] * (norm_v + c)) / params["h"]
tau2_inv = ((params["stab_c1"] / (params["h"] * params["h"])) * (4.0 * nu / 3.0)) + tau1_inv
tau3_inv = (params["stab_c1"] * alpha / (params["h"] * params["h"])) + tau1_inv
# Save the obtained values in the stabilization matrix
Tau = zeros(params["dim"] + 2, params["dim"] + 2)
Tau[0,0] = 1.0 / tau1_inv
for i in range (params["dim"]):
Tau[i + 1, i + 1] = 1.0 / tau2_inv
Tau[params["dim"] + 1, params["dim"] + 1] = 1.0 / tau3_inv
return(Tau)
def printTau(Tau, params):
print("The Stabilization term matrix is:\n")
dim = params["dim"]
for i in range (0,dim+2):
for j in range (0,dim+2):
print("Tau[",i,",",j,"]=",Tau[i,j],"\n")
return 0
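# Minimal usage sketch (illustrative, not part of the original utility): computeTau
# only needs the spatial dimension, so the 2D symbolic matrix can be built and
# printed directly.
if __name__ == '__main__':
    example_params = {"dim": 2}
    printTau(computeTau(example_params), example_params)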
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
m = int(input("Enter the number of the season "))
if m < 1 or m > 4:
print("Illegal value of m", file=sys.stderr)
exit(1)
if m == 1:
print("The winter months are: December, January and February")
elif m == 2:
print("The spring months are: March, April and May")
elif m == 3:
print("The summer months are: June, July and August")
else:
print("The autumn months are: September, October and November")
|
#!/usr/bin/env python
from EPPs.common import StepEPP
from pyclarity_lims.entities import Protocol
class CheckStepUDF(StepEPP):
"""
Checks whether the step UDFs named in the arguments still hold their default values
"""
def __init__(self, argv=None):
super().__init__(argv)
self.check_udfs = self.cmd_args.check_udfs
self.default_values = self.cmd_args.default_values
@staticmethod
def add_args(argparser):
argparser.add_argument('-c', '--check_udfs', nargs='*', help='Step UDFs to check')
argparser.add_argument('-d', '--default_values', nargs='*', help='Default value for each step UDF')
def _run(self):
for check_udf, default_value in zip(self.check_udfs, self.default_values):
print(check_udf, default_value)
if self.process.udf[check_udf] == default_value:
raise ValueError('Please complete ' + check_udf)
if __name__ == '__main__':
CheckStepUDF().run()
|
# LICENSED UNDER BSD-3-CLAUSE-CLEAR LICENSE
# SEE PROVIDED LICENSE FILE IN ROOT DIRECTORY
# Import Modules
import logging
import click
from src.bgp import bgp
from src.peeringdb import pdb
from src.ip import ip
from src.shodan import shodan
from src.whois import whois
from src.api import api
class Throne:
def __init__(self):
self.verbose = False
pass_throne = click.make_pass_decorator(Throne)
@click.group()
@click.option("--verbose", "-v", is_flag=True, help="Enables verbose mode.")
@click.pass_context
def cli(ctx, verbose):
"""
Throne is a command line tool to query various things on the internet.
"""
if verbose:
LOG_FORMAT = ('[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)s] '
'[%(funcName)s()] %(message)s')
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
cli.add_command(bgp)
cli.add_command(pdb)
cli.add_command(ip)
cli.add_command(shodan)
cli.add_command(whois)
cli.add_command(api)
|
import pytest
from src.app import app as APIAppRoot
from fastapi.testclient import TestClient
@pytest.fixture(scope="package")
def client():
with TestClient(
APIAppRoot, base_url="http://testserver/api/bilibili/v3/"
) as client:
yield client
def test_video_info(client: TestClient):
response = client.get("video_info", params={"aid": 2})
assert response.status_code == 200
assert response.json()["code"] == 0
def test_video_address(client: TestClient):
response = client.get(
"video_address",
params={"aid": 2, "cid": 62131},
)
assert response.status_code == 200
assert response.json()["code"] == 0
def test_video_recommend(client: TestClient):
response = client.get("video_recommend")
assert response.status_code == 200
assert response.json()["list"]
def test_video_dynamic(client: TestClient):
response = client.get("video_dynamic")
assert response.status_code == 200
assert response.json()["code"] == 0
def test_video_ranking(client: TestClient):
response = client.get("video_ranking")
assert response.status_code == 200
assert response.json()["rank"]
def test_user_info(client: TestClient):
response = client.get("user_info", params={"uid": 2})
assert response.status_code == 200
assert response.json()["code"] == 0
def test_user_uploaded(client: TestClient):
response = client.get("user_uploaded", params={"uid": 2})
assert response.status_code == 200
assert response.json()["code"] == 0
def test_user_favorite(client: TestClient):
# TODO:add test case
pass
def test_season_info(client: TestClient):
response = client.get("season_info", params={"season_id": 425})
assert response.status_code == 200
assert response.json()["code"] == 0
def test_season_recommend(client: TestClient):
response = client.get("season_recommend", params={"season_id": 425})
assert response.status_code == 200
assert response.json()["code"] == 0
def test_season_episode(client: TestClient):
response = client.get("season_episode", params={"episode_id": 84340})
assert response.status_code == 200
assert response.json()["code"] == 0
def test_season_timeline(client: TestClient):
response = client.get("season_timeline")
assert response.status_code == 200
assert response.json()["code"] == 0
def test_season_ranking(client: TestClient):
response = client.get("season_ranking")
assert response.status_code == 200
assert response.json()["code"] == 0
def test_search(client: TestClient):
response = client.get("search", params={"keyword": "railgun"})
assert response.status_code == 200
assert response.json()["code"] == 0
def test_search_recommend(client: TestClient):
response = client.get("search_recommend")
assert response.status_code == 200
assert response.json()["code"] == 0
def test_search_suggestion(client: TestClient):
response = client.get("search_suggestion", params={"keyword": "paperclip"})
assert response.status_code == 200
assert response.json()["code"] == 0
def test_comments(client: TestClient):
response = client.get("comments", params={"id": 2})
assert response.status_code == 200
assert response.json()["code"] == 0
|
# clear the list
elements = [1, 2, 3, 4, 5]
del elements[:]
print(elements)
# replace all elements
elements = [1, 2, 3, 4, 5]
elements[:] = [5, 4, 3, 2, 1]
print(elements)
# copy the list
elements = [1, 2, 3, 4, 5]
copy_of_elements = elements[:]
print(copy_of_elements is elements)
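# follow-up example (added for illustration): slice assignment mutates the existing
# list object, while rebinding the name creates a new one, so aliases only see the former
elements = [1, 2, 3, 4, 5]
alias = elements
elements[:] = [0, 0, 0]
print(alias)      # [0, 0, 0] -- in-place change is visible through the alias
elements = [9, 9, 9]
print(alias)      # still [0, 0, 0] -- rebinding does not affect the alias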
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class LoginForm(forms.Form):
username = forms.CharField(max_length=100)
password = forms.CharField(widget=forms.PasswordInput())
|
#!/usr/bin/python3
OPENVSWITCH_SERVICES_EXPRS = [r"ovsdb-\S+",
r"ovs-vswitch\S+",
r"ovn\S+"]
OVS_PKGS = [r"libc-bin",
r"openvswitch-switch",
r"ovn",
]
OVS_DAEMONS = {"ovs-vswitchd":
{"logs": "var/log/openvswitch/ovs-vswitchd.log"},
"ovsdb-server":
{"logs": "var/log/openvswitch/ovsdb-server.log"}}
|
from apptweak.plateform import *
class Ios(Plateform):
plateform_name = 'ios'
def __init__(self):
super().__init__(self.plateform_name)
@classmethod
def ratings(cls, application_id, params={}):
return cls.applications(application_id, API_END_PATH['ratings'], params)
@classmethod
def backlinks(cls, application_id):
raise Exception('Not implemented for this platform')
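# Usage sketch (hypothetical application id and parameters; performs a live API
# call, so it is left commented out):
# Ios.ratings('1234567890', {'country': 'us'})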
|
# Based on gearUtils-03.js by Dr A.R.Collins
# Latest version: <www.arc.id.au/gearDrawing.html>
# Calculation of Bezier coefficients for
# Higuchi et al. approximation to an involute.
# ref: YNU Digital Eng Lab Memorandum 05-1
from math import *
from svg import *
def genInvolutePolar(Rb, R): # Rb = base circle radius
# returns the involute angle as function of radius R.
return (sqrt(R*R - Rb*Rb) / Rb) - acos(Rb / R)
def rotate(pt, rads): # rotate pt by rads radians about origin
sinA = sin(rads)
cosA = cos(rads)
return [pt[0] * cosA - pt[1] * sinA,
pt[0] * sinA + pt[1] * cosA]
def toCartesian(radius, angle): # convert polar coords to cartesian
return [radius * cos(angle), radius * sin(angle)]
def CreateExternalGear(m, Z, phi):
# ****** external gear specifications
addendum = m # distance from pitch circle to tip circle
dedendum = 1.25 * m # pitch circle to root, sets clearance
clearance = dedendum - addendum
# Calculate radii
Rpitch = Z * m / 2 # pitch circle radius
Rb = Rpitch*cos(phi * pi / 180) # base circle radius
Ra = Rpitch + addendum # tip (addendum) circle radius
Rroot = Rpitch - dedendum # root circle radius
fRad = 1.5 * clearance # fillet radius, max 1.5*clearance
Rf = sqrt((Rroot + fRad) * (Rroot + fRad) - (fRad * fRad)) # radius at top of fillet
if (Rb < Rf):
Rf = Rroot + clearance
# ****** calculate angles (all in radians)
pitchAngle = 2 * pi / Z # angle subtended by whole tooth (rads)
baseToPitchAngle = genInvolutePolar(Rb, Rpitch)
pitchToFilletAngle = baseToPitchAngle # profile starts at base circle
if (Rf > Rb): # start profile at top of fillet (if its greater)
pitchToFilletAngle -= genInvolutePolar(Rb, Rf)
filletAngle = atan(fRad / (fRad + Rroot)) # radians
# ****** generate Higuchi involute approximation
fe = 1 # fraction of profile length at end of approx
fs = 0.01 # fraction of length offset from base to avoid singularity
if (Rf > Rb):
fs = (Rf * Rf - Rb * Rb) / (Ra * Ra - Rb * Rb) # offset start to top of fillet
# approximate in 2 sections, split 25% along the involute
fm = fs + (fe - fs) / 4 # fraction of length at junction (25% along profile)
dedBez = BezCoeffs(m, Z, phi, 3, fs, fm)
addBez = BezCoeffs(m, Z, phi, 3, fm, fe)
dedInv = dedBez.involuteBezCoeffs()
addInv = addBez.involuteBezCoeffs()
# join the 2 sets of coeffs (skip duplicate mid point)
inv = dedInv + addInv[1:]
# create the back profile of tooth (mirror image)
invR = [0 for i in range(0, len(inv))] # involute profile along back of tooth
for i in range(0, len(inv)):
# rotate all points to put pitch point at y = 0
pt = rotate(inv[i], -baseToPitchAngle - pitchAngle / 4)
inv[i] = pt
# generate the back of tooth profile nodes, mirror coords in X axis
invR[i] = [pt[0], -pt[1]]
# ****** calculate section junction points (R = back of tooth, Next = front of next tooth)
fillet = toCartesian(Rf, -pitchAngle / 4 - pitchToFilletAngle) # top of fillet
filletR = [fillet[0], -fillet[1]] # flip to make same point on back of tooth
rootR = toCartesian(Rroot, pitchAngle / 4 + pitchToFilletAngle + filletAngle)
rootNext = toCartesian(Rroot, 3 * pitchAngle / 4 - pitchToFilletAngle - filletAngle)
filletNext = rotate(fillet, pitchAngle) # top of fillet, front of next tooth
# Draw the shapes in SVG
t_inc = 2.0 * pi / float(Z)
thetas = [(x * t_inc) for x in range(Z)]
svg = SVG_move(fillet, 0) # start at top of fillet
for theta in thetas:
if (Rf < Rb):
svg += SVG_line(inv[0], theta) # line from fillet up to base circle
svg += SVG_curve2(inv[1], inv[2], inv[3],
inv[4], inv[5], inv[6], theta)
svg += SVG_circle(invR[6], Ra, 1, theta) # arc across addendum circle
# svg = SVG_move(invR[6]) # TEMP
svg += SVG_curve2(invR[5], invR[4], invR[3],
invR[2], invR[1], invR[0], theta)
if (Rf < Rb):
svg += SVG_line(filletR, theta) # line down to top of fillet
if (rootNext[1] > rootR[1]): # is there a section of root circle between fillets?
svg += SVG_circle(rootR, fRad, 0, theta) # back fillet
svg += SVG_circle(rootNext, Rroot, 1, theta) # root circle arc
svg += SVG_circle(filletNext, fRad, 0, theta)
svg += SVG_close()
return svg
def CreateInternalGear(m, Z, phi):
addendum = 0.6 * m # pitch circle to tip circle (ref G.M.Maitra)
dedendum = 1.25 * m # pitch circle to root radius, sets clearance
# Calculate radii
Rpitch = Z * m / 2 # pitch radius
Rb = Rpitch * cos(phi * pi / 180) # base radius
Ra = Rpitch - addendum # addendum radius
Rroot = Rpitch + dedendum # root radius
clearance = 0.25 * m # gear dedendum - pinion addendum
Rf = Rroot - clearance # radius of top of fillet (end of profile)
fRad = 1.5 * clearance # fillet radius, 1 .. 1.5*clearance
# ****** calculate subtended angles
pitchAngle = 2 * pi / Z # angle between teeth (rads)
baseToPitchAngle = genInvolutePolar(Rb, Rpitch)
tipToPitchAngle = baseToPitchAngle # profile starts from base circle
if (Ra > Rb):
tipToPitchAngle -= genInvolutePolar(Rb, Ra) # start profile from addendum
pitchToFilletAngle = genInvolutePolar(Rb, Rf) - baseToPitchAngle
filletAngle = 1.414 * clearance / Rf # to make fillet tangential to root
# ****** generate Higuchi involute approximation
fe = 1 # fraction of involute length at end of approx (fillet circle)
fs = 0.01 # fraction of length offset from base to avoid singularity
if (Ra > Rb):
fs = (Ra*Ra - Rb*Rb) / (Rf*Rf - Rb*Rb) # start profile from addendum (tip circle)
# approximate in 2 sections, split 25% along the profile
fm = fs + (fe - fs) / 4
addBez = BezCoeffs(m, Z, phi, 3, fs, fm)
dedBez = BezCoeffs(m, Z, phi, 3, fm, fe)
addInv = addBez.involuteBezCoeffs()
dedInv = dedBez.involuteBezCoeffs()
# join the 2 sets of coeffs (skip duplicate mid point)
invR = addInv + dedInv[1:]
# create the front profile of tooth (mirror image)
inv = [0 for i in range(0, len(invR))] # back involute profile
for i in range(0, len(inv)):
# rotate involute to put center of tooth at y = 0
pt = rotate(invR[i], pitchAngle / 4 - baseToPitchAngle)
invR[i] = pt
# generate the back of tooth profile, flip Y coords
inv[i] = [pt[0], -pt[1]]
# ****** calculate coords of section junctions
fillet = [inv[6][0], inv[6][1]] # top of fillet, front of tooth
tip = toCartesian(Ra, -pitchAngle / 4 + tipToPitchAngle) # tip, front of tooth
tipR = [tip[0], -tip[1]] # addendum, back of tooth
rootR = toCartesian(Rroot, pitchAngle / 4 + pitchToFilletAngle + filletAngle)
rootNext = toCartesian(Rroot, 3 * pitchAngle / 4 - pitchToFilletAngle - filletAngle)
filletNext = rotate(fillet, pitchAngle) # top of fillet, front of next tooth
# Draw the shapes in SVG
t_inc = 2.0 * pi / float(Z)
thetas = [(x * t_inc) for x in range(Z)]
svg = SVG_move(fillet, 0) # start at top of fillet
for theta in thetas:
svg += SVG_curve2(inv[5], inv[4], inv[3],
inv[2], inv[1], inv[0], theta)
if (Ra < Rb):
svg += SVG_line(tip, theta) # line from end of involute to addendum (tip)
svg += SVG_circle(tipR, Ra, 1, theta) # arc across tip circle
if (Ra < Rb):
svg += SVG_line(invR[0], theta) # line from addendum to start of involute
svg += SVG_curve2(invR[1], invR[2], invR[3],
invR[4], invR[5], invR[6], theta)
if (rootR[1] < rootNext[1]): # there is a section of root circle between fillets
svg += SVG_circle(rootR, fRad, 1, theta) # fillet on back of tooth
svg += SVG_circle(rootNext, Rroot, 1, theta) # root circle arc
svg += SVG_circle(filletNext, fRad, 1, theta) # fillet on next
return svg
class BezCoeffs:
def chebyExpnCoeffs(self, j, func):
N = 50 # a suitably large number N>>p
c = 0
for k in range(1, N + 1):
c += func(cos(pi * (k - 0.5) / N)) * cos(pi * j * (k - 0.5) / N)
return 2 * c / N
def chebyPolyCoeffs(self, p, func):
coeffs = [0, 0, 0, 0]
fnCoeff = [0, 0, 0, 0]
T = [[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
]
# now generate the Chebyshev polynomial coefficient using
# formula T(k+1) = 2xT(k) - T(k-1) which yields
# T = [ [ 1, 0, 0, 0, 0, 0], # T0(x) = +1
# [ 0, 1, 0, 0, 0, 0], # T1(x) = 0 +x
# [-1, 0, 2, 0, 0, 0], # T2(x) = -1 0 +2xx
# [ 0, -3, 0, 4, 0, 0], # T3(x) = 0 -3x 0 +4xxx
# [ 1, 0, -8, 0, 8, 0], # T4(x) = +1 0 -8xx 0 +8xxxx
# [ 0, 5, 0,-20, 0, 16], # T5(x) = 0 5x 0 -20xxx 0 +16xxxxx
# ... ]
for k in range(1, p + 1):
for j in range(0, len(T[k]) - 1):
T[k + 1][j + 1] = 2 * T[k][j]
for j in range(0, len(T[k - 1])):
T[k + 1][j] -= T[k - 1][j]
# convert the chebyshev function series into a simple polynomial
# and collect like terms, out T polynomial coefficients
for k in range(0, p + 1):
fnCoeff[k] = self.chebyExpnCoeffs(k, func)
coeffs[k] = 0
for k in range(0, p + 1):
for pwr in range(0, p + 1):
coeffs[pwr] += fnCoeff[k] * T[k][pwr]
coeffs[0] -= self.chebyExpnCoeffs(0, func) / 2 # fix the 0th coeff
return coeffs
# Equation of involute using the Bezier parameter t as variable
def involuteXbez(self, t):
# map t (0 <= t <= 1) onto x (where -1 <= x <= 1)
x = t * 2 - 1
# map theta (where ts <= theta <= te) from x (-1 <=x <= 1)
theta = x * (self.te - self.ts) / 2 + (self.ts + self.te) / 2
return self.Rb * (cos(theta) + theta * sin(theta))
def involuteYbez(self, t):
# map t (0 <= t <= 1) onto x (where -1 <= x <= 1)
x = t * 2 - 1
# map theta (where ts <= theta <= te) from x (-1 <=x <= 1)
theta = x * (self.te - self.ts) / 2 + (self.ts + self.te) / 2
return self.Rb * (sin(theta) - theta * cos(theta))
def binom(self, n, k):
coeff = 1
for i in range(n - k + 1, n + 1):
coeff *= i
for i in range(1, k + 1):
coeff /= i
return coeff
def bezCoeff(self, i, func):
# generate the polynomial coeffs in one go
polyCoeffs = self.chebyPolyCoeffs(self.p, func)
bc = 0
for j in range(0, i + 1):
bc += self.binom(i, j) * polyCoeffs[j] / self.binom(self.p, j)
return bc
def involuteBezCoeffs(self):
# calc Bezier coeffs
bzCoeffs = []
for i in range(0, self.p + 1):
bcoeff = [0, 0]
bcoeff[0] = self.bezCoeff(i, self.involuteXbez)
bcoeff[1] = self.bezCoeff(i, self.involuteYbez)
bzCoeffs.append(bcoeff)
return bzCoeffs
# Parameters:
# module - sets the size of teeth (see gear design texts)
# numTeeth - number of teeth on the gear
# pressure angle - angle in degrees, usually 14.5 or 20
# order - the order of the Bezier curve to be fitted [3, 4, 5, ..]
# fstart - fraction of distance along tooth profile to start
# fstop - fraction of distance along profile to stop
def __init__(self, module, numTeeth, pressureAngle, order, fstart, fstop):
self.Rpitch = module * numTeeth / 2 # pitch circle radius
self.phi = pressureAngle # pressure angle
self.Rb = self.Rpitch * cos(self.phi * pi / 180) # base circle radius
self.Ra = self.Rpitch + module # addendum radius (outer radius)
self.ta = sqrt(self.Ra * self.Ra - self.Rb * self.Rb) / self.Rb # involute angle at addendum
self.stop = fstop
if (fstart < self.stop):
self.start = fstart
self.te = sqrt(self.stop) * self.ta # involute angle, theta, at end of approx
self.ts = sqrt(self.start) * self.ta # involute angle, theta, at start of approx
self.p = order # order of Bezier approximation
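# Usage sketch (illustrative values, added here): emit SVG path data for a module-2,
# 24-tooth gear pair with a 20 degree pressure angle, using the SVG_* helpers
# imported above.
if __name__ == '__main__':
    print(CreateExternalGear(2, 24, 20))
    print(CreateInternalGear(2, 24, 20))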
|
Cnt = 5
T = [0] * Cnt
k1 = 0
Rez = 0
for k1 in range(0, Cnt):
if (T[k1] < 186):
Rez = Rez + 1
print("Rez=", Rez)
|
#!/usr/bin/env python
import time
start = time.time()
# START OMIT
import numpy as np
counter = {'yes': 0, 'no': 0}
big_list = set(np.random.randint(0, 10000, 10000000))
check_list = set(np.random.randint(0, 99999, 1000))
for number in check_list:
if number in big_list:
counter['yes'] += 1
else:
counter['no'] += 1
# END OMIT
print(counter)
print(f"Script executed in {time.time() - start:.2f} seconds")
|
#
# $Id: sphinxapi.py 1216 2008-03-14 23:25:39Z shodan $
#
# Python version of Sphinx searchd client (Python API)
#
# Copyright (c) 2006-2008, Andrew Aksyonoff
# Copyright (c) 2006, Mike Osadnik
# All rights reserved
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License. You should have
# received a copy of the GPL license along with this program; if you
# did not, you can find it at http://www.gnu.org/
#
import sys
import select
import socket
from struct import *
# known searchd commands
SEARCHD_COMMAND_SEARCH = 0
SEARCHD_COMMAND_EXCERPT = 1
SEARCHD_COMMAND_UPDATE = 2
SEARCHD_COMMAND_KEYWORDS= 3
# current client-side command implementation versions
VER_COMMAND_SEARCH = 0x113
VER_COMMAND_EXCERPT = 0x100
VER_COMMAND_UPDATE = 0x101
VER_COMMAND_KEYWORDS = 0x100
# known searchd status codes
SEARCHD_OK = 0
SEARCHD_ERROR = 1
SEARCHD_RETRY = 2
SEARCHD_WARNING = 3
# known match modes
SPH_MATCH_ALL = 0
SPH_MATCH_ANY = 1
SPH_MATCH_PHRASE = 2
SPH_MATCH_BOOLEAN = 3
SPH_MATCH_EXTENDED = 4
SPH_MATCH_FULLSCAN = 5
SPH_MATCH_EXTENDED2 = 6
# known ranking modes (extended2 mode only)
SPH_RANK_PROXIMITY_BM25 = 0 # default mode, phrase proximity major factor and BM25 minor one
SPH_RANK_BM25 = 1 # statistical mode, BM25 ranking only (faster but worse quality)
SPH_RANK_NONE = 2 # no ranking, all matches get a weight of 1
SPH_RANK_WORDCOUNT = 3 # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
# known sort modes
SPH_SORT_RELEVANCE = 0
SPH_SORT_ATTR_DESC = 1
SPH_SORT_ATTR_ASC = 2
SPH_SORT_TIME_SEGMENTS = 3
SPH_SORT_EXTENDED = 4
SPH_SORT_EXPR = 5
# known filter types
SPH_FILTER_VALUES = 0
SPH_FILTER_RANGE = 1
SPH_FILTER_FLOATRANGE = 2
# known attribute types
SPH_ATTR_NONE = 0
SPH_ATTR_INTEGER = 1
SPH_ATTR_TIMESTAMP = 2
SPH_ATTR_ORDINAL = 3
SPH_ATTR_BOOL = 4
SPH_ATTR_FLOAT = 5
SPH_ATTR_MULTI = 0X40000000
# known grouping functions
SPH_GROUPBY_DAY = 0
SPH_GROUPBY_WEEK = 1
SPH_GROUPBY_MONTH = 2
SPH_GROUPBY_YEAR = 3
SPH_GROUPBY_ATTR = 4
class SphinxClient:
def __init__ (self):
"""
Create a new client object, and fill defaults.
"""
self._host = 'localhost' # searchd host (default is "localhost")
self._port = 3312 # searchd port (default is 3312)
self._offset = 0 # how many records to skip from the start of the result set (default is 0)
self._limit = 20 # how many records to return from the result set, starting at offset (default is 20)
self._mode = SPH_MATCH_ALL # query matching mode (default is SPH_MATCH_ALL)
self._weights = [] # per-field weights (default is 1 for all fields)
self._sort = SPH_SORT_RELEVANCE # match sorting mode (default is SPH_SORT_RELEVANCE)
self._sortby = '' # attribute to sort by (default is "")
self._min_id = 0 # min ID to match (default is 0)
self._max_id = 0xFFFFFFFF # max ID to match (default is UINT_MAX)
self._filters = [] # search filters
self._groupby = '' # group-by attribute name
self._groupfunc = SPH_GROUPBY_DAY # group-by function (to pre-process group-by attribute value with)
self._groupsort = '@group desc' # group-by sorting clause (to sort groups in result set with)
self._groupdistinct = '' # group-by count-distinct attribute
self._maxmatches = 1000 # max matches to retrieve
self._cutoff = 0 # cutoff to stop searching at
self._retrycount = 0 # distributed retry count
self._retrydelay = 0 # distributed retry delay
self._anchor = {} # geographical anchor point
self._indexweights = {} # per-index weights
self._ranker = SPH_RANK_PROXIMITY_BM25 # ranking mode
self._maxquerytime = 0 # max query time, milliseconds (default is 0, do not limit)
self._fieldweights = {} # per-field-name weights
self._error = '' # last error message
self._warning = '' # last warning message
self._reqs = [] # requests array for multi-query
return
def GetLastError (self):
"""
Get last error message (string).
"""
return self._error
def GetLastWarning (self):
"""
Get last warning message (string).
"""
return self._warning
def SetServer (self, host, port):
"""
Set searchd server host and port.
"""
assert(isinstance(host, str))
assert(isinstance(port, int))
self._host = host
self._port = port
def _Connect (self):
"""
INTERNAL METHOD, DO NOT CALL. Connects to searchd server.
"""
try:
sock = socket.socket ( socket.AF_INET, socket.SOCK_STREAM )
sock.connect ( ( self._host, self._port ) )
except socket.error as msg:
if sock:
sock.close()
self._error = 'connection to %s:%s failed (%s)' % ( self._host, self._port, msg )
return 0
v = unpack('>L', sock.recv(4))[0]
if v < 1:
sock.close()
self._error = 'expected searchd protocol version, got %s' % v
return 0
# all ok, send my version
sock.send(pack('>L', 1))
return sock
def _GetResponse (self, sock, client_ver):
"""
INTERNAL METHOD, DO NOT CALL. Gets and checks response packet from searchd server.
"""
(status, ver, length) = unpack('>2HL', sock.recv(8))
response = b''
left = length
while left>0:
chunk = sock.recv(left)
if chunk:
response += chunk
left -= len(chunk)
else:
break
sock.close()
# check response
read = len(response)
if not response or read!=length:
if length:
self._error = 'failed to read searchd response (status=%s, ver=%s, len=%s, read=%s)' \
% (status, ver, length, read)
else:
self._error = 'received zero-sized searchd response'
return None
# check status
if status==SEARCHD_WARNING:
wend = 4 + unpack ( '>L', response[0:4] )[0]
self._warning = response[4:wend].decode('utf-8', 'replace')
return response[wend:]
if status==SEARCHD_ERROR:
self._error = 'searchd error: ' + response[4:].decode('utf-8', 'replace')
return None
if status==SEARCHD_RETRY:
self._error = 'temporary searchd error: ' + response[4:].decode('utf-8', 'replace')
return None
if status!=SEARCHD_OK:
self._error = 'unknown status code %d' % status
return None
# check version
if ver<client_ver:
self._warning = 'searchd command v.%d.%d older than client\'s v.%d.%d, some options might not work' \
% (ver>>8, ver&0xff, client_ver>>8, client_ver&0xff)
return response
def SetLimits (self, offset, limit, maxmatches=0, cutoff=0):
"""
Set offset and count into result set, and optionally set max-matches and cutoff limits.
"""
assert(isinstance(offset, int) and offset>=0)
assert(isinstance(limit, int) and limit>0)
assert(maxmatches>=0)
self._offset = offset
self._limit = limit
if maxmatches>0:
self._maxmatches = maxmatches
if cutoff>=0:
self._cutoff = cutoff
def SetMaxQueryTime (self, maxquerytime):
"""
Set maximum query time, in milliseconds, per-index. 0 means 'do not limit'.
"""
assert(isinstance(maxquerytime,int) and maxquerytime>0)
self._maxquerytime = maxquerytime
def SetMatchMode (self, mode):
"""
Set matching mode.
"""
assert(mode in [SPH_MATCH_ALL, SPH_MATCH_ANY, SPH_MATCH_PHRASE, SPH_MATCH_BOOLEAN, SPH_MATCH_EXTENDED, SPH_MATCH_FULLSCAN, SPH_MATCH_EXTENDED2])
self._mode = mode
def SetRankingMode (self, ranker):
"""
Set ranking mode.
"""
assert(ranker in [SPH_RANK_PROXIMITY_BM25, SPH_RANK_BM25, SPH_RANK_NONE, SPH_RANK_WORDCOUNT])
self._ranker = ranker
def SetSortMode ( self, mode, clause='' ):
"""
Set sorting mode.
"""
assert ( mode in [SPH_SORT_RELEVANCE, SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC, SPH_SORT_TIME_SEGMENTS, SPH_SORT_EXTENDED, SPH_SORT_EXPR] )
assert ( isinstance ( clause, str ) )
self._sort = mode
self._sortby = clause
def SetWeights (self, weights):
"""
Set per-field weights.
WARNING, DEPRECATED; do not use it! use SetFieldWeights() instead
"""
assert(isinstance(weights, list))
for w in weights:
assert(isinstance(w, int))
self._weights = weights
def SetFieldWeights (self, weights):
"""
Bind per-field weights by name; expects (name,field_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in list(weights.items()):
assert(isinstance(key,str))
assert(isinstance(val,int))
self._fieldweights = weights
def SetIndexWeights (self, weights):
"""
Bind per-index weights by name; expects (name,index_weight) dictionary as argument.
"""
assert(isinstance(weights,dict))
for key,val in list(weights.items()):
assert(isinstance(key,str))
assert(isinstance(val,int))
self._indexweights = weights
def SetIDRange (self, minid, maxid):
"""
Set IDs range to match.
Only match records whose document ID is between minid and maxid (inclusive).
"""
assert(isinstance(minid, int))
assert(isinstance(maxid, int))
assert(minid<=maxid)
self._min_id = minid
self._max_id = maxid
def SetFilter ( self, attribute, values, exclude=0 ):
"""
Set values set filter.
Only match records where 'attribute' value is in given 'values' set.
"""
assert(isinstance(attribute, str))
assert(isinstance(values, list))
assert(values)
for value in values:
assert(isinstance(value, int))
self._filters.append ( { 'type':SPH_FILTER_VALUES, 'attr':attribute, 'exclude':exclude, 'values':values } )
def SetFilterRange (self, attribute, min_, max_, exclude=0 ):
"""
Set range filter.
Only match records if 'attribute' value is between 'min_' and 'max_' (inclusive).
"""
assert(isinstance(attribute, str))
assert(isinstance(min_, int))
assert(isinstance(max_, int))
assert(min_<=max_)
self._filters.append ( { 'type':SPH_FILTER_RANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_ } )
def SetFilterFloatRange (self, attribute, min_, max_, exclude=0 ):
assert(isinstance(attribute,str))
assert(isinstance(min_,float))
assert(isinstance(max_,float))
assert(min_ <= max_)
self._filters.append ( {'type':SPH_FILTER_FLOATRANGE, 'attr':attribute, 'exclude':exclude, 'min':min_, 'max':max_} )
def SetGeoAnchor (self, attrlat, attrlong, latitude, longitude):
assert(isinstance(attrlat,str))
assert(isinstance(attrlong,str))
assert(isinstance(latitude,float))
assert(isinstance(longitude,float))
self._anchor['attrlat'] = attrlat
self._anchor['attrlong'] = attrlong
self._anchor['lat'] = latitude
self._anchor['long'] = longitude
def SetGroupBy ( self, attribute, func, groupsort='@group desc' ):
"""
Set grouping attribute and function.
"""
assert(isinstance(attribute, str))
assert(func in [SPH_GROUPBY_DAY, SPH_GROUPBY_WEEK, SPH_GROUPBY_MONTH, SPH_GROUPBY_YEAR, SPH_GROUPBY_ATTR] )
assert(isinstance(groupsort, str))
self._groupby = attribute
self._groupfunc = func
self._groupsort = groupsort
def SetGroupDistinct (self, attribute):
assert(isinstance(attribute,str))
self._groupdistinct = attribute
def SetRetries (self, count, delay=0):
assert(isinstance(count,int) and count>=0)
assert(isinstance(delay,int) and delay>=0)
self._retrycount = count
self._retrydelay = delay
def ResetFilters (self):
"""
Clear all filters (for multi-queries).
"""
self._filters = []
self._anchor = {}
def ResetGroupBy (self):
"""
Clear groupby settings (for multi-queries).
"""
self._groupby = ''
self._groupfunc = SPH_GROUPBY_DAY
self._groupsort = '@group desc'
self._groupdistinct = ''
def Query (self, query, index='*', comment=''):
"""
Connect to searchd server and run given search query.
Returns None on failure; result set hash on success (see documentation for details).
"""
assert(len(self._reqs)==0)
self.AddQuery(query,index,comment)
results = self.RunQueries()
if not results or len(results)==0:
return None
self._error = results[0]['error']
self._warning = results[0]['warning']
if results[0]['status'] == SEARCHD_ERROR:
return None
return results[0]
def AddQuery (self, query, index='*', comment=''):
"""
Add query to batch.
"""
# build request
req = [pack('>5L', self._offset, self._limit, self._mode, self._ranker, self._sort)]
req.append(pack('>L', len(self._sortby)))
req.append(self._sortby)
if isinstance(query,str):
query = query.encode('utf-8')
assert(isinstance(query,bytes))
req.append(pack('>L', len(query)))
req.append(query)
req.append(pack('>L', len(self._weights)))
for w in self._weights:
req.append(pack('>L', w))
req.append(pack('>L', len(index)))
req.append(index)
req.append(pack('>L',0)) # id64 range marker FIXME! IMPLEMENT!
req.append(pack('>L', self._min_id))
req.append(pack('>L', self._max_id))
# filters
req.append ( pack ( '>L', len(self._filters) ) )
for f in self._filters:
req.append ( pack ( '>L', len(f['attr'])) + f['attr'])
filtertype = f['type']
req.append ( pack ( '>L', filtertype))
if filtertype == SPH_FILTER_VALUES:
req.append ( pack ('>L', len(f['values'])))
for val in f['values']:
req.append ( pack ('>L', val))
elif filtertype == SPH_FILTER_RANGE:
req.append ( pack ('>2L', f['min'], f['max']))
elif filtertype == SPH_FILTER_FLOATRANGE:
req.append ( pack ('>2f', f['min'], f['max']))
req.append ( pack ( '>L', f['exclude'] ) )
# group-by, max-matches, group-sort
req.append ( pack ( '>2L', self._groupfunc, len(self._groupby) ) )
req.append ( self._groupby )
req.append ( pack ( '>2L', self._maxmatches, len(self._groupsort) ) )
req.append ( self._groupsort )
req.append ( pack ( '>LLL', self._cutoff, self._retrycount, self._retrydelay))
req.append ( pack ( '>L', len(self._groupdistinct)))
req.append ( self._groupdistinct)
# anchor point
if len(self._anchor) == 0:
req.append ( pack ('>L', 0))
else:
attrlat, attrlong = self._anchor['attrlat'], self._anchor['attrlong']
latitude, longitude = self._anchor['lat'], self._anchor['long']
req.append ( pack ('>L', 1))
req.append ( pack ('>L', len(attrlat)) + attrlat)
req.append ( pack ('>L', len(attrlong)) + attrlong)
req.append ( pack ('>f', latitude) + pack ('>f', longitude))
# per-index weights
req.append ( pack ('>L',len(self._indexweights)))
for indx,weight in list(self._indexweights.items()):
req.append ( pack ('>L',len(indx)) + indx + pack ('>L',weight))
# max query time
req.append ( pack ('>L', self._maxquerytime) )
# per-field weights
req.append ( pack ('>L',len(self._fieldweights) ) )
for field,weight in list(self._fieldweights.items()):
req.append ( pack ('>L',len(field)) + field + pack ('>L',weight) )
# comment
req.append ( pack('>L',len(comment)) + comment )
# send query, get response
req = ''.join(req)
self._reqs.append(req)
return
def RunQueries (self):
"""
Run queries batch.
Returns None on network IO failure; or an array of result set hashes on success.
"""
if len(self._reqs)==0:
self._error = 'no queries defined, issue AddQuery() first'
return None
sock = self._Connect()
if not sock:
return None
req = ''.join(self._reqs)
length = len(req)+4
req = pack('>HHLL', SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, length, len(self._reqs))+req
sock.send(req)
response = self._GetResponse(sock, VER_COMMAND_SEARCH)
if not response:
return None
nreqs = len(self._reqs)
# parse response
max_ = len(response)
p = 0
results = []
for i in range(0,nreqs,1):
result = {}
result['error'] = ''
result['warning'] = ''
status = unpack('>L', response[p:p+4])[0]
p += 4
result['status'] = status
if status != SEARCHD_OK:
length = unpack('>L', response[p:p+4])[0]
p += 4
message = response[p:p+length]
p += length
if status == SEARCHD_WARNING:
result['warning'] = message
else:
result['error'] = message
continue
# read schema
fields = []
attrs = []
nfields = unpack('>L', response[p:p+4])[0]
p += 4
while nfields>0 and p<max_:
nfields -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
fields.append(response[p:p+length])
p += length
result['fields'] = fields
nattrs = unpack('>L', response[p:p+4])[0]
p += 4
while nattrs>0 and p<max_:
nattrs -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
attr = response[p:p+length]
p += length
type_ = unpack('>L', response[p:p+4])[0]
p += 4
attrs.append([attr,type_])
result['attrs'] = attrs
# read match count
count = unpack('>L', response[p:p+4])[0]
p += 4
id64 = unpack('>L', response[p:p+4])[0]
p += 4
# read matches
result['matches'] = []
while count>0 and p<max_:
count -= 1
if id64:
dochi, doc, weight = unpack('>3L', response[p:p+12])
doc += (dochi<<32)
p += 12
else:
doc, weight = unpack('>2L', response[p:p+8])
p += 8
match = { 'id':doc, 'weight':weight, 'attrs':{} }
for i in range(len(attrs)):
if attrs[i][1] == SPH_ATTR_FLOAT:
match['attrs'][attrs[i][0]] = unpack('>f', response[p:p+4])[0]
elif attrs[i][1] == (SPH_ATTR_MULTI | SPH_ATTR_INTEGER):
match['attrs'][attrs[i][0]] = []
nvals = unpack('>L', response[p:p+4])[0]
p += 4
for n in range(0,nvals,1):
match['attrs'][attrs[i][0]].append(unpack('>L', response[p:p+4])[0])
p += 4
p -= 4
else:
match['attrs'][attrs[i][0]] = unpack('>L', response[p:p+4])[0]
p += 4
result['matches'].append ( match )
result['total'], result['total_found'], result['time'], words = unpack('>4L', response[p:p+16])
result['time'] = '%.3f' % (result['time']/1000.0)
p += 16
result['words'] = []
while words>0:
words -= 1
length = unpack('>L', response[p:p+4])[0]
p += 4
word = response[p:p+length]
p += length
docs, hits = unpack('>2L', response[p:p+8])
p += 8
result['words'].append({'word':word, 'docs':docs, 'hits':hits})
results.append(result)
self._reqs = []
sock.close()
return results
def BuildExcerpts (self, docs, index, words, opts=None):
"""
Connect to searchd server and generate excerpts from given documents.
"""
if not opts:
opts = {}
if isinstance(words,str):
words = words.encode('utf-8')
assert(isinstance(docs, list))
assert(isinstance(index, bytes))
assert(isinstance(words, bytes))
assert(isinstance(opts, dict))
sock = self._Connect()
if not sock:
return None
# fixup options
opts.setdefault('before_match', '<b>')
opts.setdefault('after_match', '</b>')
opts.setdefault('chunk_separator', ' ... ')
opts.setdefault('limit', 256)
opts.setdefault('around', 5)
# build request
# v.1.0 req
# mode=0, flags=1 (remove spaces)
req = [pack('>2L', 0, 1)]
# req index
req.append(pack('>L', len(index)))
req.append(index)
# req words
req.append(pack('>L', len(words)))
req.append(words)
# options
req.append(pack('>L', len(opts['before_match'])))
req.append(opts['before_match'])
req.append(pack('>L', len(opts['after_match'])))
req.append(opts['after_match'])
req.append(pack('>L', len(opts['chunk_separator'])))
req.append(opts['chunk_separator'])
req.append(pack('>L', int(opts['limit'])))
req.append(pack('>L', int(opts['around'])))
# documents
req.append(pack('>L', len(docs)))
for doc in docs:
if isinstance(doc,str):
doc = doc.encode('utf-8')
assert(isinstance(doc, bytes))
req.append(pack('>L', len(doc)))
req.append(doc)
req = ''.join(req)
# send query, get response
length = len(req)
# add header
req = pack('>2HL', SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, length)+req
wrote = sock.send(req)
response = self._GetResponse(sock, VER_COMMAND_EXCERPT )
if not response:
return []
# parse response
pos = 0
res = []
rlen = len(response)
for i in range(len(docs)):
length = unpack('>L', response[pos:pos+4])[0]
pos += 4
if pos+length > rlen:
self._error = 'incomplete reply'
return []
res.append(response[pos:pos+length])
pos += length
return res
def UpdateAttributes ( self, index, attrs, values ):
"""
Update given attribute values on given documents in given indexes.
Returns amount of updated documents (0 or more) on success, or -1 on failure.
'attrs' must be a list of strings.
'values' must be a dict with int key (document ID) and list of int values (new attribute values).
Example:
res = cl.UpdateAttributes ( 'test1', [ 'group_id', 'date_added' ], { 2:[123,1000000000], 4:[456,1234567890] } )
"""
assert ( isinstance ( index, str ) )
assert ( isinstance ( attrs, list ) )
assert ( isinstance ( values, dict ) )
for attr in attrs:
assert ( isinstance ( attr, str ) )
for docid, entry in list(values.items()):
assert ( isinstance ( docid, int ) )
assert ( isinstance ( entry, list ) )
assert ( len(attrs)==len(entry) )
for val in entry:
assert ( isinstance ( val, int ) )
# build request
req = [ pack('>L',len(index)), index ]
req.append ( pack('>L',len(attrs)) )
for attr in attrs:
req.append ( pack('>L',len(attr)) + attr )
req.append ( pack('>L',len(values)) )
for docid, entry in list(values.items()):
req.append ( pack('>q',docid) )
for val in entry:
req.append ( pack('>L',val) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, length ) + req
wrote = sock.send ( req )
response = self._GetResponse ( sock, VER_COMMAND_UPDATE )
if not response:
return -1
# parse response
updated = unpack ( '>L', response[0:4] )[0]
return updated
def BuildKeywords ( self, query, index, hits ):
"""
Connect to searchd server, and generate keywords list for a given query.
Returns None on failure, or a list of keywords on success.
"""
assert ( isinstance ( query, str ) )
assert ( isinstance ( index, str ) )
assert ( isinstance ( hits, int ) )
# build request
req = [ pack ( '>L', len(query) ) + query ]
req.append ( pack ( '>L', len(index) ) + index )
req.append ( pack ( '>L', hits ) )
# connect, send query, get response
sock = self._Connect()
if not sock:
return None
req = ''.join(req)
length = len(req)
req = pack ( '>2HL', SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, length ) + req
wrote = sock.send ( req )
response = self._GetResponse ( sock, VER_COMMAND_KEYWORDS )
if not response:
return None
# parse response
res = []
nwords = unpack ( '>L', response[0:4] )[0]
p = 4
max_ = len(response)
while nwords>0 and p<max_:
nwords -= 1
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
tokenized = response[p:p+length]
p += length
length = unpack ( '>L', response[p:p+4] )[0]
p += 4
normalized = response[p:p+length]
p += length
entry = { 'tokenized':tokenized, 'normalized':normalized }
if hits:
entry['docs'], entry['hits'] = unpack ( '>2L', response[p:p+8] )
p += 8
res.append ( entry )
if nwords>0 or p>max_:
self._error = 'incomplete reply'
return None
return res
#
# $Id: sphinxapi.py 1216 2008-03-14 23:25:39Z shodan $
#
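# Usage sketch (hypothetical host, index and query; assumes a reachable searchd):
#
# cl = SphinxClient()
# cl.SetServer('localhost', 3312)
# cl.SetMatchMode(SPH_MATCH_ALL)
# res = cl.Query('hello world', index='test1')
# if res is None:
#     print('query failed: %s' % cl.GetLastError())
# else:
#     print('%d matches of %d total' % (len(res['matches']), res['total_found']))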
|
#!/usr/bin/env python3
from src import nswairquality
if __name__ == "__main__":
x = nswairquality.NSWAirQuality()
print(x.toJSON(True))
|
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
import time
import string
import matplotlib.pyplot as plt
from random import randint, choice
### to train on different data, just insert a path to a text file here:
corpus_path = '../corpora/CNCR_2017-18_corpus.txt'
###
# restore saved parameters and resume training?
restore=False
# if False then we just train from scratch
## call it whatever you want:
model_name = 'char_lstm'
model_path = 'model/'
summary_path = 'summary/'
print('Loading corpus...')
with open(corpus_path, encoding='latin1') as file:
corpus = file.read()
#remove blank lines:
lines = corpus.split('\n')
# define legal characters:
legal_chars = string.ascii_lowercase + string.punctuation + string.whitespace + string.digits
def text_to_onehot(corpus, char_indices):
"""Takes a string and returns a ndarray of dimensions [len(string), num_chars],
given a dict that maps characters to integer indices."""
onehot = np.zeros((len(corpus), len(char_indices)))
for x in range(len(corpus)):
char = corpus[x]
idx = char_indices[char]
onehot[x,idx] = 1
return onehot
def onehot_to_text(onehot, indices_char):
"""Takes an ndarray of softmax or onehot encoded text, and a dict that maps
array indices to string characters, and returns the translated string."""
text = []
assert len(indices_char) == onehot.shape[1]
for x in range(onehot.shape[0]):
row = onehot[x,:]
idx = np.argmax(row)
char = indices_char[idx]
text.append(char)
return ''.join(text)
chars = sorted(legal_chars)
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
num_chars = len(chars)
print('Processing corpus...')
newlines = []
for x in lines:
if x != '' and x[0] != '!' and x[0] != '.': # remove empty lines and bot responses
if '<@' not in x: # remove mentions
legalx = ''.join([i for i in x.lower() if i in legal_chars])
newlines.append(legalx.replace('&&newline', '\n'))
raw_corpus = '\n'.join(newlines)
corpus = text_to_onehot(raw_corpus, char_indices)
### corpus loaded, now we construct the batch generator:
def get_batch(corpus, batch_size, str_len, char_pred = True):
# if char_pred = False, returns an array of random strings taken from the corpus
# if char_pred = True, instead returns two arrays, one x and one y (shifted right by one) for character prediction
corpus_size, num_chars = corpus.shape
if not char_pred:
batch = np.zeros((batch_size, str_len, num_chars))
for b in range(batch_size):
start_idx = randint(0, corpus_size - str_len - 1) # randint is end-inclusive
end_idx = start_idx + str_len
batch[b,:,:] = corpus[start_idx:end_idx,:]
return batch
else:
xbatch = np.zeros((batch_size, str_len, num_chars))
ybatch = np.zeros((batch_size, str_len, num_chars))
for b in range(batch_size):
start_x = randint(0, corpus_size - str_len - 2) # randint is end-inclusive
end_x = start_x + str_len
start_y, end_y = start_x + 1, end_x + 1
xbatch[b,:,:] = corpus[start_x:end_x,:]
ybatch[b,:,:] = corpus[start_y:end_y,:]
return xbatch, ybatch
## build the network:
print('Constructing network...')
sess = tf.InteractiveSession()
lstm_size = 300
str_len = 50
batch_size = 200
learning_rate = 0.001
x = tf.placeholder(tf.float32, [None, None, num_chars], name='x')
y = tf.placeholder(tf.float32, [None, None, num_chars], name='y')
num_cells = 2
## lstm:
cells = [rnn.BasicLSTMCell(lstm_size) for _ in range(num_cells)]
multicell = rnn.MultiRNNCell(cells)
projection = rnn.OutputProjectionWrapper(multicell, num_chars)
# outputs for training:
rnn_outputs, final_state = tf.nn.dynamic_rnn(projection, x, dtype=tf.float32)
xe = tf.nn.softmax_cross_entropy_with_logits(logits=rnn_outputs, labels=y)
total_loss = tf.reduce_mean(xe)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)
# outputs for sequential text generation:
seq_init = projection.zero_state(1, dtype=tf.float32)
seq_len = tf.placeholder(dtype=tf.int32, name='seq_len')
seq_output, seq_state = tf.nn.dynamic_rnn(projection, x, initial_state=seq_init, sequence_length=seq_len)
print('Initialising variables...')
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
if restore:
saver.restore(sess, model_path)
def softmax(k):
"""Compute softmax values for each sets of scores in x."""
e_k = np.exp(k - np.max(k))
return e_k / e_k.sum(axis=0)
def generate_sequence(seed='\n', max_len=100, stop_at_newline=True, complete_sentence=False):
"""recursively generate a sequence by generating a prediction and feeding it to next time step.
args:
seed:: initial data to feed to network. has to be non-empty, but newline \n is a safe bet.
max_len:: stop generating when we reach this many characters.
stop_at_newline:: if True, stop generating when a newline \n is generated.
complete_sentence:: if True, return the seed as well as the generated sequence.
this function might need cleaning up, but it works ok"""
seed_onehot = text_to_onehot(seed, char_indices)
pred_chars = []
states = []
state = None
for i in range(len(seed)):
seed_in = seed_onehot[i,:].reshape(1, 1, -1)
feed = {x: seed_in, seq_len: 1}
if i > 0:
feed[seq_init] = state
out, state = sess.run([seq_output, seq_state], feed_dict=feed) # print seed
# char_onehot = out[0,0,:]
# char = onehot_to_text(char_onehot.reshape(1,-1), indices_char)
if complete_sentence:
pred_chars.append(seed[i])
states.append(state)
# process last state before generating sequence:
char_logits = out[0, 0, :]
char_softmax = softmax(char_logits)
char_range = np.arange(num_chars)
char_choice = np.random.choice(char_range, p=char_softmax)
char_onehot = np.eye(num_chars)[char_choice, :] # neat trick
char = onehot_to_text(char_onehot.reshape(1, -1), indices_char)
pred_chars.append(char)
done = False
i = 0
while not done:
feed = {x: char_onehot.reshape(1, 1, -1), seq_init: state, seq_len: 1}
out, state = sess.run([seq_output, seq_state], feed_dict=feed) # print prediction
char_logits = out[0, 0, :]
char_softmax = softmax(char_logits)
char_range = np.arange(num_chars)
char_choice = np.random.choice(char_range, p=char_softmax)
char_onehot = np.eye(num_chars)[char_choice,:] # neat trick
char = onehot_to_text(char_onehot.reshape(1,-1), indices_char)
pred_chars.append(char)
states.append(state)
i += 1
if i > max_len or ((stop_at_newline) and char == '\n'):
done = True
pred_chars = pred_chars[:-1]
sequence = ''.join(pred_chars)
if len(sequence) > 0 and sequence[0] == '\n':
sequence = sequence[1:]
return sequence
# generate a test sequence:
generate_sequence(seed='\n', max_len=100, complete_sentence=True)
### now train the network:
num_epochs = 1000
update_interval = 100
ytexts = []
predtexts = []
gentexts = []
losses = []
print('Training network...')
batch_times = []
feed_times = []
e = 0
training_time = 1*60*60 # in seconds
done = False
start_time = time.time()
# train for an amount of time:
while not done:
time_elapsed = time.time() - start_time
batch_start = time.time()
xbatch,ybatch = get_batch(corpus, batch_size, str_len, char_pred=True)
batch_end = time.time()
feed = {x: xbatch, y: ybatch}
_, loss, pred = sess.run([train_step, total_loss, rnn_outputs], feed_dict=feed)
feed_end = time.time()
batch_times.append(batch_end - batch_start)
feed_times.append(feed_end - batch_end)
losses.append(loss)
# see what we're producing:
if e % update_interval == 0:
print(' Time elapsed: %d secs. Epoch %d: Training loss %.6f' % (time_elapsed, e, loss))
ytext = onehot_to_text(ybatch[0], indices_char)
predtext = onehot_to_text(np.array(pred)[0,:,:], indices_char)
gentext = generate_sequence()
print(' desired sentence:')
print(' %s' % ytext.replace('\n', '\\n'))
print(' output sentence:')
print(' %s' % predtext.replace('\n', '\\n'))
ytexts.append(ytext)
predtexts.append(predtext)
print(' generated sentence:')
print(' %s' % gentext)
gentexts.append(gentext)
print('Time spent generating batches: %.6f' % np.sum(batch_times))
print('Time spent training network: %.6f' % np.sum(feed_times))
batch_times = []
feed_times = []
e += 1
if time_elapsed > training_time:
print('Training complete.')
done = True
plt.plot(losses)
# generate a series of test sequences, each seeded by the last:
sequences=['\n']
for i in range(10):
sequences.append(generate_sequence(seed=sequences[-1], max_len=100))
print('\n'.join(sequences))
# save weights:
save_path = saver.save(sess, model_path)
print("TF model variables for %s saved in path: %s" % (model_name, save_path))
|
import numpy as np
def losses_kron(B0_kron,B1_kron,B2_kron,gen_PG_point,baseMVA):
"""Compute losses based on Kron's Method."""
gen_PG_point_pu = np.array(gen_PG_point) / baseMVA #convert to p.u.
B0_kron = np.array(B0_kron)
B1_kron = np.array(B1_kron)
B2_kron = np.array(B2_kron)
# losses0 = B0_kron * baseMVA
# losses1 = B1_kron.dot(gen_PG_point_pu) * baseMVA
# losses2 = gen_PG_point_pu.T.dot(B2_kron).dot(gen_PG_point_pu) * baseMVA
# losses = losses2 + losses1 + losses0 #total power losses
return (B0_kron + B1_kron.dot(gen_PG_point_pu) + gen_PG_point_pu.T.dot(B2_kron).dot(gen_PG_point_pu)) * baseMVA
def losses_kron_detailed(B0_kron,B1_kron,B2_kron,gen_PG_point,baseMVA):
"""Compute losses based on Kron's Method."""
gen_PG_point_pu = np.array(gen_PG_point) / baseMVA #convert to p.u.
B0_kron = np.array(B0_kron)
B1_kron = np.array(B1_kron)
B2_kron = np.array(B2_kron)
losses0 = B0_kron * baseMVA
losses1 = B1_kron.dot(gen_PG_point_pu) * baseMVA
losses2 = gen_PG_point_pu.T.dot(B2_kron).dot(gen_PG_point_pu) * baseMVA
# losses = losses2 + losses1 + losses0 #total power losses
return losses0, losses1, losses2
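# Worked sketch (made-up B-coefficients for a two-generator system, purely
# illustrative): with generation given in MW and baseMVA = 100, the quadratic
# loss formula P_loss = (B0 + B1.P + P'.B2.P) * baseMVA is evaluated in per-unit.
if __name__ == '__main__':
    B0 = 0.00030
    B1 = [0.0010, 0.0012]
    B2 = [[0.00020, 0.00001],
          [0.00001, 0.00030]]
    print(losses_kron(B0, B1, B2, [80.0, 120.0], 100.0))
    print(losses_kron_detailed(B0, B1, B2, [80.0, 120.0], 100.0))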
|
#!/usr/bin/env python3
# coding: utf-8
# author: alice12ml
from handler import Handler
from config import API_TOKEN
import requests
import logging
import subprocess
import uuid
class VideoHandler(Handler):
@classmethod
def response(cls, message):
if message.get('document') and message['document'].get('mime_type') == 'video/mp4':
return VideoHandler(message)
def handle(self):
logging.info("[%s] send a video." % self.sender)
file_id = self.message['document']['file_id']
# Get the file_path for this file_id
url = 'https://api.telegram.org/bot%s/getFile' % API_TOKEN
r = requests.get(url, {'file_id': file_id})
if r.status_code == requests.codes.ok and r.json()['ok']:
file_path = r.json()['result']['file_path']
logging.debug("file_path: " + file_path)
# Download the mp4 file behind the sticker and save it locally
filename = str(uuid.uuid1())
url = 'https://api.telegram.org/file/bot%s/%s' % (API_TOKEN, file_path)
r = requests.get(url)
with open(filename + ".mp4", "wb") as f:
f.write(r.content)
# Convert the file with ffmpeg
subprocess.run('ffmpeg -loglevel panic -i %s.mp4 -r 25 -vf "scale=iw/2:ih/2:flags=lanczos" %s.gif' % (filename, filename), shell=True)
logging.debug("convert video to gif.")
# Upload the file; since Telegram automatically converts gifs back to mp4, push it to an image host instead
logging.debug("upload gif to sm.ms")
url = 'https://sm.ms/api/upload'
gif = open(filename+'.gif', 'rb')
files = {'smfile': gif}
r = requests.post(url, files=files)
code = r.json()['code']
if code == 'success':
    pic_url = r.json()['data']['url']
    logging.info("gif url: " + pic_url)
    # Send the image-host link back to the chat
    self.send_message(pic_url)
else:
    logging.error("request.text: " + r.text)
# Remove the temporary files
gif.close()
subprocess.run('rm -f %s.mp4 %s.gif' % (filename, filename), shell=True)
else:
logging.error('request.text: ' + r.text)
|
#!/usr/bin/env python3
##########################################################################
# Script to simulate Modelica models with JModelica.
#
##########################################################################
# Import the function for compilation of models and the load_fmu method
from pymodelica import compile_fmu
import traceback
import logging
from pyfmi import load_fmu
import pymodelica
import os
import shutil
import sys
import matplotlib.pyplot as plt
debug_solver = False
model="Buildings.Utilities.Psychrometrics.Examples.DewPointTemperature"
generate_plot = False
# Overwrite model with command line argument if specified
if len(sys.argv) > 1:
# If the argument is a file, then parse it to a model name
if os.path.isfile(sys.argv[1]):
model = sys.argv[1].replace(os.path.sep, '.')[:-3]
else:
model=sys.argv[1]
print("*** Compiling {}".format(model))
# Increase memory
pymodelica.environ['JVM_ARGS'] = '-Xmx4096m'
sys.stdout.flush()
######################################################################
# Compile fmu
fmu_name = compile_fmu(model,
version="2.0",
compiler_log_level='warning', #'info', 'warning',
compiler_options = {"generate_html_diagnostics" : True,
"nle_solver_tol_factor": 1e-2}) # 1e-2 is the default
######################################################################
# Load model
mod = load_fmu(fmu_name, log_level=4) # default setting is 3
mod.set_max_log_size(2073741824) # = 2*1024^3 (about 2GB)
######################################################################
# Retrieve and set solver options
x_nominal = mod.nominal_continuous_states
opts = mod.simulate_options() #Retrieve the default options
opts['solver'] = 'CVode' #'Radau5ODE' #CVode
opts['ncp'] = 500
if opts['solver'].lower() == 'cvode':
# Set user-specified tolerance if it is smaller than the tolerance in the .mo file
rtol = 1.0e-8
x_nominal = mod.nominal_continuous_states
if len(x_nominal) > 0:
atol = rtol*x_nominal
else:
atol = rtol
opts['CVode_options']['external_event_detection'] = False
opts['CVode_options']['maxh'] = (mod.get_default_experiment_stop_time()-mod.get_default_experiment_start_time())/float(opts['ncp'])
opts['CVode_options']['iter'] = 'Newton'
opts['CVode_options']['discr'] = 'BDF'
opts['CVode_options']['rtol'] = rtol
opts['CVode_options']['atol'] = atol
opts['CVode_options']['store_event_points'] = True # True is default, set to false if many events
if debug_solver:
opts['CVode_options']['clock_step'] = True
if debug_solver:
opts["logging"] = True #<- Turn on solver debug logging
mod.set("_log_level", 4)
######################################################################
# Simulate
res = mod.simulate(options=opts)
# logging.error(traceback.format_exc())
if generate_plot:
plt.plot(res['time'], res['TDewPoi.T']-273.15)
plt.xlabel('time in [s]')
plt.ylabel('Dew point [degC]')
plt.grid()
plt.savefig("plot.pdf")
plt.show()
######################################################################
# Copy style sheets.
# This is a hack to get the css and js files to render the html diagnostics.
htm_dir = os.path.splitext(os.path.basename(fmu_name))[0] + "_html_diagnostics"
if os.path.exists(htm_dir):
for fil in ["scripts.js", "style.css", "zepto.min.js"]:
src = os.path.join(".jmodelica_html", fil)
if os.path.exists(src):
des = os.path.join(htm_dir, fil)
shutil.copyfile(src, des)
######################################################################
# Get debugging information
if debug_solver:
#Load the debug information
from pyfmi.debug import CVodeDebugInformation
debug = CVodeDebugInformation(model.replace(".", "_")+"_debug.txt")
### Below are options to plot the order, error and step-size evolution.
### The error methods also take a threshold and a region if you want to
### limit the plot to a certain interval.
if opts['solver'].lower() == 'cvode':
#Plot wall-clock time versus model time
debug.plot_cumulative_time_elapsed()
#Plot only the region 0.8 - 1.0 seconds and only state variables with an error greater than 0.01 (for any point in that region)
debug.plot_error(region=[0.8,1.0], threshold=0.01)
#Plot order evolution
debug.plot_order()
#Plot error evolution
debug.plot_error() #Note see also the arguments to the method
#Plot the used step-size
debug.plot_step_size()
#See also debug?
|
import json
import luigi
from luigi.contrib.postgres import CopyToTable
from datetime import datetime
from nyc_ccci_etl.commons.configuration import get_database_connection_parameters
from nyc_ccci_etl.utils.get_os_user import get_os_user
from nyc_ccci_etl.utils.get_current_ip import get_current_ip
from nyc_ccci_etl.orchestrator_tasks.update_centers import UpdateCenters
class LoadUpdateCentersMetadata(CopyToTable):
year = luigi.IntParameter()
month = luigi.IntParameter()
day = luigi.IntParameter()
def requires(self):
return UpdateCenters(self.year, self.month, self.day)
host, database, user, password = get_database_connection_parameters()
table = "transformed.update_centers_metadata"
schema = "transformed"
columns = [
("executed_at", "timestamp"),
("task_params", "varchar"),
("record_count", "integer"),
("execution_user", "varchar"),
("source_ip", "varchar"),
("database_name", "varchar"),
("database_schema", "varchar"),
("database_table", "varchar"),
("database_user", "varchar"),
("vars", "varchar"),
("script_tag", "varchar")
]
def rows(self):
params_string = "year={} month={} day={}".format(str(self.year), str(self.month), str(self.day))
row = (
str(datetime.now(tz=None)),
params_string,
"0",
get_os_user(),
get_current_ip(),
self.database,
self.schema,
self.table,
self.user,
"0",
"transormations"
)
yield row
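# Hedged usage sketch (not part of the original task): running this task needs
# a configured luigi setup and a reachable PostgreSQL instance; the date below
# is purely illustrative.
if __name__ == '__main__':
    luigi.build(
        [LoadUpdateCentersMetadata(year=2020, month=1, day=15)],
        local_scheduler=True,
    )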
|
import itertools
from z3 import z3
from teether.evm.state import SymRead, LazySubstituteState, translate
from teether.util.z3_extra_util import get_vars_non_recursive, concrete, ast_eq
class SymbolicResult(object):
def __init__(self, xid, state, constraints, sha_constraints, target_op):
self.xid = xid
self.state = state
self.constraints = constraints
self.sha_constraints = sha_constraints
self.target_op = target_op
self.calls = 1
self._simplified = False
self.storage_info = StorageInfo(self)
def simplify(self):
if self._simplified:
return
self.constraints = [z3.simplify(c) for c in self.constraints]
self.sha_constraints = {sha: z3.simplify(sha_value) if not isinstance(sha_value, SymRead) else sha_value for
sha, sha_value in self.sha_constraints.items()}
self._simplified = True
def copy(self):
new_xid = gen_exec_id()
self.simplify()
new_constraints = [translate(c, new_xid) for c in self.constraints]
new_sha_constraints = {translate(sha, new_xid): translate(sha_value, new_xid) if not isinstance(sha_value,
SymRead) else sha_value.translate(
new_xid) for sha, sha_value in
self.sha_constraints.items()}
new_state = self.state.copy(new_xid)
return SymbolicResult(new_xid, new_state, new_constraints, new_sha_constraints, self.target_op)
def may_read_from(self, other):
return self.storage_info.may_read_from(other.storage_info)
class CombinedSymbolicResult(object):
def __init__(self):
self.results = []
self._constraints = None
self._sha_constraints = None
self._states = None
self._idx_dict = None
self.calls = 0
def _reset(self):
self._constraints = None
self._sha_constraints = None
self._states = None
def combine(self, storage=dict(), initial_balance=None):
extra_subst = []
storage_base = z3.K(z3.BitVecSort(256), z3.BitVecVal(0, 256))
for k, v in storage.items():
storage_base = z3.Store(storage_base, k, v)
for result in self.results:
extra_subst.append((result.state.storage.base, storage_base))
storage_base = z3.substitute(result.state.storage.storage, extra_subst)
extra_constraints = []
if initial_balance is not None:
balance_base = z3.BitVecVal(initial_balance, 256)
else:
balance_base = None
for result in self.results:
if balance_base is not None:
extra_subst.append((result.state.start_balance, balance_base))
balance_base = z3.substitute(result.state.balance, extra_subst)
else:
balance_base = result.state.balance
self._states = [LazySubstituteState(r.state, extra_subst) for r in self.results]
self._constraints = [z3.substitute(c, extra_subst) for r in self.results for c in
r.constraints] + extra_constraints
self._sha_constraints = {
sha: z3.substitute(sha_value, extra_subst) if not isinstance(sha_value, SymRead) else sha_value for r in
self.results for sha, sha_value in r.sha_constraints.items()}
self._idx_dict = {r.xid: i for i, r in enumerate(self.results)}
def prepend(self, result):
self.calls += 1
self.results = [result] + self.results
self._reset()
@property
def idx_dict(self):
if self._idx_dict is None:
self.combine()
return self._idx_dict
@property
def constraints(self):
if self._constraints is None:
self.combine()
return self._constraints
@property
def sha_constraints(self):
if self._sha_constraints is None:
self.combine()
return self._sha_constraints
@property
def states(self):
if not self._states:
self.combine()
return self._states
@property
def state(self):
return self.states[-1]
def simplify(self):
self._constraints = [z3.simplify(c) for c in self.constraints]
self._sha_constraints = {sha: (z3.simplify(sha_value) if not isinstance(sha_value, SymRead) else sha_value) for
sha, sha_value in self.sha_constraints.items()}
class StorageInfo(object):
def __init__(self, result):
self.result = result
self._vars = dict()
self.concrete_reads = set()
self.concrete_writes = set()
self.symbolic_reads = set()
self.symbolic_writes = set()
self.symbolic_hash_reads = set()
self.symbolic_hash_writes = set()
for addr in set(result.state.storage.reads):
if concrete(addr):
self.concrete_reads.add(addr)
else:
x_vars = get_vars_non_recursive(addr, True)
self._vars[addr] = x_vars
if set(x_vars) & set(result.sha_constraints.keys()):
self.symbolic_hash_reads.add(addr)
else:
self.symbolic_reads.add(addr)
for addr in set(result.state.storage.writes):
if concrete(addr):
self.concrete_writes.add(addr)
else:
x_vars = get_vars_non_recursive(addr, True)
self._vars[addr] = x_vars
if set(x_vars) & set(result.sha_constraints.keys()):
self.symbolic_hash_writes.add(addr)
else:
self.symbolic_writes.add(addr)
def may_read_from(self, other):
if not self.symbolic_reads and not other.symbolic_writes:
# no side has a non-hash-based symbolic access
# => only concrete accesses can intersect
# (or hash-based accesses, which we will check later)
if self.concrete_reads & other.concrete_writes:
return True
else:
# at least one side has a non-hash-based symbolic access
# => if there is at least one concrete or symbolic access
# on the other side, the two could be equal
# (otherwise we have to look at hash-based accesses, see below)
if ((self.symbolic_reads or self.concrete_reads or self.symbolic_hash_reads) and
(other.symbolic_writes or other.concrete_writes or other.symbolic_hash_writes)):
return True
if self.symbolic_hash_reads and other.symbolic_hash_writes:
for a, b in itertools.product(self.symbolic_hash_reads, other.symbolic_hash_writes):
if not ast_eq(a, b):
continue
hash_a = list(self._vars[a] & set(self.result.sha_constraints.keys()))
hash_b = list(other._vars[b] & set(other.result.sha_constraints.keys()))
if len(hash_a) != 1 or len(hash_b) != 1:
# multiple hashes on either side
# => assume they could be equal
return True
# only one hash on either side
# => check whether these two can actually be equal
d_a = self.result.sha_constraints[hash_a[0]]
d_b = other.result.sha_constraints[hash_b[0]]
if isinstance(d_a, SymRead) or isinstance(d_b, SymRead):
return True
if d_a.size() == d_b.size():
return True
# at this point, we have checked every possible combination
# => no luck this time
return False
def gen_exec_id():
if "xid" not in gen_exec_id.__dict__:
gen_exec_id.xid = 0
else:
gen_exec_id.xid += 1
return gen_exec_id.xid
|
# Paginator.py (vars-localize)
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QPushButton, QLabel, QInputDialog
__author__ = "Kevin Barnard"
__copyright__ = "Copyright 2019, Monterey Bay Aquarium Research Institute"
__credits__ = ["MBARI"]
__license__ = "GPL"
__maintainer__ = "Kevin Barnard"
__email__ = "kbarnard@mbari.org"
__doc__ = '''
Pagination controller widget.
@author: __author__
@status: __status__
@license: __license__
'''
class Paginator(QWidget):
left_signal = pyqtSignal()
right_signal = pyqtSignal()
jump_signal = pyqtSignal()
def __init__(self, parent=None):
super(Paginator, self).__init__(parent)
self.setLayout(QHBoxLayout())
self.offset = 0
self.limit = 0
self.count = 0
self.nav_label = QLabel()
self.left_button = QPushButton()
self.left_button.setIcon(QIcon('images/arrow_left.png'))
self.left_button.pressed.connect(self.left_press)
self.right_button = QPushButton()
self.right_button.setIcon(QIcon('images/arrow_right.png'))
self.right_button.pressed.connect(self.right_press)
self.layout().addWidget(self.nav_label, stretch=1)
self.layout().addWidget(self.left_button)
self.layout().addWidget(self.right_button)
self.update_nav()
def left_press(self):
self.offset -= self.limit
self.update_nav()
self.left_signal.emit()
def right_press(self):
self.offset += self.limit
self.update_nav()
self.right_signal.emit()
def update_nav(self):
        self.offset = max(0, self.offset)  # Clamp the offset so it never goes below zero
left_bound = self.offset + 1
right_bound = self.offset + self.limit
count_msg = ''
if self.count:
right_bound = min(right_bound, self.count) # Limit to count
count_msg = ' of {}'.format(self.count)
self.nav_label.setText('{} - {}'.format(left_bound, right_bound) + count_msg)
# Disable buttons if hit boundaries
self.left_button.setEnabled(left_bound > 1)
if self.count:
self.right_button.setEnabled(right_bound < self.count)
def set_offset(self, offset):
self.offset = offset
self.update_nav()
def set_limit(self, limit):
self.limit = limit
self.update_nav()
def set_count(self, count):
self.count = count
self.update_nav()
def mouseDoubleClickEvent(self, a0: QtGui.QMouseEvent) -> None:
if not self.left_button.isEnabled() and not self.right_button.isEnabled():
return
imaged_moment_desired, ok = QInputDialog.getInt(self, 'Jump to imaged moment', 'Jump to imaged moment:')
if ok and 0 < imaged_moment_desired <= self.count:
self.set_offset(imaged_moment_desired - 1)
self.jump_signal.emit()
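# Hedged usage sketch (assumes PyQt5 is installed and the arrow icons exist under
# images/): shows how limit/count drive the "1 - 25 of 100" label and how the
# navigation signals can be consumed.
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    paginator = Paginator()
    paginator.set_limit(25)
    paginator.set_count(100)
    paginator.left_signal.connect(lambda: print('page back, offset =', paginator.offset))
    paginator.right_signal.connect(lambda: print('page forward, offset =', paginator.offset))
    paginator.show()
    sys.exit(app.exec_())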
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 01:12:40 2020
@author: kwanale1
"""
import pandas as pd
from ppandas import PDataFrame
df1 = pd.read_csv("data/spatial-ref-region-point.csv")
df1.rename(columns={"Region":"Location"},inplace=True)
#print(df1.columns)
df2_point = pd.read_csv("data/spatial-sec-region-point.csv",usecols=["Point","A"])
df2_point.rename(columns={"Point":"Location"},inplace=True)
#print(df2_point.columns)
df2_x_y = pd.read_csv("data/spatial-sec-region-point.csv",usecols=["X","Y","A"])
#zip together X and Y to make one Location column
df2_x_y['Location'] = list(zip(df2_x_y.X, df2_x_y.Y))
df2_x_y.drop(columns=["X","Y"],inplace=True)
pd1 = PDataFrame(["Location"],df1)
print(pd1.query(["Location"]))
print(pd1.query(["A"]))
#pd1.visualise(show_tables=True)
pd2_point = PDataFrame(["Location"],df2_point)
pd2_x_y = PDataFrame(["Location"],df2_x_y)
#pd2_point.visualise(show_tables=True)
#pd2_x_y.visualise(show_tables=False)
#print(pd1.bayes_net.get_cpds(node = "A"))
#print(pd2_x_y.bayes_net.get_cpds(node = "A"))
print(pd2_point.query(["A"]))
print(pd2_x_y.query(["A"]))
#pd2_x_y.visualise(show_tables=True)
pd_join = pd1.pjoin(pd2_point, mismatches={"Location":"spatial"})
#print(pd_join.query(["Location"]))
#pd_join = pd1.pjoin(pd2_x_y, mismatches={"Location":"spatial"})
#print(pd_join.bayes_net.get_cpds(node = "A"))
#pd_join.visualise(show_tables = True)
#queryResult = pd_join.query(['A'],{"Location":"POINT (0.5 0.5)"}) # ToDo: Rewrite query of Point to Regions? How about brand new points?
#queryResult = pd_join.query(['A'],{"Location":"POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))"}) #
queryResult = pd_join.query(['A'],{"Location":"POLYGON ((0 1, 1 1, 1 2, 0 2, 0 1))"}) #
print('conditional query')
print(queryResult)
#ToDo: Future Query by bounds on points: Eg South of this longitude X?
#Fails since reference distribution can't use Point data
pd_join = pd2_x_y.pjoin(pd1, mismatches={"Location":"spatial"})
|
"""General configuration for mpl plotting scripts"""
from pathlib import Path
from astropy import units as u
from astropy.units import imperial
BASE_PATH = Path(__file__).parent.parent
FIGURE_WIDTH_AA = {
"single-column": 90 * u.mm,
"two-column": 180 * u.mm,
"intermediate": 120 * u.mm,
}
class FigureSizeAA:
"""Figure size A&A"""
def __init__(self, aspect_ratio=1, width_aa="single-column"):
self.width = FIGURE_WIDTH_AA[width_aa]
self.height = self.width / aspect_ratio
@property
def inch(self):
"""Figure size in inch"""
return self.width.to_value(imperial.inch), self.height.to_value(imperial.inch)
@property
def mm(self):
"""Figure size in mm"""
return self.width.value, self.height.value
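# Hedged usage sketch (the matplotlib import is an assumption, it is not required
# by this module): build an A&A single-column figure with a 4:3 aspect ratio.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    size = FigureSizeAA(aspect_ratio=4 / 3, width_aa="single-column")
    fig, ax = plt.subplots(figsize=size.inch)  # matplotlib expects inches
    print("figure size in mm:", size.mm)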
|
from flask import render_template,request,redirect,url_for,abort, flash, jsonify
from . import main
from .forms import BlogForm,UpdateProfile, CommentForm
from ..models import User,Blog,Comment
from flask_login import login_required, current_user
from .. import db,photos
from ..request import get_blogs
# Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to The Blogging Website'
blogs = get_blogs()
return render_template('index.html',title = title,blogs = blogs)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/view/<int:id>', methods=['GET', 'POST'])
@login_required
def view(id):
blog = Blog.query.get_or_404(id)
blog_comments = Comment.query.filter_by(blog_id=id).all()
comment_form = CommentForm()
if comment_form.validate_on_submit():
new_comment = Comment(blog_id=id, comment=comment_form.comment.data, user=current_user)
new_comment.save_comment()
return render_template('view.html', blog=blog, blog_comments=blog_comments, comment_form=comment_form)
@main.route('/blog/allblogs', methods=['GET', 'POST'])
@login_required
def blogger():
blogs = Blog.query.all()
return render_template('blogger.html', blogs=blogs)
@main.route('/blog/new', methods = ['GET','POST'])
@login_required
def blogs():
blog_form = BlogForm()
if blog_form.validate_on_submit():
title_blog = blog_form.title_blog.data
blog_content = blog_form.blog_content.data
        # Creating a new Blog instance
new_blog = Blog(title_blog=title_blog,blog_content=blog_content,user=current_user)
# Saving the blog method
new_blog.save_blog()
return redirect(url_for('main.blogger'))
title = 'The Blog'
return render_template('new_blog.html',title = title,blog_form=blog_form )
@main.route('/Update/<int:id>', methods=['GET', 'POST'])
@login_required
def update_blog(id):
blog = Blog.query.get_or_404(id)
if blog.user != current_user:
abort(403)
form = BlogForm()
if form.validate_on_submit():
blog.title_blog = form.title_blog.data
blog.blog_content = form.blog_content.data
db.session.commit()
return redirect(url_for('main.blogger'))
elif request.method == 'GET':
form.title_blog.data = blog.title_blog
form.blog_content.data = blog.blog_content
return render_template('update_blog.html', blog_form=form)
@main.route('/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete(id):
blog = Blog.query.get_or_404(id)
if blog.user != current_user:
abort(403)
db.session.delete(blog)
db.session.commit()
return redirect(url_for('main.blogger'))
@main.route('/delete_comment/<int:comment_id>', methods=['GET', 'POST'])
@login_required
def delete_comment(comment_id):
comment =Comment.query.get_or_404(comment_id)
if (comment.user.id) != current_user.id:
abort(403)
db.session.delete(comment)
db.session.commit()
    flash('Successfully deleted the comment!')
    return redirect(url_for('main.blogger'))
|
#!/usr/bin/python
import psutil
def main():
count = 0
for proc in psutil.process_iter():
count += 1
print "%s proc = %s" % (str(count), str(proc))
main()
|
"""Initial Migration
Revision ID: a3032c881fba
Revises: 4a7e813c98de
Create Date: 2019-02-11 19:28:22.793189
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a3032c881fba'
down_revision = '4a7e813c98de'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'pass_secure')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('pass_secure', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
# ### end Alembic commands ###
|
import pandas as pd
import numpy as np
def load_mie_file(filename):
"""
Loads the Mie parameters from a file.
Parameters
----------
filename: str
The name of the file storing the Mie scattering parameters
Returns
-------
my_df: xarray.Dataset
The xarray Dataset storing the Mie parameters, including
descriptive metadata.
"""
my_df = pd.read_csv(filename, delim_whitespace=True,
names=["wavelength", "p_diam", "size_parameter", "compre_real",
"compre_im", "scat_p", "alpha_p", "beta_p", "scat_eff",
"ext_eff", "backscat_eff"])
my_df["alpha_p"] = my_df["alpha_p"] * 1e-12
my_df["beta_p"] = my_df["beta_p"] * 1e-12 / (4 * np.pi)
my_df["scat_p"] = my_df["scat_p"] * 1e-12
my_df["p_diam"] = 2e-6 * my_df["p_diam"]
my_df["backscat_eff"] = my_df["backscat_eff"] / (4 * np.pi)
my_df = my_df.to_xarray()
my_df["wavelength"].attrs["units"] = "microns"
my_df["wavelength"].attrs["long_name"] = "Wavelength of beam"
my_df["wavelength"].attrs["standard_name"] = "wavelength"
my_df["p_diam"].attrs["units"] = "meters"
my_df["p_diam"].attrs["long_name"] = "Diameter of particle"
my_df['p_diam'].attrs["standard_name"] = "Diameter"
my_df["size_parameter"].attrs["units"] = "1"
my_df["size_parameter"].attrs["long_name"] = "Size parameter (pi*diameter / wavelength)"
my_df['size_parameter'].attrs["standard_name"] = "Size parameter"
my_df["compre_real"].attrs["units"] = "1"
my_df["compre_real"].attrs["long_name"] = ("Complex refractive index of the sphere divided " +
"by the real index of the medium (real part)")
my_df['compre_real'].attrs["standard_name"] = "Complex_over_real_Re"
my_df["compre_im"].attrs["units"] = "1"
my_df["compre_im"].attrs["long_name"] = ("Complex refractive index of the sphere divided " +
"by the real index of the medium (imaginary part)")
my_df['compre_im'].attrs["standard_name"] = "Complex_over_real_Im"
my_df["scat_p"].attrs["units"] = "microns^2"
my_df["scat_p"].attrs["long_name"] = "scattering cross section"
my_df["scat_p"].attrs["standard_name"] = "Scat_cross_section"
my_df["beta_p"].attrs["units"] = "meters^2"
my_df["beta_p"].attrs["long_name"] = "Back scattering cross section"
my_df["beta_p"].attrs["standard_name"] = "Scat_cross_section_back"
my_df["alpha_p"].attrs["units"] = "meters^2"
my_df["alpha_p"].attrs["long_name"] = "Extinction cross section"
my_df["alpha_p"].attrs["standard_name"] = "Ext_cross_section"
my_df["scat_eff"].attrs["units"] = "1"
my_df["scat_eff"].attrs["long_name"] = "scattering efficiency"
my_df["scat_eff"].attrs["standard_name"] = "Scattering_efficiency"
my_df["ext_eff"].attrs["units"] = "1"
my_df["ext_eff"].attrs["long_name"] = "Extinction efficiency"
my_df["ext_eff"].attrs["standard_name"] = "Extinction_efficiency"
my_df["backscat_eff"].attrs["units"] = "sr^-1"
my_df["backscat_eff"].attrs["long_name"] = "Backscattering efficiency"
my_df["backscat_eff"].attrs["standard_name"] = "Backscattering_efficiency"
return my_df
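# Hedged usage sketch: "mie_parameters.dat" is a hypothetical whitespace-delimited
# file with the eleven columns listed above; the call only illustrates the API.
if __name__ == '__main__':
    mie_ds = load_mie_file("mie_parameters.dat")
    print(mie_ds["p_diam"].attrs["units"], mie_ds["alpha_p"].attrs["long_name"])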
|
from .effector import Effector
class DefaultEffector(Effector):
"""default effector for Casbin."""
def merge_effects(self, expr, effects, results):
"""merges all matching results collected by the enforcer into a single decision."""
result = False
if expr == "some(where (p_eft == allow))":
for eft in effects:
if eft == self.ALLOW:
result = True
break
elif expr == "!some(where (p_eft == deny))":
result = True
for eft in effects:
if eft == self.DENY:
result = False
break
elif expr == "some(where (p_eft == allow)) && !some(where (p_eft == deny))":
for eft in effects:
if eft == self.ALLOW:
result = True
elif eft == self.DENY:
result = False
break
elif expr == "priority(p_eft) || deny":
for eft in effects:
if eft != self.INDETERMINATE:
if eft == self.ALLOW:
result = True
else:
result = False
break
else:
raise RuntimeError("unsupported effect")
return result
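# Hedged usage sketch (assumes the Effector base class exposes ALLOW/DENY
# constants, as in pycasbin): merge a mixed allow/deny effect list under the
# allow-and-not-deny policy effect expression.
if __name__ == '__main__':
    effector = DefaultEffector()
    effects = [effector.ALLOW, effector.DENY]
    decision = effector.merge_effects(
        "some(where (p_eft == allow)) && !some(where (p_eft == deny))", effects, [])
    print(decision)  # False, because a deny effect is present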
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 PANGAEA (https://www.pangaea.de/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from fuji_server.evaluators.fair_evaluator import FAIREvaluator
from fuji_server.models.semantic_vocabulary import SemanticVocabulary
from fuji_server.models.semantic_vocabulary_output import SemanticVocabularyOutput
from fuji_server.models.semantic_vocabulary_output_inner import SemanticVocabularyOutputInner
class FAIREvaluatorSemanticVocabulary(FAIREvaluator):
def evaluate(self):
self.result = SemanticVocabulary(id=self.metric_number, metric_identifier=self.metric_identifier,
metric_name=self.metric_name)
# remove duplicates
if self.fuji.namespace_uri:
self.fuji.namespace_uri = list(set(self.fuji.namespace_uri))
self.fuji.namespace_uri = [x.strip() for x in self.fuji.namespace_uri]
self.logger.info('{0} : Number of vocabulary namespaces extracted from all RDF-based metadata -: {1}'.format(
self.metric_identifier, len(self.fuji.namespace_uri)))
# exclude white list
excluded = []
for n in self.fuji.namespace_uri:
for i in self.fuji.DEFAULT_NAMESPACES:
if n.startswith(i):
excluded.append(n)
self.fuji.namespace_uri[:] = [x for x in self.fuji.namespace_uri if x not in excluded]
if excluded:
self.logger.info(
'{0} : Default vocabulary namespace(s) excluded -: {1}'.format(self.metric_identifier, excluded))
outputs = []
score = 0
test_status = 'fail'
# test if exists in imported list, and the namespace is assumed to be active as it is tested during the LOD import.
if self.fuji.namespace_uri:
self.maturity = 1
self.setEvaluationCriteriumScore('FsF-I1-02M-1', 0, 'pass')
lod_namespaces = [d['namespace'] for d in self.fuji.VOCAB_NAMESPACES if 'namespace' in d]
exists = list(set(lod_namespaces) & set(self.fuji.namespace_uri))
self.logger.info(
'{0} : Check the remaining namespace(s) exists in LOD -: {1}'.format(self.metric_identifier, exists))
if exists:
score = self.total_score
self.setEvaluationCriteriumScore('FsF-I1-02M-2', 1, 'pass')
self.maturity = 3
self.logger.log(self.fuji.LOG_SUCCESS, '{0} : Namespace matches found -: {1}'.format(self.metric_identifier, exists))
for e in exists:
outputs.append(SemanticVocabularyOutputInner(namespace=e, is_namespace_active=True))
else:
self.logger.warning('{0} : NO vocabulary namespace match is found'.format(self.metric_identifier))
not_exists = [x for x in self.fuji.namespace_uri if x not in exists]
if not_exists:
                self.logger.warning('{0} : Vocabulary namespace(s) specified but no match is found in LOD reference list -: {1}'.format(
self.metric_identifier, not_exists))
else:
self.logger.warning('{0} : NO namespaces of semantic vocabularies found in the metadata'.format(self.metric_identifier))
if score > 0:
test_status = 'pass'
self.result.test_status = test_status
self.score.earned = score
self.result.score = self.score
self.result.metric_tests = self.metric_tests
self.result.maturity = self.maturity
self.result.output = outputs
|
import pyperclip
import random
import string
global users_list
class User:
'''
    allows us to create new users and save their information for future usage
'''
users_list = []
def __init__(self,first_name,last_name,password):
'''
defining properties
'''
# In variables
self.first_name = first_name
self.last_name = last_name
self.password = password
def save_user(self):
'''
saves new user created
'''
User.users_list.append(self)
class Credential:
'''
    stores credential information (site, account name, password) for users
'''
# Class Variables
credentials_list =[]
user_credentials_list = []
@classmethod
def check_user(cls,first_name,password):
'''
checks if info entered matches that stored
'''
current_user = ''
for user in User.users_list:
if (user.first_name == first_name and user.password == password):
current_user = user.first_name
return current_user
def __init__(self,user_name,site_name,account_name,password):
'''
defines users properties
'''
self.user_name = user_name
self.site_name = site_name
self.account_name = account_name
self.password = password
def save_credentials(self):
'''
        Function to save a newly created credential instance
'''
Credential.credentials_list.append(self)
    @staticmethod
    def generate_password(size=8, char=string.ascii_uppercase+string.ascii_lowercase+string.digits):
        '''
        generates a random password of the given length
'''
gen_pass=''.join(random.choice(char) for _ in range(size))
return gen_pass
@classmethod
def display_credentials(cls,user_name):
'''
        displays all credentials saved by the given user
'''
user_credentials_list = []
for credential in cls.credentials_list:
if credential.user_name == user_name:
user_credentials_list.append(credential)
return user_credentials_list
@classmethod
def find_by_site_name(cls, site_name):
'''
        finds a credential by its site name
'''
for credential in cls.credentials_list:
if credential.site_name == site_name:
return credential
@classmethod
def copy_credential(cls,site_name):
'''
        copies the password of the credential matching the given site name to the clipboard
'''
find_credential = Credential.find_by_site_name(site_name)
return pyperclip.copy(find_credential.password)
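# Hedged usage sketch with illustrative data only: create a user, verify the
# login, generate a random password and store a credential for a site.
if __name__ == '__main__':
    new_user = User("jane", "doe", "pass123")
    new_user.save_user()
    print(Credential.check_user("jane", "pass123"))  # -> "jane"
    generated = Credential.generate_password(size=10)
    cred = Credential("jane", "twitter", "jane_doe", generated)
    cred.save_credentials()
    print([c.site_name for c in Credential.display_credentials("jane")])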
|
from statistics import mean
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from nltk import agreement
def multi_kappa(data):
ratingtask = agreement.AnnotationTask(data=data)
return ratingtask.multi_kappa()
def calculate_kappe(data, from_date, to_date, name):
print('Calculating annotator agreement from {} to {}'.format(from_date, to_date))
df2 = data
df2['created_time'] = df2['created_time'].apply(pd.to_datetime)
df2 = df2[(df2['created_time'] >= from_date) & (df2['created_time'] <= to_date)]
number_of_annotators = len(df2.groupby('user_id'))
annotators = range(1, number_of_annotators+1)
print('Number of annotations: {}'.format(len(df2)))
print('Posts per Annotator: {}'.format(len(df2) / number_of_annotators))
print('')
values = list()
values_formatted = list()
categories = ["category_1", "category_2", "category_3", "category_4", "category_5", "category_6", "category_7",
"category_8", "category_9", "category_10", "category_9999"]
category_labels = ['Success', 'All Topics', '1. Product/Service', '2. Event/Fair', '3. Interactions', '4. News', '5. Entertainment', '6. Knowledge',
'7. Recruiting/HR', '8. Corporate Social\n Responsibility', '9. Advertising/Campaign', '10. Sponsoring',
'11. Other']
for category in categories:
category_df = df2.filter(items=['user_id', 'post_id', category])
category_data = category_df.values.tolist()
try:
rating = multi_kappa(category_data)
values.append(rating)
values_formatted.append('{}: {}'.format(category, rating))
except ZeroDivisionError as e:
values.append(0.0)
# Suppress the exception
            # A ZeroDivisionError occurs if no post for that class exists
pass
print('Kappa values: {}'.format(values))
print('Kappa values: {}'.format(values_formatted))
mean_rating = mean([x for x in values if x > 0])
print('Kappa on category (mean): {}'.format(mean_rating))
success_data = df2.filter(items=['user_id', 'post_id', 'successful']).values.tolist()
success_rating = multi_kappa(success_data)
print('Kappa on success: {}'.format(success_rating))
print('')
val_new = list()
val_new.append(success_rating)
val_new.append(mean_rating)
for x in values:
val_new.append(x)
df3 = pd.DataFrame([val_new], columns=category_labels, index=['All'])
for annotator in annotators:
values_wo = list()
for category in categories:
category_df = df2.filter(items=['user_id', 'post_id', category])
category_df = category_df[category_df['user_id'] != annotator]
category_data = category_df.values.tolist()
try:
rating = multi_kappa(category_data)
print('{} {}: '.format(category, rating))
values_wo.append(rating)
except ZeroDivisionError as e:
values_wo.append(0.0)
# Suppress the exception
                # A ZeroDivisionError occurs if no post for that class exists
pass
print('Kappa values, without {}: {}'.format(annotator, values_wo))
mean_rating_wo = mean([x for x in values_wo if x > 0])
print('Kappa on category (mean), without {}: {}'.format(annotator, mean_rating_wo))
success_df = df2.filter(items=['user_id', 'post_id', 'successful'])
success_df = success_df[success_df['user_id'] != annotator]
success_data = success_df.values.tolist()
success_rating_wo = multi_kappa(success_data)
print('Kappa on success, without {}: {}'.format(annotator, success_rating_wo))
print('')
val_new2 = list()
val_new2.append(success_rating_wo)
val_new2.append(mean_rating_wo)
for x in values_wo:
val_new2.append(x)
df3 = df3.append(pd.DataFrame([val_new2], columns=category_labels, index=['Without {}'.format(annotator)]))
print(df3)
plot_horizontal(df3, name)
def plot_horizontal(data, plot_name):
plt.rcdefaults()
plt.style.use('grayscale')
cm = plt.get_cmap('gist_gray')
fig, ax = plt.subplots()
co = iter(cm(np.linspace(0, 2, len(data.columns))))
cols = [next(co), next(co),next(co),next(co),next(co),next(co)]
y_pos = 5*np.arange(len(data.columns))
width = 0.75
rects1 = ax.barh(y_pos - 2.5*width, data.iloc[0], width, align='center', label='All Experts', color=cols[0])
rects2 = ax.barh(y_pos - 1.5*width, data.iloc[1], width, align='center', label='Without 1', color=cols[4])
rects3 = ax.barh(y_pos - 0.5*width, data.iloc[2], width, align='center', label='Without 2', color=cols[2])
rects4 = ax.barh(y_pos + 0.5*width, data.iloc[3], width, align='center', label='Without 3', color=cols[5])
rects5 = ax.barh(y_pos + 1.5*width, data.iloc[4], width, align='center', label='Without 4', color=cols[1])
rects6 = ax.barh(y_pos + 2.5*width, data.iloc[5], width, align='center', label='Without 5', color=cols[3])
ax.set_yticks(y_pos)
ax.set_yticklabels(data.columns)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Fleiss\' Kappa')
ax.legend()
plt.show()
for plot_format in ['eps', 'pdf', 'png']:
fig.savefig('plots/annotator_agreement_{}.{}'.format(plot_name, plot_format), dpi=fig.dpi, bbox_inches = "tight")
def main():
annotations_path = '../data/annotations.csv'
dataset = pd.read_csv(annotations_path)
df2 = dataset[dataset['phase'] == 2]
calculate_kappe(df2, '2000-01-01', '2019-05-31', 'a')
calculate_kappe(df2, '2019-06-03', '2099-12-31', 'b')
if __name__ == '__main__':
main()
|
import os
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
data_path = "./results/???/" # Change here
save_folder = "./plots/learning/???/" # Change here
meta = {}
with open(data_path + "/meta.txt", "r") as param_file:
for line in param_file:
key, val = line.partition("=")[::2]
meta[key.strip()] = val.strip()
data = []
for folder in glob(data_path + "/*/"):
if os.path.isfile(folder + "performance.dat"):
data = np.loadtxt(folder + "performance.dat")
else:
print("Missing performance data in " + folder)
exit(-1)
# Epoch, LR, Training Loss, Validation Loss, Validation Error,
# Best Epoch, Best Validation Error, Test Loss, Test Error
train_loss = data[:, 2]
val_loss = data[:, 3]
test_loss = data[:, 7]
val_error = data[:, 4]
test_error = data[:, 8]
best_epoch = data[-1, 5]
fig, ax_list = plt.subplots(1, 2)
plt.tight_layout(pad=2, w_pad=2, h_pad=1)
ax_list[0].plot(train_loss, color='blue', label='train loss', lw=2)
ax_list[0].plot(val_loss, color='green', label='val loss', lw=2)
ax_list[0].plot(test_loss, color='red', label='test loss', lw=2)
ax_list[0].legend(loc="upper right")
ax_list[1].set_ylim([0, 100])
ax_list[1].plot(val_error, color='green', label='val error', lw=2)
ax_list[1].plot(test_error, color='red', label='test error', lw=2)
ax_list[1].axvline(x=best_epoch, color='gray', linestyle='--', label='best epoch', lw=2)
ax_list[1].legend(loc="upper right")
params = {}
with open(folder + "params.txt", "r") as param_file:
for line in param_file:
key, val = line.partition("=")[::2]
params[key.strip()] = val.strip()
fname = meta["dataset"] + "_" + meta["network"] + meta["layer"] + "_a" + params["nalpha"]
if params["binary"] == "True":
plt.savefig(save_folder + fname + "_b.png")
elif params["binary"] == "False":
plt.savefig(save_folder + fname + ".png")
else:
print("Param file corrupted in " + folder)
exit(-1)
plt.close(fig)
|
from requests import *
import utils
import re
match_ids = re.compile("href=\"/note/(.+?)\">")
def check(host):
s = session()
# reg
result = s.post(f"http://na2.{host}/", data={
"username": utils.randomString(),
"password": "9pFrqCEyagsCbabGamT"
}).content
if b"Welcome to our super-duper-duper safe note app!" not in result:
return 0, "Failed to login"
# add note
result = s.post(f"http://na2.{host}/add_note", data={
"content": "<script>alert(1)</script>",
"xsrf": s.cookies.get("token")
}).content
if b"Note ID: " not in result:
return 0, "Wrong result of add note"
# test xss
note_id = match_ids.findall(str(result))[0]
result = s.get(f"http://na2.{host}/note/{note_id}").content
if b"<script>alert(1)</script>" not in result:
return 0, "Cannot trigger XSS"
# test logout
s.post(f"http://na2.{host}/logout")
if "token" in s.cookies:
return 0, "Logout failed"
# test admin
result = s.post(f"http://na2.{host}/", data={
"username": "admin",
"password": "Lq#QHMnpyk6Y+.]"
}).content
# admin flag
result = s.get(f"http://na2.{host}/note/1").content
if b'we{f93486a2-4f82-42b6-8dc8-04cd765501f3@1nsp1reD-bY-cHa1I-1N-BbC7F}' not in result:
return 0, "Failed to get flag"
return 1, ""
FUNCTIONS = [check]
if __name__ == "__main__":
print(check("w-va.cf"))
|
if __name__ == '__main__':
import os
pipe = os.popen('/home/ivan/shortcuts/python hello-out.py')
print(pipe.read())
print(pipe.close())
pipe = os.popen('/home/ivan/shortcuts/python hello-in.py', 'w')
print(pipe.write('Gumby\n'))
pipe.close()
print(open('hello-in.txt').read())
|
n1 = int(input('Enter a number: '))
predecessor = n1 - 1
successor = n1 + 1
print("Its successor is {} and its predecessor is {}".format(successor, predecessor))
|
from setuptools import setup
setup(
name='python-amazon-sp-api',
version='0.11.0',
install_requires=[
"requests",
"six>=1.15,<2",
"boto3>=1.16.39,<2",
"cachetools~=4.2.0",
"pytz",
"confuse~=1.4.0"
],
packages=['tests', 'tests.api', 'tests.api.orders', 'tests.api.sellers', 'tests.api.finances',
'tests.api.product_fees', 'tests.api.notifications', 'tests.api.reports', 'tests.client',
'sp_api',
'sp_api.api',
'sp_api.api.orders',
'sp_api.api.sellers',
'sp_api.api.finances',
'sp_api.api.product_fees',
'sp_api.api.products',
'sp_api.api.feeds',
'sp_api.api.sales',
'sp_api.api.catalog',
'sp_api.api.notifications',
'sp_api.api.reports',
'sp_api.api.inventories',
'sp_api.api.messaging',
'sp_api.api.upload',
'sp_api.api.merchant_fulfillment',
'sp_api.api.fulfillment_inbound',
'sp_api.auth',
'sp_api.base',
'sp_api.util',
##### DO NOT DELETE ########## INSERT PACKAGE HERE #######
'sp_api.api.listings_restrictions',
'sp_api.api.catalog_items',
'sp_api.api.product_type_definitions',
'sp_api.api.listings_items',
'sp_api.api.vendor_transaction_status',
'sp_api.api.vendor_shipments',
'sp_api.api.vendor_orders',
'sp_api.api.vendor_invoices',
'sp_api.api.vendor_direct_fulfillment_transactions',
'sp_api.api.vendor_direct_fulfillment_shipping',
'sp_api.api.vendor_direct_fulfillment_payments',
'sp_api.api.vendor_direct_fulfillment_orders',
'sp_api.api.vendor_direct_fulfillment_inventory',
'sp_api.api.tokens',
'sp_api.api.solicitations',
'sp_api.api.shipping',
'sp_api.api.services',
'sp_api.api.fba_small_and_light',
'sp_api.api.fba_inbound_eligibility',
'sp_api.api.authorization',
'sp_api.api.aplus_content',
'sp_api.api.fulfillment_outbound',
],
scripts=['make_endpoint/make_endpoint'],
url='https://github.com/saleweaver/python-amazon-sp-api',
license='MIT',
author='Michael',
author_email='info@saleweaver.com',
description='Python wrapper for the Amazon Selling-Partner API'
)
|
from scapy.layers.inet import IP, UDP
from scapy.layers.ntp import NTP
from scapy.packet import Packet
from scapy.sendrecv import sniff, sr1, send
from ntp_utils import ntp_time_now
class NTPInterceptor:
def intercept_req(self, pck):
return pck
def intercept_res(self, pck):
return pck
class NTPServer:
"""
A scapy MITM NTP server which can respond to client requests and provide access to interceptors in order to
implement covert channels.
"""
def __init__(self, sniff_interface: str = 'wlp4s0', host_ip='localhost', req_interceptor=NTPInterceptor(),
res_interceptor=NTPInterceptor()):
"""
:param sniff_interface:
:param req_interceptor: a class which is called whenever an NTP package arrives at the server.
:param res_interceptor: a class which is called whenever a NTP response is send to a client request.
"""
super().__init__()
self.sniff_interface = sniff_interface
self._req_interceptor = req_interceptor
self._res_interceptor = res_interceptor
self._host_ip = host_ip
self.reference_time = ntp_time_now()
self.debug = False
def run(self, with_response: bool = True):
"""
        Starts sniffing for incoming NTP client packets. Note that further packets are not sniffed while
        one packet is being processed.
"""
print('Starting server.... listening on interface ' + self.sniff_interface)
while True:
pck = self.next_ntp_packet()
received_time = ntp_time_now()
if pck[IP].dst != self._host_ip:
                print('This packet was not meant for the server...')
continue
pck_ntp = pck[NTP]
if pck_ntp.mode != 3:
continue
self._req_interceptor.intercept_req(pck_ntp)
if not with_response:
continue
if self.debug:
print('Got a NTP client request, creating response.')
# ntp_resp = self._send_ntp_client_request(ntp=pck_ntp)
response_from_server_ntp = NTP() # ntp_resp[NTP]
response_from_server_ntp.recv = received_time
response_from_server_ntp.ref = self.reference_time
# response_from_server_ntp.id = str(pck[IP].dst)
response_from_server_ntp = self._res_interceptor.intercept_res(response_from_server_ntp)
response = IP(dst=pck[IP].src, src=pck[IP].dst) / UDP() / response_from_server_ntp
if self.debug:
response.show()
send(response)
def next_ntp_packet(self) -> Packet:
"""
        Sniffs for the next incoming NTP packet. This method is blocking.
        :return: the sniffed packet.
"""
results = sniff(filter='udp and port 123', count=1, iface=self.sniff_interface)
pck = (results[0])
if self.debug:
pck.show()
return pck
def _send_ntp_client_request(self, dst='pool.ntp.org', ntp=NTP()) -> Packet:
pck = IP(dst=dst) / UDP() / ntp
if self.debug:
pck.show()
pck = sr1(pck)
if self.debug:
pck.show()
return pck
if __name__ == '__main__':
class StratumInterceptor(NTPInterceptor):
def intercept_res(self, pck):
pck.stratum = 2
return pck
interceptor = StratumInterceptor()
server = NTPServer(res_interceptor=interceptor, host_ip='192.168.0.4')
server.run()
|
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
l, r = 1, n
while l<r:
m = (l+r)//2
if isBadVersion(m):
r = m # Because m can be the first bad version, we keep it in the limits
else:
l = m+1 # Because m is not a bad version, we do not keep it in the limits
return l
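# Hedged local test sketch: isBadVersion below is a stand-in for the
# judge-provided API, hard-coded so that version 4 is the first bad one.
if __name__ == '__main__':
    def isBadVersion(version):
        return version >= 4
    print(Solution().firstBadVersion(10))  # expected: 4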
|
#!/usr/bin/python
# Copyright (c) 2017 Tim Rightnour <thegarbledone@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: syslogger
short_description: Log messages in the syslog
description:
- "Uses syslog to add log entries to the host."
- "Can specify facility and priority."
options:
msg:
description:
- This is the message to place in syslog
required: true
priority:
description:
- Set the log priority
choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ]
required: false
default: "info"
facility:
description:
- Set the log facility
choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news",
"uucp", "cron", "syslog", "local0", "local1", "local2",
"local3", "local4", "local5", "local6", "local7" ]
required: false
default: "daemon"
log_pid:
description:
- Log the pid in brackets
type: bool
required: false
default: "no"
author:
- Tim Rightnour (@garbled1)
'''
EXAMPLES = '''
# Full example
- name: Test syslog
syslogger:
msg: "Hello from ansible"
priority: "err"
facility: "daemon"
log_pid: true
# Basic usage
- name: Simple Usage
syslogger:
msg: "I will end up as daemon.info"
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
import syslog
def get_facility(x):
return {
'kern': syslog.LOG_KERN,
'user': syslog.LOG_USER,
'mail': syslog.LOG_MAIL,
'daemon': syslog.LOG_DAEMON,
'auth': syslog.LOG_AUTH,
'lpr': syslog.LOG_LPR,
'news': syslog.LOG_NEWS,
'uucp': syslog.LOG_UUCP,
'cron': syslog.LOG_CRON,
'syslog': syslog.LOG_SYSLOG,
'local0': syslog.LOG_LOCAL0,
'local1': syslog.LOG_LOCAL1,
'local2': syslog.LOG_LOCAL2,
'local3': syslog.LOG_LOCAL3,
'local4': syslog.LOG_LOCAL4,
'local5': syslog.LOG_LOCAL5,
'local6': syslog.LOG_LOCAL6,
'local7': syslog.LOG_LOCAL7
}.get(x, syslog.LOG_DAEMON)
def get_priority(x):
return {
'emerg': syslog.LOG_EMERG,
'alert': syslog.LOG_ALERT,
'crit': syslog.LOG_CRIT,
'err': syslog.LOG_ERR,
'warning': syslog.LOG_WARNING,
'notice': syslog.LOG_NOTICE,
'info': syslog.LOG_INFO,
'debug': syslog.LOG_DEBUG
}.get(x, syslog.LOG_INFO)
def run_module():
# define the available arguments/parameters that a user can pass to
# the module
module_args = dict(
msg=dict(type='str', required=True),
priority=dict(type='str', required=False,
choices=["emerg", "alert", "crit", "err", "warning",
"notice", "info", "debug"],
default='info'),
facility=dict(type='str', required=False,
choices=["kern", "user", "mail", "daemon", "auth",
"lpr", "news", "uucp", "cron", "syslog",
"local0", "local1", "local2", "local3",
"local4", "local5", "local6", "local7"],
default='daemon'),
log_pid=dict(type='bool', required=False, default=False)
)
module = AnsibleModule(
argument_spec=module_args,
)
result = dict(
changed=False,
priority=module.params['priority'],
facility=module.params['facility'],
log_pid=module.params['log_pid'],
msg=module.params['msg']
)
# do the logging
try:
if module.params['log_pid']:
syslog.openlog('ansible_syslogger',
logoption=syslog.LOG_PID,
facility=get_facility(module.params['facility']))
else:
syslog.openlog('ansible_syslogger',
facility=get_facility(module.params['facility']))
syslog.syslog(get_priority(module.params['priority']),
module.params['msg'])
syslog.closelog()
result['changed'] = True
except Exception:
module.fail_json(error='Failed to write to syslog', **result)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
|
# Generated by Django 2.0.6 on 2018-08-20 03:00
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('imagesource', '0003_auto_20180820_1019'),
]
operations = [
migrations.AlterField(
model_name='imagesource',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
from .external import ExternalLink
|
def findnotrepeat(inplist1):
i = 0
while i < len(inplist1):
if (i+1) == len(inplist1):
return inplist1[i]
elif inplist1[i] != inplist1[i+1]:
return inplist1[i]
else:
i += 2
return "Todos tienen un par dentro de la lista"
# PRUEBAS DE LA FUNCIÓN
testlist1 = [3, 3, 3, 4, 4, 5, 5, 6, 6, 7]
unrepited = findnotrepeat(testlist1)
print("El número sin repetir es: ",unrepited)
testlist2 = [1, 1, 2, 2, 3, 3, 4, 4]
unrepited = findnotrepeat(testlist2)
print("El número sin repetir es: ",unrepited)
testlist3 = [1, 2, 2, 3, 3]
unrepited = findnotrepeat(testlist3)
print("El número sin repetir es: ",unrepited)
#Esto es cun comentario#
|
import torch
def IntTensor(values, device='cuda:0'):
"""
Returns a Tensor of type torch.int containing the given values
Parameters
----------
values : list
the values of the tensor
device : str
the device to store the tensor to
Returns
-------
Tensor
an integer precision tensor
"""
return torch.tensor(values, dtype=torch.int, device=device)
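# Hedged usage sketch: the CPU device is chosen only so the example runs without
# a GPU; the function itself defaults to 'cuda:0'.
if __name__ == '__main__':
    t = IntTensor([1, 2, 3], device='cpu')
    print(t, t.dtype)  # tensor([1, 2, 3], dtype=torch.int32) torch.int32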
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# region header
'''
This module provides an easy way to compile, run and clean up various \
kinds of scripts.
'''
# # python3.5
# # pass
from __future__ import absolute_import, division, print_function, \
unicode_literals
# #
'''
For conventions see "boostnode/__init__.py" on \
https://github.com/thaibault/boostnode
'''
__author__ = 'Torben Sickert'
__copyright__ = 'see boostnode/__init__.py'
__credits__ = 'Torben Sickert',
__license__ = 'see boostnode/__init__.py'
__maintainer__ = 'Torben Sickert'
__maintainer_email__ = 'info["~at~"]torben.website'
__status__ = 'stable'
__version__ = '1.0'
# # python3.5
# # import builtins
# # import collections
import __builtin__ as builtins
# #
from copy import copy
import inspect
import logging
import os
import sys
'''Make boostnode packages and modules importable via relative paths.'''
sys.path.append(os.path.abspath(sys.path[0] + 2 * (os.sep + '..')))
from boostnode.extension.file import Handler as FileHandler
from boostnode.extension.native import Module, InstancePropertyInitializer
from boostnode.extension.output import Logger, Print
from boostnode.extension.system import CommandLine, Platform, Runnable
# # python3.5 from boostnode.extension.type import Self
pass
from boostnode.paradigm.aspectOrientation import JointPoint
from boostnode.paradigm.objectOrientation import Class
from boostnode.runnable.template import Parser as TemplateParser
# endregion
# region classes
class Run(Class, Runnable):
'''
    This class provides compile, run and clean up support for a large \
    number of programming languages.
    Determines a code file to run and runs it in its own thread by \
    piping all outputs through the command line interface.
**code_file_path** - A given code file handler or path \
which should be run.
**default_command_sequence** - A default command sequence which \
should be executed in given order.
'''
# region properties
COMMAND_LINE_ARGUMENTS = (
{'arguments': ('-f', '--code-file'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'Select a file for running.',
'dest': 'code_file_path',
'metavar': 'FILE_PATH'}},
{'arguments': ('-d', '--default-command-sequence'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Select a default sequence of things to do with "
'''code files (default: "%s").' % '''
'\'", "\'.join(__initializer_default_value__)'},
'dest': 'default_command_sequence',
'metavar': 'COMMAND'}},
{'arguments': ('-n', '--runner-meta-help'),
'specification': {
'action': 'store_true',
'default': False,
'help': 'Shows this help message.',
'dest': 'meta_help'}})
    '''Holds all command line interface argument information.'''
SUPPORTED_CODES = {
'template': {
'commands': {
'compile': "bash --login -c '"
'template "<% code_file.path %>" 1>'
'"<% code_file.directory.path %>'
'<% code_file.basename %>.html"\'',
'run': 'bash --login -c \'webbrowser '
'"<% code_file.directory.path %>'
'<% code_file.basename %>.html"\''
},
'extensions': ('tpl',)
},
'c': {
'commands': {
'compile': 'g++ "<% code_file.path %>" -o '
'"<% code_file.directory.path %>'
'<% code_file.basename %>"',
'run': '"<% code_file.directory.path %>'
'<% code_file.basename %>" <%arguments%>',
},
'code_manager': {
'file_path': 'Makefile',
'commands': {
'compile': 'make compile',
'test': 'make test',
'clean': 'make clean',
'all': 'make all'
}
},
'extensions': ('cpp', 'c', 'cc'),
'delete_patterns': ('.*\.o', '.*Main', '.*Test')
},
'bash': {
'commands': {'run': '"<% code_file.path %>" <% arguments %>'},
'extensions': ('bash',)
},
'shell': {
'commands': {'run': '"<% code_file.path %>" <% arguments %>'},
'extensions': ('sh', 'shell')
},
'python': {
'commands': {'run': '"<% code_file.path %>" <% arguments %>'},
'code_manager': {
'file_path': '__init__.<% code_file.extension %>',
'commands': {
'clean': '__init__.<% code_file.extension %> clear',
'test': '__init__.<% code_file.extension %> test',
'all': '__init__.<% code_file.extension %> all'
}
},
'extensions': ('py', 'pyc', 'pyw', 'pyo', 'pyd'),
'delete_patterns': ('.*\.py[cod]', '__pycache__', 'temp_.*')
},
'laTeX': {
'commands': {
'compile': 'pdflatex "<% code_file.path %>" && '
'cd "<% code_file.directory.path %>" && bibtex '
'"<% code_file.basename %>.aux"; '
'pdflatex "<% code_file.path %>" && '
'pdflatex "<% code_file.path %>"',
'run': ' || '.join(builtins.map(
lambda name: name + ' "<% code_file.basename %>.pdf"',
Platform.UNIX_OPEN_APPLICATIONS)
)
},
'code_manager': {
'file_path': 'Makefile',
'commands': {
'compile': 'make compile',
'run': 'make preview',
'clean': 'make clean',
'all': 'make all'
}
},
'extensions': ('tex',),
'delete_patterns': (
'.+\.aux', '.+\.log', '.+\.toc', '.+\.out', '.+\.blg',
'.+\.bbl', '.+\.lol')
},
'java': {
'commands': {
'compile': 'javac "<% code_file.path %>"',
'run': 'java "<% code_file.basename.capitalize() %>" '
'<% arguments %>'
},
'extensions': ('java',),
'delete_patterns': ('.*\.class',)
}
}
    '''Holds all supported code types and their methods to do common stuff.'''
# endregion
# region dynamic methods
# # region public
# # # region special
@JointPoint
# # python3.5 def __repr__(self: Self) -> builtins.str:
def __repr__(self):
'''
Invokes if this object should describe itself by a string.
Examples:
>>> file = FileHandler(__test_folder__.path + '__repr__.py')
>>> file.content = '#!/usr/bin/env python'
>>> repr(Run(code_file_path=file)) # doctest: +ELLIPSIS
'Object of "Run" with detected path "...__repr__.py".'
'''
return (
'Object of "{class_name}" with detected path "{path}".'.format(
class_name=self.__class__.__name__,
path=self._code_file.path))
# # # endregion
# # endregion
# # region protected
# # # region runnable implementation
@JointPoint
# # python3.5 def _run(self: Self) -> Self:
def _run(self):
'''
Entry point for command line call of this program. Determines a \
            meaningful file for running. Sets the right code dependent \
commands and finally executes them.
Examples:
>>> sys_argv_backup = sys.argv
>>> sys.argv[1:] = ['--runner-meta-help', '--log-level', 'info']
>>> run = Run.run() # doctest: +ELLIPSIS
usage:...
>>> empty_folder = FileHandler(
... __test_folder__.path + '_run', make_directory=True)
>>> sys.argv[1:] = ['-f', empty_folder.path, '--log-level', 'info']
>>> run = Run.run() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
CodeRunnerError: No supported file path found for running.
>>> sys.argv = sys_argv_backup
'''
command_line_arguments = CommandLine.argument_parser(
meta=True, arguments=self.COMMAND_LINE_ARGUMENTS,
module_name=__name__, scope={'self': self})
if command_line_arguments.meta_help:
CommandLine.current_argument_parser.print_help()
return self
return self._initialize(**self._command_line_arguments_to_dictionary(
namespace=command_line_arguments))
@JointPoint(InstancePropertyInitializer)
# # python3.5
# # def _initialize(
# # self: Self, code_file_path=None,
# # default_command_sequence=('compile', 'run', 'clean'),
# # **keywords: builtins.object
# # ) -> Self:
def _initialize(
self, code_file_path=None,
default_command_sequence=('compile', 'run', 'clean'), **keywords
):
# #
'''Sets some instance properties.'''
# # # region properties
'''
        Holds the current code file and its potentially present code \
        manager as file handlers.
'''
self.code_manager_file = None
'''
Saves every properties for current code taken from \
"SUPPORTED_CODES".
'''
self._current_code = {}
'''Saves currently needed commands taken from "_current_code".'''
self._current_commands = ()
'''
Saves given arguments which should be piped through the run \
command to determined code file.
'''
self._command_line_arguments = ()
'''Saves currently determined runnable code file object.'''
self._code_file = self._determine_code_file(self.code_file_path)
# # # endregion
if not self._code_file:
raise __exception__(
'No supported file found for running with given hint "%s".',
code_file_path)
return self._run_code_file()
# # # endregion
@JointPoint
# # python3.5 def _tidy_up(self: Self) -> Self:
def _tidy_up(self):
'''
Tidies up the current working directory after running the given \
file.
Examples:
>>> garbage = FileHandler(
... __test_folder__.path + 'temp_tidy_up', make_directory=True)
>>> file = FileHandler(
... __test_folder__.path + '_tidy_up_runnable.py')
>>> file.content = '#!/usr/bin/env python'
>>> run = Run(file)
>>> run # doctest: +ELLIPSIS
Object of "Run" with detected path "..._tidy_up_runnable.py".
>>> run._tidy_up() # doctest: +ELLIPSIS
Object of "Run" with detected path "..._tidy_up_runnable.py".
>>> garbage.is_element()
False
>>> del run._current_code['properties']['delete_patterns']
>>> run._tidy_up() # doctest: +ELLIPSIS
Object of "Run" with detected path "..._tidy_up_runnable.py".
'''
if 'delete_patterns' in self._current_code['properties']:
__logger__.info(
'Delete files which matches one of "%s" pattern.',
'", "'.join(
self._current_code['properties']['delete_patterns']))
FileHandler(
location=self._code_file.directory.path
).delete_file_patterns(
*self._current_code['properties']['delete_patterns'])
return self
@JointPoint
# # python3.5 def _run_commands(self: Self) -> Self:
def _run_commands(self):
'''
Run currently needed commands.
Examples:
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> __test_folder__.clear_directory()
True
>>> file = FileHandler(__test_folder__.path + '_run_commands.py')
>>> file.content = '#!/usr/bin/env python'
>>> file.change_right(700) # doctest: +ELLIPSIS
Object of "Handler" with path "..._run_commands.py" and initiall...
>>> Run(
... code_file_path=file
... )._run_commands() # doctest: +ELLIPSIS
Object of "Run" with detected path "..._run_commands.py...".
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...Detected "python"...No "compile" necessary...'
'''
for command_name in self.default_command_sequence:
if command_name in self._current_commands:
self._run_command(
command_name, command=self._current_commands[command_name])
else:
__logger__.info('No "%s" necessary.', command_name)
return self
@JointPoint
# # python3.5 def _check_code_manager(self: Self) -> Self:
def _check_code_manager(self):
'''
Checks if a code manager file exists for the current detected \
code file. For example it can find a makefile for a detected c++ \
source code.
Examples:
>>> file = FileHandler(
... __test_folder__.path + '_check_code_manager.py')
>>> file.content = '#!/usr/bin/env python'
>>> FileHandler(
... __test_folder__.path + '__init__.py'
... ).content = '#!/usr/bin/env python'
>>> run = Run(code_file_path=file)
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> run._check_code_manager() # doctest: +ELLIPSIS
Object of "Run" with detected path "..._check_code_manager.py".
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...Detected code manager "...__init__.py".\\n'
>>> del run._current_code['properties']['code_manager']
>>> run._check_code_manager() # doctest: +ELLIPSIS
Object of "Run" ...
'''
if 'code_manager' in self._current_code['properties']:
file_path = self\
._current_code['properties']['code_manager']['file_path']
self.code_manager_file = FileHandler(
location=self._code_file.directory.path + file_path)
if self.code_manager_file:
self._current_commands.update(
self._current_code['properties']['code_manager'][
'commands'])
__logger__.info(
'Detected code manager "%s".', self.code_manager_file.path)
return self
@JointPoint
# # python3.5
# # def _determine_code_file(
# # self: Self, path: (builtins.str, builtins.type(None), FileHandler)
# # ) -> (FileHandler, builtins.bool):
def _determine_code_file(self, path):
# #
'''
        Determines a code file which could make sense to run. The result \
        can depend on the inputs given to this class. Searches in the \
        current working directory.
Examples:
>>> FileHandler('temp_determine_code_file_main.py').content = ''
>>> run = Run()
>>> run._determine_code_file(path='') # doctest: +ELLIPSIS
Object of "Handler" with path "..." (type: file).
>>> run._command_line_arguments = ['--help']
>>> run._determine_code_file('not_existing')
False
>>> run._command_line_arguments = ['--help']
>>> run._determine_code_file('') # doctest: +ELLIPSIS
Object of "Handler" with path "..." and initially given path "...
'''
if path:
if not self._command_line_arguments:
self._command_line_arguments = sys.argv[2:]
code_file = FileHandler(location=path)
if not (code_file.is_file() and
self._find_informations_by_extension(
extension=code_file.extension, code_file=code_file)):
return self._search_supported_file_by_path(
path=code_file.path)
return code_file
if not self._command_line_arguments:
self._command_line_arguments = sys.argv[1:]
return self._search_supported_file_in_current_working_directory()
@JointPoint
# # python3.5
# # def _find_informations_by_extension(
# # self: Self, extension: builtins.str, code_file: FileHandler
# # ) -> (builtins.dict, builtins.bool):
def _find_informations_by_extension(self, extension, code_file):
# #
'''
        Tries to find the necessary information for running code with \
        the given extension.
Examples:
>>> code_file = FileHandler(
... __test_folder__.path +
... '_find_informations_by_extension.py')
>>> FileHandler(
... 'temp_find_informations_by_extension_main.py'
... ).content = ''
>>> run = Run()
>>> run._find_informations_by_extension(
... extension='py', code_file=code_file
... ) # doctest: +ELLIPSIS
{...'type': 'python'...}
>>> run._find_informations_by_extension(
... 'not_existing', code_file)
False
'''
for name, properties in self.SUPPORTED_CODES.items():
if extension in properties['extensions']:
return {
'type': name, 'properties': self._render_properties(
properties, code_file)}
return False
@JointPoint
# # python3.5
# # def _search_supported_file_by_path(
# # self: Self, path: builtins.str
# # ) -> (FileHandler, builtins.bool):
def _search_supported_file_by_path(self, path):
# #
'''
        Tries to find a useful file in the current working directory by \
        matching a file with the given path name and a supported \
        extension.
Examples:
>>> file = FileHandler(
... __test_folder__.path + '_search_supported_file_by_path.py')
>>> file.content = '#!/usr/bin/env python'
>>> FileHandler(
... 'temp_search_supported_file_by_path_main.py'
... ).content = ''
>>> run = Run()
>>> run._search_supported_file_by_path(
... path=file.directory.path + file.basename
... ) # doctest: +ELLIPSIS
Object of "Handler" with pat..._search_supported_file_by_path.py...
>>> run._search_supported_file_by_path(
... path=''
... ) # doctest: +ELLIPSIS
Object of "Handler" with path "..." (type: file).
>>> run._search_supported_file_by_path('not_exists')
False
'''
self_file = FileHandler(
location=inspect.currentframe().f_code.co_filename,
respect_root_path=False)
location = FileHandler(location=path)
for name, properties in self.SUPPORTED_CODES.items():
for extension in properties['extensions']:
for code_file in (FileHandler(
location=location.path + '.' + extension),
FileHandler(location=location.path + extension)
):
# # python3.5
# # if code_file.is_file() and code_file != self_file:
if code_file.is_file() and not (
code_file == self_file
):
# #
return code_file
file = self._search_supported_file_by_directory(
location, extension)
if file:
return file
return False
@JointPoint
# # python3.5
# # def _search_supported_file_by_directory(
# # self: Self, location: FileHandler, extension: builtins.str,
# # ) -> (FileHandler, builtins.bool):
def _search_supported_file_by_directory(self, location, extension):
# #
'''
Searches in a directory for a suitable code file to run.
Examples:
>>> file = FileHandler(
... __test_folder__.path +
... '_search_supported_file_by_directoryMain.py')
>>> file.content = ''
>>> FileHandler(
... 'temp_search_supported_file_by_directory_main.py'
... ).content = ''
>>> Run()._search_supported_file_by_directory(
... FileHandler(__test_folder__.path), 'py'
... ) # doctest: +ELLIPSIS
Object of "Handler" with path "..." and initially given path "...
>>> file.name = '_search_supported_file_by_directory.py'
>>> Run()._search_supported_file_by_directory(
... FileHandler(__test_folder__.path), 'py'
... ) # doctest: +ELLIPSIS
Object of "Handler" with path "..." and initially given path "...
'''
if location.is_directory():
found_file = False
for file in location:
if file.is_file() and file.extension == extension:
if file.basename.lower().endswith('main'):
return file
found_file = file
if found_file:
return found_file
return False
@JointPoint
# # python3.5
# # def _search_supported_file_in_current_working_directory(
# # self: Self
# # ) -> (FileHandler, builtins.bool):
def _search_supported_file_in_current_working_directory(self):
# #
'''
        Tries to find a useful file with a supported extension in the \
        current working directory.
Examples:
>>> FileHandler(
... 'temp_search_supported_file_in_current_working_directory_'
... 'main.py'
... ).content = ''
>>> run = Run()
>>> supported_codes_backup = copy(run.SUPPORTED_CODES)
>>> run._search_supported_file_in_current_working_directory(
... ) # doctest: +ELLIPSIS
Object of "Handler" with path "..." (type: file).
>>> run.SUPPORTED_CODES = {}
>>> run._search_supported_file_in_current_working_directory()
False
>>> run.SUPPORTED_CODES = supported_codes_backup
'''
for name, properties in self.SUPPORTED_CODES.items():
for extension in properties['extensions']:
file = self._search_supported_file_by_directory(
location=FileHandler(), extension=extension)
'''NOTE: We should return positive results only.'''
if file:
return file
return False
@JointPoint
# # python3.5
# # def _run_command(
# # self: Self, command_name: builtins.str, command: builtins.str
# # ) -> Self:
def _run_command(self, command_name, command):
# #
'''
        Runs the given command, printing out what is running and \
        presenting its results.
Examples:
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> FileHandler('temp_run_command_main.py').content = ''
>>> run = Run()
>>> run._run_command('list', 'ls') # doctest: +SKIP
Object of "Run" with detected path "...".
>>> __test_buffer__.clear() # doctest: +SKIP
'List with "ls". output [...codeRunner...]'
>>> run._run_command(
... 'do nothing', 'not_existing'
... ) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SystemExit: 127
'''
return_code = self._log_command_run(
command_name, command,
result=Platform.run(
command=command.strip(), shell=True, error=False))
if return_code != 0:
sys.exit(return_code)
return self
@JointPoint
# # python3.5
# # def _log_command_run(
# # self: Self, command_name: builtins.str, command: builtins.str,
# # result: builtins.dict
# # ) -> builtins.int:
def _log_command_run(self, command_name, command, result):
# #
'''
        Generates logging output wrapped around the output generated by \
        running the code file.
Examples:
>>> log_level_backup = Logger.level
>>> Logger.change_all(level=('error',))
<class 'boostnode.extension.output.Logger'>
>>> FileHandler('temp_log_command_run_main.py').content = ''
>>> Run()._log_command_run(
... 'test', 'test', {
... 'error_output': '', 'standard_output': '',
... 'return_code': 0})
0
>>> Logger.change_all(level=log_level_backup)
<class 'boostnode.extension.output.Logger'>
'''
terminator_save = Logger.terminator
Logger.change_all(terminator=('',))
# # python3.5
# # __logger__.info(
# # '%s with "%s".\nstandard output:\n[',
# # command_name.capitalize(), command.strip())
# # Logger.flush()
if __logger__.isEnabledFor(logging.INFO):
Print(
'%s with "%s".\nstandard output:\n[' %
(command_name.capitalize(), command.strip()),
end='', flush=True)
# #
Print(result['standard_output'], end='', flush=True)
# # python3.5
# # __logger__.info(']\nerror output:\n[')
# # Logger.flush()
if __logger__.isEnabledFor(logging.INFO):
Print(']\nerror output:\n[', end='', flush=True)
# #
Logger.change_all(terminator=terminator_save)
Print(result['error_output'], end='')
if __logger__.isEnabledFor(logging.INFO):
Print(']', flush=True)
__logger__.info('Return code: "%d".', result['return_code'])
return result['return_code']
@JointPoint
# # python3.5 def _run_code_file(self: Self) -> Self:
def _run_code_file(self):
'''
Runs all commands needed to run the current type of code.
Examples:
>>> FileHandler('temp_run_code_file_main.py').content = ''
>>> __test_globals__['__test_mode__'] = False
>>> Run() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
SystemExit: ...
>>> __test_globals__['__test_mode__'] = True
>>> run = Run('not_existing') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
CodeRunnerError: No supported file found for running with given ...
'''
self._current_code = self._find_informations_by_extension(
extension=self._code_file.extension, code_file=self._code_file)
self._current_commands = \
self._current_code['properties']['commands']
__logger__.info('Detected "%s".', self._current_code['type'])
self._check_code_manager()
if not __test_mode__:
try:
self._run_commands()
finally:
self._tidy_up()
return self
@JointPoint
# # python3.5
# # def _render_properties(
# # self: Self, properties: builtins.dict, code_file: FileHandler
# # ) -> builtins.dict:
def _render_properties(self, properties, code_file):
# #
'''
        If a given code property is marked as executable or dynamic, its \
        value will be determined by rendering it as a template.
Examples:
>>> code_file = FileHandler(
... location=__test_folder__.path + '_render_properties.cpp')
>>> FileHandler('temp_render_properties_main.py').content = ''
>>> run = Run()
>>> run._render_properties({
... 'commands': {
... 'compile': 'g++ "<%code_file.path%>"',
... 'run': '<%code_file.basename%>',
... },
... 'code_manager': {
... 'file_path': 'Makefile',
... 'commands': {'compile': 'make build'}
... },
... 'extensions': ('cpp', 'c')
... }, code_file) # doctest: +ELLIPSIS +SKIP
{'commands': {'compile': 'g++ "...runner.cpp"', 'run': '...}
>>> run._render_properties({'hans': 'peter'}, code_file)
{'hans': 'peter'}
'''
rendered_properties = copy(properties)
for key, value in rendered_properties.items():
if builtins.isinstance(value, builtins.dict):
rendered_properties[key] = self._render_properties(
properties=value, code_file=code_file)
# # python3.5
# # elif builtins.isinstance(value, builtins.str):
elif builtins.isinstance(value, builtins.unicode):
# #
rendered_properties[key] = TemplateParser(
template=value, string=True
).render(
code_file=code_file,
arguments=' '.join(self._command_line_arguments),
path_separator=os.sep
).output
return rendered_properties
# # endregion
# endregion
# endregion
# region footer
'''
    Preset some variables given by introspection, letting the linter know \
    which global variables are available.
'''
__logger__ = __exception__ = __module_name__ = __file_path__ = \
__test_mode__ = __test_buffer__ = __test_folder__ = __test_globals__ = None
'''
Extends this module with some magic environment variables to provide \
better introspection support. A generic command line interface for some \
code preprocessing tools is provided by default.
'''
Module.default(name=__name__, frame=inspect.currentframe())
# endregion
# region vim modline
# vim: set tabstop=4 shiftwidth=4 expandtab:
# vim: foldmethod=marker foldmarker=region,endregion:
# endregion
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
class implementations for real-time 3D feature extraction
"""
from abc import abstractmethod
import pandas as pd
import os
import glob
import numpy as np
from tensorflow.keras import layers as L
from tensorflow import keras
from tomo_encoders.neural_nets.Unet3D import build_Unet_3D
from tomo_encoders import Patches
from tomo_encoders import DataFile
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import UpSampling3D
from multiprocessing import Pool, cpu_count
import functools
import cupy as cp
import h5py
import abc
import time
from tomo_encoders.misc.voxel_processing import _rescale_data, _find_min_max, modified_autocontrast, normalize_volume_gpu, _edge_map
from tomo_encoders.neural_nets.keras_processor import Vox2VoxProcessor_fCNN
# Parameters for weighted cross-entropy and focal loss - alpha is higher than 0.5 to emphasize loss in "ones" or metal pixels.
eps = 1e-12
alpha = 0.75
gamma = 2.0
DEFAULT_INPUT_SIZE = (64,64,64)
def _binary_lossmap(y_true, y_pred):
# y_true, y_pred are tensors of shape (batch_size, img_h, img_w, n_channels)
pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
return pt_1, pt_0
def focal_loss(y_true, y_pred):
"""
:return: loss value
Focal loss is defined here: https://arxiv.org/abs/1708.02002
Using this provides improved fidelity in unbalanced datasets:
Tekawade et al. https://doi.org/10.1117/12.2540442
Parameters
----------
y_true : tensor
Ground truth tensor of shape (batch_size, n_rows, n_cols, n_channels)
y_pred : tensor
Predicted tensor of shape (batch_size, n_rows, n_cols, n_channels)
"""
pt_1, pt_0 = _binary_lossmap(y_true, y_pred)
loss_map = -alpha*tf.math.log(pt_1 + eps)*tf.math.pow(1. - pt_1,gamma) - (1-alpha)*tf.math.log(1. - pt_0 + eps)*tf.math.pow(pt_0,gamma)
return tf.reduce_mean(loss_map)
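# Illustrative sanity check (not part of the original module): evaluates the
# focal loss on a tiny hand-made batch. The tensor values below are arbitrary
# examples, not taken from the training pipeline.
def _focal_loss_example():
    y_true = tf.constant([[1.0, 0.0, 1.0, 0.0]])
    y_pred = tf.constant([[0.9, 0.1, 0.6, 0.4]])
    # confident correct predictions (0.9 / 0.1) contribute less than the
    # uncertain ones (0.6 / 0.4) because of the (1 - p_t)**gamma factor
    return focal_loss(y_true, y_pred)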
class Segmenter_fCNN(Vox2VoxProcessor_fCNN):
def __init__(self,**kwargs):
# could be "data" or "label"
self.input_type = "data"
self.output_type = "labels"
super().__init__(**kwargs)
return
def random_data_generator(self, batch_size, input_size = (64,64,64)):
while True:
x_shape = tuple([batch_size] + list(input_size) + [1])
x = np.random.uniform(0, 1, x_shape)#.astype(np.float32)
y = np.random.randint(0, 2, x_shape)#.astype(np.uint8)
x[x == 0] = 1.0e-12
yield x, y
def _build_models(self, descriptor_tag = "misc", **model_params):
'''
        Builds and compiles the 3D U-Net segmentation model for this
        Segmenter_fCNN instance.
        Parameters
        ----------
        descriptor_tag : str
            short string describing the model; used to build self.model_tag.
        model_params : dict
            for passing any number of model hyperparameters necessary to define the model(s).
'''
        if not model_params:
raise ValueError("Need model hyperparameters or instance of model. Neither were provided")
else:
self.models = {}
# insert your model building code here. The models variable must be a dictionary of models with str descriptors as keys
self.model_tag = "Unet_%s"%(descriptor_tag)
model_key = "segmenter"
self.models.update({model_key : None})
# input_size here is redundant if the network is fully convolutional
self.models[model_key] = build_Unet_3D(**model_params)
self.models[model_key].compile(optimizer=tf.keras.optimizers.Adam(),\
loss= tf.keras.losses.BinaryCrossentropy())
return
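# Illustrative smoke test (not part of the original module): assumes `seg` is an
# already-constructed Segmenter_fCNN on which _build_models() has been called
# with valid build_Unet_3D hyperparameters; the argument values here are examples.
def _smoke_test_segmenter(seg, batch_size=4, steps=2):
    gen = seg.random_data_generator(batch_size, input_size=DEFAULT_INPUT_SIZE)
    # fit on a couple of random batches just to check shapes and loss plumbing
    seg.models["segmenter"].fit(gen, steps_per_epoch=steps, epochs=1, verbose=1)
    return seg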
if __name__ == "__main__":
print('just a bunch of functions')
|
from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from itez.users.api.views import UserViewSet
from itez.beneficiary.api.views import (
ProvinceAPIView,
DistrictAPIView,
ServiceAreaAPIView,
WorkDetailAPIView
)
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("users", UserViewSet)
router.register("provinces", ProvinceAPIView)
router.register("districts", DistrictAPIView)
router.register("service_area", ServiceAreaAPIView)
router.register("work_detail", WorkDetailAPIView)
app_name = "api"
urlpatterns = router.urls
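# Illustrative wiring (not part of this module): these router URLs are usually
# included from the project's root urlconf. The module path "itez.api_router"
# and the "api/" prefix below are assumptions, not taken from this repository.
#
#   from django.urls import include, path
#
#   urlpatterns = [
#       path("api/", include("itez.api_router")),
#   ]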
|
import argparse
from collections import defaultdict
from pysam import VariantFile
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def plot_distributions(vafs, true_vafs, false_vafs):
sns.set(rc={"figure.figsize": (20, 10)})
sns.set_style("white")
# plt.hist(vafs, bins=100, color='blue', alpha=0.4)
plt.hist([true_vafs, false_vafs], bins=100, density=False, histtype='bar', color=['green', 'red'], alpha=0.4, stacked=True, label=['True variants', 'False positives'])
# plt.hist(false_vafs, bins=100, color='red', alpha=0.4, stacked=True)
plt.xlim((0.00, 1.15))
# plt.ylim((0, 300))
plt.legend(fontsize='x-large')
# plt.xticks(np.arange(0, 1, step=0.10), fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("Allele frequency", fontsize='24')
plt.ylabel("Count", fontsize='24')
plt.title("Stacked histogram showing TP and FP distribution at different frequency intervals.", fontsize='20')
plt.show()
# exit()
# output_file_name = "./allele_distribution.png"
# plt.savefig(output_file_name, format='png', dpi=300, quality=95)
def calculate_stats(vcf):
vcf_in1 = VariantFile(vcf)
vafs_of_true_alleles = list()
vafs_of_false_alleles = list()
total_alts = 0
total_true_calls = 0
total_false_calls = 0
all_allele_frequencies = list()
total_recs = 0
for rec in vcf_in1.fetch():
total_recs += 1
        # Useful VariantRecord attributes: alleles, alts, chrom, filter, format, info, pos, qual, ref, samples
alts = rec.alts
total_alts += len(alts)
if rec.filter.keys()[0] != 'PASS':
continue
for sample in rec.samples:
vafs = list(rec.samples[sample]['VAF'])
gts = list(rec.samples[sample]['GT'])
true_index = []
for gt in gts:
if gt != 0:
true_index.append(gt-1)
for i, (alt, vaf) in enumerate(zip(alts, vafs)):
if i in true_index:
vafs_of_true_alleles.append(vaf)
total_true_calls += 1
else:
vafs_of_false_alleles.append(vaf)
total_false_calls += 1
for vaf in vafs:
all_allele_frequencies.append(round(vaf, 3))
print("Total positions:\t", total_recs)
print("Total alt alleles:\t", total_alts)
print("Total true alleles:\t", total_true_calls, "(" + str(int(100 * (total_true_calls/total_alts))) + "%)")
print("Total false alleles:\t", total_false_calls, "(" + str(int(100 * (total_false_calls/total_alts))) + "%)")
plot_distributions(all_allele_frequencies, vafs_of_true_alleles, vafs_of_false_alleles)
# plot_distributions(all_allele_frequencies, q_of_true_alleles, q_of_false_alleles)
def add_merge_vcf_arguments(parser):
parser.add_argument(
"-v",
"--vcf",
type=str,
required=True,
help="VCF of haplotype 1."
)
return parser
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Calculate allele-frequency statistics from a VCF and plot a stacked\n"
                    "histogram of true-positive (TP) and false-positive (FP) alt alleles\n"
                    "across VAF bins.",
        formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--version",
default=False,
action='store_true',
help="Show version."
)
add_merge_vcf_arguments(parser)
FLAGS, unparsed = parser.parse_known_args()
calculate_stats(FLAGS.vcf)
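# Example invocation (illustrative; the script file name "vaf_stats.py" and the
# VCF path are placeholders):
#
#   python vaf_stats.py --vcf candidate_calls.vcf.gz
#
# The VCF is expected to carry per-sample VAF and GT FORMAT fields, as read in
# calculate_stats() above.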
|
import contextlib
import os
import shutil
import tempfile
import unittest
from pikos.recorders.csv_file_recorder import CSVFileRecorder
from pikos.recorders.abstract_recorder import RecorderError
from pikos.tests.compat import TestCase
from pikos.tests.dummy_record import DummyRecord
class TestCSVFileRecorder(TestCase):
def setUp(self):
self.directory = tempfile.mkdtemp()
self.filename = os.path.join(self.directory, 'mylog')
def tearDown(self):
shutil.rmtree(self.directory)
def test_prepare_with_namedtuple(self):
header = 'one,two,three\n'
recorder = CSVFileRecorder(filename=self.filename)
with self.finalizer(recorder):
recorder.prepare(DummyRecord)
self.assertRecordedLines(header)
def test_prepare_with_tuple(self):
header = ''
recorder = CSVFileRecorder(filename=self.filename)
with self.finalizer(recorder):
recorder.prepare(tuple)
self.assertRecordedLines(header)
def test_prepare_multiple_times(self):
header = 'one,two,three\n'
recorder = CSVFileRecorder(filename=self.filename)
with self.finalizer(recorder):
recorder.prepare(DummyRecord)
# all calls after that do nothing
for x in range(10):
recorder.prepare(DummyRecord)
self.assertRecordedLines(header)
def test_finalize(self):
header = 'one,two,three\n'
recorder = CSVFileRecorder(filename=self.filename)
# all calls do nothing
with self.finalizer(recorder):
recorder.prepare(DummyRecord)
for x in range(10):
recorder.finalize()
self.assertRecordedLines(header)
def test_record_with_namedtuple(self):
record = DummyRecord(5, 'pikos', 'apikos')
output = 'one,two,three\n5,pikos,apikos\n'
recorder = CSVFileRecorder(filename=self.filename)
with self.finalizer(recorder):
recorder.prepare(DummyRecord)
recorder.record(record)
self.assertRecordedLines(output)
def test_record_with_tuple(self):
record = (5, 'pikos', 'apikos')
output = '5,pikos,apikos\n'
recorder = CSVFileRecorder(filename=self.filename)
with self.finalizer(recorder):
recorder.prepare(tuple)
recorder.record(record)
self.assertRecordedLines(output)
def test_filter(self):
records = [
DummyRecord(5, 'pikos', 'apikos'),
DummyRecord(12, 'emilios', 'milo')]
output = 'one,two,three\n12,emilios,milo\n'
def not_pikos(record):
return all('pikos' != field for field in record)
recorder = CSVFileRecorder(filename=self.filename, filter_=not_pikos)
with self.finalizer(recorder):
recorder.prepare(DummyRecord)
for record in records:
recorder.record(record)
self.assertRecordedLines(output)
def test_dialect(self):
records = [
DummyRecord(5, 'pikos', 'apikos'),
DummyRecord(12, 'emilios', 'milo')]
output = 'one,two,three^5,pikos,apikos^12,emilios,milo^'
recorder = CSVFileRecorder(
filename=self.filename, lineterminator='^')
with self.finalizer(recorder):
recorder.prepare(DummyRecord)
for record in records:
recorder.record(record)
self.assertRecordedLines(output)
def test_exception_when_no_prepare(self):
records = [DummyRecord(5, 'pikos', 'apikos')]
recorder = CSVFileRecorder(filename=self.filename)
with self.assertRaises(RecorderError):
recorder.record(records)
with self.assertRaises(RecorderError):
recorder.finalize()
@contextlib.contextmanager
def finalizer(self, recorder):
try:
yield recorder
finally:
recorder.finalize()
def assertRecordedLines(self, expected):
        with open(self.filename, 'r') as handle:
lines = handle.readlines()
self.assertMultiLineEqual(''.join(lines), expected)
if __name__ == '__main__':
unittest.main()
|
import socket
import sys
import time
import os
EMPTY_MESSAGE = ""
# Client commands
COMMAND_JOIN = "/join"
COMMAND_QUIT = "/quit"
COMMAND_RENAME = "/rename"
COMMAND_SEARCH = "/search"
# Common commands
COMMAND_HELP = "/help"
COMMAND_SHUTDOWN = "/shutdown"
COMMAND_WHO = "/who"
# Client-side commands
COMMANDS_CLIENT_SIDE = (
COMMAND_JOIN, COMMAND_QUIT, COMMAND_RENAME, COMMAND_SEARCH, COMMAND_HELP, COMMAND_SHUTDOWN
)
COMMANDS_SEND_TO_SERVER = (
COMMAND_WHO,
)
# Help text for client commands
COMMANDS_CLIENT_SIDE_DESCRIPTIONS = {
    COMMAND_HELP : "Prints this help message",
    COMMAND_JOIN : "[-ip] -port | Join a server",
    COMMAND_QUIT : "Leave the server",
    COMMAND_RENAME : "-name | Change your name",
    COMMAND_SEARCH : "Search for chat-room servers",
    COMMAND_WHO : "List the people on the server",
}
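# Illustrative sketch (not part of the original module): the descriptions above
# can be used to build the /help output on the client side.
def format_help_message():
    lines = ["Available commands:"]
    for command, description in COMMANDS_CLIENT_SIDE_DESCRIPTIONS.items():
        lines.append("  %s - %s" % (command, description))
    return os.linesep.join(lines)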
|
learning_python = __import__('learning_python')
|
from django.apps import AppConfig
class RecipeAppConfig(AppConfig):
name = 'recipe_app'
|
class ArgErrorType(Exception):
"""Raised when provided argument is of unsupported type."""
pass
class UnreadableFileError(Exception):
"""Raised when pydicom cannot read provided file."""
pass
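# Illustrative usage (not part of the original module; the helper below is hypothetical):
#
#   def load_dicom(path):
#       if not isinstance(path, str):
#           raise ArgErrorType("expected a file path string, got %r" % type(path))
#       try:
#           return pydicom.dcmread(path)
#       except Exception as error:
#           raise UnreadableFileError(str(error))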
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
import cifar10_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', r'D:\ComputeVision\cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.99 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 200.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# Batch Normalization
EPSILON = 0.00001
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def distorted_inputs():
"""Construct distorted input for CIFAR training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for CIFAR evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
images, labels = cifar10_input.inputs(eval_data=eval_data,
data_dir=data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def batch_norm(input, training):
input = tf.layers.batch_normalization(input, momentum=MOVING_AVERAGE_DECAY, training=training,
epsilon=EPSILON, axis=-1, center=True, scale=True)
return tf.nn.relu(input)
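# Pre-activation ResNet bottleneck: BN+ReLU, then 1x1 -> 3x3 -> 1x1 convolutions
# expanding to 4*filters output channels, added back onto the shortcut. Note that
# the `projection` and `name` arguments appear effectively unused in this file:
# build_layer() below projects the input to 4*filters channels once, before the
# first block, so channel counts already match for the residual addition and all
# strides are 1.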
def _bottleneck_block(input, filters, training=False, projection=None, name=None):
shortcut = input
filters_out = 4 * filters
input = batch_norm(input, training)
if projection:
shortcut = input
input = tf.layers.conv2d(input, kernel_size=1, filters=filters, strides=1, padding='SAME', use_bias=False,
kernel_initializer=tf.variance_scaling_initializer)
input = batch_norm(input, training)
input = tf.layers.conv2d(input, kernel_size=3, strides=1, filters=filters, padding='SAME', use_bias=False,
kernel_initializer=tf.variance_scaling_initializer)
input = batch_norm(input, training)
input = tf.layers.conv2d(input, filters=filters_out, kernel_size=1, strides=1, padding='SAME', use_bias=False,
kernel_initializer=tf.variance_scaling_initializer)
input = input + shortcut
return tf.identity(input)
def build_layer(input, blocks, training, filters):
def projection(input):
return tf.layers.conv2d(input, filters * 4, kernel_size=1, strides=1, padding='SAME', use_bias=False,
kernel_initializer=tf.variance_scaling_initializer)
input = projection(input)
for i in range(1, blocks):
input = _bottleneck_block(input, filters=filters, training=training)
return tf.identity(input)
def inference(image, training=False):
input = tf.layers.conv2d(image, filters=32, kernel_size=3, strides=1, use_bias=False,
kernel_initializer=tf.variance_scaling_initializer)
input = batch_norm(input, training)
input = tf.layers.conv2d(input, filters=32, kernel_size=3, strides=2, use_bias=False,
kernel_initializer=tf.variance_scaling_initializer)
input = batch_norm(input, training)
input = build_layer(input, blocks=2, training=training, filters=64)
input = build_layer(input, blocks=4, training=training, filters=128)
input = build_layer(input, blocks=2, training=training, filters=256)
input = batch_norm(input, training)
shape = input.get_shape().as_list()
ave_pool_size = shape[2]
input = tf.layers.average_pooling2d(input, pool_size=ave_pool_size, strides=1)
input = tf.identity(input)
    # Important step: flatten the pooled feature map to (batch, channels) before the dense layer
input = tf.reshape(input, [-1, shape[-1]])
input = tf.layers.dense(input, units=NUM_CLASSES)
input = tf.identity(input)
return input
def loss(logits, labels, weight_decay):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
total_loss = cross_entropy_mean + weight_decay * tf.add_n([tf.nn.l2_loss(v)
for v in tf.trainable_variables()])
tf.summary.scalar("loss", total_loss)
return total_loss
def train(loss, global_step):
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
opt = tf.train.GradientDescentOptimizer(lr)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = opt.minimize(loss, global_step=global_step)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([train_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
def maybe_download_and_extract():
"""Download and extract the tarball from Alex's website."""
dest_directory = FLAGS.data_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
if not os.path.exists(extracted_dir_path):
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
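# Illustrative sketch (not part of the original file): how the functions above
# are typically wired together, mirroring the call sequence described in the
# module docstring. The weight_decay value and the session/queue-runner setup
# that a full training script needs are assumptions and are omitted here.
def example_training_graph(weight_decay=1e-4):
    global_step = tf.train.get_or_create_global_step()
    images, labels = distorted_inputs()            # distorted input pipeline
    logits = inference(images, training=True)      # forward pass of the ResNet-style model
    total_loss = loss(logits, labels, weight_decay)
    return train(total_loss, global_step)          # train op with LR decay and EMA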
|
import cv2
def get_image_data(filename):
    # Read the image file (cv2.imread returns None if the file cannot be read)
    img = cv2.imread(filename)
    # Return image dimensions as a (width, height) tuple; img.shape is (height, width, channels)
    image_data = (img.shape[1], img.shape[0])
return image_data
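# Example usage (illustrative; "photo.jpg" is a hypothetical file name):
#
#   width, height = get_image_data("photo.jpg")
#   print("%d x %d" % (width, height))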
|
from .LiteralNode import LiteralNode
class IntegerLiteralNode(LiteralNode):
    def __init__(self, loc, ref, value):
        super().__init__(loc, ref)
        self._value = value
    def value(self):
        return self._value
    def _dump(self, dumper):
        dumper.print_member("type_node", self.type_node())
        dumper.print_member("value", self._value)
    def accept(self, visitor):
        return visitor.visit(self)
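# Illustrative sketch (hypothetical visitor class and names, not from this
# repository): the node participates in a classic visitor dispatch, e.g.
#
#   class ConstantPrinter:
#       def visit(self, node):
#           if isinstance(node, IntegerLiteralNode):
#               print("int literal:", node.value())
#
#   IntegerLiteralNode(loc, ref, 42).accept(ConstantPrinter())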
|