max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
attack_methods/attack_initializer.py | ASU-Active-Perception-Group/decentralized_attribution_of_generative_models | 3 | 12764851 | from .Crop import Crop
from .Gaussian_blur import Gaussian_blur
from .Gaussian_noise import Gaussian_noise
from .Jpeg_compression import JpegCompression
from .Combination import Combination_attack
import torch
def attack_initializer(attack_method, is_train):
    """Build and return the attack transform selected by ``attack_method``.

    Args:
        attack_method: One of 'Crop', 'Noise', 'Blur', 'Jpeg', 'Combination'
            or 'Combination_with_pillow'.
        is_train: Whether the attack is used during training; passed through
            to the individual attack constructors.

    Returns:
        The constructed attack object.

    Raises:
        ValueError: If ``attack_method`` is not one of the supported names.
    """
    if attack_method == 'Crop':
        attack = Crop([0.8, 1], is_train)
    elif attack_method == 'Noise':
        attack = Gaussian_noise([0, 0.3], is_train)
    elif attack_method == 'Blur':
        # "kernel_size" here is a list of candidate Gaussian kernel sizes.
        attack = Gaussian_blur(kernel_size=[1, 3, 5, 7, 9], is_train=is_train)
    elif attack_method == "Jpeg":
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        attack = JpegCompression(device)
    elif attack_method == 'Combination':
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        attacks = [
            Gaussian_blur(kernel_size=[1, 3, 5, 7, 9], is_train=is_train),
            Crop([0.8, 1], is_train),
            Gaussian_noise([0, 0.1], is_train),
            JpegCompression(device),
        ]
        attack = Combination_attack(attacks, is_train)
    elif attack_method == 'Combination_with_pillow':
        # Combination attack, but JPEG is applied after the other 3 attacks:
        # img -> 3 attacks -> save PNG -> Pillow quality 75 -> load
        attacks = [
            Gaussian_blur(kernel_size=[1, 3, 5, 7, 9], is_train=is_train),
            Crop([0.8, 1], is_train),
            Gaussian_noise([0, 0.1], is_train),
        ]
        attack = Combination_attack(attacks, is_train)
    else:
        raise ValueError("Not available Attacks")
    return attack
results/plot_bulk.py | vincentlui/megae | 82 | 12764852 | <filename>results/plot_bulk.py
import glob
import os
import ast
import numpy as np
from copy import deepcopy
import seaborn as sns; sns.set()
sns.set_style('whitegrid')
from scipy.signal import medfilt
import argparse
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'font.size': 12})
class AttrDict(dict):
    """Dictionary whose items are also reachable as attributes."""

    # Attribute assignment writes straight into the underlying dict.
    __setattr__ = dict.__setitem__

    def __getattr__(self, name):
        # Missing keys must surface as AttributeError so that hasattr()
        # and getattr(..., default) behave correctly for attribute access.
        if name in self:
            return dict.__getitem__(self, name)
        raise AttributeError
def smooth_reward_curve(x, y):
    """Smooth *y* with a centred box filter whose width scales with len(x).

    Returns (x, y_smoothed); x is passed through unchanged.
    """
    half = int(np.ceil(len(x) / 60))  # half-width of the smoothing window
    window = np.ones(2 * half + 1)
    # Dividing by a same-mode convolution of ones corrects the attenuation
    # that 'same' padding introduces at the edges of the series.
    smoothed = np.convolve(y, window, mode='same') / np.convolve(
        np.ones_like(y), window, mode='same')
    return x, smoothed
def fix_point(x, y, interval):
    """Resample the curve (x, y) onto a regular grid with step *interval*.

    Linearly interpolates y at positions 0, interval, 2*interval, ... up to
    max(x); grid points beyond the last segment are dropped.

    Returns:
        (fx, fy): lists of resampled x positions and interpolated y values.
    """
    # NOTE(review): the original code called np.insert(x, 0, 0) and
    # np.insert(y, 0, 0) here and discarded the results (np.insert returns a
    # new array, it does not modify in place), so they were no-ops and have
    # been removed.  Actually prepending a zero would duplicate the leading
    # x == 0 sample produced by the caller and cause a zero division below.
    fx, fy = [], []
    pointer = 0
    ninterval = int(max(x) / interval + 1)
    for i in range(ninterval):
        tmpx = interval * i
        # Advance to the segment [x[pointer], x[pointer + 1]] containing tmpx.
        while pointer + 1 < len(x) and tmpx > x[pointer + 1]:
            pointer += 1
        if pointer + 1 < len(x):
            alpha = (y[pointer + 1] - y[pointer]) / (x[pointer + 1] - x[pointer])
            tmpy = y[pointer] + alpha * (tmpx - x[pointer])
            fx.append(tmpx)
            fy.append(tmpy)
    return fx, fy
def load_data(indir, smooth, bin_size):
    """Load every CSV run in *indir* and return the resampled curves.

    Each ``*.csv`` file is one run; columns 1 and 2 are read (column 0 is
    skipped).  Every run is smoothed/resampled independently, and the per-run
    results are zipped so the caller receives two tuples:
    (x_of_each_run, y_of_each_run).

    smooth: 0 = none, 1 = box-filter smoothing, 2 = median filter.
    bin_size: resampling interval passed to fix_point().
    """
    datas = []
    infiles = glob.glob(os.path.join(indir, '*.csv'))
    # datas_goal = []
    for inf in infiles:
        data = []
        # assumes columns 1 and 2 hold (step, value) pairs -- TODO confirm
        # against whatever wrote these CSVs.
        data_csv = np.loadtxt(inf, delimiter=",", skiprows=1, usecols=[1, 2])
        for sec, acc in zip(data_csv[:, 0], data_csv[:, 1]):
            data.append([sec, acc])
        datas.append(data)
    def process_data(datas):
        # Sort samples by their first field, then pair each value with the
        # *previous* sample's step, so the curve effectively starts at 0.
        datas = sorted(datas, key=lambda d_entry: d_entry[0])
        result = []
        timesteps = 0
        for data in datas:
            result.append([timesteps, data[-1]])
            timesteps = data[0]
        x, y = np.array(result)[:, 0], np.array(result)[:, 1]
        if smooth == 1:
            x, y = smooth_reward_curve(x, y)
        if smooth == 2:
            y = medfilt(y, kernel_size=9)
        x, y = fix_point(x, y, bin_size)
        return [x, y]
    # if goal:
    #     return list(zip(*tuple([process_data(data_goal[goal]) for data_goal in datas_goal])))
    # else:
    return list(zip(*(process_data(data) for data in datas)))
def load(indir, names, labels, smooth, bin_size):
    """Load every run under ``indir/<name>`` and aggregate across seeds.

    Returns a list of ``[label, x, y_mean, y_std]`` entries, one per name.
    """
    curves = []
    for run_name, run_label in zip(names, labels):
        run_dir = os.path.join(indir, run_name)
        xs, ys = [], []
        loaded_x, loaded_y = load_data(run_dir, smooth, bin_size)
        xs.extend(loaded_x)
        ys.extend(loaded_y)
        if len(xs) > 1:
            # Seeds may differ in length; truncate everything to the shortest
            # run before averaging.
            shortest = min(len(series) for series in xs)
            xs = [series[:shortest] for series in xs]
            ys = [series[:shortest] for series in ys]
            x = np.mean(np.array(xs), axis=0)
            y_mean = np.mean(np.array(ys), axis=0)
            y_std = np.std(np.array(ys), axis=0)
        else:
            x = np.array(xs).reshape(-1)
            y_mean = np.array(ys).reshape(-1)
            y_std = np.zeros(len(y_mean))
        curves.append([run_label, x, y_mean, y_std])
    return curves
def plot(args):
    """Render one comparison figure from the configured curves, save as PNG.

    *args* is an AttrDict built from a .conf file; it supplies source paths,
    labels, axis limits, styling (colors/markers/linestyles) and the output
    path.  Side effect: writes ``args.output`` and prints a DONE message.
    """
    plt.figure(figsize=(4,3), dpi=300)
    sns.despine(left=True, bottom=True)
    datas = load(args.source, args.names, args.labels, args.smooth, args.bin_size)
    # Drop any curve whose label contains one of the ignore strings.
    for ignore_string in args.ignore_strings:
        datas = [d for d in datas if not ignore_string in d[0]]
    lines = []
    max_y = args.max_y
    min_y = args.min_y
    max_x = args.max_x
    min_x = 1e10
    for i, data in enumerate(datas):
        label, x, y_mean, y_std = data
        color = args.colors[i]
        # Only draw the +/- std band when there is any spread across seeds.
        if np.sum(y_std):
            y_upper = y_mean + y_std
            y_lower = y_mean - y_std
            plt.fill_between(
                x, list(y_lower), list(y_upper), interpolate=True, facecolor=color, linewidth=0.0, alpha=0.2
            )
        linestyle = args.line_styles[i]
        marker = args.markers[i]
        markersize = args.markersizes[i]
        markevery=int(max_x/500)
        line = plt.plot(x, list(y_mean), linewidth=2.0, label=label, color=color, markersize=markersize, marker=marker, markevery=markevery, linestyle=linestyle)
        lines.append(line[0])
        if max(x) < min_x:
            min_x = max(x)
    xticks = np.arange(0, max_x+1, max_x // 8 )
    # Blank out every second tick label to reduce clutter.
    ignore_even = lambda s, i: s if i %2 == 0 else ''
    # NOTE(review): if args.xlabel contains neither 'Millions' nor
    # 'Thousands', xlabels is never assigned and plt.xticks below raises
    # NameError -- presumably all configs use one of the two; confirm.
    if 'Millions' in args.xlabel:
        xlabels = [ignore_even('{:0.2f}'.format(float(x) / 100000000. * args.time_steps_per_episode).rstrip('0').rstrip('.'),
                   i) for i, x in enumerate(xticks)]
    elif 'Thousands' in args.xlabel:
        xlabels = [ignore_even('{:0.2f}'.format(float(x) / 100000. * args.time_steps_per_episode).rstrip('0').rstrip('.'),
                   i) for i, x in enumerate(xticks)]
    #xlabels[-1] += 'M'
    plt.xticks(xticks, xlabels, fontsize=args.label_size)
    if args.xlabel is None:
        # White placeholder text keeps the figure geometry identical to
        # figures that do carry an x label.
        plt.xlabel('Placeholder', fontsize=args.label_size, color='white')
    else:
        plt.xlabel(args.xlabel, fontsize=args.label_size)
    if hasattr(args, 'y_values'):
        plt.yticks(args.y_values, args.y_labels, fontsize=args.label_size)
    if args.ylabel is None:
        plt.ylabel('')
    else:
        plt.ylabel(args.ylabel, fontsize=args.label_size)
    plt.ylim(min_y, max_y)
    plt.xlim(0, max_x)
    plt.legend(fancybox=True, framealpha=0.7, loc=args.legend_loc, fontsize=args.legend_size, frameon=True, facecolor='white', borderaxespad=1.)
    plt.title(args.title, fontsize=args.title_size)
    plt.tight_layout(pad=0.0) # Make room for the xlabel
    plt.savefig(args.output, format='png', dpi=600) # Need to do savefig before plt.show() otherwise PDF file will be blank
    print("DONE {}".format(args.output))
if __name__ == '__main__':
    # Shared settings apply to every plot; '%' starts a comment in .conf files
    # and each remaining line is 'key=value' (values are Python literals).
    with open(os.path.join('confs','shared.conf')) as f:
        shared = dict((l.split('%')[0].split('=') for l in f.readlines() if l and not l[0] == '%'))
    for conf in glob.glob('confs/*.conf'):
        # ignore shared conf
        if 'shared' in conf:
            continue
        with open(conf) as f:
            b = dict((l.split('%')[0].split('=') for l in f.readlines() if l and not l[0] == '%'))
        # Per-plot settings override the shared ones.
        a = deepcopy(shared)
        a.update(b)
        for k, v in a.items():
            a[k] = ast.literal_eval(v)
        a = AttrDict(a)
        # Output PNG is named after the conf file.
        a.output = os.path.join('plots', os.path.splitext(os.path.basename(conf))[0]) + '.png'
        print(conf)
        plot(a)
nmfamv2/UserHandler/__init__.py | gmarupilla/NMRFAMv2 | 0 | 12764853 | import zipfile
import pandas as pd
"""
Lay of the land:
"""
class UserHandler(object):
    # This Object is used to access the directory that saves the results
    def __init__(self, user_directory_path, pathToMixture):
        # Root directory holding this user's result files.
        self.user_directory_path = user_directory_path
        # Must be set by the caller before addZip() can do anything useful.
        self.username = None
        # Path to the mixture data associated with this user.
        self.pathToMixture = pathToMixture
    def addZip(self, zipfile_path):
        # Register a zip archive of results; requires self.username to be set.
        if self.username is None:
            print("[userHandler.py][addZip] Error")
        else:
            if zipfile.is_zipfile(zipfile_path):
                with zipfile.ZipFile(zipfile_path, 'r') as zipObj:
                    # NOTE(review): both locals below are computed but never
                    # used -- this method looks unfinished; confirm intended
                    # behavior before relying on it.
                    topDirName = zipObj.infolist()[0].filename
                    var = self.pathToMixture
| 3.3125 | 3 |
bob/db/swan/test.py | bioidiap/bob.db.swan | 0 | 12764854 | <filename>bob/db/swan/test.py
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""Test Units
"""
import logging
logger = logging.getLogger(__name__)
def _test_numbers(files, n_total_files, n_clients, n_recordings,
                  n_devices, n_sessions, session_list, sites):
    """Assert that *files* matches the expected database statistics."""
    unique_clients = {f.client_id for f in files}
    assert len(unique_clients) == n_clients, len(unique_clients)
    unique_recordings = {f.nrecording for f in files}
    assert len(unique_recordings) == n_recordings, len(unique_recordings)
    unique_devices = {f.device for f in files}
    assert len(unique_devices) == n_devices, len(unique_devices)
    unique_sessions = {f.session for f in files}
    assert len(unique_sessions) == n_sessions, len(unique_sessions)
    # Not only the count: the session names themselves must match.
    assert unique_sessions == set(session_list), unique_sessions
    unique_sites = {f.client.institute for f in files}
    assert unique_sites == set(sites), unique_sites
    assert len(files) == n_total_files, len(files)
def _test_annotation(db, files):
    """Check that the first file's annotations are either absent or a dict."""
    try:
        annotation = db.annotations(files[0])
        assert annotation is None or isinstance(annotation, dict), type(annotation)
    except AssertionError:
        # Assertion failures are genuine test failures -- let them propagate.
        raise
    except Exception:
        # Anything else (e.g. missing annotation files) is only logged.
        logger.warn(
            "annotations tests failed. Maybe the annotations files are "
            "missing?", exc_info=True)
def test_pad_protocols():
    """Exercise the PAD protocol file lists and the filter_samples option."""
    from .query_pad import Database

    db = Database(protocol='pad_p2_face_f1')
    real, attack = db.all_files(groups='train')
    assert len(real) == 750, len(real)
    assert len(attack) == 1251, len(attack)

    # Restrict the lists to IDIAP clients via the filter_samples option.
    def keep_idiap(sample):
        return "IDIAP" in sample.client_id

    db.all_files_options = dict(filter_samples=keep_idiap)
    real, attack = db.all_files(groups='train')
    assert len(real) == 230, len(real)
    assert len(attack) == 391, len(attack)
| 2.484375 | 2 |
alloy_related/railMLToAlloy/parserRail/instance.py | pedrordgs/RailML-Utilities | 21 | 12764855 | class Instance:
def __init__ (self, alloyfp):
self.bitwidth = '4'
self.maxseq = '4'
self.mintrace = '-1'
self.maxtrace = '-1'
self.filename = alloyfp
self.tracel = '1'
self.backl = '0'
self.sigs = []
self.fields = []
def add_sig(self, sig):
self.sigs.append(sig)
def add_field(self, field):
self.fields.append(field)
| 2.578125 | 3 |
watcher_metering/store/ceilometer.py | b-com/watcher-metering | 2 | 12764856 | # -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import json
from oslo_log import log
import requests
from watcher_metering.load.loadable import ExternalOptConfig
from watcher_metering.store.base import MetricsStoreClientBase
from watcher_metering.store.base import MetricsStoreError
from watcher_metering.store.utils.keystone import KeystoneClient
from watcher_metering.store.utils.keystone import KeystoneError
LOG = log.getLogger(__name__)
class CeilometerClient(MetricsStoreClientBase):
    """Ceilometer metrics store client.

    Pushes metric samples to the Ceilometer HTTP API, authenticating
    against Keystone with the admin credentials given at construction.
    """
    def __init__(self, auth_uri, admin_user,
                 admin_password, admin_tenant_name):
        """
        :param auth_uri: Keystone authentication endpoint URI.
        :param admin_user: Keystone admin user name.
        :param admin_password: Keystone admin password.
        :param admin_tenant_name: Keystone admin tenant name.
        """
        super(CeilometerClient, self).__init__()
        self._store_endpoint = None
        self.auth_uri = auth_uri
        self.admin_user = admin_user
        # NOTE(review): this assignment had been mangled to a "<PASSWORD>"
        # placeholder (invalid syntax); restored to the constructor argument.
        self.admin_password = admin_password
        self.admin_tenant_name = admin_tenant_name
        self.keystone_client = KeystoneClient(
            self.auth_uri, self.admin_user,
            self.admin_password, self.admin_tenant_name
        )
        self._ceilometer_uri = None

    @classmethod
    def get_name(cls):
        """Short identifier used to select this store client."""
        return "ceilometer"

    @classmethod
    def get_config_opts(cls):
        return []  # No need for store_endpoint in cfg

    @classmethod
    def get_external_opts_configs(cls):
        """This store client requires some Keystone configuration options

        :return: The list of options relative to this store client
        :rtype: list of :class:`ExternalOptConfig` instances
        """
        return [
            ExternalOptConfig(
                name="auth_uri",
                module_str="keystoneclient.middleware.auth_token",
                group="keystone_authtoken"),
            ExternalOptConfig(
                name="admin_user",
                module_str="keystoneclient.middleware.auth_token",
                group="keystone_authtoken"),
            ExternalOptConfig(
                name="admin_password",
                module_str="keystoneclient.middleware.auth_token",
                group="keystone_authtoken"),
            ExternalOptConfig(
                name="admin_tenant_name",
                module_str="keystoneclient.middleware.auth_token",
                group="keystone_authtoken"),
        ]

    @property
    def store_endpoint(self):
        """Ceilometer endpoint, dynamically retrieved from Keystone.

        :return: The Ceilometer endpoint
        :rtype: str
        """
        # Kind of cache for logging purposes (avoids repeated calls)
        self._store_endpoint = self.keystone_client.ceilometer_uri
        return self._store_endpoint

    def connect(self):
        LOG.info("No need to connect: Stateless via HTTP.")

    def disconnect(self):
        LOG.info("No need to disconnect: Stateless via HTTP.")

    def _send(self, metric):
        """POST one metric; raise MetricsStoreError on delivery failure."""
        is_successful = self.request_http_post(metric)
        if not is_successful:
            LOG.error(
                "[Ceilometer] Could not deliver the message to the server."
            )
            raise MetricsStoreError("Could not deliver the message "
                                    "to the Ceilometer server.")

    def send(self, metric):
        """Best-effort publish: delivery failures are logged, not raised."""
        LOG.debug('Publishing metrics to `%s`', self._store_endpoint)
        try:
            self._send(metric)
        except MetricsStoreError as exc:
            LOG.warn('Unable to send metric `%r`', metric)
            LOG.exception(exc)

    def encode_data(self, metric):
        """Serialize a metric dict into the Ceilometer sample JSON payload."""
        try:
            return json.dumps([
                {
                    "name": metric["name"],
                    "unit": metric["unit"],
                    "type": metric["type"],
                    "volume": metric["value"],
                    "host": metric["host"],
                    "user_id": metric.get("user_id", ""),
                    "project_id": metric.get("project_id", ""),
                    "resource_id": metric.get("resource_id", ""),
                    "resource_metadata": metric["resource_metadata"],
                    "timestamp": metric["timestamp"]
                }
            ])
        except KeystoneError as exc:
            # NOTE(review): json.dumps cannot raise KeystoneError, so this
            # handler looks vestigial; kept to preserve existing behavior.
            LOG.exception(exc)

    def request_http_post(self, metric):
        """POST the metric to the Ceilometer endpoint.

        :return: True iff the server answered HTTP 200 (OK).
        :raises MetricsStoreError: if no Keystone token could be obtained.
        """
        try:
            token = self.keystone_client.token
            if not token:
                LOG.warning("token is empty!")
                raise MetricsStoreError("Keystone token is empty!")
        except KeyError as exc:
            LOG.exception(exc)
            raise MetricsStoreError("Could not get a token from Keystone!")
        data = self.encode_data(metric)
        headers = {
            "X-Auth-Token": token,
            "content-type": "application/json",
            "User-Agent": "metering-agent"
        }
        response = requests.post(
            "%s/%s" % (self.store_endpoint, metric["name"]),
            data=data,
            headers=headers,
            timeout=10
        )
        return response.status_code == requests.codes.ok
| 1.6875 | 2 |
Lecture 03 - Simple Algorithms/newton_raphson.py | ag-ds-bubble/MIT6.00.1x | 0 | 12764857 | # Find square root using <NAME>
num = 987
eps = 1e-6
iteration = 0
sroot = num/2
while abs(sroot**2-num)>eps:
iteration+=1
if sroot**2==num:
print(f'At Iteration {iteration} : Found It!, the cube root for {num} is {sroot}')
break
sroot = sroot - (sroot**2-num)/(2*sroot)
print(f'At Iteration {iteration} : the square root approximation for {num} is {sroot}')
| 4 | 4 |
sourceCode/PreProcessing/filterings.py | suryagupta/ProteinAssociationPair | 0 | 12764858 | __author__ = 'surya'
from Analysis import GetAssayCount
def preFiltrationWithHighlyOCcuringProteins(MSFile,uniprotProteinList,out):
    """Write MS lines whose protein is in uniprotProteinList to *out* and
    return a cutoff equal to the 10th-highest assay count in the file.

    MSFile is a tab-separated file with a header row; column 0 is the
    protein id and column 1 its assay count.
    Side effects: writes selected lines to *out* and CLOSES the handle.
    """
    assCountList=[]
    with open(MSFile) as OpenedMSFile:
        # Skip the header row.
        next(OpenedMSFile)
        for line in OpenedMSFile:
            colList=line.split("\t")
            assCount = int(colList[1].strip())
            if colList[0].strip() in uniprotProteinList:
                out.write(line)
            # Counted for ALL proteins, not only the ones written out.
            assCountList.append(assCount)
    assCountList.sort()
    # 10th largest assay count overall.
    # NOTE(review): assumes the file holds at least 10 data rows -- confirm.
    maxCutoff=assCountList[-10]
    print "the maximum cutoff is ",maxCutoff
    out.close()
    return maxCutoff
def CheckAssayWithPeptideIds(MSFile,AssayLen,assayCheck=False,PhosList=[],proteincheck=False,proteinList=[],
                             MinPep=0,SelectedAssayList=[],selectedProteinFile=""):#,crap=[]):
    """Select proteins whose assay dictionaries exceed AssayLen assays.

    Proteins with assay counts between AssayLen (exclusive) and the
    10th-highest count (exclusive, from the pre-filtration step) are kept;
    optionally they must also appear in proteinList.

    Returns (protein2Assay, assayList, protein2AssayPepString, ln) where
    ln is a human-readable summary string (also printed).

    NOTE(review): PhosList is accepted but never used here; the mutable
    default arguments ([]) are shared across calls -- confirm callers never
    mutate them.
    """
    # Pre-filter writes matching proteins to selectedProteinFile and yields
    # the upper assay-count cutoff.
    maxAssayLen=preFiltrationWithHighlyOCcuringProteins(MSFile,proteinList,selectedProteinFile)
    proteinSkiped=0
    protein2Assay={}
    protein2AssayPepString={}
    count=0
    assayList=[]
    check=False
    totalProt=0
    with open(MSFile) as OpenedMSFile:
        # Skip the header row.
        next(OpenedMSFile)
        for line in OpenedMSFile:
            colList=line.split("\t")
            totalProt+=1
            assCount = int(colList[1].strip())
            if assCount>=maxAssayLen:
                # Report (and implicitly exclude) the highly occurring proteins.
                print colList[0].strip(),assCount
            if assCount > AssayLen and assCount<maxAssayLen:
                ##first select the threshold
                if proteincheck:
                    if colList[0].strip() in proteinList:# and colList[0].strip() not in crap:
                        check=True
                    else:
                        proteinSkiped+=1
                        check=False
                else:
                    check=True
                if check:
                    # Column 2 carries the assay/peptide string for the protein.
                    AssayDic=GetAssayCount.createAssayDicWithPepName(colList[2].strip(),MinPep=MinPep,
                                                                     AssayCheckList=SelectedAssayList,AssCheck=assayCheck)
                    if len(AssayDic)<=AssayLen: #and AssayDic[AssayDic.keys()[0]]==1:
                        # Too few assays after parsing: count as removed.
                        count+=1
                        # print(AssayDic)
                    elif len(AssayDic)>AssayLen:
                        # if len(protein2Assay)==50:
                        #     break
                        protein2Assay[colList[0].strip()]=AssayDic
                        protein2AssayPepString[colList[0].strip()] = colList[2].strip()
                        # print colList[2].strip()
                        # Track the set of distinct assays seen so far.
                        for each in AssayDic:
                            if each not in assayList:
                                assayList.append(each)
    # for pro in protein2Assay:
    #     selectedProteinFile.write(pro+"\n")
    # selectedProteinFile.close()
    ln="total removed assays are "+str(count)+"\n"+"total assay found are "+str(len(assayList))+"\n"+\
       "total proteins remaining are "+str(len(protein2Assay))+" out of total proteins "+str(totalProt)+"\n"+\
       "total protein skipped because they are not present in pathway/uniprot are "+str(proteinSkiped)+"\n"
    print ln
    return protein2Assay,assayList,protein2AssayPepString,ln
| 2.734375 | 3 |
Enet_lapa_camvid_pytorch/datasets/lapa_loader.py | TheDetial/Image-Segmentation | 21 | 12764859 | # -*- coding: utf-8 -*-
import os
import random
from torch.utils.data import Dataset
from PIL import Image
import numpy as np
from datasets.data_io import get_transform, read_all_lines
from datasets.data_io import *
import torchvision.transforms as transforms
import torch
import torch.nn as nn
class LapaPngPng(Dataset):
    """PNG-image / PNG-label-mask dataset for LaPa-style segmentation.

    Reads (image_path, gt_path) pairs from a list file, resizes or crops
    both to (crop_h, crop_w), color-jitters the image and returns it as a
    float tensor together with the mask as a long tensor of class ids.
    """
    def __init__(self, datapath, list_filename, training, crop_h, crop_w, channel):
        self.datapath = datapath
        self.ori_filenames, self.gt_filenames = self.load_path(list_filename)
        self.training = training  # True for the training split
        self.crop_h = crop_h  # target crop height
        self.crop_w = crop_w  # target crop width
        self.channel = channel  # 3 for RGB input, 1 for grayscale
        self.transform_img = transforms.Compose(
            [
                transforms.Resize(size=(self.crop_h, self.crop_w)),  # Resize can scale up or down arbitrarily; order is h, w
            ]
        )
        # error:Floating point exception(core dumped)
        self.processed_color = transforms.Compose([transforms.ColorJitter(brightness=0.5, contrast=0.5),
                                                   transforms.ToTensor(), #transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                                   ])
    def load_path(self, list_filename):
        # Each line of the list file holds "image_path gt_path".
        lines = read_all_lines(list_filename)
        splits = [line.split() for line in lines]
        ori_images = [x[0] for x in splits]
        gt_images = [x[1] for x in splits]
        return ori_images, gt_images
    # load rgb-color
    def load_image(self, filename):
        # Input image: RGB when channel == 3, grayscale when channel == 1.
        if self.channel == 3:
            return Image.open(filename).convert('RGB')
        elif self.channel == 1:
            return Image.open(filename).convert('L')
    def load_img(self, filename):
        # Ground-truth mask is always loaded as a single-channel image.
        return Image.open(filename).convert('L')
    def __len__(self):
        return len(self.ori_filenames)
    # augmentation
    def augment_image_pair(self, left_image):
        """Photometric augmentation: random gamma, brightness, color shift."""
        # randomly shift gamma
        random_gamma = torch.rand(1).numpy()[0] * 0.4 + 0.8 # random.uniform(0.8, 1.2)
        left_image_aug = left_image ** random_gamma
        # randomly shift brightness
        random_brightness = torch.rand(1).numpy()[0] * 1.5 + 0.5 # random.uniform(0.5, 2.0)
        left_image_aug = left_image_aug * random_brightness
        # randomly shift color
        if self.channel == 3:
            random_colors = (torch.rand(1).numpy()[0] * 0.4 + 0.8, torch.rand(1).numpy()[0] * 0.4 + 0.8,
                             torch.rand(1).numpy()[0] * 0.4 + 0.8)
            white = torch.ones(left_image.shape[1], left_image.shape[2])
            color_image = torch.stack((white * random_colors[0], white * random_colors[1], white * random_colors[2]),dim=0)
            left_image_aug *= color_image
        # saturate
        left_image_aug = torch.clamp(left_image_aug, 0, 1)
        return left_image_aug
    def __getitem__(self, index):
        """Return {'ori': image_tensor, 'gt': mask_tensor} (plus 'img_name'
        for evaluation samples)."""
        ori_img = self.load_image(os.path.join(self.datapath, self.ori_filenames[index]))
        # img_np = np.asarray(ori_img, dtype=float)
        # print(img_np.shape)
        # print(img_np.dtype)
        # ori_img = Image.fromarray(img_np, mode='RGB')
        gt_img = self.load_img(os.path.join(self.datapath, self.gt_filenames[index]))
        # add -png.name
        ori_pathname = self.ori_filenames[index]
        if self.training:
            w, h = ori_img.size
            if w < self.crop_w or h < self.crop_h:
                # Image smaller than the crop size: resize image and GT together first.
                ori_img = self.transform_img(ori_img)
                gt_img = self.transform_img(gt_img)
                # to tensor, normalize -- convert the image to a normalized tensor
                ori_img = self.processed_color(ori_img)
                gt_img = np.array(gt_img, dtype='int64')
                gt_img = torch.from_numpy(gt_img)
                gt_img = torch.squeeze(gt_img).long()
                # gt_img = gt_img.squeeze(0) # (h, w)
                # randomly images
                # do_augment = torch.rand(1).numpy()[0]
                # if do_augment > 0.5:
                #     ori_img = self.augment_image_pair(ori_img)
                return {"ori": ori_img,
                        "gt": gt_img}
            # random crop -- crop image and GT at the same location
            x1 = random.randint(0, w - self.crop_w)
            y1 = random.randint(0, h - self.crop_h)
            ori_img = ori_img.crop((x1, y1, x1 + self.crop_w, y1 + self.crop_h))
            gt_img = gt_img.crop((x1, y1, x1 + self.crop_w, y1 + self.crop_h))
            # to tensor, normalize -- convert the image to a tensor
            ori_img = self.processed_color(ori_img)
            # No normalization when converting the GT mask to a tensor
            gt_img = np.array(gt_img, dtype='int64')
            gt_img = torch.from_numpy(gt_img)
            gt_img = torch.squeeze(gt_img).long()
            # gt_img = gt_img.squeeze(0) # (h, w)
            # randomly images
            # do_augment = torch.rand(1).numpy()[0]
            # if do_augment > 0.5:
            #     ori_img = self.augment_image_pair(ori_img)
            return {"ori": ori_img,
                    "gt": gt_img}
        else:
            w, h = ori_img.size
            if w < self.crop_w or h < self.crop_h:
                # Image smaller than the crop size: resize image and GT together first.
                ori_img = self.transform_img(ori_img)
                gt_img = self.transform_img(gt_img)
                # to tensor, normalize -- convert the image to a normalized tensor
                ori_img = self.processed_color(ori_img)
                gt_img = np.array(gt_img, dtype='int64')
                gt_img = torch.from_numpy(gt_img)
                gt_img = torch.squeeze(gt_img).long()
                # gt_img = gt_img.squeeze(0) # (h, w)
                # randomly images
                # do_augment = torch.rand(1).numpy()[0]
                # if do_augment > 0.5:
                #     ori_img = self.augment_image_pair(ori_img)
                return {"ori": ori_img,
                        "gt": gt_img,
                        "img_name": ori_pathname}
            # Deterministic bottom-right crop for evaluation.
            x1 = w - self.crop_w
            y1 = h - self.crop_h
            ori_img = ori_img.crop((x1, y1, x1 + self.crop_w, y1 + self.crop_h))
            gt_img = gt_img.crop((x1, y1, x1 + self.crop_w, y1 + self.crop_h))
            # to tensor, normalize -- convert the image to a tensor
            ori_img = self.processed_color(ori_img)
            # No normalization when converting the GT mask to a tensor
            gt_img = np.array(gt_img, dtype='int64')
            gt_img = torch.from_numpy(gt_img)
            gt_img = torch.squeeze(gt_img).long()
            return {"ori": ori_img,
                    "gt": gt_img,
                    "img_name": ori_pathname}
| 2.40625 | 2 |
leetcode/monthly_challenges/2020-06/w1-2_delete_node_linked_list.py | mawillcockson/coding_challenges | 0 | 12764860 | <filename>leetcode/monthly_challenges/2020-06/w1-2_delete_node_linked_list.py
"""
Constraints:
- The linked list will have at least two elements.
- All of the nodes' values will be unique.
- The given node will not be the tail and it will always be a valid node of the linked list.
- Do not return anything from your function.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from typing import Any, Optional
# class ListNode:
# def __init__(self, x: Any):
# self.val = x
# self.next: Optional[ListNode] = None
class Solution:
    def deleteNode(self, node: "ListNode") -> None:
        """Delete *node* from its singly linked list without head access.

        The problem guarantees *node* is never the tail, so we copy the
        successor's value into *node* and splice the successor out: O(1)
        instead of the original O(n) shift of every following value (which
        also carried a dead `if not node.next` guard contradicting the
        stated constraint).  The annotation is quoted because ListNode is
        only defined in a comment above.

        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.
        """
        node.val = node.next.val
        node.next = node.next.next
# Submitted: https://leetcode.com/submissions/detail/348828725/?from=/explore/challenge/card/june-leetcoding-challenge/539/week-1-june-1st-june-7th/3348/
# I feel so dumb:
# class Solution:
# def deleteNode(self, node):
# """
# :type node: ListNode
# :rtype: void Do not return anything, modify node in-place instead.
# """
# node.val = node.next.val
# node.next = node.next.next
| 4.0625 | 4 |
sliced/datasets/banknote.py | joshloyal/reduce-learn | 10 | 12764861 | <gh_stars>1-10
import csv
from os.path import dirname
from sliced.datasets.base import load_data
def load_banknote():
    """Load and return the swiss banknote dataset (classification).

    Six measurements made on 100 genuine Swiss banknotes and 100
    counterfeit ones.

    Features
    ========
    Length:   Length of bill (mm)
    Left:     Width of left edge (mm)
    Right:    Width of right edge (mm)
    Bottom:   Bottom margin width (mm)
    Top:      Top margin width (mm)
    Diagonal: Length of image diagonal (mm)
    Y:        0 = genuine, 1 = conterfeit

    ================= =================
    Classes                           2
    Samples per class  100 (Y), 100 (N)
    Samples total                   200
    Dimensionality                    6
    Features             real, positive
    ================= =================
    """
    # The CSV ships alongside this module; load_data splits it into
    # (data, target).
    return load_data(dirname(__file__), 'banknote.csv')
| 2.9375 | 3 |
ex14-list_remove_duplicates.py | lew18/practicepython.org-mysolutions | 0 | 12764862 | """
https://www.practicepython.org
Exercise 14: List Remove Duplicates
2 chilis
Write a program (function!) that takes a list and returns a new list that
contains all the elements of the first list minus all the duplicates.
Extras:
- Write two different functions to do this - one using a loop and constructing
a list, and another using sets.
- Go back and do Exercise 5 using sets, and write the solution for that in a different function.
"""
def remove_common_items(list1):
    """Return list1's elements with duplicates removed.

    Order of first appearance is preserved; a plain membership loop is used
    so unhashable elements are supported too.
    """
    unique = []
    for element in list1:
        if element in unique:
            continue
        unique.append(element)
    return unique
def rci2(list1):
    """Deduplicate list1 keeping the *last* occurrence of each element."""
    # An element survives only if it does not reappear later in the list.
    return [item for idx, item in enumerate(list1) if item not in list1[idx + 1:]]
def rci3(list1, list2):
    """Return the set union of the elements of both lists (Exercise 5 via sets)."""
    return set(list1) | set(list2)
# Quick manual check: dedupe the concatenation of two sample lists with each
# of the three implementations above.
a = [1, 1, 2, 3, 5, 8, 8, 4, 21, 34, 5, 7, 6, 8, "sdfg", "rstl"]
b = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "sdfg", "zyxw"]
common_list = remove_common_items(a + b)
print(common_list)
common_list = rci2(a + b)
print(common_list)
# Exercise 5 variant: set-based union of the two lists.
common_list = rci3(a, b)
print(common_list)
| 4.03125 | 4 |
stepRNA/index_bowtie.py | bmm514/Becky_alignment | 0 | 12764863 | <reponame>bmm514/Becky_alignment
from subprocess import run, PIPE
import os
def main(ref):
    """Build a bowtie2 index for *ref*; return the index basename.

    The basename is *ref* with its extension stripped; bowtie2-build's
    stdout is captured (discarded) via a pipe.
    """
    base, _ = os.path.splitext(ref)
    run(['bowtie2-build', ref, base], stdout=PIPE)
    return base
if __name__ == "__main__":
description = 'Index a reference sequence for ready for bowtie alignment'
from argparse import ArgumentParser, SUPPRESS
parser = ArgumentParser(description=description, add_help=False)
optional = parser.add_argument_group('optional arguments')
required = parser.add_argument_group('required arguments')
#Add back help...
optional.add_argument(
'-h',
'--help',
action='help',
default=SUPPRESS,
help='show this help message and exit'
)
#Add required arguments...
required.add_argument('--ref', '-r', help='Bowtie-build reference basename')
#parser._action_groups.append(optional)
args = parser.parse_args()
ref = args.ref
main(ref)
| 2.5 | 2 |
code/doh_data_classifier/classifier_mlp.py | yshao321/doh_website_fingerprinting | 1 | 12764864 | <reponame>yshao321/doh_website_fingerprinting
#!/usr/bin/env python
# coding: utf-8
import sys
import os
import time
import json
import numpy as np
import pandas as pd
import dill
import random
from os.path import join, dirname, abspath, pardir, basename
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from common.data import *
def numbers_to_onehotvec(samples):
    """Encode each sample (sequence of numbers) as a multi-hot row vector.

    Uses the module-level token_index mapping; the output has one row per
    sample and one column per known token (plus the unused index 0).
    """
    width = max(token_index.values()) + 1
    encoded = np.zeros(shape=(len(samples), width))
    for row, sample in enumerate(samples):
        for number in sample:
            encoded[row, token_index.get(number)] = 1.
    return encoded
def classify(df_train, df_valid):
    """Train a 2-hidden-layer MLP on one train/validation fold.

    Expects dataframes with a `lengths` column (sequences to one-hot
    encode) and a `class_label` column; returns the Keras History object
    from model.fit().
    """
    train_inputs = df_train.lengths
    train_labels = df_train.class_label
    valid_inputs = df_valid.lengths
    valid_labels = df_valid.class_label
    # Vectorize input data
    x_train = numbers_to_onehotvec(train_inputs)
    x_valid = numbers_to_onehotvec(valid_inputs)
    # Vectorize label data
    from keras.utils.np_utils import to_categorical
    y_train = to_categorical(train_labels)
    y_valid = to_categorical(valid_labels)
    # Define the network structure
    from keras import models
    from keras import layers
    input_dim = x_train.shape[-1]
    output_dim = y_train.shape[-1]
    model = models.Sequential()
    model.add(layers.Dense(256, activation='relu', input_shape=(input_dim,)))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(output_dim, activation='softmax'))
    # Build the network
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # Train the network
    result = model.fit(x_train,
                       y_train,
                       epochs=100,
                       batch_size=256,
                       validation_data=(x_valid, y_valid))
    return result
num_classes = 5000 # Number of classes (sites)
num_samples = 10 # Number of samples for each class (site)
min_packets = 1 # Minimum number of packets for each row (record)
max_packets = 50 # Maximum number of packets for each row (record)
token_index = {} # Maps packet lengths to vector indices; filled in classifier_train()
def classifier_train():
    """Train the MLP with 5-fold stratified CV and plot accuracy curves.

    Side effects: fills the module-level token_index, prints progress, and
    shows two matplotlib figures (mean train / validation accuracy per epoch).
    """
    # Locate dataset
    data_dir = join(abspath(join(dirname("__file__"), pardir, pardir)), 'dataset', 'closed-world')
    print(data_dir)
    # Load dataset
    df = load_data(data_dir)
    print("initial data", df.shape)
    # Clean dataset
    df_closed = clean_df_closed(df, min_packets, max_packets, num_classes, num_samples)
    print("cleaned data", df_closed.shape)
    # Build token index
    for sample in df_closed.lengths:
        for number in sample:
            if number not in token_index:
                token_index[number] = len(token_index) + 1
    print("token index", len(token_index))
    # Perform k-fold cross classification
    train_results = []
    valid_results = []
    kf = StratifiedKFold(n_splits = 5)
    for k, (train_k, test_k) in enumerate(kf.split(df_closed, df_closed.class_label)):
        print("k-fold", k)
        start_time = time.time()
        result = classify(df_closed.iloc[train_k], df_closed.iloc[test_k])
        print("--- %s seconds ---" % (time.time() - start_time))
        train_results.append(result.history['accuracy'])
        valid_results.append(result.history['val_accuracy'])
        #break
    # Average the per-epoch accuracy across the folds.
    num_epochs = len(train_results[0])
    average_train_results = [np.mean([x[i] for x in train_results]) for i in range(num_epochs)]
    average_valid_results = [np.mean([x[i] for x in valid_results]) for i in range(num_epochs)]
    import matplotlib.pyplot as plt
    plt.plot(range(1, len(average_train_results) + 1), average_train_results)
    plt.xlabel('Epochs')
    plt.ylabel('Training ACC')
    plt.show()
    plt.clf()
    plt.plot(range(1, len(average_valid_results) + 1), average_valid_results)
    plt.xlabel('Epochs')
    plt.ylabel('Validation ACC')
    plt.show()
def classifier_serve():
    """Serve predictions: read data-file paths from stdin, print URL guesses.

    Loads a pickled (dill) pipeline and the website list, then for each
    stdin line loads the referenced capture and prints the predicted URL(s)
    with their maximum class probability.
    """
    # Load pipeline
    loaded_model = dill.load(open('doh_data_classify.pickle', 'rb'))
    print("Model Loaded")
    # Load websites
    urls = get_url_list("../collection/websites.txt")
    for line in sys.stdin:
        # Locate file ([: -1] strips the trailing newline from the stdin line)
        data_file = join(abspath(dirname("__file__")), line)[:-1]
        # Load file
        df_new = load_data(data_file)
        # Predict with pipeline
        pred_new = loaded_model.predict(df_new)
        pred_pro = loaded_model.predict_proba(df_new)
        # Predicted labels are 1-based indices into the URL list.
        pred_url = [ urls[int(index) - 1] for index in pred_new ]
        print("Prediction:", pred_url, np.max(pred_pro, axis=1))
if __name__ == '__main__':
    # Dispatch on the single positional mode argument: 'train' or 'serve'.
    if len(sys.argv) == 2:
        if sys.argv[1] == 'train':
            print("Training...")
            classifier_train()
            print("Training done!!!")
            exit(0)
        elif sys.argv[1] == 'serve':
            print("Serving...")
            classifier_serve()
            print("Serving done!!!")
            exit(0)
    # Unknown mode or wrong argument count.
    print("usage: doh_data_classify.py { train | serve }")
    exit(1)
    # NOTE(review): a trailing classifier_train() call stood here in the
    # original; it was unreachable after exit(1) and has been removed.
| 2.84375 | 3 |
pymtl3/passes/rtlir/util/test_utility.py | kevinyuan/pymtl3 | 152 | 12764865 | <gh_stars>100-1000
#=========================================================================
# test_utility.py
#=========================================================================
# Author : <NAME>
# Date : Feb 21, 2019
"""Test utilities used by RTLIR tests."""
from contextlib import contextmanager
import pytest
@pytest.fixture
def do_test( request ):
  """Call `local_do_test` of the requesting module."""
  # Each test module defines its own `local_do_test`; handing it out as a
  # fixture lets shared test cases dispatch to module-specific checks.
  return request.module.local_do_test
@contextmanager
def expected_failure( exception = Exception, msg = None ):
    """Mark one test case as should-fail.

    Not to be confused with pytest.xfail, which is commonly used to mark
    tests related to unimplemented functionality. The managed block passes
    only when it raises `exception` (optionally with `msg` somewhere in its
    first argument); a block that completes normally is itself an error.
    """
    try:
        yield
    except exception as err:
        # No message constraint, or the message matches: swallow and succeed.
        if msg is None or err.args[0].find( msg ) != -1:
            return
        # Right type but wrong message: let the original exception escape.
        raise
    raise Exception( 'expected-to-fail test unexpectedly passed!' )
def get_parameter( name, func ):
    """Return the values bound to parameter `name` by `func`'s parametrize mark.

    For a single-name parametrization the raw argument list is returned;
    for multi-name parametrizations the column for `name` is extracted.
    Raises if the first parametrize mark lacks `name`, or if `func` carries
    no pytest marks at all. Returns None when no parametrize mark exists.
    """
    try:
        for mark in func.pytestmark:
            if mark.name != 'parametrize':
                continue
            arg_names = mark.args[0].split()
            if name not in arg_names:
                raise Exception( f'{func} does not have parameter named {name}!' )
            if len(arg_names) == 1:
                return mark.args[1]
            column = arg_names.index(name)
            return [row[column] for row in mark.args[1]]
    except AttributeError:
        raise Exception( f'given function {func} does not have pytest marks!' )
| 2.734375 | 3 |
modelling/model/__init__.py | nulberry/ADaPT-ML | 5 | 12764866 | import os
import logging
import argparse
TMP_ARTIFACTS = '/tmp_artifacts'
X_TRAIN_FILENAME = os.path.join(TMP_ARTIFACTS, 'x_train.npy')
TRAIN_DF_FILENAME = os.path.join(TMP_ARTIFACTS, 'train.pkl')
TRAIN_DF_HTML_FILENAME = os.path.join(TMP_ARTIFACTS, 'train.html')
TEST_PRED_DF_FILENAME = os.path.join(TMP_ARTIFACTS, 'test.pkl')
TEST_PRED_DF_HTML_FILENAME = os.path.join(TMP_ARTIFACTS, 'test.html')
CONFUSION_MATRIX_FILENAME = os.path.join(TMP_ARTIFACTS, 'confusion_matrix.jpg')
STDOUT_LOG_FILENAME = os.path.join(TMP_ARTIFACTS, 'stdout_log.txt')
LOGGING_FILENAME = os.path.join(TMP_ARTIFACTS, 'log.txt')
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO, filename=LOGGING_FILENAME, filemode='w')
parser = argparse.ArgumentParser(description='Train a multi-layer perceptron classifier.')
parser.add_argument('train_path', type=str, help='File path or URL to the training data')
parser.add_argument('test_path', type=str, help='File path or URL to the test data')
parser.add_argument('features', nargs='+', type=str, help='column name(s) of the features to use.')
parser.add_argument('--verbose', default=1, type=int, choices=(0, 1), help='redirect stdout to file?')
| 2.46875 | 2 |
ScriptBox/haddSE.py | kschweiger/ToolBox | 0 | 12764867 | <gh_stars>0
from glob import glob
import sys
import os
import subprocess
#############################################################
############### Configure Logging
import logging
log_format = (
'[%(asctime)s] %(levelname)-8s %(message)s')
logging.basicConfig(
format=log_format,
level=logging.INFO,
)
#############################################################
#############################################################
def runSingleHadd(directory, SEprefix, tmpFile, altOutput, local):
    """hadd every ROOT file in *directory* into one file and deliver the result.

    directory  -- dataset directory on the SE containing the input ROOT files
    SEprefix   -- xrootd server prefix prepended to all SE paths
    tmpFile    -- local scratch directory for the intermediate tmpout.root
    altOutput  -- optional alternative SE destination directory (may be None)
    local      -- if True, keep/rename the merged file locally instead of
                  transferring it back to the SE
    Exits the process on any hadd/rename error.
    """
    if directory.endswith("/"):
        directory = directory[0:-1]
    # FIX: altOutput defaults to None; guard before calling .endswith on it.
    if altOutput is not None and altOutput.endswith("/"):
        altOutput = altOutput[0:-1]
    logging.info("Running of single directory %s", directory)
    fileList = os.listdir(directory)
    inputfiles = ""
    for file_ in fileList:
        logging.debug("Found file %s", SEprefix+directory+"/"+file_)
        inputfiles += SEprefix+directory+"/"+file_+" "
    haddCommand = "hadd {0}/tmpout.root {1}".format(tmpFile, inputfiles)
    logging.debug("Command: %s", haddCommand)
    logging.info("hadd to output file %s/tmpout.root", tmpFile)
    process = subprocess.Popen(haddCommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = process.communicate()
    if error != "":
        # Any stderr output from hadd is treated as fatal.
        for line in error.split("\n"):
            if line != "":
                logging.error(line)
        logging.info("Exiting......")
        exit()
    if not local:
        datasetName = directory.split("/")[-1]
        if altOutput is None:
            logging.info("Transferring tmpout to SE")
            transfercommand = "xrdcp --force {0}/tmpout.root {1}{2}.root".format(tmpFile, SEprefix, directory)
        else:
            logging.info("Transferring tmpout to SE with user set destination")
            transfercommand = "xrdcp --force {0}/tmpout.root {1}{2}/{3}.root".format(tmpFile, SEprefix, altOutput, datasetName)
        logging.debug(transfercommand)
        process = subprocess.Popen(transfercommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # NOTE: xrdcp stderr is deliberately not checked (best-effort transfer).
        output, error = process.communicate()
        logging.info("Transfer complete to %s.root", directory)
        process = subprocess.Popen("rm {0}/tmpout.root".format(tmpFile), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # FIX: wait for the cleanup to finish before reporting/removing.
        process.communicate()
        logging.info("Removed tmpfile")
    else:
        logging.info("Local mode! Will rename outputfile")
        datasetName = directory.split("/")[-1]
        renameCommand = "mv {0}/tmpout.root {0}/{1}.root".format(tmpFile, datasetName)
        process = subprocess.Popen(renameCommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # FIX: the original checked the stale 'error' left over from the hadd
        # step (always empty at this point) and never waited on the rename.
        output, error = process.communicate()
        if error != "":
            for line in error.split("\n"):
                if line != "":
                    logging.error(line)
            logging.info("Exiting......")
            exit()
        logging.info("Renamed to %s.root", datasetName)
def runMultHadd(startdirectory, SEprefix, tmpFile, altOutput, local):
    """Run runSingleHadd for every dataset directory directly under *startdirectory*."""
    if startdirectory.endswith("/"):
        startdirectory = startdirectory[0:-1]
    for subdir in os.listdir(startdirectory):
        runSingleHadd(startdirectory + "/" + subdir, SEprefix, tmpFile, altOutput, local)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Handling crab jobs')
parser.add_argument('--type', action="store", required=True, help="Run on single dataset or muliple", choices=["Single","Multi"], type=str)
parser.add_argument('--dir', action="store", required=True, help="Start directory", type=str)
parser.add_argument("--prefix", default="root://t3dcachedb.psi.ch/", help="server prefix to add to rootfiles", type=str)
parser.add_argument("--tmpOut", default="/mnt/t3nfs01/data01/shome/koschwei/scratch", help="Output of the tmp file", type=str)
parser.add_argument("--logging", default = "INFO", help="Set log level", type=str, choices=["INFO","DEBUG"])
parser.add_argument("--altOutput", default=None, help="Alternative output for the transfer to the SE")
parser.add_argument("--noSETransfer", action = "store_true", help="If set, the output will be renamed instead of copied to the SE")
args = parser.parse_args()
logging.getLogger().setLevel(args.logging)
if args.type == "Single":
runSingleHadd(args.dir, args.prefix, args.tmpOut, args.altOutput, args.noSETransfer)
else:
runMultHadd(args.dir, args.prefix, args.tmpOut, args.altOutput, args.noSETransfer)
| 2.375 | 2 |
datacube/__init__.py | otto-AMA/datacube-core | 2 | 12764868 | <reponame>otto-AMA/datacube-core<filename>datacube/__init__.py
"""
Datacube
========
Provides access to multi-dimensional data, with a focus on Earth observations data such as LANDSAT.
To use this module, see the `Developer Guide <http://datacube-core.readthedocs.io/en/stable/dev/developer.html>`_.
The main class to access the datacube is :class:`datacube.Datacube`.
To initialise this class, you will need a config pointing to a database, such as a file with the following::
[datacube]
db_hostname: 172.16.17.32
db_database: democube
db_username: cube_user
"""
from .version import __version__
from .api import Datacube
from .config import set_options
import warnings
from .utils import xarray_geoextensions
# Ensure deprecation warnings raised from within datacube modules are always
# shown (Python hides repeated DeprecationWarnings by default).
warnings.filterwarnings('always', category=DeprecationWarning, module=r'^datacube\.')
| 2.234375 | 2 |
python/pandemic_simulator/script_helpers/experiments.py | stacyvjong/PandemicSimulator | 0 | 12764869 | # Confidential, Copyright 2020, Sony Corporation of America, All rights reserved.
from typing import List, Optional, Sequence, Union
import numpy as np
from tqdm import trange
from .setup_sim_env import make_gym_env
from ..data.interfaces import ExperimentDataSaver, StageSchedule
from ..environment import PandemicSimOpts, PandemicSimNonCLIOpts, NoPandemicDone, PandemicRegulation, austin_regulations
from ..utils import shallow_asdict
__all__ = ['experiment_main', 'seeded_experiment_main']
def seeded_experiment_main(exp_id: int,
                           sim_opts: PandemicSimOpts,
                           sim_non_cli_opts: PandemicSimNonCLIOpts,
                           data_saver: ExperimentDataSaver,
                           pandemic_regulations: Optional[List[PandemicRegulation]] = None,
                           stages_to_execute: Union[int, Sequence[StageSchedule]] = 0,
                           enable_warm_up: bool = False,
                           max_episode_length: int = 120,
                           random_seed: int = 0) -> bool:
    """Run one seeded episode of the pandemic gym env and record it.

    A fixed stage (or schedule of stages) is executed each simulated day and
    every (observation, reward) pair is recorded through *data_saver*.
    Returns whatever data_saver.finalize reports (presumably success).
    """
    rng = np.random.RandomState(random_seed)
    env = make_gym_env(sim_opts, sim_non_cli_opts,
                       pandemic_regulations=pandemic_regulations or austin_regulations,
                       done_fn=NoPandemicDone(30), numpy_rng=rng)
    env.reset()
    # Normalize a bare int stage into a one-entry open-ended schedule.
    stages = ([StageSchedule(stage=stages_to_execute, end_day=None)]
              if isinstance(stages_to_execute, int) else stages_to_execute)
    # Metadata for the saver: -1 encodes "no end day".
    stage_dict = {f'stage_{i}': (s.stage, s.end_day if s.end_day is not None else -1)
                  for i, s in enumerate(stages)}
    data_saver.begin(env.observation)
    stage_idx = 0
    warm_up_done = not enable_warm_up
    for i in trange(max_episode_length, desc='Simulating day'):
        # Warm-up: stay at stage 0 until the infection threshold is crossed.
        if not env.observation.infection_above_threshold and not warm_up_done:
            stage = 0
        else:
            warm_up_done = True
            cur_stage = stages[stage_idx]
            stage = cur_stage.stage
            # Advance the schedule once the current stage's end day is reached.
            if cur_stage.end_day is not None and cur_stage.end_day <= i:
                stage_idx += 1
        obs, reward, done, aux = env.step(stage)
        data_saver.record(obs, reward)
        if done:
            print('done')
            break
    return data_saver.finalize(exp_id=exp_id,
                               seed=random_seed,
                               num_stages_to_execute=len(stages),
                               num_persons=sim_non_cli_opts.population_params.num_persons,
                               **stage_dict,
                               **shallow_asdict(sim_opts))
def experiment_main(exp_id: int,
                    sim_opts: PandemicSimOpts,
                    sim_non_cli_opts: PandemicSimNonCLIOpts,
                    data_saver: ExperimentDataSaver,
                    pandemic_regulations: Optional[List[PandemicRegulation]] = None,
                    stages_to_execute: Union[int, Sequence[StageSchedule]] = 0,
                    enable_warm_up: bool = False,
                    max_episode_length: int = 120,
                    num_random_seeds: int = 5) -> None:
    """Run seeded_experiment_main until *num_random_seeds* runs succeed.

    Seeds are drawn from a fixed-seed RNG so the sequence of attempted seeds
    is reproducible; failed runs are skipped and do not count.
    """
    seed_source = np.random.RandomState(seed=0)
    num_evaluated_seeds = 0
    while num_evaluated_seeds < num_random_seeds:
        seed = seed_source.randint(0, 100000)
        print(f'Running experiment seed: {seed} - {num_evaluated_seeds + 1}/{num_random_seeds}')
        succeeded = seeded_experiment_main(exp_id=exp_id,
                                           sim_opts=sim_opts,
                                           sim_non_cli_opts=sim_non_cli_opts,
                                           data_saver=data_saver,
                                           pandemic_regulations=pandemic_regulations,
                                           stages_to_execute=stages_to_execute,
                                           enable_warm_up=enable_warm_up,
                                           max_episode_length=max_episode_length,
                                           random_seed=seed)
        if succeeded:
            num_evaluated_seeds += 1
        else:
            print(f'Experiment with seed {seed} did not succeed. Skipping...')
gnn-comparison/models/graph_classifiers/DeepMultisets.py | tech-srl/bottleneck | 56 | 12764870 | import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import global_add_pool
class DeepMultisets(torch.nn.Module):
    """Deep-sets style graph classifier: embed vertices, sum-pool per graph,
    then apply a two-layer MLP readout."""

    def __init__(self, dim_features, dim_target, config):
        super(DeepMultisets, self).__init__()
        hidden = config['hidden_units']
        # Attribute names are kept so existing checkpoints keep loading.
        self.fc_vertex = Linear(dim_features, hidden)
        self.fc_global1 = Linear(hidden, hidden)
        self.fc_global2 = Linear(hidden, dim_target)

    def forward(self, data):
        node_features, batch_index = data.x, data.batch
        embedded = F.relu(self.fc_vertex(node_features))
        # Sums all vertex embeddings belonging to the same graph.
        pooled = global_add_pool(embedded, batch_index)
        pooled = F.relu(self.fc_global1(pooled))
        return self.fc_global2(pooled)
| 3.125 | 3 |
src/tests/files/test_observer.py | mhubii/GIFT-Grab | 41 | 12764871 | from pytest import fail, mark, yield_fixture, raises
try:
# in case of PyPI installation, this will work:
from giftgrab.tests.utils import FileChecker
except ImportError:
# in case of installation from source, this will work:
from utils import FileChecker
from time import sleep
from pygiftgrab import VideoSourceFactory
# Module-level state shared between the session fixture and the tests below.
factory = None  # VideoSourceFactory singleton, set by peri_test
video_duration = 0  # inferred, in sec
quarter_video_duration = 0  # inferred, in sec
@yield_fixture(scope='session')
def peri_test(colour_space, filepath,
              frame_rate, frame_count,
              frame_width, frame_height):
    """Session-wide setup: obtain the source factory and derive the test
    video's duration (and a quarter of it) from frame count and rate."""
    global factory
    factory = VideoSourceFactory.get_instance()
    global video_duration, quarter_video_duration
    video_duration = frame_count / frame_rate
    quarter_video_duration = video_duration / 4
    yield
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_valid_filepath_returns_raii_reader(
    filepath, colour_space,
    frame_rate, frame_count,
    frame_width, frame_height
):
    """Reading a valid file for its full duration must deliver frames with
    the expected colour space, rate, dimensions and data lengths."""
    source = None
    global factory
    global video_duration, quarter_video_duration
    source = factory.create_file_reader(
        filepath, colour_space
    )
    assert source is not None
    file_checker = FileChecker(source)
    file_checker.attach()
    # Observe for the whole clip so every frame is seen.
    sleep(video_duration)
    file_checker.detach()
    assert file_checker.assert_colour(colour_space)
    assert file_checker.assert_frame_rate(frame_rate)
    assert file_checker.assert_frame_dimensions(
        frame_width, frame_height)
    assert file_checker.assert_data()
    assert file_checker.assert_frame_data_lengths(
        colour_space, frame_width, frame_height)
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_reader_releases_file_on_destruction(
    filepath, colour_space,
    frame_rate, frame_count,
    frame_width, frame_height
):
    """A destroyed reader must release the file so a fresh reader can
    re-open the same path and stream it end to end."""
    source = None
    global factory
    global video_duration, quarter_video_duration
    source = factory.create_file_reader(
        filepath, colour_space
    )
    assert source is not None
    file_checker_1 = FileChecker(source)
    file_checker_1.attach()
    # Only read part of the clip before tearing the first reader down.
    sleep(quarter_video_duration)
    file_checker_1.detach()
    assert file_checker_1.assert_data()
    del file_checker_1
    del source
    source = None
    # Re-opening the same file must now succeed and behave like a fresh read.
    source = factory.create_file_reader(
        filepath, colour_space
    )
    file_checker_2 = FileChecker(source)
    file_checker_2.attach()
    sleep(video_duration)
    file_checker_2.detach()
    assert file_checker_2.assert_colour(colour_space)
    assert file_checker_2.assert_frame_rate(frame_rate)
    assert file_checker_2.assert_frame_dimensions(
        frame_width, frame_height)
    assert file_checker_2.assert_data()
    assert file_checker_2.assert_frame_data_lengths(
        colour_space, frame_width, frame_height)
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_invalid_filepath_throws_exception(colour_space):
    """Opening a non-existent path must raise and yield no reader."""
    reader = None
    global factory
    with raises(RuntimeError):
        reader = factory.create_file_reader(
            '/this/path/should/never/exist.video',
            colour_space
        )
    assert reader is None
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_set_sub_frame(
    filepath, colour_space, frame_width, frame_height
):
    """After cropping to a centred half-size ROI, delivered frames must have
    the ROI's dimensions."""
    global factory
    reader = factory.create_file_reader(
        filepath, colour_space
    )
    # Centred region of interest: quarter-offset origin, half-size extent.
    roi_x, roi_y = frame_width // 4, frame_height // 4
    roi_width, roi_height = frame_width // 2, frame_height // 2
    assert roi_x > 0 and roi_x + roi_width < frame_width
    assert roi_y > 0 and roi_y + roi_height < frame_height
    reader.set_sub_frame(roi_x, roi_y,
                         roi_width, roi_height)
    checker = FileChecker(reader)
    checker.attach()
    global video_duration
    sleep(video_duration)
    checker.detach()
    assert checker.assert_frame_dimensions(
        roi_width, roi_height)
@mark.observer_pattern
@mark.usefixtures('peri_test')
def test_get_full_frame(
    filepath, colour_space, frame_width, frame_height
):
    """After cropping to an ROI and then reverting with get_full_frame(),
    delivered frames must return to the full source dimensions."""
    global factory
    source = None
    source = factory.create_file_reader(
        filepath, colour_space
    )
    sub_x = frame_width // 4
    sub_y = frame_height // 4
    sub_width = frame_width // 2
    sub_height = frame_height // 2
    assert sub_x > 0 and sub_x + sub_width < frame_width
    assert sub_y > 0 and sub_y + sub_height < frame_height
    source.set_sub_frame(sub_x, sub_y,
                         sub_width, sub_height)
    global quarter_video_duration
    # Stream part of the clip cropped, then revert to the full frame.
    sleep(quarter_video_duration)
    source.get_full_frame()
    file_checker = FileChecker(source)
    file_checker.attach()
    global video_duration
    sleep(video_duration)
    file_checker.detach()
    assert file_checker.assert_frame_dimensions(
        frame_width, frame_height)
| 1.976563 | 2 |
fhcrc_pathology/OneFieldPerSpecimen.py | LabKey/argos_nlp | 0 | 12764872 | ''' author@esilgard'''
#
# Copyright (c) 2013-2016 <NAME> Cancer Research Center
#
# Licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0
#
import re, os
import global_strings as gb
# Directory containing this module: resource .txt files are resolved from here.
PATH = os.path.dirname(os.path.realpath(__file__)) + os.path.sep
class OneFieldPerSpecimen(object):
    '''
    extract the value of a field which has one or more values per specimen from the path text
    '''
    __version__ = 'OneFieldPerSpecimen1.0'
    ## Context that negates a finding when it appears up to 75 chars before it.
    ## FIX: the original raw string embedded a literal backslash+newline+indent
    ## between '... of |' and 'to rule out', which made the 'to rule out'
    ## alternative unmatchable; the pattern is now built by implicit
    ## string concatenation instead.
    pre_negation = (r'( not | no |negative |previous|free of |without|against '
                    r'|(hx|history) of |to rule out|preclud| insufficient|suboptimal).{,75}')
    ## Context that negates a finding when it appears up to 50 chars after it.
    post_negation = r'.{,50}( unlikely| not (likely|identif)| negative)'
    ## default False flag; True means the algorithm will infer some other value
    ## based on given input (subclasses provide infer()).
    inference_flag = False
    ## default False secondary element; True means there's another data element
    ## that should be searched for based on either position or value of the first
    has_secondary_data_element = False
    secondary_data_elements = None

    def __init__(self):
        ## 'Default' placeholders are overridden by disease-specific subclasses.
        self.specimen_field_name = 'Default'
        self.overall_field_name = 'Default'
        self.specimen_table = 'Default'
        self.overall_table = 'Default'
        self.specimen_confidence = 0.0
        self.unlabled_specimen_confidence = 0.0
        self.return_d_list = []
        ## reference lists & dictionaries ##
        self.file_name_string = 'Default'
        self.dz_specific_list = []
        self.dz_specific_standardizations = {}
        self.general_list = []
        self.general_standardizations = {}
        ## regexes for relevant/irrelevant report section headers ##
        self.good_section = 'Default'
        self.bad_section = 'Default'

    def get_version(self):
        ''' return algorithm version '''
        return self.__version__

    def get_dictionaries(self, reference_file_name_string):
        '''
        Load a resource file of semicolon-separated synonym groups.
        Returns (list of all variants, longest first; dict variant -> canonical
        form, where the canonical form is the first entry on its line).
        '''
        string_list = []
        standardizations = {}
        for line in open(PATH + reference_file_name_string + '.txt', 'r').readlines():
            strings = line.split(';')
            for each in strings:
                each = each.strip().lower()
                standardizations[each] = strings[0].strip()
                string_list.append(each)
        ## longest-first ordering lets overlap checks prefer longer matches
        string_list = sorted(string_list, key=lambda x: len(x), reverse=True)
        return string_list, standardizations

    ## loop through relevant sections findings PER SPECIMEN
    def get_specimen_finding(self, specimen, string_list, standardizations, d):
        ''' loop through sections of the report (and the SpecimenSource
        metadata) collecting non-negated string matches for one specimen.
        Returns (set of standardized findings, set of (start, stop) offsets). '''
        specimen_finding_set = set([])
        specimen_start_stops_set = set([])

        def find_string_match(text):
            ''' helper method for finding the string match instances '''
            text = text.lower()
            ## punctuation is flattened to spaces so patterns match across it
            text = re.sub(r'[.,:;\\\/\-]', ' ', text)
            for finding in string_list:
                if re.search(r'([\W]|^)' + '(' + finding + ')' + r'([\W]|$)', text) and \
                   not re.search(self.pre_negation + '(' + finding + ')' + r'([\W]|$)', text) and \
                   not re.search(r'([\W]|^)' + finding + self.post_negation, text):
                    ## only return character offsets for the regular path text
                    ## (line_onset is None/0 for the SpecimenSource metadata)
                    if line_onset:
                        start = text.find(finding) + line_onset
                        stop = start + len(finding)
                        ## only add char offsets if there is not a longer
                        ## (overlapping) string; this works because the finding
                        ## list is sorted by length
                        substring = False
                        for offsets in specimen_start_stops_set:
                            if start >= offsets[0] and start <= offsets[1]:
                                substring = True
                        if substring == False:
                            specimen_finding_set.add(standardizations[finding])
                            specimen_start_stops_set.add((start, stop))
                    else:
                        specimen_finding_set.add(standardizations[finding])

        for section in sorted(d):
            section_specimen = section[3]
            line_onset = section[2]
            header = section[1]
            if re.search(self.good_section, header) and not re.search(self.bad_section, header):
                for index, results in sorted(d[section].items(), key=lambda x: int(x[0])):
                    ## special case: SpecimenSource is metadata, not path text
                    if section == (0, 'SpecimenSource', 0, None):
                        if d[section][0].get(specimen):
                            find_string_match(d[section][0].get(specimen))
                    ## meant to weed out references to literature/papers,
                    ## publication info like this: 2001;30:1-14.
                    ## these can contain confusing general statements
                    elif re.search(r'[\d]{4}[;,][ ]*[\d]{1,4}:[\d\-]{1,6}', results):
                        pass
                    elif specimen in section_specimen:
                        find_string_match(results)
        return specimen_finding_set, specimen_start_stops_set

    def add_secondary_data_elements(self, each_field_d, full_text):
        '''
        if current class has an inferred or secondary class (like
        histology:grade or metastasis), instantiate the secondary class by
        name and append any data elements it returns
        '''
        for each_secondary_element in self.secondary_data_elements:
            module = __import__(each_secondary_element, globals(), locals(), [])
            field_class = getattr(module, each_secondary_element)
            instance = field_class()
            return_d = instance.get(each_field_d, full_text)
            if return_d:
                self.return_d_list.append(return_d)

    def get(self, disease_group, d):
        '''
        get values for data elements which potentially have separate values per
        specimen; aggregate values for a report-level finding as well
        '''
        self.general_list, self.general_standardizations = \
            self.get_dictionaries(self.file_name_string)
        self.dz_specific_list, self.dz_specific_standardizations = \
            self.get_dictionaries(disease_group + os.path.sep + self.file_name_string)
        ## general sets to track and aggregate overall findings for the report
        finding_set = set([])
        start_stops_set = set([])
        ## loop through explicitly labeled specimens, look for findings in relevant sections
        for specimen_d in d[(0, 'SpecimenSource', 0, None)].values():
            for specimen, description in specimen_d.items():
                specimen_finding_set, specimen_start_stops_set = self.get_specimen_finding(
                    specimen, self.dz_specific_list, self.dz_specific_standardizations, d)
                ## back off to general (non disease or anatomically specific) info
                if not specimen_finding_set:
                    specimen_finding_set, specimen_start_stops_set = self.get_specimen_finding(
                        specimen, self.general_list, self.general_standardizations, d)
                if specimen_finding_set:
                    if self.inference_flag:
                        specimen_finding_set = self.infer(specimen_finding_set)
                    ## drop confidence level for multiple finds
                    if len(specimen_finding_set) > 1:
                        self.specimen_confidence = self.specimen_confidence * .8
                    specimen_finding_d = {gb.NAME: self.specimen_field_name, gb.KEY: specimen,
                                          gb.TABLE: self.specimen_table,
                                          gb.VALUE: ';'.join(sorted(specimen_finding_set)),
                                          gb.CONFIDENCE: ("%.2f" % self.specimen_confidence),
                                          gb.VERSION: self.get_version(),
                                          gb.STARTSTOPS: [{gb.START: char[0], gb.STOP: char[1]}
                                                          for char in specimen_start_stops_set]}
                    self.return_d_list.append(specimen_finding_d)
                    finding_set = finding_set.union(specimen_finding_set)
                    start_stops_set = start_stops_set.union(specimen_start_stops_set)
                    if self.has_secondary_data_element == True:
                        self.add_secondary_data_elements(specimen_finding_d,
                                                         d[(-1, 'FullText', 0, None)])
        ## NOTE - this back off model only happens when no specimen specific values
        ## were found, which means it will not currently pick up "summary cancer
        ## data" if specimen values were found
        ## back off model -> cover case where there's no labeled specimen => assign to "UNK" specimen
        if not finding_set:
            specimen_finding_set, specimen_start_stops_set = self.get_specimen_finding(
                '', self.dz_specific_list, self.dz_specific_standardizations, d)
            ## back off to general findings
            if not specimen_finding_set:
                specimen_finding_set, specimen_start_stops_set = self.get_specimen_finding(
                    '', self.general_list, self.general_standardizations, d)
            if specimen_finding_set:
                finding_set = finding_set.union(specimen_finding_set)
                if self.inference_flag:
                    specimen_finding_set = self.infer(specimen_finding_set)
                start_stops_set = start_stops_set.union(specimen_start_stops_set)
                ## drop confidence level for multiple finds
                if len(specimen_finding_set) > 1:
                    self.unlabled_specimen_confidence = self.unlabled_specimen_confidence * .8
                unk_finding_d = {gb.NAME: self.specimen_field_name, gb.KEY: gb.UNK,
                                 gb.TABLE: self.specimen_table, gb.VERSION: self.get_version(),
                                 gb.VALUE: ';'.join(sorted(specimen_finding_set)),
                                 gb.CONFIDENCE: ("%.2f" % self.unlabled_specimen_confidence),
                                 gb.STARTSTOPS: [{gb.START: char[0], gb.STOP: char[1]}
                                                 for char in specimen_start_stops_set]}
                self.return_d_list.append(unk_finding_d)
                if self.has_secondary_data_element == True:
                    self.add_secondary_data_elements(unk_finding_d, d[(-1, 'FullText', 0, None)])
        ## aggregate findings of individual specimens for an overall report-level finding
        if finding_set:
            if self.inference_flag:
                finding_set = self.infer(finding_set)
            overall_finding_d = {gb.NAME: self.overall_field_name, gb.KEY: gb.ALL,
                                 gb.TABLE: self.overall_table,
                                 gb.VALUE: ';'.join(sorted(finding_set)),
                                 gb.CONFIDENCE: ("%.2f" % (sum([float(x.get(gb.CONFIDENCE))
                                                               for x in self.return_d_list])
                                                           / len(self.return_d_list))),
                                 gb.VERSION: self.get_version(),
                                 gb.STARTSTOPS: [{gb.START: char[0], gb.STOP: char[1]}
                                                 for char in start_stops_set]}
            self.return_d_list.append(overall_finding_d)
            if self.has_secondary_data_element == True:
                self.add_secondary_data_elements(overall_finding_d, d[(-1, 'FullText', 0, None)])
        ## FIX: removed a leftover debug line that referenced the undefined name
        ## 'return_d_list' via a Python-2 print statement (NameError when the
        ## field name was 'CellularityPercent').
        ## NOTE(review): the builtin 'list' as the second return element looks
        ## unintentional but is kept for caller compatibility.
        return (self.return_d_list, list)
| 2.390625 | 2 |
commandServer.py | LeHuman/PythonSocketTest | 0 | 12764873 | VERSION = "1.0.0"
HOST = "192.168.1.217"
PORT = 4578
CERT = "cert.pem"
KEY = "priv.key"
RESTARTS = 5
TARGET_SERVER = "mc.koolkidz.club"
API_URL = "https://api.mcsrvstat.us/2/" + TARGET_SERVER
print("\n----------------------------------------------")
print("Command SSL Socket Server", VERSION)
print("----------------------------------------------")
print("HOST: ", HOST)
print("PORT: ", PORT)
print("TARGET SERVER: ", TARGET_SERVER)
print("----------------------------------------------\n")
import logging
import logging.handlers
import logging.config
import socket
import ssl
import time
from datetime import datetime
from enum import Enum
from threading import Thread

import IPR

# Configure logging from the external config file; the app logs via 'root'.
logging.config.fileConfig(fname="log_config.conf", disable_existing_loggers=False)
log = logging.getLogger("root")
class Status(Enum):
    """Lifecycle states of the managed game server."""
    ON = 10
    TURNING_ON = 5
    TURNING_OFF = 2
    OFF = 0
# Human-readable label for each Status value.
statusString = {
    Status.ON: "ON",
    Status.TURNING_ON: "TURNING ON",
    Status.TURNING_OFF: "TURNING OFF",
    Status.OFF: "OFF",
}

# Module-wide state of the managed server, mutated by SERVERON/SERVEROFF.
SERVERSTATUS = Status.OFF
class Command(Enum):
    """Numeric command codes accepted from clients over the socket."""
    triggerOn = 15
    triggerOff = 12  # is this a good idea?
    check = 8
def SERVERON():
    """Request server start-up by flipping the module status to TURNING_ON.

    FIX: the original used '==' (a no-op comparison) and lacked the 'global'
    declaration, so the status never changed.
    """
    global SERVERSTATUS
    SERVERSTATUS = Status.TURNING_ON
def SERVEROFF():
    """Request server shutdown by flipping the module status to TURNING_OFF.

    FIX: the original used '==' (a no-op comparison) and lacked the 'global'
    declaration, so the status never changed.
    """
    global SERVERSTATUS
    SERVERSTATUS = Status.TURNING_OFF
def order(cmd):
    """Translate a numeric client command into an action and a reply.

    Returns a reply string, except for the 'check' command, which returns the
    current Status member.
    NOTE(review): client.msg() calls .encode() on the reply, which a Status
    object does not support — confirm what 'check' should send.
    """
    log.debug("Recieved command: " + str(cmd))
    if cmd == Command.check.value:
        # return statusString[SERVERSTATUS]
        return Status(SERVERSTATUS)
    if SERVERSTATUS == Status.ON:
        if cmd == Command.triggerOn.value:
            return "Server already on!"
        elif cmd == Command.triggerOff.value:
            SERVEROFF()
            return "Turning off server!"
    elif SERVERSTATUS == Status.OFF:
        if cmd == Command.triggerOn.value:
            SERVERON()
            return "Turning on server!"
        elif cmd == Command.triggerOff.value:
            return "Server already off!"
    # FIX: the original condition was 'cmd == triggerOn or triggerOn.value',
    # whose second operand is always truthy — any unknown command while the
    # server was transitioning reported a transition message. Both trigger
    # values must be tested against cmd.
    elif cmd == Command.triggerOn.value or cmd == Command.triggerOff.value:
        if SERVERSTATUS == Status.TURNING_OFF:
            return "Server is Turning off!"
        elif SERVERSTATUS == Status.TURNING_ON:
            return "Server is Turning on!"
    return "Unknown command"
class client(Thread):
    """Per-connection handler: reads one numeric command, replies, closes.

    NOTE: the thread starts itself in __init__, so callers only construct it.
    """

    def __init__(self, socket, address):
        Thread.__init__(self)
        self.sock = socket
        self.addr = address
        # Prefix for all log lines from this connection: "ip:port : ".
        self.straddr = str(self.addr[0]) + ":" + str(self.addr[1]) + " : "
        log.debug("New client thread: " + str(self.addr[0]) + ":" + str(self.addr[1]))
        self.start()

    def msg(self, message):
        # Send the reply encoded as bytes and mirror it to the debug log.
        # NOTE(review): order() returns a Status object for 'check' commands,
        # which has no .encode() — confirm intended reply for that case.
        self.sock.send(message.encode())
        log.debug(self.straddr + str(message))

    def end(self):
        """Close the socket; the thread exits when run() returns."""
        log.debug(self.straddr + "Closing socket and thread")
        self.sock.close()
        self._running = False

    def run(self):
        recieve = self.sock.recv(256).decode()
        try:
            recieve = int(recieve)
        # NOTE(review): bare except — presumably meant ValueError from int().
        except:
            self.msg("Bad Command")
            log.warning(self.straddr + "Bad command recieved: " + recieve)
            self.end()
            return
        self.msg(order(recieve))
        self.end()
# Server-side TLS context for the listening socket, using the local cert/key.
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain("cert.pem", "priv.key")
def socketListen():
    """Accept TLS connections and hand each off to a client thread.

    Loops until the module-level SERVERSTATUS enters a transitional state;
    connections from addresses rejected by IPR.checkIP are dropped.
    """
    log.info("Command server started")
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
        sock.bind((HOST, PORT))
        sock.listen(5)
        with context.wrap_socket(sock, server_side=True) as ssock:
            # Re-reads SERVERSTATUS each iteration so a state change stops
            # the listener after the current accept returns.
            while not (
                SERVERSTATUS == Status.TURNING_OFF or SERVERSTATUS == Status.TURNING_ON
            ):
                try:
                    clientsocket, address = ssock.accept()
                    if IPR.checkIP(address[0]):
                        # client starts its own thread in __init__.
                        client(clientsocket, address)
                    else:
                        log.warning("Blocked: " + address[0])
                except OSError as e:
                    # e.g. TLS handshake failure — log and keep accepting.
                    print(e)
def newThread(trgt):
    """Spawn and return a daemon thread running *trgt*."""
    worker = Thread(target=trgt)
    worker.daemon = True
    worker.start()
    return worker
# Watchdog: start the listener once, then poll it every minute and restart it
# on failure, up to RESTARTS times.
# FIX: the original slept before ever starting, created a brand-new listener
# thread on every iteration, checked liveness of the thread it had just
# created, and never actually restarted a dead listener.
socketThread = newThread(socketListen)
while True:
    time.sleep(60)
    if not socketThread.is_alive():
        if RESTARTS == 0:
            log.critical("Max restarts has been hit, server must be manually restared")
            break
        RESTARTS -= 1
        log.error("Server seems to have crashed... Attempting restart")
        socketThread = newThread(socketListen)
        # TODO: how are we actually turning the server on?
promoterz/evaluationPool.py | mczero80/japonicus | 229 | 12764874 | <filename>promoterz/evaluationPool.py
#!/bin/python
import time
import random
import itertools
from multiprocessing import Pool, TimeoutError
from multiprocessing.pool import ThreadPool
class EvaluationPool():
    """Distributes individual fitness evaluations over local/remote backends.

    One backend per entry in Urls; per-backend timing statistics are kept to
    derive timeouts for remote machines. Index 0 is treated as the local
    machine (it gets no timeout and absorbs work from timed-out remotes).
    """

    def __init__(self,
                 World,
                 Urls, poolsize, individual_info):
        self.World = World
        self.Urls = Urls
        # Per-backend wall-clock of the last batch and per-individual average.
        self.lasttimes = [0 for x in Urls]
        self.lasttimesperind = [0 for x in Urls]
        self.poolsizes = [poolsize for x in Urls]
        self.individual_info = individual_info

    def evaluateBackend(self, datasets, I, inds):
        """Evaluate every (dataset, individual) pair on backend I.

        Returns (list of fitness results, elapsed seconds for the batch).
        """
        stime = time.time()
        dateInds = list(itertools.product(datasets, inds))
        # print(list(dateInds))
        Q = [
            ([dataset], Ind, self.Urls[I])
            for dataset, Ind in dateInds
        ]
        P = Pool(self.poolsizes[I])
        fitnesses = P.starmap(self.World.tools.Evaluate, Q)
        P.close()
        P.join()
        delta_time = time.time() - stime
        return fitnesses, delta_time

    def evaluatePopulation(self, locale):
        """Evaluate all not-yet-evaluated individuals of *locale*.

        Work is split across backends (one thread per backend); a remote
        backend that exceeds 3x its historical per-individual time is
        re-run on the local machine and then ejected from the pool.
        Returns (number of evaluated individuals, average trade count).
        """
        individues_to_simulate = [
            ind for ind in locale.population if not ind.fitness.valid
        ]
        # NOTE(review): distributeIndividuals is not defined in this class —
        # presumably provided by a subclass/mixin; confirm.
        props = self.distributeIndividuals(individues_to_simulate)
        args = [
            [
                locale.Dataset,
                I,
                props[I],
            ]
            for I in range(len(self.Urls))
        ]
        pool = ThreadPool(len(self.Urls))
        results = []
        try:
            for A in args:
                results.append(pool.apply_async(self.evaluateBackend, A))
            pool.close()
        except (SystemExit, KeyboardInterrupt):
            print("Aborted by user.")
            exit(0)

        TimedOut = []
        for A in range(len(results)):
            try:
                # Timeout heuristic: 3x the historical per-individual time
                # (12 s fallback), scaled by batch size; backend 0 (local
                # machine) gets no timeout.
                perindTime = 3 * self.lasttimesperind[A]\
                    if self.lasttimesperind[A] else 12
                timeout = perindTime * len(props[A])\
                    if A else None  # no timeout for local machine;
                results[A] = results[A].get(timeout=timeout)
            except TimeoutError:  # Timeout: remote machine is dead;
                print("Machine timeouts!")
                args[A][1] = 0  # Set to evaluate @ local machine
                results[A] = self.evaluateBackend(* args[A])
                TimedOut.append(A)
        pool.join()

        TotalNumberOfTrades = 0
        for PoolIndex in range(len(results)):
            for i, fit in enumerate(results[PoolIndex][0]):
                if self.individual_info:
                    print(self.World.tools.showIndividue(fit))
                self.World.tools.ApplyResult(fit, props[PoolIndex][i])
                TotalNumberOfTrades += fit['trades']
            # Refresh timing stats for this backend.
            self.lasttimes[PoolIndex] = results[PoolIndex][1]
            L = len(props[PoolIndex])
            self.lasttimesperind[PoolIndex] =\
                self.lasttimes[PoolIndex] / L if L else 5

        # Sanity check: every scheduled individual must now carry a fitness.
        F = [x.fitness.valid for x in individues_to_simulate]
        assert (all(F))
        # NOTE(review): ejectURL is not defined in this class either — verify
        # where it comes from before relying on the timeout path.
        for T in TimedOut:
            self.ejectURL(T)

        N = len(individues_to_simulate)
        # RECORD NUMBER OF EVALUATIONS;
        locale.World.totalEvaluations += N
        # CALCULATE AVERAGE TRADE NUMBER;
        averageTrades = TotalNumberOfTrades / max(1, N)
        return N, averageTrades
| 2.578125 | 3 |
website/website/apps/cognacy/urls.py | SimonGreenhill/Language5 | 1 | 12764875 | <gh_stars>1-10
from django.conf.urls import *
from website.apps.cognacy import views as v
# URL routes for the cognacy app: public listing/detail views, then
# login-protected coding ("do"), saving and merging endpoints.
urlpatterns = [
    # PUBLIC
    # index page -- two names, index/source_index to allow later respecification
    url(r'^$', v.CognateSourceIndex.as_view(), name="index"),
    url(r'^$', v.CognateSourceIndex.as_view(), name="source_index"),

    # detail: details of cognate sets for one source, addressed by slug
    url(r'^source/(?P<slug>[\w\d\-\.]+)$',
        v.CognateSourceDetail.as_view(), name="cognatesource_detail"
    ),

    # PRIVATE
    url(r'^view/(?P<pk>\d+)$', v.CognateSetDetail.as_view(), name="detail"),
    url(r'^do/$', v.do_index, name="do_index"),
    # word + clade selection drive the coding, save and merge actions below
    url(r'^do/(?P<word>[\w\d\-\.]+)/(?P<clade>.*)$', v.do, name="do"),
    url(r'^save/(?P<word>[\w\d\-\.]+)/(?P<clade>.*)$', v.save, name="save"),
    url(r'^merge/(?P<word>[\w\d\-\.]+)/(?P<clade>.*)$', v.merge, name="merge"),
]
| 2.03125 | 2 |
target.py | kirtis26/PyMissile | 0 | 12764876 | import numpy as np
from math import *
from interpolation import InterpVec
class Target(object):
    """A point target moving in 2-D.

    The velocity is supplied by a time-interpolation object (callable
    ``t -> (vx, vy)``) and the position is integrated with a fixed-step
    Euler scheme.  The state vector layout is ``[x, y, t]``.
    """

    @classmethod
    def get_simple_target(cls, pos, vel):
        """Build a target at position ``pos`` with constant velocity ``vel``."""
        vel_table = [[0, np.array(vel)]]
        target = cls(vel_interp=InterpVec(vel_table))
        target.set_init_cond(parameters_of_target=np.array([pos[0], pos[1], 0]))
        return target

    def __init__(self, *args, **kwargs):
        self.g = kwargs.get('g', 9.80665)       # gravitational acceleration (unused by the kinematics here)
        self.dt = kwargs.get('dt', 0.001)       # integration step, seconds
        self.vel_interp = kwargs['vel_interp']  # callable: t -> (vx, vy)

    def set_init_cond(self, parameters_of_target=None):
        """Set (and remember) the initial ``[x, y, t]`` state."""
        if parameters_of_target is None:
            # NOTE(review): get_standart_parameters_of_target is not defined
            # on this class -- presumably provided by a subclass; confirm
            # before calling set_init_cond() without arguments.
            parameters_of_target = self.get_standart_parameters_of_target()
        self.state = np.array(parameters_of_target)
        self.state_0 = np.array(parameters_of_target)

    def reset(self):
        """Rewind the target to its initial state."""
        self.set_state(self.state_0)

    def set_state(self, state):
        self.state = np.array(state)

    def get_state(self):
        return self.state

    def get_state_0(self):
        return self.state_0

    def step(self, tau):
        """Advance the target by ``tau`` seconds.

        Integrates with fixed step ``self.dt``; the final sub-step is
        shortened so the trajectory lands exactly on ``t + tau``.
        """
        x, y, t = self.state
        t_stop = t + tau
        last_step = False
        while not last_step:
            remaining = t_stop - t
            if remaining > self.dt:
                dt = self.dt
            else:
                dt = remaining
                last_step = True
            t += dt
            vx, vy = self.vel_interp(t)
            x += vx * dt
            y += vy * dt
        self.set_state([x, y, t])

    @property
    def pos(self):
        """Current position ``[x, y]``."""
        return self.state[:2]

    @property
    def vel(self):
        """Current velocity ``(vx, vy)`` from the interpolator."""
        return self.vel_interp(self.t)

    @property
    def t(self):
        """Current simulation time."""
        return self.state[-1]

    @property
    def Q(self):
        # NOTE(review): identical to ``v`` (speed), yet get_summary() passes
        # it through np.degrees as if it were an angle -- confirm intent.
        vx, vy = self.vel_interp(self.t)
        return np.sqrt(vx ** 2 + vy ** 2)

    @property
    def v(self):
        """Speed (magnitude of the velocity vector)."""
        vx, vy = self.vel_interp(self.t)
        return np.sqrt(vx ** 2 + vy ** 2)

    @property
    def x(self):
        return self.pos[0]

    @property
    def y(self):
        return self.pos[1]

    def get_summary(self):
        """Snapshot of the current state as a plain dict."""
        return {
            't': self.t,
            'v': self.v,
            'x': self.x,
            'y': self.y,
            'Q': np.degrees(self.Q)
        }
tests/test_modules/test_web/test_system_websocket.py | MattTaylorDLS/pymalcolm | 0 | 12764877 | import unittest
import json
from tornado.websocket import websocket_connect
from tornado import gen
from malcolm.core import Process, call_with_params, Queue, Context, \
ResponseError
from malcolm.modules.builtin.blocks import proxy_block
from malcolm.modules.demo.blocks import hello_block, counter_block
from malcolm.modules.web.blocks import web_server_block, websocket_client_block
class TestSystemWSCommsServerOnly(unittest.TestCase):
    """System test of the web server block alone: a raw tornado websocket
    client sends a malcolm Post request and expects a Return response."""

    socket = 8881  # TCP port the web server block listens on

    def setUp(self):
        # One process hosting a demo "hello" block plus the web server block.
        self.process = Process("proc")
        self.hello = call_with_params(hello_block, self.process, mri="hello")
        self.server = call_with_params(
            web_server_block, self.process, mri="server", port=self.socket)
        self.result = Queue()  # hands the async response back to the test thread
        self.process.start()

    def tearDown(self):
        self.process.stop(timeout=1)

    @gen.coroutine
    def send_message(self):
        # Runs on the server's IOLoop: connect, send one Post, stash the reply.
        conn = yield websocket_connect("ws://localhost:%s/ws" % self.socket)
        req = dict(
            typeid="malcolm:core/Post:1.0",
            id=0,
            path=["hello", "greet"],
            parameters=dict(
                name="me"
            )
        )
        conn.write_message(json.dumps(req))
        resp = yield conn.read_message()
        resp = json.loads(resp)
        self.result.put(resp)
        conn.close()

    def test_server_and_simple_client(self):
        # Schedule the coroutine on the server's own event loop, then block
        # on the queue until the websocket round trip completes (2s timeout).
        self.server._loop.add_callback(self.send_message)
        resp = self.result.get(timeout=2)
        assert resp == dict(
            typeid="malcolm:core/Return:1.0",
            id=0,
            value=dict(
                typeid='malcolm:core/Map:1.0',
                greeting="Hello me",
            )
        )
class TestSystemWSCommsServerAndClient(unittest.TestCase):
    """System test of server + websocket client comms: a second process
    reaches the first process's blocks through proxy blocks over a websocket."""

    socket = 8883  # TCP port shared by server and client blocks

    def setUp(self):
        # Server-side process exposing "hello" and "counter" demo blocks.
        self.process = Process("proc")
        self.hello = call_with_params(hello_block, self.process, mri="hello")
        self.counter = call_with_params(
            counter_block, self.process, mri="counter")
        self.server = call_with_params(
            web_server_block, self.process, mri="server", port=self.socket)
        self.process.start()
        # Client-side process that connects back over the websocket.
        self.process2 = Process("proc2")
        self.client = call_with_params(
            websocket_client_block, self.process2, mri="client",
            port=self.socket)
        self.process2.start()

    def tearDown(self):
        # NOTE(review): this creates an *instance* attribute, so the class
        # attribute used by the next test instance is unchanged -- confirm
        # whether the port bump was meant to take effect across tests.
        self.socket += 1
        self.process.stop(timeout=1)
        self.process2.stop(timeout=1)

    def test_server_hello_with_malcolm_client(self):
        # Proxy the remote "hello" block into process2 and call it.
        call_with_params(
            proxy_block, self.process2, mri="hello", comms="client")
        block2 = self.process2.block_view("hello")
        ret = block2.greet("me2")
        assert ret == dict(greeting="Hello me2")
        # Remote errors must surface locally as ResponseError.
        with self.assertRaises(ResponseError):
            block2.error()

    def test_server_counter_with_malcolm_client(self):
        # Proxy the remote "counter" block and drive it through a full cycle.
        call_with_params(
            proxy_block, self.process2, mri="counter", comms="client")
        block2 = self.process2.block_view("counter")
        assert block2.counter.value == 0
        block2.increment()
        assert block2.counter.value == 1
        block2.zero()
        assert block2.counter.value == 0
        # The client should have discovered all blocks hosted by the server.
        assert self.client.remote_blocks.value == (
            "hello", "counter", "server")
src/yamagics/yamagicscore.py | addamit/youngatlas | 4 | 12764878 | """
IPYthon Magics Extension to play audio without displaying the audio widget.
"""
from yaserver import QUOTES_LOCATION, YASERVER_URI
import os
import random
import pathlib
import inspect
from typing import Optional
from IPython import get_ipython
from IPython.display import Audio, display
from IPython.core.magic import line_cell_magic, Magics, magics_class
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
# https://stackoverflow.com/questions/61176900/jupyter-colab-play-sound-with-any-error-in-any-cell-play-sound-after-compl/61176901
from IPython.core.ultratb import AutoFormattedTB
# Catch any Exception, play error sound and re-raise the Exception
# -------------------------------------------------
# Initialize the traceback formatter (turns tracebacks into plain strings).
# NOTE(review): `itb` is never referenced in this module's visible code --
# confirm it is used elsewhere before removing.
itb = AutoFormattedTB(mode='Plain', tb_offset=1)
all_choices = []  # quote filenames; populated by load_ipython_extension()
included_extensions = ['mp3']  # only files with these extensions are collected
class _InvisibleAudio(Audio):
    """An `Audio` element that is hidden (`display: none`) and removes
    itself from the DOM when playback finishes.

    Original sample based on https://stackoverflow.com/a/50648266.
    """

    def _repr_html_(self) -> str:
        markup = super()._repr_html_()
        # Hook the element's `onended` event so it cleans itself up.
        hook = '<audio onended="this.parentNode.removeChild(this)"'
        markup = markup.replace("<audio", hook)
        # Wrap in an invisible container so no widget is shown.
        return f'<div style="display:none">{markup}</div>'
        # return f'<div">{audio}</div>'
@magics_class
class NotificationMagics(Magics):
    """
    IPython line/cell magic (%yamoment) that executes the given code and
    then plays a random audio quote through an invisible audio element.
    """

    @magic_arguments()
    @argument(
        "-u",
        "--url",
        default="quote1.mp3",
        help="URL of audio file to play.",
    )
    @argument(
        "line_code",
        nargs="*",
        help="Other code on the line will be executed, unless this is called as a cell magic.",
    )
    @line_cell_magic
    def yamoment(self, line: str, cell: Optional[str] = None):
        args = parse_argstring(self.yamoment, line)
        # Debug mode is only enabled when the *entire* line is exactly
        # '#MOMENTDEBUG' (any other arguments disable it).
        MOMENTDEBUG = False
        if line and line == '#MOMENTDEBUG':
            MOMENTDEBUG = True

        # Cell magic: run the cell body; line magic: run the rest of the line.
        code = cell if cell else " ".join(args.line_code)
        try:
            # NOTE(review): InteractiveShell.ex() typically returns None, so
            # `ret` is likely always None -- confirm whether a return value
            # was ever expected here.
            ret = self.shell.ex(code)
        finally:
            # Always play a quote, even if the user code raised.
            quote_url = random.choice(all_choices)
            audio = _InvisibleAudio(
                url='{}/{}'.format(YASERVER_URI, quote_url), autoplay=True)
            if MOMENTDEBUG:
                print("[MomentAudio]:{}".format(quote_url))
            display(audio)
        return ret
def load_ipython_extension(ipython):
    """IPython extension entry point: register the magics and collect the
    available quote files (by extension) from the quotes directory."""
    ipython.register_magics(NotificationMagics)
    all_choices.extend(
        name for name in os.listdir(QUOTES_LOCATION)
        if any(name.endswith(ext) for ext in included_extensions)
    )
# ipython.shell.set_custom_exc((Exception,), custom_exc)
# get_ipython().register_magics(NotificationMagics)
| 2.859375 | 3 |
mongoop/default_settings.py | Lujeni/mongoop | 41 | 12764879 | <filename>mongoop/default_settings.py
# -*- coding: utf-8 -*-
"""
mongoop.default_settings
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by Lujeni.
:license: BSD, see LICENSE for more details.
"""
# Default connection settings for the target MongoDB instance.
mongodb_host = 'localhost'
mongodb_port = 27017
mongodb_credentials = None  # e.g. dict with username/password; None = no auth
mongodb_options = None      # extra driver options; None = driver defaults
# Polling/trigger defaults (presumably seconds -- confirm against the
# mongoop scheduler before relying on the unit).
frequency = 10
threshold_timeout = 60
op_triggers = None        # triggers fired on slow operations
balancer_triggers = None  # triggers fired on balancer state
query = None              # extra filter applied to current-op queries
| 1.007813 | 1 |
LeetCode/160_intersection_of_two_linked_lists/getIntersectionNode.py | harveyc95/ProgrammingProblems | 0 | 12764880 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """Return the node where the two singly linked lists intersect,
        or None if they are disjoint.

        Two-pointer walk: each pointer traverses its own list and then
        switches to the other's head.  Both cover length (m + n), so they
        meet at the intersection node -- or at None simultaneously.
        """
        p, q = headA, headB
        while p != q:
            p = p.next if p else headB
            q = q.next if q else headA
        return p
| 3.40625 | 3 |
dcgan.py | czhongyu/generative-models | 2 | 12764881 | <filename>dcgan.py<gh_stars>1-10
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torchvision import datasets, transforms
from torchvision.utils import save_image
import configparser
import numpy as np
from tensorboardX import SummaryWriter
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
def weights_init(m):
    """DCGAN weight initialization, applied via ``net.apply(weights_init)``.

    Conv/ConvTranspose layers get weights ~ N(0, 0.02); batch-norm layers
    get weights ~ N(1, 0.02) and zero bias.  Matching is done on the class
    name so both Conv2d and ConvTranspose2d are covered.
    """
    name = m.__class__.__name__
    if 'Conv' in name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
class Generator(nn.Module):
    """DCGAN generator: maps a (N, nz, 1, 1) latent code to a
    (N, nc, 64, 64) image squashed to [-1, 1] by the final Tanh."""

    def __init__(self, ngpu, nz, ngf, nc):
        super(Generator, self).__init__()
        self.ngpu = ngpu
        layers = [
            # latent Z -> (ngf*8) x 4 x 4
            nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # -> (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # -> (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # -> (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # -> (nc) x 64 x 64 in [-1, 1]
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Run the net; split the batch across GPUs when on CUDA with ngpu > 1."""
        if input.is_cuda and self.ngpu > 1:
            return nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        return self.main(input)
class Discriminator(nn.Module):
    """DCGAN discriminator: maps a (N, nc, 64, 64) image to a per-sample
    probability (N,) of being real, via strided convs and a final Sigmoid."""

    def __init__(self, ngpu, nc, ndf):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        layers = [
            # (nc) x 64 x 64 -> (ndf) x 32 x 32
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # -> (ndf*2) x 16 x 16
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # -> (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # -> (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # -> 1 x 1 x 1 score in (0, 1)
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid(),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        """Run the net; split the batch across GPUs when on CUDA with ngpu > 1."""
        if input.is_cuda and self.ngpu > 1:
            out = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            out = self.main(input)
        return out.view(-1, 1).squeeze(1)
def train(epoch):
    """Run one DCGAN training epoch (standard alternating D/G updates).

    Relies on module-level globals set in __main__: train_loader, netD,
    netG, criterion, optimizerD/G, device, z_dim, real_label, fake_label,
    fixed_noise, writer, log_interval, epochs, config.
    """
    for i, data in enumerate(train_loader):
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # train with real data
        netD.zero_grad()
        real_cpu = data[0].to(device)
        batch_size = real_cpu.size(0)
        label = torch.full((batch_size,), real_label, device=device)

        output = netD(real_cpu)
        errD_real = criterion(output, label) # loss on real batch
        errD_real.backward()
        D_x = output.mean().item()

        # train with fake data (detach so G gets no gradient here)
        noise = torch.randn(batch_size, z_dim, 1, 1, device=device)
        fake = netG(noise)
        label.fill_(fake_label)
        output = netD(fake.detach())
        errD_fake = criterion(output, label) # loss on fake batch
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        errD = errD_real + errD_fake
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        label.fill_(real_label)  # fake labels are real for generator cost
        output = netD(fake)
        errG = criterion(output, label)
        errG.backward()
        D_G_z2 = output.mean().item()
        optimizerG.step()

        # log scalars under a global batch counter so curves are continuous
        batch_count = (epoch - 1) * len(train_loader) + i + 1
        writer.add_scalar('Loss_D', errD.item(), batch_count)
        writer.add_scalar('Loss_G', errG.item(), batch_count)
        writer.add_scalar('D_x', D_x, batch_count)
        writer.add_scalar('D_G_z1', D_G_z1, batch_count)
        writer.add_scalar('D_G_z2', D_G_z2, batch_count)

        if i % log_interval == 0:
            print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                  % (epoch, epochs, i, len(train_loader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
            # real samples
            n = min(real_cpu.size(0), 64)
            save_image(real_cpu[:n], config['dcgan']['result_path'] + 'real_samples.png', normalize=True)
            # generated samples from the fixed noise (comparable across epochs)
            with torch.no_grad():
                fake = netG(fixed_noise)
            save_image(fake.detach()[:n], config['dcgan']['result_path'] + 'fake_samples_epoch_%03d.png' % epoch, normalize=True)

    # do checkpointing at the end of every epoch
    torch.save(netG.state_dict(), config['dcgan']['model_path'] + 'netG_epoch_%03d.pth' % epoch)
    torch.save(netD.state_dict(), config['dcgan']['model_path'] + 'netD_epoch_%03d.pth' % epoch)
def main():
    """Run `train` once per epoch, numbering epochs from 1 to `epochs`."""
    epoch = 1
    while epoch <= epochs:
        train(epoch)
        epoch += 1
if __name__ == "__main__":
    # command-line args: only the GPU count is configurable here
    parser = argparse.ArgumentParser()
    parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
    args = parser.parse_args()
    ngpu = int(args.ngpu)

    # all remaining hyper-parameters come from config.ini
    config = configparser.ConfigParser()
    config.read("config.ini")

    # seed every RNG for reproducibility
    seed = int(config['dcgan']['seed'])
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # how often train() prints/saves sample grids
    log_interval = int(config['dcgan']['log_interval'])

    # data set: MNIST resized to the DCGAN input size, normalized to [-1, 1]
    os.makedirs(config['data']['path'], exist_ok=True)
    data_transforms = transforms.Compose([
        transforms.Resize(int(config['dcgan']['image_size'])),
        transforms.ToTensor(),
        transforms.Normalize((0.5, ), (0.5, )),
    ])
    full_set = datasets.MNIST(config['data']['path'], train=True, download=True, transform=data_transforms)
    # deterministic train/dev split by index (no shuffling before the split)
    train_amount = int(len(full_set) * (1. - float(config['data']['dev_ratio'])))
    train_set = torch.utils.data.dataset.Subset(full_set, np.arange(train_amount))
    dev_set = torch.utils.data.dataset.Subset(full_set, np.arange(train_amount, len(full_set)))
    test_set = datasets.MNIST(config['data']['path'], train=False, download=True, transform=data_transforms)
    print('dataset size', len(train_set), len(dev_set), len(test_set))
    print('data size', train_set[0][0].shape)

    # data loaders (dev/test loaders are unused by train() -- kept for parity)
    batch_size = int(config['dcgan']['batch_size'])
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, **kwargs)
    dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=batch_size, shuffle=False, **kwargs)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, **kwargs)

    # model hyper-parameters
    color_channels = train_set[0][0].shape[0]
    z_dim = int(config['dcgan']['z_dim'])
    g_feature_map = int(config['dcgan']['g_feature_map'])
    d_feature_map = int(config['dcgan']['d_feature_map'])

    # build and initialize the two networks
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    netG = Generator(ngpu, nz=z_dim, ngf=g_feature_map, nc=color_channels).to(device)
    netG.apply(weights_init)
    print(netG)
    netD = Discriminator(ngpu, nc=color_channels, ndf=d_feature_map).to(device)
    netD.apply(weights_init)
    print(netD)

    # Adam optimizers per the DCGAN recipe (beta1 from config)
    lr = float(config['dcgan']['lr'])
    beta1 = float(config['dcgan']['beta1'])
    optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))

    # binary cross-entropy on D's sigmoid output
    criterion = nn.BCELoss()

    # fixed noise reused every epoch so generated sample grids are comparable
    fixed_noise = torch.randn(batch_size, z_dim, 1, 1, device=device)

    # label values for real/fake batches
    real_label = 1
    fake_label = 0

    # output directories and TensorBoard writer
    os.makedirs(config['dcgan']['log_path'], exist_ok=True)
    os.makedirs(config['dcgan']['model_path'], exist_ok=True)
    os.makedirs(config['dcgan']['result_path'], exist_ok=True)
    writer = SummaryWriter(config['dcgan']['log_path'])

    epochs = int(config['dcgan']['epochs'])
    main()
    writer.close()
| 2.25 | 2 |
train.py | cwyd0822/cifar10-classification-tensorflow-slim | 2 | 12764882 | import tensorflow as tf
import readcifar10
slim = tf.contrib.slim
import os
import resnet
# Network definition.
# image: a batch of input images (NHWC).
# Returns a batch of 10-dim logit vectors.
def model(image, keep_prob=0.8, is_training=True):
    """VGG-style conv net for CIFAR-10.

    Three conv/conv/pool stages, one extra conv, global average pooling,
    then two fully connected layers.

    Args:
        image: float tensor of shape [N, 32, 32, 3].
        keep_prob: dropout keep probability for the 1024-unit FC layer.
        is_training: controls batch-norm statistics updates and dropout.

    Returns:
        [N, 10] logits tensor.
    """
    batch_norm_params = {
        "is_training": is_training,
        "epsilon": 1e-5,  # guards against division by zero
        "decay": 0.997,  # moving-average decay for batch-norm statistics
        'scale': True,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }

    with slim.arg_scope(
            [slim.conv2d],  # default hyper-parameters for every conv layer
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,  # default activation
            weights_regularizer=slim.l2_regularizer(0.0001),  # L2 weight decay
            normalizer_fn=slim.batch_norm,  # batch-norm after every conv
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.max_pool2d], padding="SAME"):  # default max-pool padding
            net = slim.conv2d(image, 32, [3, 3], scope='conv1')
            net = slim.conv2d(net, 32, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')  # 2x downsampling
            net = slim.conv2d(net, 64, [3, 3], scope='conv3')
            net = slim.conv2d(net, 64, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool2')
            net = slim.conv2d(net, 128, [3, 3], scope='conv5')
            net = slim.conv2d(net, 128, [3, 3], scope='conv6')
            net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool3')
            net = slim.conv2d(net, 256, [3, 3], scope='conv7')
            net = tf.reduce_mean(net, axis=[1, 2])  # global average pool: nhwc -> nc
            net = slim.flatten(net)
            net = slim.fully_connected(net, 1024)
            # BUG FIX: the original discarded slim.dropout's return value, so
            # dropout was never actually applied.  Assign it back, and gate it
            # on is_training so evaluation runs deterministically.
            net = slim.dropout(net, keep_prob, is_training=is_training)
            net = slim.fully_connected(net, 10)
    return net  # 10 dim vec
def loss(logits, label):
    """Build the total training loss: softmax cross-entropy + L2 regularization.

    Returns (total_loss, l2_loss) tensors.
    """
    one_hot_label = slim.one_hot_encoding(label, 10) # one-hot encode the labels
    slim.losses.softmax_cross_entropy(logits, one_hot_label) # cross-entropy loss (added to slim's loss collection)

    # regularization loss
    reg_set = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) # collection of all regularization losses
    l2_loss = tf.add_n(reg_set)
    slim.losses.add_loss(l2_loss)

    totalloss = slim.losses.get_total_loss()

    return totalloss, l2_loss
# Optimizer setup.
def func_optimal(batchsize, loss_val):
    """Build the Adam train op with an exponentially decaying learning rate.

    Batch-norm moving averages (UPDATE_OPS) are run before each step.
    Returns (global_step, train_op, learning_rate).
    """
    # global step counter, starting from 0
    global_step = tf.Variable(0, trainable=False)
    # learning rate with exponential decay
    lr = tf.train.exponential_decay(0.01,
                                    global_step,
                                    decay_steps=50000 // batchsize,  # decay once per epoch of steps
                                    decay_rate=0.95,  # decay factor
                                    staircase=False)  # smooth (non-staircase) decay

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        op = tf.train.AdamOptimizer(lr).minimize(loss_val, global_step)
    return global_step, op, lr
def train():
    """
    Training loop: builds the graph, restores the latest checkpoint if any,
    and alternates training steps with periodic test-set evaluation,
    TensorBoard logging and checkpointing.
    :return:
    """
    batchsize = 64
    folder_log = 'logdirs'  # directory for TensorBoard logs
    folder_model = 'model'  # directory for model checkpoints

    if not os.path.exists(folder_log):
        os.mkdir(folder_log)

    if not os.path.exists(folder_model):
        os.mkdir(folder_model)

    tr_summary = set()
    te_summary = set()

    # data
    tr_im, tr_label = readcifar10.read(batchsize, 0, 1) # training samples
    te_im, te_label = readcifar10.read(batchsize, 1, 0) # test samples

    # network definition
    # input placeholder
    input_data = tf.placeholder(tf.float32, shape=[None, 32, 32, 3],
                                name='input_data')
    # label placeholder
    input_label = tf.placeholder(tf.int64, shape=[None],
                                name='input_label')
    keep_prob = tf.placeholder(tf.float32, shape=None,
                                name='keep_prob')
    is_training = tf.placeholder(tf.bool, shape=None,
                                name='is_training')

    # logits = resnet.model_resnet(input_data, keep_prob=keep_prob, is_training=is_training)
    logits = model(input_data, keep_prob=keep_prob, is_training=is_training)

    # loss definition
    total_loss, l2_loss = loss(logits, input_label)

    # NOTE(review): these two summary adds are repeated verbatim below;
    # each call creates a distinct op, so the set keeps both copies --
    # looks like accidental duplication, confirm before removing.
    tr_summary.add(tf.summary.scalar('train total loss', total_loss))
    te_summary.add(tf.summary.scalar('test l2_loss', l2_loss))

    tr_summary.add(tf.summary.scalar('train total loss', total_loss))
    te_summary.add(tf.summary.scalar('test l2_loss', l2_loss))

    # accuracy ("accurancy" spelling kept for checkpoint/graph compatibility)
    pred_max = tf.argmax(logits, 1)
    correct = tf.equal(pred_max, input_label)
    accurancy = tf.reduce_mean(tf.cast(correct, tf.float32))
    tr_summary.add(tf.summary.scalar('train accurancy', accurancy))
    te_summary.add(tf.summary.scalar('test accurancy', accurancy))

    # optimizer op
    global_step, op, lr = func_optimal(batchsize, total_loss)
    tr_summary.add(tf.summary.scalar('train lr', lr))
    te_summary.add(tf.summary.scalar('test lr', lr))
    tr_summary.add(tf.summary.image('train image', input_data * 128 + 128))  # undo the input normalization for display
    te_summary.add(tf.summary.image('test image', input_data * 128 + 128))

    with tf.Session() as sess:
        # initialize global and local variables
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer()))
        # start the input-queue runner threads
        tf.train.start_queue_runners(sess=sess,
                                     coord=tf.train.Coordinator())

        saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
        # resume from the latest checkpoint when one exists
        ckpt = tf.train.latest_checkpoint(folder_model)
        if ckpt:
            saver.restore(sess, ckpt)

        epoch_val = 100

        # merge the per-phase summaries
        tr_summary_op = tf.summary.merge(list(tr_summary))
        te_summary_op = tf.summary.merge(list(te_summary))

        summary_writer = tf.summary.FileWriter(folder_log, sess.graph)

        for i in range(50000 * epoch_val):
            # fetch one batch of training data
            train_im_batch, train_label_batch = \
                sess.run([tr_im, tr_label])
            feed_dict = {
                input_data: train_im_batch,
                input_label: train_label_batch,
                keep_prob: 0.8,
                is_training: True
            }

            _, global_step_val, \
            lr_val, \
            total_loss_val, \
            accurancy_val, tr_summary_str = sess.run([op,
                              global_step,
                              lr,
                              total_loss,
                              accurancy, tr_summary_op],
                             feed_dict=feed_dict)

            summary_writer.add_summary(tr_summary_str, global_step_val)

            # print progress every 100 steps
            if i % 100 == 0:
                print("{},{},{},{}".format(global_step_val,
                                           lr_val, total_loss_val,
                                           accurancy_val))

            # evaluate on the test set roughly once per training epoch
            if i % (50000 // batchsize) == 0:
                test_loss = 0
                test_acc = 0
                for ii in range(10000//batchsize):
                    test_im_batch, test_label_batch = \
                        sess.run([te_im, te_label])
                    feed_dict = {
                        input_data: test_im_batch,
                        input_label: test_label_batch,
                        keep_prob: 1.0,
                        is_training: False
                    }

                    total_loss_val, global_step_val, \
                    accurancy_val, te_summary_str = sess.run([total_loss,global_step,
                                      accurancy, te_summary_op],
                                     feed_dict=feed_dict)

                    summary_writer.add_summary(te_summary_str, global_step_val)

                    test_loss += total_loss_val
                    test_acc += accurancy_val

                print('test:', test_loss * batchsize / 10000,
                      test_acc * batchsize / 10000)

            # checkpoint every 1000 steps
            if i % 1000 == 0:
                saver.save(sess, "{}/model.ckpt{}".format(folder_model, str(global_step_val)))
    return
if __name__ == '__main__':
    # script entry point: run the full training loop
    train()
cat_detector/observer/main.py | axbg/workshop-asmi-2020 | 1 | 12764883 | <filename>cat_detector/observer/main.py
from threading import Thread
from base64 import b64encode, b64decode
from requests.exceptions import ConnectionError
import os
import json
import sys
import cv2
import requests
import config
import numpy as np
def upload_image(url, data):
    """POST a base64-encoded image to the collector endpoint.

    `data` is base64 bytes (as produced by b64encode); it is decoded to str
    so it can be JSON-serialized.  Best-effort: a connection failure (the
    requests ConnectionError imported at the top of this file) is logged
    and swallowed so the capture loop keeps running.
    """
    try:
        payload = {"image": data.decode('utf-8')}
        headers = {'Content-type': 'application/json'}
        # Response is intentionally ignored (fire-and-forget upload).
        requests.post(url, headers=headers, auth=(
            config.user, config.password), data=json.dumps(payload))
        print("Sent photo")
    except ConnectionError:
        print("Couldn't send the image in the cloud")
def capture_pictures(cam, folder, url):
    """Upload every image in `folder` to `url`, forever.

    NOTE(review): `cam` is accepted but never used here (kept for interface
    parity with capture_frames).  The outer `while True` re-sends the same
    files on every pass -- confirm whether re-uploading is intended.
    """
    while True:
        for image_file in sorted(os.listdir(folder)):
            print(image_file)
            # Re-encode the file as JPEG; index [1] is the encoded buffer.
            img = cv2.imencode(".jpg", cv2.imread(
                os.path.join(folder, image_file)))[1]
            if img is not None:
                data = b64encode(img)
                upload_image(url, data)
def capture_frames(cam, url):
    """Continuously grab frames from the camera, JPEG-encode them and
    upload each one to `url` (blocking loop; never returns)."""
    while True:
        _, frame = cam.read()
        image = cv2.imencode(".jpg", frame)[1]  # [1] is the encoded buffer
        data = b64encode(image)
        upload_image(url, data)
def display_stream(cam):
    """Show the live camera stream in a window until 'q' is pressed
    (local debug mode; nothing is uploaded)."""
    while True:
        _, frame = cam.read()
        cv2.imshow('Camera stream', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
def main():
    """Dispatch on argv[1]: 'live' uploads camera frames, 'disk' uploads
    the saved ./samples images, anything else opens a local preview."""
    base_url = config.base_url

    # Validate arguments before grabbing the camera, so a usage error
    # does not needlessly open (and hold) the capture device.
    if len(sys.argv) != 2:
        print("Please give one parameter: live, disk or debug")
        sys.exit(1)  # was exit(1); sys.exit is the supported form in scripts

    cam = cv2.VideoCapture(0)

    if sys.argv[1] == 'live':
        capture_frames(cam, "{}/collect".format(base_url))
    elif sys.argv[1] == 'disk':
        capture_pictures(cam, "./samples", "{}/collect".format(base_url))
    else:
        display_stream(cam)
if __name__ == "__main__":
    # script entry point
    main()
| 2.5625 | 3 |
app/app.py | hkvh/sonata-archives | 0 | 12764884 | <gh_stars>0
#!/usr/bin/env python
import logging
import os
from flask import Flask, render_template, send_from_directory
from flask_bootstrap import Bootstrap
from psycopg2 import sql
from database_design.sonata_table_specs import Composer, Piece, Sonata, Intro, Expo, Development, \
Recap, Coda, ColumnDisplay
from directories import APP_DIR
from general_utils.postgres_utils import LocalhostCursor
# Module-level singletons: logger, Flask app and Bootstrap integration.
log = logging.getLogger(__name__)
app = Flask(__name__)
bootstrap = Bootstrap(app)
@app.route('/')
@app.route('/index')
def index():
    """Render the static landing page."""
    return render_template('index.html')
@app.route('/composers')
def composers():
    """List all composers as (id, "Surname, Firstname") tuples, sorted by name."""
    with LocalhostCursor() as cur:
        select_comps_query = sql.SQL("""
            SELECT {id},{full_name} 
            FROM {composer_st};
        """).format(id=Composer.ID,
                    full_name=Composer.FULL_NAME,
                    composer_st=Composer.schema_table())
        cur.execute(select_comps_query)

        comp_tuples = cur.fetchall()

        # Create "Surname, Firstname" format by splitting on " "
        # (the last space-separated token is treated as the surname)
        comp_tuples = [(comp_id, "{}, {}".format(comp_fn.split(' ')[-1], ' '.join(comp_fn.split(' ')[:-1])))
                       for comp_id, comp_fn in comp_tuples]

        # Sort by the name
        comp_tuples.sort(key=lambda x: x[1])

    return render_template('composers.html', composer_id_name_tuples=comp_tuples)
@app.route('/pieces')
def pieces():
    # The main goal for this method is to render pieces.html with the following:
    #
    # 1. pieces_comp_tuples: List[Tuple[str, str, str]]
    #       a list of tuples of (comp_id, piece_id, piece_name_with_composer) for all pieces analyzed
    #
    # 2. pieces_movements_dict: Dict[str, List[int]]
    #       a dict mapping piece id --> lists of the analyzed movement nums

    # Plain (tuple-returning) cursor is sufficient here since we unpack
    # positionally below.
    with LocalhostCursor() as cur:
        select_pieces_query = sql.SQL("""
            SELECT comp.{comp_id}, piece.{piece_id}, 
                   comp.{comp_surname} || ' ' || piece.{piece_full_name} AS cfn
            FROM {piece_st} AS piece
            JOIN {comp_st} AS comp
                 ON (piece.{piece_comp_id} = comp.{comp_id})
            ORDER BY cfn ASC;
        """).format(piece_id=Piece.ID,
                    piece_full_name=Piece.FULL_NAME,
                    piece_comp_id=Piece.COMPOSER_ID,
                    piece_st=Piece.schema_table(),
                    comp_id=Composer.ID,
                    comp_surname=Composer.SURNAME,
                    comp_st=Composer.schema_table())
        cur.execute(select_pieces_query)
        pieces_comp_tuples = cur.fetchall()

        select_pieces_movements_query = sql.SQL("""
            SELECT {sonata_piece_id}, {sonata_movement_num}
            FROM {sonata_st}
            ORDER BY {sonata_movement_num};
        """).format(sonata_piece_id=Sonata.PIECE_ID,
                    sonata_movement_num=Sonata.MOVEMENT_NUM,
                    sonata_st=Sonata.schema_table())
        cur.execute(select_pieces_movements_query)

        # Group movement numbers by piece id (rows are pre-sorted by movement).
        pieces_movements_dict = {}
        for piece_id, movement_num in cur:
            movement_list = pieces_movements_dict.setdefault(piece_id, [])
            movement_list.append(movement_num)

    return render_template('pieces.html', pieces_comp_tuples=pieces_comp_tuples,
                           pieces_movements_dict=pieces_movements_dict)
@app.route('/composers/<composer_id>')
def composer(composer_id: str):
    # The main goal for this method is to render composer.html with the following:
    #
    # 1. composer_id: str
    #       the provided composer_id based on the url
    #
    # 2. composer_surname: str
    #       the last name of the composer
    #
    # 3. comp_info_dict: Dict[str, Any]]
    #       a dict that contains all composer-level attributes and values
    #
    # 4. piece_id_name_tuples: List[Tuple[str, str]]
    #       a list of tuples of (piece_id, piece_name) for all pieces for this composer
    #
    # 5. pieces_movements_dict: Dict[str, List[int]]
    #       a dict mapping piece id --> lists of the analyzed movement nums

    # Use a dict cursor to have fetchone return a dict instead of a tuple
    with LocalhostCursor(dict_cursor=True) as cur:
        select_comp_info_query = sql.SQL("""
            SELECT *
            FROM {composer_st}
            WHERE {id} = {composer_id};
        """).format(id=Composer.ID,
                    composer_id=sql.Literal(composer_id),
                    composer_st=Composer.schema_table())
        cur.execute(select_comp_info_query)
        comp_info_dict = dict(cur.fetchone())

        # Remove information that we don't want to display
        comp_info_dict.pop(Composer.ID.string)

        # Will use surname as separate field so need to keep it
        composer_surname = comp_info_dict.pop(Composer.SURNAME.string)

    # Second cursor returns plain tuples for the piece listing.
    with LocalhostCursor() as cur:
        select_comp_pieces_query = sql.SQL("""
            SELECT {id}, {full_name}
            FROM {piece_st}
            WHERE {comp_id} = {composer_id};
        """).format(id=Piece.ID,
                    full_name=Piece.FULL_NAME,
                    comp_id=Piece.COMPOSER_ID,
                    composer_id=sql.Literal(composer_id),
                    piece_st=Piece.schema_table())
        cur.execute(select_comp_pieces_query)
        piece_id_name_tuples = cur.fetchall()

        # Sort by the name
        piece_id_name_tuples.sort(key=lambda x: x[1])

        # Change info dict to have display name keys instead of raw field name keys
        comp_info_dict = ColumnDisplay.create_new_dict_with_display_name_keys(
            cursor=cur,
            table_name=Composer.schema_table().table.string,
            dict_with_column_name_keys=comp_info_dict)

        # NOTE(review): the format kwarg `sonata_comp_id` below has no matching
        # placeholder in this SQL template -- harmless but likely leftover.
        select_pieces_movements_query = sql.SQL("""
                    SELECT s.{sonata_piece_id}, s.{sonata_movement_num}
                    FROM {sonata_st} AS s
                    JOIN {piece_st} AS p
                         ON s.{sonata_piece_id} = p.{piece_id}
                    WHERE p.{piece_comp_id} = {composer_id}
                    ORDER BY s.{sonata_movement_num};
                """).format(sonata_piece_id=Sonata.PIECE_ID,
                            sonata_movement_num=Sonata.MOVEMENT_NUM,
                            sonata_comp_id=Sonata.PIECE_ID,
                            piece_id=Piece.ID,
                            piece_comp_id=Piece.COMPOSER_ID,
                            composer_id=sql.Literal(composer_id),
                            piece_st=Piece.schema_table(),
                            sonata_st=Sonata.schema_table())
        cur.execute(select_pieces_movements_query)

        # Group movement numbers by piece id (rows are pre-sorted by movement).
        pieces_movements_dict = {}
        for piece_id, movement_num in cur:
            movement_list = pieces_movements_dict.setdefault(piece_id, [])
            movement_list.append(movement_num)

    return render_template('composer.html',
                           composer_id=composer_id,
                           composer_surname=composer_surname,
                           composer_info_dict=comp_info_dict,
                           piece_id_name_tuples=piece_id_name_tuples,
                           pieces_movements_dict=pieces_movements_dict)
@app.route('/composers/<composer_id>/<piece_id>')
def piece(composer_id: str, piece_id: str):
    # The main goal for this method is to render piece.html with the following:
    #
    # 1. composer_id: str
    #       the provided composer_id based on the url
    #
    # 2. composer_surname: str
    #       the last name of the composer
    #
    # 3. piece_name: str
    #       the name of the piece
    #
    # 4. piece_info_dict: Dict[str, Any]
    #       a dict of piece-level attribtues and values
    #
    # 5. sonatas_info_dict: Dict[str, Dict[str, Any]]
    #       a dict that maps movement_num to dicts of sonata-level attributes and values
    #
    # 6. sonatas_lilypond_image_settings_dict: Dict[str, Dict[str, Any]]
    #       a dict that maps movement_num to an image settings dict containing settings for the lilypond file
    #       Right now, the only setting it should have is "image_width"
    #
    # 7. sonatas_blocks_info_dict: Dict[str, Dict[str, Dict[str, Any]]]
    #       nested dicts that map movement_num --> block name --> block-level dict of attributes and values
    #
    # If sonata name = 'Itself' then this means the piece is a single-movement work that is the sonata

    # Use a dict cursor to have each record return a dict instead of a tuple
    with LocalhostCursor(dict_cursor=True) as cur:

        ###################################
        # Piece Info Dict and Piece Name  #
        ###################################
        select_piece_info_query = sql.SQL("""
                  SELECT *
                  FROM {piece_st}
                  WHERE {id} = {piece_id};
              """).format(id=Piece.ID,
                          piece_id=sql.Literal(piece_id),
                          piece_st=Piece.schema_table())
        cur.execute(select_piece_info_query)
        piece_info_dict = dict(cur.fetchone())

        # Remove information that we don't want to display (grab the full name and composer_id while popping)
        piece_info_dict.pop(Piece.ID.string)
        piece_info_dict.pop(Piece.NAME.string)
        piece_info_dict.pop(Piece.NICKNAME.string)
        piece_info_dict.pop(Piece.CATALOGUE_ID.string)
        piece_name = piece_info_dict.pop(Piece.FULL_NAME.string)
        composer_id_of_piece = piece_info_dict.pop(Piece.COMPOSER_ID.string)

        # Change info dict to have display name keys instead of raw field name keys
        piece_info_dict = ColumnDisplay.create_new_dict_with_display_name_keys(
            cursor=cur,
            table_name=Piece.schema_table().table.string,
            dict_with_column_name_keys=piece_info_dict)

        # Guard against a mismatched composer id in the URL.
        if composer_id != composer_id_of_piece:
            raise Exception("Bad composer id \"{}\" in URL! Piece with id \"{}\" should have composer id \"{}\""
                            "".format(composer_id, piece_id, composer_id_of_piece))

        ####################
        # Composer Surname #
        ####################
        select_comp_surname_query = sql.SQL("""
                SELECT {surname}
                FROM {composer_st}
                WHERE {id} = {composer_id}
            """).format(surname=Composer.SURNAME,
                        id=Composer.ID,
                        composer_id=sql.Literal(composer_id),
                        composer_st=Composer.schema_table())
        cur.execute(select_comp_surname_query)
        composer_surname = cur.fetchone()[0]

        ######################
        # Sonatas Info Dicts #
        ######################
        select_sonatas_query = sql.SQL("""
                          SELECT *
                          FROM {sonata_st}
                          WHERE {p_id} = {piece_id}
                          ORDER BY {movement_num};
                      """).format(p_id=Sonata.PIECE_ID,
                                  piece_id=sql.Literal(piece_id),
                                  movement_num=Sonata.MOVEMENT_NUM,
                                  sonata_st=Sonata.schema_table())
        cur.execute(select_sonatas_query)

        sonatas_info_dict = {}
        sonatas_blocks_info_dict = {}
        sonatas_lilypond_image_settings_dict = {}
        sonatas_data = cur.fetchall()
        for result in sonatas_data:
            sonata_info_dict = dict(result)

            # Remove information that we don't want to display (grab various ids and info we need while popping)
            sonata_info_dict.pop(Sonata.PIECE_ID.string)
            sonata_id = sonata_info_dict.pop(Sonata.ID.string)
            movement_num = sonata_info_dict.pop(Sonata.MOVEMENT_NUM.string)
            intro_id = sonata_info_dict.pop(Sonata.INTRODUCTION_ID.string)
            expo_id = sonata_info_dict.pop(Sonata.EXPOSITION_ID.string)
            dev_id = sonata_info_dict.pop(Sonata.DEVELOPMENT_ID.string)
            recap_id = sonata_info_dict.pop(Sonata.RECAPITULATION_ID.string)
            coda_id = sonata_info_dict.pop(Sonata.CODA_ID.string)
            lilypond_image_settings = sonata_info_dict.pop(Sonata.LILYPOND_IMAGE_SETTINGS.string)

            # Change info dict to have display name keys instead of raw field name keys
            sonata_info_dict = ColumnDisplay.create_new_dict_with_display_name_keys(
                cursor=cur,
                table_name=Sonata.schema_table().table.string,
                dict_with_column_name_keys=sonata_info_dict)

            sonatas_info_dict[movement_num] = sonata_info_dict

            # If settings to the lilypond image were provided for this sonata, we should expect the image to exist
            if lilypond_image_settings is not None:

                # The assumed image path will be in the static folder named after the sonata id
                lilypond_image_settings[Sonata.IMAGE_PATH] = '/static/lilypond/{}.png'.format(sonata_id)

                # Provide a default image width if not provided
                if Sonata.IMAGE_WIDTH not in lilypond_image_settings:
                    lilypond_image_settings[Sonata.IMAGE_WIDTH] = 400

                sonatas_lilypond_image_settings_dict[movement_num] = lilypond_image_settings

            sonatas_blocks_info_dict[movement_num] = {}

            # A None id means the sonata is missing that block entirely.
            block_ids = [intro_id, expo_id, dev_id, recap_id, coda_id]
            block_table_specs = [Intro, Expo, Development, Recap, Coda]

            #############################
            # Sonatas Blocks Info Dicts #
            #############################
            for block_id, block_table_spec in zip(block_ids, block_table_specs):

                # if there is no block id, that means the block is missing, so we can skip
                if block_id is not None:

                    # The name of the block is the same as the table spec class name
                    block_name = block_table_spec.__name__

                    with LocalhostCursor(dict_cursor=True) as cur2:
                        select_sonata_block_info_query = sql.SQL("""
                                          SELECT *
                                          FROM {block_st}
                                          WHERE {id} = {block_id};
                                      """).format(id=block_table_spec.ID,
                                                  block_id=sql.Literal(block_id),
                                                  block_st=block_table_spec.schema_table())
                        cur2.execute(select_sonata_block_info_query)

                        sonata_block_info_dict = dict(cur2.fetchone())

                        # Remove information that we don't want to display
                        sonata_block_info_dict.pop(block_table_spec.ID.string)
                        sonata_block_info_dict.pop(block_table_spec.SONATA_ID.string)

                        # Change info dict to have display name keys instead of raw field name keys
                        # NOTE(review): this uses the *outer* cursor `cur`
                        # rather than `cur2` -- it works, but confirm whether
                        # cur2 was intended for consistency.
                        sonata_block_info_dict = ColumnDisplay.create_new_dict_with_display_name_keys(
                            cursor=cur,
                            table_name=block_table_spec.schema_table().table.string,
                            dict_with_column_name_keys=sonata_block_info_dict)

                        sonatas_blocks_info_dict[movement_num][block_name] = sonata_block_info_dict

    log.debug('sonatas_lilypond_image_settings_dict: {}'.format(sonatas_lilypond_image_settings_dict))
    return render_template('piece.html',
                           composer_id=composer_id,
                           composer_surname=composer_surname,
                           piece_name=piece_name,
                           piece_info_dict=piece_info_dict,
                           sonatas_info_dict=sonatas_info_dict,
                           sonatas_blocks_info_dict=sonatas_blocks_info_dict,
                           sonatas_lilypond_image_settings_dict=sonatas_lilypond_image_settings_dict,
                           IMAGE_PATH=Sonata.IMAGE_PATH,
                           IMAGE_WIDTH=Sonata.IMAGE_WIDTH)
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the application's static directory."""
    return send_from_directory(os.path.join(APP_DIR, 'static'),
                               'favicon.ico')
if __name__ == '__main__':
    # Timestamped INFO-level logging; debug=True is for local development only.
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(message)s')
    app.run(debug=True)
| 2.40625 | 2 |
SAM2017/registration/admin.py | jpavelw/sam-2017 | 1 | 12764885 | from django.contrib import admin
from . import models
class UserFieldFilter(admin.ModelAdmin):
    """Admin options for User: expose only the 'role' field in the edit form."""
    fields = ['role']
# Register User with the restricted form above; Role uses the default admin.
admin.site.register(models.User, UserFieldFilter)
admin.site.register(models.Role)
| 1.5 | 2 |
tests/unit/test_non_empty_configs_provider.py | barryib/gitlabform | 299 | 12764886 | import pytest
from gitlabform import EXIT_INVALID_INPUT
from gitlabform.configuration.projects_and_groups import ConfigurationProjectsAndGroups
from gitlabform.filter import NonEmptyConfigsProvider
def test_error_on_missing_key():
    """A config without the projects_and_groups key must exit with EXIT_INVALID_INPUT."""
    yaml_without_key = """
    ---
    # no key at all
    """
    with pytest.raises(SystemExit) as exc_info:
        cfg = ConfigurationProjectsAndGroups(config_string=yaml_without_key)
        NonEmptyConfigsProvider(cfg, None, None)
    assert exc_info.value.code == EXIT_INVALID_INPUT
def test_error_on_empty_key():
    """A config whose projects_and_groups key is empty must exit with EXIT_INVALID_INPUT."""
    yaml_empty_key = """
    ---
    projects_and_groups:
    """
    with pytest.raises(SystemExit) as exc_info:
        cfg = ConfigurationProjectsAndGroups(config_string=yaml_empty_key)
        NonEmptyConfigsProvider(cfg, None, None)
    assert exc_info.value.code == EXIT_INVALID_INPUT
| 2.421875 | 2 |
packages/legacycomponents/mcstas2/python/mcstas2/utils/parsers.old/ShareIncludeParser.py | mcvine/mcvine | 5 | 12764887 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 2008 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from .pyparsing.pyparsing import *
def include():
    """Grammar for a McStas '%include' directive; the quoted header is
    captured under the result name 'header'."""
    header = quotedString.setResultsName('header')
    return Suppress('%include') + header
def test():
    """Smoke-test the %include grammar on a one-line sample."""
    text = '''
%include "read_table-lib"
'''
    result = include().parseString(text)
    print('%r' % result.header)
    return
if __name__ == "__main__": test()  # run the smoke test when executed directly
# version
__id__ = "$Id$"
# End of file
| 2.109375 | 2 |
question_bank/linked-list-in-binary-tree/linked-list-in-binary-tree.py | yatengLG/leetcode-python | 9 | 12764888 | # -*- coding: utf-8 -*-
# @Author : LG
"""
执行用时:108 ms, 在所有 Python3 提交中击败了96.06% 的用户
内存消耗:15.8 MB, 在所有 Python3 提交中击败了25.37% 的用户
解题思路:
先在树中找到与链表第一个节点相同值的 树节点
然后以这些树中节点为根的子树中寻找是否存在链表
"""
class Solution:
    def isSubPath(self, head: ListNode, root: TreeNode) -> bool:
        """Return True if the linked list starting at ``head`` occurs as a
        downward path in the binary tree rooted at ``root``.

        Improvement over the original: instead of first collecting every
        tree node whose value matches the list head into a list and then
        re-checking each one (extra traversal plus an intermediate list),
        recurse directly — at every tree node either start matching the
        list here or keep searching in the subtrees.  Also handles an
        empty tree (root is None) gracefully.
        """
        def _match(node, lst):
            # Can the remainder of the list be matched going down from `node`?
            if lst is None:
                return True            # whole list consumed -> embedded
            if node is None:
                return False           # tree ran out before the list did
            if node.val != lst.val:
                return False
            return _match(node.left, lst.next) or _match(node.right, lst.next)

        def _search(node):
            # Try to begin the match at every node of the tree.
            if node is None:
                return False
            return _match(node, head) or _search(node.left) or _search(node.right)

        return _search(root)
drivers/driver.py | OttrOne/suivi | 0 | 12764889 | <reponame>OttrOne/suivi
class DriverMeta(type):
    """Metaclass giving drivers structural (duck-typed) membership.

    ``isinstance``/``issubclass`` succeed for any class that exposes the
    full callable driver interface: create, logs, stats, stop, cleanup
    and wait.
    """

    def __instancecheck__(cls, __instance) -> bool:
        # An object is a driver iff its class satisfies the subclass check.
        return cls.__subclasscheck__(type(__instance))

    def __subclasscheck__(cls, __subclass: type) -> bool:
        # callable(getattr(..., None)) is equivalent to hasattr + callable:
        # a missing attribute yields None, which is not callable.
        required = ('create', 'logs', 'stats', 'stop', 'cleanup', 'wait')
        return all(callable(getattr(__subclass, name, None)) for name in required)
class Driver(metaclass=DriverMeta):
    """Abstract marker type for drivers; membership is structural, decided
    entirely by DriverMeta's isinstance/issubclass hooks."""
    pass
| 2.4375 | 2 |
karapace/protobuf/proto_type.py | instaclustr/karapace | 1 | 12764890 | <gh_stars>1-10
# Ported from square/wire:
# wire-library/wire-schema/src/commonMain/kotlin/com/squareup/wire/schema/ProtoType.kt
"""
Names a protocol buffer message, enumerated type, service, map, or a scalar. This class models a
fully-qualified name using the protocol buffer package.
"""
from enum import auto, Enum
from karapace.protobuf.exception import IllegalArgumentException
from karapace.protobuf.kotlin_wrapper import check, require
from karapace.protobuf.option_element import OptionElement
from typing import Optional
def static_init(cls) -> object:
    """Class decorator: call ``cls.static_init()`` once at definition time,
    if the class defines it, then return the class unchanged."""
    initializer = getattr(cls, "static_init", None)
    if initializer:
        initializer()
    return cls
@static_init
class ProtoType:
    """Fully-qualified name of a protobuf scalar, message, enum, service or
    map type.  Well-known singletons (BOOL, STRING, google.protobuf
    wrappers, ...) are created once in static_init via the @static_init
    decorator."""
    @property
    def simple_name(self) -> str:
        # Last dotted component of the fully-qualified name.
        dot = self.string.rfind(".")
        return self.string[dot + 1:]
    @classmethod
    def static_init(cls) -> None:
        # Invoked exactly once by the @static_init class decorator to build
        # the scalar singletons and the well-known google.protobuf types.
        cls.BOOL = cls(True, "bool")
        cls.BYTES = cls(True, "bytes")
        cls.DOUBLE = cls(True, "double")
        cls.FLOAT = cls(True, "float")
        cls.FIXED32 = cls(True, "fixed32")
        cls.FIXED64 = cls(True, "fixed64")
        cls.INT32 = cls(True, "int32")
        cls.INT64 = cls(True, "int64")
        cls.SFIXED32 = cls(True, "sfixed32")
        cls.SFIXED64 = cls(True, "sfixed64")
        cls.SINT32 = cls(True, "sint32")
        cls.SINT64 = cls(True, "sint64")
        cls.STRING = cls(True, "string")
        cls.UINT32 = cls(True, "uint32")
        cls.UINT64 = cls(True, "uint64")
        cls.ANY = cls(False, "google.protobuf.Any")
        cls.DURATION = cls(False, "google.protobuf.Duration")
        cls.TIMESTAMP = cls(False, "google.protobuf.Timestamp")
        cls.EMPTY = cls(False, "google.protobuf.Empty")
        cls.STRUCT_MAP = cls(False, "google.protobuf.Struct")
        cls.STRUCT_VALUE = cls(False, "google.protobuf.Value")
        cls.STRUCT_NULL = cls(False, "google.protobuf.NullValue")
        cls.STRUCT_LIST = cls(False, "google.protobuf.ListValue")
        cls.DOUBLE_VALUE = cls(False, "google.protobuf.DoubleValue")
        cls.FLOAT_VALUE = cls(False, "google.protobuf.FloatValue")
        cls.INT64_VALUE = cls(False, "google.protobuf.Int64Value")
        cls.UINT64_VALUE = cls(False, "google.protobuf.UInt64Value")
        cls.INT32_VALUE = cls(False, "google.protobuf.Int32Value")
        cls.UINT32_VALUE = cls(False, "google.protobuf.UInt32Value")
        cls.BOOL_VALUE = cls(False, "google.protobuf.BoolValue")
        cls.STRING_VALUE = cls(False, "google.protobuf.StringValue")
        cls.BYTES_VALUE = cls(False, "google.protobuf.BytesValue")
        cls.SCALAR_TYPES_ = [
            cls.BOOL, cls.BYTES, cls.DOUBLE, cls.FLOAT, cls.FIXED32, cls.FIXED64, cls.INT32, cls.INT64, cls.SFIXED32,
            cls.SFIXED64, cls.SINT32, cls.SINT64, cls.STRING, cls.UINT32, cls.UINT64
        ]
        # Name -> singleton lookup used by get2() for fast scalar resolution.
        cls.SCALAR_TYPES: dict = {}
        for a in cls.SCALAR_TYPES_:
            cls.SCALAR_TYPES[a.string] = a
        cls.NUMERIC_SCALAR_TYPES: tuple = (
            cls.DOUBLE, cls.FLOAT, cls.FIXED32, cls.FIXED64, cls.INT32, cls.INT64, cls.SFIXED32, cls.SFIXED64, cls.SINT32,
            cls.SINT64, cls.UINT32, cls.UINT64
        )
    def __init__(
        self, is_scalar: bool, string: str, key_type: Optional['ProtoType'] = None, value_type: Optional['ProtoType'] = None
    ) -> None:
        """ Creates a scalar or message type. """
        if not key_type and not value_type:
            # Plain scalar / message / enum type.
            self.is_scalar = is_scalar
            self.string = string
            self.is_map = False
            """ The type of the map's keys. Only present when [is_map] is True. """
            self.key_type = None
            """ The type of the map's values. Only present when [is_map] is True. """
            self.value_type = None
        else:
            # Map type: protobuf restricts map keys to integral/bool/string scalars.
            if key_type.is_scalar and key_type != self.BYTES and key_type != self.DOUBLE and key_type != self.FLOAT:
                self.is_scalar = False
                self.string = string
                self.is_map = True
                self.key_type = key_type  # TODO restrict what's allowed here
                self.value_type = value_type
            else:
                # TODO: must be IllegalArgumentException
                raise Exception(f"map key must be non-byte, non-floating point scalar: {key_type}")
    def to_kind(self) -> OptionElement.Kind:
        # Map a scalar type name to the option-value kind it carries;
        # any non-scalar simple name falls back to ENUM.
        return {
            "bool": OptionElement.Kind.BOOLEAN,
            "string": OptionElement.Kind.STRING,
            "bytes": OptionElement.Kind.NUMBER,
            "double": OptionElement.Kind.NUMBER,
            "float": OptionElement.Kind.NUMBER,
            "fixed32": OptionElement.Kind.NUMBER,
            "fixed64": OptionElement.Kind.NUMBER,
            "int32": OptionElement.Kind.NUMBER,
            "int64": OptionElement.Kind.NUMBER,
            "sfixed32": OptionElement.Kind.NUMBER,
            "sfixed64": OptionElement.Kind.NUMBER,
            "sint32": OptionElement.Kind.NUMBER,
            "sint64": OptionElement.Kind.NUMBER,
            "uint32": OptionElement.Kind.NUMBER,
            "uint64": OptionElement.Kind.NUMBER
        }.get(self.simple_name, OptionElement.Kind.ENUM)
    @property
    def enclosing_type_or_package(self) -> str:
        """ Returns the enclosing type, or null if self type is not nested in another type. """
        # NOTE(review): returns None for non-nested names despite the `-> str`
        # annotation; callers must handle None.
        dot = self.string.rfind(".")
        return None if (dot == -1) else self.string[:dot]
    @property
    def type_url(self) -> str:
        """ Returns a string like "type.googleapis.com/packagename.messagename" or null if self type is
        a scalar or a map. Note that self returns a non-null string for enums because it doesn't know
        if the named type is a message or an enum.
        """
        return None if self.is_scalar or self.is_map else f"type.googleapis.com/{self.string}"
    def nested_type(self, name: str) -> object:  # ProtoType
        # Build the ProtoType of a type nested inside this message type.
        check(not self.is_scalar, "scalar cannot have a nested type")
        check(not self.is_map, "map cannot have a nested type")
        require(name and name.rfind(".") == -1 and len(name) != 0, f"unexpected name: {name}")
        return ProtoType(False, f"{self.string}.{name}")
    def __eq__(self, other) -> bool:
        return isinstance(other, ProtoType) and self.string == other.string
    def __ne__(self, other) -> bool:
        return not isinstance(other, ProtoType) or self.string != other.string
    def __str__(self) -> str:
        return self.string
    def hash_code(self) -> int:
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable; this Kotlin-ported helper is not wired up as __hash__.
        # Confirm whether instances are ever used as dict keys / set members.
        return hash(self.string)
    @staticmethod
    def get(enclosing_type_or_package: str, type_name: str) -> 'ProtoType':
        # Qualify type_name with its enclosing package/type, if any.
        return ProtoType.get2(f"{enclosing_type_or_package}.{type_name}") \
            if enclosing_type_or_package else ProtoType.get2(type_name)
    @staticmethod
    def get2(name: str) -> 'ProtoType':
        # Resolve a name: scalar singleton, map<...> syntax, or message/enum name.
        scalar = ProtoType.SCALAR_TYPES.get(name)
        if scalar:
            return scalar
        require(name and len(name) != 0 and name.rfind("#") == -1, f"unexpected name: {name}")
        if name.startswith("map<") and name.endswith(">"):
            comma = name.rfind(",")
            require(comma != -1, f"expected ',' in map type: {name}")
            key = ProtoType.get2(name[4:comma].strip())
            value = ProtoType.get2(name[comma + 1:len(name) - 1].strip())
            return ProtoType(False, name, key, value)
        return ProtoType(False, name)
    @staticmethod
    def get3(key_type: 'ProtoType', value_type: 'ProtoType', name: str) -> object:
        # Build a map type directly from already-resolved key/value types.
        return ProtoType(False, name, key_type, value_type)
    # schema compatibility check functionality karapace addon
    # Based on table https://developers.google.com/protocol-buffers/docs/proto3#scalar
    class CompatibilityKind(Enum):
        # Wire-format families: types in the same family are wire-compatible.
        VARIANT = auto()
        SVARIANT = auto()  # sint has incompatible format with int but compatible with it by size
        FIXED64 = auto()
        LENGTH_DELIMITED = auto()
        FIXED32 = auto()
        DOUBLE = auto()
        FLOAT = auto()
    def compatibility_kind(self, is_enum: bool) -> 'ProtoType.CompatibilityKind':
        """Classify this type into its wire-format compatibility family;
        enums are varint-encoded, unknown names raise."""
        if is_enum:
            return ProtoType.CompatibilityKind.VARIANT
        result = {
            "int32": ProtoType.CompatibilityKind.VARIANT,
            "int64": ProtoType.CompatibilityKind.VARIANT,
            "uint32": ProtoType.CompatibilityKind.VARIANT,
            "uint64": ProtoType.CompatibilityKind.VARIANT,
            "bool": ProtoType.CompatibilityKind.VARIANT,
            "sint32": ProtoType.CompatibilityKind.SVARIANT,
            "sint64": ProtoType.CompatibilityKind.SVARIANT,
            "double": ProtoType.CompatibilityKind.DOUBLE,  # it is compatible by size with FIXED64
            "fixed64": ProtoType.CompatibilityKind.FIXED64,
            "sfixed64": ProtoType.CompatibilityKind.FIXED64,
            "float": ProtoType.CompatibilityKind.FLOAT,  # it is compatible by size with FIXED32
            "fixed32": ProtoType.CompatibilityKind.FIXED32,
            "sfixed32": ProtoType.CompatibilityKind.FIXED32,
            "string": ProtoType.CompatibilityKind.LENGTH_DELIMITED,
            "bytes": ProtoType.CompatibilityKind.LENGTH_DELIMITED,
        }.get(self.simple_name)
        if result:
            return result
        raise IllegalArgumentException(f"undefined type: {self.simple_name}")
| 1.78125 | 2 |
release/cs_submitter/mainform/migrations/0002_auto_20170801_1155.py | kvswim/kv_jhucs_coursesubmit | 0 | 12764891 | <filename>release/cs_submitter/mainform/migrations/0002_auto_20170801_1155.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 15:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.3 (see file header); do not hand-edit --
    # create a follow-up migration for further schema changes.  Adds the
    # Days model plus start/end time and days_of_week fields on MainformModel.
    dependencies = [
        ('mainform', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Days',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('day', models.CharField(max_length=8)),
            ],
        ),
        migrations.AddField(
            model_name='mainformmodel',
            name='end_time',
            field=models.TimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='mainformmodel',
            name='start_time',
            field=models.TimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='mainformmodel',
            name='days_of_week',
            field=models.ManyToManyField(to='mainform.Days'),
        ),
    ]
| 1.625 | 2 |
src/main.py | andrejmiscic/simcls-pytorch | 5 | 12764892 | <reponame>andrejmiscic/simcls-pytorch<filename>src/main.py
import argparse
import os
import torch
from torch.utils.data import DataLoader
from evaluator import Evaluator
from model import CandidateScorer
from trainer import Trainer, TrainConfig
from utils.data_utils import SummarizationDataset, collate_inputs_to_batch
def setup_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for SimCLS training/evaluation."""
    p = argparse.ArgumentParser(description="SimCLS training/evaluation parser")

    # Run mode and file locations.
    p.add_argument("--mode", type=str, default="train", choices=["train", "test"])
    p.add_argument("--model_path",
                   help="Path to the model folder (can be huggingface hub, e.g. roberta-base)",
                   type=str, default="roberta-base")
    p.add_argument("--train_path", help="Path to the training pickle file", type=str, default=None)
    p.add_argument("--val_path", help="Path to the val pickle file", type=str, default=None)
    p.add_argument("--test_path", help="Path to the test pickle file", type=str, default=None)
    p.add_argument("--save_dir", help="Path to the directory to output the trained model", type=str, default=None)

    # Optimisation hyper-parameters (batch_size is also used for testing).
    for flag, typ, default in (
        ("--lr", float, 0.002),
        ("--num_epochs", int, 8),
        ("--batch_size", int, 128),
        ("--margin_lambda", float, 0.01),
        ("--weight_decay", float, 0.),
        ("--warmup_steps", int, 10000),
        ("--eval_steps", int, 1000),
    ):
        p.add_argument(flag, type=typ, default=default)
    p.add_argument("--early_stop_patience", type=int, default=-1,
                   help="-1 to not perform early stopping")

    return p
def parse_args_to_config(args: argparse.Namespace) -> TrainConfig:
    """Translate parsed command-line arguments into a TrainConfig."""
    kwargs = dict(
        lr=args.lr,
        batch_size=args.batch_size,
        num_epochs=args.num_epochs,
        save_dir=args.save_dir,
        weight_decay=args.weight_decay,
        margin_lambda=args.margin_lambda,
        eval_steps=args.eval_steps,
        early_stopping_patience=args.early_stop_patience,
    )
    return TrainConfig(**kwargs)
if __name__ == '__main__':
    # Fix the torch RNG seed so runs are reproducible.
    torch.manual_seed(0)
    args = setup_parser().parse_args()
    model = CandidateScorer(args.model_path)
    if args.mode == "train":
        assert args.train_path, "If you want to train the model, you need to provide --train_path argument."
        assert args.val_path, "If you want to train the model, you need to provide --val_path argument."
        assert args.save_dir, "If you want to train the model, you need to provide --save_dir argument."
        if not os.path.exists(args.save_dir):
            os.makedirs(args.save_dir)
        config = parse_args_to_config(args)
        train_dataset = SummarizationDataset(args.train_path)
        val_dataset = SummarizationDataset(args.val_path)
        # Validation uses a doubled batch size (no gradients are kept there).
        train_dataloader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, collate_fn=collate_inputs_to_batch)
        val_dataloader = DataLoader(val_dataset, batch_size=2*config.batch_size, shuffle=False, collate_fn=collate_inputs_to_batch)
        trainer = Trainer(model)
        trainer.train(train_dataloader, val_dataloader, config)
    elif args.mode == "test":
        assert args.test_path, "If you want to test the model, you need to provide --test_path argument."
        test_dataset = SummarizationDataset(args.test_path)
        test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, collate_fn=collate_inputs_to_batch)
        evaluator = Evaluator(model)
        res = evaluator.evaluate(test_dataloader)
        # results appear to be (point estimate, interval low, interval high)
        # per metric -- TODO confirm against Evaluator.evaluate.
        print("SIMCLS RESULTS:")
        for metric, results in res.items():
            print(f"\t- {metric}: {results[0]}, [{results[1]}, {results[2]}]")
| 2.484375 | 2 |
tests/src/smiley/smiley/commands/show.py | incognitoRepo/hdlogger | 0 | 12764893 | import logging
from cliff import command
from smiley import db
from smiley import output
class Show(command.Command):
    """Show the details of one run.
    Includes summaries of the thread resource consumption, when
    multiple threads are present.
    """
    log = logging.getLogger(__name__)
    def get_parser(self, prog_name):
        # Extend the base cliff parser with the DB location and run id.
        parser = super(Show, self).get_parser(prog_name)
        parser.add_argument(
            '--database',
            default='smiley.db',
            help='filename for the database (%(default)s)',
        )
        parser.add_argument(
            'run_id',
            help='identifier for the run',
        )
        return parser
    def take_action(self, parsed_args):
        # Fetch the run record and dump its metadata via the logger.
        self.db = db.DB(parsed_args.database)
        run = self.db.get_run(parsed_args.run_id)
        details = {
            'id': run.id,
            'cwd': run.cwd,
            'description': run.description,
            'start_time': run.start_time.isoformat(),
            'end_time': run.end_time.isoformat(),
            'error_message': run.error_message,
            'traceback': run.traceback,
        }
        output.dump_dictionary(details, self.log.info, 0)
        # Per-thread summaries only add information when more than one
        # thread took part in the run.
        threads = list(self.db.get_thread_details(parsed_args.run_id))
        if len(threads) > 1:
            for thread in threads:
                td = {
                    'id': thread.id,
                    'start_time': thread.start_time.isoformat(),
                    'end_time': thread.end_time.isoformat(),
                }
                output.dump_dictionary(td, self.log.info, 0)
        return
| 2.5625 | 3 |
d2l-zh/recurrent-neural-network/rnn-start.py | hyschn/practice-code | 2 | 12764894 | # encoding:utf-8
"""
@Time : 2020-05-22 21:14
@Author : <EMAIL>
@File : rnn-start.py
@Software: PyCharm
"""
import d2lzh as d2l
import math
from mxnet import autograd, nd
from mxnet.gluon import loss as gloss
import time
def to_onehot(X, size):
    """One-hot encode each time step: (batch, steps) -> list of (batch, size) arrays."""
    return [nd.one_hot(step, size) for step in X.T]
def get_params():
    """Initialise RNN parameters (Gaussian weights, zero biases) and
    attach gradient buffers; relies on the module-level num_inputs,
    num_hiddens, num_outputs and ctx."""
    def _normal(shape):
        return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)

    W_xh = _normal((num_inputs, num_hiddens))   # input  -> hidden
    W_hh = _normal((num_hiddens, num_hiddens))  # hidden -> hidden
    b_h = nd.zeros(num_hiddens, ctx=ctx)
    W_hq = _normal((num_hiddens, num_outputs))  # hidden -> output
    b_q = nd.zeros(num_outputs, ctx=ctx)

    params = [W_xh, W_hh, b_h, W_hq, b_q]
    for p in params:
        p.attach_grad()
    return params
def init_rnn_state(batch_size, num_hiddens, ctx):
    """Return the initial hidden state as a 1-tuple of zeros."""
    H0 = nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)
    return (H0,)
def rnn(inputs, state, params):
    """Run one unrolled pass of a vanilla tanh RNN over `inputs`
    (a list of per-step batches); returns (outputs, new_state)."""
    W_xh, W_hh, b_h, W_hq, b_q = params
    (H,) = state
    outputs = []
    for step in inputs:
        H = nd.tanh(nd.dot(step, W_xh) + nd.dot(H, W_hh) + b_h)
        outputs.append(nd.dot(H, W_hq) + b_q)
    return outputs, (H,)
def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
                num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx):
    """Generate ``num_chars`` characters following ``prefix`` with the RNN.

    :param prefix: seed string fed to the model before free-running
    :param num_chars: number of characters to generate after the prefix
    :param rnn: forward function ``(inputs, state, params) -> (outputs, state)``
    :param params: model parameters as returned by ``get_params``
    :param init_rnn_state: factory for the initial hidden state
    :param num_hiddens: hidden-layer width
    :param vocab_size: size of the character vocabulary
    :param ctx: mxnet device context
    :param idx_to_char: index -> character mapping
    :param char_to_idx: character -> index mapping
    :return: the prefix plus the generated characters, as one string
    """
    state = init_rnn_state(1, num_hiddens, ctx)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars + len(prefix) - 1):
        # Feed the previously emitted character back in as the next input.
        X = to_onehot(nd.array([output[-1]], ctx=ctx), vocab_size)
        (Y, state) = rnn(X, state, params)
        if t < len(prefix) - 1:
            # Still warming up on the prefix: force the known next character.
            output.append(char_to_idx[prefix[t + 1]])
        else:
            # Free-running: greedily pick the most probable character.
            output.append(int(Y[0].argmax(axis=1).asscalar()))
    return ''.join([idx_to_char[i] for i in output])
def grad_clipping(params, theta, ctx):
    """Clip gradients in place so their global L2 norm is at most ``theta``.

    :param params: parameters whose ``.grad`` buffers are rescaled in place
    :param theta: maximum allowed global gradient norm
    :param ctx: mxnet device context for the norm accumulator
    """
    norm = nd.array([0], ctx)
    for param in params:
        norm += (param.grad ** 2).sum()
    norm = norm.sqrt().asscalar()
    if norm > theta:
        # Scale every gradient by theta/norm so the global norm equals theta.
        for param in params:
            param.grad[:] *= theta / norm
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                          vocab_size, ctx, corpus, idx_to_char, char_to_idx, is_random_iter, num_epochs,
                          num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes):
    """Train a character-level RNN with SGD and periodically sample from it.

    Bug fix over the original: the data iterator was built from the
    module-level global ``corpus_indices`` instead of the ``corpus``
    parameter, silently ignoring whatever corpus the caller passed in.
    It now uses ``corpus``.

    :param rnn: forward function ``(inputs, state, params) -> (outputs, state)``
    :param get_params: factory returning freshly initialised parameters
    :param init_rnn_state: factory for the initial hidden state
    :param num_hiddens: hidden-layer width
    :param vocab_size: character-vocabulary size
    :param ctx: mxnet device context
    :param corpus: encoded training corpus (sequence of character indices)
    :param idx_to_char: index -> character mapping
    :param char_to_idx: character -> index mapping
    :param is_random_iter: True for random sampling, False for consecutive
    :param num_epochs: number of passes over the corpus
    :param num_steps: unrolled time steps per mini-batch
    :param lr: SGD learning rate
    :param clipping_theta: global gradient-norm clip threshold
    :param batch_size: mini-batch size
    :param pred_period: sample text every this many epochs
    :param pred_len: number of characters to generate per sample
    :param prefixes: seed strings used for sampling
    """
    data_iter_fn = d2l.data_iter_random if is_random_iter else d2l.data_iter_consecutive
    params = get_params()
    loss = gloss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        if not is_random_iter:
            # Consecutive sampling: initialise once and carry the hidden
            # state across batches within the epoch.
            state = init_rnn_state(batch_size, num_hiddens, ctx)
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_fn(corpus, batch_size, num_steps, ctx)  # was: corpus_indices (module global)
        for X, Y in data_iter:
            if is_random_iter:
                # Random sampling: adjacent batches are unrelated, reset state.
                state = init_rnn_state(batch_size, num_hiddens, ctx)
            else:
                # Detach the carried state so gradients do not flow back
                # into previous batches.
                # NOTE(review): NDArray.detach() returns a new array; kept
                # as in the d2l reference -- confirm in-place intent.
                for s in state:
                    s.detach()
            with autograd.record():
                inputs = to_onehot(X, vocab_size)
                (outputs, state) = rnn(inputs, state, params)
                outputs = nd.concat(*outputs, dim=0)
                y = Y.T.reshape((-1,))
                l = loss(outputs, y).mean()
            l.backward()
            grad_clipping(params, clipping_theta, ctx)  # avoid exploding gradients
            d2l.sgd(params, lr, 1)  # loss is already averaged, so batch size 1 here
            l_sum += l.asscalar() * y.size
            n += y.size
        if (epoch + 1) % pred_period == 0:
            print('epoch %d,perplexity %f,time %.2f sec' %
                  (epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn(prefix, pred_len, rnn, params,
                                        init_rnn_state, num_hiddens, vocab_size, ctx,
                                        idx_to_char, char_to_idx))
if __name__ == '__main__':
    # Load the Jay Chou lyrics dataset shipped with d2lzh.
    (corpus_indices, char_to_idx, idx_to_char, vocab_size) \
        = d2l.load_data_jay_lyrics()
    X = nd.arange(10).reshape((2, 5))
    # print(X.T)
    inputs = to_onehot(X, vocab_size)
    # Define the model.
    num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
    ctx = d2l.try_gpu()
    # print('will use', ctx)
    state = init_rnn_state(X.shape[0], num_hiddens, ctx)
    inputs = to_onehot(X.as_in_context(ctx), vocab_size)
    params = get_params()
    # Sanity check: one forward pass plus a short untrained sample.
    outputs, state_new = rnn(inputs, state, params)
    print(len(outputs), outputs[0].shape, state_new[0].shape)
    print(predict_rnn('分开', 10, rnn, params, init_rnn_state, num_hiddens,
                      vocab_size, ctx, idx_to_char, char_to_idx))
    # Train twice: once with random sampling, once with consecutive sampling.
    num_epochs, num_steps, batch_size, lr, clipping_theta = 250, 35, 32, 1e2, 1e-2
    pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']
    train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                          vocab_size, ctx, corpus_indices, idx_to_char,
                          char_to_idx, True, num_epochs, num_steps, lr,
                          clipping_theta, batch_size, pred_period, pred_len, prefixes)
    train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                          vocab_size, ctx, corpus_indices, idx_to_char,
                          char_to_idx, False, num_epochs, num_steps, lr,
                          clipping_theta, batch_size, pred_period, pred_len,
                          prefixes)
| 2.40625 | 2 |
python_work/Chapter5/cars.py | Elektra-2/python_crash_course_2nd | 1 | 12764895 | <reponame>Elektra-2/python_crash_course_2nd<gh_stars>1-10
# Simple example using ifs
cars = ['volks', 'ford', 'audi', 'bmw', 'toyota']
# Print each make: BMW in all caps, everything else title-cased.
for car in cars:
    if car == 'bmw':
        print(car.upper())
    else:
        print(car.title())
# Comparing Values
#
# requested_toppings = 'mushrooms'
# if requested_toppings != 'anchovies':
#     print('Hold the Anchovies')
#
#
# # Checking a value not in a list
#
# banned_users = ['andrew', 'claudia', 'jorge']
# user = 'marie'
#
# if user not in banned_users:
#     print(f'{user.title()}, you can post a message here')
# Numeric comparison: voting-age check.
age = 18
if age >= 18:
    print('OK to vote')
| 3.625 | 4 |
etils/epy/itertools_test.py | google/etils | 13 | 12764896 | <reponame>google/etils<filename>etils/epy/itertools_test.py<gh_stars>10-100
# Copyright 2022 The etils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for itertools."""
from __future__ import annotations
from etils import epy
import pytest
def test_group_by():
    """groupby partitions values by key, preserving input order per group."""
    out = epy.groupby(
        [0, 30, 2, 4, 2, 20, 3],
        key=lambda x: x < 10,
    )
    # Order is consistent with above
    assert out == {
        True: [0, 2, 4, 2, 3],
        False: [30, 20],
    }
def test_group_by_value():
    """groupby applies the value= transform to each grouped element."""
    out = epy.groupby(
        ['111', '1', '11', '11', '4', '555'],
        key=len,
        value=int,
    )
    # Order is consistent with above
    assert out == {
        1: [1, 4],
        2: [11, 11],
        3: [111, 555],
    }
def test_zip_dict():
    """zip_dict pairs values key-wise, keeps insertion order, and raises
    KeyError on mismatched key sets (in either direction)."""
    d0 = {'a': 1, 'b': 2}
    d1 = {'a': 10, 'b': 20}
    assert list(epy.zip_dict(d0, d1)) == [
        ('a', (1, 10)),
        ('b', (2, 20)),
    ]
    # Order is preserved
    d0 = {'b': 1, 'a': 2}
    d1 = {'b': 10, 'a': 20}
    assert list(epy.zip_dict(d0, d1)) == [
        ('b', (1, 10)),
        ('a', (2, 20)),
    ]
    d0 = {'a': 1}
    d1 = {'a': 10, 'b': 20}
    with pytest.raises(KeyError):
        list(epy.zip_dict(d0, d1))
    with pytest.raises(KeyError):
        list(epy.zip_dict(d1, d0))
def test_zip_dict_three():
    """zip_dict generalises to three dicts; any extra or missing key raises."""
    d0 = {'a': 1, 'b': 2}
    d1 = {'a': 10, 'b': 20}
    d2 = {'a': 100, 'b': 200}
    assert list(epy.zip_dict(d0, d1, d2)) == [
        ('a', (1, 10, 100)),
        ('b', (2, 20, 200)),
    ]
    d2 = {'a': 100, 'b': 200, 'c': 300}
    with pytest.raises(KeyError):
        list(epy.zip_dict(d0, d1, d2))
    d2 = {'a': 100, 'c': 300}
    with pytest.raises(KeyError):
        list(epy.zip_dict(d0, d1, d2))
def test_issubclass():
    """epy.issubclass tolerates non-type first arguments (unlike the builtin,
    which would raise TypeError on `1`)."""
    assert not epy.issubclass(1, int)
    assert epy.issubclass(bool, int)
| 2.375 | 2 |
models/xsts.py | tefra/xsdata-w3c-tests | 1 | 12764897 | from dataclasses import dataclass, field
from decimal import Decimal
from enum import Enum
from typing import Dict, List, Optional, Union
from xsdata.models.datatype import XmlDate
from models.xlink import TypeType
from models.xml import LangValue
__NAMESPACE__ = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
class XdmFiltering(Enum):
    """Whether comments and processing instructions are retained in the XDM
    instance that XSD 1.1 assertions are evaluated against.

    ``comments-and-PIs-excluded`` is the default described in Structures
    sec. 3.13.4.1 ("Assertion satisfied", clause 1.2): they are stripped
    before assertion evaluation.  ``comments-and-PIs-included`` is the
    user-option alternative in which assertions can examine them.  The two
    tokens were added (January 2012, W3C bug 13935) so both processor
    configurations can be tested.
    """
    COMMENTS_AND_PIS_EXCLUDED = "comments-and-PIs-excluded"
    COMMENTS_AND_PIS_INCLUDED = "comments-and-PIs-included"
@dataclass
class Appinfo:
    """xsdata-generated model for the test-suite ``appinfo`` element:
    an optional ``source`` URI attribute, arbitrary foreign-namespace
    attributes, and mixed any-namespace wildcard content."""
    class Meta:
        name = "appinfo"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    # Optional 'source' attribute (typically a documentation URI).
    source: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
        }
    )
    # Any attributes from namespaces other than the target namespace.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
    # Mixed wildcard content: text interleaved with elements of any namespace.
    content: List[object] = field(
        default_factory=list,
        metadata={
            "type": "Wildcard",
            "namespace": "##any",
            "mixed": True,
        }
    )
class ExpectedOutcome(Enum):
    """Prescribed outcome of a test case.

    Extends the plain test-outcome values (``valid`` / ``invalid`` /
    ``notKnown`` / ``runtime-schema-error``) with three extra markers:

    * ``implementation-defined`` -- the result depends on behaviour the
      spec explicitly leaves for implementations to define and document.
    * ``implementation-dependent`` -- the result varies among conforming
      implementations without being implementation-defined.
    * ``indeterminate`` -- the spec under-determines, over-determines, or
      the working group disputes, what the result should be.

    ``invalid-latent`` is retained only for compatibility with older test
    sets; version keywords for complex-type restriction behaviours now
    describe those cases more precisely.
    """
    VALID = "valid"
    INVALID = "invalid"
    NOT_KNOWN = "notKnown"
    RUNTIME_SCHEMA_ERROR = "runtime-schema-error"
    IMPLEMENTATION_DEFINED = "implementation-defined"
    IMPLEMENTATION_DEPENDENT = "implementation-dependent"
    INDETERMINATE = "indeterminate"
    INVALID_LATENT = "invalid-latent"
class KnownToken(Enum):
    """Tokens denoting well-known (documented) versions, features, or
    implementation-defined behaviors of XSD.

    The schema's ``known-token`` type is a union of several enumerated
    sub-types, each defining keywords for a set of mutually exclusive
    versions, features, or behaviors; in any given schema validation
    episode, at most one keyword in any sub-type applies.  See the
    individual sub-type enums below (``KnownXsdVersion``,
    ``RuntimeSchemaError``, ``UnicodeVersions``, ``XmlSubstrate``,
    ``XpathInCta``, ``Xsd10Editions``) for the meaning of each keyword.
    """
    VALUE_1_0 = "1.0"
    VALUE_1_1 = "1.1"
    VALUE_1_0_1E = "1.0-1e"
    VALUE_1_0_2E = "1.0-2e"
    XML_1_0 = "XML-1.0"
    XML_1_0_1E_4E = "XML-1.0-1e-4e"
    XML_1_0_5E = "XML-1.0-5e"
    XML_1_1 = "XML-1.1"
    UNICODE_4_0_0 = "Unicode_4.0.0"
    UNICODE_6_0_0 = "Unicode_6.0.0"
    CTR_ALL_COMPILE = "CTR-all-compile"
    CTR_ALL_RUNTIME = "CTR-all-runtime"
    CTR_ALL_IDEP = "CTR-all-idep"
    RESTRICTED_XPATH_IN_CTA = "restricted-xpath-in-CTA"
    FULL_XPATH_IN_CTA = "full-xpath-in-CTA"
    COMMENTS_AND_PIS_EXCLUDED = "comments-and-PIs-excluded"
    COMMENTS_AND_PIS_INCLUDED = "comments-and-PIs-included"
class KnownXsdVersion(Enum):
    """Tokens denoting specific known versions of XSD.

    ``1.0`` denotes XSD 1.0 and ``1.1`` denotes XSD 1.1, in each case
    without reference to a particular edition.
    """
    VALUE_1_0 = "1.0"
    VALUE_1_1 = "1.1"
class RuntimeSchemaError(Enum):
    """Tokens denoting implementation-defined behavior in the presence of a
    faulty restriction in a complex-type definition.

    If an ``all``-group in a restriction allows content not allowed by the
    base type, the processor is not required to detect the problem by
    inspecting the schema in isolation.  Three behaviors are allowed, and
    the choice among them is implementation-defined:

    - ``CTR-all-compile`` -- compile-time detection: the processor always
      detects the problem by examining the schema in isolation, and
      warrants that no non-conforming schema is ever used in validation.
    - ``CTR-all-runtime`` -- run-time detection: the processor detects the
      problem only when an instance is valid against the restriction but
      not against the base type.  If no instance forces recognition of the
      fault, a non-conforming schema will have been used, though results
      are the same as if the error had been corrected.  (Processors that
      do not check the declaration in isolation must validate each
      instance against both its governing type and the base type.)
    - ``CTR-all-idep`` -- implementation-dependent detection: sometimes at
      schema-examination time, sometimes at instance time; no guarantees.

    NOTE(schema, 2012-01-20): it is an open question whether this
    distinction is still required after the resolution of bug 12185
    (conditional type assignment and substitutability).
    """
    CTR_ALL_COMPILE = "CTR-all-compile"
    CTR_ALL_RUNTIME = "CTR-all-runtime"
    CTR_ALL_IDEP = "CTR-all-idep"
class Status(Enum):
    """Review status of a test within the XSTS, as used by the ``status``
    attribute of status entries (``current``/``prior``)."""
    ACCEPTED = "accepted"
    STABLE = "stable"
    QUERIED = "queried"
    DISPUTED_TEST = "disputed-test"
    DISPUTED_SPEC = "disputed-spec"
class TestOutcome(Enum):
    """Possible outcomes of running a test -- usually the value of the
    ``[validity]`` property on the validation root.

    - ``valid`` -- instance tests: the validation root's ``[validity]`` is
      ``valid``; schema tests: the schema is conforming.
    - ``invalid`` -- instance tests: the validation root's ``[validity]``
      is ``invalid``; schema tests: the schema is not conforming.
    - ``notKnown`` -- instance tests: the validation root's ``[validity]``
      is ``notKnown``; meaningless for schema tests.  (Processors built as
      instance validators are not required by XSD to distinguish
      ``invalid`` from ``notKnown``, though doing so is desirable.)
    - ``runtime-schema-error`` -- instance tests only: the schema has a
      latent error that the processor did not detect on the corresponding
      schema test, but the instance exposed it (content valid against the
      apparent content model of the governing type but not against the
      base type) and the processor detected the schema error during
      instance validation.  Processors are encouraged, though not
      required, to distinguish this from ``invalid``.  The value is
      meaningless (a contradiction in terms) for schema tests.
    """
    VALID = "valid"
    INVALID = "invalid"
    NOT_KNOWN = "notKnown"
    RUNTIME_SCHEMA_ERROR = "runtime-schema-error"
class TestSuiteResultsPublicationPermission(Enum):
    """Degree to which a result reporter authorizes the W3C to disseminate
    reported results: to W3C members only, or publicly."""
    W3_C_MEMBERS = "W3C members"
    PUBLIC = "public"
class UnicodeVersions(Enum):
    """Tokens denoting specific known versions of Unicode.

    The list is not complete: reference results are published only for
    versions 4.0.0 and 6.0.0, the only cases where results are known to
    vary between Unicode versions.  Implementors may submit reference
    results for other versions.
    """
    UNICODE_4_0_0 = "Unicode_4.0.0"
    UNICODE_6_0_0 = "Unicode_6.0.0"
class XmlSubstrate(Enum):
    """Tokens denoting the version of XML on which XML-dependent XSD
    datatypes are based.

    Conforming XSD 1.1 processors may support XML 1.0-based datatypes,
    XML 1.1-based datatypes, or both; whether conforming XSD 1.0
    processors may support XML 1.1-based datatypes is disputed in the
    working group.

    - ``XML-1.0`` -- XML 1.0 without specifying an edition.  Retained for
      backward compatibility of this schema; avoid it unless there is no
      difference, for a given test or result, between editions 1-4 and
      edition 5 of XML 1.0 (prefer the edition-specific tokens below).
    - ``XML-1.0-1e-4e`` -- XML 1.0 First through Fourth Edition.
    - ``XML-1.0-5e`` -- XML 1.0 Fifth Edition.  (XSD 1.1 normatively
      references Fifth Edition, so in theory the 1e-4e/5e distinction
      matters only for XSD 1.0 processors; in practice it may also matter
      for some XSD 1.1 processors.)
    - ``XML-1.1`` -- XML 1.1 (only one edition at present).

    In most cases ``XML-1.0-5e`` and ``XML-1.1`` describe the same
    behaviors.
    """
    XML_1_0 = "XML-1.0"
    XML_1_0_1E_4E = "XML-1.0-1e-4e"
    XML_1_0_5E = "XML-1.0-5e"
    XML_1_1 = "XML-1.1"
class XpathInCta(Enum):
    """Tokens distinguishing the XPath subset used by a test's conditional
    type assignment (CTA) expressions.

    Per "3.12.6 Constraints on Type Alternative Schema Components" of the
    Structures spec, every conforming processor must accept XPath
    expressions in the "required subset"; support for XPath outside the
    subset is permitted but not required.

    - ``restricted-xpath-in-CTA`` -- the test uses only the minimal subset
      required of all conforming 1.1 processors; all 1.1 processors should
      run tests marked with it.
    - ``full-xpath-in-CTA`` -- the test uses XPath outside the required
      subset; only processors supporting full XPath in CTA should run it.

    (Added 2011-07-29 to address bug 13455, "XPath subset causes
    problem".)
    """
    RESTRICTED_XPATH_IN_CTA = "restricted-xpath-in-CTA"
    FULL_XPATH_IN_CTA = "full-xpath-in-CTA"
class Xsd10Editions(Enum):
    """Tokens denoting specific editions of XSD 1.0: ``1.0-1e`` (First
    Edition) and ``1.0-2e`` (Second Edition).

    Outside the context of XSD 1.0 these edition identifiers have no
    meaning or applicability.
    """
    VALUE_1_0_1E = "1.0-1e"
    VALUE_1_0_2E = "1.0-2e"
@dataclass
class Documentation:
    """Human-readable documentation element (``ts:documentation``),
    carrying mixed wildcard content with an optional ``source`` URI and
    ``xml:lang`` attribute -- analogous to ``xsd:documentation``."""
    class Meta:
        name = "documentation"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    # Optional URI identifying where the documentation came from.
    source: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
        }
    )
    # xml:lang of the documentation text (note the XML namespace).
    lang: Optional[Union[str, LangValue]] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "namespace": "http://www.w3.org/XML/1998/namespace",
        }
    )
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
    # Mixed content: arbitrary text and elements from any namespace.
    content: List[object] = field(
        default_factory=list,
        metadata={
            "type": "Wildcard",
            "namespace": "##any",
            "mixed": True,
        }
    )
@dataclass
class Expected:
    """The validation outcome prescribed by the spec for a test in the
    XSTS.

    Attributes:

    - ``validity`` (required) -- the expected outcome, of type
      ``ExpectedOutcome``.  For an instance test, typically the expected
      value of the ``[validity]`` property on the root element of the
      instance document (or an indication that it may vary among
      processors).  For a schema test, whether the schema built from the
      test's schema documents is conforming (``valid``) or non-conforming
      (``invalid``); ``notKnown`` has no meaning for a schema test.
    - ``version`` (optional) -- a list of version tokens.  NOTE: unlike on
      tests and test groups (where the tokens are implicitly OR-ed), the
      tokens here are AND-ed: the prescribed result applies only to
      processor configurations supporting *all* of the listed versions or
      features.  It is an error for more than one ``expected`` element to
      apply to a given configuration, which is most easily avoided by
      giving sibling ``expected`` elements mutually exclusive tokens --
      and, as a matter of test-suite design, by keeping ``version`` to a
      single token where possible.
    """
    class Meta:
        name = "expected"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    validity: Optional[ExpectedOutcome] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class Annotation:
    """An exact copy of the ``annotation`` element defined in the Schema
    Recommendation, duplicated here to replicate ``xsd:annotation``
    because the Schema for Schemas cannot be imported.  Holds ``appinfo``
    (machine-readable) and ``documentation`` (human-readable) children."""
    class Meta:
        name = "annotation"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    appinfo: List[Appinfo] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    documentation: List[Documentation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class Ref:
    """Base type for XLink locator elements: an ``xlink:href`` link with
    ``xlink:type`` fixed to ``locator`` by default, plus optional
    annotations.  Subclassed by the various link elements below
    (``schemaDocument``, ``instanceDocument``, ``testSetRef``, ...)."""
    class Meta:
        name = "ref"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.w3.org/XML/2004/xml-schema-test-suite/",
        }
    )
    # xlink:type, defaulting to "locator".
    type: TypeType = field(
        default=TypeType.LOCATOR,
        metadata={
            "type": "Attribute",
            "namespace": "http://www.w3.org/1999/xlink",
        }
    )
    # xlink:href -- the URI of the referenced document.
    href: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "namespace": "http://www.w3.org/1999/xlink",
        }
    )
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class StatusEntry:
    """Base type for a test status record (see ``Current`` and ``Prior``):
    a required ``status`` keyword and ``date`` of the change, with an
    optional Bugzilla bug-report URL for cross-reference."""
    class Meta:
        name = "statusEntry"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.w3.org/XML/2004/xml-schema-test-suite/",
        }
    )
    status: Optional[Status] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    date: Optional[XmlDate] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    # URL of a W3C Bugzilla entry; constrained to the show_bug.cgi form.
    bugzilla: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "pattern": r"http://www\.w3\.org/Bugs/Public/show_bug\.cgi\?id=[0-9]*",
        }
    )
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class TestResult:
    """The result of an individual instance test or schema test.

    Required attributes:

    - ``validity`` -- the validation outcome of the test (``valid``,
      ``invalid``, ``notKnown``, or ``runtime-schema-error``).
    - ``set``, ``group``, ``test`` -- the values of the ``name``
      attributes of, respectively, the test set, test group, and
      schema/instance test in the XSTS; together they uniquely identify
      the test whose outcome this result reports.

    Optional:

    - ``normalizedLoad`` -- a relative load value indicating the resource
      requirements of the test (processing time, memory usage, or a
      combination); values should be in the vicinity of 1.0.
    - ``annotation`` children -- reporters are encouraged to use
      ``annotation/appinfo`` for detailed outcome information such as
      error and warning messages.
    """
    class Meta:
        name = "testResult"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    validity: Optional[TestOutcome] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    set: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    group: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    test: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    normalized_load: Optional[Decimal] = field(
        default=None,
        metadata={
            "name": "normalizedLoad",
            "type": "Attribute",
        }
    )
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class Current(StatusEntry):
    """The current status of a test in the XSTS.

    Two required attributes (inherited from ``StatusEntry``):

    - ``status`` -- one of ``accepted``, ``stable``, ``disputed-test`` or
      ``disputed-spec`` (see the XSTS website for explanations).
    - ``date`` -- when the test or its metadata (including the ``status``
      value, but also anything else of importance) was last changed.
    """
    class Meta:
        name = "current"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class DocumentationReference(Ref):
    """A link to documentation relevant to a test, such as the
    Recommendation, an erratum, or an archived email discussion."""
    class Meta:
        name = "documentationReference"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class InstanceDocument(Ref):
    """An XLink locator referencing the file containing the instance
    document of an ``instanceTest``."""
    class Meta:
        name = "instanceDocument"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class Prior(StatusEntry):
    """A former status of a test in the XSTS; together with ``current``
    these entries form the test's change history.

    Two required attributes (inherited from ``StatusEntry``):

    - ``status`` -- one of ``accepted``, ``stable``, ``disputed-test`` or
      ``disputed-spec`` (see the XSTS website for explanations).
    - ``date`` -- when the test or its metadata was changed.
    """
    class Meta:
        name = "prior"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class SchemaDocument(Ref):
    """An XLink locator referencing a file containing a schema document
    for a ``schemaTest``."""
    class Meta:
        name = "schemaDocument"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class TestSetRef(Ref):
    """An XLink locator referencing a test-set document.  Presumably used
    from the test-suite root to pull in its test sets -- TODO confirm
    against the testSuite content model."""
    class Meta:
        name = "testSetRef"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
@dataclass
class TestSuiteResults:
    """Root element of a test-result report: the set of results a
    processor/validator returned when run against the XSTS.

    Required attributes:

    - ``suite`` -- the ``name`` of the test suite these results
      correspond to (matching the ``testSuite`` root of the test-suite
      document).
    - ``processor`` -- identifying information for the validator that
      produced the results; the exact value is at the reporter's
      discretion.
    - ``submitDate`` -- the date the results were submitted to the XSTS
      Task Force.

    Optional:

    - ``publicationPermission`` -- how widely the W3C may disseminate the
      results ("W3C members" or "public"); if absent, no permission to
      publish is granted.
    - ``annotation`` children -- commentary regarding the enclosed
      results.
    - ``testResult`` children -- individual test results.  Any results may
      be omitted, particularly for tests of features the processor does
      not claim to support.
    """
    class Meta:
        name = "testSuiteResults"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    test_result: List[TestResult] = field(
        default_factory=list,
        metadata={
            "name": "testResult",
            "type": "Element",
        }
    )
    suite: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    processor: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    submit_date: Optional[XmlDate] = field(
        default=None,
        metadata={
            "name": "submitDate",
            "type": "Attribute",
            "required": True,
        }
    )
    publication_permission: Optional[TestSuiteResultsPublicationPermission] = field(
        default=None,
        metadata={
            "name": "publicationPermission",
            "type": "Attribute",
        }
    )
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class InstanceTest:
    """An instance document to be validated against the schema referenced
    in the enclosing ``testGroup``.

    Although XSD 5.2 ("Assessing Schema-Validity") allows validation to be
    started in several ways, for the XSTS only one is used: the processor
    starts from Schema-Validity Assessment (Element) with no stipulated
    declaration or definition, and the validation root is the outermost
    element of the instance document.

    Required: the ``name`` attribute (distinct from every other
    ``schemaTest``/``instanceTest`` name in the enclosing ``testGroup``)
    and the ``instanceDocument`` link.

    Optional ``version`` attribute: a list of version tokens naming the
    XSD versions/features to which the test applies.  Processors
    supporting *any* listed token should run the test; if absent, all
    processors that have not already skipped the enclosing group, set, or
    suite should run it.  (Running an instance test under an inapplicable
    version may fail on non-conformant constructs in the schema, and any
    results are then meaningless.)

    Optional children: ``annotation``; repeatable ``expected`` (the
    prescribed outcome, one element per applicable version set);
    ``current`` (the test's current XSTS status -- an indication of its
    accuracy in testing the feature it is intended to test); and ``prior``
    (status-change history).  ``expected`` and ``current`` may be absent
    on contributed tests but are always present for tests in the XSTS.
    ``current``/``prior`` keep the change history as described on
    ``SchemaTest``.
    """
    class Meta:
        name = "instanceTest"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    instance_document: Optional[InstanceDocument] = field(
        default=None,
        metadata={
            "name": "instanceDocument",
            "type": "Element",
            "required": True,
        }
    )
    expected: List[Expected] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    current: Optional[Current] = field(
        default=None,
        metadata={
            "type": "Element",
        }
    )
    prior: List[Prior] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class SchemaTest:
    """A schema test: information about the schema for a test group.

    Required: the ``name`` attribute (unique within the enclosing
    ``testGroup``, i.e. distinct from any associated ``instanceTest``
    names) and at least one ``schemaDocument`` link; the schema under test
    is constructed from the referenced documents (and anything they
    import).

    Optional ``version`` attribute: a list of version tokens naming the
    XSD versions/features for which the test is meaningful.  Processors
    supporting *any* listed token should run the test; if absent, all
    processors that have not already skipped the enclosing group, set, or
    suite should run it.  Note the token is in some sense advisory: any
    schema test is meaningful for any processor configuration -- for
    configurations supporting none of the named features, the expected
    result is simply that the schema is not conforming, and this is *not*
    indicated with an explicit ``expected`` element.

    Optional children: ``annotation``; ``expected`` (conformance verdict:
    ``valid`` = conforming schema, ``invalid`` = non-conforming);
    ``current`` (the test's current XSTS status) and ``prior`` (status
    history).  ``expected`` and ``current`` may be absent on contributed
    tests but are always present for tests in the XSTS.

    ``current``/``prior`` should be used to keep a general change history:
    whenever anything of importance changes (status change, expected
    result questioned/reaffirmed/changed, multiple expected results added
    for different configurations), clone the existing ``current`` element,
    rename the copy ``prior``, and record the change and its date in the
    new ``current`` element's ``annotation`` children.  Status changes
    involving bug reports should carry a Bugzilla cross-reference.
    """
    class Meta:
        name = "schemaTest"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    schema_document: List[SchemaDocument] = field(
        default_factory=list,
        metadata={
            "name": "schemaDocument",
            "type": "Element",
            "min_occurs": 1,
        }
    )
    expected: List[Expected] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    current: Optional[Current] = field(
        default=None,
        metadata={
            "type": "Element",
        }
    )
    prior: List[Prior] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class TestSuite:
    """Root element of a document describing a set of tests for one or
    more versions of W3C XML Schema.

    Required attributes:
        name: the name of this test suite.
        release_date: date on which this test suite was released; this
            value serves to identify the version of the test suite.
        schema_version: the XSD versions the tests are designed for
            (documentary only, intended for human readers).

    Optional attribute:
        version: list of version tokens indicating versions and features
            for which at least some tests in the suite are applicable.
            Processors supporting none of the tokens may skip the whole
            suite; an absent/empty value means all processors should run
            the tests. Logically this should include every token used on
            any contained testSet/testGroup/schemaTest/instanceTest.

    Optional children:
        annotation: zero or more instances of general documentation.
        test_set_ref: references to the test sets which make up this
            suite; no two referenced test sets may have the same name.
    """
    # XML binding metadata: element name and namespace.
    class Meta:
        name = "testSuite"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    test_set_ref: List[TestSetRef] = field(
        default_factory=list,
        metadata={
            "name": "testSetRef",
            "type": "Element",
        }
    )
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    release_date: Optional[XmlDate] = field(
        default=None,
        metadata={
            "name": "releaseDate",
            "type": "Attribute",
            "required": True,
        }
    )
    schema_version: Optional[str] = field(
        default=None,
        metadata={
            "name": "schemaVersion",
            "type": "Attribute",
            "required": True,
        }
    )
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    # Wildcard bucket for attributes from any other namespace.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class TestGroup:
    """A collection of closely related tests.

    All instance tests in the group are validated against the same
    schema: the schema produced by the group's schemaTest if present,
    otherwise a schema consisting only of the built-in components.

    Required attribute:
        name: identifier which differs from the name of any other
            testGroup in the enclosing testSet.

    Optional attribute:
        version: list of version tokens; the tests apply to
            implementations supporting any of the indicated versions or
            features, and processors supporting none of them may skip
            the group. Tokens used here should also appear on the
            enclosing testSet's version attribute.

    Optional children:
        annotation: zero or more instances of general documentation.
        documentation_reference: references to external documentation
            the test is based on (Recommendation sections, Errata, ...).
        schema_test: at most one schemaTest whose schemaDocument
            elements together define the schema for the group; when
            several documents are listed they are to be loaded one by
            one, in order. May be omitted to test validation against
            only the built-in datatypes.
        instance_test: any number of instance documents to be validated
            against that schema.
    """
    # XML binding metadata: element name and namespace.
    class Meta:
        name = "testGroup"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    documentation_reference: List[DocumentationReference] = field(
        default_factory=list,
        metadata={
            "name": "documentationReference",
            "type": "Element",
        }
    )
    schema_test: Optional[SchemaTest] = field(
        default=None,
        metadata={
            "name": "schemaTest",
            "type": "Element",
        }
    )
    instance_test: List[InstanceTest] = field(
        default_factory=list,
        metadata={
            "name": "instanceTest",
            "type": "Element",
        }
    )
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    # Wildcard bucket for attributes from any other namespace.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
@dataclass
class TestSet:
    """Root element of a document describing a set of tests, normally
    from a single contributor (who may supply any number of testSet
    files).

    Required attributes:
        contributor: name of the contributor of this testSet; may
            contain any string of characters, intended for human
            readers.
        name: unique among the names of testSet elements within the
            enclosing testSuite.

    Optional attribute:
        version: list of version tokens indicating versions and
            features for which at least some tests in the set are
            applicable; processors supporting none of the tokens may
            skip the set, and an absent/empty value means all
            processors should run the tests.

    Optional children:
        annotation: zero or more instances of general documentation.
        test_group: groups of closely related tests; no two testGroup
            elements in the same testSet may have the same name.
    """
    # XML binding metadata: element name and namespace.
    class Meta:
        name = "testSet"
        namespace = "http://www.w3.org/XML/2004/xml-schema-test-suite/"
    annotation: List[Annotation] = field(
        default_factory=list,
        metadata={
            "type": "Element",
        }
    )
    test_group: List[TestGroup] = field(
        default_factory=list,
        metadata={
            "name": "testGroup",
            "type": "Element",
        }
    )
    contributor: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
            "required": True,
        }
    )
    version: List[Union[KnownToken, Decimal, str]] = field(
        default_factory=list,
        metadata={
            "type": "Attribute",
            "tokens": True,
        }
    )
    # Wildcard bucket for attributes from any other namespace.
    other_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##other",
        }
    )
| 2.265625 | 2 |
tddd92-SC2-AI/ArmyHandler/squad.py | AxelGard/university-projects | 2 | 12764898 | class Squad():
    def __init__(self, handler, role, hold_point):
        """Create a squad managed by *handler* for the given *role*.

        The squad id is one greater than the id of the last existing
        squad of the same role, or 1 for the first squad of that role.
        ``hold_point`` is the position the squad holds while idle.
        """
        self.handler = handler
        if len(handler.squads[role]) > 0:
            self.id = handler.squads[role][-1].id + 1
        else:
            self.id = 1
        # Units currently assigned to this squad, keyed by unit type.
        self.units = {
            handler.unit.MARINE: [],
            handler.unit.SIEGETANK: []
        } # type : [unit]
        self.role = role
        # Desired unit counts for this role (type -> amount); queried by train().
        self.composition = self.handler.composition(self.role)
        self.full = False
        self.hold_point = hold_point
        self.on_mission = False
    def assign(self, unit):
        """Add *unit* to this squad under its unit type.

        NOTE(review): raises KeyError for unit types other than MARINE /
        SIEGETANK set up in __init__ -- confirm callers only pass those.
        """
        self.units[unit.unit_type].append(unit)
    def train(self):
        """Request training for every unit type below its target amount.

        ``self.full`` ends up True only when all unit types have reached
        the amounts required by the role's composition.
        """
        self.full = True
        for unit_type, amount in self.composition.items():
            if len(self.units[unit_type]) < amount:
                self.full = False
                #print("Train " + unit_type.name)
                self.handler.train(unit_type, amount - len(self.units[unit_type]))
    def move_to_hold_point(self):
        """Move the whole squad back to its configured hold point."""
        self.move(self.hold_point)
    def move(self, pos):
        """Move every unit of the squad towards *pos*.

        Sieged tanks farther than 3 from *pos* are un-sieged so they can
        move; tanks already within range are sieged to hold the spot.
        NOTE(review): assumes a module-level ``util`` with a
        ``distance(a, b)`` helper is imported above this class -- confirm.
        """
        for _, units in self.units.items():
            for unit in units:
                if util.distance(unit.position, pos) > 3:
                    if unit.unit_type == self.handler.unit.SIEGETANKSIEGED:
                        unit.morph(self.handler.unit.SIEGETANK)
                    unit.move(pos)
                elif unit.unit_type == self.handler.unit.SIEGETANK:
                    unit.morph(self.handler.unit.SIEGETANKSIEGED)
    def attack_move(self, pos):
        """Order every idle unit in the squad to attack-move towards *pos*.

        NOTE(review): morphing a SIEGETANK into SIEGETANK looks like a
        no-op; possibly SIEGETANKSIEGED units were meant to be un-sieged
        here before attacking -- confirm intent before changing.
        """
        for unit_type, units in self.units.items():
            for unit in units:
                if unit_type == self.handler.unit.SIEGETANK:
                    unit.morph(self.handler.unit.SIEGETANK)
                if unit.is_idle:
                    unit.attack_move(pos)
def is_empty(self):
return len(self.units[self.handler.unit.MARINE]) == 0 and len(self.units[self.handler.unit.SIEGETANK]) == 0
| 2.90625 | 3 |
Q049.py | Linchin/python_leetcode_git | 0 | 12764899 | <filename>Q049.py
"""
49
medium
group anagrams
Given an array of strings strs, group the anagrams together.
You can return the answer in any order.
"""
from typing import List
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group the words of *strs* into lists of anagrams.

        Words are bucketed by their sorted-character key, so two words
        land in the same bucket exactly when they are anagrams; the
        buckets are returned in first-seen order.
        """
        buckets: dict = {}
        for word in strs:
            key = "".join(sorted(word))
            buckets.setdefault(key, []).append(word)
        return list(buckets.values())
# Quick manual check against the LeetCode sample input.
sol = Solution()
strs = ["eat","tea","tan","ate","nat","bat"]
print(sol.groupAnagrams(strs))
| 4.125 | 4 |
src/zignalz/core/util.py | rscohn2/sensepy | 0 | 12764900 | <gh_stars>0
# SPDX-FileCopyrightText: 2020 <NAME>
#
# SPDX-License-Identifier: MIT
import logging
from subprocess import Popen, run

logger = logging.getLogger(__name__)


def shell(cmd, dry_run=False, check=True):
    """Run *cmd* through the system shell, unless *dry_run* is set.

    With check=True (the default), a non-zero exit status raises
    subprocess.CalledProcessError.
    """
    logger.info('shell: %s', cmd)
    if dry_run:
        return
    run(cmd, shell=True, check=check)


def background(cmd, dry_run=False):
    """Start *cmd* as a background process and return its Popen handle.

    The command string is split on single spaces; returns None when
    *dry_run* is set.
    """
    logger.info('background: %s', cmd)
    if dry_run:
        return
    return Popen(cmd.split(' '))
| 2.28125 | 2 |
GeneratorInterface/GenFilters/python/ZgammaFilter_cfi.py | ckamtsikis/cmssw | 852 | 12764901 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms

# values tuned also according to slide 3 of :
# https://indico.cern.ch/getFile.py/access?contribId=23&sessionId=2&resId=0&materialId=slides&confId=271548
# selection efficiency of approx 6% for ZMM_8TeV

# Generator-level Z+gamma selection: reads the unsmeared HepMC product and
# applies photon/lepton pt and eta cuts, a minimum dilepton mass and a
# minimum dilepton+photon mass (values in the units expected by
# ZgammaMassFilter -- presumably GeV; confirm against the filter plugin).
myZgammaFilter = cms.EDFilter('ZgammaMassFilter',
                              HepMCProduct = cms.InputTag("generator","unmeared"),
                              minPhotonPt = cms.double(7.),
                              minLeptonPt = cms.double(7.),
                              minPhotonEta = cms.double(-3),
                              minLeptonEta = cms.double(-3),
                              maxPhotonEta = cms.double(3),
                              maxLeptonEta = cms.double(3),
                              minDileptonMass = cms.double(30.),
                              minZgMass = cms.double(40.)
                              )

# Sequence wrapper so configurations can schedule the filter by name.
ZgammaFilter = cms.Sequence( myZgammaFilter )
| 1.398438 | 1 |
SublimeText3_3176/Data/Packages/SublimeTextXdebug-master/xdebug/config.py | xiexie1993/Tool_Sublime_Text3_for_Windows | 1 | 12764902 | import sublime
# Settings variables.
# Prefer the package-relative import; fall back to a plain import for the
# flat plugin layout. ImportError covers a missing module; ValueError is
# what Python 2 raises for a relative import outside a package.
try:
    from . import settings as S
except (ImportError, ValueError):
    import settings as S
def load_project_values():
    """Load per-project Xdebug settings into S.CONFIG_PROJECT.

    Reads the 'xdebug' key from the active view's settings, which holds
    a dictionary with project values for the package. Failures (e.g. no
    active window/view yet) are ignored, leaving the previous value.
    """
    try:
        settings = sublime.active_window().active_view().settings()
        # Use 'xdebug' as key which contains dictionary with project values for package
        S.CONFIG_PROJECT = settings.get(S.KEY_XDEBUG)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        pass
def load_package_values():
    """Load default/user package settings into S.CONFIG_PACKAGE.

    Copies every key from S.CONFIG_KEYS that is present in the package
    settings file into an in-memory dict; on failure the cache is reset
    to whatever was collected before the error (possibly empty).
    """
    # Clear previous settings
    config = {}
    try:
        # Load default/user package settings
        settings = sublime.load_settings(S.FILE_PACKAGE_SETTINGS)
        # Loop through all configuration keys
        for key in S.CONFIG_KEYS:
            # Set in config if available
            if settings and settings.has(key):
                config[key] = settings.get(key)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        pass
    # Set settings in memory
    S.CONFIG_PACKAGE = config
def get_value(key, default_value=None):
    """
    Get value from package/project configuration settings.

    Project configuration takes precedence over package configuration;
    *default_value* is returned when neither source has the key.
    """
    for lookup in (get_project_value, get_package_value):
        value = lookup(key)
        if value is not None:
            return value
    return default_value
def get_package_value(key, default_value=None):
    """
    Get value from default/user package configuration settings.

    Falls back to the in-memory cache (S.CONFIG_PACKAGE) and finally to
    *default_value*.
    """
    try:
        config = sublime.load_settings(S.FILE_PACKAGE_SETTINGS)
        if config and config.has(key):
            return config.get(key)
    except RuntimeError:
        # Settings may only be loadable on the main thread; schedule a
        # cache refresh there and answer from the cache below.
        sublime.set_timeout(lambda: load_package_values(), 0)
        if S.CONFIG_PACKAGE:
            if key in S.CONFIG_PACKAGE:
                return S.CONFIG_PACKAGE[key]
    return default_value
def get_project_value(key, default_value=None):
    """
    Get value from project configuration settings.

    Refreshes the S.CONFIG_PROJECT cache (on the main thread if needed)
    and looks the key up there, falling back to *default_value*.
    """
    # Load project coniguration settings
    try:
        load_project_values()
    except RuntimeError:
        # Settings access may be main-thread only; refresh the cache there.
        sublime.set_timeout(lambda: load_project_values(), 0)
    # Find value in project configuration
    if S.CONFIG_PROJECT:
        if key in S.CONFIG_PROJECT:
            return S.CONFIG_PROJECT[key]
    # Otherwise use default value
    return default_value
def get_window_value(key, default_value=None):
    """
    Get value from window session settings.

    NOTE: Window object in Sublime Text 2 has no Settings, in which case
    *default_value* is returned.
    """
    try:
        settings = sublime.active_window().settings()
        if settings.has(S.KEY_XDEBUG):
            xdebug = settings.get(S.KEY_XDEBUG)
            if isinstance(xdebug, dict) and key in xdebug.keys():
                return xdebug[key]
    except Exception:
        # Narrowed from a bare except (e.g. ST2 windows lack settings()).
        pass
    return default_value
def set_package_value(key, value=None):
    """
    Set value in package configuration settings.

    Passing value=None erases the key instead of setting it.
    """
    try:
        config = sublime.load_settings(S.FILE_PACKAGE_SETTINGS)
        if value is not None:
            config.set(key, value)
        elif config and config.has(key):
            return config.erase(key)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        pass
def set_project_value(key, value=None):
    """
    Set value in project configuration settings.

    Passing value=None removes the key. Returns False when there is no
    project file for the active window, True after saving the data.
    """
    # Unable to set project value if no project file
    if not sublime.active_window().project_file_name():
        return False
    # Get current project data
    project = sublime.active_window().project_data()
    # Make sure project data is a dictionary
    if not isinstance(project, dict):
        project = {}
    # Create settings entries if they are undefined
    if S.KEY_SETTINGS not in project.keys() or not isinstance(project[S.KEY_SETTINGS], dict):
        project[S.KEY_SETTINGS] = {}
    if S.KEY_XDEBUG not in project[S.KEY_SETTINGS].keys() or not isinstance(project[S.KEY_SETTINGS][S.KEY_XDEBUG], dict):
        project[S.KEY_SETTINGS][S.KEY_XDEBUG] = {}
    # Update Xdebug settings
    if value is not None:
        project[S.KEY_SETTINGS][S.KEY_XDEBUG][key] = value
    elif key in project[S.KEY_SETTINGS][S.KEY_XDEBUG].keys():
        del project[S.KEY_SETTINGS][S.KEY_XDEBUG][key]
    # Save project data
    sublime.active_window().set_project_data(project)
    return True
def set_window_value(key, value=None):
    """
    Set value in window session settings.

    Passing value=None removes the key.
    NOTE: Window object in Sublime Text 2 has no Settings.
    """
    try:
        settings = sublime.active_window().settings()
        if settings.has(S.KEY_XDEBUG):
            xdebug = settings.get(S.KEY_XDEBUG)
        else:
            xdebug = {}
        if value is not None:
            xdebug[key] = value
        elif key in xdebug.keys():
            del xdebug[key]
        settings.set(S.KEY_XDEBUG, xdebug)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        pass
master_thesis_doc/img/statsWilcoxon_examples.py | nicolai-schwartze/Masterthesis | 1 | 12764903 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 15:42:31 2020
@author: Nicolai
"""
import seaborn as sns
import sys
sys.path.append("../../code/post_proc/")
import post_proc as pp
import matplotlib.pyplot as plt
import numpy as np
if __name__ == "__main__":
###############################
# significantly better #
###############################
a = [0,1,1,2,2,2,3,3,3,3,4,4,4,4,4,5,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,8,8,8,9]
b = [0,1,2,3,3,3,4,4,4,5,5,5,7,8,8,9,9,9,9,9,9,10,10,10,10,10,10,10,10,11,11,11,11,11,11]
b = [x + 5 for x in b]
f, ax_hist = plt.subplots()
sns.distplot(a, ax=ax_hist)
ax_hist.axvline(np.mean(a), color='g', linestyle='--')
ax_hist.axvline(np.median(a), color='g', linestyle='-')
sns.distplot(b, ax=ax_hist)
ax_hist.axvline(np.mean(b), color='r', linestyle='--')
ax_hist.axvline(np.median(b), color='r', linestyle='-')
plt.legend({'mean(a)':np.mean(a),'median(a)':np.median(a), 'mean(b)':np.mean(b),'median(b)':np.median(b)})
stat_result = pp.statsWilcoxon(a, b)
plt.title("statsWilcoxon: " + "a is " + stat_result + " than b")
plt.savefig("./pdf/sig_better.pdf", bbox_inches='tight')
plt.show()
##############################
# signifcantly worse #
##############################
a = [0,1,1,2,2,2,3,3,3,3,4,4,4,4,4,5,5,5,5,5,5,5,6,6,6,6,6,6,7,7,7,8,8,8,9]
b = [0,1,2,3,3,3,4,4,4,5,5,5,7,8,8,9,9,9,9,9,9,10,10,10,10,10,10,10,10,11,11,11,11,11,11]
a = [x + 5 for x in a]
b = [x - 5 for x in b]
f, ax_hist = plt.subplots()
sns.distplot(a, ax=ax_hist)
ax_hist.axvline(np.mean(a), color='r', linestyle='--')
ax_hist.axvline(np.median(a), color='r', linestyle='-')
sns.distplot(b, ax=ax_hist)
ax_hist.axvline(np.mean(b), color='g', linestyle='--')
ax_hist.axvline(np.median(b), color='g', linestyle='-')
plt.legend({'mean(a)':np.mean(a),'median(a)':np.median(a), 'mean(b)':np.mean(b),'median(b)':np.median(b)})
stat_result = pp.statsWilcoxon(a, b)
plt.title("statsWilcoxon: " + "a is " + stat_result + " than b")
plt.savefig("./pdf/sig_worse.pdf", bbox_inches='tight')
plt.show()
#################################
# unsignificantly better #
#################################
b = [0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,3,3,3,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9]
a = [0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,2,2,3,3,3,4,5,5,6,7,7,8,8,9,9,9,9,9,9,9,9,9,9,9,9]
f, ax_hist = plt.subplots()
sns.distplot(a, ax=ax_hist)
ax_hist.axvline(np.mean(a), color='g', linestyle='--')
ax_hist.axvline(np.median(a), color='g', linestyle='-')
sns.distplot(b, ax=ax_hist)
ax_hist.axvline(np.mean(b), color='r', linestyle='--')
ax_hist.axvline(np.median(b), color='r', linestyle='-')
plt.legend({'mean(a)':np.mean(a),'median(a)':np.median(a), 'mean(b)':np.mean(b),'median(b)':np.median(b)})
stat_result = pp.statsWilcoxon(a, b)
plt.title("statsWilcoxon: " + "a is " + stat_result + " than b")
plt.savefig("./pdf/unsig_better.pdf", bbox_inches='tight')
plt.show()
##################################
# unsignificantly worse #
##################################
a = [0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,3,3,3,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9]
b = [0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,2,2,3,3,3,4,5,5,6,7,7,8,8,9,9,9,9,9,9,9,9,9,9,9,9]
f, ax_hist = plt.subplots()
sns.distplot(a, ax=ax_hist)
ax_hist.axvline(np.mean(a), color='r', linestyle='--')
ax_hist.axvline(np.median(a), color='r', linestyle='-')
sns.distplot(b, ax=ax_hist)
ax_hist.axvline(np.mean(b), color='g', linestyle='--')
ax_hist.axvline(np.median(b), color='g', linestyle='-')
plt.legend({'mean(a)':np.mean(a),'median(a)':np.median(a), 'mean(b)':np.mean(b),'median(b)':np.median(b)})
stat_result = pp.statsWilcoxon(a, b)
plt.title("statsWilcoxon: " + "a is " + stat_result + " than b")
plt.savefig("./pdf/unsig_worse.pdf", bbox_inches='tight')
plt.show()
#################################
# unsignificantly undecided #
#################################
a = [0,0,0,0,0,1,1,1,1,1,2,2,2,3,3,4,5,5,5,6,6,6,7,7,7,7,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9]
b = [0,0,0,0,0,1,1,2,2,2,3,3,3,3,4,5,5,5,6,6,6,6,6,6,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9]
f, ax_hist = plt.subplots()
sns.distplot(a, ax=ax_hist)
ax_hist.axvline(np.mean(a), color='g', linestyle='--')
ax_hist.axvline(np.median(a), color='g', linestyle='-')
sns.distplot(b, ax=ax_hist)
ax_hist.axvline(np.mean(b), color='r', linestyle='--')
ax_hist.axvline(np.median(b), color='r', linestyle='-')
plt.legend({'mean(a)':np.mean(a),'median(a)':np.median(a), 'mean(b)':np.mean(b),'median(b)':np.median(b)})
stat_result = pp.statsWilcoxon(a, b)
plt.title("statsWilcoxon: " + "a is " + stat_result + " than b")
plt.savefig("./pdf/unsig_undecided.pdf", bbox_inches='tight')
plt.show()
| 2.109375 | 2 |
tests/integration/test_dummy.py | vbpns/fastapi-boilerplate | 0 | 12764904 | <reponame>vbpns/fastapi-boilerplate<filename>tests/integration/test_dummy.py
import unittest
# Arquivo de exemplo para testes de integracao
# Fontes:
# https://realpython.com/python-testing/#writing-your-first-test
class TestDummy2(unittest.TestCase):
    """Example integration test case (see the sources referenced above)."""

    @staticmethod
    def sum(arg):
        """Return the sum of the values in *arg*.

        Bug fix: this was declared as an instance method without 'self',
        so calling it on an instance raised TypeError and it was never
        exercised (the test used the builtin sum). It is now a
        staticmethod and used by test_dummy2 below.
        """
        total = 0
        for val in arg:
            total += val
        return total

    def test_dummy2(self):
        """Example of a test case."""
        data = [1, 2, 3, 4]
        result = self.sum(data)
        self.assertEqual(result, 10)
| 3.171875 | 3 |
rubra/utils.py | bjpop/rubra | 14 | 12764905 | <reponame>bjpop/rubra
# Various useful utilities for the pipeline.
import sys
import errno
import subprocess
from ruffus.proxy_logger import *
import logging
import imp
import os
import os.path
from shell_command import shellCommand
from cluster_job import PBS_Script
import re
# A simple container object
class Bag:
    """Empty namespace used as a mutable attribute container for options."""
    pass
# XXX I don't think the defaults really belong here.
defaultOptionsModule = ['pipeline_config']
defaultWalltime = None # use the default walltime of the scheduler
defaultModules = []
defaultQueue = 'batch'
defaultMemInGB = None # use the default mem of the scheduler
defaultDistributed = False
defaultLogDir = 'log'
defaultLogFile = 'pipeline.log'
defaultStyle = 'run'
defaultProcs = 4
defaultPaired = False
# Per-stage fallbacks consulted by getStageOptions when a stage does not
# override an option.
stageDefaults = {
    'distributed': defaultDistributed,
    'walltime': defaultWalltime,
    'memInGB': defaultMemInGB,
    'modules': defaultModules,
    'queue': defaultQueue
}
# Defaults for the 'pipeline' section: logging, run style, process count.
pipeline = {
    'logDir': defaultLogDir,
    'logFile': defaultLogFile,
    'style': defaultStyle,
    'procs': defaultProcs,
    'paired': defaultPaired,
    'verbose': 0
}
# Top-level default configuration assembled from the values above.
defaultConfig = {
    'reference': None,
    'sequences': [],
    'optionsModule': defaultOptionsModule,
    'stageDefaults': stageDefaults,
    'pipeline': pipeline
}
def mkDir(dir):
    """Create directory *dir* if it does not exist; exit with a message on failure.

    Bug fix: os.mkdir signals failure with OSError, not IOError, so the
    original handler could never fire under Python 2. Octal literal
    modernised to 0o777 (valid on Python 2.6+ and 3).
    """
    if not os.path.exists(dir):
        try:
            os.mkdir(dir, 0o777)
        except OSError as e:
            sys.exit('%s\nFailed to make directory %s' % (e, dir))
def mkLink(source, target):
    """Create symlink *target* pointing at *source*.

    An already-existing link (EEXIST) is silently tolerated; any other
    OS error exits with a message. The except syntax is modernised to
    'as', valid on Python 2.6+ and 3.
    """
    try:
        os.symlink(source, target)
    except OSError as e:
        if e.errno != errno.EEXIST:
            sys.exit('%s\nFailed to create symlink %s from %s' %
                     (e, target, source))
            # or just raise?
def mkForceLink(source, target):
    """Create a symlink, overwriting any existing file or symlink at *target*.

    Bug fix: the original checked os.path.isfile, which is False for a
    broken symlink, so os.symlink then failed with EEXIST. lexists also
    detects dangling links.
    """
    if os.path.lexists(target):
        os.remove(target)
    os.symlink(source, target)
def initLog(options):
    """Create the shared pipeline logger (ruffus proxy + mutex).

    Logs to <logDir>/<logFile> at DEBUG level, creating logDir first.
    Returns a dict with the 'proxy' logger and the 'mutex' guarding it
    (consumed by logInfo).
    """
    logDir = options.pipeline['logDir']
    logFile = os.path.join(logDir, options.pipeline['logFile'])
    mkDir(logDir)
    loggerArgs = {}
    loggerArgs["file_name"] = logFile
    loggerArgs["level"] = logging.DEBUG
    loggerArgs["rotating"] = False
    loggerArgs["maxBytes"] = 20000
    loggerArgs["backupCount"] = 10
    loggerArgs["formatter"] = "%(asctime)s - %(message)s"
    proxy, mutex = make_shared_logger_and_proxy(setup_std_shared_logger,
                                                "NGS_pipeline", loggerArgs)
    return {'proxy': proxy, 'mutex': mutex}
def getStageOptions(options, stage, optionName):
    """Return *optionName* for *stage*, falling back to the stage defaults
    when the stage (or the option within it) is not configured."""
    stage_options = options.stages.get(stage, {})
    if optionName in stage_options:
        return stage_options[optionName]
    return options.stageDefaults[optionName]
# returns the exit status of the job if possible otherwise None
def distributedCommand(stage, comm, options):
    """Submit *comm* as a PBS job configured from the stage's options
    and wait for it; returns the job's exit status (or None)."""
    time = getStageOptions(options, stage, 'walltime')
    mods = getStageOptions(options, stage, 'modules')
    queue = getStageOptions(options, stage, 'queue')
    mem = getStageOptions(options, stage, 'memInGB')
    try:
        literals = getStageOptions(options, stage, 'jobscript')
    except KeyError:
        # 'jobscript' is optional extra script text; absence is fine.
        literals = None
    logDir = options.pipeline['logDir']
    verbosity = options.pipeline['verbose']
    script = PBS_Script(command=comm, walltime=time, name=stage, memInGB=mem,
        queue=queue, moduleList=mods, logDir=logDir, literals=literals)
    return script.runJobAndWait(stage, logDir, verbosity)
# check the exit status of the command and if == 0 then write a checkpoint
# file to indicate success.
def runStageCheck(stage, flag_file, *args):
    """Run *stage*; on success (exit status 0) touch *flag_file* as a
    checkpoint, otherwise print the failing command."""
    status = runStage(stage, *args)
    if status == 0:
        open(flag_file, 'w').close()
    else:
        # Re-render the command purely for the error message.
        command = getCommand(stage, pipeline_options)
        commandStr = command(*args)
        print('Error: command failed: %s' % commandStr)
# returns exit status of the executed command or None
def runStage(stage, *args):
    """Render the stage's command with *args* and execute it, on the
    cluster when the stage is configured 'distributed', otherwise via
    the local shell. Returns the exit status (or None)."""
    command = getCommand(stage, pipeline_options)
    commandStr = command(*args)
    logStr = stage + ': ' + commandStr
    logInfo(logStr, pipeline_logger)
    if getStageOptions(pipeline_options, stage, 'distributed'):
        exitStatus = distributedCommand(stage, commandStr, pipeline_options)
        return exitStatus
    else:
        (stdoutStr, stderrStr, exitStatus) = shellCommand(commandStr)
        if exitStatus != 0:
            # Record the failure (with captured output) in the pipeline log.
            msg = ("Failed to run '%s'\n%s%sNon-zero exit status %s" %
                (commandStr, stdoutStr, stderrStr, exitStatus))
            logInfo(msg, pipeline_logger)
        return exitStatus
def commandToLambda(command):
    """Convert a short-hand command string into lambda source text.

    Each '%name' placeholder becomes a positional argument, e.g.
    'bwa aln -t 8 %ref %seq > %out' becomes
    'lambda x0,x1,x2 : "bwa aln -t 8 %s %s > %s" % (x0,x1,x2)'.
    """
    expanded, placeholder_count = re.subn('%[^ ]*', '%s', command)
    arg_names = ','.join('x' + str(i) for i in range(placeholder_count))
    return 'lambda %s : "%s" %% (%s)' % (arg_names, expanded, arg_names)
def getCommand(name, options):
    """Look up the 'command' option for stage *name* and turn it into a
    callable via commandToLambda; exits if the stage has no command.

    NOTE(review): this eval()s a string built from the options file; the
    options file is trusted input here, but never feed it untrusted data.
    """
    try:
        commandStr = getStageOptions(options, name, 'command')
        return eval(commandToLambda(commandStr))
    except KeyError:
        exit("command: %s, is not defined in the options file" % name)
def logInfo(msg, logger):
    """Log *msg* through the shared proxy, serialised by the logger's mutex."""
    with logger['mutex']:
        logger['proxy'].info(msg)
def splitPath(path):
    """Split *path* into (directory, basename-without-extension, extension)."""
    prefix = os.path.dirname(path)
    name, ext = os.path.splitext(os.path.basename(path))
    return (prefix, name, ext)
def drop_py_suffix(filename):
    """Return *filename* with a trailing '.py' removed; otherwise return
    the name unchanged."""
    stem, ext = os.path.splitext(filename)
    return stem if ext == '.py' else filename
# XXX should sanity check that all the required options are
# specified somewhere, either on command line or in config files.
def getOptions(args):
    """Assemble a Bag of pipeline options from the config modules named
    in args.config, then apply command-line overrides.

    Exits if a config module cannot be imported or if no 'end' target is
    specified anywhere. Force/end task names are prefixed with the
    pipeline module name.
    """
    options = Bag()
    options.pipeline = {}
    for module_file in args.config:
        # Check if the config module name ends in a .py suffix, if
        # so drop the suffix because Python module imports do not
        # allow it. XXX is this still necessary?
        module = drop_py_suffix(module_file)
        try:
            imported = __import__(module)
        except ImportError:
            exit('Could not find configuration file: %s' % module_file)
        for name in dir(imported):
            if name[:2] != '__':
                setattr(options, name, getattr(imported, name))
    # Command line options can override what is in the config files,
    # so these settings must be done _after_ reading the config files.
    options.pipeline['rebuild'] = args.rebuild # will default on cmdline
    options.pipeline['style'] = args.style # will default on cmdline
    options.pipeline['verbose'] = args.verbose # will default on cmdline
    options.pipeline['force'] = args.force # will default on cmdline
    if args.end:
        options.pipeline['end'] = [args.end]
    # make sure we have an 'end' target specified somewhere
    if 'end' not in options.pipeline:
        exit('end task(s) not specified on command line or in config file')
    # add the pipeline name prefix to all the force tasks and all the end
    # tasks.
    pipeline_name = drop_py_suffix(args.pipeline)
    options.pipeline['force'] = \
        map(lambda name: pipeline_name + '.' + name, options.pipeline['force'])
    options.pipeline['end'] = \
        map(lambda name: pipeline_name + '.' + name, options.pipeline['end'])
    return options
def mkLogFile(logDir, fullFilename, extension):
    """Build a path in *logDir* named after *fullFilename*'s base name
    with *extension* appended."""
    _, name, _ = splitPath(fullFilename)
    return os.path.join(logDir, name + extension)
def mkTempFilename(file):
    """Return *file* with a '.tmp' suffix appended."""
    return ''.join([file, '.tmp'])
# truncate a file to zero bytes, and preserve its original modification time
def zeroFile(file):
    """Empty *file* in place, keeping its access/modification timestamps.

    Missing files and unopenable files are ignored (best-effort).
    """
    if not os.path.exists(file):
        return
    timeInfo = os.stat(file)
    try:
        # Opening in 'w' mode truncates; truncate(0) keeps the intent explicit.
        with open(file, 'w') as handle:
            handle.truncate(0)
    except IOError:
        pass
    # restore the original timestamps after truncation
    os.utime(file, (timeInfo.st_atime, timeInfo.st_mtime))
# Shared logger (proxy/mutex dict from initLog); populated by startLogger().
pipeline_logger = None
def startLogger():
    """Initialise the module-global pipeline logger from pipeline_options."""
    global pipeline_logger
    pipeline_logger = initLog(pipeline_options)
# Module-global options Bag used by the stage runners; set via setOptions().
pipeline_options = None
def setOptions(options):
    """Store *options* in the module-global consulted by runStage and friends."""
    global pipeline_options
    pipeline_options = options
| 2.140625 | 2 |
tests/test_github.py | dantin/coral | 0 | 12764906 | <filename>tests/test_github.py<gh_stars>0
# -*- coding: utf-8 -*-
import os
import pytest
from scrapy.http import TextResponse
from coral.spiders.github import parse_release_link, parse_release_links
@pytest.fixture
def landing_page(page_path):
    """Return the raw HTML of the saved GitHub landing-page fixture."""
    fixture_path = os.path.join(page_path, 'landing_page.html')
    with open(fixture_path, 'r') as handle:
        return handle.read()
@pytest.fixture
def release_page(page_path):
    """Return the raw HTML of the saved GitHub releases-page fixture."""
    fixture_path = os.path.join(page_path, 'release_page.html')
    with open(fixture_path, 'r') as handle:
        return handle.read()
def test_parse_release_link(landing_page):
    """parse_release_link extracts the releases link from the landing page."""
    response = TextResponse(
        url='https://github.com/vim/vim',
        encoding='utf-8',
        body=landing_page,
    )
    assert parse_release_link(response) == '/vim/vim/releases'
def test_parse_release_links(release_page):
    """parse_release_links yields at least one release URL."""
    response = TextResponse(
        url='https://github.com/vim/vim/releases',
        encoding='utf-8',
        body=release_page,
    )
    links = parse_release_links(response)
    assert len(links) > 0
| 2.40625 | 2 |
utils/data_cleaning.py | grazimelo/previsao-diabetes | 0 | 12764907 | from pandas import DataFrame
def remover_duplicatas(df: DataFrame, drop_if_found=True) -> tuple:
    """Detect (and optionally remove) duplicated rows and columns.

    Args:
        df (DataFrame): DataFrame to analyse.
        drop_if_found (bool): when True, drop duplicated rows and columns and
            return the cleaned frame; when False, only report.

    Returns:
        (df, list_duplicated_columns) when ``drop_if_found`` is True: the
        frame without duplicated rows/columns and the names of the duplicated
        columns that were found.  When False, only the list of duplicated
        column names is returned (kept for backward compatibility with
        existing callers).
    """
    # Report duplicated columns (via the transpose) and duplicated rows.
    print(
        f"Existem {df.T.duplicated().sum()} colunas duplicadas e {df.duplicated().sum()} linhas duplicadas"
    )
    if drop_if_found:
        print("Removendo...")
        df = df.drop_duplicates()
        df_T = df.T
        # Bug fix: collect the duplicated column names *before* dropping
        # them; the original computed this list after drop_duplicates(),
        # so it was always empty.
        list_duplicated_columns = df_T[df_T.duplicated(keep=False)].index.tolist()
        df_T = df_T.drop_duplicates()
        print("Colunas duplicadas:")
        print(list_duplicated_columns)
        return df_T.T, list_duplicated_columns
    list_duplicated_columns = df.T[df.T.duplicated(keep=False)].index.tolist()
    return list_duplicated_columns
def remover_colunas_constantes(df: DataFrame) -> tuple:
    """Remove columns that hold a single constant value.

    The frame is modified in place (as before), and also returned for
    convenience.

    Args:
        df (DataFrame): DataFrame to analyse.

    Returns:
        (df, const_cols): the DataFrame without constant columns and the
        list of column names that were removed.
    """
    # Identify all constant columns first, then drop them in a single call,
    # instead of mutating the frame while iterating over its columns.
    const_cols = [col for col in df.columns if len(df[col].unique()) == 1]
    df.drop(columns=const_cols, inplace=True)
    return df, const_cols
def normalizar_caracteres(
    lista_variaveis_categoricas: list, df: DataFrame
) -> DataFrame:
    """Lower-case the string values of the given categorical columns.

    Args:
        lista_variaveis_categoricas (list): names of the string columns to
            transform.
        df (DataFrame): DataFrame to transform (modified in place).

    Returns:
        df: the transformed DataFrame.
    """
    for coluna in lista_variaveis_categoricas:
        df[coluna] = df[coluna].str.lower()
    return df
| 3.953125 | 4 |
leetcode/binary-tree/unique_BSTs.py | Gaurav-Pande/DataStructures | 5 | 12764908 | <reponame>Gaurav-Pande/DataStructures
# https://leetcode.com/problems/unique-binary-search-trees-ii/
# Definition for a binary tree node.
class TreeNode(object):
    """A node of a binary (search) tree."""
    def __init__(self, x):
        self.val = x        # value stored in this node
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
class Solution(object):
    """Generate every structurally unique BST storing the values 1..n."""

    def generateTrees(self, n):
        """Return the roots of all unique BSTs over 1..n (empty list for n == 0).

        :type n: int
        :rtype: List[TreeNode]
        """
        return self.calculate(1, n) if n else []

    def calculate(self, start, end):
        """Return the roots of all unique BSTs over the values start..end."""
        if start > end:
            # An empty range contributes exactly one "no subtree" placeholder.
            return [None]
        trees = []
        for root_val in range(start, end + 1):
            # Each value takes a turn as root: smaller values form the left
            # subtree, larger values the right subtree.
            for left in self.calculate(start, root_val - 1):
                for right in self.calculate(root_val + 1, end):
                    root = TreeNode(root_val)
                    root.left = left
                    root.right = right
                    trees.append(root)
        return trees
| 3.90625 | 4 |
scripts/tools/feature.py | kamigaito/SLAHAN | 15 | 12764909 | import argparse
import codecs
# Merge three line- and token-aligned files (words, POS tags, dependency
# relations) into one file where each token is serialised as
# "word-|-pos-|-relation".
parser = argparse.ArgumentParser(description='Converter of the Google sentence compression dataset')
parser.add_argument("-s", "--sent-file", dest="file_sent", type=str, help="path to the sentence file")
parser.add_argument("-p", "--pos-file", dest="file_pos", type=str, help="path to the pos file")
parser.add_argument("-r", "--rel-file", dest="file_rel", type=str, help="path to the relation file")
parser.add_argument("-o", "--out-file", dest="file_out", type=str, help="path to the output file")
opts = parser.parse_args()
f_sent = codecs.open(opts.file_sent, "r", encoding="utf8")
f_pos = codecs.open(opts.file_pos, "r", encoding="utf8")
f_rel = codecs.open(opts.file_rel, "r", encoding="utf8")
f_out = codecs.open(opts.file_out, "w", encoding="utf8")
with f_sent, f_pos, f_rel, f_out:
    for line_sent in f_sent:
        # The three inputs are line-aligned: read one line from each.
        col_sent = line_sent.rstrip().split(" ")
        col_pos = f_pos.readline().rstrip().split(" ")
        col_rel = f_rel.readline().rstrip().split(" ")
        if len(col_sent) != len(col_pos) or len(col_sent) != len(col_rel):
            print("POS, Rel and Tokens are not correctly aligned.")
        assert(len(col_sent) == len(col_pos))
        assert(len(col_sent) == len(col_rel))
        # Join the aligned triples into "word-|-pos-|-rel" tokens.
        body = " ".join(
            w + "-|-" + p + "-|-" + r
            for w, p, r in zip(col_sent, col_pos, col_rel)
        )
        f_out.write(body + "\n")
| 2.96875 | 3 |
libraries/stats/widgets/spline.py | PiRSquared17/r-orange | 1 | 12764910 | <gh_stars>1-10
"""Spline Fit widget
.. helpdoc::
Generates a spline fit to X, Y data. This can be used for plotting or for interogating the splines.
"""
"""<widgetXML>
<name>Spline Fit</name>
<icon>stats.png</icon>
<screenshots></screenshots>
<summary>Produces a spline fit of X/Y data.</summary>
<tags>
<tag priority="10">
Parametric
</tag>
</tags>
<author>
<authorname>Red-R Core Development Team</authorname>
<authorcontact>www.red-r.org</authorcontact>
</author>
</widgetXML>
"""
"""
<name>Spline Fit</name>
<author>Generated using Widget Maker written by <NAME></author>
<Description>Generates a spline fit to X, Y data. This can be used for plotting or for interogating the splines.</Description>
<RFunctions>stats:spline</RFunctions>
<tags>Parametric</tags>
<icon>stats.png</icon>
"""
from OWRpy import *
import redRGUI, signals
import redRGUI
class spline(OWRpy):
    """Red-R widget fitting a spline to X/Y data via R's stats::spline().

    X may be a vector or a data.frame; for a multi-column data.frame the
    X/Y columns are chosen through combo boxes.  The widget builds a
    spline() call in the hosted R session and emits the resulting fit plus
    a plot attribute downstream.
    """
    settingsList = []
    def __init__(self, **kwargs):
        OWRpy.__init__(self, **kwargs)
        """.. rrvnames::""" ## left blank so no description
        self.setRvariableNames(["spline"])
        self.data = {}
        self.RFunctionParam_y = ''
        self.RFunctionParam_x = ''
        """.. rrsignals::
        :description: `X data`"""
        self.inputs.addInput('id1', 'x', [signals.base.RVector, signals.base.RDataFrame], self.processx)
        """.. rrsignals::
        :description: `Y data`"""
        self.inputs.addInput('id0', 'y', signals.base.RVector, self.processy)
        """.. rrsignals::
        :description: `spline Output fit`"""
        self.outputs.addOutput('id0', 'spline Output', signals.base.RModelFit)
        """.. rrsignals::
        :description: `spline plot attribute`"""
        self.outputs.addOutput('id1', 'spline plot attribute', signals.plotting.RPlotAttribute)
        self.standardTab = redRGUI.base.groupBox(self.controlArea, label = 'Parameters')
        """.. rrgui::
        :description: `XMin.`
        """
        self.RFunctionParamxmin_lineEdit = redRGUI.base.lineEdit(self.standardTab, label = "xmin:", text = 'min(x)')
        """.. rrgui::
        :description: `Function to handle ties.`
        """
        self.RFunctionParamties_lineEdit = redRGUI.base.lineEdit(self.standardTab, label = "ties:", text = 'mean')
        """.. rrgui::
        :description: `Fit method.`
        """
        self.RFunctionParammethod_lineEdit = redRGUI.base.lineEdit(self.standardTab, label = "method:", text = '"fmm"')
        """.. rrgui::
        :description: `xmax.`
        """
        self.RFunctionParamxmax_lineEdit = redRGUI.base.lineEdit(self.standardTab, label = "xmax:", text = 'max(x)')
        """.. rrgui::
        :description: `Number of inflection points.`
        """
        self.RFunctionParamn_lineEdit = redRGUI.base.lineEdit(self.standardTab, label = "n:", text = '3*length(x)')
        """.. rrgui::
        :description: `Optional X Data parameter.`
        """
        self.xcolumnComboBox = redRGUI.base.comboBox(self.standardTab, label = 'X data')
        """.. rrgui::
        :description: `Optional Y Data parameter.`
        """
        self.ycolumnComboBox = redRGUI.base.comboBox(self.standardTab, label = 'Y data')
        redRGUI.base.commitButton(self.bottomAreaRight, "Commit", callback = self.commitFunction)
        self.RoutputWindow = redRGUI.base.textEdit(self.controlArea, label = "RoutputWindow")
    def processy(self, data):
        """Store the incoming Y vector (as an R expression) and refit."""
        if data:
            self.RFunctionParam_y=data.getData()
            #self.data = data
            self.commitFunction()
        else:
            self.RFunctionParam_y=''
    def processx(self, data):
        """Store the incoming X data, populate the column pickers and refit."""
        if data:
            self.RFunctionParam_x=data.getData()
            self.data = data
            colnames = self.R('colnames('+self.RFunctionParam_x+')', wantType = 'list')
            if colnames and len(colnames) > 1:
                # Multi-column data.frame: let the user pick the X/Y columns.
                self.xcolumnComboBox.update(colnames)
                self.ycolumnComboBox.update(colnames)
            else:
                self.xcolumnComboBox.clear()
                self.ycolumnComboBox.clear()
            self.commitFunction()
        else:
            self.RFunctionParam_x=''
    def commitFunction(self):
        """Build and run the R spline() call, then emit the fit downstream."""
        if unicode(self.ycolumnComboBox.currentText()) == '':
            # No column selection: require both vector inputs to be present.
            if unicode(self.RFunctionParam_y) == '': return
            if unicode(self.RFunctionParam_x) == '': return
        else:
            # Columns were selected from an incoming data.frame.
            data = self.data.getData()
            self.RFunctionParam_x = data+'$'+unicode(self.xcolumnComboBox.currentText())
            self.RFunctionParam_y = data+'$'+unicode(self.ycolumnComboBox.currentText())
        # Collect the optional spline() arguments the user filled in.
        injection = []
        if unicode(self.RFunctionParamxmin_lineEdit.text()) != '':
            string = 'xmin='+unicode(self.RFunctionParamxmin_lineEdit.text())+''
            injection.append(string)
        if unicode(self.RFunctionParamties_lineEdit.text()) != '':
            string = 'ties='+unicode(self.RFunctionParamties_lineEdit.text())+''
            injection.append(string)
        if unicode(self.RFunctionParammethod_lineEdit.text()) != '':
            string = 'method='+unicode(self.RFunctionParammethod_lineEdit.text())+''
            injection.append(string)
        if unicode(self.RFunctionParamxmax_lineEdit.text()) != '':
            string = 'xmax='+unicode(self.RFunctionParamxmax_lineEdit.text())+''
            injection.append(string)
        if unicode(self.RFunctionParamn_lineEdit.text()) != '':
            string = 'n='+unicode(self.RFunctionParamn_lineEdit.text())+''
            injection.append(string)
        inj = ','.join(injection)
        # Bug fix: the original concatenated "'),'+','+inj", producing a
        # double comma (an empty argument) in the generated R call, and a
        # dangling comma / syntax error when no optional arguments were set.
        # Append the extras with a single separator, only when any exist.
        extras = ',' + inj if inj else ''
        self.R('x <- as.vector('+unicode(self.RFunctionParam_x)+')')
        self.R(self.Rvariables['spline']+'<-spline(x = as.vector('+unicode(self.RFunctionParam_x)+'),y=as.vector('+unicode(self.RFunctionParam_y)+')'+extras+')')
        self.R('rm(x)')
        # Capture spline()'s printed representation into the output window.
        self.R('txt<-capture.output('+self.Rvariables['spline']+')')
        self.RoutputWindow.clear()
        tmp = self.R('paste(txt, collapse ="\n")')
        self.RoutputWindow.insertHtml('<br><pre>'+tmp+'</pre>')
        newData = signals.base.RModelFit(self, data = self.Rvariables["spline"]) # moment of variable creation, no preexisting data set. To pass forward the data that was received in the input uncomment the next line.
        #newData.copyAllOptinoalData(self.data) ## note, if you plan to uncomment this please uncomment the call to set self.data in the process statemtn of the data whose attributes you plan to send forward.
        self.rSend("id0", newData)
        newLine = signals.plotting.RPlotAttribute(self, data = 'lines('+self.Rvariables['spline']+')')
        self.rSend("id1", newLine)
| 2.921875 | 3 |
scripts/mmarc_reads_aligned.py | lakinsm/meta-marc-publication | 1 | 12764911 | <reponame>lakinsm/meta-marc-publication
#!/usr/bin/env python3
import re
import os.path
import sys
# Annotation level index -> human-readable level name.
ltrans = {1: 'Class', 2: 'Mechanism', 3: 'Group'}
# Per-level accumulator: {level: {annotation name: fractional read count}}.
counts = {1: {}, 2: {}, 3: {}}
# Totals copied from SamParser when iteration finishes (see SamParser.__next__).
reads_mapped = 0
total_reads = 0
def load_mmarc_metadata(infile):
    """Read the Meta-MARC metadata table into {model name: last 3 columns}.

    The first (header) line is skipped.  Column 2 of each tab-separated row
    is the model name; the final three columns are kept as its annotations.
    """
    metadata = {}
    with open(infile, 'r') as handle:
        lines = handle.read().split('\n')[1:]
    for line in lines:
        if not line:
            continue
        fields = line.split('\t')
        metadata[fields[1]] = fields[-3:]
    return metadata
def load_tblout(infile):
    """Parse a whitespace-delimited tblout file into alignment intervals.

    Returns {contig name: [(ali_start, ali_end, model name), ...]} where the
    coordinates (columns 7 and 8) are ordered so that start <= end.  Blank
    lines and '#' comment lines are skipped.
    """
    hits = {}
    with open(infile, 'r') as handle:
        for line in handle.read().split('\n'):
            if not line or line.startswith('#'):
                continue
            fields = line.split()
            lo, hi = sorted((int(fields[6]), int(fields[7])))
            hits.setdefault(fields[0], []).append((lo, hi, fields[2]))
    return hits
def parse_cigar(s):
    """Return the alignment length implied by CIGAR string *s*.

    Sums the lengths of the operations this pipeline treats as
    alignment-consuming (M, =, X, I, D, N, P); any other operation
    (e.g. soft/hard clips) is ignored.
    """
    counted_ops = {'X', 'P', 'I', 'N', 'D', '=', 'M'}
    total = 0
    for count, op in re.findall(r'(\d+)([A-Z=]{1})', s):
        if op in counted_ops:
            total += int(count)
    return total
class SamParser:
    """Iterator over the mapped reads of a SAM file or standard input.

    Yields one (reference name, 1-based start, CIGAR) tuple per mapped read,
    holding a single line in memory at a time.  Header lines ('@') are
    skipped; unmapped reads (FLAG bit 4 set) are counted but not yielded.
    On exhaustion the totals are copied into the module-level
    ``reads_mapped`` / ``total_reads`` globals.
    """
    def __init__(self, filepath):
        """
        constructor
        @param filepath: filepath to the input raw SAM file.
        """
        if os.path.exists(filepath):  # if file is a file, read from the file
            # Stored as a string here; __next__ opens it on first use.
            self.sam_file = str(filepath)
            self.stdin = False
        elif not sys.stdin.isatty():  # else read from standard in
            self.stdin = True
        else:
            raise ValueError("Parameter filepath must be a SAM file")
        self.current_line = None
        self.reads_mapping = 0   # mapped reads seen so far
        self.reads_total = 0     # all reads seen so far (mapped or not)
        self.header_lens = {}
    def __iter__(self):
        return self
    @property
    def _iterate(self):
        # NOTE(review): decorated as a property but used as a
        # pull-one-record method (accessed as ``self._iterate`` with no
        # parentheses in __next__); returns None at end of input.
        # Skip all leading whitespace
        while True:
            if self.stdin:
                sam_line = sys.stdin.readline()  # read from stdin
            else:
                sam_line = self.sam_file.readline()  # read from file
            if not sam_line:
                return  # End of file
            if sam_line[0] != '@':  # these lines are the actual reads
                self.reads_total += 1
                if self.reads_total % 100000 == 0:  # update the counter on stdout every 100000 reads
                    sys.stdout.write("\rReads processed: {}".format(self.reads_total))
                    sys.stdout.flush()
                temp = sam_line.split()
                # FLAG bit 4 unset -> the read is mapped.
                if (int(temp[1]) & 4) == 0:
                    self.reads_mapping += 1
                    return temp[2], int(temp[3]), temp[5]  # RefName, 1-start, CIGAR
        # NOTE(review): the two lines below are unreachable -- the while
        # loop above only exits via return.
        self.sam_file.close()  # catch all in case this line is reached
        assert False, "Should not reach this line"
    def __next__(self):
        if not self.stdin and type(self.sam_file) is str:  # only open file here if sam_file is a str and not fileIO
            self.sam_file = open(self.sam_file, "r")
        value = self._iterate
        if not value:  # close file on EOF
            if not self.stdin:
                self.sam_file.close()
            sys.stdout.write(
                "\n{:d} reads mapped out of {:d} total reads\n".format(self.reads_mapping, self.reads_total))
            sys.stdout.flush()
            # Publish the final tallies to the module-level globals before
            # signalling exhaustion.
            global reads_mapped
            global total_reads
            reads_mapped = self.reads_mapping
            total_reads = self.reads_total
            raise StopIteration()
        else:
            return value
if __name__ == '__main__':
    # argv: metadata file, tblout file, dantas string, dataset name,
    # group string, output CSV path; SAM records arrive on stdin ('-').
    R = load_mmarc_metadata(sys.argv[1])   # model name -> [Class, Mechanism, Group]
    D = load_tblout(sys.argv[2])           # contig -> [(start, end, model), ...]
    dantas_string = sys.argv[3]
    dataset = sys.argv[4]
    groupstring = sys.argv[5]
    for refname, start, cigar in SamParser('-'):
        # RefName, 1-start, CIGAR, RefLen, ReadSeq
        stop = start + parse_cigar(cigar) - 1
        if refname not in D:
            continue
        # Collect every annotated model whose tblout interval overlaps the
        # read's alignment interval.
        model_hits = set()
        for triplets in D[refname]:
            if max(start, triplets[0]) <= min(stop, triplets[1]):
                if R[triplets[2]] != 'NA':
                    model_hits.add(triplets[2])
        if model_hits:
            for x in model_hits:
                for e, a in enumerate(R[x]):
                    annots = a.split('|')
                    # Split the single read count evenly across all matched
                    # models and all '|'-separated annotations.
                    correct = len(model_hits) * len(annots)
                    for annot in annots:
                        try:
                            counts[e + 1][annot] += float(1) / correct
                        except KeyError:
                            counts[e + 1][annot] = float(1) / correct
    # Append one CSV row per (level, annotation) tally.
    with open(sys.argv[6], 'a') as out:
        for level, ldict in counts.items():
            for k, count in ldict.items():
                out.write(
                    '{},{},MMARC,{},{},{},{}\n'.format(dataset, dantas_string, groupstring, ltrans[level], k,
                                                       str(count)))
| 2.46875 | 2 |
dzTraficoBackend/dzTrafico/BusinessLayer/SimulationManager.py | DZAymen/dz-Trafico | 0 | 12764912 | <reponame>DZAymen/dz-Trafico<filename>dzTraficoBackend/dzTrafico/BusinessLayer/SimulationManager.py
from dzTrafico.BusinessLayer.SimulationCreation.SimulationCreator import SimulationCreator
from dzTrafico.BusinessEntities.Simulation import Simulation
from dzTrafico.BusinessEntities.Sink import Sink
from dzTrafico.BusinessEntities.Sensor import Sensor
from dzTrafico.BusinessEntities.Node import Node
from dzTrafico.BusinessLayer.TrafficAnalysis.TrafficAnalyzer import TrafficAnalyzer
from dzTrafico.BusinessLayer.Statistics.StatisticsManager import StatisticsManager
from dzTrafico.BusinessLayer.TrafficAnalysis.LaneChangeControlAlgo import LaneChange
from dzTrafico.BusinessLayer.TrafficAnalysis.VirtualRampMeteringControlAlgo import VirtualRampMetering
class SimulationManager:
    """Facade coordinating simulation creation, traffic analysis and statistics.

    NOTE(review): state handling mixes class and instance attributes --
    ``set_map()`` rebinds ``self.__simulation`` etc. (creating *instance*
    attributes), while most other methods read the *class* attributes
    ``SimulationManager.__simulation`` / ``__simulationCreator``, so the
    objects created by ``set_map()`` are not visible to those methods.
    Confirm which level is intended.
    """
    # Shared collaborators, created once at import time (class attributes).
    __simulation = Simulation()
    __simulationCreator = SimulationCreator()
    __trafficAnalyzer = TrafficAnalyzer(__simulation)
    __statisticsManager = StatisticsManager(__simulation)
    incidents = []
    #Define a singleton SimulationManager class
    __simulationManager = None
    @staticmethod
    def get_instance():
        """Return the shared SimulationManager instance (lazy singleton)."""
        if SimulationManager.__simulationManager is None:
            SimulationManager.__simulationManager = SimulationManager()
        return SimulationManager.__simulationManager
    def get_simulation(self):
        """Return the class-level Simulation object."""
        return SimulationManager.__simulation
    # -------------------------------- Net file creation ------------------------------------------------
    #Call SimulationCreator.set_map method to create the map
    def set_map(self, map_box):
        """Reset all per-run state and record the selected map bounding box."""
        SimulationManager.incidents = []
        # NOTE(review): these assignments create instance attributes that
        # shadow the class attributes of the same (mangled) name.
        self.__simulation = Simulation()
        self.__simulationCreator = SimulationCreator()
        self.__trafficAnalyzer = TrafficAnalyzer(self.__simulation)
        self.__statisticsManager = StatisticsManager(self.__simulation)
        self.__simulationCreator.set_map_box(map_box)
    def get_map_box(self):
        """Return the bounding box previously stored via set_map()."""
        return self.__simulationCreator.get_map_box()
    # ---------------------------------------------------------------------------------------------------
    # -------------------------------- Flows definition -------------------------------------------------
    def add_inflow(self, inFlowPoint):
        SimulationManager.__simulation.add_inflows(inFlowPoint)
    def add_outflow(self, outFlowPoint):
        SimulationManager.__simulation.add_outflows(outFlowPoint)
    def generate_flows(self):
        # Only define traffic flows once both in- and out-flow points exist.
        if (len(SimulationManager.__simulation.inFlowPoints) > 0) and (
                len(SimulationManager.__simulation.outFlowPoints) > 0):
            SimulationManager.__simulationCreator.define_traffic_flows(
                SimulationManager.__simulation.inFlowPoints,
                SimulationManager.__simulation.outFlowPoints
            )
    def generate_routes(self):
        SimulationManager.__simulationCreator.create_route_file()
    def get_inflow_points(self):
        return SimulationManager.__simulation.get_inflows()
    def get_outflow_points(self):
        return SimulationManager.__simulation.get_outflows()
    def delete_traffic_outflow(self, id):
        SimulationManager.__simulation.delete_outflow(id)
    def delete_traffic_inflow(self, id):
        SimulationManager.__simulation.delete_inflow(id)
    # ---------------------------------------------------------------------------------------------------
    # -------------------------------- Incidents definition ---------------------------------------------
    # Call SimulationCreator.set_map method to create the map
    def add_incident(self, incident):
        SimulationManager.incidents.append(incident)
        #SimulationManager.__simulationCreator.add_incidents(SimulationManager.incident)
    def get_incidents(self):
        return SimulationManager.incidents
    def delete_incident(self, id):
        """Remove the incident whose id matches *id* (string or int)."""
        # NOTE(review): removes from the list while iterating it; fine for a
        # single match, but may skip entries if several incidents share an id.
        for incident in SimulationManager.incidents:
            if incident.id == int(id):
                SimulationManager.incidents.remove(incident)
    # ---------------------------------------------------------------------------------------------------
    # -------------------------------- Vehicle types definition -----------------------------------------
    def add_vehicule_type(self, vehicle_type):
        SimulationManager.__simulationCreator.add_vehicle_type(vehicle_type)
    def set_vehicle_types_percentages(self, vehicle_types_percentages):
        SimulationManager.__simulationCreator.set_vehicle_types_percentages(vehicle_types_percentages)
    def get_vehicle_types(self):
        return SimulationManager.__simulationCreator.get_vehicle_types()
    # ---------------------------------------------------------------------------------------------------
    # ------------------------------------- Add sensors -------------------------------------------------
    def set_sensors_distance(self, distance):
        SimulationManager.__simulationCreator.set_sensors_distance(distance)
    def add_sensors(self):
        # Wire the analyzer into the Sink class before creating sensors.
        Sink.trafficAnalyzer = SimulationManager.__trafficAnalyzer
        SimulationManager.__simulationCreator.create_sensors()
    # ---------------------------------------------------------------------------------------------------
    # ------------------------------------- Simulation Creation -----------------------------------------
    # Split network edges
    # && update network file
    # && generate route file
    def split_network_edges(self):
        self.generate_flows()
        SimulationManager.__simulationCreator.split_network_edges()
        SimulationManager.__simulationCreator.add_incidents(SimulationManager.incidents)
        self.generate_flows()
        self.generate_routes()
    def create_simulation(self, consumer):
        """Build the full simulation, streaming progress messages to *consumer*."""
        print "download map"
        consumer.send("download map")
        self.__simulationCreator.create_network_file()
        consumer.send("map is downloaded")
        print "map downloaded"
        self.split_network_edges()
        consumer.send("Network is splitted")
        self.add_sensors()
        consumer.send("Sensors are added correctly")
        SimulationManager.__simulation = SimulationManager.__simulationCreator.createSimulation()
        consumer.send("Simulation is created successfully")
    # ---------------------------------------------------------------------------------------------------
    # ----------------------------------------- Simulation Config ---------------------------------------
    def update_config(self, data):
        """Push the user-supplied configuration *data* (dict) into the
        control-algorithm classes and reset the analyzer flags."""
        LaneChange.Xi = data["xi"]
        VirtualRampMetering.num_vsl_controlled_sections = data["num_vsl_sections"]
        VirtualRampMetering.V_min = data["v_min"]
        VirtualRampMetering.Ki = data["ki"]
        VirtualRampMetering.Cv = data["cv"]
        VirtualRampMetering.critical_density = data["critical_density"]
        Sensor.set_critical_density(data["critical_density"])
        Simulation.sim_step_duration = data["sim_step_duration"]
        SimulationManager.__simulationCreator.set_sim_duration(data["simDuration"])
        TrafficAnalyzer.isVSLControlActivated = True
        TrafficAnalyzer.isLCControlActivated = True
        TrafficAnalyzer.isCongestionDetected = False
        TrafficAnalyzer.congestionExists = False
        self.set_sensors_distance(data["distance"])
        Node.COMPLIANCE_PERCENTAGE = data["driver_compliance"] / 100
        # NOTE(review): 597 vs 512 are SUMO lane-change mode bitmasks -- the
        # meaning of each value is defined by the simulator; confirm.
        if Node.COMPLIANCE_PERCENTAGE < 1:
            Simulation.LCMode_vsl_lc = 597
        else:
            Simulation.LCMode_vsl_lc = 512
    # ---------------------------------------------------------------------------------------------------
    # ------------------------------------ Simulation Results -------------------------------------------
    def get_simulation_gpm_results(self):
        return SimulationManager.__statisticsManager.get_GPMs()
    def get_incident_flow(self):
        return SimulationManager.__statisticsManager.get_incident_flow()
    def get_incident_density(self):
        return SimulationManager.__statisticsManager.get_incident_density()
    def get_queue_measurements(self):
        return SimulationManager.__statisticsManager.get_queue_measurements()
# --------------------------------------------------------------------------------------------------- | 2.3125 | 2 |
sqlhandler/custom/executable.py | matthewgdv/sqlhandler | 0 | 12764913 | <filename>sqlhandler/custom/executable.py
from __future__ import annotations
from typing import Any, TYPE_CHECKING, Optional
from abc import ABC, abstractmethod
from pathmagic import File, PathLike
from miscutils import ParametrizableMixin
from sqlhandler.frame import Frame
if TYPE_CHECKING:
from sqlhandler import Sql
class Executable(ParametrizableMixin, ABC):
    """Abstract base for executable SQL objects (scripts, stored procedures).

    Subclasses must implement ``_execute()``, which runs the underlying SQL
    and returns a DB-API cursor (or None).  ``execute()`` wraps it and
    collects any result sets as ``Frame`` objects.
    """

    def __init__(self, sql: Sql = None, verbose: bool = False) -> None:
        # Bug fix: the ``sql`` argument used to be discarded (``self.sql``
        # was hard-coded to None), so subclasses constructed with an
        # explicit Sql instance crashed when dereferencing it.  Passing no
        # argument still leaves it None; ``parametrize()`` may rebind later.
        self.sql: Optional[Sql] = sql
        # One entry per execute() call: the list of result frames it produced.
        self.results: list[list[Frame]] = []

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        """Calling the object is shorthand for ``execute()``."""
        return self.execute(*args, **kwargs)

    def parametrize(self, param: Sql) -> ParametrizableMixin:
        """Bind this executable to a ``Sql`` handle and return self."""
        self.sql = param
        return self

    def execute(self, params: dict) -> Optional[list[Frame]]:
        """Execute this SQL object with *params* and return its result frames.

        Returns None when ``_execute()`` produced no cursor; otherwise the
        frames are both returned and appended to ``self.results``.
        """
        if (cursor := self._execute(params)) is None:
            return None
        with cursor:
            self.results.append(result := self._get_frames_from_cursor(cursor))
        return result

    @abstractmethod
    def _execute(self, params: dict):
        """Run the underlying SQL and return a DB-API cursor (or None)."""
        raise NotImplementedError

    @staticmethod
    def _get_frames_from_cursor(cursor: Any) -> list[Frame]:
        """Convert every result set available on *cursor* into a Frame.

        Result sets that cannot be fetched are skipped; returns None when
        nothing could be converted (preserved original behaviour).
        """
        def get_frame_from_cursor(curs: Any) -> Optional[Frame]:
            try:
                # Use the cursor's own description so each result set gets
                # its matching column names.
                return Frame([tuple(row) for row in curs.fetchall()], columns=[info[0] for info in curs.description])
            except Exception:
                return None

        data = [get_frame_from_cursor(cursor)]
        while cursor.nextset():
            data.append(get_frame_from_cursor(cursor))
        return [frame for frame in data if frame is not None] or None
class StoredProcedure(Executable):
    """A class representing a stored procedure in the database. Can be called to execute the proc; arguments and keyword arguments are passed on."""
    def __init__(self, name: str, schema: str = None, database: str = None, sql: Sql = None) -> None:
        super().__init__(sql=sql)
        # NOTE(review): the next line dereferences self.sql when ``schema``
        # is omitted -- self.sql must already be bound (constructor argument
        # or a prior parametrize() call); confirm construction order.
        self.name, self.schema, self.database = name, schema or self.sql.database.default_schema, database
    def __repr__(self) -> str:
        return f"{type(self).__name__}(name={self.name}, schema={self.schema})"
    def _execute(self, params: dict) -> Any:
        """Call the procedure via the raw DB-API connection and return its cursor."""
        with self.sql.engine.raw_connection() as con:
            cursor = con.cursor()
            # Fully qualified name: the explicit schema wins, else the default.
            cursor.callproc(f"{self.schema or self.sql.database.default_schema}.{self.name}", params)
            return cursor
class Script(Executable):
    """A SQL script stored on the filesystem; calling the instance runs it."""

    def __init__(self, path: PathLike, sql: Sql = None) -> None:
        super().__init__(sql=sql)
        self.file = File.from_pathlike(path)

    def __repr__(self) -> str:
        return f"{type(self).__name__}(file={self.file})"

    def _execute(self, *args, **kwargs) -> Any:
        """Run the script's text through the session and return the cursor."""
        result = self.sql.session.execute(self.file.content, *args, **kwargs)
        return result.cursor
| 2.734375 | 3 |
audio8/wrd2bpe.py | mead-ml/audio8 | 2 | 12764914 | <reponame>mead-ml/audio8
import logging
import os
from argparse import ArgumentParser
from audio8.text import BPEVectorizer
from eight_mile.utils import revlut
# Convert word-level transcript files (.wrd) to BPE subword files (.bpe),
# and dump the BPE vocabulary as dict.bpe.txt under --root_dir.
parser = ArgumentParser()
parser.add_argument("--root_dir")
parser.add_argument("--train_dataset", type=str, help='Dataset (by name), e.g. train-clean-360')
parser.add_argument("--valid_dataset", type=str, help='Dataset (by name), e.g. dev-other')
parser.add_argument("--subword_model_file", type=str, help="The BPE model file", required=True)
parser.add_argument("--subword_vocab_file", type=str, help="The BPE subword vocab", required=True)
parser.add_argument("--emit_begin_tok", type=str, default=[])
parser.add_argument("--emit_end_tok", type=str, default=[])
parser.add_argument("--lower", action='store_true')
parser.add_argument("--split", type=str, default=" ")
args = parser.parse_args()
vec = BPEVectorizer(args.subword_model_file, args.subword_vocab_file, args.emit_begin_tok, args.emit_end_tok)
# Reverse lookup: BPE id -> subword string.
i2w = revlut(vec.vocab)
num_vocab = max(i2w.keys())
# Write the vocabulary, one subword per line, in id order.
# NOTE(review): range(num_vocab) stops before the max key, so the last
# vocab entry is never written -- confirm whether that is intentional.
with open(os.path.join(args.root_dir, 'dict.bpe.txt'), 'w') as wf:
    for i in range(num_vocab):
        wf.write(i2w[i] + '\n')
train_file = os.path.join(args.root_dir, args.train_dataset)
valid_file = os.path.join(args.root_dir, args.valid_dataset)
files = [train_file, valid_file]
# Each dataset's .wrd file is converted into a sibling .bpe file.
input_files = [f.replace('.tsv', '.wrd') for f in files]
output_files = [f.replace('.wrd', '.bpe') for f in input_files]
for inf, outf in zip(input_files, output_files):
    print(outf)
    with open(inf) as rf, open(outf, 'w') as wf:
        for line in rf:
            line = line.strip()
            if args.lower:
                line = line.lower()
            tok = line.split(args.split)
            # Vectorize to BPE ids, then map back to subword strings.
            outline = ' '.join([i2w[x] for x in vec.run(tok)])
            wf.write(outline + '\n')
| 2.53125 | 3 |
xautodl/spaces/__init__.py | Joey61Liuyi/AutoDL-Projects | 817 | 12764915 | #####################################################
# Copyright (c) <NAME> [GitHub D-X-Y], 2021.01 #
#####################################################
# Define complex searc space for AutoDL #
#####################################################
from .basic_space import Categorical
from .basic_space import Continuous
from .basic_space import Integer
from .basic_space import Space
from .basic_space import VirtualNode
from .basic_op import has_categorical
from .basic_op import has_continuous
from .basic_op import is_determined
from .basic_op import get_determined_value
from .basic_op import get_min
from .basic_op import get_max
| 1.914063 | 2 |
AIO/pairs/pairs.py | eddiegz/Personal-C | 3 | 12764916 | infile=open('pairin.txt','r')
# Number of pairs; the input then holds 2*n integers, one per line.
n=int(infile.readline().strip())
# value -> first index where it was seen; on the second occurrence the
# index is subtracted, leaving (first index - second index), i.e. the
# negative of the distance between the pair's positions.
dic={}
for i in range(2*n):
    sn=int(infile.readline().strip())
    if sn not in dic:
        dic[sn]=i
    else:
        dic[sn]-=i
# The minimum (most negative) entry corresponds to the pair whose two
# occurrences are farthest apart; abs() recovers that distance.
maxkey=min(dic,key=dic.get)
outfile=open('pairout.txt','w')
outfile.write(str(abs(dic[maxkey])))
outfile.close() | 2.9375 | 3 |
feder/domains/__init__.py | efefre/feder | 0 | 12764917 | <gh_stars>0
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class DomainsConfig(AppConfig):
    """Django application configuration for the feder.domains app."""
    name = "feder.domains"
    # Translatable label shown e.g. in the Django admin.
    verbose_name = _("Domains")
| 1.242188 | 1 |
imagex/upload/views.py | natlungfy/comp3297-imagex | 0 | 12764918 | from django.shortcuts import render
from accounts.models import Member
from search.models import Image, Tag, Category
import datetime
def parse_tags(tag, img):
    """Attach the comma-separated tag names in *tag* to Image *img*.

    Matching is case-insensitive: a tag that does not exist yet is created
    through the image's M2M manager; an existing tag is linked as-is.
    """
    data = tag.split(',')
    for d in data:
        # exists() is the idiomatic (and cheaper) Django check for a pure
        # existence test, versus count() == 0.
        if not Tag.objects.filter(tag_name__iexact=d).exists():
            img.tag.create(tag_name=d)
        else:
            exist_tag = Tag.objects.get(tag_name__iexact=d)
            img.tag.add(exist_tag)
def upload_image(request):
    """Handle the image-upload page: enforce quotas, validate and store a jpg.

    Renders upload/upload.html with the remaining daily/system quota, the
    category choices, and a state/message pair ('T' success, 'F' failure,
    '' when nothing was submitted).
    """
    member = Member.objects.get(username=request.user.username) # e.g. Member instance with username 'nat'
    all_cat = Category.CATEGORY
    message = state = ''
    d = datetime.date.today()
    daily_usage = member.daily_quota
    # Reset the daily quota (to 4) on the first visit of a new day.
    # NOTE(review): the trigger compares today with the *last login* date --
    # confirm this is the intended reset condition.
    if d > member.username.last_login.date():
        member.daily_quota = 4
        member.save()
        daily_usage = member.daily_quota
    # Remaining system-wide quota: 3 images per member in total (kept as a
    # string because the template consumes it as text).
    system_usage = str(3 - Image.objects.filter(photographer=member).count())
    if request.method == 'POST':
        img_name = request.FILES.get('img').name
        tag_list = request.POST.get('tag').split(',')
        cat = Category.objects.get(cat_name=request.POST.get('category'))
        if len(tag_list) > 10:
            # Reject uploads with more than 10 tags.
            state = 'F'
            message = 'You cannot add more than 10 tags for an image. Please try again.'
        elif img_name.endswith(('.jpg', '.jpeg')):
            new_img = Image(
                img=request.FILES.get('img'),
                title=request.POST.get('title'),
                description=request.POST.get('description'),
                category=cat,
                photographer=member
            )
            new_img.save()
            # parse tags
            parse_tags(request.POST.get('tag'), new_img)
            # handle quota
            member.daily_quota -= 1
            member.save()
            daily_usage = member.daily_quota
            system_usage = str(3 - Image.objects.filter(photographer=member).count())
            # send success signal
            state = 'T'
            message = 'Image uploaded'
        else:
            # send error signal
            state = 'F'
            message = 'Image is not jpg. Please upload only jpg files.'
    return render(request, 'upload/upload.html', {'daily_usage': daily_usage, 'system_usage': system_usage, 'all_cat': all_cat, 'state': state, 'message': message}) | 2.234375 | 2 |
stacks_queues/queue_via_stacks.py | UPstartDeveloper/Problem_Solving_Practice | 0 | 12764919 | <reponame>UPstartDeveloper/Problem_Solving_Practice<filename>stacks_queues/queue_via_stacks.py
"""
Queue via Stacks
Implement a MyQueue class which implements a queue using two stacks.
Questions and Assumptions:
Model 1: Back to Back ArrayStacks
MyQ
stack1 stack2
t2 t1
[ ][ x1 x2 x3 x4]
f ? b
1. Interface:
- enqueue
- dequeue
- front
2. Top Down
- enq ----> popping from the front
- deq ---> adding item to the end
3. Do the stacks have any size limits?
- assume no
- But,
4. Time Constraints on the Data Structure?
- for now assume none
5. Should we account for error handling e.g. dequeing from an empty queue?
Break Down the Problem - is it possible to implement a q w/ 1 stack?
no, b/c LIFO not FIFO
[x1 x2 x3 x4 x5]
t1
how to go from LIFO -> FIFO using a second stack?
Brainstorming:
1. use a second stack to reverse the first
- fill up the first stack however much
- once it got full, pop all the items into another stack
- then if someone calls pop, pop normally from the second stack
[x6 ]
t1
[x5 x4 x3 x2 x1]
t2
Pros:
- slow
- imposes size restrictions
2. Using a LinkedList to immplement the stack
- intuition: use stack2 to get access to the "bottom" stack1
- approach:
- enqueue - append to the tail of a ll
- deq - delete head
- front - return item at the end
Pros:
- resolves ambiguity about middle
- no size restrictions
- fast
"""
class QueueNode:
    """One node of a singly linked list: a payload value plus a forward link."""
    def __init__(self, val):
        # Value carried by this node.
        self.val = val
        # Successor node; stays None until the node is linked into a list.
        self.next = None
class MyQueue:
    """FIFO queue backed by a singly linked list of QueueNode objects.

    ``top`` is the front of the queue (next node to dequeue); ``back`` is the
    most recently enqueued node.  ``back`` stays None while the queue holds at
    most one node, so the tail is ``back`` when set, otherwise ``top``.
    """

    def __init__(self, top: 'QueueNode'):
        self.top, self.back = top, None

    def front(self):
        """Return the value at the front without removing it (None if empty)."""
        if self.top is not None:
            return self.top.val

    def enqueue(self, node: 'QueueNode'):
        """Append ``node`` at the back of the queue.

        Bug fix: the previous version never linked the second enqueued node to
        the first (``top.next`` stayed None), so every node after the first
        was unreachable and lost on dequeue.
        """
        if self.top is None:
            # Empty queue: the new node becomes the front.
            self.top = node
        else:
            # Link the new node after the current tail.
            tail = self.back if self.back is not None else self.top
            tail.next = node
            self.back = node

    def dequeue(self) -> 'QueueNode':
        """Remove and return the front node, or None if the queue is empty."""
        if self.top is None:
            return None
        front = self.top
        self.top = front.next
        if self.top is None:
            # Queue emptied: drop the stale tail reference as well.
            self.back = None
        return front
# Smoke test, exercised only when the module is run directly.
if __name__ == "__main__":
    # A: init MyQ w/ no head or tail, then enQ
    q = MyQueue(None)
    x1 = QueueNode(1)
    q.enqueue(x1)
    # NOTE(review): these asserts pin the current implementation's state after
    # a single enqueue (back stays None until a second node is added).
    assert q.top == x1
    assert q.top.next == None
    assert q.back == None
    assert q.top.next == q.back
    # B: init MyQ w/ a head, then enQ
    # C: enq after there's a head and tail
    pass
# NOTE(review): the trailing string below is a no-op sketch of the intended
# empty-queue layout, kept from the author's notes.
"""
top = None
back = None
List
____
"""
| 4.1875 | 4 |
src/pascal-ranslate-exp.py | alisure-ml/Semantic-Segmentation-clockwork-fcn | 2 | 12764920 | import os
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import time
from collections import namedtuple
import caffe
from lib import run_net
from lib import score_util
from datasets.pascal_voc import Pascal
PV = Pascal('C:\\ALISURE\\Data\\voc\\VOCdevkit\\VOC2012')
val_set = PV.get_data_set()
def show_demo():
    """Visualise one validation image as a translated 6-frame sequence.

    Top row: translated frames; second row: their translated labels;
    bottom row: the original label rendered with the palette and its
    boundary mask.
    """
    # First image of the loaded PASCAL VOC split.
    image_name_0 = val_set[0]
    im, label = PV.load_image(image_name_0), PV.load_label(image_name_0)
    # Synthesise frames by shifting the image/label 32 px per frame.
    im_t, label_t = PV.make_translated_frames(im, label, shift=32, num_frames=6)
    plt.rcParams['image.cmap'] = 'gray'
    plt.rcParams['image.interpolation'] = 'nearest'
    plt.rcParams['figure.figsize'] = (12, 12)
    plt.figure()
    for i, im in enumerate(im_t):
        plt.subplot(3, len(im_t), i + 1)
        plt.imshow(im)
        plt.axis('off')
        plt.subplot(3, len(label_t), len(im_t) + i + 1)
        plt.imshow(PV.palette(label_t[i]))
        plt.axis('off')
    plt.subplot(3, len(label_t), 2 * len(im_t) + 2)
    plt.imshow(PV.palette(label))
    plt.axis('off')
    plt.subplot(3, len(label_t), 2 * len(im_t) + 5)
    plt.imshow(PV.make_boundaries(label, thickness=2))
    plt.axis('off')
    plt.show()
# Number of PASCAL VOC classes reported by the dataset wrapper.
class_number = len(PV.classes)
# Frames per synthetic "video" and boundary thickness used for scoring.
num_frames = 6
thickness = 5
# Pixel shifts evaluated between consecutive frames.
shifts = (16, 32)
# Bundle each evaluation method with its net prototxt, weights, inference
# function, and the input offset needed to align pipelined outputs.
Method = namedtuple('Method', 'method arch weights infer_func, input_offset')
fcn = Method('fcn', '../nets/voc-fcn8s.prototxt',
             '../nets/voc-fcn8s-heavy.caffemodel', run_net.segrun, 2)
baseline_3stage = Method('baseline_3stage', '../nets/voc-fcn-pool3.prototxt',
                         '../nets/voc-fcn-pool3.caffemodel', run_net.segrun, 2)
baseline_2stage = Method('baseline_2stage', '../nets/voc-fcn-pool4.prototxt',
                         '../nets/voc-fcn-pool4.caffemodel', run_net.segrun, 2)
pipeline_3stage = Method('pipeline_3stage', '../nets/stage-voc-fcn8s.prototxt',
                         '../nets/voc-fcn8s-heavy.caffemodel', run_net.pipeline_3stage_forward, 0)
pipeline_2stage = Method('pipeline_2stage', '../nets/stage-voc-fcn8s.prototxt',
                         '../nets/voc-fcn8s-heavy.caffemodel', run_net.pipeline_2stage_forward, 1)
def score_translations(method, shift, arch, weights, infer, offset):
    """
    Score the translated "video" of PASCAL VOC seg11valid images
    taking care of the net architecture and weights, the particular inference method,
    and the input offset needed to align every frame and pipeline methods.
    """
    net = caffe.Net(arch, weights, caffe.TEST)
    # Accumulated confusion matrices: whole-segment and boundary-only.
    hist, hist_b = np.zeros((class_number, class_number)), np.zeros((class_number, class_number))
    # NOTE(review): only the first 10 validation images are scored here.
    for index, image_name in enumerate(val_set[0: 10]):
        print("{} begin {}".format(time.strftime("%H:%M:%S", time.localtime()), index))
        im, label = PV.load_image(image_name), PV.load_label(image_name)
        im_frames, label_frames = PV.make_translated_frames(im, label, shift=shift, num_frames=num_frames)
        # Drop the leading frames so all methods compare aligned outputs.
        im_frames, label_frames = im_frames[offset:], label_frames[offset:]
        # prepare pipelines: feed initial inputs then skip accordingly
        if method == 'pipeline_3stage':
            run_net.pipeline_fill_3stage(net, PV.pre_process(im_frames[0]), PV.pre_process(im_frames[1]))
            im_frames, label_frames = im_frames[2:], label_frames[2:]
        elif method == 'pipeline_2stage':
            run_net.pipeline_fill_2stage(net, PV.pre_process(im_frames[0]))
            im_frames, label_frames = im_frames[1:], label_frames[1:]
        for im_t, label_t in zip(im_frames, label_frames):
            print("{} begin {} .....".format(time.strftime("%H:%M:%S", time.localtime()), index))
            out = infer(net, PV.pre_process(im_t))
            # NOTE(review): this opens a viewer window for every frame (debug
            # visualisation); consider removing it for batch evaluation.
            Image.fromarray(out * 12).convert("L").show()
            hist += score_util.score_out_gt(out, label_t, n_cl=class_number)
            bdry = PV.make_boundaries(label_t, thickness=thickness)
            hist_b += score_util.score_out_gt_bdry(out, label_t, bdry, n_cl=class_number)
        pass
    # Report accuracy / class accuracy / mean IU / frequency-weighted IU
    # for both the full-segmentation and boundary histograms.
    for name, h in zip(('seg', 'bdry'), (hist, hist_b)):
        accP, cl_accP, mean_iuP, fw_iuP = score_util.get_scores(h)
        print('{}: {}, shift {}'.format(method, name, shift))
        print('acc\t\t cl acc\t\t mIU\t\t fwIU')
        print('{:f}\t {:f}\t {:f}\t {:f}\t'.format(100*accP, 100*cl_accP, 100*mean_iuP, 100*fw_iuP))
# Evaluate every method at every translation shift.
for shift in shifts:
    for m in (fcn, baseline_3stage, pipeline_3stage, baseline_2stage, pipeline_2stage):
        score_translations(m.method, shift, m.arch, m.weights, m.infer_func, m.input_offset)
"""
fcn: seg, shift 16
acc cl acc mIU fwIU
91.974863 82.881608 70.022842 85.902034
fcn: bdry, shift 16
acc cl acc mIU fwIU
63.948030 65.065930 49.555515 53.667815
baseline_3stage: seg, shift 16
acc cl acc mIU fwIU
60.286632 13.705269 4.690409 43.286493
baseline_3stage: bdry, shift 16
acc cl acc mIU fwIU
11.166320 11.496480 1.818804 3.798124
pipeline_3stage: seg, shift 16
acc cl acc mIU fwIU
88.349069 74.970788 55.175040 79.954080
pipeline_3stage: bdry, shift 16
acc cl acc mIU fwIU
56.349989 56.221222 41.709843 46.512476
baseline_2stage: seg, shift 16
acc cl acc mIU fwIU
69.464357 36.060632 13.589065 53.310369
baseline_2stage: bdry, shift 16
acc cl acc mIU fwIU
29.001448 26.982958 8.294242 19.001877
pipeline_2stage: seg, shift 16
acc cl acc mIU fwIU
90.942986 80.925445 67.604536 84.123599
pipeline_2stage: bdry, shift 16
acc cl acc mIU fwIU
61.421430 61.866688 46.878984 51.400744
baseline_3stage: seg, shift 32
acc cl acc mIU fwIU
59.179855 13.635292 4.642013 41.671485
baseline_3stage: bdry, shift 32
acc cl acc mIU fwIU
11.052108 11.406026 1.778673 3.790397
pipeline_3stage: seg, shift 32
acc cl acc mIU fwIU
82.182183 64.902390 49.651163 70.833176
pipeline_3stage: bdry, shift 32
acc cl acc mIU fwIU
50.599466 48.977500 35.428999 41.064723
"""
| 2.453125 | 2 |
channels/management/commands/backpopulate_channel_fields.py | mitodl/open-discussions | 12 | 12764921 | <filename>channels/management/commands/backpopulate_channel_fields.py
"""Management command to backpopulate channel fields"""
from django.core.management.base import BaseCommand
from channels.tasks import populate_channel_fields
from open_discussions.utils import now_in_utc
class Command(BaseCommand):
    """Backpopulate channel fields from reddit"""

    help = "Backpopulate channel fields from reddit"

    def handle(self, *args, **options):
        """Run celery task to backpopulate channel fields from reddit"""
        task = populate_channel_fields.delay()
        self.stdout.write(
            # Typo fix: the message previously read "channel fieldss".
            "Started celery task {task} to populate channel fields".format(task=task)
        )
        self.stdout.write("Waiting on task...")
        start = now_in_utc()
        # Block until the celery task finishes so we can report the duration.
        task.get()
        total_seconds = (now_in_utc() - start).total_seconds()
        self.stdout.write(
            "Population of channel fields finished, took {} seconds".format(
                total_seconds
            )
        )
| 2.59375 | 3 |
tests/test_generation.py | createchaos/coop_assembly | 3 | 12764922 | import os
import pytest
from itertools import combinations
from compas.datastructures import Network
from coop_assembly.help_functions import find_point_id
from coop_assembly.help_functions import find_point_id, tet_surface_area, \
tet_volume, distance_point_triangle
from coop_assembly.geometry_generation.tet_sequencing import \
compute_distance_from_grounded_node
from coop_assembly.geometry_generation.tet_sequencing import \
get_pt2tri_search_heuristic_fn, \
point2point_shortest_distance_tet_sequencing, \
point2triangle_tet_sequencing
from coop_assembly.geometry_generation.execute import execute_from_points
from coop_assembly.assembly_info_generation import calculate_gripping_plane, calculate_offset
from coop_assembly.help_functions.parsing import export_structure_data, parse_saved_structure_data
@pytest.fixture
def save_dir():
    """Absolute path of the test_data directory next to this test file."""
    here = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(here, 'test_data')
@pytest.mark.gen_from_pts
# @pytest.mark.parametrize('test_set_name', [('single_cube'), ('YJ_12_bars')])
@pytest.mark.parametrize('test_set_name', [('YJ_12_bars')])
@pytest.mark.parametrize('radius', [(3.17), ])
# @pytest.mark.parametrize('pt_search_method', [('point2point'), ])
@pytest.mark.parametrize('pt_search_method', [('point2triangle'), ])
# @pytest.mark.parametrize('pt_search_method', [('point2point'), ('point2triangle')])
def test_generate_from_points(points_library, test_set_name, radius, pt_search_method, save_dir, write):
    """Sequence tets from a point set and build the bar structure from them."""
    points, base_tri_pts = points_library[test_set_name]
    print('\n' + '#'*10)
    print('Testing generate from point for set: {}, total # of pts: {}'.format(test_set_name, len(points)))
    # Map the three base-triangle points back to their indices in `points`.
    start_tri_ids = [find_point_id(base_pt, points) for base_pt in base_tri_pts]
    assert len(start_tri_ids) == 3, 'start triangle should only have three points!'
    print('base triangle ids: {}'.format(start_tri_ids))
    if pt_search_method == 'point2point':
        cost_from_node = {}
        all_pt_ids = list(range(len(points)))
        elements = list(combinations(all_pt_ids, 2))
        cost_from_node = compute_distance_from_grounded_node(elements, points, start_tri_ids)
        tet_node_ids = point2point_shortest_distance_tet_sequencing(points, cost_from_node)
    elif pt_search_method == 'point2triangle':
        ordering_heuristic = 'tet_surface_area'
        penalty_cost = 2.0
        print('pt search strategy: {} | heuristic: {} | penalty cost: {}'.format(pt_search_method, ordering_heuristic, penalty_cost))
        # NOTE(review): heuristic_fn is computed but never passed to the
        # sequencing call below — confirm whether it should be.
        heuristic_fn = get_pt2tri_search_heuristic_fn(points, penalty_cost, ordering_heuristic)
        tet_node_ids = point2triangle_tet_sequencing(points, start_tri_ids)
    else:
        raise NotImplementedError('search method not implemented!')
    b_struct_data, o_struct_data = execute_from_points(points, tet_node_ids, radius, correct=True, check_collision=True)
    if write:
        export_structure_data(save_dir, b_struct_data, o_struct_data, file_name=test_set_name+'_'+pt_search_method+'.json')
@pytest.mark.gen_grasp_planes
@pytest.mark.parametrize('test_file_name', [('YJ_12_bars_point2triangle.json'),])
def test_gen_grasp_planes(points_library, test_file_name, save_dir):
    """Load a saved structure and compute gripping planes/offsets per bar."""
    b_struct_data, o_struct_data, _ = parse_saved_structure_data(os.path.join(save_dir, test_file_name))
    o_struct = Network.from_data(o_struct_data)
    b_struct = Network.from_data(b_struct_data)
    # Attach the bar structure so downstream calls can reach it from o_struct.
    o_struct.struct_bar = b_struct
    offset_d1, offset_d2 = 5, 5
    # Number of rotational / translational grasp-plane samples per bar.
    nb_rot, nb_trans = 4, 4
    seq = [v for v in b_struct.vertex]
    for v in b_struct.vertex:
        calculate_gripping_plane(b_struct, v, b_struct.vertex[v]["mean_point"], nb_rot=nb_rot, nb_trans=nb_trans)
        calculate_offset(o_struct, b_struct, v, offset_d1, offset_d2, seq)
| 2.015625 | 2 |
etutils/viz/savefig.py | erdogant/etutils | 0 | 12764923 | """ This function saves figures in PNG format.
from etutils.viz.savefig import savefig
A=savefig(data, <optional>)
INPUT:
data: fig object
OPTIONAL
OUTPUT
BOOLEAN
[0]: If not successful
[1]: If successful
DESCRIPTION
This function saves figures in PNG format.
EXAMPLE
from etutils.viz.donutchart import donutchart
from etutils.viz.savefig import savefig
A = donutchart([15, 30, 45, 10],['aap','boom','mies','banaan'])
B = savefig(A,"c://temp//magweg//fig.png")
SEE ALSO
"""
#print(__doc__)
#--------------------------------------------------------------------------
# Name : savefig.py
# Version : 0.1.0
# Author : E.Taskesen
# Date : Sep. 2017
#--------------------------------------------------------------------------
# Libraries
from os import mkdir
from os import path
#%%
def savefig(fig, filepath, dpi=100, transp=False):
    """Save a figure object to *filepath* as an image file.

    Parameters
    ----------
    fig : object exposing ``.savefig`` (e.g. a matplotlib Figure)
    filepath : str
        Destination path; missing parent directories are created.
        An empty string disables saving.
    dpi : int
        Output resolution forwarded to ``fig.savefig``.
    transp : bool
        Whether the background is rendered transparent.

    Returns
    -------
    bool
        True if the figure was written, False otherwise.
    """
    import os

    if filepath == "":
        # Nothing to write; mirrors the original empty-path behaviour.
        return False

    # Create the parent directory tree if needed.  os.makedirs (unlike the
    # previous os.mkdir call) handles multiple missing levels, and the
    # `parent_dir and` guard avoids the old crash on a bare filename
    # (os.mkdir("") raised FileNotFoundError).
    parent_dir, _ = os.path.split(filepath)
    if parent_dir and not os.path.exists(parent_dir):
        os.makedirs(parent_dir)

    fig.savefig(filepath, dpi=dpi, transparent=transp, bbox_inches='tight')
    return True
| 3.34375 | 3 |
Python/leetcode.260.single-number-iii.py | tedye/leetcode | 4 | 12764924 | <filename>Python/leetcode.260.single-number-iii.py
class Solution(object):
    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        # Toggle set membership per occurrence: values seen an even number of
        # times cancel out, leaving exactly the elements that appear once.
        singles = set()
        for value in nums:
            singles.symmetric_difference_update((value,))
        return list(singles)
| 3.640625 | 4 |
app/models/old/teams.py | WilliamBesseau/Groot | 0 | 12764925 | from app.models import db
from app.models.projects import Project
class Team(db.Model):
    """A team with a name, logo, description, token balance and an optional
    assigned project."""

    __tablename__ = 'teams'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    logo = db.Column(db.String)
    tokens = db.Column(db.Integer)
    description = db.Column(db.String)
    # Foreign key to Project.id (constraint name fk_assigned_project_id).
    id_assigned_project = db.Column(db.Integer, db.ForeignKey(Project.id, name="fk_assigned_project_id"))
    project = db.relationship('Project', lazy='joined')

    def __repr__(self):
        # Bug fix: the first placeholder was missing its closing quote
        # ("id='{},"), producing a malformed repr string.
        return "<Team (id='{}', name='{}', logo='{}', tokens='{}', description='{}', id_assigned_project='{}')>".format(
            self.id, self.name, self.logo, self.tokens, self.description, self.id_assigned_project)
| 2.578125 | 3 |
test.py | Chash-dot/Pythin_CI_Example | 0 | 12764926 | <filename>test.py
import unittest
import SimpleRest as app
class TestHello(unittest.TestCase):
    """Tests for the SimpleRest Flask app, driven through its test client."""

    def setUp(self):
        # Test mode makes Flask propagate exceptions instead of rendering 500s.
        app.app.testing = True
        self.app = app.app.test_client()

    def test_hello(self):
        rv = self.app.get('/')
        self.assertEqual(rv.status, '200 OK')
        # NOTE(review): on Python 3, rv.data is bytes; these equality checks
        # assume Python 2 / str responses — confirm the target interpreter.
        self.assertEqual(rv.data, 'Home Page')

    def test_hello_greet(self):
        rv = self.app.get('/greet')
        self.assertEqual(rv.status, '200 OK')
        self.assertEqual(rv.data, 'Greetings from Flask Server')

    def test_hello_name(self):
        name = 'Simon'
        # Bug fix: the URL and assertion previously used the literal text
        # "{name}" without interpolation, so the request hit "/hello/{name}"
        # and the assertion was vacuous.
        rv = self.app.get('/hello/{}'.format(name))
        self.assertEqual(rv.status, '200 OK')
        self.assertIn(name, rv.data)
if __name__ == '__main__':
    import xmlrunner
    runner = xmlrunner.XMLTestRunner(output='test-reports')
    # Bug fix: unittest.main() calls sys.exit() on completion, so the second
    # bare unittest.main() call that used to follow was unreachable dead code.
    unittest.main(testRunner=runner)
| 3.15625 | 3 |
src/data_loading.py | HugoHo0212/Elasticsearch-demo | 0 | 12764927 | <filename>src/data_loading.py<gh_stars>0
from typing import Tuple
import tarfile
import os
from elasticsearch import Elasticsearch
from elasticsearch.client import IndicesClient
from pyquery import PyQuery as pq
def load_data(es: Elasticsearch) -> None:
    """
    Load every HTML document from the tarball "wiki-small.tar.gz"
    into the "wikipedia" index of the Elasticsearch cluster.

    Parameters
    ----------
    es : Elasticsearch
        The Elasticsearch client

    Returns
    -------
    None
    """
    doc_id = 1
    # Context manager guarantees the archive is closed even if indexing
    # raises (the previous version leaked the handle on any exception).
    with tarfile.open("wiki-small.tar.gz") as tf:
        for tarinfo in tf.getmembers():
            # Only members whose basename contains ".html" are indexed.
            if ".html" not in os.path.split(tarinfo.name)[1]:
                continue
            member = tf.extractfile(tarinfo)
            if member is None:
                # extractfile returns None for non-regular members.
                continue
            # Avoids shadowing the builtin `file` as the old code did.
            title, body = parse_html(member.read())
            es.index(index="wikipedia", id=doc_id, body={"title": title, "body": body})
            doc_id += 1
def parse_html(html: str) -> Tuple[str, str]:
    """
    Strip the markup from an HTML document and return its textual parts.

    Parameters
    ----------
    html : str
        The HTML text

    Returns
    -------
    Tuple[str, str]
        A tuple of (title, body)
    """
    document = pq(html)
    return (document("title").text(), document("body").text())
def create_wikipedia_index(ic: IndicesClient) -> None:
    """
    Add an index to Elasticsearch called 'wikipedia'
    Parameters
    ----------
    ic : IndicesClient
        The client to control Elasticsearch index settings
    Returns
    -------
    None
    """
    #
    # The index uses a custom analyzer: standard tokenizer + lowercasing +
    # a stop-word filter loaded from stopwords.txt; the "body" field is
    # mapped to text analysed with that analyzer.
    ic.create(
        index = "wikipedia",
        body = {
            "settings":{
                "analysis":{
                    "analyzer":{
                        "my_analyzer":{
                            "type": "custom",
                            "tokenizer": "standard",
                            "filter": [
                                "lowercase",
                                "my_stops"
                            ]
                        }
                    },
                    "filter":{
                        "my_stops":{
                            "type": "stop",
                            "stopwords_path":"stopwords.txt"
                        }
                    }
                }
            },
            'mappings':{
                'properties':{
                    'body':{
                        'type': 'text',
                        'analyzer': 'my_analyzer'
                    }
                }
            }
        }
    )
| 3.296875 | 3 |
ovpn_bypass.py | gaohuazuo/pku-free-ip | 0 | 12764928 | #!/usr/bin/env python3
import os
from fetch import fetch
def main():
    """Emit an OpenVPN route directive for every fetched PKU free-IP entry."""
    routes = fetch()
    print('# PKU free ip')
    for address, _, mask in routes:
        print('route', address, mask, 'net_gateway')

if __name__ == '__main__':
    main()
| 2.890625 | 3 |
labs-python/tests/test3/exe2.py | xR86/ml-stuff | 3 | 12764929 | <filename>labs-python/tests/test3/exe2.py
import urllib
import re
import json
# NOTE: this is Python 2 code (urllib.urlopen, print statements).
# Fetch the timetable page for group I3B5 from a local server.
f = urllib.urlopen("http://localhost:8000/participanti/orar_I3B5.html")
response = f.read()
# print response
# Sample of the HTML rows being scraped:
"""
<tr>
 <td> 12:00</td>
 <td> 14:00</td>
 <td> Animatie 3D: algoritmi si tehnici fundamentale</td>
 <td> Seminar</td>
 <td> <a href="../participanti/orar_vitcu.html">Conf. dr. Vitcu Anca</a>
</td>
 <td> <a href="../resurse/orar_C413.html">C413</a>
</td>
 <td align="center">
</td>
 <td align="center">
 4
</td>
</tr>
"""
# Capture the linked teacher and room texts from "Seminar" rows.
pattern = '<td> Seminar</td> <td><a .*>(.*)</a></td> <td><a .*>(.*)</a></td>'
#pattern = '<td>.*<a .*>(.*)</a></td> .* <td><a .*>(.*)</a></td>'
# NOTE(review): `prog` and `tuples` are unused, and findall is called with the
# raw pattern rather than the compiled `prog`.
prog = re.compile(pattern)
matches = re.findall(pattern, response, flags=re.DOTALL)
tuples = ()
for match in matches:
    print match
| 3.0625 | 3 |
core/urls.py | adson62/donodotime | 0 | 12764930 | from django.urls import path
from core import views
app_name = 'core'
# Routes for the "core" app: news (noticias) CRUD plus comment creation.
urlpatterns = [
    path('', views.index, name='index'),
    path('noticias/', views.noticiaListView.as_view(), name='noticiaListView'),
    path('noticias/new/', views.noticiaCadastro, name='noticiaCadastro'),
    path('noticias/<int:id>/', views.noticiaDetalhe, name='noticiaDetalhe'),
    path('noticias/<int:id>/edit/', views.noticiaEdit, name='noticiaEdit'),
    path('noticias/<int:id>/delete/', views.noticiaExcluirBotao, name='noticiaExcluirBotao'),
    path('noticias/<int:id>/delete/confirm/', views.noticiaExcluir, name='noticiaExcluir'),
    path('comentarios/<int:id>/new/', views.comentarioCadastro, name='comentarioCadastro'),
] | 1.835938 | 2
visualizer/go.py | AndreasMadsen/bachelor-code | 1 | 12764931 |
import os.path as path
import sys
import numpy as np
# Make the repository root importable before pulling in project modules.
thisdir = path.dirname(path.realpath(__file__))
sys.path.append(path.join(thisdir, '..'))
import dataset
import model
from graph_server import GraphServer
# Build name is taken from the first CLI argument.
name = sys.argv[1]
build_dir = path.join(thisdir, '..', 'outputs', 'builds')
# Load the precomputed cluster/distance/connectivity artifacts for the build.
clusters = model.load(path.join(build_dir, '%s.cluster.npz' % name))
distance = model.load(path.join(build_dir, '%s.distance.hd5' % name))
connectivity = model.load(path.join(build_dir, 'connectivity.hd5'))
nodes = dataset.news.fetch(100000)
# Start serving the graph data over the GraphServer.
server = GraphServer(clusters, distance, connectivity, nodes, verbose=True)
server.listen()
# test
# print(server._groups_from_title('Denmark'))
# print(server._fetch_single_group(300))
| 2.25 | 2 |
pygmy/rest/wsgi.py | ParikhKadam/pygmy | 571 | 12764932 | #!/usr/bin/env python3
from pygmy.core.initialize import initialize
# NOTE: initialize() is deliberately called before importing the app —
# presumably the app depends on initialized core state at import time;
# keep this ordering.
initialize()
from pygmy.rest.manage import app
if __name__ == '__main__':
    app.run()
| 1.273438 | 1 |
adventOfCode/Day3_2.py | oana-cit/Aoc2017 | 0 | 12764933 | '''
Created on Dec 4, 2017
@author: atip
'''
def getAdjSquareSum():
    """Sum the values of the eight cells surrounding the current position."""
    global posX, posY, valMatrix
    row, col = maxRange + posX, maxRange + posY
    total = 0
    for d_row in (-1, 0, 1):
        for d_col in (-1, 0, 1):
            if d_row or d_col:
                total += valMatrix[row + d_row][col + d_col]
    return total
def check():
    """Report whether the value at the current position exceeds the target."""
    global posX, posY, theirNumber, maxRange, valMatrix
    current = valMatrix[maxRange + posX][maxRange + posY]
    if current > theirNumber:
        print("[{}][{}] +++> {}".format(posX, posY, current))
        return True
    return False
def checkAndUpdate():
    """Write the neighbour sum into the current cell, then run check()."""
    global posX, posY, theirNumber, maxRange, valMatrix
    # The very first visited cell has no populated neighbours: store 1.
    neighbour_sum = getAdjSquareSum() or 1
    valMatrix[maxRange + posX][maxRange + posY] = neighbour_sum
    print("[{}][{}] => {}".format(posX, posY, neighbour_sum))
    return check()
def goSpiraling(stepX, stepY):
    """Walk stepX cells along x or stepY cells along y (sign = direction),
    filling each visited cell via checkAndUpdate().

    Returns True as soon as a written (or already present) value exceeds
    theirNumber.
    """
    global posX, posY, theirNumber, maxRange, valMatrix
    finished = False
    while stepX > 0 and not finished:
        print("stepX > 0 : stepX: {}, posX: {}".format(stepX, posX))
        posX += 1
        stepX -= 1
        finished = checkAndUpdate()
        # finished = True
    while stepX < 0 and not finished:
        print("stepX < 0 : stepX: {}, posX: {}".format(stepX, posX))
        posX -= 1
        stepX += 1
        finished = checkAndUpdate()
        # finished = True
    while stepY > 0 and not finished:
        print("stepY > 0 : stepY: {}, posY: {}".format(stepY, posY))
        posY += 1
        stepY -= 1
        finished = checkAndUpdate()
        # finished = True
    while stepY < 0 and not finished:
        print("stepY < 0 : stepY: {}, posY: {}".format(stepY, posY))
        posY -= 1
        stepY += 1
        finished = checkAndUpdate()
        # finished = True
    # Re-check the final cell in case the loops exited without triggering.
    if valMatrix[maxRange + posX][maxRange + posY] > theirNumber:
        return check()
    return False
# Advent of Code 2017 day 3 part 2: spiral outwards, writing neighbour sums,
# until a cell value exceeds the puzzle input. State is shared via globals.
print("Let us begin!")
theirNumber = 265149 # 438
# theirNumber = 1 #0
# theirNumber = 747
# 200x200 grid centred at (maxRange, maxRange) so signed coordinates fit.
valMatrix = [[0 for col in range(200)] for row in range(200)]
maxRange = 100
# NOTE(review): maxNo and crtNumber appear to be unused.
maxNo = 1
crtStep = 0
posX = 0
posY = 0
finished = False
crtNumber = 1
# Seed the centre cell with 1.
valMatrix[maxRange + posX][maxRange + posY] = 1
# Spiral: one step right, then alternate axes with a step size that grows
# by one after each pair of sides, until a written value exceeds the target.
while not finished:
    finished = goSpiraling(1, 0)
    crtStep += 1
    if not finished:
        finished = goSpiraling(0, crtStep)
    crtStep += 1
    if not finished:
        finished = goSpiraling(-crtStep, 0)
    if not finished:
        finished = goSpiraling(0, -crtStep)
    if not finished:
        finished = goSpiraling(crtStep, 0)
print("[{}][{}] ==>> {}".format(posX, posY, valMatrix[maxRange + posX][maxRange + posY]))
| 2.84375 | 3 |
python/CeaserCipher.py | rishab9750/HacktoberFest_2021 | 33 | 12764934 | def encrypt(text,s):
result = ""
for i in range(len(text)):
char = text[i]
if (char.isupper()):
result += chr((ord(char) + s-65) % 26 + 65)
else:
result += chr((ord(char) + s - 97) % 26 + 97)
return result
# Demo run (Python 2 print statements; the sample text is a redacted token).
text = "ATT<PASSWORD>"
s = 4
print "Text : " + text
print "Shift : " + str(s)
print "Cipher: " + encrypt(text,s)
| 3.984375 | 4 |
tests/test_NDList.py | Tyler314/led_matrix | 3 | 12764935 | <gh_stars>1-10
import unittest
from random import random
import led_matrix
class test_NDList(unittest.TestCase):
    """Unit tests for led_matrix.NDList: construction, shape/size metadata,
    and tuple-indexed get/set across a range of dimensionalities."""
    def test_init_0(self):
        ndl = led_matrix.NDList(shape=(0,))
        self.assertEqual(ndl, [])
    def test_init_1(self):
        ndl = led_matrix.NDList(shape=(1,))
        self.assertEqual(ndl, [0.0])
    def test_init_17x7x41x90x1(self):
        # A freshly built NDList should equal the equivalent nested list of 0.0.
        ndl = led_matrix.NDList(shape=(17, 7, 41, 90, 1))
        ndl_list = [[[[[0.0 for _ in range(1)]
                       for _ in range(90)]
                      for _ in range(41)]
                     for _ in range(7)]
                    for _ in range(17)]
        self.assertEqual(ndl, ndl_list)
    def test_shape(self):
        ndl = led_matrix.NDList(shape=(0,))
        self.assertEqual(ndl, [])
        ndl = led_matrix.NDList(shape=(17,))
        self.assertEqual(ndl.shape, (17,))
        ndl = led_matrix.NDList(shape=(71, 17, 303))
        self.assertEqual(ndl.shape, (71, 17, 303))
    def test_size(self):
        # size is the total element count (product of the shape dimensions).
        ndl = led_matrix.NDList(shape=(0,))
        self.assertEqual(ndl.size, 0)
        ndl = led_matrix.NDList(shape=(1,))
        self.assertEqual(ndl.size, 1)
        ndl = led_matrix.NDList(shape=(15, 24, 2))
        self.assertEqual(ndl.size, 720)
    def test_get_single(self):
        # Test a single embedded list
        num = random() * 10
        ndl = led_matrix.NDList(shape=(1,), fill=num)
        self.assertEqual(ndl[0], num)
    def test_get_10x10(self):
        # Test a 10x10 array
        num = random() * 10
        ndl = led_matrix.NDList(shape=(10, 10), fill=num)
        for i in range(10):
            for j in range(10):
                self.assertEqual(ndl[i, j], num)
    def test_get_4x4(self):
        # Test a 4x4x4 array
        # NOTE(review): the name says 4x4x4 but the shape used is (5, 5, 5).
        num = random() * 10
        ndl = led_matrix.NDList(shape=(5, 5, 5), fill=num)
        for i in range(5):
            for j in range(5):
                for k in range(5):
                    self.assertEqual(ndl[i, j, k], num)
    def test_get_17x7x41x90x1(self):
        # Advanced Test
        num = random() * 10
        ndl = led_matrix.NDList(shape=(17, 7, 41, 90, 1), fill=num)
        for i in range(17):
            for j in range(7):
                for k in range(41):
                    for l in range(90):
                        for m in range(1):
                            self.assertEqual(ndl[i, j, k, l, m], num)
    def test_set_single(self):
        # Test an array with a single element
        ndl = led_matrix.NDList(shape=(1,))
        ndl[0] = 7
        self.assertEqual(ndl[0], 7)
    def test_set_single_embedded(self):
        # Test a single embedded list
        ndl = led_matrix.NDList(shape=(1, 1))
        ndl[0, 0] = 7
        self.assertEqual(ndl[0, 0], 7)
    def test_set_5x5(self):
        # Test a 5x5 array
        ndl = led_matrix.NDList(shape=(5, 5))
        for i in range(5):
            for j in range(5):
                num = random() * 10
                ndl[i, j] = num
                self.assertEqual(ndl[i, j], num)
    def test_set_4x4(self):
        # Test a 4x4x4 array
        ndl = led_matrix.NDList(shape=(4, 4, 4))
        for i in range(4):
            for j in range(4):
                for k in range(4):
                    num = random() * 10
                    ndl[i, j, k] = num
                    self.assertEqual(ndl[i, j, k], num)
    def test_set_2x4(self):
        # Test array with varying dimensions
        ndl = led_matrix.NDList(shape=(2, 4))
        for i in range(2):
            for j in range(4):
                num = random() * 10
                ndl[i, j] = num
                self.assertEqual(ndl[i, j], num)
    def test_set_17x7x41x1x23x2(self):
        # Advanced Test
        ndl = led_matrix.NDList(shape=(17, 7, 41, 1, 23, 2))
        for i in range(17):
            for j in range(7):
                for k in range(41):
                    for m in range(1):
                        for n in range(23):
                            for o in range(2):
                                num = random() * 10
                                ndl[i, j, k, m, n, o] = num
                                self.assertEqual(ndl[i, j, k, m, n, o], num)
| 2.859375 | 3 |
models.py | jwadden/invoicinator | 0 | 12764936 | <reponame>jwadden/invoicinator
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
# Module-level SQLAlchemy state, populated by init_engine().
engine = None
Session = None
Base = sqlalchemy.ext.declarative.declarative_base()
def init_engine(settings_dict):
    """Create the SQLAlchemy engine and session factory from app settings.

    Expects settings_dict['db_engine'] (a database URL) and
    settings_dict['debug'] (echoes SQL when truthy).
    """
    global engine
    global Session
    engine = sqlalchemy.create_engine(settings_dict['db_engine'], echo=settings_dict['debug'])
    Session = sqlalchemy.orm.sessionmaker(bind=engine)
class Task(Base):
    """A named unit of work; referenced by work_log.task_id."""
    __tablename__ = 'task'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    name = sqlalchemy.Column(sqlalchemy.Unicode(255))
    def __repr__(self):
        return "<Task(title='%s')>" % (self.name)
class WorkLog(Base):
    """One logged work interval (start/end timestamps) against a Task."""

    __tablename__ = 'work_log'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    task_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('task.id'))
    start_time = sqlalchemy.Column(sqlalchemy.DateTime(timezone=True), nullable=True)
    end_time = sqlalchemy.Column(sqlalchemy.DateTime(timezone=True), nullable=True)

    def __repr__(self):
        # Bug fix: this previously read self.start_date/self.end_date, which
        # do not exist (the columns are start_time/end_time), so calling
        # repr() raised AttributeError.
        return "<WorkLog(task='%d', start='%s', end='%s')>" % (self.task_id, str(self.start_time), str(self.end_time))
| 2.265625 | 2 |
Python3/77.combinations.py | 610yilingliu/leetcode | 0 | 12764937 | <reponame>610yilingliu/leetcode<filename>Python3/77.combinations.py<gh_stars>0
#
# @lc app=leetcode id=77 lang=python3
#
# [77] Combinations
#
# @lc code=start
class Solution:
    """LeetCode 77: all k-element combinations of 1..n."""

    def combine(self, n, k):
        """Return every combination of k numbers chosen from 1..n."""
        self.ans = []
        nums = [num for num in range(1, n + 1)]
        if n == k:
            # Only one combination: the full range.
            self.ans.append(nums)
            return self.ans
        else:
            ls = []
            self.helper(nums, k, ls)
            return self.ans

    def helper(self, array, k, current_ls):
        """Recursively extend current_ls with k more elements from array."""
        if k > len(array):
            # Not enough elements left to complete a combination.
            return
        if k == 0:
            self.ans.append(current_ls)
            # Bug fix: without this return the loop below kept recursing with
            # negative k, doing a large amount of useless work per result.
            return
        for i in range(len(array)):
            self.helper(array[i + 1:], k - 1, [array[i]] + current_ls)
# @lc code=end
| 3.09375 | 3 |
tuiuiu/tuiuiudocs/__init__.py | caputomarcos/tuiuiu.io | 3 | 12764938 | default_app_config = 'tuiuiu.tuiuiudocs.apps.TuiuiuDocsAppConfig'
| 1.15625 | 1 |
bin/augment_classification.py | VynOpenSource/vyn-augment | 0 | 12764939 | <reponame>VynOpenSource/vyn-augment<gh_stars>0
"""
This file as well as the other examples will store the augmented images in the vyn-augment/images/output_data folder.
On the other hand, the folder notebooks contains the same examples with the images being plotted in the same file
instead of being saved in memory. In addition, a better explanation about the augmentor options is provided.
"""
import os
import sys
import numpy as np
from skimage.io import imread, imsave
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from utils.data_processing import set_generator_classifier
from src.vyn_augment.augmentor import Augmentor
def pre_processing_function(label, filename: str, augmentor: Augmentor = None):
    """Load one image from disk and optionally augment it.

    Invoked by the generator once per individual image, independent of the
    batch size.

    :param label: Identifier describing the image type; returned unchanged.
    :param filename: Full path of the image file to read.
    :param augmentor: Optional Augmentor instance applied to the loaded image.
    :return: Tuple of (image array, label).
    """
    image = imread(filename)
    if augmentor is None:
        return image, label
    augmented = augmentor.run(image)
    return np.round(augmented).astype(np.uint8), label
def set_augmentor():
    """
    Set the augmentor.
    1. Select the operations and create the config dictionary
    2. Pass it to the Augmentor class with any other information that requires
    3. Return the instance of the class.
    :return:
    """
    # Each entry maps an operation name to its parameter values and the
    # probability ('prob') of being applied.
    config = {'blur': {'values': ('gaussian', 0.7, 1.0), 'prob': 0.3},
              'brightness': {'values': (0.6, 1.0), 'prob': 0.1},
              'brightness1': {'values': (1.0, 1.5), 'prob': 0.1},
              'flip': {'values': ('hor',), 'prob': 0.5},
              'grid_mask': {'values': (0, 0.2, 0, 0.2, 0.01, 0.1, 0.01, 0.1, 0.1, 0.2, 0.1, 0.2), 'prob': 0.4},
              'illumination': {'values': ('blob_negative', 0.1, 0.2, 100, 150), 'prob': 0.2},
              'noise': {'values': (2, 10), 'use_gray_noise': True, 'prob': 1},
              'rotate': {'values': (-45, 45), 'prob': 0.4},
              'translate': {'values': ('RANDOM', -0.2, 0.2), 'prob': 0.2, 'use_replication': True},
              'zoom': {'values': (0.5, 1.5), 'prob': 0.9, 'use_replication': True}}
    # no_repetition presumably prevents re-applying the same operation within
    # one run — confirm against the Augmentor documentation.
    augmentor = Augmentor(config, no_repetition=True)
    return augmentor
def generate_n_augmented_images(data_dirname: str, root_dirname: str, n=20) -> None:
    """
    Generate n new augmented images, where n is an input parameter
    :param data_dirname: The directory where the initial set of images are.
    :param root_dirname: The directory where to save the augmented set of images.
    :param n: The number of augmented images
    :return: None
    """
    augmentor = set_augmentor()
    preprocessing_fun = lambda *args: pre_processing_function(*args, augmentor=augmentor)
    generator = set_generator_classifier(data_dirname, preprocessing_fun, batch_size=1, number_of_images=n)
    generator.not_batch = True
    # NOTICE: This generator can be used for keras and pytorch in case that instead of saving images one desires to
    # augment images on the fly. Use a number larger than 1 for the batch size when training directly a CNN.
    # Save the new generated images
    # Per-label counters give each output image a unique filename
    # (<label>_<counter>.jpg) in a per-label subdirectory.
    counter_labels = {}
    for image, label in generator:
        counter = counter_labels.get(label, 0)
        output_filename = label + '_' + str(counter_labels.get(label, 0)) + '.jpg'
        save_dirname = os.path.join(root_dirname, label)
        if not os.path.isdir(save_dirname):
            os.makedirs(save_dirname)
        filename = os.path.join(save_dirname, output_filename)
        imsave(filename, image.astype(np.uint8))
        counter_labels[label] = counter + 1
    print(f'Finished image generation. The output images were saved in {root_dirname}')
if __name__ == '__main__':
    # Resolve paths relative to the repository root (parent of this file's dir).
    root = os.path.dirname(os.path.dirname(__file__))
    data_dirname = os.path.join(root, 'images', 'classification')
    root_dirname = os.path.join(root, 'images', 'output_data', 'classification')
    generate_n_augmented_images(data_dirname, root_dirname)
| 3.046875 | 3 |
assignment26.py | swarnaishu/nlp-peronsal-archive | 0 | 12764940 | <gh_stars>0
import grammar_check

# Check a sample sentence against the British-English LanguageTool rules.
tool = grammar_check.LanguageTool('en-gb')
sentence = 'he is playing cricket'
matches = tool.check(sentence)
# NOTE(review): these bare expressions discard their results; they only show
# output in an interactive session.
len(matches)
# Bug fix: the original referenced the undefined name `text` (NameError);
# the sentence being corrected is `sentence`.
grammar_check.correct(sentence, matches) | 2.65625 | 3
contents_rank_crawling.py | dldldlfma/py_tutorial | 0 | 12764941 | <filename>contents_rank_crawling.py
from selenium import webdriver
from bs4 import BeautifulSoup
from pprint import pprint
def keyword_preprocessing(word):
    """Convert a search keyword into a form suitable for a Google search URL.

    Spaces are replaced with '+' so the keyword can be appended to the query
    string. (Translated from the original Korean docstring.)

    :param word: Raw search keyword, possibly containing spaces.
    :return: The keyword with every space replaced by '+'.
    """
    # str.replace performs the whole substitution in one pass, replacing the
    # previous manual list/loop/join round trip.
    return word.replace(' ', '+')
def single_keyword_search(keyword):
    """Search Google for ``keyword`` and return the organic result nodes.

    Opens a Chrome WebDriver session, loads the Google search results page
    for the (URL-escaped) keyword and collects every element with
    class='r', which Google uses for ordinary (non-ad) result entries.

    Args:
        keyword (str): Keyword to search for on Google.

    Returns:
        title_list (bs4.element.ResultSet): All organic result nodes
            (class='r') found on the first results page.
    """
    URL = 'https://www.google.com/search?q=' +keyword_preprocessing(keyword)
    # NOTE(review): chromedriver path is hard-coded to one machine —
    # consider making it configurable.
    driver = webdriver.Chrome("C:/Users/ksg/py_tutorial/chromedriver.exe")
    driver.implicitly_wait(1)
    driver.get(URL)
    # Give the page a moment to render before grabbing its HTML.
    driver.implicitly_wait(2)
    html = driver.page_source
    soup = BeautifulSoup(html, 'html.parser')
    title_list = soup.find_all(name='div',attrs={'class':'r'})
    return title_list
def google_contents_rank(keyword_list, channel_list):
    """Print the rank of each channel's pages in Google search results.

    For every keyword, searches Google and reports the position (index in
    the organic results) of any result whose URL starts with one of the
    given channel prefixes.

    Args:
        keyword_list (iterable of str): Keywords to search for.
        channel_list (iterable of str): URL prefixes identifying channels.
    """
    for keyword in keyword_list:
        title_list = single_keyword_search(keyword)
        for i, val in enumerate(title_list):
            # The result URL is the 4th double-quote-delimited token of the
            # node's HTML. NOTE(review): fragile — depends on Google's
            # current markup; confirm against a live result page.
            # Hoisted out of the channel loop: it does not depend on
            # `channel`, so the original recomputed it needlessly.
            title = str(val).split("\"")[3]
            for channel in channel_list:
                # startswith() replaces the manual slice comparison.
                if title.startswith(channel):
                    print(keyword, end=" : ")
                    print(i, end=" => ")
                    print(val.text[0:40])
                    print("")
| 3.0625 | 3 |
manage/propget.py | Rome84/AWS | 0 | 12764942 | <reponame>Rome84/AWS
# Copyright (c) 2006-2009 <NAME> http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
def get(prop, choices=None):
    """Interactively prompt the user for the value of a boto property.

    Args:
        prop: A boto property object; its ``verbose_name``/``name`` is used
            as the prompt and its ``validate``/``empty`` methods check the
            entered value.
        choices: Optional explicit choice list (or a callable returning
            one); falls back to ``prop.get_choices()``.

    Returns:
        The validated value entered by the user.
    """
    prompt = prop.verbose_name
    if not prompt:
        prompt = prop.name
    if choices:
        if callable(choices):
            choices = choices()
    else:
        choices = prop.get_choices()
    valid = False
    while not valid:
        if choices:
            # Present a numbered menu and accept an index into it.
            # (lo/hi were named min/max originally, shadowing builtins.)
            lo = 1
            hi = len(choices)
            for i in range(lo, hi + 1):
                value = choices[i - 1]
                if isinstance(value, tuple):
                    value = value[0]
                print('[%d] %s' % (i, value))
            value = raw_input('%s [%d-%d]: ' % (prompt, lo, hi))
            try:
                int_value = int(value)
                value = choices[int_value - 1]
                if isinstance(value, tuple):
                    value = value[1]
                valid = True
            except ValueError:
                print('%s is not a valid choice' % value)
            except IndexError:
                # Bug fix: the original format string had three specifiers
                # but only two arguments, which raised a TypeError instead
                # of printing the out-of-range message.
                print('%s is not within the range[%d-%d]' % (value, lo, hi))
        else:
            value = raw_input('%s: ' % prompt)
            try:
                value = prop.validate(value)
                if prop.empty(value) and prop.required:
                    print('A value is required')
                else:
                    valid = True
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt still
                # propagates while validation errors are reported.
                print('Invalid value: %s' % value)
    return value
| 2.734375 | 3 |
datachimp/views/api/ml_model.py | ModelChimp/datachimp | 0 | 12764943 | from django.conf import settings
from django.http import HttpResponse
from rest_framework import generics, status
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from datachimp.models.machinelearning_model import MachineLearningModel
from datachimp.models.membership import Membership
from datachimp.serializers.machinelearning_model import MachineLearningModelSerializer
from datachimp.utils.data_utils import execute_query
from datachimp.api_permissions import HasProjectMembership
from rest_framework.permissions import IsAuthenticated
class MLModelAPI(generics.ListAPIView):
    """List and delete MachineLearningModel records of a project."""

    serializer_class = MachineLearningModelSerializer
    # select_related avoids a per-row query for the author's profile.
    queryset = MachineLearningModel.objects.select_related('user__profile').all()
    permission_classes = (IsAuthenticated, HasProjectMembership)

    def list(self, request,model_id=None, project_id=None, st=None):
        """Return the project's models (optionally one model), newest first.

        Args:
            model_id: Optional pk restricting the result to a single model.
            project_id: Project whose models are listed.
            st: Optional HTTP status override; defaults to 200 OK.
        """
        if model_id:
            queryset = self.get_queryset().filter(id=model_id, project=project_id).order_by("-date_created")
        else:
            queryset = self.get_queryset().filter(project=project_id).order_by("-date_created")
        serializer = MachineLearningModelSerializer(queryset, many=True)
        if st is None:
            st = status.HTTP_200_OK
        return Response(serializer.data, status=st)

    def delete(self, request, project_id):
        """Delete a model; only the model owner or project owner may do so."""
        mid = request.data.get('model_id')
        user = request.user
        ml_model_obj = MachineLearningModel.objects.get(pk=mid)

        # Set an owner flag based on project_owner or model owner
        owner_flag = True if user == ml_model_obj.user or user == ml_model_obj.project.user else False
        if not owner_flag:
            return Response(status=status.HTTP_401_UNAUTHORIZED)

        ml_model_obj.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class CreateExperimentAPI(generics.CreateAPIView):
    """Create (or idempotently return) a MachineLearningModel experiment."""

    serializer_class = MachineLearningModelSerializer
    queryset = MachineLearningModel.objects.all()
    permission_classes = (IsAuthenticated, HasProjectMembership)

    def create(self, request, *args, **kwargs):
        """Create an experiment, returning the existing one when the
        client-supplied ``experiment_id`` is already registered.

        Returns:
            200 with the existing ``model_id``, or 201 with the new one.
        """
        data = request.data.copy()
        data['user'] = self.request.user.id
        experiment_id = self.request.data.get('experiment_id')

        # Idempotency: if this experiment_id already exists, return the
        # existing record instead of creating a duplicate.
        try:
            exp_obj = MachineLearningModel.objects.get(experiment_id=experiment_id)
            return Response({'model_id': exp_obj.id}, status=status.HTTP_200_OK)
        except MachineLearningModel.DoesNotExist:
            pass

        serializer = self.get_serializer(data=data)
        # Bug fix: the original ignored the is_valid() result, so invalid
        # payloads crashed on save() with a 500; raise_exception=True turns
        # them into a proper 400 response.
        serializer.is_valid(raise_exception=True)
        exp_obj = serializer.save()
        # Bug fix: `headers` was computed but never attached to the response.
        headers = self.get_success_headers(serializer.data)
        return Response({'model_id': exp_obj.id},
                        status=status.HTTP_201_CREATED, headers=headers)
@api_view(['GET'])
@permission_classes((HasProjectMembership, IsAuthenticated))
def get_param_fields(request, project_id):
    '''
    List of model parameters as columns to be shown on customize menu
    '''
    try:
        # Casting to int also guarantees the value is safe to interpolate
        # into the raw SQL string below.
        project_id = int(project_id)
    except Exception as e:
        return Response("Error: %s" % e, status=status.HTTP_400_BAD_REQUEST)

    # Check the user has permission for the project
    try:
        Membership.objects.get(user=request.user, project=project_id)
    except Membership.DoesNotExist:
        return Response(status=status.HTTP_403_FORBIDDEN)

    # Collect the distinct JSON keys used across the project's stored
    # model_parameters objects.
    query = '''
            select distinct json_object_keys(model_parameters::json) as parameter
            from datachimp_machinelearningmodel ml
            where json_typeof(model_parameters::json) = 'object'
            and project_id = %s
            '''
    query = query % (
        project_id,
    )
    result_raw = execute_query(query)
    return Response(result_raw, status=status.HTTP_200_OK)
@api_view(['POST'])
@permission_classes((HasProjectMembership, IsAuthenticated))
def send_selected_param_data(request, project_id):
    '''
    Send the data of the model parameters to be displayed in the table
    '''
    try:
        # Validate project_id is numeric before it is interpolated into the
        # SQL text (mirrors the sibling view get_param_fields; the original
        # spliced it in unvalidated).
        project_id = int(project_id)
        param_fields = request.data.getlist('param_fields[]')
    except Exception as e:
        return Response("Error: %s" % e, status=status.HTTP_400_BAD_REQUEST)

    # SECURITY: param_fields come straight from the client and are spliced
    # into the SQL string. Doubling single quotes blocks quote break-out;
    # ideally execute_query would support bound parameters instead.
    safe_fields = [param.replace("'", "''") for param in param_fields]

    query = '''
            select distinct id,key,value
            from datachimp_machinelearningmodel ml, json_each_text(model_parameters::json)
            where json_typeof(model_parameters::json) = 'object'
            and project_id = %s
            and key in (%s)
            '''
    query = query % (
        project_id,
        ",".join(["'" + param + "'" for param in safe_fields])
    )
    result_raw = execute_query(query)
    return Response(result_raw, status=status.HTTP_200_OK)
| 2.078125 | 2 |
tests/test_CameraCorrected.py | a1rb4Ck/camera_fusion | 25 | 12764944 | """camera_fusion CameraCorrected class tests."""
import cv2
import os
import sys
import filecmp
import pytest
import numpy as np
import shutil
import time
import unittest.mock as mock
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import camera_fusion # noqa
class Vc(object):
    """VideoCapture mockup."""

    def __init__(self, parent, real_captured_frame=None):
        """Initialize VideoCapture mockup.

        Args:
            parent(Camera object): parent's Camera.
            real_captured_frame: Optional frame returned by read().
        """
        self.parent = parent
        self.real_captured_frame = real_captured_frame

    def get(self, setting):
        """Mock VideoCapture's get function only to get width and height."""
        # cv2 property ids: 3 = CAP_PROP_FRAME_WIDTH, 4 = CAP_PROP_FRAME_HEIGHT.
        if setting == 3:
            return 1280
        if setting == 4:
            return 720
        return setting

    def isOpened(self):
        """Mock VideoCapture's isOpened function."""
        return True

    def read(self):
        """Mock VideoCapture's read function."""
        time.sleep(0.33)
        # Ask the parent camera's capture loop to stop after this frame.
        self.parent.stop = True
        print('1 frame')
        return (True, self.real_captured_frame)

    def set(self, setting0, setting1):
        """Mock VideoCapture's set function; just echoes the arguments."""
        print(setting0, setting1)
# Import tests
def test_import_CameraCorrected():
    """Test CameraCorrected class importation."""
    # The class must live in its own submodule, not be a re-export.
    assert camera_fusion.CameraCorrected.__module__ == 'camera_fusion.CameraCorrected' # noqa
# PostureBuffer tests
def test_PostureBuffer():
    """Test PostureBuffer class definition."""
    c = camera_fusion.CameraCorrected(0, 11)
    # Default posture-smoothing window is 4 samples.
    assert c.board_post.window_length == 4
def test_PostureBuffer_pop():
    """Test PostureBuffer buffer abilities."""
    rvec = np.array([[1], [0], [0]])
    tvec = np.array([[1], [0], [0]])
    c = camera_fusion.CameraCorrected(0, 11)
    frvec, ftvec = c.board_post.update(rvec, tvec)
    b_rvecs_shape = c.board_post.buff_rvecs.shape
    b_tvecs_shape = c.board_post.buff_tvecs.shape
    # update() flattens the (3, 1) input vectors to shape (3,) while the
    # internal buffers hold one column per buffered sample.
    assert (frvec.shape, ftvec.shape, b_rvecs_shape, b_tvecs_shape) == (
        (3,), (3,), (3, 1), (3, 1))
def test_PostureBuffer_filter():
    """Test PostureBuffer filtering."""
    rvec = np.array([[0.1], [0.2], [0]])
    tvec = np.array([[0.2], [0.1], [0]])
    c = camera_fusion.CameraCorrected(0, 0)
    frvec, ftvec = c.board_post.update(rvec, tvec)
    frvec, ftvec = c.board_post.update(rvec * 0.1, tvec * 0.1)
    frvec, ftvec = c.board_post.update(rvec, tvec)
    # This should trigger the filter default avg_max_std=0.1 maximal limit
    frvec, ftvec = c.board_post.update(rvec * 2, tvec * 2)
    frvec, ftvec = c.board_post.update(rvec * 3, tvec * 3)
    # Expected values equal the last raw input (rvec * 3 / tvec * 3):
    # presumably the buffer is flushed once the std limit trips — confirm
    # against PostureBuffer.update().
    np.testing.assert_allclose([[0.3], [0.6], [0.0]], frvec)
    np.testing.assert_allclose([[0.6], [0.3], [0.]], ftvec)
# Camera tests
def test_calibrate_camera_correction():
    """Test calibrate_camera_correction function."""
    c = camera_fusion.CameraCorrected(0, 11)
    assert os.path.isdir('./data')
    # Replace ./data with the committed calibration fixtures.
    shutil.rmtree('data')
    shutil.copytree('./tests/test_CameraCorrected', 'data')
    c.calibrate_camera_correction()
    # The expected values below must match the fixture defaultConfig.xml.
    assert c.aruco_dict_num == 11
    assert c.charuco_square_length == 3.7999999999999999e-02
    assert c.charuco_marker_size == 2.9000000000000001e-02
    assert c.width == 1280
    assert c.height == 720
    np.testing.assert_allclose(
        [[1.0824122780443031e+03, 0., 6.4165850036653376e+02],
         [0., 1.0824122780443031e+03, 3.5960861017399100e+02],
         [0., 0., 1.]],
        c.camera_matrix)
    np.testing.assert_allclose(
        [[7.6732549196567842e-02, -4.1976860824194072e-02, 0., 0.,
          -1.8028155099783838e-01]], c.dist_coeffs)
    shutil.rmtree('data')
def test_detect_markers():
    """Test the detect_markers function."""
    c = camera_fusion.CameraCorrected(0, 11)
    shutil.rmtree('data')
    shutil.copytree('./tests/test_CameraCorrected', 'data')
    c.calibrate_camera_correction()
    real_captured_frame = np.load('./data/real_captured_frame.npy')
    # Feed the recorded frame instead of reading from a physical camera.
    with mock.patch('camera_fusion.CameraCorrected.read',
                    return_value=real_captured_frame):
        frame, corners, ids = c.detect_markers()
    np.testing.assert_array_equal(frame, real_captured_frame)
    # Expected corner coordinates and marker ids for the recorded frame.
    correct_corners = np.array([
        [[[1112., 506.], [1111., 374.], [1245., 368.], [1245., 500.]]],
        [[[22., 194.], [11., 57.], [144., 51.], [158., 189.]]],
        [[[744., 164.], [739., 23.], [878., 17.], [879., 157.]]],
        [[[243., 715.], [236., 585.], [366., 580.], [373., 708.]]],
        [[[591., 699.], [584., 570.], [714., 565.], [720., 694.]]],
        [[[940., 688.], [934., 558.], [1067., 552.], [1072., 684.]]],
        [[[57., 549.], [45., 419.], [178., 413.], [189., 543.]]],
        [[[407., 534.], [399., 405.], [529., 399.], [538., 528.]]],
        [[[757., 519.], [752., 390.], [884., 384.], [888., 514.]]],
        [[[220., 367.], [207., 234.], [341., 228.], [351., 362.]]],
        [[[573., 353.], [565., 219.], [699., 213.], [705., 347.]]],
        [[[930., 337.], [927., 201.], [1062., 195.], [1065., 330.]]],
        [[[383., 180.], [372., 42.], [508., 34.], [517., 175.]]]])
    np.testing.assert_array_equal(corners, correct_corners)
    correct_ids = np.array(
        [[15], [1], [11], [2], [7], [12], [0], [5], [10], [3], [8], [13], [6]])
    np.testing.assert_array_equal(ids, correct_ids)
    shutil.rmtree('data')
def test_draw_fps():
    """Test draw_fps function."""
    with mock.patch('time.time', return_value=0):
        c = camera_fusion.CameraCorrected(0, 11)
    c.width = 1280
    c.height = 720
    frame = np.load('./tests/test_CameraCorrected/real_captured_frame.npy')
    # With time.time frozen at 0.03 s since construction the computed rate
    # is 1 / 0.03 ~= 33 fps, which draw_fps renders onto the frame.
    with mock.patch('time.time', return_value=0.03):
        frame = c.draw_fps(frame) # 33 fps
    np.testing.assert_array_equal(np.load(
        './tests/test_CameraCorrected/real_captured_frame_with_30fps.npy'),
        frame)
def test_draw_text():
    """Test draw_text function.

    Draws a text overlay onto a recorded frame and compares it against the
    committed reference fixture.
    """
    c = camera_fusion.CameraCorrected(0, 11)
    c.width = 1280
    c.height = 720
    frame = np.load('./tests/test_CameraCorrected/real_captured_frame.npy')
    frame = c.draw_text(frame, 'test')
    # Bug fix: the original np.save()'d the freshly drawn frame to the very
    # fixture it then loaded for comparison, so the assertion could never
    # fail. Compare against the committed reference fixture only.
    np.testing.assert_array_equal(
        np.load(
            './tests/test_CameraCorrected/real_captured_frame_withText.npy'),
        frame)
def test_estimate_markers_posture():
    """Test the estimate_markers_posture function."""
    c = camera_fusion.CameraCorrected(0, 11)
    shutil.rmtree('data')
    shutil.copytree('./tests/test_CameraCorrected', 'data')
    c.calibrate_camera_correction()
    real_captured_frame = np.load('./data/real_captured_frame.npy')
    # Feed the recorded frame instead of reading from a physical camera.
    with mock.patch('camera_fusion.CameraCorrected.read',
                    return_value=real_captured_frame):
        frame = c.estimate_markers_posture()
    # Compare against the committed frame with the posture axes drawn.
    correct_markers_posture_frame = np.load(
        './data/correct_markers_posture_frame.npy')
    np.testing.assert_array_equal(frame, correct_markers_posture_frame)
    shutil.rmtree('data')
def test_estimate_board_posture():
    """Test the estimate_board_posture function."""
    c = camera_fusion.CameraCorrected(0, 11)
    shutil.rmtree('data')
    shutil.copytree('./tests/test_CameraCorrected', 'data')
    c.calibrate_camera_correction()
    real_captured_frame = np.load('./data/real_captured_frame.npy')
    # Feed the recorded frame instead of reading from a physical camera.
    with mock.patch('camera_fusion.CameraCorrected.read',
                    return_value=real_captured_frame):
        frame = c.estimate_board_posture()
    # Compare against the committed frame with the board posture drawn.
    correct_board_posture_frame = np.load(
        './data/correct_board_posture_frame.npy')
    np.testing.assert_array_equal(frame, correct_board_posture_frame)
    shutil.rmtree('data')
def test_estimate_board_and_markers_posture():
    """Test the estimate_board_and_markers_posture function.

    Runs the combined board + marker posture estimation on a recorded
    frame and compares it against the committed reference fixture.
    """
    c = camera_fusion.CameraCorrected(0, 11)
    shutil.rmtree('data')
    shutil.copytree('./tests/test_CameraCorrected', 'data')
    c.calibrate_camera_correction()
    real_captured_frame = np.load('./data/real_captured_frame.npy')
    with mock.patch('camera_fusion.CameraCorrected.read',
                    return_value=real_captured_frame):
        frame = c.estimate_board_and_markers_posture()
    # Bug fix: the original overwrote the reference fixture (in both the
    # tests directory and ./data) with the frame it had just produced
    # before comparing, making the assertion tautological. Compare against
    # the committed fixture only.
    correct_board_and_markers_posture_frame = np.load(
        './data/correct_board_and_markers_posture_frame.npy')
    np.testing.assert_array_equal(
        frame, correct_board_and_markers_posture_frame)
    shutil.rmtree('data')
def test_initialize():
    """Test CameraCorrected's initialize function."""
    c = camera_fusion.CameraCorrected(0, 11)
    c.settings = [(0, 0), (1, 1), (3, 1280), (4, 720)]
    frame = np.load('./tests/test_CameraCorrected/real_captured_frame.npy')
    c.current_frame = frame
    # Stub the capture device, calibration and frame read so that
    # initialize() runs without any hardware attached.
    with mock.patch('cv2.VideoCapture', return_value=Vc(c)):
        with mock.patch(
                'camera_fusion.CameraCorrected.calibrate_camera_correction'):
            with mock.patch('camera_fusion.CameraCorrected.read',
                            return_value=frame):
                c.initialize()
def test_read_undistort():
    """Test the read_undistort function."""
    c = camera_fusion.CameraCorrected(0, 11)
    shutil.rmtree('data')
    shutil.copytree('./tests/test_CameraCorrected', 'data')
    c.calibrate_camera_correction()
    # Feed the recorded frame and check the undistortion result against
    # the committed reference fixture.
    with mock.patch('camera_fusion.CameraCorrected.read',
                    return_value=np.load('./data/real_captured_frame.npy')):
        frame_undistored = c.read_undistort()
    valid_frame_undistored = np.load('./data/real_undistored_frame.npy')
    np.testing.assert_array_equal(valid_frame_undistored, frame_undistored)
    shutil.rmtree('data')
def test_test_camera():
    """Test the basic camera test."""
    # NOTE(review): unlike the sibling tests, no rmtree first — relies on
    # the previous test having removed ./data; confirm test ordering.
    shutil.copytree('./tests/test_CameraCorrected', 'data')
    c = camera_fusion.CameraCorrected(0, 11)
    c.calibrate_camera_correction()

    # Testing camera setup
    with mock.patch('camera_fusion.CameraCorrected.read',
                    return_value=np.load(
                        './data/real_captured_frame.npy')):
        c.test_camera()
    shutil.rmtree('data')
def test__update_frame():
    """Test the _update_frame function."""
    c = camera_fusion.CameraCorrected(0, 11)
    c.stop = False
    shutil.rmtree('data')
    shutil.copytree('./tests/test_CameraCorrected', 'data')
    real_captured_frame = np.load('./data/real_captured_frame.npy')
    c.cap = Vc(c, real_captured_frame)
    c.calibrate_camera_correction()

    # Testing camera frame read and update. Vc.read() sets c.stop = True,
    # so the _update_frame loop exits after a single frame.
    c.cap = Vc(c, real_captured_frame)
    c._update_frame()
    np.testing.assert_array_equal(c.current_frame, real_captured_frame)
    shutil.rmtree('data')
# def test_write_defaultConfig():
# """Test write_defaultConfig function."""
# shutil.rmtree('data')
# c = camera_fusion.CameraCorrected(0, 11)
# c.width = 1280
# c.height = 720
# with mock.patch('builtins.input', return_value=0.03):
# c.write_defaultConfig()
# assert os.path.isfile('./data/defaultConfig.xml')
# assert filecmp.cmp(
# './data/defaultConfig.xml',
# './tests/test_CameraCorrected/defaultConfig_assert.xml')
# shutil.rmtree('data')
| 2.59375 | 3 |
tests/__init__.py | dbauducco/DistributedReplays | 69 | 12764945 | from sqlalchemy.engine import create_engine
from sqlalchemy.orm.session import Session
def setup_module():
    """Open the DB connection and the outer transaction (module scope)."""
    global transaction, connection, engine

    # Connect to the database and create the schema within a transaction
    engine = create_engine('postgresql:///yourdb')
    connection = engine.connect()
    transaction = connection.begin()

    # If you want to insert fixtures to the DB, do it here
def teardown_module():
    """Discard every change made during the test module run."""
    # Roll back the top level transaction and disconnect from the database
    transaction.rollback()
    connection.close()
    engine.dispose()
class DatabaseTest:
    """Base class giving each test a session inside a nested transaction.

    Changes made through ``self.session`` are rolled back after every test,
    so tests stay isolated from each other.
    """

    def setup(self):
        # Each test runs in its own SAVEPOINT on the shared connection.
        self.__transaction = connection.begin_nested()
        self.session = Session(connection)

    def teardown(self):
        self.session.close()
        self.__transaction.rollback()
gmailapi_backend/service.py | innoveit/django-gmailapi-json-backend | 1 | 12764946 | import base64
import json
import logging
import mimetypes
import email.encoders as encoder
import socket
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from django.conf import settings
from google.oauth2 import service_account
from googleapiclient.discovery import build
from django.core.mail.backends.smtp import EmailBackend
logger = logging.getLogger(__name__)
class GmailApiBackend(EmailBackend):
    """Django email backend that delivers mail through the Gmail REST API."""

    def __init__(
            self,
            fail_silently=False,
            **kwargs
    ):
        super().__init__(fail_silently=fail_silently)
        # Build a Gmail API client authorized via the service account.
        self.connection = build('gmail', 'v1', cache_discovery=False, credentials=get_credentials())

    def send_messages(self, email_messages):
        """Send the given messages; returns the number successfully sent."""
        # NOTE(review): open() is inherited from the SMTP EmailBackend —
        # confirm it is intended here, since delivery actually goes through
        # the Gmail REST client built in __init__, not an SMTP connection.
        new_conn_created = self.open()
        if not self.connection or new_conn_created is None:
            return 0
        num_sent = 0
        for email_message in email_messages:
            message = create_message(email_message)
            sent = self._send(message)
            if sent:
                num_sent += 1
        if new_conn_created:
            self.close()
        return num_sent

    def _send(self, email_message):
        """Send one pre-built Gmail API payload; re-raises on failure."""
        try:
            self.connection.users().messages().send(userId=settings.GMAIL_USER, body=email_message).execute()
        except Exception as error:
            logger.error('Error sending email', error)
            if settings.EMAIL_BACKEND and settings.EMAIL_BACKEND == "mailer.backend.DbBackend":
                # If using "django-mailer" https://github.com/pinax/django-mailer, it marks the related message as
                # deferred only for some exceptions, so we raise one of them to save the error on the db
                raise socket.error(error)
            else:
                raise
        return True
def get_credentials():
    """Build delegated service-account credentials for the Gmail API."""
    account_info = json.loads(settings.GOOGLE_SERVICE_ACCOUNT)
    return service_account.Credentials.from_service_account_info(
        account_info,
        scopes=settings.GMAIL_SCOPES,
        subject=settings.GMAIL_USER,
    )
def create_message(email_message):
    """Convert a Django EmailMessage into a Gmail API send payload.

    Builds a MIME message (multipart when attachments are present), fills
    the standard headers from the EmailMessage and returns the
    base64url-encoded body expected by ``users().messages().send``.

    Args:
        email_message: Django EmailMessage-like object exposing ``body``,
            ``content_subtype``, ``to``, ``from_email``, ``reply_to``,
            ``cc``, ``bcc``, ``subject`` and ``attachments``.

    Returns:
        dict: ``{'raw': <base64url-encoded RFC 2822 message>}``.
    """
    if email_message.attachments:
        message = MIMEMultipart()
        message.attach(
            MIMEText(email_message.body, email_message.content_subtype))
    else:
        message = MIMEText(email_message.body, email_message.content_subtype)

    message['to'] = ','.join(map(str, email_message.to))
    message['from'] = email_message.from_email
    if email_message.reply_to:
        message['reply-to'] = ','.join(map(str, email_message.reply_to))
    if email_message.cc:
        message['cc'] = ','.join(map(str, email_message.cc))
    if email_message.bcc:
        message['bcc'] = ','.join(map(str, email_message.bcc))
    message['subject'] = str(email_message.subject)

    if email_message.attachments:
        for attachment in email_message.attachments:
            message.attach(_build_attachment_part(attachment))

    b64_bytes = base64.urlsafe_b64encode(message.as_bytes())
    return {'raw': b64_bytes.decode()}


def _build_attachment_part(attachment):
    """Build the MIME part for one ``(filename, content)`` attachment.

    ``content`` may be raw bytes or a filesystem path to read from.
    """
    filename, content = attachment[0], attachment[1]
    content_type, encoding = mimetypes.guess_type(filename)
    if content_type is None or encoding is not None:
        content_type = 'application/octet-stream'
    main_type, sub_type = content_type.split('/', 1)

    if isinstance(content, bytes):
        # Bug fix: raw-bytes attachments were only handled after the
        # text/image/audio branches, which tried to open() the bytes as a
        # path and crashed. Handle in-memory content first.
        part = MIMEBase(main_type, sub_type)
        part.set_payload(content)
        encoder.encode_base64(part)
    elif main_type == 'text':
        # Bug fix: the original opened text files in binary mode and fed
        # bytes to MIMEText, which raises TypeError on Python 3.
        with open(content, 'r') as fp:
            part = MIMEText(fp.read(), _subtype=sub_type)
    elif main_type == 'image':
        with open(content, 'rb') as fp:
            part = MIMEImage(fp.read(), _subtype=sub_type)
    elif main_type == 'audio':
        with open(content, 'rb') as fp:
            part = MIMEAudio(fp.read(), _subtype=sub_type)
    else:
        with open(content, 'rb') as fp:
            part = MIMEBase(main_type, sub_type)
            part.set_payload(fp.read())
        # MIMEText/Image/Audio set their own transfer encoding; the
        # original called encode_base64 on every part, which added a
        # duplicate Content-Transfer-Encoding header to those types.
        encoder.encode_base64(part)

    part.add_header('Content-Disposition', 'attachment', filename=filename)
    return part
| 2.078125 | 2 |
discode/models/message.py | TheFarGG/Discode | 3 | 12764947 | from __future__ import annotations
__all__ = ("Message", "MessageReference")
from typing import Any, Optional, Dict, List, Union, TYPE_CHECKING
from ..app import Button, LinkButton, component_from_dict
from ..utils import UNDEFINED
from .abc import Snowflake
from .channel import TextChannel
from .guild import Guild
from .member import Member
from .user import User
if TYPE_CHECKING:
from ..connection import Connection
class MessageReference(Snowflake):
    """Reference from one message to another (replies, crossposts)."""

    __slots__ = (
        "id",
        "channel_id",
        "guild_id",
        "fail_if_not_exists",
        "referrer",
        "_connection",
    )

    if TYPE_CHECKING:
        id: int
        _connection: Connection
        channel_id: Optional[int]
        guild_id: Optional[int]
        fail_if_not_exists: bool
        referrer: Message

    def __init__(self, connection, payload: Dict[str, Any]):
        self._connection = connection
        # NOTE(review): int(...) is applied to the UNDEFINED sentinel when a
        # key is missing — confirm UNDEFINED supports int() or that these
        # keys are always present in reference payloads.
        self.id = int(payload.pop("message_id", UNDEFINED))
        self.channel_id = int(payload.pop("channel_id", UNDEFINED))
        self.guild_id = int(payload.pop("guild_id", UNDEFINED))
        self.fail_if_not_exists = payload.pop("fail_if_not_exists", True)
        # The message object carrying this reference (injected by Message).
        self.referrer = payload.pop("msg")

    @property
    def cached_message(self) -> Optional[Message]:
        """The referenced message from the local cache, if present."""
        return self._connection.message_cache.get(self.id)

    async def fetch_message(self) -> Optional[Message]:
        """Fetch the referenced message from the Discord REST API."""
        http = self._connection.http
        msg_payload = await http.request(
            "GET",
            "/channels/{channel_id}/messages/{message_id}",
            parameters={"channel_id": self.channel_id, "message_id": self.id},
        )
        return Message(self._connection, msg_payload)
class Message(Snowflake):
    """Represents a Discord message received over the gateway/REST API."""

    __slots__ = (
        "id",
        "content",
        "channel_id",
        "guild_id",
        "author_id",
        "reference",
        "_components",
        "_mentions",
        "_connection",
    )

    if TYPE_CHECKING:
        id: int
        _connection: Connection
        content: str
        channel_id: int
        guild_id: Optional[int]
        author_id: int
        reference: MessageReference

    def __init__(self, connection, payload: Dict[str, Any]):
        self._connection = connection
        self.id = int(payload.pop("id"))
        self.content = payload.pop("content", None)
        self.channel_id = int(payload.pop("channel_id"))
        # NOTE(review): int(...) is applied to the UNDEFINED sentinel when
        # the key is missing — confirm UNDEFINED supports that.
        self.guild_id = int(payload.pop("guild_id", UNDEFINED))
        self.author_id = int(payload.pop("author", {}).get("id", 0))
        self._components = [
            component_from_dict(comp) for comp in payload.pop("components", ())
        ]
        ref = payload.pop("message_reference", UNDEFINED)
        if ref != UNDEFINED:
            # Let the reference point back to the message that carries it.
            ref["msg"] = self
            self.reference = MessageReference(connection, ref)
        else:
            self.reference = None
        ms_data = payload.pop("mentions", ())
        self._mentions = []
        if len(ms_data) >= 1:
            for md in ms_data:
                u = connection.get_user(int(md.get("id", UNDEFINED)))
                if u:
                    self._mentions.append(u)

    def copy(self, **options):
        """Return a copy of this message with fields overridden by options."""
        payload = {"id": self.id, "channel_id": options['channel_id']}
        ret = Message(self._connection, payload)
        # NOTE(review): content is taken from options only (None when not
        # supplied), never from self — confirm that is intended.
        ret.content = options.get("content")
        # (Was a redundant double assignment `ret.guild_id = ret.guild_id = ...`.)
        ret.guild_id = self.guild_id
        ret.author_id = self.author_id
        # Bug fix: the original wrote `len(options["components"] >= 1)`,
        # i.e. len() of a list-vs-int comparison, which raises TypeError.
        if options.get("components") and len(options["components"]) >= 1:
            ret._components = [
                component_from_dict(comp) for comp in options["components"]
            ]
        ret.reference = self.reference
        ms_data = options.pop("mentions", ())
        if len(ms_data) >= 1:
            for md in ms_data:
                u = self._connection.get_user(int(md.get("id", UNDEFINED)))
                if u:
                    ret._mentions.append(u)
        return ret

    def __repr__(self) -> str:
        # Bug fix: the original f-string was missing the closing '>'.
        return f"<{self.__class__.__name__} id = {self.id} content = {self.content}>"

    def __str__(self) -> str:
        return self.content or ""

    @property
    def author(self) -> Union[User, Member]:
        """The author as a guild Member when possible, else a User."""
        g = self.guild
        if g:
            return g.get_member(self.author_id)
        return self._connection.get_user(self.author_id)

    @property
    def channel(self) -> TextChannel:
        """The channel this message was sent in (from the cache)."""
        return self._connection.channel_cache.get(self.channel_id)

    @property
    def guild(self) -> Guild:
        """The guild this message was sent in (from the cache)."""
        return self._connection.get_guild(self.guild_id)

    @property
    def components(self) -> List[Union[Button, LinkButton]]:
        """A defensive copy of the message's components."""
        return self._components.copy()

    @property
    def mentions(self) -> List[User]:
        """A defensive copy of the mentioned users."""
        return self._mentions.copy()
| 2.046875 | 2 |
mayan/apps/appearance/settings.py | Syunkolee9891/Mayan-EDMS | 1 | 12764948 | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mayan.apps.smart_settings.classes import Namespace
from .literals import DEFAULT_MAXIMUM_TITLE_LENGTH
# Settings namespace for the appearance app.
namespace = Namespace(label=_('Appearance'), name='appearance')

# Maximum number of characters shown for a view title before truncation.
setting_max_title_length = namespace.add_setting(
    default=DEFAULT_MAXIMUM_TITLE_LENGTH,
    global_name='APPEARANCE_MAXIMUM_TITLE_LENGTH', help_text=_(
        'Maximum number of characters that will be displayed as the view '
        'title.'
    )
)
| 1.804688 | 2 |
tfmiss/training/adapt.py | shkarupa-alex/tfmiss | 1 | 12764949 | <filename>tfmiss/training/adapt.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import time
from collections import Counter
from scipy.interpolate import LinearNDInterpolator
from functools import lru_cache
def test_device_matmul(max_batch, max_hidden, max_classes, repeats, device, dtype):
    """Measures matrix multiplication time: [BATCH, HIDDEN] * [HIDDEN, CLASSES].
    In case of sequences this is the same as: [BATCH / TIME, TIME, HIDDEN] * [HIDDEN, CLASSES]

    Args:
        max_batch: Maximum size of batch.
        max_hidden: Maximum size of input logits.
        max_classes: Maximum number of output classes.
        repeats: Number of repeats to average.
        device: Which device to use for multiplication.
        dtype: Matrices data type.

    Returns:
        A dict with tested `batch_sizes`, `hidden_sizes` and `class_sizes` along with measured `cost_values`
        (time in milliseconds per multiplication).
    """
    if max_classes < max_hidden:
        raise ValueError('Number of classes should be greater then input logits size')
    if 'GPU' not in device.upper():
        tf.get_logger().warning('Device matmul estimation is useful for GPUs. '
                                'You ask to measure non-GPU device. Hope you know what you are doing.')

    physical_devices = ', '.join([
        d.name.replace('physical_device:', '') for d in tf.config.experimental.list_physical_devices()])
    if device.upper() not in physical_devices:
        raise SystemError('Requested device {} is not available: {}'.format(device, physical_devices))

    dtype = tf.dtypes.as_dtype(dtype)
    if not (dtype.is_floating or dtype.is_complex):
        raise TypeError('Unable to test matrix multiplication time with non-floating dtype {}'.format(dtype))

    cost_values = []

    def _matmul_dim_space(max_val):
        # Candidate sizes: powers of two, midpoints between them that are
        # multiples of 8, their +/-1 neighbours and the maximum itself.
        log_space = np.power(2, np.arange(0, int(np.log2(max_val)) + 1))
        mean_space = (log_space[:-1] + log_space[1:]) // 2
        mod8_space = mean_space[mean_space % 8 == 0]
        rc_space = np.concatenate((log_space, mod8_space))
        final_space = np.concatenate((rc_space, rc_space - 1, rc_space + 1, [max_val]))
        final_space = np.unique(final_space)
        final_space = final_space[final_space > 0]
        final_space = final_space[final_space <= max_val]

        return np.array(final_space)

    batch_sizes = _matmul_dim_space(max_batch)
    hidden_sizes = _matmul_dim_space(max_hidden)
    class_sizes = _matmul_dim_space(max_classes)
    dense_grid = np.array(np.meshgrid(batch_sizes, hidden_sizes, class_sizes, indexing='ij')).T.reshape([-1, 3])

    with tf.device(device):
        # Check if device has enough memory
        left = tf.random.normal(shape=(batch_sizes[-1], hidden_sizes[-1]), dtype=dtype)
        right = tf.random.normal(shape=(hidden_sizes[-1], class_sizes[-1]), dtype=dtype)
        mult = tf.matmul(left, right)
        mult.numpy()
        del left, right, mult

        for step, (batch_size, hidden_size, class_size) in enumerate(dense_grid):
            left = tf.random.normal(shape=(batch_size, hidden_size), dtype=dtype)
            right = tf.random.normal(shape=(hidden_size, class_size), dtype=dtype)

            # TensorFlow initializes a GPU the first time it's used, exclude from timing.
            mult = tf.matmul(left, right)
            mult.numpy()

            start = time.time()
            for _ in range(repeats):
                mult = tf.matmul(left, right)
            # tf.matmul can return before completing the matrix multiplication (e.g., can return after enqueing the
            # operation on a CUDA stream). The mult.numpy() call below will ensure that all enqueued operations have
            # completed (and will also copy the result to host memory, so we're including a little more than just the
            # matmul operation time).
            mult.numpy()
            finish = time.time()
            mult.numpy()
            over = time.time()

            # Subtract one host-copy round trip (over - finish) from the
            # measured span and convert to milliseconds per repeat.
            total = (finish - start - (over - finish)) * 1000 / repeats
            cost_values.append(total)
            del left, right, mult

            tf.get_logger().info('Done {} steps of {}'.format(step + 1, dense_grid.shape[0]))

    return {
        'batch_sizes': batch_sizes.tolist(),
        'hidden_sizes': hidden_sizes.tolist(),
        'class_sizes': class_sizes.tolist(),
        'cost_values': cost_values
    }
def interpolate_matmul_cost(device_params):
    """Build a linear interpolator over measured matmul timings.

    Given the measurements produced by `test_device_matmul` (a dense grid
    of batch/hidden/class sizes with a cost per grid point), returns a
    callable estimating the cost at arbitrary in-range points.

    Args:
        device_params: A dict with keys `batch_sizes`, `hidden_sizes`,
            `class_sizes` and `cost_values`.

    Returns:
        A memoized function (batch_size, hidden_size, num_classes) -> cost;
        raises ValueError for points outside the measured bounds.
    """
    grid_points = np.array(np.meshgrid(
        device_params['batch_sizes'],
        device_params['hidden_sizes'],
        device_params['class_sizes'],
        indexing='ij')).T.reshape([-1, 3])
    measured_costs = np.array(device_params['cost_values'])

    # NaN fill value lets us detect out-of-hull queries below.
    estimator = LinearNDInterpolator(
        grid_points, measured_costs, fill_value=np.nan, rescale=True)

    @lru_cache(maxsize=10000)
    def _with_bounds(batch_size, hidden_size, num_classes):
        # Clamp every coordinate to at least 1 before interpolating.
        batch_size, hidden_size, num_classes = (
            max(1, batch_size), max(1, hidden_size), max(1, num_classes))

        cost = estimator(batch_size, hidden_size, num_classes)
        if np.isnan(cost):
            raise ValueError('Required point ({}, {}, {}) is out of known bounds'.format(
                batch_size, hidden_size, num_classes))

        return cost.item()

    return _with_bounds
def build_zipf_vocab(num_classes):
    """Build a frequency vocabulary following Zipf's law.

    Class ranks run from 1 to `num_classes`; rank k receives frequency
    num_classes / k.

    Args:
        num_classes: Total number of classes.

    Returns:
        A `Counter` instance mapping rank -> frequency.
    """
    ranks = np.arange(1, num_classes + 1)
    frequencies = num_classes / ranks

    return Counter(dict(zip(ranks, frequencies)))
def generate_class_clusters(num_tails, prob_accum, head=None):
    """Generates granular class splits for Adaptive Softmax.

    Works recursively: the first call seeds candidate head-cluster sizes,
    then each subsequent level appends one more tail cluster to every
    surviving partial split, filtering out candidates that violate the
    size/probability growth heuristics below.

    Args:
        num_tails: Number of tail clusters.
        prob_accum: A list of cumulative probabilities for all classes
            (classes assumed sorted by descending frequency).
        head: Pre-estimated splits. Reserved for internal purposes.

    Returns:
        A list of possible splits. Each split is a list with cluster sizes.
        All cluster sizes except last one have size evenly dividable by 8.
        Head cluster size + number of clusters is evenly dividable by 8 too.

    Raises:
        ValueError: If `num_tails` < 1, or no split survives the filters
            (too many clusters for too few classes).
    """
    num_classes = len(prob_accum)
    # Candidate boundary positions: cumulative sums of a linear ramp give a
    # quadratically growing size grid, restricted to 1%..99% of all classes.
    size_space = np.cumsum(np.linspace(1, num_classes, 1000)).astype(np.int32)
    size_space = size_space[size_space > 0.01 * num_classes]
    size_space = size_space[size_space < 0.99 * num_classes]
    if head is None:
        if num_tails < 1:
            raise ValueError('There are should be at least one tail cluster')
        # Head size is chosen so that head + num_tails (one routing logit per
        # tail) is evenly dividable by 8.
        head_split = np.floor((size_space + num_tails) / 8).astype(np.int32) * 8 - num_tails
        head_split = np.unique(head_split[head_split > 0]).reshape([-1, 1])
        return generate_class_clusters(num_tails - 1, prob_accum, head_split)
    if 0 == np.size(head):
        raise ValueError('Could not generate required number of clusters. '
                         'Try to decrease number of clusters or increase number of classes.')
    body = []
    # Per-split cumulative sizes, i.e. the class index where each cluster ends.
    sizes = np.cumsum(head, axis=-1)
    for split, size in zip(head, sizes):
        consumed = size[-1]
        if 0 == num_tails:
            # Last cluster must absorb every remaining class exactly.
            rest_size = num_classes - consumed
            subspace = np.array([rest_size])
        else:
            # Intermediate tail clusters keep sizes evenly dividable by 8.
            subspace = np.floor(size_space / 8).astype(np.int32) * 8
            subspace = np.unique(subspace)
            subspace = subspace[subspace < num_classes - consumed]
            # Next cluster should be at least 10% larger
            last_size = split[-1]
            subspace = subspace[subspace > 1.1 * last_size]
        for bone in subspace:
            # left/middle/right are class indices bounding the previous and
            # candidate clusters within prob_accum.
            # NOTE(review): for the first cluster `left` falls back to index 0
            # (probability of the single most frequent class) rather than a
            # zero baseline — presumably an accepted approximation; confirm.
            left = 0 if len(size) < 2 else size[-2] - 1
            middle = size[-1] - 1
            right = size[-1] + bone - 1
            # Next cluster should have at least 10% lower probability:
            # rearrangement of (p[mid]-p[left]) > 1.1 * (p[right]-p[mid]).
            if 2.1 * prob_accum[middle] > prob_accum[left] + 1.1 * prob_accum[right]:
                body.append(split.tolist() + [bone])
    body = np.array(body)
    if 0 == num_tails:
        return body
    return generate_class_clusters(num_tails - 1, prob_accum, body)
def adaptive_split_cost(approx_cost, prob_accum, cluster_sizes, batch_size, hidden_size, factor):
    """Estimates computation time for an adaptive softmax split.

    Args:
        approx_cost: Function estimating matmul time for (batch, hidden, classes) matrices.
        prob_accum: Per-class cumulative appearance probability.
        cluster_sizes: List of cluster sizes.
        batch_size: Size of input batch.
        hidden_size: Size of input logits.
        factor: Scale factor for tail projections.

    Returns:
        Estimated split computation time.

    Raises:
        ValueError: If cluster sizes don't sum to the number of classes, or
            two tail clusters collapse to the same projection size.
    """
    if np.sum(cluster_sizes).item() != len(prob_accum):
        raise ValueError('Wrong inputs: Sum of cluster sizes should be equal to size of accumulated probabilities.')
    boundaries = np.cumsum(cluster_sizes)
    # Root prediction: head classes plus one routing logit per tail cluster.
    total = approx_cost(batch_size, hidden_size, cluster_sizes[0] + len(cluster_sizes) - 1)
    last_dim = None
    for idx, tail_size in enumerate(cluster_sizes[1:]):
        # Each tail projects logits down by factor^(i+1), rounded to a multiple of 8.
        proj_dim = max(1, round(hidden_size / (factor ** (idx + 1)) / 8)) * 8
        if proj_dim == last_dim:
            raise ValueError('Some clusters have same internal size. '
                             'Try to decrease number of clusters or `factor`')
        last_dim = proj_dim
        lo, hi = boundaries[idx] - 1, boundaries[idx + 1] - 1
        tail_prob = prob_accum[hi] - prob_accum[lo]
        tail_batch = int(batch_size * tail_prob)
        # In most cases we can't guarantee tail batch size evenly dividable by 8.
        # So, for estimation, deliberately make it not divisible.
        if tail_batch % 8 == 0:
            tail_batch += 1
        total += approx_cost(tail_batch, hidden_size, proj_dim)  # Tail projection cost
        total += approx_cost(tail_batch, proj_dim, tail_size)  # Tail prediction cost
    return total
def estimate_best_splits(device_params, freq_vocab, num_tails, hidden_size, factor):
    """Estimates best class splits for Adaptive Softmax.

    Args:
        device_params: A dict with 4 keys (`batch_sizes`, `hidden_sizes`, `class_sizes`, `cost_values`).
            Device performance measurements.
        freq_vocab: Class-to-frequency counter.
        num_tails: Number of tail clusters.
        hidden_size: Size of input logits.
        factor: Scale factor for tail projections.

    Returns:
        A tuple of:
        - unique batch sizes
        - unique head sizes
        - speedups for each batch & head
        - split indices for each batch & head
    """
    if not isinstance(freq_vocab, Counter):
        raise ValueError('Frequency vocabulary should be a Counter instance.')
    approx_cost = interpolate_matmul_cost(device_params)

    frequencies = np.array([count for _, count in freq_vocab.most_common()])
    prob_accum = np.cumsum(frequencies / np.sum(frequencies))

    all_splits = generate_class_clusters(num_tails, prob_accum)
    head_sizes = list(np.unique(all_splits[:, 0]))

    # Keep batches that are small or 8-aligned, and always include the largest measured one.
    measured_batches = device_params['batch_sizes']
    aligned = [bs for bs in measured_batches if bs < 8 or bs % 8 == 0]
    batch_sizes = sorted(set(aligned + [max(measured_batches)]))

    try:
        # Non-adaptive (single full softmax) cost is the speedup baseline.
        base_costs = [approx_cost(bs, hidden_size, len(prob_accum)) for bs in batch_sizes]
    except ValueError:
        base_costs = None
        tf.get_logger().warning('Can\'t estimate non-adaptive softmax computation time. '
                                'Will use worst split time to compute speedup.')

    best_ids, split_speedups = [], []
    for batch_index, batch in enumerate(batch_sizes):
        # For every head size remember the cheapest split (cost, tail boundary indices).
        cheapest = {}
        for split in all_splits:
            cost = adaptive_split_cost(approx_cost, prob_accum, split, batch, hidden_size, factor)
            head = split[0]
            if head not in cheapest or cost < cheapest[head][0]:
                cheapest[head] = cost, list(np.cumsum(split[:-1]))
        if base_costs is None:
            # Fall back to the worst per-head cost as the baseline.
            reference = max(cheapest[hs][0] for hs in head_sizes)
        else:
            reference = base_costs[batch_index]
        for hs in head_sizes:
            split_speedups.append(reference / cheapest[hs][0])
            best_ids.append(cheapest[hs][1])
    return batch_sizes, head_sizes, split_speedups, best_ids
| 2.6875 | 3 |
advertools/ad_from_string.py | JUSTIN-BOLAND/advertools | 1 | 12764950 | <gh_stars>1-10
import string
def ad_from_string(s, slots=(30, 30, 30, 90, 90, 15, 15), sep=None,
                   capitalize=False):
    """Convert string ``s`` to an ad by splitting it into groups of words.

    Each group would have a length of at most the allowed length for that slot.
    If the total length of ``s`` exceeds the total allowed length, all
    remaining characters would be grouped in the last element of the
    returned list.

    :param s: a string of characters, with no restrictions on length
    :param slots: an iterable of integers for the maximum lengths for
                  each slot
    :param sep: by which character to split ``s``
    :param capitalize: whether or not to capitalize each word after grouping
                       Setting it as False would leave the input string as is
    :returns text_ad: a list of strings

    >>> ad_from_string('this is a short ad')
    ['this is a short ad', '', '', '', '', '', '', '']

    >>> ad_from_string('this is a longer ad and will take the first two slots')
    ['this is a longer ad and will', 'take the first two slots',
    '', '', '', '', '', '']

    >>> ad_from_string("Slots can be changed the way you want", (10, 15, 10))
    ['Slots can', 'be changed the', 'way you', 'want']

    >>> ad_from_string("The capitalization REMAinS as IS bY DefAULt",
    ...                (10, 15, 10))
    ['The', 'capitalization', 'REMAinS as', 'IS bY DefAULt']

    >>> ad_from_string("set capitalize=True to capitalize first letters",
    ...                capitalize=True)
    ['Set Capitalize=true To', 'Capitalize First Letters',
    '', '', '', '', '', '']
    """
    str_words = s.split(sep=sep)
    # One extra trailing slot collects whatever doesn't fit in `slots`.
    text_ad = ['' for _ in range(len(slots) + 1)]
    counter = 0
    for i, slot in enumerate(slots):
        while counter <= len(str_words) - 1:
            # Charge for the joining space only when the slot already holds
            # text; a word exactly `slot` characters long fits an empty slot.
            candidate = (text_ad[i] + ' ' + str_words[counter]
                         if text_ad[i] else str_words[counter])
            if len(candidate) > slot:
                break
            text_ad[i] = candidate
            counter += 1
    # Leftover words go verbatim into the last (unbounded) slot.
    joiner = ' ' if sep is None else sep
    text_ad[-1] = joiner.join(str_words[counter:])
    return [string.capwords(x) if capitalize else x for x in text_ad]
| 4.0625 | 4 |