max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
import data/insert_metadata.py | elenisproject/internet-and-applications | 2 | 12766851 | #------------------------------------------ EXTRACT DATA FROM METADATA.CSV -------------------------------------------------
#
# our table has the same structrure as the csv file downloaded
#
#
#--------------------------------------------------------------------------------------------------------------------------------
import pandas as pd
import unicodedata
import re
import string
import csv
import json
import itertools
from joblib import Parallel, delayed
import collections
from collections import Counter,defaultdict,OrderedDict,namedtuple
import mysql.connector
from settings import DB_CREDS
#establish a connection with the database
# Establish a connection with the database.
cnx = mysql.connector.connect(
    host=DB_CREDS['host'],
    user=DB_CREDS['user'],
    passwd=DB_CREDS['pass'],
    database=DB_CREDS['db']
)
cursor = cnx.cursor()

df = pd.read_csv('metadata.csv', low_memory=False)
# Fill all null cells so every column can be treated as text below.
df.fillna(" ", inplace=True)

# Columns inserted into the `general` table, in the same order as the csv.
COLUMNS = ['cord_uid', 'sha', 'source_x', 'title', 'doi', 'pmcid', 'pubmed_id',
           'license', 'abstract', 'publish_time', 'authors', 'journal', 'mag_id',
           'who_covidence_id', 'arxiv_id', 'pdf_json_files', 'pmc_json_files',
           'url', 's2_id']

# Hoisted out of the loop: the statement text never changes.
add_data = ("INSERT IGNORE INTO general "
            "(cord_uid , sha, source_x, title, doi, pmcid, pubmed_id, license, abstract, publish_time, authors, journal, mag_id, who_covidence_id, arxiv_id, pdf_json_files, pmc_json_files, url, s2_id) "
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")

for index in df.index:
    # If only the year is given, complete the date as year-1-1.
    if len(df.loc[index, 'publish_time']) == 4:
        year = df.loc[index, 'publish_time']
        df.loc[index, 'publish_time'] = year + '-1-1'
    # pubmed_id is stored as int in the database; replace blanks with 0.
    if df.loc[index, 'pubmed_id'] == ' ':
        df.loc[index, 'pubmed_id'] = 0
    # Save the extracted data in a tuple, in column order.
    data = tuple(df.loc[index, col] for col in COLUMNS)
    cursor.execute(add_data, data)

# Commit once after the loop instead of once per row: same end state,
# far fewer round trips to the server.
cnx.commit()
cursor.close()
cnx.close()
| 2.609375 | 3 |
scripts/omtools/om/devices/hca.py | sifive/testenv-metal | 1 | 12766852 | from typing import Dict, Union
from ..generator import OMSifiveSisHeaderGenerator
from .. import pprint
class OMSifiveSisHcaHeaderGenerator(OMSifiveSisHeaderGenerator):
    """SiFive SIS header generator for HCA crypto engine.
    """

    def _generate_definitions(self, devname: str,
            features: Dict[str, Dict[str, Union[bool, int]]]) -> Dict[str, int]:
        """Generate constant definitions.

           :param devname: the device name
           :param features: map of supported features and subfeatures.
           :return: a map of definition name, values
        """
        # implementation is device specific
        defs: Dict[str, int] = {}
        uname = devname.upper()
        # One HAS_<feature> flag per top-level feature.
        for feature in features:
            defs[f'{uname}_HAS_{feature.upper()}'] = 1
        # One <feature>_HAS_<subfeature> value per subfeature.
        for feature, subfeats in features.items():
            ufeat = feature.upper()
            for subfeat, value in subfeats.items():
                defs[f'{uname}_{ufeat}_HAS_{subfeat.upper()}'] = int(value or 0)
        # Pad names so the values line up in the generated header.
        # default=0 avoids a ValueError when no feature is reported.
        maxwidth = max((len(name) for name in defs), default=0) + self.EXTRA_SEP_COUNT
        return {name.ljust(maxwidth): value for name, value in defs.items()}
| 2.546875 | 3 |
porcupine/plugins/run/common.py | Akuli/editor | 1 | 12766853 | from __future__ import annotations
import dataclasses
import os
from pathlib import Path
from typing import Dict, List, Optional
from porcupine import tabs, utils
@dataclasses.dataclass
class Command:
    """A runnable command plus the substitution values used to render it."""

    command_format: str
    cwd_format: str
    external_terminal: bool
    substitutions: Dict[str, str]

    def format_cwd(self) -> Path:
        """Return the working directory with the substitutions applied."""
        cwd = self.cwd_format.format(**self.substitutions)
        return Path(cwd)

    def format_command(self) -> str:
        """Return the command string with shell-quoted substitutions."""
        quoted = {key: utils.quote(val) for key, val in self.substitutions.items()}
        return self.command_format.format(**quoted)
@dataclasses.dataclass
class ExampleCommand:
    """A predefined command suggestion with optional per-platform variants."""
    # Default command line.
    command: str
    # Windows/macOS-specific variants; presumably used instead of
    # ``command`` on those platforms — TODO confirm against the callers.
    windows_command: Optional[str] = None
    macos_command: Optional[str] = None
    # Where to run the command; "{folder_path}" is substituted later.
    working_directory: str = "{folder_path}"
    # Whether to run in an external terminal window.
    external_terminal: bool = True
class Context:
    """Snapshot of the tab-specific information needed to run a command."""

    def __init__(self, tab: "tabs.FileTab", key_id: int):
        path = tab.path
        assert path is not None
        self.file_path = path
        self.project_path = utils.find_project_root(path)
        # with default bindings: 0 = F5, 1 = F6, 2 = F7, 3 = F8
        self.key_id = key_id
        self.filetype_name: str | None = tab.settings.get("filetype_name", Optional[str])
        self.example_commands: list[ExampleCommand] = tab.settings.get(
            "example_commands", List[ExampleCommand]
        )

    def get_substitutions(self) -> dict[str, str]:
        """Return the placeholder values usable in command/cwd format strings."""
        file_path = self.file_path
        project_path = self.project_path
        substitutions = {
            "file_stem": file_path.stem,
            "file_name": file_path.name,
            "file_path": str(file_path),
            "folder_name": file_path.parent.name,
            "folder_path": str(file_path.parent),
            "project_name": project_path.name,
            "project_path": str(project_path),
        }
        return substitutions
def prepare_env() -> dict[str, str]:
    """Return a copy of os.environ with any active virtualenv stripped out."""
    env = dict(os.environ)
    # If Porcupine is running within a virtualenv, ignore it: drop
    # VIRTUAL_ENV and remove its directories from PATH.
    if "VIRTUAL_ENV" not in env or "PATH" not in env:
        return env
    venv_prefix = env.pop("VIRTUAL_ENV") + os.sep
    kept = [p for p in env["PATH"].split(os.pathsep) if not p.startswith(venv_prefix)]
    env["PATH"] = os.pathsep.join(kept)
    return env
| 2.46875 | 2 |
utils.py | sillwood/voicemos | 2 | 12766854 | # Modified based on the HRNet repo.
from __future__ import absolute_import, division, print_function
import logging
import os
import time
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
class FullModel(nn.Module):
    """
    Distribute the loss on multi-gpu to reduce
    the memory cost in the main gpu.
    You can check the following discussion.
    https://discuss.pytorch.org/t/dataparallel-imbalanced-memory-usage/22551/21
    """

    def __init__(self, model, loss):
        super(FullModel, self).__init__()
        self.model = model
        self.loss = loss

    def forward(self, inputs, labels, train_step=-1, **kwargs):
        # Run the wrapped model, then compute the loss on the same device.
        model_out = self.model(inputs, train_step=train_step, **kwargs)
        outputs, jac_loss, sradius = model_out
        # unsqueeze(0) so DataParallel can concatenate per-GPU scalars.
        batch_loss = self.loss(outputs, labels).unsqueeze(0)
        return batch_loss, jac_loss.unsqueeze(0), outputs, sradius
def get_world_size():
    """Return the distributed world size, or 1 when not running distributed."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_world_size()
    return 1
def get_rank():
    """Return this process's distributed rank, or 0 when not distributed."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_rank()
    return 0
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.initialized = False
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def initialize(self, val, weight):
        """Seed the meter with its first weighted observation."""
        self.val = val
        self.avg = val
        self.sum = val * weight
        self.count = weight
        self.initialized = True

    def update(self, val, weight=1):
        """Record a new observation, initializing on first use."""
        if self.initialized:
            self.add(val, weight)
        else:
            self.initialize(val, weight)

    def add(self, val, weight):
        """Fold a weighted observation into the running statistics."""
        self.val = val
        self.sum += val * weight
        self.count += weight
        self.avg = self.sum / self.count

    def value(self):
        """Most recently observed value."""
        return self.val

    def average(self):
        """Weighted mean of all observations so far."""
        return self.avg
def create_logger(cfg, cfg_name, phase="train"):
root_output_dir = Path(cfg.OUTPUT_DIR)
# set up logger
if not root_output_dir.exists():
print("=> creating {}".format(root_output_dir))
root_output_dir.mkdir()
dataset = cfg.DATASET.DATASET
model = cfg.MODEL.NAME
cfg_name = os.path.basename(cfg_name).split(".")[0]
final_output_dir = root_output_dir / dataset / cfg_name
print("=> creating {}".format(final_output_dir))
final_output_dir.mkdir(parents=True, exist_ok=True)
time_str = time.strftime("%Y-%m-%d-%H-%M")
log_file = "{}_{}_{}.log".format(cfg_name, time_str, phase)
final_log_file = final_output_dir / log_file
head = "%(asctime)-15s %(message)s"
logging.basicConfig(filename=str(final_log_file), format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger("").addHandler(console)
tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / cfg_name
print("=> creating {}".format(tensorboard_log_dir))
tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
return logger, str(final_output_dir), str(tensorboard_log_dir)
def get_optimizer(cfg, model):
    """Build the optimizer named by cfg.TRAIN.OPTIMIZER.

    Supports "sgd", "adam" and "rmsprop"; only parameters with
    requires_grad=True are optimized.  Returns None for any other name.
    """
    trainable = filter(lambda p: p.requires_grad, model.parameters())
    name = cfg.TRAIN.OPTIMIZER
    if name == "sgd":
        return optim.SGD(
            trainable,
            lr=cfg.TRAIN.LR,
            momentum=cfg.TRAIN.MOMENTUM,
            weight_decay=cfg.TRAIN.WD,
            nesterov=cfg.TRAIN.NESTEROV,
        )
    if name == "adam":
        return optim.Adam(
            trainable,
            lr=cfg.TRAIN.LR,
            weight_decay=cfg.TRAIN.WD,
        )
    if name == "rmsprop":
        return optim.RMSprop(
            trainable,
            lr=cfg.TRAIN.LR,
            momentum=cfg.TRAIN.MOMENTUM,
            weight_decay=cfg.TRAIN.WD,
            alpha=cfg.TRAIN.RMSPROP_ALPHA,
            centered=cfg.TRAIN.RMSPROP_CENTERED,
        )
    return None
def save_checkpoint(states, is_best, output_dir, filename="checkpoint.pth.tar"):
torch.save(states, os.path.join(output_dir, filename))
if is_best and "state_dict" in states:
torch.save(states["state_dict"], os.path.join(output_dir, "model_best.pth.tar"))
def get_confusion_matrix(label, pred, size, num_class, ignore=-1):
    """
    Calcute the confusion matrix by given label and pred.

    :param label: ground-truth tensor, shape (B, H, W)
    :param pred: logits tensor, shape (B, num_class, H, W)
    :param size: sequence whose last two entries give the valid H, W extent
    :param num_class: number of classes
    :param ignore: label value excluded from the matrix
    :return: (num_class, num_class) float ndarray; rows = ground truth,
             columns = prediction
    """
    # Channels-last so argmax over the class axis gives the predicted map.
    output = pred.cpu().numpy().transpose(0, 2, 3, 1)
    seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)
    # np.int was removed in NumPy 1.24; use the explicit int64 dtype.
    seg_gt = np.asarray(label.cpu().numpy()[:, : size[-2], : size[-1]], dtype=np.int64)

    # Drop ignored pixels before counting.
    ignore_index = seg_gt != ignore
    seg_gt = seg_gt[ignore_index]
    seg_pred = seg_pred[ignore_index]

    # Encode (gt, pred) pairs as a single index and histogram them;
    # minlength guarantees a full num_class**2 histogram so it reshapes
    # directly into the matrix (replaces the original double loop).
    index = (seg_gt * num_class + seg_pred).astype("int32")
    label_count = np.bincount(index, minlength=num_class * num_class)
    confusion_matrix = (
        label_count[: num_class * num_class]
        .reshape(num_class, num_class)
        .astype(np.float64)
    )
    return confusion_matrix
def adjust_learning_rate(optimizer, base_lr, max_iters, cur_iters, power=0.9):
    """Polynomial LR decay: set and return base_lr * (1 - cur/max) ** power.

    Only the first parameter group of *optimizer* is updated.
    """
    progress = float(cur_iters) / max_iters
    lr = base_lr * (1 - progress) ** power
    optimizer.param_groups[0]["lr"] = lr
    return lr
################################################################################
# The following function are based on:
# https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/nets_utils.py
def make_pad_mask(lengths, xs=None, length_dim=-1):
    """Make mask tensor containing indices of padded part.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor.
            If set, masks will be the same shape as this tensor.
        length_dim (int, optional): Dimension of *xs* that the lengths
            refer to (must not be 0; negative values count from the end).

    Returns:
        Tensor: Mask tensor containing indices of padded part
        (True/1 where padded).
        dtype=torch.uint8 in PyTorch 1.2-
        dtype=torch.bool in PyTorch 1.2+ (including 1.2)

    Examples:
        >>> make_pad_mask([5, 3, 2])
        masks = [[0, 0, 0, 0, 0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]
        With ``xs`` given, the (B, maxlen) mask is broadcast to the shape
        of ``xs`` along ``length_dim`` and moved to ``xs.device``.
    """
    if length_dim == 0:
        raise ValueError("length_dim cannot be 0: {}".format(length_dim))

    if not isinstance(lengths, list):
        lengths = lengths.tolist()
    batch = int(len(lengths))
    if xs is None:
        maxlen = int(max(lengths))
    else:
        maxlen = xs.size(length_dim)

    # positions[b, t] = t; limits[b, 0] = lengths[b]; padded where t >= len.
    positions = torch.arange(0, maxlen, dtype=torch.int64)
    positions = positions.unsqueeze(0).expand(batch, maxlen)
    limits = torch.tensor(lengths, dtype=torch.int64).unsqueeze(-1)
    mask = positions >= limits

    if xs is not None:
        assert xs.size(0) == batch, (xs.size(0), batch)
        if length_dim < 0:
            length_dim += xs.dim()
        # Insert singleton axes everywhere except dim 0 and length_dim,
        # e.g. (:, None, ..., :, ..., None), then broadcast to xs.
        expander = tuple(
            slice(None) if d in (0, length_dim) else None for d in range(xs.dim())
        )
        mask = mask[expander].expand_as(xs).to(xs.device)
    return mask
def make_non_pad_mask(lengths, xs=None, length_dim=-1):
    """Make mask tensor containing indices of non-padded part.

    Exactly the logical inverse of :func:`make_pad_mask`; see that
    function for the argument semantics and broadcasting behaviour.

    Args:
        lengths (LongTensor or List): Batch of lengths (B,).
        xs (Tensor, optional): The reference tensor.
            If set, masks will be the same shape as this tensor.
        length_dim (int, optional): Dimension of *xs* the lengths refer to.

    Returns:
        ByteTensor: mask tensor containing indices of non-padded part
        (True/1 where valid).
        dtype=torch.uint8 in PyTorch 1.2-
        dtype=torch.bool in PyTorch 1.2+ (including 1.2)

    Examples:
        >>> make_non_pad_mask([5, 3, 2])
        masks = [[1, 1, 1, 1, 1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]
    """
    pad_mask = make_pad_mask(lengths, xs, length_dim)
    return ~pad_mask
| 2.734375 | 3 |
xc/common/libraries/generate_verilog.py | bl0x/symbiflow-arch-defs | 183 | 12766855 | """Transforms the XML module definitions parsed from the PDF into a verilog representation"""
from lxml import etree
from datetime import datetime
def format_port(name, width, type, **kwargs):
    """Render one Verilog port declaration line.

    A width of 1 yields a scalar port; any other width declares a
    [width:0] vector, matching the widths recorded in the XML.
    """
    if int(width) == 1:
        range_spec = ''
    else:
        range_spec = '[%s:0]\t' % width
    return '\t%s\t%s%s;\n' % (type, range_spec, name)
def format_attrib(name, type, default, **kwargs):
    """Render one Verilog parameter declaration; STRING defaults are quoted."""
    value = '"%s"' % default if type == 'STRING' else default
    return '\tparameter %s = %s;\n' % (name, value)
def process(infile, outfile):
    """Read module definitions from *infile* (XML) and write a Verilog
    stub declaration for each module to *outfile*."""
    tree = etree.parse(infile)
    root = tree.getroot()
    with open(outfile, "w") as output:
        output.write(
            '// Automatically generated from %s on %s\n\n' %
            (infile, datetime.now().isoformat())
        )
        # Iterate the element directly: Element.getchildren() was
        # deprecated and removed (gone from ElementTree in Python 3.9).
        for module in root:
            ports = module.xpath('port')
            attrs = module.xpath('attribute')
            port_names = ', '.join(port.attrib['name'] for port in ports)
            output.write('module %s (%s);\n' % (module.attrib['name'], port_names))
            for port in ports:
                output.write(format_port(**dict(port.attrib)))
            if len(attrs):
                output.write('\n')
            for attr in attrs:
                output.write(format_attrib(**dict(attr.attrib)))
            output.write('endmodule\n\n')
if __name__ == '__main__':
    import argparse
    # Command-line entry point: convert the XML cell library into Verilog stubs.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', '-i', nargs='?', default='cells_xtra.xml')
    parser.add_argument('--output', '-o', nargs='?', default='cells_xtra.v')
    args = parser.parse_args()
    process(args.input, args.output)
| 2.859375 | 3 |
slides/template.py | russellromney/dash-slides | 14 | 12766856 | <filename>slides/template.py
###
# no need to delete this - it won't show up in the presentation unless you add it to presentation.py
###
# necessary imports - do not change
from app import app
# custom imports - delete these if you don't need them
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Output, Input, State
from custom_utilities.custom_functions import my_function
# Slide layout: centred title, a demo button, and a heading that the
# callback below rewrites with the click count.
content = html.Div(style=dict(textAlign='center'),children=[
    html.H1('Template Slide Title'),
    html.Button('Click this!',id='template-button',n_clicks=0),
    html.H2(id='template-div')
])
@app.callback(
    Output('template-div','children'),
    [Input('template-button','n_clicks')]
)
def create_template_graph(n):
    """Return the click-count text shown in the template-div heading.

    Fixed: the return line had dataset-table columns ("| 2.421875 | 2 |")
    fused onto it, which made the expression raise a TypeError.
    """
    return 'Button has been clicked {} times.'.format(n)
moment_polytopes/__init__.py | amsqi/moment_polytopes | 2 | 12766857 | <reponame>amsqi/moment_polytopes
from __future__ import absolute_import, print_function
#:Current version.
__version__ = "1.2.dev0"
from .polyhedron import *
from .lie_group import *
from .ressayre import *
from .combinat import *
from . import qmp
from . import third_party
| 0.894531 | 1 |
done/12.py | eNV25/euler | 0 | 12766858 | <gh_stars>0
import sys
def factors(n):
    '''Yield the divisors of n in ascending order.

    Divisors up to sqrt(n) are generated directly, then their
    complements n // i in increasing order.  (For a perfect square the
    root is yielded twice.)
    '''
    n = int(n)
    root = int(n ** 0.5)
    for small in range(1, root + 1):
        if n % small == 0:
            yield small
    for small in reversed(range(1, root + 1)):
        if n % small == 0:
            yield n // small
def lenfactors(n):
    '''Return the number of divisors of n.

    Counts the divisor pairs (i, n // i) for i up to sqrt(n).  When n is
    a perfect square the root belongs to both halves of the pair, so one
    is subtracted (the previous version over-counted squares by one).
    '''
    n = int(n)
    count = 0
    root = int(n ** 0.5)
    for i in range(1, root + 1):
        if n % i == 0:  # i divides n, so n // i does too
            count += 2
    if root * root == n:
        count -= 1
    return count
def triangles():
    '''Yield the triangle numbers 1, 3, 6, 10, ... indefinitely.'''
    total = 0
    term = 0
    while True:
        term += 1
        total += term
        yield total
# Search triangle numbers until one has at least 500 divisors
# (Project Euler problem 12); Ctrl-C prints the last candidate tried.
try:
    for number in triangles():
        if lenfactors(number) >= 500:
            print(number)
            break
except KeyboardInterrupt:
    print(number)
| 3.546875 | 4 |
tm4/colourTools.py | zhubonan/tm4 | 1 | 12766859 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 10:40:39 2016
Converting reflectance spectrum to a CIE coordinate
@author: Bonan
"""
import numpy as np
from scipy import interpolate
import os
# Adobe RGB (1998) D65 as reference white
# http://www.brucelindbloom.com/index.html?Eqn_XYZ_to_RGB.html
_RGB_to_XYZ = np.array([
[0.5767309, 0.1855540, 0.1881852],
[0.2973769, 0.6273491, 0.0752741],
[0.0270343, 0.0706872, 0.9911085], ])
_XYZ_to_RGB = np.array([
[2.0413690, -0.5649464, -0.3446944],
[-0.9692660, 1.8760108, 0.0415560],
[0.0134474, -0.1183897, 1.0154096], ])
# Load the
_dirname = os.path.dirname(__file__)
fn1 = os.path.join(_dirname, 'CIE_1931_XYZ.txt')
fn2 = os.path.join(_dirname, 'CIE_A.txt')
fn3 = os.path.join(_dirname, 'CIE_D65.txt')
CIE_XYZ_table = np.loadtxt(fn1).T # Transpose column into rows
CIE_A = np.loadtxt(fn2).T
CIE_D65 = np.loadtxt(fn3).T
def splineInterp(xNew, xRaw, yRaw):
    """
    Compute the spline interpolation(cubic) of the data.

    Points outside the range of xRaw evaluate to zero (splev ext=1).
    """
    spline = interpolate.splrep(xRaw, yRaw)
    return interpolate.splev(xNew, spline, der=0, ext=1)
def specToXYZ(spec, SI='D65'):
    """
    Calculate the XYZ coordinate of the spectrum input.
    It interpolates the charts to every wavelength that was inputed.
    By default the input spectrum was first eveloped using a SPD function
    to simulation illumination.

    spec: input spectrum, 2*N ndarray, 1st row must be the wavelength
    SI: illuminant, 'D65' or 'A'; any other value applies no envelope
    return: (X,Y,Z)
    """
    wl = spec[0]  # the input must have the 1st element as the wavelength
    XYZ = CIE_XYZ_table
    # These branches must be mutually exclusive.  The previous version
    # used two independent `if` statements, so for SI == 'D65' the second
    # statement's `else` branch overwrote the D65 envelope with ones.
    if SI == 'D65':
        interpSI = splineInterp(wl, CIE_D65[0], CIE_D65[1])
    elif SI == 'A':
        interpSI = splineInterp(wl, CIE_A[0], CIE_A[1])
    else:
        interpSI = np.ones(len(wl))
    interpX = splineInterp(wl, XYZ[0], XYZ[1])
    interpY = splineInterp(wl, XYZ[0], XYZ[2])
    interpZ = splineInterp(wl, XYZ[0], XYZ[3])
    interpXYZ = np.array([interpX, interpY, interpZ])
    X, Y, Z = np.sum(spec[1] * interpSI * interpXYZ, axis=1)
    return X, Y, Z
def specToxyz(spec, SI='D65'):
    """
    Transfer spectrum into normalised x,y,z coordinates.
    Return: (x, y, z) where x + y + z == 1
    """
    X, Y, Z = specToXYZ(spec, SI)
    total = X + Y + Z
    x = X / total
    y = Y / total
    return x, y, 1 - x - y
def specToRGB(spec, SI='D65', scale_factor=1):
    """
    Convert the spectrum(reflectivity) into an RGB value.
    The xyz chromaticity is mapped through _XYZ_to_RGB, clipped to
    [0, 1], scaled, and clipped again.
    Return: (R,G,B)
    """
    xyz = specToxyz(spec, SI)
    rgb = np.dot(_XYZ_to_RGB, xyz).clip(0, 1) * scale_factor
    return tuple(rgb.clip(0, 1))
if __name__ == '__main__':
    # Testing of the module
    import matplotlib.pyplot as pl
    # Synthetic half-sine reflectance spectrum over 400-800 nm.
    wlRange = np.linspace(400, 800, 100)
    example = np.sin((wlRange - 400) * np.pi / 400)
    spec = np.array([wlRange, example])
    c = specToRGB(spec)
    pl.plot(spec[0], spec[1] / spec[1].max(),
            label='Example distribution', color=c)
    print(c)
    # Use the D65 as the light source
    spec = CIE_D65
    c = specToRGB(spec, SI='D65')
    print('Test using D65 illumination. Should give R=G=B')
    print(c)
    pl.plot(spec[0], spec[1] / spec[1].max(),
            label='D65 distribution', color=np.array(c))
    pl.title('Coloured Spectrum')
    pl.legend()
| 2.6875 | 3 |
bot/settings/base.py | hqrrylyu/ob-eshop-telegram-bot | 0 | 12766860 | from pathlib import Path
from typing import Dict
from environs import Env
from furl import furl
from .utils import FilterSettings
env = Env()
# Global debug flag; environment-specific settings modules may override it.
DEBUG = False
BASE_DIR = Path(__file__).parent.parent
# Translations live in locales/ under the gettext domain "messages".
LOCALES_DIR = BASE_DIR / "locales"
I18N_DOMAIN = "messages"
# Telegram bot credentials and the admin user ids.
BOT_TOKEN = env("BOT_TOKEN")
ADMINS = env.list("ADMINS", subcast=int)
# Backend REST API base URL (BASE_URL + API_PATH) and its auth token.
BASE_URL = env("BASE_URL")
API_BASE_URL = furl(BASE_URL).add(path=env("API_PATH")).url
API_TOKEN = env("API_TOKEN")
PAYMENTS_PROVIDER_TOKEN = env("PAYMENTS_PROVIDER_TOKEN")
# Host/port of the state-machine storage backend (presumably Redis — confirm).
FSM_STORAGE = {"host": env("STORAGE_HOST"), "port": env.int("STORAGE_PORT")}
TIMEZONE = env("TIMEZONE", "UTC")
DATETIME_FORMAT = "%d/%m/%Y %H:%M:%S %Z%z"
SHORT_DATETIME_FORMAT = "%d/%m/%Y %H:%M"
# Storage keys for per-user filter state and the cached product page.
FILTERS_STORAGE_KEY = "filters"
CACHED_PAGE_STORAGE_KEY = "cached_page"
PRODUCT_PAGE_SIZE = 10
# gettext-style no-op marker so the strings below can be translated later.
_ = lambda s: s  # noqa
# Product filters offered to the user, keyed by filter id.
PRODUCT_FILTERS: Dict[str, FilterSettings] = {
    "gender": FilterSettings(
        _("Gender"), ("title",), api_endpoint="/categories/", query_name="category"
    ),
    # "category" choices depend on the selected gender.
    "category": FilterSettings(
        _("Category"),
        ("title",),
        api_endpoint="/categories/",
        depends_on="gender",
        choices_keyboard_width=3,
    ),
    "season": FilterSettings(_("Season"), ("name",), choices_keyboard_width=2),
    "brand": FilterSettings(_("Brand"), ("name",), api_endpoint="/brands/"),
    "color": FilterSettings(_("Color"), ("name",), api_endpoint="/colors/"),
    "outer_material": FilterSettings(
        _("Outer material"), ("name",), api_endpoint="/outer_materials/"
    ),
}
# Each entry: (id, user-facing title, [(option label, price), ...]).
SHIPPING_OPTIONS = [
    ("nova_poshta", _("Nova Poshta (Сustomer pays shipping)"), [(_("Nova Poshta"), 0)]),
    ("pickup", _("Local pickup (Mykolaiv)"), [(_("Local pickup"), 0)]),
]
SUCCESSFUL_PAYMENT_STICKER_ID = env("SUCCESSFUL_PAYMENT_STICKER_ID")
CONTACT_PHONES = env.list("CONTACT_PHONES")
CONTACT_EMAILS = env.list("CONTACT_EMAILS")
# fmt: off
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(asctime)s %(levelname)s %(name)s: %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "default",
},
},
"loggers": {
"bot": {
"handlers": ["console"],
"level": "INFO",
},
},
}
# fmt: on
| 2 | 2 |
17-exercises/02-lista/regex.py | deniscostadsc/regex-tutorial | 4 | 12766861 | '''
Nesse exer você deve casar linha que tenham 'b' ou 'c' seguido de uma vogal,
duas vezes seguidas.
exemplo: "baba", "caca", ou "cabo"
Para fixação, usar o '[]' (lista).
'''
import re
import sys
REGEX = r''
lines = sys.stdin.readlines()
for line in lines:
if re.search(REGEX, line):
print(line.replace('\n', ''))
| 3.453125 | 3 |
dataloader.py | husnejahan/DeepAR-Pytorch | 213 | 12766862 | <gh_stars>100-1000
from __future__ import division
import numpy as np
import torch
import os
import logging
from torch.utils.data import DataLoader, Dataset, Sampler
logger = logging.getLogger('DeepAR.Data')
class TrainDataset(Dataset):
    """Dataset over the pre-generated training arrays saved as .npy files."""

    def __init__(self, data_path, data_name, num_class):
        self.data = np.load(os.path.join(data_path, f'train_data_{data_name}.npy'))
        self.label = np.load(os.path.join(data_path, f'train_label_{data_name}.npy'))
        self.train_len = self.data.shape[0]
        logger.info(f'train_len: {self.train_len}')
        logger.info(f'building datasets from {data_path}...')

    def __len__(self):
        return self.train_len

    def __getitem__(self, index):
        window = self.data[index]
        # The last column of each window carries an integer id;
        # the remaining columns are the model inputs.
        series_id = int(window[0, -1])
        return (window[:, :-1], series_id, self.label[index])
class TestDataset(Dataset):
    """Dataset over the pre-generated test arrays saved as .npy files."""

    def __init__(self, data_path, data_name, num_class):
        self.data = np.load(os.path.join(data_path, f'test_data_{data_name}.npy'))
        self.v = np.load(os.path.join(data_path, f'test_v_{data_name}.npy'))
        self.label = np.load(os.path.join(data_path, f'test_label_{data_name}.npy'))
        self.test_len = self.data.shape[0]
        logger.info(f'test_len: {self.test_len}')
        logger.info(f'building datasets from {data_path}...')

    def __len__(self):
        return self.test_len

    def __getitem__(self, index):
        window = self.data[index]
        # The last column of each window carries an integer id;
        # `v` holds the per-window scale values.
        series_id = int(window[0, -1])
        return (window[:, :-1], series_id, self.v[index], self.label[index])
class WeightedSampler(Sampler):
    """Sampler drawing indices with probability proportional to the
    normalized |v[:, 0]| weights loaded from the train_v file.

    Fixed: the __len__ return line previously had dataset-table columns
    ("| 2.484375 | 2") fused onto it, which raised a TypeError.
    """

    def __init__(self, data_path, data_name, replacement=True):
        v = np.load(os.path.join(data_path, f'train_v_{data_name}.npy'))
        scale = np.abs(v[:, 0])
        # Normalize so the weights form a probability distribution.
        self.weights = torch.as_tensor(scale / np.sum(scale), dtype=torch.double)
        logger.info(f'weights: {self.weights}')
        self.num_samples = self.weights.shape[0]
        logger.info(f'num samples: {self.num_samples}')
        self.replacement = replacement

    def __iter__(self):
        chosen = torch.multinomial(self.weights, self.num_samples, self.replacement)
        return iter(chosen.tolist())

    def __len__(self):
        return self.num_samples
tests/utils/test_utils_create_match.py | battleforcastile/battleforcastile | 0 | 12766863 | <filename>tests/utils/test_utils_create_match.py
import json
import os
from unittest.mock import patch
from battleforcastile.constants import BATTLEFORCASTILE_BACKEND_URL
from battleforcastile.utils.create_match import create_match
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
@patch('battleforcastile.utils.create_match.requests.post')
def test_if_create_match_works_as_it_should(mocked_post):
    """create_match should POST the first user's data to /enqueue-match/."""
    mocked_post.return_value.status_code = 200
    username = 'test'
    character = {}
    response = create_match(username, character)
    assert response.status_code == 200
    # The backend expects the enqueuing user nested under "first_user".
    mocked_post.assert_called_with(
        f'{BATTLEFORCASTILE_BACKEND_URL}/enqueue-match/',
        data=json.dumps({
            'first_user': {
                'username': username,
                'character': character
            }
        }))
| 2.734375 | 3 |
_testing/upython/app_echo.py | kevinkk525/micropython_iot_generic | 0 | 12766864 | <reponame>kevinkk525/micropython_iot_generic
# Author: <NAME>
# Copyright <NAME> 2019 Released under the MIT license
# Created on 2019-01-02
__updated__ = "2019-01-02"
__version__ = "0.0"
from client.apphandler import get_apphandler, TimeoutError
from client.app_template import App, AppTemporary
import uasyncio as asyncio
from machine import Pin
import gc
import time
server = "192.168.178.60"
port = 9999
loop = asyncio.get_event_loop()
app_handler = get_apphandler(loop, b"1", server, port, verbose=True, led=Pin(2, Pin.OUT, value=1))
###
# Echo Client
###
echoApp = App
# send message to server and get it back. Also measure delay between message and response
async def echoClient(app):
    """Periodically send an echo request and track the round-trip delay.

    The payload carries a running counter, the last measured delay (ms)
    and the free heap, so the server's echo can be sanity-checked.
    Exits on the first timeout.
    """
    count = 0
    delay = -1  # -1 until the first round trip has been measured
    while True:
        st = time.ticks_ms()
        try:
            header, message = await AppTemporary(app, 0, ["echo message", count, delay, gc.mem_free()], ident=0)
        except TimeoutError:
            print("TimeoutError waiting for message")
            return
        et = time.ticks_ms()
        delay = et - st  # round-trip time in milliseconds
        count = message[1] + 1  # continue from the echoed counter
        await asyncio.sleep(2)
loop.create_task(echoClient(echoApp))
try:
loop.run_forever()
finally:
app_handler.close()
| 2.84375 | 3 |
source/xiaopeip/read.py | xuyu92327/waveform-analysis | 7 | 12766865 | import h5py
import tables
import numpy as np
import sys
args=int(sys.argv[1])
# Read hdf5 file
h5file = tables.open_file(f"./data/atraining-{args}.h5", "r")
WaveformTable = h5file.root.Waveform
GroundTruthTable = h5file.root.GroundTruth
sinevet,sinchan,sintime=[],[],[]
# Use the ground truth to find single-photon events: rows whose channel
# differs from both neighbouring rows' channels.
i=1
while i <100000:
    if GroundTruthTable[i]['ChannelID']!=GroundTruthTable[i-1]['ChannelID'] and GroundTruthTable[i]['ChannelID']!=GroundTruthTable[i+1]['ChannelID']:
        sinevet.append(GroundTruthTable[i]['EventID'])
        sintime.append(GroundTruthTable[i]['PETime'])
        sinchan.append(GroundTruthTable[i]['ChannelID'])
    i+=1
# Accumulate the single-photon waveforms, each rolled so the PE time is
# at sample 0, with the 972 baseline subtracted.
sumwave=np.zeros(1029,dtype=np.int32)
sinlen=len(sinevet)
for x in range(sinlen):
    if x%100==0:
        print(f"{x*100/sinlen}%")
    # Linear scan for the waveform matching this event/channel pair.
    posi=0
    while True:
        if WaveformTable[posi]["EventID"]==sinevet[x] and WaveformTable[posi]["ChannelID"]==sinchan[x]:
            break
        posi+=1
    sumwave+=np.append(WaveformTable[posi]['Waveform'][sintime[x]:],WaveformTable[posi]['Waveform'][:sintime[x]])-972
# Average over all single-photon events; re-zero using the tail samples.
averwave=sumwave/sinlen
averzero=np.average(averwave[100:])
spe=averwave-averzero
with h5py.File(f"medium/average{args+1}.h5", "w") as opt1:
    opt1.create_dataset("averzero", data=np.array([averzero]))
with h5py.File(f'medium/singlewave{args+1}.h5',"w") as opt2:
    opt2.create_dataset("spe",data=spe,compression="gzip", shuffle=True)
# Outputs written; close the input file.
h5file.close()
| 2.015625 | 2 |
DataAnalysis/__init__.py | AdamSwenson/TwitterProject | 0 | 12766866 | <reponame>AdamSwenson/TwitterProject
__author__ = 'adam'
import environment
| 1.085938 | 1 |
scripts/PIRSensor.py | sradevski/HomeAutomate | 0 | 12766867 | <reponame>sradevski/HomeAutomate<gh_stars>0
import RPi.GPIO as GPIO
import time
import come_home
import radio_lights
import remote_core as core
sensor = 23

GPIO.setmode(GPIO.BCM)
GPIO.setup(sensor, GPIO.IN, GPIO.PUD_DOWN)

# Hours delimiting when the shelf light may be switched on automatically.
turn_on_hour = 18
turn_off_hour = 8

last_movement_time = time.localtime(time.time())

while True:
    time.sleep(3)
    current_state = GPIO.input(sensor)
    current_time = time.localtime(time.time())
    print(current_state)
    if current_state == 1:
        # Movement detected: remember when, and react based on state.
        last_movement_time = time.localtime(time.time())
        config = core.load_config()
        print(last_movement_time)
        if config['location']['am_home'] == False:
            come_home.turn_on_all([])
        # BUGFIX: the original referenced the undefined name `localtime`
        # here (NameError); it meant the current loop time.
        elif current_time.tm_hour > turn_on_hour or current_time.tm_hour < turn_off_hour:
            config = core.load_config()
            if not config["lights"]["shelf_light"]["is_on"]:
                radio_lights.turn_on_single(config["lights"]["shelf_light"])
                core.write_config(config)
    # Switch the shelf light off after ~80 s without movement.
    if time.mktime(current_time) - time.mktime(last_movement_time) > 80:
        config = core.load_config()
        if config["lights"]["shelf_light"]["is_on"]:
            radio_lights.turn_off_single(config["lights"]["shelf_light"])
            core.write_config(config)
| 2.671875 | 3 |
scripts/deploy.py | mikaelaakko/vesting-contracts | 1 | 12766868 | from brownie import LinearVesting, Contract
from scripts.helper_functions import get_account
custom_token_address = "0x61c2984d0D60e8C498bdEE6dbE4A4E83E53ecfE8"
amount = 1000000 * 10 ** 18
def deploy():
    """Deploy the LinearVesting contract and report where to view it."""
    account = get_account()
    vesting = LinearVesting.deploy(
        custom_token_address,
        {"from": account},
        publish_source=True,
    )
    print(f"Contract {vesting.address} deployed succesfully!")
    print(
        f"View the contract at https://rinkeby.etherscan.io/address/{vesting.address}"
    )
def load_tokens():
    """Transfer the vesting amount of the custom token into the contract."""
    account = get_account()
    vesting = LinearVesting[-1]
    token = Contract(custom_token_address)
    token.transfer(vesting.address, amount, {"from": account})
def add_new_recipient():
    """Register the deployer account itself as a vesting recipient of `amount`."""
    caller = get_account()
    vesting_contract = LinearVesting[-1]
    vesting_contract.addNewRecipient(caller, amount, {"from": caller})
def withdraw():
    """Withdraw the caller's currently withdrawable tokens from the contract."""
    caller = get_account()
    vesting_contract = LinearVesting[-1]
    vesting_contract.withdrawToken(caller, {"from": caller})
def print_values():
    """Print the caller's locked, withdrawable and vested token amounts."""
    account = get_account()
    vesting = LinearVesting[-1]
    print("Amount locked: ", vesting.getLocked(account))
    print("Amount withdrawable: ", vesting.getWithdrawable(account))
    # FIX: replaced the leftover "LOLLOL:" debug label with a meaningful one.
    print("Amount vested:", vesting.getVested(account))
def main():
    # Brownie entry point: deploy the contract, fund it, register the
    # deployer as a recipient, then dump the vesting numbers.
    # withdraw() is left as a manual step.
    deploy()
    load_tokens()
    add_new_recipient()
    print_values()
    # withdraw()
| 2.421875 | 2 |
tests/test_dataset_reader.py | ReyRiordan/GoEmotions-pytorch | 0 | 12766869 | from classifier.dataset_readers.dataset_reader import ClassificationTsvReader
from classifier.dataset_readers.dataset_reader_pt import ClassificationPtTsvReader
from allennlp.common.util import ensure_list
def test_rey_reader_1(project_root_dir_path, test_fixtures_dir_path, test_log):
    """ClassificationTsvReader yields the expected instances from the fixture TSV."""
    data_file_path = test_fixtures_dir_path / 'data' / 'train_500.tsv'
    reader = ClassificationTsvReader()
    instances = ensure_list(reader.read(str(data_file_path)))
    print(instances)
    # NOTE(review): fixture is named train_500.tsv but only 10 instances are
    # expected -- confirm the fixture really holds 10 rows.
    assert len(instances) == 10
    # instances[0].fields["text"].tokens
    assert instances[0].fields["label"].label == '27'
def test_rey_reader_2(project_root_dir_path, test_fixtures_dir_path, test_log):
    """ClassificationPtTsvReader reads the same fixture with identical results."""
    data_file_path = test_fixtures_dir_path / 'data' / 'train_500.tsv'
    reader = ClassificationPtTsvReader()
    instances = ensure_list(reader.read(str(data_file_path)))
    print(instances)
    assert len(instances) == 10
    print(instances[0].fields["text"].tokens)
    assert instances[0].fields["label"].label == '27'
app/run.py | es-g/disaster_response | 0 | 12766870 | <reponame>es-g/disaster_response
import gzip
import json
import os
import pickle
import plotly
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from flask import Flask
from flask import render_template, request
from plotly.graph_objs import Bar
from sqlalchemy import create_engine
app = Flask(__name__)
def tokenize(text):
    """Split *text* into lowercase, lemmatized, whitespace-stripped tokens."""
    lemmatizer = WordNetLemmatizer()
    return [
        lemmatizer.lemmatize(token).lower().strip()
        for token in word_tokenize(text)
    ]
# load data
def find_filename(file_ex):
    """
    Return the first filename (searching ../data, then ../models) whose name
    ends with the given extension suffix.

    :param file_ex: filename suffix to match, e.g. '.db' or '.pkl'
    :return: file - matching filename, or None (implicitly) if no match
    """
    # NOTE(review): assumes both ../data and ../models exist relative to the
    # current working directory -- os.listdir raises FileNotFoundError
    # otherwise; confirm the app is always launched from the app/ directory.
    filenames = os.listdir('../data')
    filenames.extend(os.listdir('../models'))
    for file in filenames:
        if file.endswith(file_ex):
            return file
# Connect to the first *.db found under ../data and load the cleaned messages.
engine = create_engine('sqlite:///../data/{}'.format(find_filename('.db')))
df = pd.read_sql_table('data', engine)
categories = df.select_dtypes(include=['int64']) # Select only int64 datatypes
categories = categories.drop('id', axis=1) # Drop id column as irrelevant
# load model
# The pickled classifier is gzip-compressed; Unpickler streams it from disk.
with gzip.open("../models/{}".format(find_filename('.pkl')), 'rb') as f:
    p = pickle.Unpickler(f)
    model = p.load()
# index webpage: overview charts of the dataset plus the query input form
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page with Plotly charts of the training data."""
    # data for the charts
    category_means = categories.mean()
    category_labels = categories.columns
    counts_by_genre = df.groupby('genre').count()['message']
    genre_labels = list(counts_by_genre.index)

    def bar_chart(x, y, title, x_title, y_title):
        # One Plotly bar-figure dict in the shape master.html expects.
        return {
            'data': [
                Bar(
                    x=x,
                    y=y
                )
            ],
            'layout': {
                'title': title,
                'yaxis': {
                    'title': y_title
                },
                'xaxis': {
                    'title': x_title
                }
            }
        }

    graphs = [
        bar_chart(category_labels, category_means,
                  'Distribution of messages categories', "", ""),
        bar_chart(genre_labels, counts_by_genre,
                  'Distribution of message genres', "Genre", "Count"),
    ]

    # encode plotly graphs in JSON for the template's JS to consume
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)

    return render_template('master.html', ids=ids, graphJSON=graphJSON)
# web page that classifies a user query and shows per-category results
@app.route('/go')
def go():
    """Classify the user's query and render the results page."""
    query = request.args.get('query', '')
    # The model returns one row of labels; pair each with its category name.
    predicted_labels = model.predict([query])[0]
    results = dict(zip(categories, predicted_labels))
    return render_template(
        'go.html',
        query=query,
        classification_result=results
    )
if __name__ == "__main__":
    # Development server only; use a WSGI server (e.g. gunicorn) in production.
    app.run(host='127.0.0.1', port=3001, debug=True)
| 2.671875 | 3 |
llvm_tools/custom_script_example.py | TinkerBoard-Android/external-toolchain-utils | 0 | 12766871 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A custom script example that utilizes the .JSON contents of the tryjob."""
from __future__ import print_function
import json
import sys
from update_tryjob_status import TryjobStatus
def main():
    """Determines the exit code based off of the contents of the .JSON file."""
    # sys.argv[1] is the path to the .JSON file describing the tryjob.
    #
    # Format of the tryjob contents:
    # {
    #   "status" : [TRYJOB_STATUS],
    #   "buildbucket_id" : [BUILDBUCKET_ID],
    #   "extra_cls" : [A_LIST_OF_EXTRA_CLS_PASSED_TO_TRYJOB],
    #   "url" : [GERRIT_URL],
    #   "builder" : [TRYJOB_BUILDER_LIST],
    #   "rev" : [REVISION],
    #   "link" : [LINK_TO_TRYJOB],
    #   "options" : [A_LIST_OF_OPTIONS_PASSED_TO_TRYJOB]
    # }
    tryjob_json_path = sys.argv[1]
    with open(tryjob_json_path) as json_file:
        tryjob = json.load(json_file)

    pending_good_cutoff = 369416
    skip_window_start = 369420
    skip_window_end = 369428

    status = tryjob['status']
    revision = tryjob['rev']

    if status == TryjobStatus.PENDING.value:
        # Exit code 0 marks the tryjob 'good'; 124 marks it 'bad'.
        if revision <= pending_good_cutoff:
            sys.exit(0)
        sys.exit(124)

    if status == TryjobStatus.BAD.value:
        # 'bad' tryjobs need a closer manual look. Exit code 1 is not in the
        # status mapping, so the caller raises and saves the .JSON file in
        # this custom script's directory for inspection.
        sys.exit(1)

    if status == TryjobStatus.SKIP.value:
        # Validate that the 'skip' value really lies between the cutoffs;
        # exit code 125 keeps the 'skip' status.
        if skip_window_start < revision < skip_window_end:
            sys.exit(125)
        if revision >= skip_window_end:
            sys.exit(124)
if __name__ == '__main__':
    # Invoked by update_tryjob_status with the tryjob .JSON path as argv[1].
    main()
| 2.09375 | 2 |
app/routes/error.py | Chartes-TNAH/CollectArt | 0 | 12766872 | from flask import render_template, url_for
# import render_template (to bind the templates to the routes) and url_for (to build URLs to the
# functions and html pages)
from ..app import app
# import the app variable that instantiates the application
# | ROUTES FOR COMMON ERRORS |
@app.errorhandler(401)
def unauthorized_error(error):
    """
    Render the 401 page when access is not authorized.

    FIX: this handler was also named `not_found_error`, the same as the 404
    handler below, so the later definition shadowed this function's name
    (flake8 F811). Flask had already registered the handler via the
    decorator, but the rename removes the silent name collision.

    :return: template 401.html
    :rtype: template
    """
    return render_template('error/401.html'), 401
@app.errorhandler(404)
def not_found_error(error):
    """
    Render the 404 page when a route or resource cannot be found.

    :return: template 404.html
    :rtype: template
    """
    return render_template('error/404.html'), 404
@app.errorhandler(500)
def internal_error(error):
    """
    Render the 500 page on an internal server error.

    :return: template 500.html
    :rtype: template
    """
    return render_template('error/500.html'), 500
| 2.984375 | 3 |
app.py | KristoferLintonReid/hex_cam | 0 | 12766873 | <gh_stars>0
from flask import Flask, request, jsonify
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from IPython.display import FileLink
from imutils import paths
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
np.random.seed(0)
import pickle
import random
import shutil
import cv2
import os
import keras
from keras.applications import VGG16
from keras import models, layers, optimizers
from keras.preprocessing.image import ImageDataGenerator
from flask_cors import CORS,cross_origin
app = Flask(__name__)
# Allow cross-origin requests so a separately-hosted front-end can call /predict.
CORS(app, support_credentials=True)
# load the models
# Three classical COVID classifiers plus a Keras CNN for pneumonia; all are
# loaded once at import time and shared across requests.
RFC_Model = pickle.load(open('./model/RF_model.pkl', 'rb'))
LR_Model = pickle.load(open('./model/LR_model.pkl', 'rb'))
SVM_Model = pickle.load(open('./model/SVM_model.pkl', 'rb'))
reconstructed_model = keras.models.load_model('./model/keras_model')
#### define functions
RF_list = []  # NOTE(review): unused module global; the function builds its own local list
def predict_Covid_RF(img_file):
    'function to take image and return prediction'
    # Read the image from disk, resize to 224x224, flatten to one feature row.
    test_image = cv2.imread(img_file)
    # NOTE(review): cv2.IMREAD_GRAYSCALE is an imread() flag, not a cvtColor
    # conversion code; as a code it maps to COLOR_BGR2BGRA (value 0), so the
    # image becomes 4-channel rather than grayscale. Left untouched because
    # the pickled model was presumably trained with this same preprocessing
    # -- confirm before changing.
    test_image = cv2.cvtColor(test_image, cv2.IMREAD_GRAYSCALE)
    test_image = cv2.resize(test_image, (224, 224))
    test_img = test_image.flatten().reshape(1, -1)
    # Class probability for index 0 plus the hard label.
    RFC_pred_prob = RFC_Model.predict_proba(test_img)
    RFC_pred = RFC_Model.predict(test_img)
    RF_list = ['RF_Covid', RFC_pred_prob[0,0], RFC_pred[0]]
    return (RF_list)
LR_list = []  # NOTE(review): unused module global; the function builds its own local list
def predict_Covid_LR(img_file):
    'function to take image and return prediction'
    test_image = cv2.imread(img_file)
    # NOTE(review): same IMREAD_GRAYSCALE-as-cvtColor-code oddity as in
    # predict_Covid_RF above; kept to match the model's training preprocessing.
    test_image = cv2.cvtColor(test_image, cv2.IMREAD_GRAYSCALE)
    test_image = cv2.resize(test_image, (224, 224))
    test_img = test_image.flatten().reshape(1, -1)
    LR_pred_prob = LR_Model.predict_proba(test_img)
    LR_pred = LR_Model.predict(test_img)
    LR_list = ['LR_Covid', LR_pred_prob[0,0], LR_pred[0]]
    return (LR_list)
def iload(filename):
    """Load an image file as a (1, 150, 150, 3) float32 batch scaled to [0, 1].

    FIX: the original body used `Image` (PIL) and `transform` (skimage),
    neither of which is imported anywhere in this module, so every call
    raised NameError. Rewritten on top of cv2/numpy, which ARE imported.
    """
    np_image = cv2.imread(filename)
    # cv2 decodes to BGR; PIL (the apparent original intent) yields RGB, so
    # reorder channels to preserve what the CNN was presumably trained on.
    np_image = cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)
    np_image = cv2.resize(np_image, (150, 150)).astype('float32') / 255
    np_image = np.expand_dims(np_image, axis=0)
    return np_image
def pneumonia_CNN(img_file):
    """Classify a chest X-ray as 'normal' or 'pneumonia' with the Keras CNN.

    Returns ['Pneumonia_CNN', probability-of-normal, label].
    """
    # FIX: the original called iload() twice (discarding the first result),
    # called reconstructed_model.predict() three times for one image, and
    # left out_pneu undefined when the prediction was exactly 0.5 (NameError).
    # One load, one predict, one exhaustive branch.
    image = iload(img_file)
    prediction = reconstructed_model.predict(image)
    pred_pnemonia = 1 - prediction
    out_pneu = 'normal' if prediction < 0.5 else 'pneumonia'
    arrayList = ['Pneumonia_CNN', pred_pnemonia[0, 0], out_pneu]
    return (arrayList)
SVM_list = []  # NOTE(review): unused module global; the function builds its own local list
def predict_Covid_SVM(img_file):
    'function to take image and return prediction'
    test_image = cv2.imread(img_file)
    # NOTE(review): same IMREAD_GRAYSCALE-as-cvtColor-code oddity as in
    # predict_Covid_RF above; kept to match the model's training preprocessing.
    test_image = cv2.cvtColor(test_image, cv2.IMREAD_GRAYSCALE)
    test_image = cv2.resize(test_image, (224, 224))
    test_img = test_image.flatten().reshape(1, -1)
    # This SVM exposes no predict_proba, so the probability slot is 'Na'.
    SVM_pred_prob = 'Na'
    SVM_pred = SVM_Model.predict(test_img)
    SVM_list = ['SVM_Covid', SVM_pred_prob, SVM_pred[0]]
    return (SVM_list)
def predict_single(img_file):
    """Run every model on one image path; return a table (ndarray of str)
    with a header row followed by one row per model."""
    rows = [
        ['model', 'probability', 'prediction'],
        predict_Covid_RF(img_file),
        predict_Covid_LR(img_file),
        predict_Covid_SVM(img_file),
        pneumonia_CNN(img_file),
    ]
    return np.vstack(rows)
# route for prediction
@app.route('/predict', methods=['POST'])
def predict():
    """Accept an uploaded image and return all model predictions as JSON."""
    import tempfile
    # FIX: request.files['image'] is a werkzeug FileStorage, but the predictor
    # helpers expect a filesystem path (they call cv2.imread on it). Persist
    # the upload to a temp file first, and convert the numpy result with
    # tolist() -- jsonify cannot serialize an ndarray directly.
    upload = request.files['image']
    tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
    tmp.close()
    try:
        upload.save(tmp.name)
        return jsonify(predict_single(tmp.name).tolist())
    finally:
        os.remove(tmp.name)
def api_response():
    # NOTE(review): this function has no @app.route decorator, so it is never
    # reachable via HTTP; the local import also shadows the module-level
    # jsonify. Confirm whether it is dead code that can be removed.
    from flask import jsonify
    if request.method == 'POST':
        return jsonify(**request.json)
if __name__ == '__main__':
    # Development server only; disable debug and use a WSGI server in production.
    app.debug = True
    app.run()
| 2.296875 | 2 |
src/emmental/scorer.py | vishalbelsare/emmental | 75 | 12766874 | <gh_stars>10-100
"""Emmental scorer."""
import logging
from functools import partial
from typing import Callable, Dict, List, Union
from numpy import ndarray
from emmental.metrics import METRICS
from emmental.utils.utils import array_to_numpy
logger = logging.getLogger(__name__)
class Scorer(object):
    """A class to score tasks.

    Args:
      metrics: A list of metric names provided by emmental
          (e.g., accuracy), defaults to [].
      customize_metric_funcs: a dict of customized metrics where key is the
          metric name and value is the metric function which takes golds,
          preds, probs, uids as input, defaults to {}.
    """

    def __init__(
        self, metrics: List[str] = [], customize_metric_funcs: Dict[str, Callable] = {}
    ) -> None:
        """Initialize Scorer."""
        # NOTE(review): mutable default arguments are shared across calls;
        # harmless here only because neither default is ever mutated.
        self.metrics: Dict[str, Callable] = dict()
        for metric in metrics:
            if metric in METRICS:
                self.metrics[metric] = METRICS[metric]  # type: ignore
            elif metric.startswith("accuracy@"):
                # "accuracy@k" resolves to top-k accuracy with k parsed from the name.
                self.metrics[metric] = partial(
                    METRICS["accuracy"], topk=int(metric.split("@")[1])  # type: ignore
                )
            else:
                raise ValueError(f"Unrecognized metric: {metric}")

        # Customized metrics override built-in metrics of the same name.
        self.metrics.update(customize_metric_funcs)

    def score(
        self,
        golds: Union[ndarray, List[ndarray]],
        preds: Union[ndarray, List[ndarray]],
        probs: Union[ndarray, List[ndarray]],
        uids: Union[List[str], None] = None,
    ) -> Dict[str, float]:
        """Calculate the score.

        Args:
          golds: Ground truth values.
          probs: Predicted probabilities.
          preds: Predicted values.
          uids: Unique ids, defaults to None.

        Returns:
          Score dict mapping metric name to value (NaN when no examples).
        """
        metric_dict = dict()

        for metric_name, metric in self.metrics.items():
            # handle no examples
            if len(golds) == 0:
                metric_dict[metric_name] = float("nan")
                continue

            # Best-effort conversion to numpy; ragged inputs are passed as-is.
            try:
                golds = array_to_numpy(golds)
            except ValueError:
                pass

            try:
                probs = array_to_numpy(probs) if probs is not None else probs
            except ValueError:
                pass

            try:
                preds = array_to_numpy(preds) if preds is not None else preds
            except ValueError:
                pass

            res = metric(golds, preds, probs, uids)

            # A metric may return a dict of several named scores or one scalar.
            if isinstance(res, dict):
                metric_dict.update(res)
            else:
                metric_dict[metric_name] = res

        return metric_dict
| 2.515625 | 3 |
Curso de Python Youtube/Aula15-Operacao-ternaria/Aula15-Operacao-ternaria.py | Rafael-Inacio/CursoPython | 0 | 12766875 |
# Ternary (conditional expression) demo: choose the message in one line
# instead of the commented-out if/else block below.
looged_user = False
# if looged_user:
# msg = 'Usuário logado.'
# else:
# msg = 'Usuário não está logado.'
msg = 'Usuário logado' if looged_user else 'Usuário não está logado.'
print(msg)
| 2.5 | 2 |
vedro_pyppeteer/_screenshot_path.py | nikitanovosibirsk/vedro-pyppeteer | 0 | 12766876 | from pathlib import Path
from typing import Union
__all__ = ("ScreenshotPath",)
class ScreenshotPath:
    """Composes the on-disk location of a screenshot from scenario metadata."""

    def __init__(self, dir_: Path) -> None:
        self.dir = dir_
        # Every part is optional; each one that is set contributes a piece
        # of the final path (see resolve()).
        self.rerun: Union[int, None] = None
        self.timestamp: Union[int, None] = None
        self.scenario_path: Union[Path, None] = None
        self.scenario_subject: Union[str, None] = None
        self.step_name: Union[str, None] = None
        self.tab_index: Union[int, None] = None

    def resolve(self) -> Path:
        """Build the path: <dir>[/<scenario-relative-dir>]/<composed name>.png."""
        target_dir = self.dir
        if self.scenario_path is not None:
            # Mirror the scenario's location (relative to cwd, suffix dropped)
            # underneath the screenshot directory.
            relative = self.scenario_path.relative_to(Path().resolve())
            target_dir = self.dir.joinpath(relative.with_suffix(""))

        name = "screenshot" if self.scenario_subject is None else self.scenario_subject
        if self.rerun is not None:
            name = "[{}]{}".format(self.rerun, name)
        if self.timestamp is not None:
            name = "{}__{}".format(name, self.timestamp)
        if self.step_name is not None:
            name = "{}__{}".format(name, self.step_name)
        if self.tab_index is not None:
            name = "tab{}__{}".format(self.tab_index, name)
        return target_dir / (name + ".png")

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self.resolve()}>"
| 2.453125 | 2 |
python/GravaCnpj.py | mozarthasse/cnpjs | 0 | 12766877 | <reponame>mozarthasse/cnpjs
import psycopg2
from FilaDeLinhas import FilaDeLinhas
def GravacaoPgCopy(origem : FilaDeLinhas, tabela : str):
    """Bulk-load the rows streamed by `origem` into table `tabela` via COPY.

    Uses ';' as the column separator and the empty string as NULL.
    FIX: the original leaked the connection and cursor on every call (and on
    any exception before commit); both are now released deterministically.
    """
    # NOTE(review): credentials are hard-coded; move them to configuration.
    cond = psycopg2.connect(host='postgres-compose', dbname="postgres", user="postgres", password="<PASSWORD>")
    try:
        with cond.cursor() as curd:
            curd.copy_from(origem, tabela, sep=';', null='')
        cond.commit()
    finally:
        cond.close()
| 2.28125 | 2 |
app/api/serializers/post.py | parkgeonhu/drf-boilerplate | 1 | 12766878 | from app.models import *
from rest_framework import serializers
from .user import *
class PostSerializer(serializers.ModelSerializer):
    """Serializes Post instances, embedding the author as read-only user data."""
    author = UserSerializer(read_only=True)

    def create(self, validated_data):
        # Matches ModelSerializer's default create(); kept explicit as a
        # hook for future per-field handling.
        return Post.objects.create(**validated_data)

    class Meta:
        model = Post
        # FIX: ('__all__') is just the string '__all__' in redundant parens,
        # not a tuple -- written plainly to avoid suggesting otherwise.
        fields = '__all__'
main.py | Rhoana/soma_segmentation | 0 | 12766879 | import tensorflow as tf
import dataIO
import numpy as np
from datetime import datetime
from model import model
from parameters import *
# preprocess input data
def prepareDataTraining(seg_data, somae_data_raw):
    """Build shuffled train/validation (image, mask) pairs for training.

    seg_data is a 3D label volume (z, y, x); somae_data_raw a same-shaped
    soma mask (0 = background) -- TODO confirm shapes against the H5 inputs.
    Returns 4D image stacks with 2*depth+1 z-neighbour channels and 3D masks,
    split at val_data_size along the z axis.
    """
    # Soma mask keeps the segmentation label wherever a soma voxel exists.
    somae_data = seg_data.copy()
    somae_data[somae_data_raw==0]=0
    seg_data = seg_data[:,:network_size,:network_size]
    somae_data = somae_data[:,:network_size,:network_size]
    # create object to hold elements for 3D input tensors of depth(*2)+1
    seg_deep = np.zeros((seg_data.shape[0],seg_data.shape[1],seg_data.shape[2],depth*2+1), dtype=np.uint8)
    # populate deep segmentation tensor
    # Channel depth+d / depth-d holds the slice d planes below / above,
    # zero-padded at the volume borders.
    seg_deep[:,:,:,depth]=seg_data
    for d in range(1,depth+1):
        seg_deep[:-d,:,:,depth+d]=seg_data[d:,:,:]
        seg_deep[d:,:,:,depth-d]=seg_data[:-d,:,:]
    # cut training and validation dataset
    valid_seg = seg_deep[:val_data_size,:,:,:]
    valid_mask = somae_data[:val_data_size,:,:]
    train_seg = seg_deep[val_data_size:,:,:,:]
    train_mask = somae_data[val_data_size:,:,:]
    # shuffle both training and validation data
    # Writing through the views is safe: fancy indexing on the right-hand
    # side materialises a copy before it is assigned back.
    valid_ids = np.random.permutation(valid_seg.shape[0])
    train_ids = np.random.permutation(train_seg.shape[0])
    valid_seg[:,:,:] = valid_seg[valid_ids,:,:,:]
    valid_mask[:,:,:] = valid_mask[valid_ids,:,:]
    train_seg[:,:,:] = train_seg[train_ids,:,:,:]
    train_mask[:,:,:] = train_mask[train_ids,:,:]
    return train_seg, train_mask, valid_seg, valid_mask
# preprocess input data for inference
def prepareDataPrediction(seg_data):
    """Build (z, y, x, 2*depth+1) input stacks for inference.

    Channel `depth` holds each slice itself; channels depth+d / depth-d hold
    the slice d planes below / above, zero-padded at the volume borders.
    """
    seg_data = seg_data[:, :network_size, :network_size]
    stack_count = depth * 2 + 1
    stacked = np.zeros(seg_data.shape + (stack_count,), dtype=np.uint8)
    stacked[:, :, :, depth] = seg_data
    for offset in range(1, depth + 1):
        stacked[:-offset, :, :, depth + offset] = seg_data[offset:, :, :]
        stacked[offset:, :, :, depth - offset] = seg_data[:-offset, :, :]
    return stacked[:, :, :, :]
# define the weighted loss function
class WeightedBinaryCrossEntropy(tf.losses.Loss):
    """Binary cross-entropy with an extra weight on the positive class.

    Args:
      pos_weight: Scalar to affect the positive labels of the loss function.
      weight: Scalar to affect the entirety of the loss function.
      from_logits: Whether to compute loss from logits or the probability.
      reduction: Type of tf.losses.Reduction to apply to loss.
      name: Name of the loss function.
    """
    def __init__(self, pos_weight, weight, from_logits=False,
                 reduction=tf.losses.Reduction.AUTO,
                 name='weighted_binary_crossentropy'):
        super(WeightedBinaryCrossEntropy, self).__init__(reduction=reduction,
                                                         name=name)
        self.pos_weight = pos_weight
        self.weight = weight
        self.from_logits = from_logits

    def call(self, y_true, y_pred):
        if not self.from_logits:
            # Manually calculate the weighted cross entropy.
            # Formula is qz * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
            # where z are labels, x is logits, and q is the weight.
            # Since the values passed are from sigmoid (assuming in this case)
            # sigmoid(x) will be replaced by y_pred
            # qz * -log(sigmoid(x)) 1e-6 is added as an epsilon to stop passing a zero into the log
            x_1 = y_true * self.pos_weight * -tf.math.log(y_pred + 1e-6)
            # (1 - z) * -log(1 - sigmoid(x)). Epsilon is added to prevent passing a zero into the log
            x_2 = (1 - y_true) * -tf.math.log(1 - y_pred + 1e-6)
            return tf.add(x_1, x_2) * self.weight
        # Use built in function
        return tf.nn.weighted_cross_entropy_with_logits(y_true, y_pred, self.pos_weight) * self.weight
# model weights
class model_weights:
    """Holds the UNET's trainable tf.Variables plus checkpoint save/restore."""
    def __init__(self, shapes):
        # One randomly-initialised tf.Variable per entry of `shapes`; the
        # ordering matches the shapes table built in initializeModel().
        self.values = []
        self.checkpoint_path = './ckpt_'+ datetime.now().strftime("%Y%m%d-%H%M%S")+'/'
        initializer = tf.initializers.RandomNormal()
        def get_weight( shape , name ):
            return tf.Variable( initializer( shape ) , name=name , trainable=True , dtype=tf.float32 )
        for i in range( len( shapes ) ):
            self.values.append( get_weight( shapes[ i ] , 'weight{}'.format( i ) ) )
        self.ckpt = tf.train.Checkpoint(**{f'values{i}': v for i, v in enumerate(self.values)})
    def saveWeights(self):
        # Each call writes a new numbered checkpoint under checkpoint_path.
        self.ckpt.save(self.checkpoint_path)
    def restoreWeights(self, ckpt_restore):
        print("restoring weights from: " + str(ckpt_restore))
        status = self.ckpt.restore(ckpt_restore)
        status.assert_consumed() # Optional check
def initializeModel(restore, ckpt_restore):
    """Create weights, loss, optimizer and metric objects for the UNET.

    When `restore` is True, weights are loaded from the `ckpt_restore`
    checkpoint path. Returns the full training toolkit as one tuple.
    """
    # filters for the UNET layers:
    # filters = [depth*2+1,64,128,256,512,1024,1] #original UNET
    filters = [depth*2+1, 16,32, 64, 128,256,1] # modified, lighter UNET
    # shapes of the weight tensors
    shapes = [
        [ 3, 3, filters[0], filters[1]], #L11 -> L12
        [ 3, 3, filters[1], filters[1]], #L12 -> L13
        [ 3, 3, filters[1], filters[2]], #L21 -> L22
        [ 3, 3, filters[2], filters[2]], #L22 -> L23
        [ 3, 3, filters[2], filters[3]], #L31 -> L32
        [ 3, 3, filters[3], filters[3]], #L32 -> L33
        [ 3, 3, filters[3], filters[4]], #L41 -> L42
        [ 3, 3, filters[4], filters[4]], #L42 -> L43
        [ 3, 3, filters[4], filters[5]], #L51 -> L52
        [ 3, 3, filters[5], filters[5]], #L52 -> L53
        [ 2, 2, filters[4], filters[5]], #L53 -> L44
        [ 3, 3, 2*filters[4], filters[4]], #L44 -> L45
        [ 3, 3, filters[4], filters[4]], #L45 -> L46
        [ 2, 2, filters[3], filters[4]], #L46 -> L34
        [ 3, 3, 2*filters[3], filters[3]], #L34 -> L35
        [ 3, 3, filters[3], filters[3]], #L35 -> L36
        [ 2, 2, filters[2], filters[3]], #L36 -> L24
        [ 3, 3, 2*filters[2], filters[2]], #L24 -> L25
        [ 3, 3, filters[2], filters[2]], #L25 -> L26
        [ 2, 2, filters[1], filters[2]], #L25 -> L14
        [ 3, 3, 2*filters[1], filters[1]], #L14 -> L15
        [ 3, 3, filters[1], filters[1]], #L15 -> L16
        [ 1, 1, filters[1], filters[6]], #L16 -> L17
    ]
    weights = model_weights(shapes)
    if restore:
        weights.restoreWeights(ckpt_restore)
    # initialize loss
    # pos_weight=12 compensates for somata being rare relative to background.
    w_loss = WeightedBinaryCrossEntropy(12, 1)
    # initialize optimizer
    optimizer = tf.optimizers.Adam(learning_rate)
    # initialize accuracy objects
    train_acc = tf.metrics.BinaryAccuracy()
    valid_acc = tf.metrics.BinaryAccuracy()
    train_loss = tf.metrics.Mean()
    valid_loss = tf.metrics.Mean()
    TP = tf.keras.metrics.TruePositives()
    FP = tf.keras.metrics.FalsePositives()
    TN = tf.keras.metrics.TrueNegatives()
    FN = tf.keras.metrics.FalseNegatives()
    return weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN
# define train step
def train_step(model, weights, inputs, gt, optimizer, w_loss, train_loss, train_acc):
    """One gradient step; updates train_loss/train_acc in place and returns
    the optimizer (unchanged object, kept for the caller's assignment style)."""
    with tf.GradientTape() as tape:
        pred = model(inputs, weights)
        current_loss = w_loss( gt, pred)
    grads = tape.gradient(current_loss, weights.values )
    optimizer.apply_gradients(zip(grads , weights.values ) )
    train_loss.update_state(current_loss)
    train_acc.update_state(gt, pred)
    return optimizer
#define prediction step
def predict_step(model, weights, inputs, gt, w_loss, valid_loss, valid_acc, TP, FP, TN, FN): #TODO remove passing of model here
    """Forward pass without gradients; updates validation loss/accuracy and
    the confusion-matrix metrics, then returns the raw predictions."""
    pred = model(inputs, weights)
    current_loss = w_loss( gt, pred)
    valid_loss.update_state(current_loss)
    valid_acc.update_state(gt, pred)
    TP.update_state(gt,pred)
    FP.update_state(gt,pred)
    TN.update_state(gt,pred)
    FN.update_state(gt,pred)
    return pred
def trainOnEpochs(train_seg, train_mask, valid_seg, valid_mask, weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN):
    """Training loop: per epoch, train on one random ID per batch, validate,
    log metrics/images to TensorBoard and checkpoint the weights.

    Each batch is binarised against a single randomly chosen segment ID so
    the network always sees a one-object-vs-background problem.
    """
    current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
    train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
    valid_log_dir = 'logs/gradient_tape/' + current_time + '/valid'
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    valid_summary_writer = tf.summary.create_file_writer(valid_log_dir)
    # NOTE(review): valid_loss_best is never read again -- "save best only"
    # logic looks unfinished; weights are saved every epoch below.
    valid_loss_best = 1000000000
    for epoch in range(epochs):
        # The metrics printed here are from the previous epoch's validation
        # pass. NOTE(review): on epoch 0 the metrics are still empty, so
        # TPR/FPR compute 0/0 and log nan.
        print("TP: ")
        print(TP.result().numpy())
        print("FN: ")
        print(FN.result().numpy())
        print("FP: ")
        print(FP.result().numpy())
        print("TN: ")
        print(TN.result().numpy())
        TPR = TP.result().numpy()/(TP.result().numpy()+FN.result().numpy())
        FPR = FP.result().numpy()/(FP.result().numpy()+TN.result().numpy())
        print("TPR: ")
        print(TPR)
        print("FPR: ")
        print(FPR)
        with train_summary_writer.as_default():
            tf.summary.scalar('loss', train_loss.result(), step=epoch)
            tf.summary.scalar('accuracy', train_acc.result(), step=epoch)
        with valid_summary_writer.as_default():
            tf.summary.scalar('loss', valid_loss.result(), step=epoch)
            tf.summary.scalar('accuracy', valid_acc.result(), step=epoch)
            tf.summary.scalar('TPR', TPR, step=epoch)
            tf.summary.scalar('FPR', FPR, step=epoch)
        train_acc.reset_states()
        valid_acc.reset_states()
        train_loss.reset_states()
        valid_loss.reset_states()
        print("---------------------")
        print("Epoch: " + str(epoch))
        for k in np.arange(0,train_seg.shape[0],batch_size):
            image = train_seg[k:k+batch_size,:,:,:].copy()
            mask = train_mask[k:k+batch_size,:,:,None].copy()
            # choose random ID (ignoring the background label 0)
            ids_present = np.unique(mask)
            if ids_present[0]==0: ids_present=ids_present[1:]
            id_rand = np.random.choice(ids_present)
            # binarize image and mask against the chosen ID
            image[image!=id_rand]=0
            image[image==id_rand]=1
            mask[mask!=id_rand]=0
            mask[mask==id_rand]=1
            image = tf.convert_to_tensor(image, dtype=tf.float32 )
            mask_gt = tf.convert_to_tensor(mask, dtype=tf.float32 )
            optimizer = train_step(model, weights, image, mask_gt, optimizer, w_loss, train_loss, train_acc)
        for j in np.arange(0,valid_seg.shape[0],batch_size):
            image = valid_seg[j:j+batch_size,:,:,:].copy()
            mask = valid_mask[j:j+batch_size,:,:,None].copy()
            # choose random ID (same scheme as in training)
            ids_present = np.unique(mask)
            if ids_present[0]==0: ids_present=ids_present[1:]
            id_rand = np.random.choice(ids_present)
            # binarize
            image[image!=id_rand]=0
            image[image==id_rand]=1
            mask[mask!=id_rand]=0
            mask[mask==id_rand]=1
            image = tf.convert_to_tensor( image , dtype=tf.float32 )
            mask_gt = tf.convert_to_tensor( mask , dtype=tf.float32 )
            mask_pred = predict_step(model, weights, image, mask_gt, w_loss, valid_loss, valid_acc, TP, FP, TN, FN).numpy()
            # Every 10th epoch log input / ground truth / prediction triplets.
            if epoch%10==0:
                with valid_summary_writer.as_default():
                    tf.summary.image("valid-epoch"+str(epoch)+"j-"+str(j), tf.concat([tf.expand_dims(image[:,:,:,depth],3), mask_gt, mask_pred],axis=1), step=epoch, max_outputs=5)
        print("Train loss: " + str(train_loss.result().numpy()))
        print("Train accu: " + str(train_acc.result().numpy()))
        print("Valid loss: " + str(valid_loss.result().numpy()))
        print("Valid accu: " + str(valid_acc.result().numpy()))
        weights.saveWeights()
        print("Weights saved ------------------")
def Train(restore, ckpt_restore):
    """Train on the mouse dataset; optionally restore weights from
    `ckpt_restore` first (when `restore` is True)."""
    # Mouse
    seg_filepath = train_seg_in_filepath
    somae_filepath = train_somae_in_filepath
    seg_data = dataIO.ReadH5File(seg_filepath, [1])
    somae_data = dataIO.ReadH5File(somae_filepath, [1])
    train_seg, train_mask, valid_seg, valid_mask = prepareDataTraining(seg_data, somae_data)
    weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN = initializeModel(restore=restore, ckpt_restore=ckpt_restore)
    trainOnEpochs(train_seg, train_mask, valid_seg, valid_mask, weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN)
def Predict(ckpt_restore):
    """Run soma-mask inference on the Zebrafinch volume and write it to H5.

    Each segmentation ID is processed in isolation: the prepared stacks are
    binarised per ID, pushed through the network batch by batch, and the
    thresholded (p > 0.5) predictions are accumulated into the output mask.
    """
    # Zebrafinch
    seg_filepath = predict_seg_in_filepath
    seg_data = dataIO.ReadH5File(seg_filepath, [1])
    seg_data = seg_data[:,:network_size,:network_size]
    somae_mask_out = np.zeros((seg_data.shape[0],seg_data.shape[1],seg_data.shape[2]), dtype=np.float64)
    weights, w_loss, optimizer, train_acc, valid_acc, train_loss, valid_loss, TP, FP, TN, FN = initializeModel(restore=True, ckpt_restore=ckpt_restore)
    seg_data_prep = prepareDataPrediction(seg_data)
    unique_ids = np.unique(seg_data)
    # NOTE(review): ID 0 (background) is also iterated; its binarised volume
    # is empty so the inner check skips all batches -- wasted work, confirm.
    for ID in unique_ids:
        print("Processind ID " + str(ID))
        seg_data_filtered = seg_data_prep.copy()
        seg_data_filtered[seg_data_filtered!=ID]=0
        # mask the data to be binary
        seg_data_filtered[seg_data_filtered>0]=1
        for j in np.arange(0,seg_data_filtered.shape[0],batch_size):
            image = seg_data_filtered[j:j+batch_size,:,:,:]
            image = tf.convert_to_tensor( image , dtype=tf.float32 )
            # Skip batches whose centre slice contains no voxels of this ID.
            if np.max(image[:,:,:,depth])!=0:
                mask_pred = tf.squeeze(model(image, weights)).numpy()
                mask_pred[mask_pred<=0.5]=0
                mask_pred[mask_pred>0.5]=1
                # Keep predictions only where the binary input slice is set.
                mask_pred = image[:,:,:,depth]*mask_pred
                somae_mask_out[j:j+batch_size,:,:] = somae_mask_out[j:j+batch_size,:,:]+mask_pred[:,:,:]
        del seg_data_filtered
    somae_mask_out = somae_mask_out.astype(np.uint64)
    dataIO.WriteH5File(somae_mask_out, somae_prediction_out_filepath, "main")
utils/metrics.py | chansoopark98/CSNet-SOD | 0 | 12766880 | import tensorflow as tf
# class MeanIOU(tf.keras.metrics.MeanIoU):
# def update_state(self, y_true, y_pred, sample_weight=None):
# # y_true = tf.squeeze(y_true, -1)
# y_pred = tf.nn.softmax(y_pred)
# y_true = tf.argmax(y_true, axis=-1)
#
# return super().update_state(y_true, y_pred, sample_weight)
# class Precision(tf.keras.metrics.Precision):
# def update_state(self, y_true, y_pred, sample_weight=None):
# # y_true = tf.squeeze(y_true, -1)
# y_pred = tf.nn.softmax(y_pred)
# y_true = tf.argmax(y_true, axis=-1)
#
# return super().update_state(y_true, y_pred) | 2.453125 | 2 |
test/integration/test_scopes.py | BBVA/MIST | 13 | 12766881 | import os
import platform
import pytest
from mist.action_run import execute_from_text
CHECK_FILE = "scopes.mist"
@pytest.mark.asyncio
async def test_check_if_bool_functions(examples_path):
    # NOTE(review): the name says "bool functions" but this runs scopes.mist;
    # consider renaming to match CHECK_FILE.
    with open(os.path.join(examples_path, CHECK_FILE), "r") as f:
        content = f.read()
    output = await execute_from_text(content)
    # Expected output pins variable visibility across nested scopes.
    assert """Test before - global: Global, outerLocal: Local
InnerTest before - global: test, outerLocal: Local, local: local
InnerTest after - global: innerTest, outerLocal: innerLocal, local: local
Test after - global: innerTest, outerLocal: innerLocal
Test: global: innerTest
""" == output
| 2.171875 | 2 |
Python/simple_array_sum.py | AntonioMM8506/HackerRank | 0 | 12766882 | <gh_stars>0
#!/bin/python
from __future__ import print_function
import os
import sys
#@param receives an iterable (list, map object, ...) as an argument
#@return the sum of all the numbers in the iterable; 0 when empty
def simpleArraySum(ar):
    """Return the sum of all numbers in `ar`.

    Accepts any iterable of numbers; an empty iterable sums to 0.
    """
    # Built-in sum() replaces the manual accumulate loop and also avoids
    # materializing the iterable into a list first.
    return sum(ar)
#End of simpleArraySum
if __name__ == '__main__':
    # HackerRank harness: the result is written to the file named by the
    # OUTPUT_PATH environment variable as well as printed.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    ar_count = int(input())  # declared element count (unused by the sum)
    ar = map(int, input().rstrip().split())
    #print(list(ar))
    result = simpleArraySum(ar)
    print(result)
    fptr.write(str(result) + '\n')
    fptr.close()
| 3.53125 | 4 |
tests/test_c_data_model/test_array.py | mrh1997/headlock | 2 | 12766883 | import pytest
from unittest.mock import patch
import headlock.c_data_model as cdm
from headlock.address_space.virtual import VirtualAddressSpace
@pytest.fixture
def carray_type(cint_type, addrspace):
    # A bound cint[10] array type shared by the tests below.
    return cdm.CArrayType(cint_type, 10, addrspace)
class TestCArrayType:
    def test_init_returnsArrayCProxy(self, unbound_cint_type):
        # Unbound construction: no address space; base type and count stored.
        carray_type = cdm.CArrayType(unbound_cint_type, 10)
        assert carray_type.__addrspace__ is None
        assert carray_type.base_type is unbound_cint_type
        assert carray_type.element_count == 10
    def test_init_onBaseTypeWithDifferentAddrSpaceSet_raisesInvalidAddressSpace(self, cint_type):
        # A base type already bound to another address space must be rejected.
        other_addrspace = VirtualAddressSpace()
        with pytest.raises(cdm.InvalidAddressSpaceError):
            _ = cdm.CArrayType(cint_type, 10, other_addrspace)
    def test_bind_bindsAlsoBaseElement(self, addrspace):
        # Binding the array propagates the address space to its base type.
        ctype = cdm.CProxyType(1)
        carray_type = cdm.CArrayType(ctype, 10)
        bound_carray_type = carray_type.bind(addrspace)
        assert bound_carray_type.base_type.__addrspace__ is addrspace
    def test_shallowIterSubTypes_returnsBaseType(self, carray_type):
        assert list(carray_type.shallow_iter_subtypes()) \
               == [carray_type.base_type]
    def test_eq_onSamePointer_returnsTrue(self, cint_type):
        # NOTE(review): this exercises CPointerType equality although the
        # class tests CArrayType -- looks copied from the pointer tests;
        # confirm whether a CArrayType equality check was intended instead.
        assert cdm.CPointerType(cint_type, 32, 'little') \
               == cdm.CPointerType(cint_type, 32, 'little')
@pytest.mark.parametrize('diff_carr_type', [
"othertype",
cdm.CArrayType(cdm.CIntType('x', 32, True, cdm.ENDIANESS), 10)
.with_attr('attr'),
cdm.CArrayType(cdm.CIntType('x', 32, True, cdm.ENDIANESS), 1000),
cdm.CArrayType(cdm.CIntType('y', 16, False, cdm.ENDIANESS), 10)])
def test_eq_onSamePointer_returnsTrue(self, diff_carr_type):
basetype = cdm.CIntType('x', 32, True, cdm.ENDIANESS)
assert cdm.CArrayType(basetype, 10) != diff_carr_type
    def test_len_returnsSizeOfObject(self, carray_type):
        # len() of the type is the element count, not the byte size.
        assert len(carray_type) == carray_type.element_count
    def test_sizeof_returnsSizeInBytes(self, carray_type):
        assert carray_type.sizeof \
               == carray_type.element_count * carray_type.base_type.sizeof
    @patch.object(cdm.CIntType, 'null_val')
    def test_nullValue_ok(self, null_val, carray_type):
        # The array null value is the base type's null value per element.
        assert carray_type.null_val == [null_val] * carray_type.element_count
    # C declaration rendering, including the precedence cases where the
    # array suffix [] binds tighter than the pointer prefix *.
    def test_cDefinition_onRefDef_returnsWithRefDef(self, cint_type):
        assert cint_type.array(12).c_definition('x') == 'cint x[12]'
    def test_cDefinition_onNoRefDef_returnsWithoutRefDef(self, cint_type):
        assert cint_type.array(12).c_definition() == 'cint [12]'
    def test_cDefinition_onArrayOfArrays_ok(self, cint_type):
        assert cint_type.array(11).array(22).c_definition() == 'cint [22][11]'
    def test_cDefinition_onArrayOfPtr_ok(self, cint_type):
        assert cint_type.ptr.array(10).c_definition('x') == 'cint *x[10]'
    def test_cDefinition_onPtrToArray_ok(self, cint_type):
        assert cint_type.array(10).ptr.c_definition('x') == 'cint (*x)[10]'
    def test_repr_returnsBaseNamePlusArray(self, unbound_cint_type):
        # repr encodes attrs and element count into the type name.
        cptr_type = cdm.CArrayType(unbound_cint_type, 123).with_attr('attr')
        assert repr(cptr_type) == 'ts.cint_attr_array123'
def test_convertToCRepr_onPyIterable_initializesElementsWithIterablePlusNullVals(self):
carray_type = cdm.CArrayType(cdm.CIntType('i', 32, False, 'big'), 5)
c_repr = carray_type.convert_to_c_repr([0x11, 0x22, 0x33445566])
assert c_repr == b'\x00\x00\x00\x11\x00\x00\x00\x22\x33\x44\x55\x66' \
b'\x00\x00\x00\x00\x00\x00\x00\x00'
def test_convertToCRepr_onUtf8WithBigCodepoint_returnsArrayOfCorrectSize(self):
carray_type = cdm.CArrayType(cdm.CIntType('i', 32, False, 'big'), 4)
c_repr = carray_type.convert_to_c_repr('A\u1122')
assert c_repr == b'\x00\x00\x00\x41\x00\x00\x11\x22' \
b'\x00\x00\x00\x00\x00\x00\x00\x00'
def test_convertFromCRepr_returnsArrayOfCorrectSize(self):
carray_type = cdm.CArrayType(cdm.CIntType('i', 32, False, 'big'), 5)
py_repr = carray_type.convert_from_c_repr(
b'\x00\x00\x00\x11\x00\x00\x00\x22\x33\x44\x55\x66')
assert py_repr == [0x11, 0x22, 0x33445566, 0, 0]
def test_init_onConstArray_ok(self, cint_type):
carray_type = cint_type.with_attr('const').array(1)
_ = carray_type()
@pytest.mark.parametrize('size', [1, 4])
def test_getAlignment_returnsAlignmentOfBase(self, size, unbound_cint_type):
with patch.object(cdm.CIntType, 'alignment', size):
carray_type = cdm.CArrayType(unbound_cint_type, 4)
assert carray_type.alignment == size
class TestCArray:
    """Tests for cdm.CArray instances backed by a VirtualAddressSpace."""
    def create_int_carray_obj(self, bits, init_val):
        # Helper: build a bound CArray of unsigned ints of the given bit width
        # whose backing virtual memory is pre-filled with init_val.
        cint_type = cdm.CIntType('i'+str(bits), bits, False, cdm.ENDIANESS)
        content = b''.join(map(cint_type.convert_to_c_repr, init_val))
        addrspace = VirtualAddressSpace(content)
        carray_type = cdm.CArrayType(cint_type.bind(addrspace), len(init_val),
                                     addrspace)
        return cdm.CArray(carray_type, 0)
    def test_str_returnsStringWithZeros(self):
        test_vector = [ord('x'), ord('Y'), 0]
        carray_obj = self.create_int_carray_obj(16, test_vector)
        assert str(carray_obj) == 'xY\0'
    def test_getCStr_onZeroTerminatedStr_returnsBytes(self):
        # c_str stops at the first NUL element.
        test_vector = [ord('X'), ord('y'), 0]
        carray_obj = self.create_int_carray_obj(16, test_vector)
        assert carray_obj.c_str == b'Xy'
    def test_setCStr_onPyStr_changesArrayToZeroTerminatedString(self):
        carray_obj = self.create_int_carray_obj(16, [111]*6)
        carray_obj.c_str = 'Xy\0z'
        assert carray_obj.val == [ord('X'), ord('y'), 0, ord('z'), 0, 0]
    def test_setCStr_onTooLongPyStr_raisesValueError(self):
        # 3 elements cannot hold 'Xyz' plus the implicit terminator.
        array = self.create_int_carray_obj(16, [111] * 3)
        with pytest.raises(ValueError):
            array.c_str = 'Xyz'
    def test_getUnicodeStr_onZeroTerminatedStr_returnsPyString(self):
        test_vector = [0x1234, 0x56, 0]
        carray_obj = self.create_int_carray_obj(16, test_vector)
        assert carray_obj.unicode_str == '\u1234\x56'
    def test_setUnicodeStr_onPyStr_changesArrayToZeroTerminatedString(self):
        carray_obj = self.create_int_carray_obj(16, [111] * 6)
        carray_obj.unicode_str = '\u1234\x56\0\x78'
        assert carray_obj.val == [0x1234, 0x56, 0, 0x78, 0, 0]
    def test_getItem_returnsObjectAtNdx(self):
        carray_obj = self.create_int_carray_obj(16, [1, 2, 3, 4])
        assert carray_obj[2].__address__ \
               == carray_obj.__address__ + 2*carray_obj.base_type.sizeof
    def test_getItem_onNegativeIndex_returnsElementFromEnd(self):
        carray_obj = self.create_int_carray_obj(16, [0]*5)
        assert carray_obj[-2].__address__ == carray_obj[3].__address__
    def test_getItem_onSlice_returnsSubArray(self):
        carray_obj = self.create_int_carray_obj(16, [1, 2, 3, 4])
        sliced_carray_obj = carray_obj[1:3]
        assert isinstance(sliced_carray_obj, cdm.CArray)
        assert sliced_carray_obj.base_type == carray_obj.base_type
        assert sliced_carray_obj.__address__ == carray_obj[1].__address__
        assert sliced_carray_obj.element_count == 2
    def test_getItem_onSliceWithSteps_raiseValueError(self):
        # Strided slices have no contiguous-memory representation.
        carray_obj = self.create_int_carray_obj(16, [1, 2, 3, 4])
        with pytest.raises(ValueError):
            _ = carray_obj[0:4:2]
    def test_getItem_onSliceWithNegativeBoundaries_returnsPartOfArrayFromEnd(self):
        carray_obj = self.create_int_carray_obj(16, [0x11, 0x22, 0x33, 0x44])
        assert carray_obj[-3:-1] == [0x22, 0x33]
    def test_getItem_onSliceWithOpenEnd_returnsPartOfArrayUntilEnd(self):
        carray_obj = self.create_int_carray_obj(16, [0x11, 0x22, 0x33, 0x44])
        assert carray_obj[1:] == [0x22, 0x33, 0x44]
    def test_getItem_onSliceWithOpenStart_returnsPartOfArrayFromStart(self):
        carray_obj = self.create_int_carray_obj(16, [0x11, 0x22, 0x33, 0x44])
        assert carray_obj[:3] == [0x11, 0x22, 0x33]
    def test_add_returnsPointer(self):
        # array + n decays to a pointer to the n-th element.
        carray_obj = self.create_int_carray_obj(8, [0x11] * 32)
        added_cproxy = carray_obj + 3
        assert isinstance(added_cproxy, cdm.CPointer)
        assert added_cproxy.val == carray_obj[3].__address__
    def test_repr_returnsClassNameAndContent(self, cint_type, addrspace):
        carray_type = cdm.CArrayType(cint_type, 3, addrspace)
        carray_obj = carray_type([1, 2, 3])
        assert repr(carray_obj) == 'ts.cint_array3([1, 2, 3])'
    def test_iter_returnsIterOfElements(self):
        data = [0x11, 0x22, 0x33, 0x44]
        carray_obj = self.create_int_carray_obj(8, data)
        assert list(iter(carray_obj)) == data
| 2.203125 | 2 |
gltf/__init__.py | An2op/3dTileConverter | 0 | 12766884 | from .gltf import Glb
from .slicer import Slicer
from .element import Element
| 1.03125 | 1 |
opts.py | ricbl/vrgan | 9 | 12766885 | <reponame>ricbl/vrgan<gh_stars>1-10
"""User configuration file
File organizing all configurations that may be set by user when running the
train.py script.
Call "python train.py for a complete and formatted list of available user options.
"""
import argparse
import time
from random import randint
def str2bool(v):
    """Coerce an argparse string value (or an actual bool) to a bool.

    Accepts the usual yes/no spellings (case-insensitive); raises
    argparse.ArgumentTypeError for anything unrecognised.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def get_opt():
    """Parse and return the command-line options for training/validating VRGAN.

    Returns an argparse.Namespace; additionally attaches a unique
    ``timestamp`` attribute (datetime plus random 4-digit suffix) used to
    name the run's output folder.
    """
    parser = argparse.ArgumentParser(description='Configuration for running VRGAN code')
    parser.add_argument('--skip_train', type=str2bool, nargs='?', default='false',
                        help='If you just want to run validation, set this value to true.')
    # Loss multipliers below correspond to the terms of Eq. 6 in the paper.
    parser.add_argument('--lambda_reg', type=float, nargs='?', default=0.03,
                        help='Multiplier for the generator regularization loss L_{REG}. Appears on Eq. 6 on the paper.')
    parser.add_argument('--lambda_gxprime', type=float, nargs='?', default=0.3,
                        help='Multiplier for the generator loss L_{Gx\'}. Appears on Eq. 6 on the paper.')
    parser.add_argument('--lambda_rx', type=float, nargs='?', default=1.0,
                        help='Multiplier for the regressor loss L_{Rx}. Appears on Eq. 6 on the paper.')
    parser.add_argument('--lambda_rxprime', type=float, nargs='?', default=0.3,
                        help='Multiplier for the regressor loss L_{Rx\'}. Appears on Eq. 6 on the paper.')
    parser.add_argument('--batch_size', type=int, nargs='?', default=20,
                        help='Batch size for training the toy dataset.')
    parser.add_argument('--folder_toy_dataset', type=str, nargs='?', default='./',
                        help='If you want to load/save toy dataset files in a folder other than the local folder, change this variable.')
    parser.add_argument('--save_folder', type=str, nargs='?', default='./runs',
                        help='If you want to save files and outputs in a folder other than \'./runs\', change this variable.')
    parser.add_argument('--learning_rate_g', type=float, nargs='?', default=1e-4,
                        help='Learning rate for the optimizer used for updating the weigths of the generator')
    parser.add_argument('--learning_rate_r', type=float, nargs='?', default=1e-4,
                        help='Learning rate for the optimizer used for updating the weigths of the regressor')
    # parser.add_argument('--use_xray_dataset', type=str2bool, nargs='?', default='false',
    #     help='The model will run for the toy dataset by default. \
    #     If you want to run a demo for the xray dataset ,set this to true. \
    #     Training will be skipped if true. If this variable is true, \
    #     you should also provide the variables xray_x, xray_y and xray_yprime.')
    # parser.add_argument('--inference_x', type=str, nargs='?', default='',
    #     help='Set a path for an input image to use for a single inference of the model.')
    # parser.add_argument('--inference_y', type=str, nargs='?', default=0.7,
    #     help='Set a value to use as the original PFT output (FEV1/FVC) of the input image')
    # parser.add_argument('--inference_yprime', type=str, nargs='?', default=0.7,
    #     help='Set a value to use as desired PFT output (FEV1/FVC) for the input image')
    parser.add_argument('--gpus', type=str, nargs='?', default=None,
                        help='Set the gpus to use, using CUDA_VISIBLE_DEVICES syntax.')
    parser.add_argument('--experiment', type=str, nargs='?', default='',
                        help='Set the name of the folder where to save the run.')
    parser.add_argument('--nepochs', type=int, nargs='?', default=30,
                        help='Number of epochs to run training and validation')
    parser.add_argument('--split_validation', type=str, nargs='?', default='val',
                        help='Use \'val\' to use the validation set for calculating scores every epoch. Use \'test\' for using the test set')
    parser.add_argument('--load_checkpoint_g', type=str, nargs='?', default=None,
                        help='Set a filepath locating a model checkpoint for the generator that you want to load')
    parser.add_argument('--load_checkpoint_r', type=str, nargs='?', default=None,
                        help='Set a filepath locating a model checkpoint for the regressor that you want to load')
    args = parser.parse_args()
    # Unique run id: wall-clock time plus a random suffix to avoid collisions
    # between runs started within the same second.
    timestamp = time.strftime("%Y%m%d-%H%M%S") + '-' + str(randint(1000,9999))
    args.timestamp = timestamp
    return args
| 2.453125 | 2 |
botovod/agents/types.py | OlegYurchik/botovod | 7 | 12766886 | from __future__ import annotations
from typing import Iterator, Optional
class Entity:
    """Base for all agent-side data objects.

    Stores the raw payload in ``self.raw`` and transparently exposes its
    keys as attributes via ``__getattr__``.
    """
    def __init__(self, **raw):
        self.raw = raw

    def __getattr__(self, item):
        # __getattr__ only fires for attributes not found through normal
        # lookup, so real attributes always win over raw payload keys.
        try:
            return self.raw[item]
        except KeyError:
            return super().__getattribute__(item)
class Chat(Entity):
    """A conversation handled by a specific agent, identified by ``id``."""
    def __init__(self, agent, id: str, **raw):
        self.agent = agent
        self.id = id
        super().__init__(**raw)
class Message(Entity):
    """A single message: optional text plus attachment/location collections."""
    def __init__(self, text: Optional[str] = None, images: Iterator[Attachment] = (),
                 audios: Iterator[Attachment] = (), videos: Iterator[Attachment] = (),
                 documents: Iterator[Attachment] = (), locations: Iterator[Location] = (), **raw):
        super().__init__(**raw)
        # Tuple defaults are immutable, so sharing them across calls is safe.
        self.text = text
        self.images, self.audios = images, audios
        self.videos, self.documents = videos, documents
        self.locations = locations
class Attachment(Entity):
    """A media/file attachment, referenced by remote URL and/or local path."""
    def __init__(self, url: Optional[str] = None, filepath: Optional[str] = None, **raw):
        super().__init__(**raw)
        self.url, self.filepath = url, filepath
class Location(Entity):
    """A geographic point (latitude/longitude, decimal degrees)."""
    def __init__(self, latitude: float, longitude: float, **raw):
        super().__init__(**raw)
        self.latitude, self.longitude = latitude, longitude
class Keyboard(Entity):
    """A keyboard layout attached to a message: rows of buttons."""
    def __init__(self, buttons: Iterator[Iterator[KeyboardButton]], **raw):
        self.buttons = buttons
        super().__init__(**raw)
class KeyboardButton(Entity):
    """A single labelled button of a Keyboard."""
    def __init__(self, text: str, **raw):
        self.text = text
        super().__init__(**raw)
| 2.5 | 2 |
Day 6/Ex1: Hurdles Loop 1 Challenge.py | Nishi-16-K/100DaysCodeChallenge-Python- | 1 | 12766887 | #Reeborg's World Hurdle 1 Loop Challenge using for loop
def turn_right():
    # Reeborg has no native right turn: turning left three times is a
    # 90-degree clockwise turn.
    for _ in range(3):
        turn_left()
def step():
    # Clear one hurdle: move to it, climb up, cross over, and climb down.
    move()
    turn_left()   # face up the hurdle
    move()        # climb
    turn_right()  # face forward on top
    move()        # cross
    turn_right()  # face down
    move()        # descend
    turn_left()   # face forward again
# BUGFIX: the loop variable was written as the keyword "in"
# ("for in in range(0,5):"), which is a SyntaxError and prevented the
# script from running at all.  Use a throwaway name instead.
for _ in range(0, 5):
    step()
| 3.703125 | 4 |
bfassist/colours/rgbcolours.py | SanHime/bfassist | 1 | 12766888 | #############################################################################
#
#
# RGB/A Colour webGenFramework module to BFA c7
#
#
#############################################################################
""" This is a rgb/a colour module for bfa colours.
Dependencies:
None
note:: Author(s): Mitch last-check: 07.07.2021 """
# noinspection PyUnusedLocal
def __preload__(forClient: bool = True):
    """Framework hook invoked before module load; nothing to prepare here."""
    pass
# noinspection PyUnusedLocal
def __postload__(forClient: bool = True):
    """Framework hook invoked after module load; nothing to finalise here."""
    pass
class Colour:
    """Abstract base for named colours.

    :param name: Human-readable name of this colour.

    note:: Author(s): Mitch
    """

    def __init__(self, name):
        self.name = name

    def toCSS(self):
        """Return a CSS representation; subclasses override (base yields None)."""
        return None
class RGB_Colour(Colour):
    """A colour expressed as red/green/blue components.

    :param name: Name of this colour.
    :param red: Red value of this colour.
    :param green: Green value of this colour.
    :param blue: Blue value of this colour.

    note:: Author(s): Mitch
    """

    def __init__(self, name: str, red: int, green: int, blue: int):
        super().__init__(name)
        self.red, self.green, self.blue = red, green, blue

    def toCSS(self):
        """Serialise to a CSS ``rgb(r,g,b)`` string.

        note:: Author(s): Mitch
        """
        channels = (self.red, self.green, self.blue)
        return "rgb({})".format(",".join(str(c) for c in channels))

    @classmethod
    def fromHex(cls, name: str, hexString: str):
        """Alternative constructor from a hex string such as ``"ff00aa"``.

        :param name: Name of the colour.
        :param hexString: Hex string containing the rgb values.
        :return: The corresponding RGB colour.

        note:: Author(s): Mitch
        """
        red = int(hexString[:2], 16)
        green = int(hexString[2:4], 16)
        blue = int(hexString[4:], 16)
        return cls(name, red, green, blue)
class RGBA_Colour(RGB_Colour):
    """A colour expressed as red/green/blue components plus opacity.

    :param name: Name of this colour.
    :param alpha: Alpha (opacity) value of this colour.

    note:: Author(s): Mitch
    """

    def __init__(self, name: str, red: int, green: int, blue: int, alpha: float):
        super().__init__(name, red, green, blue)
        self.alpha = alpha

    def toCSS(self):
        """Serialise to a CSS ``rgba(r,g,b,a)`` string (alpha with 2 decimals).

        note:: Author(s): Mitch
        """
        parts = [str(self.red), str(self.green), str(self.blue),
                 "%.2f" % self.alpha]
        return "rgba(" + ",".join(parts) + ")"

    def toRGB(self):
        """Drop the alpha channel and return the plain RGB colour.

        note:: Author(s): Mitch
        """
        return RGB_Colour(self.name, self.red, self.green, self.blue)

    def createVariant(self, variation: str):
        """Return a more transparent variant of this colour, or None.

        'faint' lowers alpha by 0.21 (requires alpha > 0.21);
        'fainter' lowers alpha by 0.8 (requires alpha > 0.8).

        note:: Author(s): Mitch
        """
        reductions = {'faint': 0.21, 'fainter': 0.8}
        delta = reductions.get(variation)
        if delta is not None and self.alpha > delta:
            return RGBA_Colour(variation + self.name, self.red, self.green,
                               self.blue, self.alpha - delta)
        return None
| 2.546875 | 3 |
NCMusicAPI/logindb.py | ch4r04/NCMusicAPI-py | 0 | 12766889 | <reponame>ch4r04/NCMusicAPI-py
# -*- coding:utf-8 -*-
"""
@author: ‘ch4r0n‘
@contact: <EMAIL>
@site:
@software: PyCharm
@file: logindb.py
@time: 2018/10/8 上午12:00
"""
from pony.orm import *
from .const import Constant
import os
db = Database()
db.bind(provider='sqlite', filename=os.path.join(Constant.conf_dir, "ncmusicapi.db"),create_db=True)
class Login_info(db.Entity):
    # Pony ORM entity persisting a login session for the music API.
    id = PrimaryKey(int, auto=True)
    phone = Optional(str)      # login phone number
    userId = Optional(int)     # numeric account id
    Cookie = Optional(str)     # serialized session cookie
    nickname = Optional(str)   # display name of the account
db.generate_mapping(create_tables=True)
@db_session
def createSomeOne():
    # Seed/demo helper: inserts a single hard-coded row.
    # NOTE(review): userId is declared Optional(int) on the entity but a str
    # is passed here -- confirm Pony's implicit conversion is intended.
    p1 = Login_info(userId='123',Cookie="123123",nickname="myname")
    commit()
@db_session
def deleteAll():
    # Bulk-delete every Login_info row via a Pony query-expression delete.
    delete(p for p in Login_info)
    commit()
@db_session
def getSomeOneWithUserID(userid):
    # Entity.get returns the single matching entity or None.
    p1 = Login_info.get(userId=userid)
    return p1
@db_session
def getSomeOneWithPhone(phone):
    # Entity.get returns the single matching entity or None.
    p1 = Login_info.get(phone=phone)
    return p1
@db_session
def updateSomeOneCookiesWithPhone(phone, Cookie):
    """Update the stored cookie for the account with the given phone number.

    BUGFIX: ``getSomeOneWithPhone`` wraps Pony's ``Entity.get``, which returns
    a single entity (or None), not a sequence -- the previous ``[0]``
    subscript raised TypeError on every call.  Also skip gracefully when no
    matching account exists instead of crashing on None.
    """
    p1 = getSomeOneWithPhone(phone)
    if p1 is not None:
        p1.Cookie = Cookie
        commit()
@db_session
def createOne(phone, userId, Cookie, nickname):
    # Insert one account row with the given credentials/session data.
    p1 = Login_info(userId=userId, Cookie=Cookie, phone=phone, nickname=nickname)
    commit()
@db_session
def updateOrInsertOne(phone, userId, Cookie, nickname):
    # Upsert keyed on userId: update the existing row in place, otherwise
    # insert a new one.  Changes commit implicitly when @db_session exits.
    if Login_info.exists(userId=userId):
        puser = Login_info.get(userId=userId)
        puser.Cookie = Cookie
        puser.phone = phone
        puser.nickname = nickname
    else:
        createOne(phone,userId,Cookie, nickname)
| 2.1875 | 2 |
setup.py | aaront/backcheck | 0 | 12766890 | <reponame>aaront/backcheck
# -*- coding: utf-8 -*-
import re
import ast
from setuptools import setup, find_packages
# Pull __version__ out of the package source without importing it
# (importing could fail before the package's dependencies are installed).
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('backcheck/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))
setup(
    name='backcheck',
    author='<NAME>',
    version=version,
    url='https://github.com/aaront/backcheck',
    description='An asynchronous NHL.com data scraper',
    long_description=open('README.rst').read(),
    install_requires=[
        'lxml',
        'click',
        'aiohttp',
        'python-dateutil'
    ],
    test_suite="tests",
    include_package_data=True,
    packages=find_packages(),
    package_data={'': ['LICENSE']},
    package_dir={'backcheck': 'backcheck'},
    license='Apache 2.0',
    # Console entry point: installs a "backcheck" command bound to cli:main.
    entry_points='''
        [console_scripts]
        backcheck=backcheck.cli:main
    ''',
    classifiers=(
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Libraries'
    ),
)
| 1.632813 | 2 |
decomp.py | MelyPic/labgaif | 0 | 12766891 | '''
Handle transactional file via github's labgaif/td2dot.py
Similar to integration tests inside maindecomposition but
trying them "from outside file".
Yesterday I got some strange error in the union/find str
but I cannot reproduce it anymore :(
It read:
if x.parent == x:
AttributeError: 'str' object has no attribute 'parent'
'''
from maindecomposition import decompose, stdGgraph, labGgraph, hack_items_in, hack_graph_in
from td2dot import read_graph_in
# ~ from td2dot import dump_graph # might become necessary to track read in graph
datasetfile = 'titanic_'
# Read the transactional dataset; yields the raw graph and the item labels.
graph, items = read_graph_in(datasetfile + '.td')
# make items available as global variable, necessary for Ely's code to work
# there, replace '-' and '=' in names as disallowed by dot
# means currently:
# TotalAttributesValues = [ item.replace('-', '_').replace('=', '_') for item in items ]
hack_items_in(items)
# option 1 for original labeled Gaifman graph
# ~ my_graph = labGgraph(graph, items)
# option 2 for standard Gaifman graph
my_graph = stdGgraph(graph, items)
# make my_graph available as global variable, necessary for Ely's code to work
hack_graph_in(my_graph)
# decompose it and write the result to '<dataset>_std_decomp'
decompose(my_graph, '2', datasetfile + '_std_decomp')
| 2.234375 | 2 |
djauth/bin/ldap_modify.py | carthagecollege/django-djauth | 0 | 12766892 | <filename>djauth/bin/ldap_modify.py
# -*- coding: utf-8 -*-
"""
Shell script to search LDAP store by username or ID
"""
from django.conf import settings
from djauth.LDAPManager import LDAPManager
import sys
import hashlib, base64
import argparse
import ldap.modlist as modlist
import ldap
# set up command-line options: all three options (cn, attribute name,
# attribute value) are required.
desc = """
Accepts as input:
cn
name [of attribute]
value [of attribute]
"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
    "-c", "--cn",
    required=True,
    help="LDAP cn",
    dest="cn"
)
parser.add_argument(
    "-n", "--name",
    required=True,
    help="Attribute name to modify",
    dest="name"
)
parser.add_argument(
    "-v", "--value",
    required=True,
    help="Attribute value to modify",
    dest="value"
)
def hash(password):
    # Produce an LDAP-style password hash: "{SHA}" + base64(SHA-1 digest).
    # NOTE(review): this shadows the builtin hash(); base64.encodestring and
    # the bare str() call are Python-2 idioms (the whole script uses py2
    # print-statement syntax) -- port before running on Python 3.
    return "{SHA}" + base64.encodestring(hashlib.sha1(str(password)).digest())
def main():
    """
    main method

    Looks up the entry whose cn matches the module-level ``cn``, builds
    old/new attribute dicts, and (with the actual modify calls currently
    commented out) would apply the change to the directory.
    """
    global cn
    global name
    global value

    print "cn = {}".format(cn)
    print "name = {}".format(name)
    print "value = {}".format(value)

    # encrypt the password; note: "hash" is the module-level SHA helper
    # above, not the builtin
    if name == "userPassword":
        value = hash(value)

    # initialize the manager: password changes go through the PWM endpoint,
    # everything else through the default LDAPManager configuration
    if name == "userPassword":
        l = LDAPManager(
            protocol=settings.LDAP_PROTOCOL_PWM,
            server=settings.LDAP_SERVER_PWM,
            port=settings.LDAP_PORT_PWM,
            user=settings.LDAP_USER_PWM,
            password=settings.LDAP_PASS_PWM,
            base=settings.LDAP_BASE_PWM
        )
    else:
        l = LDAPManager()

    # use search to obtain dn
    search = l.search(cn,field="cn")
    print search
    dn = search[0][0]
    print "dn = {}".format(dn)
    #result = l.modify(dn, name, value)
    old = {
        "dn":search[0][0],
        "cn":search[0][1]["cn"],
        "mail":search[0][1]["mail"],
        "carthageNameID":search[0][1]["carthageNameID"],
        "sn":search[0][1]["sn"],
        "carthageFormerStudentStatus":search[0][1]["carthageFormerStudentStatus"],
        "givenName":search[0][1]["givenName"],
        "carthageDob":search[0][1]["carthageDob"]
    }
    # NOTE(review): "new" aliases "old" (no copy), so the assignment below
    # mutates both dicts -- confirm l.modify() does not need the
    # pre-change values before re-enabling the commented call.
    new = old
    new[name] = value
    #result = l.modify(dn, old, new)
    # success = (103, [])
    #print result

######################
# this doesn't really work for dn or cn but should work for other name/values
######################
if __name__ == "__main__":
    # Parse CLI options into module-level globals consumed by main().
    args = parser.parse_args()
    cn = args.cn
    name = args.name
    value = args.value
    print args
    # NOTE(review): all three options are declared required=True above, so
    # argparse already rejects missing values -- this guard only catches
    # empty strings.
    if not cn or not name or not value:
        print "You must provide a cn, an attribute name, and an attribute value.\n"
        parser.print_help()
        exit(-1)
    else:
        sys.exit(main())
| 3.171875 | 3 |
KFold_ACC.py | wzhlearning/fNIRS-Transformer | 0 | 12766893 | import numpy as np
# Select dataset
dataset = ['A', 'B', 'C']
dataset_id = 0
print(dataset[dataset_id])

# Select model
models = ['fNIRS-T', 'fNIRS-PreT']
models_id = 0
print(models[models_id])

# Collect the best test accuracy of each of the 25 folds and report
# mean/std across folds.
test_acc = []
for tr in range(1, 26):
    path = 'save/' + dataset[dataset_id] + '/KFold/' + models[models_id] + '/' + str(tr)
    # BUGFIX: the result file was opened but never closed; use a context
    # manager so the handle is released even if parsing fails.
    with open(path + '/test_max_acc.txt', "r") as test_max_acc:
        string = test_max_acc.read()
    # Each file ends with "...best_acc=<float>"; take the part after the marker.
    acc = string.split('best_acc=')[1]
    acc = float(acc)
    test_acc.append(acc)

test_acc = np.array(test_acc)
print('mean = %.2f' % np.mean(test_acc))
print('std = %.2f' % np.std(test_acc))
| 2.734375 | 3 |
shopping_cart/models.py | devsingh-code/django-digital-marketplace | 1 | 12766894 | <gh_stars>1-10
from django.db import models
from django.conf import settings
from books.models import Book
from django.db.models import Sum
# Create your models here.
class OrderItem(models.Model):
    # One purchasable line item; wraps a single Book.
    book = models.ForeignKey(Book, on_delete=models.CASCADE)
    def __str__(self):
        return self.book.title
class Order(models.Model):
    # A user's cart/order; is_ordered flips to True once checkout completes.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
    is_ordered = models.BooleanField(default=False)
    items = models.ManyToManyField(OrderItem)
    ref_code = models.CharField(max_length=50)
    def __str__(self):
        return self.user.username
    def get_total(self):
        # Total price of all books in the order.
        # NOTE(review): aggregate() yields None when the order has no items;
        # confirm callers handle a None total (or default it to 0 here).
        return self.items.all().aggregate(order_total = Sum('book__price'))['order_total']
class Payment(models.Model):
    # Record of a Stripe charge made against an order.
    order = models.ForeignKey(Order, on_delete=models.CASCADE)
    total_amount = models.FloatField()
    date_paid = models.DateTimeField(auto_now_add=True)
    stripe_charge_id = models.CharField(max_length=100)
    def __str__(self):
        return self.stripe_charge_id
| 2.375 | 2 |
master/testdata/var/db/baetyl/cmd/bin/cmd.py | winnerineast/openedge | 2 | 12766895 | <reponame>winnerineast/openedge<filename>master/testdata/var/db/baetyl/cmd/bin/cmd.py
#!/usr/bin/env python
#
import time
if __name__ == '__main__':
    # Idle loop: keep the process alive (sleeping in 10s intervals) until
    # interrupted with Ctrl-C.
    while True:
        try:
            time.sleep(10)
        except KeyboardInterrupt:
            break
| 1.507813 | 2 |
django_mercadopago/migrations/0001_initial.py | victorpluna/django-mp | 1 | 12766896 | <filename>django_mercadopago/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# Auto-generated Django migration: creates the initial Notification,
# Payment and Preference tables for the mercadopago app.
class Migration(migrations.Migration):
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('topic', models.CharField(choices=[('o', 'Merchant Order'), ('p', 'Payment')], max_length=1)),
                ('resource_id', models.CharField(max_length=46)),
                ('processed', models.BooleanField(default=False)),
                ('last_update', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Payment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mp_id', models.IntegerField(unique=True)),
                ('status', models.CharField(max_length=16)),
                ('status_detail', models.CharField(max_length=16)),
                ('created', models.DateTimeField()),
                ('approved', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Preference',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mp_id', models.CharField(max_length=46)),
                ('payment_url', models.URLField()),
                ('sandbox_url', models.URLField()),
                ('reference', models.CharField(max_length=128, unique=True)),
            ],
        ),
        migrations.AddField(
            model_name='payment',
            name='preference',
            field=models.ForeignKey(related_name='payments', to='mp.Preference'),
        ),
        migrations.AlterUniqueTogether(
            name='notification',
            unique_together=set([('topic', 'resource_id')]),
        ),
    ]
| 1.726563 | 2 |
colossalai/engine/_base_engine.py | jiangz17THU/ColossalAI | 0 | 12766897 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from asyncio.log import logger
from typing import List
from torch.nn import Module
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer
from colossalai.logging import get_dist_logger
from torch import Tensor
from colossalai.engine.ophooks import register_ophooks_recursively, BaseOpHook
from typing import Optional, Type
from colossalai.engine.gradient_handler import BaseGradientHandler
from colossalai.logging import get_dist_logger
class Engine:
    """Basic engine class for training and evaluation. It runs a specific process method
    :meth:`step` which is based on the given :attr:`schedule` over each batch of a dataset.
    It controls an iteration in training.

    Args:
        model (``torch.nn.Module``): The neural network model.
        optimizer (``torch.optim.Optimizer``): Optimizer for updating the parameters.
        criterion (``torch.nn.modules.loss._Loss``, optional): Loss function for calculating loss.
        gradient_handlers (List[``BaseGradientHandler``], optional): A list of gradient handler used in backward.
        clip_grad_norm (float, optional): The norm of gradient clipping.
        ophook_list (list): List of ophook.
        verbose (bool): whether to display log info.

    Examples:
        >>> # define model, criterion, optimizer, lr_scheduler, train_dataloader for your training
        >>> model = ...
        >>> criterion = ...
        >>> optimizer = ...
        >>> train_dataloader = ...
        >>> engine, _, _, _ = colossalai.initialize(model, optimizer, criterion)
        >>> engine.train()
        >>> for inputs, labels in train_dataloader
        >>>     # set gradients to zero
        >>>     engine.zero_grad()
        >>>     # run forward pass
        >>>     outputs = engine(inputs)
        >>>     # compute loss value and run backward pass
        >>>     loss = engine.criterion(outputs, labels)
        >>>     engine.backward(loss)
        >>>     # update parameters
        >>>     engine.step()

    The example of using Engine in training could be find in
    `Training with engine and trainer <https://www.colossalai.org/docs/basics/engine_trainer>`_. and
    `Run resnet cifar10 with engine <https://github.com/hpcaitech/ColossalAI-Examples/blob/main/image/resnet/run_resnet_cifar10_with_engine.py>`_.
    """
    def __init__(self,
                 model: Module,
                 optimizer: Optimizer,
                 criterion: Optional[_Loss] = None,
                 gradient_handlers: Optional[List[BaseGradientHandler]] = None,
                 clip_grad_norm: float = 0.0,
                 ophook_list: Optional[List[BaseOpHook]] = None,
                 verbose: bool = True):
        self._model = model
        self._optimizer = optimizer
        self._criterion = criterion
        self._clip_grad_norm = clip_grad_norm
        self._verbose = verbose
        self._logger = get_dist_logger()
        # state
        self.training = True    # default
        # build gradient handler
        if gradient_handlers:
            self._gradient_handlers = gradient_handlers
        else:
            self._gradient_handlers = []
        if ophook_list is None:
            self._ophook_list = []
        else:
            self._ophook_list = ophook_list
        # attach the configured op-hooks to every submodule of the model
        register_ophooks_recursively(self._model, self._ophook_list)
    @property
    def ophooks(self):
        """show current activated ophooks"""
        return self._ophook_list
    @property
    def model(self):
        """Model attached to the engine"""
        return self._model
    @property
    def optimizer(self):
        """Optimizer attached to the engine"""
        return self._optimizer
    @property
    def criterion(self):
        """Criterion attached to the engine"""
        return self._criterion
    def add_hook(self, ophook: Type[BaseOpHook]) -> None:
        """add necessary hook"""
        # whether this hook exist
        for h in self._ophook_list:
            if type(h) == type(ophook):
                logger = get_dist_logger()
                logger.warning(f"duplicate hooks, at least two instance of {type(ophook)}")
        # NOTE(review): a duplicate only triggers a warning above -- the hook
        # is still appended, and all hooks are re-registered on the model on
        # every call; confirm register_ophooks_recursively is idempotent.
        self._ophook_list.append(ophook)
        register_ophooks_recursively(self._model, self._ophook_list)
    def remove_hook(self, ophook: Type[BaseOpHook]) -> None:
        """remove hook"""
        # Intentionally a no-op: removal is not implemented; only warns.
        logger = get_dist_logger()
        logger.warning(f"removing hooks is currently not supported")
    def zero_grad(self):
        """Set the gradient of parameters to zero
        """
        self.optimizer.zero_grad()
    def step(self):
        """Execute parameter update
        """
        # Synchronize gradients across parallel groups and clip them before
        # the optimizer applies the update.
        self._all_reduce_gradients()
        self.optimizer.clip_grad_norm(self.model, self._clip_grad_norm)
        return self.optimizer.step()
    def backward(self, loss: Tensor):
        """Start backward propagation given the loss value computed by a loss function.

        Args:
            loss (:class:`torch.Tensor`): Loss value computed by a loss function.
        """
        ret = self.optimizer.backward(loss)
        # let every op-hook run its end-of-iteration logic
        for ophook in self._ophook_list:
            ophook.post_iter()
        return ret
    def backward_by_grad(self, tensor, grad):
        """Start backward propagation given the gradient of the output tensor.

        Args:
            tensor (:class:`torch.Tensor`): Output tensor.
            grad (:class:`torch.Tensor`): Gradient passed back to the output.
        """
        ret = self.optimizer.backward_by_grad(tensor, grad)
        # let every op-hook run its end-of-iteration logic
        for ophook in self._ophook_list:
            ophook.post_iter()
        return ret
    def __call__(self, *args, **kwargs):
        """Run the forward step for the model.

        Returns:
            Tuple[:class:`torch.Tensor`] or :class:`torch.Tensor`: Output of the model.
        """
        return self.model(*args, **kwargs)
    def _all_reduce_gradients(self):
        """Handles all-reduce operations of gradients across different parallel groups.
        """
        for handler in self._gradient_handlers:
            handler.handle_gradient()
    def train(self):
        """Sets the model to training mode.
        """
        self.training = True
        self._model.train()
    def eval(self):
        """Sets the model to evaluation mode.
        """
        self.training = False
        self._model.eval()
functions/process_data.py | donglinwu6066/SDEdit | 330 | 12766898 | import torch
import os
def download_process_data(path="colab_demo"):
    """Download the demo .pth files into *path* (created if missing)."""
    os.makedirs(path, exist_ok=True)
    print("Downloading data")
    base = 'https://image-editing-test-12345.s3-us-west-2.amazonaws.com/colab_examples/'
    filenames = ('lsun_bedroom1.pth', 'lsun_bedroom2.pth', 'lsun_bedroom3.pth',
                 'lsun_edit.pth', 'lsun_church.pth')
    for filename in filenames:
        torch.hub.download_url_to_file(base + filename, os.path.join(path, filename))
    print("Data downloaded")
| 2.734375 | 3 |
ascent/datastore/v1/libraries/adaptors/ascent_es_adaptor.py | ahwspl/ascent-datastore | 0 | 12766899 | <reponame>ahwspl/ascent-datastore
import os
import json
import logging
import requests
import elasticsearch
import functools
from ..helpers import common_util as utils
from ..helpers.DBOperationError import DBOperationError
class AscentESAdaptor(object):
"""An adaptor to create and manage ElasticSearch, Neo4j, etc. database client. Currently only Elasticsearch is supported.
Args:
ds_config (str): An abosolute path to the config file.
"""
def __init__(self, ds_config=None):
    """Build the Elasticsearch client from a config mapping or config-file path.

    Args:
        ds_config (dict or str, optional): Either an already-parsed config
            mapping or an absolute path to a config file. Must contain an
            ``elasticsearch`` section with ``uri``, ``idx_uri``,
            ``ssl_verify`` and ``timeout`` keys.

    Raises:
        KeyError: If the config has no ``elasticsearch`` section.
    """
    self.logger = logging.getLogger(__name__)
    # Accept either a pre-parsed dict or a path to a config file.
    configs = ds_config if isinstance(ds_config, dict) else utils.load_config_file(ds_config)
    self.db = None
    if 'elasticsearch' not in configs:
        raise KeyError('elasticsearch')
    self.es_configs = dict()
    self.es_configs['uri'] = configs['elasticsearch']['uri']
    self.es_configs['idx_uri'] = configs['elasticsearch']['idx_uri']
    self.es_configs['verify_certs'] = configs['elasticsearch']['ssl_verify']
    # Basic auth when a username is configured, otherwise OAuth2 bearer tokens
    # (injected per request by _oauth2_decorator).
    self.auth_type, es_username, es_password = None, configs['elasticsearch'].get('username', None), None
    if es_username is not None:
        self.auth_type = "Basic"
        if configs['elasticsearch'].get('password', None) is not None:
            es_password = configs['elasticsearch']['password']
        elif os.environ.get("ES_PASS") is not None:
            es_password = os.environ.get("ES_PASS")
        else:
            # FIX: Logger.warn is deprecated; use warning().
            self.logger.warning("Database password is not defined!")
        self.es_configs['http_auth'] = (es_username, es_password)
    else:
        self.auth_type = "Bearer"
        self.es_configs['http_auth'] = ()
    self.es_configs['timeout'] = configs['elasticsearch']['timeout']
    self.internal_conf = utils.read_config_file(
        utils.get_config_dir() + "/handler_config.cfg"
    )
    # Forward only the options that are actually set to the ES client.
    kwargs = dict()
    for item, value in self.es_configs.items():
        if value is not None:
            kwargs[item] = value
    self.logger.info("Connecting to {} database...".format('elasticsearch'))
    self.db = elasticsearch.Elasticsearch([self.es_configs['uri']], **kwargs)
def _oauth2_decorator():
    # NOTE: deliberately defined without ``self`` -- it is invoked at class-body
    # evaluation time (``@_oauth2_decorator()``), where it is still a plain function.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # For bearer-token auth, inject the caller-supplied access token into
            # the ES transport headers and strip it from kwargs so the wrapped
            # client call never sees it.
            if self.auth_type == 'Bearer':
                if 'access_token' in kwargs:
                    self.db.transport.connection_pool.connection.headers.update(
                        {'authorization': "Bearer {}".format(kwargs['access_token'])})
                    del kwargs['access_token']
                else:
                    raise DBOperationError('Access Token not provided')
            result = func(self, *args, **kwargs)
            return result
        return wrapper
    return decorator
# Generic HTTP helpers (GET/POST) used by the watcher CRUD operations above/below.
def _requests_get(self, uri, data=None, timeout=60, **kwargs):
    """Issue a GET request to Elasticsearch using the configured auth scheme.

    Args:
        uri (str): Fully-qualified request URL.
        data (dict, optional): JSON-serialisable request body. Defaults to None.
        timeout (int, optional): Request timeout in seconds. Defaults to 60.

    Raises:
        DBOperationError: If bearer auth is configured and no ``access_token``
            keyword argument was supplied.

    Returns:
        requests.Response: The raw HTTP response.
    """
    headers = {"Content-Type": "application/json"}
    request_args = dict(
        data=json.dumps(data),
        timeout=timeout,
        verify=self.es_configs['verify_certs'],
    )
    if self.auth_type == "Basic":
        request_args['auth'] = requests.auth.HTTPBasicAuth(
            self.es_configs['http_auth'][0], self.es_configs['http_auth'][1]
        )
    else:
        if 'access_token' not in kwargs:
            raise DBOperationError('Access Token not provided!')
        headers["Authorization"] = "Bearer {}".format(kwargs['access_token'])
    request_args['headers'] = headers
    return requests.get(uri, **request_args)
def _requests_post(self, uri, data=None, json=None, params=None, timeout=60, **kwargs):
    """Issue a POST request to Elasticsearch using the configured auth scheme.

    Args:
        uri (str): Fully-qualified request URL.
        data (str or bytes, optional): Raw request body. Defaults to None.
        json (dict, optional): JSON request body (note: parameter shadows the
            ``json`` module inside this method; kept for interface stability).
        params (dict, optional): URL query parameters. Defaults to None.
        timeout (int, optional): Request timeout in seconds. Defaults to 60.

    Raises:
        DBOperationError: If bearer auth is configured and no ``access_token``
            keyword argument was supplied.

    Returns:
        requests.Response: The raw HTTP response.
    """
    headers = {"Content-Type": "application/json"}
    request_args = dict(
        url=uri,
        data=data,
        json=json,
        timeout=timeout,
        params=params,
        verify=self.es_configs['verify_certs'],
    )
    if self.auth_type == 'Basic':
        request_args['auth'] = requests.auth.HTTPBasicAuth(
            self.es_configs['http_auth'][0], self.es_configs['http_auth'][1]
        )
    else:
        if 'access_token' not in kwargs:
            raise DBOperationError('Access Token not provided')
        headers["Authorization"] = "Bearer {}".format(kwargs['access_token'])
    request_args['headers'] = headers
    return requests.post(**request_args)
def build_search_query(self, fields=None, search_all=False, method='term', criteria='must', range=None, custom=None):
    """ Query builder function for ES
    This function will create Elasticsearch queries based on the options.
    Args:
        fields (list of `dict` or `dict`, optional): List of fields that needs to keep in the body, None if all fields are needed. Defaults to None.
        search_all (bool, optional): Get all documents in the index or not. Defaults to False.
        method (str, optional): term or match. Defaults to 'term'.
        criteria (str, optional): must/must_not/should. Defaults to 'must'.
        range (dict, optional): Range for matching. Defaults to None. (NOTE:
            parameter shadows the builtin ``range``; name kept for interface
            stability.)
        custom (dict, optional): Custom query bodies, will use this body for query instead build by parameters. Defaults to None.
    Returns:
        `dict`: A query body built on the input parameters
    """
    if search_all:
        # match_all ignores every other parameter except the debug log below.
        query = {
            'query': {
                'match_all': {}
            }
        }
    elif isinstance(custom, dict) or type(fields) in [list, dict, type(None)]:
        if fields is None:
            # Empty filter: matches everything via constant_score/bool.
            # NOTE(review): when fields is None, ``criteria`` is ignored.
            filter_items = {'must': []}
        elif criteria == 'must':
            filter_items = {criteria: [{method: fields}] if isinstance(fields, dict) else [
                {method: field} for field in fields
            ]}
        else:
            # Non-'must' criteria are nested inside a bool so 'range' can
            # still be appended at the top-level 'must' below.
            filter_items = {'must': [{'bool': {criteria: [{method: fields}] if isinstance(fields, dict) else [
                {method: field} for field in fields
            ]}}]}
        query = {
            'query': {
                'constant_score': {
                    'filter': {
                        'bool': custom or filter_items
                    }
                }
            }
        }
    else:
        err = 'Term {} is not valid.'.format(fields)
        self.logger.error(err)
        return {'error': err}
    # A range clause is merged into the constant_score filter; it cannot be
    # combined with search_all (there is no filter to attach it to).
    if isinstance(range, dict) and not search_all:
        must_filter = query['query']['constant_score']['filter']['bool']
        if not isinstance(must_filter.get('must'), list):
            must_filter['must'] = []
        must_filter['must'].append({'range': range})
    self.logger.debug(json.dumps(query, indent=4))
    return query
def get_watcher_with_id(self, watcher_id, **kwargs):
    """Fetch a watcher document by its identifier.

    Args:
        watcher_id (int): Watcher ID

    Returns:
        dict: Parsed Elasticsearch search response.
    """
    id_query = {
        "query": {
            "match": {
                "_id": watcher_id
            }
        }
    }
    search_path = self.internal_conf['elasticsearch']['WATCHER_API_SEARCH_PATH']
    raw_response = self._requests_get(
        uri=self.es_configs['uri'] + search_path,
        data=json.dumps(id_query),
        timeout=self.es_configs['timeout'],
        **kwargs
    )
    return json.loads(raw_response.text)
def search_watcher(self, body=None, **kwargs):
    """Run an arbitrary search against the watcher index.

    Args:
        body (str): Pre-serialised search body.

    Returns:
        dict: Parsed Elasticsearch search response.
    """
    search_path = self.internal_conf['elasticsearch']['WATCHER_API_SEARCH_PATH']
    raw_response = self._requests_get(
        uri=self.es_configs['uri'] + search_path,
        data=body,
        timeout=self.es_configs['timeout'],
        **kwargs
    )
    return json.loads(raw_response.text)
def create_watcher(self, watcher_id, body, params=None, **kwargs):
    """Create (or overwrite) a watcher definition.

    Args:
        watcher_id (int): Watcher ID
        body (str): Pre-serialised watcher definition.
        params (dict(str)): Optional URL query parameters.

    Returns:
        requests.Response: The raw creation response.
    """
    api_prefix = self.internal_conf['elasticsearch']['WATCHER_API_PREFIX']
    watcher_url = self.es_configs['uri'] + api_prefix + watcher_id
    return self._requests_post(
        uri=watcher_url,
        data=body,
        params=params,
        **kwargs
    )
def execute_watcher(self, watcher_id, **kwargs):
    """Trigger an immediate execution of an existing watcher.

    Args:
        watcher_id (int): Watcher ID

    Returns:
        The raw response from the execute endpoint, or None when the
        watcher does not exist.
    """
    self.logger.info("Executing watcher {}".format(watcher_id))
    if not self.check_watcher_exists(watcher_id, **kwargs):
        self.logger.info(f"{watcher_id} does not exists")
        return
    api_prefix = self.internal_conf['elasticsearch']['WATCHER_API_PREFIX']
    execute_suffix = self.internal_conf['elasticsearch']['WATCHER_API_EXECUTE_SUFFIX']
    execute_url = self.es_configs['uri'] + api_prefix + watcher_id + execute_suffix
    self.logger.debug(execute_url)
    response = self._requests_post(
        uri=execute_url,
        timeout=self.es_configs['timeout'],
        **kwargs
    )
    self.logger.debug(response)
    return response
def activate_watcher(self, watcher_id, **kwargs):
    """Set an existing watcher to the active state.

    Args:
        watcher_id (int): Watcher ID

    Returns:
        The raw response from the activate endpoint, or None when the
        watcher does not exist.
    """
    self.logger.info("Activating watcher {}".format(watcher_id))
    if not self.check_watcher_exists(watcher_id, **kwargs):
        self.logger.info(f"{watcher_id} does not exists")
        return
    api_prefix = self.internal_conf['elasticsearch']['WATCHER_API_PREFIX']
    activate_suffix = self.internal_conf['elasticsearch']['WATCHER_API_ACTIVATE_SUFFIX']
    activate_url = self.es_configs['uri'] + api_prefix + watcher_id + activate_suffix
    self.logger.info(activate_url)
    response = self._requests_post(
        uri=activate_url,
        timeout=self.es_configs['timeout'],
        **kwargs
    )
    self.logger.debug(response)
    return response
def deactivate_watcher(self, watcher_id, **kwargs):
    """Set an existing watcher to the inactive state.

    Args:
        watcher_id (int): Watcher ID

    Returns:
        The raw response from the deactivate endpoint, or None when the
        watcher does not exist.
    """
    self.logger.info("Deactivating watcher {}".format(watcher_id))
    if not self.check_watcher_exists(watcher_id, **kwargs):
        self.logger.info(f"{watcher_id} does not exists")
        return
    api_prefix = self.internal_conf['elasticsearch']['WATCHER_API_PREFIX']
    deactivate_suffix = self.internal_conf['elasticsearch']['WATCHER_API_DEACTIVATE_SUFFIX']
    deactivate_url = self.es_configs['uri'] + api_prefix + watcher_id + deactivate_suffix
    self.logger.info(deactivate_url)
    response = self._requests_post(
        uri=deactivate_url,
        timeout=self.es_configs['timeout'],
        **kwargs
    )
    self.logger.debug(response)
    return response
def delete_watcher(self, watcher_id, **kwargs):
    """Delete a watcher.

    Args:
        watcher_id (int): Watcher ID

    Returns:
        requests.Response: The raw delete response.
    """
    self.logger.info("Deleting watcher {}".format(watcher_id))
    watcher_api_prefix = self.internal_conf['elasticsearch']['WATCHER_API_PREFIX']
    # BUG FIX: requests.delete has no ``uri`` keyword -- the original call
    # raised TypeError on every invocation. The correct keyword is ``url``.
    # Also pass ``verify`` for consistency with the other raw HTTP helpers.
    # NOTE(review): unlike _requests_get/_requests_post this call sends no
    # auth credentials -- confirm whether that is intended.
    response_delete = requests.delete(
        url=self.es_configs['uri'] + watcher_api_prefix + watcher_id,
        timeout=self.es_configs['timeout'],
        headers={
            "Content-Type": "application/json"
        },
        verify=self.es_configs['verify_certs']
    )
    # Guard the lookup: newer ES versions report "result" instead of "found".
    if json.loads(response_delete.text).get('found'):
        self.logger.info('watcher deleted')
    else:
        self.logger.info('watcher does not exist or already deleted')
    return response_delete
def check_watcher_exists(self, watcher_id, **kwargs):
    """Return True when exactly one watcher matches *watcher_id*.

    Logs a warning and still returns True when more than one match is
    found; returns False only for zero matches.

    Args:
        watcher_id (int): Watcher ID

    Returns:
        bool: True if watcher exists
    """
    id_query = {
        "stored_fields": [],
        "query": {
            "match": {
                "_id": watcher_id
            }
        }
    }
    search_path = self.internal_conf['elasticsearch']['WATCHER_API_SEARCH_PATH']
    raw_response = self._requests_get(
        uri=self.es_configs['uri'] + search_path,
        data=json.dumps(id_query),
        timeout=self.es_configs['timeout'],
        **kwargs
    )
    # NOTE(review): on ES 7+ hits.total is an object ({"value": N}); this
    # comparison assumes the older integer form -- confirm cluster version.
    match_count = json.loads(raw_response.text)['hits']['total']
    if match_count == 1:
        return True
    if match_count == 0:
        return False
    self.logger.warning('Warning: incorrect number of watches for ' + str(watcher_id))
    return True
@_oauth2_decorator()
def search(self, index, body=None, scroll=None, request_timeout=60, size=10000, **kwargs):
    """ Function wrapped around Elasticsearch python client search function
    Search function to query Elasticsearch
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html>`
    Args:
        index (str): Index name
        body (dict, optional): Elasticsearch search query. Defaults to None.
        scroll (str, optional): Scan and scroll option. Scroll parameter telling Elasticsearch how long it should keep
            the scroll open e.g 1m (1 minute). Defaults to None.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
        size (int, optional): Size of records to return. Defaults to 10000.
    Raises:
        DBOperationError: Index not found
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        if scroll:
            return self.db.search(index=index, body=body, scroll=scroll, request_timeout=self.es_configs['timeout'], size=size, **kwargs)
        else:
            return self.db.search(index=index, body=body, request_timeout=self.es_configs['timeout'], size=size, **kwargs)
    except elasticsearch.exceptions.NotFoundError:
        raise DBOperationError('ES search failed. Index {} not found'.format(index))
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES search failed. Connection Error')
@_oauth2_decorator()
def msearch(self, index, body=None, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client multisearch search function
    MultiSearch function to query Elasticsearch
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html>`_
    Args:
        index (str): Index name
        body (dict, optional): Elasticsearch search query. Defaults to None.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Index not found
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.msearch(index=index, body=body, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.NotFoundError:
        raise DBOperationError('ES search failed. Index {} not found'.format(index))
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES search failed. Connection Error')
@_oauth2_decorator()
def mget(self, index, body=None, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client multi get function
    Args:
        index (str): Index name
        body (dict, optional): Elasticsearch search query. Defaults to None.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Index not found
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.mget(index=index, body=body, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.NotFoundError:
        raise DBOperationError('ES search failed. Index {} not found'.format(index))
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES search failed. Connection Error')
@_oauth2_decorator()
def get(self, index, doc_id=None, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client get function
    function to get a typed JSON document from the index based on its id
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
    Args:
        index (str): Index name
        doc_id (str, optional): Optional argument for Elasticsearch document id. Defaults to None.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Index not found
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.get(index=index, id=doc_id, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.NotFoundError:
        raise DBOperationError('ES get failed. Index {} not found'.format(index))
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES get failed. Connection Error')
@_oauth2_decorator()
def update_by_query(self, index, doc_id=None, request_timeout=60, **kwargs):
    """ Function wrapped around the Elasticsearch python client update_by_query function.
    Updates documents matching a query (passed via ``**kwargs``, e.g. ``body=``).
    Args:
        index (str): Index name
        doc_id (str, optional): Accepted for signature symmetry with ``get`` but
            NOT used by this method. Defaults to None.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Index not found
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``doc_id`` and ``request_timeout`` are both accepted but
    # ignored; only es_configs['timeout'] is forwarded -- confirm intent.
    try:
        return self.db.update_by_query(index=index, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.NotFoundError:
        raise DBOperationError('ES get failed. Index {} not found'.format(index))
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES get failed. Connection Error')
@_oauth2_decorator()
def create_document(self, index, body, doc_id=None, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client index function
    Adds or updates a typed JSON document in a specific index, making it searchable.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_
    Args:
        index (str): Index name
        body (dict): Document
        doc_id (str, optional): Optional argument for Elasticsearch document id. Defaults to None.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.index(index=index, id=doc_id, body=body, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES create failed. Connection Error')
@_oauth2_decorator()
def create_index(self, index, body=None, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client indices.create function
    Creates an index with optional settings/mappings.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html>`_
    Args:
        index (str): Index name
        body (dict): Index settings/mappings. Defaults to None.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
            (``ignore=...`` may be passed through ``**kwargs``.)
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.indices.create(index=index, body=body, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES create failed. Connection Error')
@_oauth2_decorator()
def delete_documents_by_query(self, index, body, request_timeout=120, **kwargs):
    """Delete all documents matching a query.
    Args:
        index (str): Index name
        body (dict): Query selecting the documents to delete.
        request_timeout (int, optional): Request timeout set value. Defaults to 120.
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``request_timeout`` (default 120 here) is accepted but
    # ignored; the configured es_configs['timeout'] is always forwarded.
    try:
        return self.db.delete_by_query(index=index, body=body, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES delete failed. Connection Error')
@_oauth2_decorator()
def delete_document(self, index, doc_id, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client delete function
    Delete a typed JSON document from a specific index based on its id.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html>`_
    Args:
        index (str): Index name
        doc_id (str): Elasticsearch Document ID
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.delete(index=index, id=doc_id, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES delete failed. Connection Error')
@_oauth2_decorator()
def delete_index(self, index, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client indices.delete function
    Delete an index in Elasticsearch
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html>`_
    Args:
        index (str): Index name
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with Elasticsearch responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; also note this
    # method duplicates ``indices_delete`` below -- confirm which is canonical.
    try:
        return self.db.indices.delete(index=index, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES delete failed. Connection Error')
@_oauth2_decorator()
def exists_document(self, index, doc_id, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client exists function
    Returns a boolean indicating whether or not given document exists in Elasticsearch.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
    Args:
        index (str): Index name
        doc_id (str): Elasticsearch Document ID
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        bool: Returns a boolean indicating whether or not given document exists in Elasticsearch.
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.exists(index=index, id=doc_id, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES exists failed. Connection Error')
@_oauth2_decorator()
def exists_index(self, index, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client indices.exists function
    Return a boolean indicating whether given index exists.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-exists.html>`_
    Args:
        index (str): Index name
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        bool: Return a boolean indicating whether given index exists.
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.indices.exists(index=index, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES exists failed. Connection Error')
@_oauth2_decorator()
def exists_alias(self, name, request_timeout=60, **kwargs):
    """Check if alias exists
    Args:
        name (str): Alias name
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        bool: Whether the alias exists.
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.indices.exists_alias(name=name, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES exists failed. Connection Error')
@_oauth2_decorator()
def put_alias(self, index, name, request_timeout=60, **kwargs):
    """Create or update an alias for an index.
    Args:
        index (str): Index name
        name (str): Alias name
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.indices.put_alias(index=index, name=name, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES exists failed. Connection Error')
@_oauth2_decorator()
def update_aliases(self, body, request_timeout=60, **kwargs):
    """ Update aliases atomically (add/remove actions).
    Args:
        body (dict): Alias actions body.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.indices.update_aliases(body=body, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES exists failed. Connection Error')
@_oauth2_decorator()
def indices_delete(self, index, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client indices.delete function
    Delete an index in Elasticsearch
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html>`_
    Args:
        index (str): Index name
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
            (``ignore=...`` may be passed through ``**kwargs``.)
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): functionally identical to ``delete_index`` above (only the
    # failure message differs) -- consider consolidating. ``request_timeout``
    # is accepted but ignored.
    try:
        return self.db.indices.delete(index=index, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES exists failed. Connection Error')
@_oauth2_decorator()
def refresh(self, index, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client refresh function
    Explicitly refresh one or more index, making all operations performed
    since the last refresh available for search.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html>`_
    Args:
        index (str): Index name
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.indices.refresh(index=index, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES refresh failed. Connection Error')
def reindex(self, data, **kwargs):
    """POST a ``_reindex`` request to Elasticsearch.

    Args:
        data (dict): Reindex request body (source/dest specification).

    Raises:
        DBOperationError: If ES answers with a >=400 status ("Internal
            Error"), or the request/response parsing fails ("Connection
            Error").

    Returns:
        dict: Parsed ES response body.
    """
    try:
        response = self._requests_post(
            uri=self.es_configs['uri'] + "/_reindex",
            json=data,
            **kwargs
        )
        if response.status_code >= 400:
            raise DBOperationError('ES reindex failed. Internal Error')
        return json.loads(response.text)
    # BUG FIX: ``except A or B or C`` evaluates to just ``A`` and only ever
    # caught ConnectionError; use a tuple so JSON-decode failures are handled
    # too. DBOperationError is deliberately NOT swallowed here so the specific
    # "Internal Error" message raised above survives.
    except (elasticsearch.exceptions.ConnectionError, json.decoder.JSONDecodeError):
        raise DBOperationError('ES reindex failed. Connection Error')
@_oauth2_decorator()
def percolate(self, index, body, request_timeout=60, **kwargs):
    """ Function wrapped around the Elasticsearch python client percolate function
    The percolator allows to register queries against an index, and then
    send percolate requests which include a doc, and getting back the
    queries that match on that doc out of the set of registered queries.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html>`_
    Args:
        index (str): Index Name
        body (json): Body
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): the dedicated ``percolate`` client API was removed in
    # newer Elasticsearch versions (replaced by the percolate query) --
    # confirm against the client version in use. ``request_timeout`` is
    # accepted but ignored.
    try:
        return self.db.percolate(index=index, body=body, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES percolate failed. Connection Error')
@_oauth2_decorator()
def update(self, index, body, doc_id=None, bulk=False, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client update function
    Update a document based on a script or partial data provided.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html>`_
    Args:
        index (str): Index Name
        body (json): Update body (single doc) or list of update bodies (bulk).
        doc_id (str, optional): Optional argument for Elasticsearch document id. Defaults to None.
        bulk (bool, optional): Update in bulk via ``bulk_update``. Defaults to False.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored, and the
    # info-level ``type(body)`` log below looks like leftover debugging.
    try:
        if bulk:
            return self.bulk_update(index, body)
        else:
            self.logger.info(type(body))
            return self.db.update(index=index, id=doc_id, body=body, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES update failed. Connection Error')
@_oauth2_decorator()
def scroll(self, scroll_id, scroll='10m', request_timeout=60, **kwargs):
    """Fetch the next batch of results for an open scroll context.
    Args:
        scroll_id (str): Scroll ID returned by a previous search/scroll call.
        scroll (str, optional): How long to keep the scroll context open. Defaults to '10m'.
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Connection error
        DBOperationError: Scroll failure (e.g. invalid scroll id)
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.scroll(scroll_id=scroll_id, scroll=scroll, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES Scroll failed. Connection Error')
    except elasticsearch.ElasticsearchException as e:
        # elasticsearch.exceptions.RequestError would be caught here in the case scroll id was invalid
        self.logger.error(e)
        raise DBOperationError('ES Scroll failed.')
def create(self, index, data, bulk=False, doc_id=None, request_timeout=60, **kwargs):
    """Create document(s), dispatching to bulk or single-document indexing.

    Args:
        index (str): Index Name
        data (dict or list): A single document, or a list of documents when
            *bulk* is True.
        bulk (bool, optional): Route through ``bulk_index``. Defaults to False.
        doc_id (str, optional): Optional Elasticsearch document id. Defaults to None.
        request_timeout (int, optional): Accepted for interface compatibility;
            the configured timeout is used for single-document creates.

    Returns:
        dict: Returns a dictionary with responses
    """
    if bulk:
        return self.bulk_index(index, data, doc_id=doc_id, **kwargs)
    return self.create_document(index, data, doc_id=doc_id, request_timeout=self.es_configs['timeout'], **kwargs)
def bulk_index(self, index, data, doc_id=None, **kwargs):
    """Index a batch of documents with a single ES ``_bulk`` request.

    Args:
        index (str): Target index name.
        data (list of dict): Documents to index. When *doc_id* is not None,
            each document must carry its id under the ``doc_id`` key, which
            is popped and used as the Elasticsearch ``_id``.
        doc_id (str, optional): Flag enabling per-document ids. Defaults to None.

    Raises:
        DBOperationError: Connection Error
        DBOperationError: Index Failure

    Returns:
        dict: {"All_published": bool} when ES reports an ``errors`` field,
        otherwise the raw parsed response.
    """
    action = {
        'index': {
            '_index': index
        }
    }
    chunks = []
    for document in data:
        if doc_id is not None:
            action['index']['_id'] = document.pop('doc_id')
        chunks.append('\n' + json.dumps(action) + '\n' + json.dumps(document))
    # Leading/trailing newlines reproduce the original payload framing.
    bulk_payload = ''.join(chunks) + '\n'
    try:
        bulk_response = self._requests_post(
            uri=self.es_configs['idx_uri'] + "/_bulk/",
            data=bulk_payload,
            **kwargs
        )
        parsed = json.loads(bulk_response.text)
        if 'errors' in parsed:
            return {"All_published": parsed['errors'] is False}
        return parsed
    except elasticsearch.exceptions.ConnectionError:
        raise DBOperationError('ES reindex failed. Connection Error')
    except elasticsearch.ElasticsearchException as e:
        self.logger.error(e)
        raise DBOperationError('ES bulk index failed')
@_oauth2_decorator()
def put_template(self, template_name, template, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client put_template function
    Create an index template that will automatically be applied to new indices created.
    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_
    Args:
        template_name (str): Template Name
        template (dict): Template/Schema
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Failed Template
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.indices.put_template(name=template_name, body=template, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.ElasticsearchException as e:
        self.logger.error(e)
        raise DBOperationError('ES put template failed')
@_oauth2_decorator()
def force_merge(self, index, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client forcemerge function
    Force merge one or more indices.
    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html>`_
    Args:
        index (str): Index Name
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Failed Merge
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.indices.forcemerge(index=index, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.ElasticsearchException as e:
        self.logger.error(e)
        raise DBOperationError('ES force merge failed')
@_oauth2_decorator()
def update_settings(self, index, body, request_timeout=60, **kwargs):
    """ Function wrapped around Elasticsearch python client put_settings function
    Change specific index level settings in real time.
    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html>`
    Args:
        index (str): Index Name
        body (dict): Index settings
        request_timeout (int, optional): Request timeout set value. Defaults to 60.
    Raises:
        DBOperationError: Failed Update
    Returns:
        dict: Returns a dictionary with responses
    """
    # NOTE(review): ``request_timeout`` is accepted but ignored; the configured
    # es_configs['timeout'] is always forwarded -- confirm intent.
    try:
        return self.db.indices.put_settings(index=index, body=body, request_timeout=self.es_configs['timeout'], **kwargs)
    except elasticsearch.ElasticsearchException as e:
        self.logger.error(e)
        raise DBOperationError('ES update settings failed')
@_oauth2_decorator()
def exists_template(self, template_name, request_timeout=60, **kwargs):
""" Function wrapped around Elasticsearch python client exists_template function
Return a boolean indicating whether given template exists.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`
Args:
template_name (str): Template name
request_timeout (int, optional): Request timeout set value. Defaults to 60.
Raises:
DBOperationError: Failed Search
Returns:
dict: Returns a dictionary with responses
"""
try:
return self.db.indices.exists_template(template_name, request_timeout=self.es_configs['timeout'], **kwargs)
except elasticsearch.ElasticsearchException as e:
self.logger.error(e)
raise DBOperationError('ES template search failed')
def bulk_update(self, index, data, **kwargs):
""" Bulk update function for ES
This function will update documents in bulk fashion with a single request to ES
Args:
index (str): Index name
data (list): List of Elasticsearch update document bodies. Must include document Id as `doc_id` in
each update document body
Raises:
KeyError: Invalid Key
DBOperationError: Connection Error
DBOperationError: Failed Update
Returns:
dict: Summary of status of all update operations
"""
op_descriptor = {
'update': {
'_index': index
}
}
all_json_data = ""
for doc in data:
if 'doc_id' in doc:
op_descriptor['update']['_id'] = doc.pop('doc_id')
update_json_doc = '{0}\n{1}'.format(json.dumps(op_descriptor), json.dumps({"doc": doc}))
all_json_data = '{0}\n{1}'.format(all_json_data, update_json_doc)
else:
raise KeyError('doc_id key missing or invalid')
all_json_data = all_json_data + '\n'
try:
pub_response = self._requests_post(
uri=self.es_configs['idx_uri'] + "/_bulk/",
data=all_json_data,
**kwargs
)
if 'errors' in (json.loads(pub_response.text)).keys():
if json.loads(pub_response.text)['errors'] is False:
response = {
"All_updated": True
}
else:
response = {
"All_updated": False
}
else:
response = json.loads(pub_response.text)
return response
except elasticsearch.exceptions.ConnectionError:
raise DBOperationError('ES bulk update failed. Connection Error')
except elasticsearch.ElasticsearchException as e:
self.logger.error(e)
raise DBOperationError('ES bulk update failed')
def bulk_delete(self, index, doc_id_list, **kwargs):
""" Bulk delete function for ES
This function will delete documents in bulk fashion with a single request to ES
Args:
index (str): Index name
doc_id_list (list): List of Elasticsearch document IDs to be deleted
Raises:
DBOperationError: Connection Error
DBOperationError: Failed Update
Returns:
dict: Summary of status of all delete operation.
"""
all_json_data = ""
for id in doc_id_list:
op_descriptor = {
'delete': {
'_index': index,
'_id': id
}
}
all_json_data = '{0}\n{1}'.format(all_json_data, json.dumps(op_descriptor))
all_json_data = all_json_data + '\n'
try:
pub_response = self._requests_post(
self.es_configs['idx_uri'] + "/_bulk/",
data=all_json_data,
**kwargs)
if 'errors' in (json.loads(pub_response.text)).keys():
if json.loads(pub_response.text)['errors'] is False:
response = {
"All_deleted": True
}
else:
response = {
"All_deleted": False
}
else:
response = json.loads(pub_response.text)
return response
except elasticsearch.exceptions.ConnectionError:
raise DBOperationError('ES bulk update failed. Connection Error')
except elasticsearch.ElasticsearchException as e:
self.logger.error(e)
raise DBOperationError('ES bulk update failed')
@staticmethod
def parse_search_result(data):
""" Parse search result from ES to remove metadata fields
Args:
data (dict): [description]
Raises:
Exception: Failed Parse
Returns:
list: list of hits
"""
try:
result = []
hits = data['hits']['hits']
for hit in hits:
result.append(hit.get('_source'))
return result
except Exception as e:
raise Exception('Could not parse search result. {}'.format(e))
| 2.4375 | 2 |
geniepy/tests/unit/test_pcp_classifier.py | geralddzx/genie | 1 | 12766900 | <gh_stars>1-10
"""Test base classifier class."""
import pandas as pd
import geniepy.classmgmt.classifiers as clsfr
# Classifier name shared by the fixture and the assertions below.
CLSFR_NAME = "pub_score"
# NOTE(review): unused by the current tests — presumably a fixture for future
# predict() tests; confirm before removing.
SAMPLE_RECORD = pd.Series(["g", "e", "e", "k", "s"])
class TestClassifier:
    """Base Classifier pytest class."""

    # Consistency fix: use the shared CLSFR_NAME constant instead of repeating
    # the "pub_score" literal, so fixture and assertions cannot drift apart.
    classifier = clsfr.Classifier(CLSFR_NAME)

    def test_constructor(self):
        """Test classifier object is created."""
        assert self.classifier is not None
        assert self.classifier.name == CLSFR_NAME
        assert self.classifier.is_trained is False

    def test_predict_not_trained(self):
        """Brand new classifier is not trained, should return the error score."""
        self.classifier = clsfr.Classifier(CLSFR_NAME)
        assert self.classifier.is_trained is False
        prediction = self.classifier.predict(None)
        assert isinstance(prediction, float)
        assert prediction == clsfr.ERROR_SCORE

    def test_predict_none(self):
        """Predict should always return a number so doesn't halt classmgr."""
        prediction = self.classifier.predict(None)
        assert isinstance(prediction, float)
        assert prediction == clsfr.ERROR_SCORE
| 2.859375 | 3 |
src/frequent_phrase_mining/frequent_pattern_mining.py | paperplanet/SegPhrase | 275 | 12766901 | from sets import Set
def frequentPatternMining(tokens, patternOutputFilename, threshold):
    """Apriori-style frequent phrase mining over a token stream (Python 2).

    Grows contiguous token patterns one token at a time (up to length 6),
    keeping only patterns occurring at least `threshold` times. '$' tokens act
    as hard phrase boundaries. Each frequent pattern and its count is also
    written to `patternOutputFilename` as "pattern,count" lines.

    Returns the list of frequent patterns (all lengths, discovery order).
    """
    # Maps pattern string -> list of positions of the pattern's LAST token.
    # NOTE(review): the name shadows the builtin `dict`; kept byte-identical
    # here — consider renaming in a behavioural change.
    dict = {}
    tokensNumber = len(tokens)
    # Seed with single tokens; '$' separators are never part of a pattern.
    for i in xrange(tokensNumber):
        token = tokens[i]
        if token == '$':
            continue
        if token in dict:
            dict[token].append(i)
        else:
            dict[token] = [i]
    print "# of distinct tokens = ", len(dict)
    patternOutput = open(patternOutputFilename, 'w')
    frequentPatterns = []
    patternLength = 1
    while (len(dict) > 0):
        # Cap pattern growth at 6 tokens.
        if patternLength > 6:
            break
        #print "working on length = ", patternLength
        patternLength += 1
        newDict = {}
        for pattern, positions in dict.items():
            occurrence = len(positions)
            if occurrence >= threshold:
                frequentPatterns.append(pattern)
                patternOutput.write(pattern + "," + str(occurrence) + "\n")
                # Extend each surviving pattern by its right neighbour, unless
                # the neighbour is the '$' boundary or past the end.
                for i in positions:
                    if i + 1 < tokensNumber:
                        if tokens[i + 1] == '$':
                            continue
                        newPattern = pattern + " " + tokens[i + 1]
                        if newPattern in newDict:
                            newDict[newPattern].append(i + 1)
                        else:
                            newDict[newPattern] = [i + 1]
        # Drop the current generation and advance to the extended patterns.
        dict.clear()
        dict = newDict
    patternOutput.close()
    return frequentPatterns
| 3.0625 | 3 |
server/api.py | sideral/worney | 0 | 12766902 | import nltk
import random
import re
from flask import Flask, request
from flask_restful import Resource, Api
from gensim.models import KeyedVectors
from flask_cors import CORS
from functools import lru_cache
# Flask application, REST wrapper and CORS (the frontend is served separately).
app = Flask(__name__)
api = Api(app)
CORS(app)
class RandomWordPair(Resource):
    """REST resource returning a pair of words linked by a similarity chain."""

    def get(self, degrees):
        """Return a random start word and a word `degrees` similarity hops away.

        Args:
            degrees: number of similarity jumps to walk from the first word.
        Returns:
            dict with a two-element 'words' list.
        """
        nouns = get_nouns()
        # random.choice is the idiomatic equivalent of indexing with randint.
        first_word = random.choice(nouns)
        second_word = first_word
        for _ in range(degrees):
            second_word = random.choice(get_similar_words(second_word))
        return {'words': [first_word, second_word]}
class SimilarWords(Resource):
    """REST resource returning nouns similar to the given word."""

    def get(self, word):
        """Look up neighbours of `word` in the embedding model."""
        similar = get_similar_words(word)
        return {'words': similar}
# Route registration: degree-separated random pair and nearest-neighbour lookup.
api.add_resource(RandomWordPair, '/word-pair/<int:degrees>')
api.add_resource(SimilarWords, '/similar-words/<string:word>')
def get_similar_words(word):
    """Return up to 25 'interesting' nouns similar to `word`.

    Takes the 50 nearest embedding neighbours, keeps only singular/plural
    nouns (POS tags NN/NNS), and drops filler words plus anything containing
    digits, '@' or '.'.

    NOTE(review): if `word` is missing from the model vocabulary,
    `most_similar` raises — the original comment claimed a 404 is returned but
    no such handling exists here; confirm callers handle the exception.
    """
    model = load_vectors()
    similar = model.most_similar([word], topn=50)
    result = [item[0] for item in similar]
    tagged = nltk.pos_tag(result)
    nouns = [tag[0] for tag in tagged if tag[1] in ("NN", "NNS")]
    # Filler nouns that make for useless answers; a set for O(1) membership.
    common = {"things", "something", "everything", "nothing", "anything", "thing",
              "anyone", "anybody", "nobody", "somebody", "everybody", "everyone", "knows"}
    # Raw string so the escape is explicit; matches digits, '@' or a dot.
    expr = re.compile(r'[0-9]+|@|\.')
    less_common = [noun for noun in nouns
                   if noun not in common and not expr.search(noun)]
    return less_common[0:25]
@lru_cache(maxsize=None)
def load_vectors():
    """Load (once) and memoize the pre-trained KeyedVectors embedding model."""
    return KeyedVectors.load(r"data/embeddings/vectors.300.kv")
@lru_cache(maxsize=None)
def get_nouns():
    """Read and memoize the noun word list shipped with the app."""
    with open('data/nouns.txt', 'r') as source:
        return source.read().splitlines()
if __name__ == '__main__':
    # Development entry point only; use a production WSGI server otherwise.
    app.run(debug=True)
tools/schema.py | kmpm/py-aiogopro | 1 | 12766903 | from datetime import datetime
from collections import namedtuple
import re
from aiogopro.types import CommandType, StatusType
# Python keywords/builtins that must not be emitted as bare attribute names.
RESERVED_WORDS = ['type', 'class']
# Indentation units used when generating source text (1-4 levels).
T1 = ' ' * 4
T2 = T1 * 2
T3 = T1 * 3
T4 = T1 * 4
# Prefix prepended to generated sub-mode member names so that members which
# would otherwise start with a digit or collide become valid identifiers.
SUBMODE_PREFIX = {
    'resolution': 'res_',
    'aspect_ratio': 'aspect_',
    'fps': 'Fps_',
    'looping': 'Time_',
    'protune_white_balance': 'color_',
    'protune_iso': 'iso_',
    'protune_iso_min': 'iso_',
    'protune_exposure_time': 'time_',
    'capture_delay': 'delay_',
    'burst_rate': 'rate_',
    'timelapse_rate': 'rate_',
    'nightlapse_rate': 'rate_',
    'short_clip_length': 'length_',
    'protune_ev': 'ev_',
    'timewarp_speed': 'speed_',
    'exposure_time': 'time_',
    'record_resolution': 'r_',
    'record_fps': 'fps_',
    'window_size': 'size_',
    'bit_rate': 'rate_',
    'lcd_sleep': 'sleep_',
    'auto_power_down': 'off_',
    'gop_size': 'size_',
    'idr_interval': 'interval_',
    'stream_bit_rate': 'rate_',
    'stream_window_size': 'size_',
    'stream_gop_size': 'size_',
    'stream_idr_interval': 'interval_',
    'lcd_brightness_v2': 'percent_'
}
# Identifier clean-up patterns: dots -> '_', runs of '_' -> single, '%' removed.
rx_replace1 = re.compile(r'([\.])')
rx_replace2 = re.compile(r'([_]{2,})')
rx_remove = re.compile(r'([%])')
def filterbyvalue(seq, value):
    """Yield the elements of `seq` whose `.attribute` equals `value`."""
    return (item for item in seq if item.attribute == value)
def dictToObject(d, name):
    """Recursively convert a dict into a namedtuple called `name`.

    Nested dicts become `<Key>Type` namedtuples and lists are converted
    element-wise with a `<Key>List` type name. Non-dict inputs are returned
    unchanged.

    Bug fix: the original wrote converted values back into the caller's dict;
    this version builds a fresh mapping and leaves the input untouched.
    """
    if not isinstance(d, dict):
        return d
    converted = {}
    for k, v in d.items():
        if isinstance(v, dict):
            converted[k] = dictToObject(v, f"{capitalize(k)}Type")
        elif isinstance(v, list):
            converted[k] = [dictToObject(x, f"{capitalize(k)}List") for x in v]
        else:
            converted[k] = v
    return namedtuple(name, converted.keys())(*converted.values())
def pythonify(text):
    """Turn a display name into a lowercase python-identifier-friendly token."""
    token = str(text).lower().replace(' ', '_')
    token = token.replace('-', '_neg_')
    token = token.replace('+', '_plus_')
    token = token.replace(':', 'to')  # a ratio is always TO
    token = token.replace('/', '_in_')
    token = rx_remove.sub('', token)
    token = rx_replace1.sub('_', token)
    token = rx_replace2.sub('_', token)
    return token
# Compiled once at import time instead of on every call (perf fix).
_SEPARATOR_RX = re.compile(r'([\/:])')


def capitalize(text):
    """CamelCase a display name: split on spaces/underscores, capitalize each
    part, map '/'/':' to '_', drop '.' and '%', and turn '-' into 'Neg'.
    """
    parts = str(text).replace(' ', '_').split('_')
    camel = []
    for part in parts:
        part = _SEPARATOR_RX.sub('_', part).capitalize()
        camel.append(part.replace('.', '').replace('-', 'Neg').replace('%', ''))
    return ''.join(camel)
def prefix_reserved(value, prefix):
    """Prefix names that collide with reserved identifiers; pass others through."""
    return f'{prefix}_{value}' if value in RESERVED_WORDS else value
class SchemaType(object):
    """In-memory representation of a parsed GoPro schema document.

    Collects commands (wifi-capable only), camera modes and status fields so
    they can later be rendered into Python source.
    """

    def __init__(self, schema_version, version):
        self.schema_version = schema_version
        self.version = version
        self.commands = {}
        self.modes = {}
        self.status = {}

    def addCommand(self, cmd):
        """Register a wifi-capable command; non-wifi commands are ignored."""
        if 'wifi' not in cmd['network_types']:
            return
        self.commands[cmd['key']] = CommandType(
            cmd['key'],
            cmd['url'],
            cmd['widget_type'],
            cmd['display_name'],
        )

    def addMode(self, mode):
        """Register a camera mode keyed by its URL path segment."""
        self.modes[mode['path_segment']] = dictToObject(mode, 'ModeType')

    def addStatus(self, groupname, field):
        """Register one status field under its group."""
        self.status.setdefault(groupname, {})[field['name']] = StatusType(**field)

    @staticmethod
    def parse(data):
        """Build a SchemaType from the raw schema dictionary."""
        schema = SchemaType(data['schema_version'], data['version'])
        for cmd in data['commands']:
            schema.addCommand(cmd)
        for mode in (data['modes'] or []):
            schema.addMode(mode)
        for group in data['status']['groups']:
            for field in group['fields']:
                schema.addStatus(group['group'], field)
        return schema
def schema_pythonify(schema, filename):
    """Render the parsed `schema` into a generated Python module at `filename`.

    The module contains a Status class tree, a Command class, a Mode enum
    plus SubMode enums for default settings and per-mode classes for the
    remaining settings.
    """
    header = [
        f'# Autogenerated by {__name__}.schema_pythonify at {datetime.now()}\n'
    ]
    # Names to import from aiogopro.types in the generated file.
    types = []
    # Status options
    types.append('StatusType')
    status = ['\n\n']
    status.append('class Status(object):')
    extra = ''
    for groupname in schema.status.keys():
        status.append(f'{extra} class {capitalize(groupname)}(object):')
        for key, field in schema.status[groupname].items():
            status.append(f' {prefix_reserved(field.name, groupname)} = StatusType("{field.name}", {field.id})')
        # Blank line between subsequent group classes.
        extra = '\n'
    # Commands
    types.append('CommandType')
    command = ['\n\n']
    extra = False
    command.append('class Command(object):')
    for key, cmd in schema.commands.items():
        if extra:
            command.append('')
        extra = True
        command.append('    {0} = CommandType(\n        "{0}",\n        "{1}",\n        "{2}",\n        "{3}")'.format(
            cmd.name,
            cmd.url,
            cmd.widget,
            cmd.display_name
        ))
    # Modes
    # types.append('ModeType')
    mode = ['\n\n']
    othermode = []
    extra = False
    mode.append('class Mode(Enum):')
    submode = [f'\n\nclass SubMode(object):']
    for k, v in schema.modes.items():
        mode.append(f"{T1}{pythonify(k)} = '{v.value}'")
        if v.settings:
            # default settings
            defaults = [x for x in v.settings if x.path_segment == 'default_sub_mode']
            if len(defaults) > 0:
                submode.append(f"\n{T1}class {capitalize(k)}(Enum):")
                for sm in defaults:
                    # SUBMODE_PREFIX keeps generated member names valid.
                    prefix = ''
                    if sm.path_segment in SUBMODE_PREFIX:
                        prefix = SUBMODE_PREFIX[sm.path_segment]
                    for o in sm.options:
                        submode.append(f"{T2}{pythonify(prefix + o.display_name)} = '{o.value}'")
            # non default settings
            others = [x for x in v.settings if x.path_segment != 'default_sub_mode']
            if len(others) > 0:
                othermode.append(f'\n\nclass {capitalize(k)}(object):')
                for sm in others:
                    othermode.append(f"\n{T1}{capitalize(sm.path_segment).upper()} = '{sm.id}'")
                    othermode.append(f"\n{T1}class {capitalize(sm.path_segment)}(Enum):")
                    prefix = ''
                    if sm.path_segment in SUBMODE_PREFIX:
                        prefix = SUBMODE_PREFIX[sm.path_segment]
                    for o in sm.options:
                        othermode.append(f"{T2}{pythonify(prefix + o.display_name)} = '{o.value}'")
    mode = mode + submode + othermode
    # finalize: assemble header + sections and write the module out.
    lines = []
    header.append("from enum import Enum")
    header.append("from aiogopro.types import {0}".format(", ".join(types)))
    lines.append("\n".join(header))
    lines.append("\n".join(status))
    lines.append("\n".join(command))
    lines.append("\n".join(mode))
    lines.append("\n")
    with open(filename, 'w') as f:
        f.writelines(lines)
def command_compare(firsts, seconds, firstname='first', secondname='second'):
    """Print commands present in `firsts` that differ from or are missing in
    `seconds`. Run twice with swapped arguments for a full two-way diff.
    """
    for key, cmdA in firsts.items():
        if key in seconds:
            cmdB = seconds[key]
            if cmdA.url != cmdB.url:
                # Bug fix: the template has five placeholders but only four
                # arguments were supplied, raising IndexError; pass `key` as
                # the command identifier.
                print("Same command {0} different url {1} != {2} ({3}/{4})".format(key, cmdA.url, cmdB.url, firstname, secondname))
        else:
            print('Only in {2}, Command {0}: {1}'.format(cmdA.name.ljust(47), cmdA.display_name, firstname))
def mode_compare(firsts, seconds, firstname='first', secondname='last'):
    """Print modes present in `firsts` that differ from or are missing in
    `seconds`. Run twice with swapped arguments for a full two-way diff.
    """
    for key, modeA in firsts.items():
        if key in seconds:
            modeB = seconds[key]
            if (modeA.value != modeB.value):
                # Bug fix: the template has five placeholders but only four
                # arguments were supplied, raising IndexError; pass `key` as
                # the mode identifier.
                print("Same mode {0} different value {1} != {2} ({3}/{4})".format(key, modeA.value, modeB.value, firstname, secondname))
        else:
            print('Only in {2}, Mode {0}: {1}'.format(modeA.name.ljust(20), modeA.display_name, firstname))
def status_compare(groupname, firsts, seconds, firstname='first', secondname='second'):
    """Print status fields of group `groupname` present in `firsts` that
    differ from, or are missing in, `seconds`.
    """
    for key, statusA in firsts.items():
        if key not in seconds:
            print('Only in {3}, Status {0}.{1}: {2}'.format(groupname, statusA.name.ljust(47), statusA.id, firstname))
            continue
        statusB = seconds[key]
        if statusA.id != statusB.id:
            print("Same status {0}.{1} different id {2} != {3} ({4}/{5})".format(
                groupname, key, statusA.id, statusB.id, firstname, secondname))
def schema_compare(first, second, firstname='first', secondname='second'):
    """Two-way diff of two parsed schemas (commands, modes and status groups).

    Bug fix: status groups were previously compared against the whole
    top-level status mapping (field names vs group names), so every field was
    reported as missing; compare each group against its matching group, and
    also cover groups that exist only in `second`.
    """
    command_compare(first.commands, second.commands, firstname, secondname)
    command_compare(second.commands, first.commands, secondname, firstname)
    mode_compare(first.modes, second.modes, firstname, secondname)
    mode_compare(second.modes, first.modes, secondname, firstname)
    for groupname in set(first.status) | set(second.status):
        first_group = first.status.get(groupname, {})
        second_group = second.status.get(groupname, {})
        status_compare(groupname, first_group, second_group, firstname, secondname)
        status_compare(groupname, second_group, first_group, secondname, firstname)
| 2.21875 | 2 |
getDomainAge/services/notification.py | ljnath/getDomainAge | 2 | 12766904 | from flask import flash
from getDomainAge.models.enums import NotificationCategory
class NotificationService:
    """
    Service class for showing all kinds of notification in the webpage.

    All public methods share the same contract: flash `message` with the
    matching category, doing nothing when the message is empty.
    """

    @staticmethod
    def _notify(message: str, category: NotificationCategory) -> None:
        """Flash `message` under `category` if the message is non-empty."""
        if message:
            flash(message, category.value)

    def notify_success(self, message: str) -> None:
        """
        method to show success message
        :param message: the message to be flashed
        """
        self._notify(message, NotificationCategory.SUCCESS)

    def notify_warning(self, message: str) -> None:
        """
        method to show warning message
        :param message: the message to be flashed
        """
        self._notify(message, NotificationCategory.WARNING)

    def notify_error(self, message: str) -> None:
        """
        method to show danger or error message
        :param message: the message to be flashed
        """
        self._notify(message, NotificationCategory.DANGER)
| 2.765625 | 3 |
web/server/codechecker_server/migrations/report/versions/f8291ab1d6be_fix_setting_analysis_info_id_seq.py | ryankurte/codechecker | 1,601 | 12766905 | <filename>web/server/codechecker_server/migrations/report/versions/f8291ab1d6be_fix_setting_analysis_info_id_seq.py<gh_stars>1000+
"""Fix setting analysis_info_id_seq
Revision ID: f8291ab1d6be
Revises: <PASSWORD>
Create Date: 2021-07-15 16:49:05.354455
"""
# revision identifiers, used by Alembic.
revision = 'f8291ab1d6be'
# NOTE(review): the parent revision hash was redacted to '<PASSWORD>' in this
# copy; restore the real down_revision before running migrations.
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
    """Re-seed analysis_info's id sequence so new inserts do not collide.

    Only needed on PostgreSQL, where the sequence can lag behind manually
    inserted rows; other dialects manage autoincrement themselves, so the
    migration is a no-op there.
    """
    ctx = op.get_context()
    dialect = ctx.dialect.name
    if dialect == 'postgresql':
        # MAX(id) + 1 guarantees the next generated id is past existing rows.
        op.execute("""
            SELECT SETVAL(
                'analysis_info_id_seq',
                (SELECT MAX(id) + 1 FROM analysis_info)
            )
        """)
| 1.453125 | 1 |
pints/tests/shared.py | iamleeg/pints | 0 | 12766906 | <filename>pints/tests/shared.py
#
# Shared classes and methods for testing.
#
# This file is part of PINTS.
# Copyright (c) 2017-2018, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import os
import sys
import shutil
import tempfile
import numpy as np
import pints
# StringIO in Python 3 and 2
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
class StreamCapture(object):
    """
    A context manager that redirects and captures the output stdout, stderr,
    or both.
    """
    def __init__(self, stdout=True, stderr=False):
        super(StreamCapture, self).__init__()
        # True while output is being redirected.
        self._capturing = False
        # Which streams to capture (idiom fix: bool() over `True if x else False`).
        self._stdout_enabled = bool(stdout)
        self._stderr_enabled = bool(stderr)
        # Captured output, filled in when capturing stops.
        self._stdout_captured = None
        self._stderr_captured = None
        # Original stream objects, restored on exit.
        self._stdout_original = None
        self._stderr_original = None
        # In-memory buffers that temporarily replace the real streams.
        self._stdout_buffer = None
        self._stderr_buffer = None

    def __enter__(self):
        """ Called when the context is entered. """
        self._start_capturing()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """ Called when exiting the context. """
        self._stop_capturing()

    @staticmethod
    def _flush(stream):
        """ Flush `stream` if it supports flushing (some fakes do not). """
        try:
            stream.flush()
        except AttributeError:
            pass

    def _start_capturing(self):
        """ Starts capturing output (idempotent). """
        if self._capturing:
            return
        self._capturing = True
        # stdout
        if self._stdout_enabled:
            self._stdout_buffer = StringIO()
            self._stdout_original = sys.stdout
            self._flush(self._stdout_original)
            sys.stdout = self._stdout_buffer
        # stderr
        if self._stderr_enabled:
            self._stderr_buffer = StringIO()
            self._stderr_original = sys.stderr
            self._flush(self._stderr_original)
            sys.stderr = self._stderr_buffer

    def _stop_capturing(self):
        """ Stops capturing and restores the original streams (idempotent). """
        if not self._capturing:
            return
        # stdout
        if self._stdout_enabled:
            self._stdout_buffer.flush()
            sys.stdout = self._stdout_original
            self._stdout_captured = self._stdout_buffer.getvalue()
            self._stdout_buffer = None
        # stderr
        if self._stderr_enabled:
            self._stderr_buffer.flush()
            sys.stderr = self._stderr_original
            self._stderr_captured = self._stderr_buffer.getvalue()
            self._stderr_buffer = None
        self._capturing = False

    def text(self):
        """
        Disables capturing and returns the captured text.
        If only ``stdout`` or ``stderr`` was enabled, a single string is
        returned. If both were enabled a tuple of strings is returned.
        """
        self._stop_capturing()
        if self._stdout_enabled:
            if self._stderr_enabled:
                return self._stdout_captured, self._stderr_captured
            return self._stdout_captured
        return self._stderr_captured  # Could be None
class TemporaryDirectory(object):
    """
    ContextManager that provides a temporary directory to create temporary
    files in. Deletes the directory and its contents when the context is
    exited.
    """
    def __init__(self):
        super(TemporaryDirectory, self).__init__()
        self._dir = None

    def __enter__(self):
        self._dir = os.path.realpath(tempfile.mkdtemp())
        return self

    def path(self, path):
        """
        Returns an absolute path to a file or directory name inside this
        temporary directory, that can be used to write to.
        Example::
            with TemporaryDirectory() as d:
                filename = d.path('test.txt')
                with open(filename, 'w') as f:
                    f.write('Hello')
                with open(filename, 'r') as f:
                    print(f.read())
        """
        if self._dir is None:
            raise RuntimeError(
                'TemporaryDirectory.path() can only be called from inside the'
                ' context.')
        path = os.path.realpath(os.path.join(self._dir, path))
        # Bug fix: a plain prefix compare accepted sibling directories such as
        # '/tmp/abc-evil' for '/tmp/abc'; require the base directory itself or
        # a separator-terminated prefix.
        if path != self._dir and not path.startswith(self._dir + os.sep):
            raise ValueError(
                'Relative path specified to location outside of temporary'
                ' directory.')
        return path

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            shutil.rmtree(self._dir)
        finally:
            self._dir = None

    def __str__(self):
        if self._dir is None:
            return '<TemporaryDirectory, outside of context>'
        else:
            return self._dir
class CircularBoundaries(pints.Boundaries):
    """
    Circular boundaries, to test boundaries that are non-rectangular.
    Arguments:
    ``center``
        The point these boundaries are centered on.
    ``radius``
        The radius (in all directions).
    """
    def __init__(self, center, radius=1):
        super(CircularBoundaries, self).__init__()
        # Validate the center point first.
        center = pints.vector(center)
        if len(center) < 1:
            raise ValueError('Number of parameters must be at least 1.')
        self._center = center
        self._n_parameters = len(center)
        # Validate the radius and store its square (all checks use squares).
        radius = float(radius)
        if radius <= 0:
            raise ValueError('Radius must be greater than zero.')
        self._radius2 = radius ** 2

    def check(self, parameters):
        """ See :meth:`pints.Boundaries.check()`. """
        distance2 = np.sum((parameters - self._center) ** 2)
        return distance2 < self._radius2

    def n_parameters(self):
        """ See :meth:`pints.Boundaries.n_parameters()`. """
        return self._n_parameters
| 1.890625 | 2 |
api/modules/users/model.py | nikochiko/server | 43 | 12766907 | <reponame>nikochiko/server
from django.contrib.auth.models import User
from django.core.validators import URLValidator
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from api.modules.users.enums import PasswordVerificationModeChoice
class Profile(models.Model):
    """Per-user profile data, kept in sync with the auth User via signals."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Avatar URL; validated as a URL but stored as free text.
    profile_image = models.TextField(validators=[URLValidator()], default=None, null=True)
    # Free-form status message shown on the user's profile.
    status = models.TextField(null=True, default=None)
    last_active = models.DateTimeField(null=True)
    # NOTE(review): presumably set after account verification — confirm flow.
    is_verified = models.BooleanField(default=False)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create a Profile for every newly created User."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, **kwargs):
    """Persist the related Profile whenever the User is saved.

    Falls back to creating a Profile for legacy users that predate the signal.
    """
    if hasattr(instance, 'profile'):
        instance.profile.save()
    else:
        # for users already created
        Profile.objects.create(user=instance)
class PasswordVerification(models.Model):
    """One-time verification code used in password reset/change flows."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Short code sent to the user for confirmation.
    code = models.CharField(max_length=6)
    # NOTE(review): max_length is ignored on TextField, and the default stores
    # an enum member rather than its value — a CharField with `.value` would be
    # cleaner, but changing it requires a migration; only documented here.
    mode = models.TextField(max_length=30,
                            default=PasswordVerificationModeChoice.FORGET_PASSWORD,
                            choices=[(tag, tag.value) for tag in PasswordVerificationModeChoice])
    # NOTE(review): auto_now updates on every save; for a creation timestamp
    # auto_now_add is the usual choice — confirm intent before changing.
    created = models.DateTimeField(auto_now=True)
| 2.171875 | 2 |
helpers/write_names_to_txt.py | JSC-NIIAS/TwGoA4aij2021 | 0 | 12766908 | train_imgs_path="path_to_train_images"
test_imgs_path = "path_to_val/test images"
dnt_names = []
import os

# Collect basenames (without the .json extension) of annotations that must
# not be included in the training split.
with open("dont_include_to_train.txt", "r") as dnt:
    for name in dnt:
        name = name.strip("\n")
        # Bug fix: str.strip(".json") strips *characters* ('.', 'j', 's', 'o',
        # 'n') from both ends, mangling names that start or end with any of
        # them; remove the suffix instead. (Redundant .close() calls inside
        # the `with` blocks were also dropped.)
        if name.endswith(".json"):
            name = name[:-len(".json")]
        dnt_names.append(name)
print(dnt_names)

# Training list: every image except the excluded ones.
with open("baseline_train.txt", "w") as btr:
    for file in os.listdir(train_imgs_path):
        if file not in dnt_names:
            btr.write(train_imgs_path + file + "\n")

# Validation/test list: all images, unconditionally.
with open("baseline_val.txt", "w") as bv:
    for file in os.listdir(test_imgs_path):
        bv.write(test_imgs_path + file + "\n")
| 2.640625 | 3 |
hexgrid/enums.py | Notgnoshi/hexgrid | 1 | 12766909 | <reponame>Notgnoshi/hexgrid<gh_stars>1-10
from enum import Enum, auto
class HexagonType(Enum):
    """Hexagons can be either flat-topped or pointy-topped."""
    # Vertex at the top.
    POINTY = auto()
    # Edge at the top.
    FLAT = auto()
class CoordinateSystem(Enum):
    """Describes the different coordinate systems available.

    OFFSET variants differ in whether odd or even rows/columns are shifted.
    """
    OFFSET = auto()
    OFFSET_ODD_ROWS = auto()
    OFFSET_ODD_COLUMNS = auto()
    OFFSET_EVEN_ROWS = auto()
    OFFSET_EVEN_COLUMNS = auto()
    AXIAL = auto()
    CUBIC = auto()
# Module-level aliases so callers can write e.g. `POINTY` instead of spelling
# out the full enum member paths.
POINTY = HexagonType.POINTY
FLAT = HexagonType.FLAT
OFFSET = CoordinateSystem.OFFSET
OFFSET_ODD_ROWS = CoordinateSystem.OFFSET_ODD_ROWS
OFFSET_EVEN_ROWS = CoordinateSystem.OFFSET_EVEN_ROWS
OFFSET_ODD_COLUMNS = CoordinateSystem.OFFSET_ODD_COLUMNS
OFFSET_EVEN_COLUMNS = CoordinateSystem.OFFSET_EVEN_COLUMNS
CUBIC = CoordinateSystem.CUBIC
AXIAL = CoordinateSystem.AXIAL
| 2.78125 | 3 |
DB/bdware.py | danielvicentini/distancebot | 1 | 12766910 | # -*- coding: utf-8 -*-
## Variaveis
# 21.07.2020
# DB Functions for DB writes and reports
import sys
import os
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('bot'))
from bdFluxQueries import TraceReport, OccupancyReport, BestDayReport, BestDay
from config import le_config
"""
Definition of paramater that allow connecting to the SocialDistance DB
"""
from config_shared import INFLUXDB_HOST, INFLUXDB_PORT, INFLUXDB_DBUSER, INFLUXDB_DBPASSWORD, INFLUXDB_DBNAME, INFLUXDB_USER, INFLUXDB_PASSWORD
from config_shared import TABELA_MV, TABELA_TOTAL, TABELA_TRACE
from influxdb import InfluxDBClient
import json
import time
"""TimeSeries DataBase Class."""
# Objeto do banco
# metodo de escrita do trace
class DBClient():
    """
    Handler for the SocialDistance InfluxDB time-series database.
    Instantiating the class opens a connection using the credentials from the
    shared configuration module.
    """
    def __init__(self):
        # Connection parameters come from config_shared.
        self._host = INFLUXDB_HOST
        self._port = INFLUXDB_PORT
        self._user = INFLUXDB_USER
        # Bug fix: this assignment was a '<PASSWORD>' redaction placeholder,
        # which is a syntax error; use the configured credential.
        self._password = INFLUXDB_PASSWORD
        self._dbname = INFLUXDB_DBNAME
        self._client = InfluxDBClient(self._host, self._port, self._user,
                                      self._password, self._dbname)

    def Close(self):
        """Close the underlying InfluxDB connection."""
        self._client.close()

    def peopleLog(self, local: str, userid: str, status: str, origem: str):
        """Record one user entering or leaving a location (trace measurement).

        Returns True once the point has been written.
        """
        json_body = {
            "measurement": TABELA_TRACE,
            "tags": {"local": local},
            "fields": {
                "userid": userid,
                "state": status,
                "origin": origem,
            },
        }
        self._client.write_points([json_body])
        return True

    def TotalCount(self, local: str, total: int, origem: str, people: list):
        """Record the total number of people currently at a location.

        Returns True once the point has been written.
        """
        json_body = {
            "measurement": TABELA_TOTAL,
            "tags": {
                "location": local,
                "origin": origem,
                "people": people,
            },
            "fields": {"count": total},
        }
        self._client.write_points([json_body])
        return True

    def SanityMask(self, local: str, network: str, serial: str, url: str, time: str):
        """Record an event where a person was detected without a mask.

        Returns True once the point has been written.
        """
        json_body = {
            "measurement": TABELA_MV,
            "tags": {
                "local": local,
                "network": network,
                "serial": serial,
            },
            "fields": {"url": url},
            # Event timestamp supplied by the camera, not the server clock.
            "time": time,
        }
        self._client.write_points([json_body])
        return True

    def ConsultaMask(self, tabela: str, tempo: str):
        """Count mask-violation events in `tabela` over the past `tempo`
        window (e.g. '7d') and return a human-readable summary string.

        NOTE(review): the query is assembled by string concatenation, so
        `tabela` and `tempo` must come from trusted code, never user input.
        """
        query = 'SELECT count(url) from ' + tabela
        try:
            consulta = self._client.query(query + " where time > now() - " + tempo)
            resultado = list(consulta.get_points())
            try:
                x = resultado[0]['count']
            except (IndexError, KeyError):
                # No rows returned means no events in the window.
                x = 0
            texto = f"{x} events in the past {tempo} time."
        except Exception:
            # Bad filter format or connection problem.
            texto = "Error in the query. Try XXd (for past days)"
        return texto
# BD report
def bd_consulta(tabela,filtro):
    # 21.7.2020
    # Runs reports against the DB.
    # Reports use plain InfluxQL (Mask detection) and Flux for complex reports
    # (BestDay, Occupancy/History and TracePeople).
    # Dispatches on `tabela` (report type) with `filtro` (user-defined filter,
    # usually a point/window in time for the investigation).
    # Returns the report text wrapped in a JSON-like dict: {"msg": ...}.
    msg=""
    # NOTE(review): the globals below are only read, so the `global` statement
    # is unnecessary — kept byte-identical here.
    global TABELA_MV, TABELA_TOTAL, TABELA_TRACE
    if tabela=="totalcount":
        # Reports Social Distance out-of-compliance periods per room/shift.
        # Flux query collects the data set according to the filter.
        try:
            x=OccupancyReport(le_config(), filtro)
        except:
            # Error message when the filter is malformed.
            msg="Query Error"
        try:
            # Format the output message from the query result.
            for b in x['PerShiftHistory']:
                msg=msg+f"Room: {b['location']} Data: {b['day']}-{b['month']}-{b['year']} Shift: {b['shift']} \n"
            print (msg)
        except:
            # Error message when the filter is malformed.
            msg="Error in the query. Try XXd (for past days)"
        if msg=="":
            msg="No data \n"
    elif tabela=="peoplelog":
        # Reports tracing of people (who was close to whom) in a time window.
        params=filtro.split('&')
        # Only continue when the parameter count is correct.
        if len(params)!=3:
            msg="Trace: Missing parameters. Requires: userid, start time, end time. \n"
        elif len(params)==3:
            # Parameters OK, continue.
            personid=params[0]
            start=params[1]
            end=params[2]
            print(params)
            try:
                # Run the trace query.
                print ("trace")
                x=TraceReport(personid,start,end)
                print (x)
                minhalista=list()
            except:
                # Error message when the filter is malformed.
                msg="Query Error.Format: personid: username, start: YYYY-MM-DD, end: YYYY-MM-dd"
            try:
                # Assemble the response: unique list of nearby user ids.
                for b in x:
                    minhalista.append(b['userid'])
                minhalista=list(dict.fromkeys(minhalista))
                msg="List of users close: \n"
                for b in minhalista:
                    msg=msg+f"{b} \n"
                if len(minhalista)==0:
                    msg="No data."
            except:
                msg="No Data or Query Error. \n"
    elif tabela=="bestday":
        # Report the best day(s) to go to the office.
        # Data set for the requested period.
        #try:
        x=BestDayReport(filtro)
        # Consolidate the best days during business hours (9-12).
        y=BestDay(x,9,12)
        print (y)
        # Build the message.
        for b in y:
            msg=msg+f"Room: {b['location']} - Best Days: {b['bestday']} \n"
        print (msg)
        #return msg
        #except:
            #msg="No Data and/or Query Error. \n"
            #return "No Data and/or Query Error. \n"
    elif tabela=="sanityMask":
        # Count people-without-mask events in the given time window.
        banco=DBClient()
        tabela=TABELA_MV
        msg=banco.ConsultaMask(tabela,filtro)
        banco.Close()
    else:
        msg="Table information not found."
    # NOTE(review): this local shadows the imported `json` module inside this
    # function — kept byte-identical here.
    json = { "msg":msg}
    return json
# BD update via JSON content
def bd_update(json_content):
    # 21.7.2020
    """Update the database according to the data type and parameters sent
    as a JSON content.

    The "type" field of the payload selects which DB write is performed.

    Type 1 - raw data of a user logging in/out of a location::

        {
            "type": "peoplelog",
            "local": "LOG1",
            "origem": "python",
            "userid": "dvicenti",
            "status": "entrou"
        }

    Type 2 - total number of people in a room::

        {
            "type": "totalcount",
            "local": "SALA_log2",
            "origin": "camera",
            "total": 100,
            "people": "people1@email,people2@email"
        }

    Type 3 - people detected without a mask::

        {
            "type": "sanitymask",
            "local": "SALA-Log3",
            "network": "XPTO",
            "serial": "XPTO",
            "url": "http://x.com/foto.png",
            "time": "2018-03-28T8:01:00Z"
        }

    Returns "ok" on success and "erro" on a malformed payload or a DB
    write failure.

    NOTE(review): the "totalcount" example above advertises "local"/"total"
    keys, but the dispatch below reads "location"/"count" -- confirm which
    field names the senders actually use.
    """
    # Check that the payload carries the expected type discriminator.
    # TypeError is caught as well so a non-dict payload is rejected cleanly
    # (the original bare ``except:`` also swallowed KeyboardInterrupt).
    try:
        tipo = json_content["type"]
    except (KeyError, TypeError):
        print ("not a valid json expected")
        print (json_content)
        return "erro"
    # Open the DB only after the basic validation passed, so an invalid
    # payload does not leak a connection.
    banco = DBClient()
    try:
        # Dispatch to the appropriate DB write for the payload type.
        if tipo == "peoplelog":
            banco.peopleLog(json_content["local"], json_content["userid"], json_content["status"], json_content["origem"])
        elif tipo == "totalcount":
            banco.TotalCount(json_content["location"], json_content["count"], json_content['origin'], json_content['people'])
        elif tipo == "sanitymask":
            banco.SanityMask(json_content["local"], json_content['network'], json_content['serial'], json_content['url'], json_content["time"])
        # returns ok if ok
        print (json_content)
        return "ok"
    except Exception:
        # Missing fields or a DB failure: report the error instead of
        # letting the exception propagate to the caller.
        print ("missing fields or BD error")
        print (json_content)
        return "erro"
    finally:
        # Always release the DB connection, even on the error path (the
        # original code leaked it whenever the write failed).
        banco.Close()
solutions/reverse-bits.py | Shuailong/Leetcode | 3 | 12766911 | <reponame>Shuailong/Leetcode
#!/usr/bin/env python
# encoding: utf-8
"""
reverse-bits.py
Created by Shuailong on 2016-03-02.
https://leetcode.com/problems/reverse-bits/.
"""
class Solution(object):
    def reverseBits(self, n):
        """Reverse the bits of a 32-bit unsigned integer.

        :type n: int
        :rtype: int
        """
        reversed_value = 0
        bits_consumed = 0
        # Peel bits off the low end of n and push them onto reversed_value.
        while n:
            reversed_value = (reversed_value << 1) | (n & 1)
            n >>= 1
            bits_consumed += 1
        # Pad with zeros so the result occupies the full 32-bit width.
        return reversed_value << (32 - bits_consumed)
def main():
    """Exercise Solution.reverseBits with the sample LeetCode input."""
    solution = Solution()
    n = 43261596
    # print(...) works on both Python 2 and 3; the original bare
    # ``print expr`` statement is a SyntaxError under Python 3.
    print(solution.reverseBits(n))


if __name__ == '__main__':
    main()
| 3.375 | 3 |
doxhooks/preprocessors.py | nre/Doxhooks | 1 | 12766912 | """
General-purpose and HTML lexical preprocessors.
The `preprocessors <preprocessor>`:term: accept lines of text
(`Preprocessor.insert_lines`) and files (`Preprocessor.insert_file`). A
preprocessor remembers all the input files that it opens
(`Preprocessor.input_paths`).
Preprocessor `directives <preprocessor directive>`:term: and variables
(also known as `nodes <preprocessor node>`:term:) are distinguished from
the source text by customisable delimiters (`directive_delimiter`,
`node_delimiters`, `code_nodes`).
The HTML preprocessor recognises character references (also known as
character entities) and can replace them with characters, other
character references, etc (`HTMLPreprocessor.character_references`).
Warnings about unknown character references can be suppressed
(`HTMLPreprocessor.suppress_character_reference_warnings`).
Exports
-------
Preprocessor
A general-purpose lexical preprocessor.
HTMLPreprocessor
A lexical preprocessor for HTML.
directive_delimiter
Customise the opening delimiter of the preprocessor directives.
node_delimiters
Customise the delimiters of the preprocessor *nodes*.
code_nodes
Modify a preprocessor to use code-syntax-friendly *node* delimiters.
"""
import inspect
import re
import shlex
import doxhooks.console as console
__all__ = [
"HTMLPreprocessor",
"Preprocessor",
"code_nodes",
"directive_delimiter",
"node_delimiters",
]
def _compile_match_directive(opening_delimiter):
# Return a regex match method for a preprocessor directive pattern.
#
# A directive can be distinguished from a node. The keyword of a
# directive is immediately followed by whitespace, whereas
# whitespace is not allowed inside a node.
directive_pattern = "".join((
r"(?P<indentation>[ \t]*)", opening_delimiter,
r"(?P<keyword>\w+)(?:[ \t]+(?P<block>.+))?[ \t]*\n"))
# fullmatch is new in Python 3.4.
# return re.compile(directive_pattern).fullmatch
return re.compile(directive_pattern + "$").match
def _compile_replace_nodes(opening_delimiter, closing_delimiter=None):
# Return a regex substitution method for a 'node' pattern.
if closing_delimiter is None:
closing_delimiter = opening_delimiter
node_pattern = "".join((
opening_delimiter, r"(?P<identifier>(?:\w+\.)*\w+)",
closing_delimiter))
return re.compile(node_pattern).sub
class Preprocessor:
    """
    A general-purpose lexical preprocessor.

    The default opening delimiter for a preprocessor directive is
    ``##``. This delimiter can be customised with the
    `directive_delimiter` class decorator. The closing delimiter is the
    end of the line.

    The default opening and closing delimiters for a preprocessor *node*
    are both ``##``. These delimiters can be replaced with the
    code-syntax-friendly `code_nodes` class decorator or customised with
    the `node_delimiters` class decorator.

    The `~doxhooks.preprocessor_factories.PreprocessorFactory` not only
    hides the construction of a preprocessor from the caller, but also
    parameterises the types of preprocessor and preprocessor context
    that the caller receives.

    Class Interface
    ---------------
    insert_file
        Push the contents of a file onto the preprocessor stack.
    insert_lines
        Push some lines of text onto the preprocessor stack.
    """

    def __init__(self, context, input_file_domain, output_file):
        """
        Initialise the preprocessor with a context and files.

        Parameters
        ----------
        context : ~doxhooks.preprocessor_contexts.BasePreprocessorContext
            A context that defines and interprets the preprocessor
            directives and variables in the input files.
        input_file_domain : ~doxhooks.file_domains.InputFileDomain
            The input-file domain.
        output_file : TextIO
            An open file object that the preprocessor writes its output
            to.

        Attributes
        ----------
        input_paths : set
            The path to each input file that has been preprocessed.
        """
        self._context = context
        self._input = input_file_domain
        self._output = output_file
        # Indentation prefix applied to output lines; grows while nested
        # content is inserted by a directive (see _eval_directive).
        self._indentation = ""
        self.input_paths = set()

    # Class-level default directive matcher for the ``##`` delimiter; the
    # `directive_delimiter` class decorator overwrites this attribute.
    _match_directive = _compile_match_directive("##")

    def _eval_directive(self, indentation, directive):
        # Tokenise a directive and interpret the tokens in the context.
        # The current indentation is extended by the directive's own
        # indentation, so any lines inserted while interpreting the
        # directive inherit it (see insert_lines).
        self._indentation = indentation + directive.group("indentation")
        keyword_token, block = directive.group("keyword", "block")
        tokens = shlex.split(block, comments=True) if block is not None else ()
        self._context.interpret(keyword_token, *tokens, preprocessor=self)

    # Class-level default node substitution for the ``##`` delimiters; the
    # `node_delimiters` and `code_nodes` class decorators overwrite this.
    _replace_nodes = _compile_replace_nodes("##")

    def _flatten_node(self, node):
        # Recursively flatten a 'node' and return the output text.
        # A node's value may itself contain nodes, hence the recursive
        # substitution below.
        identifier = node.group("identifier")
        node_value = self._context.get(identifier)
        try:
            return self._replace_nodes(self._flatten_node, node_value)
        except Exception:
            console.error_trace("Node `{}`".format(identifier), node_value)
            raise

    def _eval_line(self, line):
        # Evaluate a line of input text and return the output text.
        return self._replace_nodes(self._flatten_node, line)

    def insert_lines(self, lines, name=None):
        """
        Push some lines of text onto the preprocessor stack.

        Parameters
        ----------
        lines : Iterable[str]
            The lines of text.
        name : str or None, optional
            A name for the lines. The name is only used when tracing the
            source of an error. If a name is not provided, the name of
            the function that called `insert_lines` is used. Defaults to
            ``None``.
        """
        indentation = self._indentation
        # Silently fix a deceptive user error:
        # lines should be Iterable[str], but not str[str].
        if isinstance(lines, str):
            lines = lines.splitlines(keepends=True)
        for line_no, line in enumerate(lines, start=1):
            directive = self._match_directive(line)
            try:
                if directive:
                    self._eval_directive(indentation, directive)
                    continue
                output_line = self._eval_line(line)
            except Exception:
                # inspect.stack()[1][3] references the name
                # of the function that called insert_lines:
                name = name or inspect.stack()[1][3] + "()"
                console.error_trace(
                    "In: {}\n >> line {:3}".format(name, line_no), line)
                raise
            if output_line == "\n":
                # Do not write indentation without content.
                self._output.write("\n")
            elif output_line:
                self._output.write(indentation + output_line)
        # Restore the indentation that was current when this call began;
        # interpreting a directive may have changed self._indentation.
        self._indentation = indentation

    def insert_file(self, filename, *, idempotent=False):
        """
        Push the contents of a file onto the preprocessor stack.

        The file path is added to the set of *input paths* opened by
        this `Preprocessor`.

        Parameters
        ----------
        filename : str or None
            The name of the file.
        idempotent : bool, optional
            Keyword-only. Whether to silently ignore the file if it has
            already been preprocessed and written to the output file.
            Defaults to ``False``.

        Raises
        ------
        ~doxhooks.errors.DoxhooksFileError
            If the file cannot be opened.
        """
        # A falsy filename is a no-op.
        if not filename:
            return
        path = self._input.path(filename)
        if idempotent and path in self.input_paths:
            return
        self.input_paths.add(path)
        with self._input.open(filename) as lines:
            self.insert_lines(lines, filename)
class HTMLPreprocessor(Preprocessor):
    """
    A lexical preprocessor for HTML.

    The base class is `Preprocessor`.

    Class Interface
    ---------------
    character_references
        A dictionary of character names and their replacement string
        values.
    suppress_character_reference_warnings
        Suppress warning messages about unknown HTML character
        references.
    """

    character_references = {}
    """
    A dictionary of character names and their replacement string values.

    *dict*

    Each item pairs a character name (also known as a character
    'entity') with its replacement string. The name excludes the ``&``
    and ``;`` delimiters. A decimal or hexadecimal code point may also
    be used as the name: it excludes the preceding ``#``, but a
    hexadecimal code point keeps its ``x`` prefix.

    Defaults to an empty dictionary.
    """

    suppress_character_reference_warnings = False
    """
    Suppress warning messages about unknown HTML character references.

    *bool*

    By default, a warning is written to stderr each time a character
    reference that is not in `self.character_references` is written to
    the output file. Setting this attribute to a 'truthy' value
    silences those warnings. Defaults to ``False``.
    """

    _replace_character_references = re.compile(r"&#?(?P<reference>\w+);").sub

    def _get_character(self, character_reference):
        # Look up the replacement text for one matched character reference.
        reference = character_reference.group("reference")
        try:
            return self.character_references[reference]
        except KeyError:
            if not self.suppress_character_reference_warnings:
                console.warning(
                    "Unknown HTML character reference:", reference)
            # Unknown reference: emit the original reference text unchanged.
            return character_reference.group()

    def _eval_line(self, line):
        # Flatten the nodes first, then substitute character references.
        flattened_line = super()._eval_line(line)
        return self._replace_character_references(
            self._get_character, flattened_line)
def directive_delimiter(opening_delimiter):
    """
    Customise the opening delimiter of the preprocessor directives.

    It is usually desirable for the opening delimiter to start with the
    line-comment delimiter of a particular language (often ``#`` or
    ``//``), followed by additional characters that distinguish the
    directives from ordinary comments. The closing delimiter is the end
    of the line.

    The delimiter must be a valid regular-expression pattern.

    Parameters
    ----------
    opening_delimiter : str
        The opening-delimiter pattern.

    Returns
    -------
    ~collections.abc.Callable
        A decorator for modifying a subclass of `Preprocessor`.
    """
    def apply_to(preprocessor_class):
        # Overwrite the class-level directive matcher with one compiled
        # for the new delimiter.
        preprocessor_class._match_directive = _compile_match_directive(
            opening_delimiter)
        return preprocessor_class
    return apply_to
def node_delimiters(opening_delimiter, closing_delimiter=None):
    """
    Customise the delimiters of the preprocessor *nodes*.

    Both delimiters must be valid regular-expression patterns.

    Parameters
    ----------
    opening_delimiter : str
        The opening-delimiter pattern.
    closing_delimiter : str or None, optional
        The closing-delimiter pattern. ``None`` (the default) reuses
        `opening_delimiter` as the closing delimiter.

    Returns
    -------
    ~collections.abc.Callable
        A decorator for modifying a subclass of `Preprocessor`.
    """
    def apply_to(preprocessor_class):
        # Overwrite the class-level node substitution method with one
        # compiled for the new delimiters.
        preprocessor_class._replace_nodes = _compile_replace_nodes(
            opening_delimiter, closing_delimiter)
        return preprocessor_class
    return apply_to
# Shared substitution method for the code-syntax-friendly node delimiters
# installed by the `code_nodes` class decorator below: matched ``##`` or
# ``$$`` pairs, plus ``'++``/``++'`` (or the double-quoted equivalent).
_replace_code_nodes = _compile_replace_nodes(
    r"(?:(##|\$\$)|(['\"])\+\+)", r"(?:\1|\+\+\2)")
def code_nodes(preprocessor_class):
    """
    Modify a preprocessor to use code-syntax-friendly *node* delimiters.

    These delimiters do not break the syntax of programming languages
    for which ``$`` is a valid character in identifiers. (``#`` is
    usually not a valid character in identifiers.)

    ===================== ====================== ===================
    Preprocessor variable Preprocessor input     Preprocessor output
    ===================== ====================== ===================
    ``my_var = "x"``      ``$$my_var$$ = 0``     ``x = 0``
                          ``obj.$$my_var$$ = 0`` ``obj.x = 0``
    ``my_str = "abc"``    ``s = '##my_str##'``   ``s = 'abc'``
                          ``s = "##my_str##"``   ``s = "abc"``
    ``my_num = 1.23``     ``i = '++my_num++'``   ``i = 1.23``
                          ``i = "++my_num++"``   ``i = 1.23``
    ``my_bool = "true"``  ``ok = '++my_bool++'`` ``ok = true``
                          ``ok = "++my_bool++"`` ``ok = true``
    ``my_bool = True``    ``ok = "++my_bool++"`` ``ok = true`` [1]_
                          ``ok = "++my_bool++"`` ``ok = True`` [2]_
    ===================== ====================== ===================

    .. [1] With `doxhooks.preprocessor_contexts.lowercase_booleans`.
    .. [2] With `doxhooks.preprocessor_contexts.startcase_booleans`.

    Parameters
    ----------
    preprocessor_class : type
        The subclass of `Preprocessor` to be modified.

    Returns
    -------
    type
        The modified subclass of `Preprocessor`.
    """
    # Swap in the shared module-level substitution method that recognises
    # the ``$$``/``##``/``++`` delimiter pairs described in the table above.
    preprocessor_class._replace_nodes = _replace_code_nodes
    return preprocessor_class
| 2.75 | 3 |
lldb/test/API/lang/swift/swiftieformatting/TestSwiftieFormatting.py | RLovelett/llvm-project | 0 | 12766913 | <gh_stars>0
# TestSwiftieFormatting.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that data formatters honor Swift conventions
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftieFormatting(TestBase):
    """Check that lldb data formatters honor Swift conventions for bridged
    Foundation values (NSString, NSData, NSNumber, NSURL) and for array
    element summaries."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        TestBase.setUp(self)

    @expectedFailureAll(bugnumber="rdar://60396797",
                        setting=('symbols.use-swift-clangimporter', 'false'))
    @skipUnlessDarwin
    @swiftTest
    def test_swiftie_formatting(self):
        """Test that data formatters honor Swift conventions"""
        self.build()
        lldbutil.run_to_source_breakpoint(
            self, 'Set breakpoint here', lldb.SBFileSpec('main.swift'))

        swcla = self.frame().FindVariable("swcla")
        swcla.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)
        swcla.SetPreferSyntheticValue(True)

        # assertEqual (rather than assertTrue on an ``==`` expression)
        # reports both the actual and the expected summary on failure.
        ns_a = swcla.GetChildMemberWithName("ns_a")
        self.assertEqual(ns_a.GetSummary(), '"Hello Swift"',
                         "ns_a summary wrong")
        ns_d = swcla.GetChildMemberWithName("ns_d")
        self.assertEqual(ns_d.GetSummary(), '0 bytes', "ns_d summary wrong")

        # Int is 32 bits wide on these architectures, 64 bits elsewhere.
        IntWidth = 64
        if self.getArchitecture() in ['arm', 'armv7', 'armv7k', 'i386']:
            IntWidth = 32
        ns_n = swcla.GetChildMemberWithName("ns_n")
        self.assertEqual(ns_n.GetSummary(), "Int%d(30)" % IntWidth,
                         "ns_n summary wrong")
        ns_u = swcla.GetChildMemberWithName("ns_u")
        self.assertEqual(ns_u.GetSummary(),
                         '"page.html -- http://www.apple.com"',
                         "ns_u summary wrong")

        # Repeat the same checks on the result of expression evaluation.
        swcla = self.frame().EvaluateExpression("swcla")
        swcla.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)
        swcla.SetPreferSyntheticValue(True)

        ns_a = swcla.GetChildMemberWithName("ns_a")
        self.assertEqual(ns_a.GetSummary(), '"Hello Swift"',
                         "ns_a summary wrong")
        ns_d = swcla.GetChildMemberWithName("ns_d")
        self.assertEqual(ns_d.GetSummary(), '0 bytes', "ns_d summary wrong")
        ns_n = swcla.GetChildMemberWithName("ns_n")
        self.assertEqual(ns_n.GetSummary(), "Int%d(30)" % IntWidth,
                         "ns_n summary wrong")
        ns_u = swcla.GetChildMemberWithName("ns_u")
        self.assertEqual(ns_u.GetSummary(),
                         '"page.html -- http://www.apple.com"',
                         "ns_u summary wrong")

        nsarr = self.frame().FindVariable("nsarr")
        nsarr.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)
        nsarr.SetPreferSyntheticValue(True)
        nsarr0 = nsarr.GetChildAtIndex(0)
        nsarr0.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)
        nsarr0.SetPreferSyntheticValue(True)
        nsarr1 = nsarr.GetChildAtIndex(1)
        nsarr1.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)
        nsarr1.SetPreferSyntheticValue(True)
        nsarr3 = nsarr.GetChildAtIndex(3)
        nsarr3.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)
        nsarr3.SetPreferSyntheticValue(True)

        self.assertEqual(nsarr0.GetSummary(), "Int%d(2)" % IntWidth,
                         'nsarr[0] summary wrong')
        self.assertEqual(nsarr1.GetSummary(), "Int%d(3)" % IntWidth,
                         'nsarr[1] summary wrong')
        self.assertEqual(nsarr3.GetSummary(), "Int%d(5)" % IntWidth,
                         'nsarr[3] summary wrong')

        self.expect(
            'frame variable -d run nsarr[4] --ptr-depth=1',
            substrs=[
                '"One"',
                '"Two"',
                '"Three"'])
        self.expect(
            'frame variable -d run nsarr[5] --ptr-depth=1',
            substrs=[
                ("Int%d(1)" % IntWidth),
                ("Int%d(2)" % IntWidth),
                ("Int%d(3)" % IntWidth)])
if __name__ == '__main__':
    import atexit
    # Initialise LLDB once and register its teardown for interpreter exit.
    lldb.SBDebugger.Initialize()
    atexit.register(lldb.SBDebugger.Terminate)
    unittest2.main()
| 2.03125 | 2 |
cgi-bin/serialize.py | autorouting/main | 3 | 12766914 | <gh_stars>1-10
def serializeCgiToServer(coors):
    """Serialize an n-by-2 list of coordinate pairs into UTF-8 bytes.

    Wire format: ``"x,y;"`` per coordinate, terminated by a newline.
    """
    pieces = []
    for coor in coors:
        # Comma between the x and y coordinate, semicolon after each pair.
        pieces.append(str(coor[0]) + "," + str(coor[1]) + ";")
    pieces.append("\n")  # terminating character
    return "".join(pieces).encode('utf-8')
def deserializeCgiToServer(string):
    """Parse UTF-8 bytes produced by `serializeCgiToServer` back into a
    list of ``[x, y]`` float pairs."""
    text = string.decode('utf-8')
    # The trailing "\n" lives in the last chunk, which is discarded.
    return [[float(value) for value in chunk.split(",")]
            for chunk in text.split(";")[:-1]]
def serializeServerToCgi(distanceMatrix):
    """Serialize a matrix of distances into UTF-8 bytes.

    Wire format: every distance is followed by ``","``, every row by
    ``";"``, and the whole message by a newline.
    """
    pieces = []
    for row in distanceMatrix:
        for distance in row:
            pieces.append(str(distance) + ",")
        pieces.append(";")
    pieces.append("\n")  # terminating character
    return "".join(pieces).encode('utf-8')
def deserializeServerToCgi(string):
    """Parse UTF-8 bytes produced by `serializeServerToCgi` back into a
    matrix of floats."""
    text = string.decode('utf-8')
    matrix = []
    # The final chunk only holds the trailing "\n", so it is dropped.
    for row_text in text.split(";")[:-1]:
        # Each row ends with a trailing comma; drop the empty last cell.
        cells = row_text.split(",")[:-1]
        matrix.append([float(cell) for cell in cells])
    return matrix
def testFunctions():
    """Smoke-test the round trip of both serialization directions."""
    coors = deserializeCgiToServer(serializeCgiToServer([[1, 2]]))
    print(coors)
    distanceMatrix = deserializeServerToCgi(serializeServerToCgi([[10, 6, 5]]))
    print(distanceMatrix)
#if __name__ == '__main__':
# testFunctions() | 3.078125 | 3 |
test/test_path.py | afermanian/signatory | 156 | 12766915 | <gh_stars>100-1000
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
"""Tests the Path class."""
import copy
import gc
import pytest
import random
import torch
import warnings
import weakref
from helpers import helpers as h
from helpers import validation as v
tests = ['Path']
depends = ['signature', 'logsignature']
signatory = v.validate_tests(tests, depends)
def _update_lengths_update_grads(maxlength):
update_lengths = []
update_grads = []
num = int(torch.randint(low=0, high=3, size=(1,)))
for _ in range(num):
update_lengths.append(int(torch.randint(low=1, high=maxlength, size=(1,))))
update_grads.append(random.choice([True, False]))
return update_lengths, update_grads
def test_path():
    """Tests that Path behaves correctly."""
    # Test small edge cases thoroughly
    # NOTE: a stream of length 1 always gets a basepoint (its candidate
    # basepoints exclude False); a stream of length 2 may omit it.
    for device in h.get_devices():
        for batch_size in (1, 2):
            for input_stream, basepoints in zip((1, 2), ((True, h.without_grad, h.with_grad),
                                                         (False, True, h.without_grad, h.with_grad))):
                for input_channels in (1, 2):
                    for depth in (1, 2):
                        for scalar_term in (True, False):
                            for path_grad in (False, True):
                                basepoint = random.choice(basepoints)
                                update_lengths, update_grads = _update_lengths_update_grads(3)
                                _test_path(device, path_grad, batch_size, input_stream, input_channels, depth,
                                           basepoint, update_lengths, update_grads, scalar_term, extrarandom=False,
                                           which='all')

    # Randomly test larger cases
    for _ in range(50):
        device = random.choice(h.get_devices())
        batch_size = random.choice((1, 2, 5))
        input_stream = random.choice([3, 6, 10])
        input_channels = random.choice([1, 2, 6])
        depth = random.choice([1, 2, 4, 6])
        basepoint = random.choice([False, True, h.without_grad, h.with_grad])
        path_grad = random.choice([False, True])
        update_lengths, update_grads = _update_lengths_update_grads(10)
        scalar_term = random.choice([False, True])
        _test_path(device, path_grad, batch_size, input_stream, input_channels, depth,
                   basepoint, update_lengths, update_grads, scalar_term, extrarandom=True, which='random')

    # Do at least one large test
    for device in h.get_devices():
        _test_path(device, path_grad=True, batch_size=5, input_stream=10, input_channels=6, depth=6,
                   basepoint=True, update_lengths=[5, 6], update_grads=[False, True], scalar_term=False,
                   extrarandom=False, which='none')
def _randint(value):
return torch.randint(low=0, high=value, size=(1,)).item()
def _test_path(device, path_grad, batch_size, input_stream, input_channels, depth, basepoint, update_lengths,
               update_grads, scalar_term, extrarandom, which):
    """Build a signatory.Path and test it, together with objects derived
    from it (shallow copies, shuffles and ``__getitem__`` views)."""
    path = h.get_path(batch_size, input_stream, input_channels, device, path_grad)
    basepoint = h.get_basepoint(batch_size, input_channels, device, basepoint)
    path_obj = signatory.Path(path, depth, basepoint=basepoint, scalar_term=scalar_term)

    # Reconstruct the full path (including any basepoint) as a plain tensor,
    # to compare the Path object's results against direct computations.
    if isinstance(basepoint, torch.Tensor):
        full_path = torch.cat([basepoint.unsqueeze(1), path], dim=1)
    elif basepoint is True:
        full_path = torch.cat([torch.zeros(batch_size, 1, input_channels, device=device, dtype=torch.double), path],
                              dim=1)
    else:
        full_path = path

    # Derived objects are only tested when no gradients are involved.
    if not path_grad and not (isinstance(basepoint, torch.Tensor) and basepoint.requires_grad):
        backup_path_obj = copy.deepcopy(path_obj)

        # derived objects to test
        copy_path_obj = copy.copy(path_obj)
        shuffle_path_obj1, perm1 = path_obj.shuffle()
        shuffle_path_obj2, perm2 = copy.deepcopy(path_obj).shuffle_()
        getitem1 = _randint(batch_size)
        getitem_path_obj1 = path_obj[getitem1]  # integer

        all_derived = [(copy_path_obj, slice(None)),
                       (shuffle_path_obj1, perm1),
                       (shuffle_path_obj2, perm2),
                       (getitem_path_obj1, getitem1)]

        start = _randint(batch_size)
        end = _randint(batch_size)
        getitem2 = slice(start, end)
        getitem3 = torch.randint(low=0, high=batch_size, size=(_randint(int(1.5 * batch_size)),))
        getitem4 = torch.randint(low=0, high=batch_size, size=(_randint(int(1.5 * batch_size)),)).numpy()
        getitem5 = torch.randint(low=0, high=batch_size, size=(_randint(int(1.5 * batch_size)),)).tolist()

        # Empty selections are expected to raise IndexError; anything else
        # raising is a genuine failure.
        try:
            getitem_path_obj2 = path_obj[getitem2]  # slice, perhaps a 'null' slice
        except IndexError as e:
            if start >= end:
                pass
            else:
                pytest.fail(str(e))
        else:
            all_derived.append((getitem_path_obj2, getitem2))

        try:
            getitem_path_obj3 = path_obj[getitem3]  # 1D tensor
        except IndexError as e:
            if len(getitem3) == 0:
                pass
            else:
                pytest.fail(str(e))
        else:
            all_derived.append((getitem_path_obj3, getitem3))

        try:
            getitem_path_obj4 = path_obj[getitem4]  # array
        except IndexError as e:
            if len(getitem4) == 0:
                pass
            else:
                pytest.fail(str(e))
        else:
            all_derived.append((getitem_path_obj4, getitem4))

        try:
            getitem_path_obj5 = path_obj[getitem5]  # list
        except IndexError as e:
            if len(getitem5) == 0:
                pass
            else:
                pytest.fail(str(e))
        else:
            all_derived.append((getitem_path_obj5, getitem5))

        if which == 'random':
            all_derived = [random.choice(all_derived)]
        elif which == 'none':
            all_derived = []

        for derived_path_obj, derived_index in all_derived:
            # tests that the derived objects do what they claim to do
            _test_derived(path_obj, derived_path_obj, derived_index, extrarandom)

            # tests that the derived objects are consistent wrt themselves
            full_path_ = full_path[derived_index]
            if isinstance(derived_index, int):
                full_path_ = full_path_.unsqueeze(0)
            _test_path_obj(full_path_.size(0), input_channels, device, derived_path_obj, full_path_, depth,
                           update_lengths, update_grads, scalar_term, extrarandom)

            # tests that the changes to the derived objects have not affected the original
            assert path_obj == backup_path_obj

    # finally test the original object
    _test_path_obj(batch_size, input_channels, device, path_obj, full_path, depth, update_lengths, update_grads,
                   scalar_term, extrarandom)
def _test_path_obj(batch_size, input_channels, device, path_obj, full_path, depth, update_lengths, update_grads,
                   scalar_term, extrarandom):
    """Test one Path object against its reconstructed full path, before and
    after applying the requested updates."""
    # First of all test a Path with no updates
    _test_signature(path_obj, full_path, depth, scalar_term, extrarandom)
    _test_logsignature(path_obj, full_path, depth, extrarandom)
    _test_equality(path_obj)
    assert path_obj.depth == depth

    if len(update_lengths) > 1:
        # Then test Path with variable amounts of updates
        for length, grad in zip(update_lengths, update_grads):
            new_path = torch.rand(batch_size, length, input_channels, dtype=torch.double, device=device,
                                  requires_grad=grad)
            path_obj.update(new_path)
            # Keep the reference tensor in sync with the updated Path.
            full_path = torch.cat([full_path, new_path], dim=1)
            _test_signature(path_obj, full_path, depth, scalar_term, extrarandom)
            _test_logsignature(path_obj, full_path, depth, extrarandom)
            _test_equality(path_obj)
            assert path_obj.depth == depth
def _test_signature(path_obj, full_path, depth, scalar_term, extrarandom):
    """Check Path.signature against a direct signatory.signature call over
    every candidate (start, end) interval."""
    def candidate(start=None, end=None):
        return path_obj.signature(start, end)

    def true(start, end):
        return signatory.signature(full_path[:, start:end], depth, scalar_term=scalar_term)

    def extra(true_signature):
        # The Path's size/channel bookkeeping must agree with the tensors.
        assert (path_obj.signature_size(-3), path_obj.signature_size(-1)) == true_signature.shape
        assert path_obj.signature_channels() == true_signature.size(-1)
        assert path_obj.shape == full_path.shape
        assert path_obj.channels() == full_path.size(-1)

    _test_operation(path_obj, candidate, true, extra, '_BackwardShortcutBackward', extrarandom)
def _test_logsignature(path_obj, full_path, depth, extrarandom):
    """Check Path.logsignature against a direct signatory.logsignature call,
    for every (or a random subset of) logsignature modes."""
    if extrarandom:
        # Sometimes thin the mode list down to keep the runtime reasonable.
        if random.choice([True, False, False]):
            modes = h.all_modes
        else:
            modes = (h.expand_mode, h.words_mode)
    else:
        modes = h.all_modes

    for mode in modes:
        def candidate(start=None, end=None):
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', message="The logsignature with mode='brackets' has been requested on "
                                                          "the GPU.", category=UserWarning)
                return path_obj.logsignature(start, end, mode=mode)

        def true(start, end):
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', message="The logsignature with mode='brackets' has been requested on "
                                                          "the GPU.", category=UserWarning)
                return signatory.logsignature(full_path[:, start:end], depth, mode=mode)

        def extra(true_logsignature):
            if mode != h.expand_mode:
                assert (path_obj.logsignature_size(-3),
                        path_obj.logsignature_size(-1)) == true_logsignature.shape
                assert path_obj.logsignature_channels() == true_logsignature.size(-1)

        _test_operation(path_obj, candidate, true, extra, '_SignatureToLogsignatureFunctionBackward', extrarandom)
def _test_equality(path_obj):
assert path_obj == path_obj
assert not (path_obj != path_obj)
shuffled_path_obj, perm = path_obj.shuffle()
assert shuffled_path_obj == path_obj[perm]
assert not (shuffled_path_obj != path_obj[perm])
def _test_derived(path_obj, derived_path_obj, derived_index, extrarandom):
    """Check that a derived Path (copy/shuffle/slice) agrees with indexing
    the original Path's results by `derived_index`."""
    # The derived object's stored path must equal the indexed original path.
    def candidate(start=None, end=None):
        return torch.cat(derived_path_obj.path, dim=-2)

    def true(start, end):
        return torch.cat(path_obj.path, dim=-2)[derived_index]

    def extra(true_path):
        pass

    _test_operation(path_obj, candidate, true, extra, None, extrarandom)

    # The derived object's signature must equal the indexed original signature.
    def candidate(start=None, end=None):
        return derived_path_obj.signature(start, end)

    def true(start, end):
        return path_obj.signature(start, end)[derived_index]

    def extra(true_signature):
        pass

    _test_operation(path_obj, candidate, true, extra, '_BackwardShortcutBackward', extrarandom)

    if extrarandom:
        # Sometimes thin the mode list down to keep the runtime reasonable.
        if random.choice([True, False, False]):
            modes = h.all_modes
        else:
            modes = (h.expand_mode, h.words_mode)
    else:
        modes = h.all_modes

    for mode in modes:
        def candidate(start=None, end=None):
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', message="The logsignature with mode='brackets' has been requested on "
                                                          "the GPU.", category=UserWarning)
                return derived_path_obj.logsignature(start, end, mode=mode)

        def true(start, end):
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', message="The logsignature with mode='brackets' has been requested on "
                                                          "the GPU.", category=UserWarning)
                return path_obj.logsignature(start, end, mode=mode)[derived_index]

        def extra(true_logsignature):
            pass

        _test_operation(path_obj, candidate, true, extra, '_SignatureToLogsignatureFunctionBackward', extrarandom)
def _boundaries(length):
yield -length - 1
yield -length
yield -1
yield 0
yield 1
yield length - 1
yield length
yield None
def _start_end(length, extrarandom):
    """Yield (start, end) pairs: every combination of boundary values
    (randomly thinned when `extrarandom`), followed by five random pairs."""
    for begin in _boundaries(length):
        for finish in _boundaries(length):
            # Short-circuit keeps the random stream untouched when
            # extrarandom is False, exactly as before.
            if (not extrarandom) or random.choice([True, False]):
                yield begin, finish
    for _ in range(5):
        begin = int(torch.randint(low=-length, high=length, size=(1,)))
        finish = int(torch.randint(low=-length, high=length, size=(1,)))
        yield begin, finish
def _test_operation(path_obj, candidate, true, extra, backward_name, extrarandom):
    # We perform multiple tests here.
    # Test #1: That the memory usage is consistent
    # Test #2: That the backward 'ctx' is correctly garbage collected
    # Test #3: The forward accuracy of a particular operation
    # Test #4: The backward accuracy of the same operation

    def one_iteration(start, end):
        # Returns the peak CUDA memory allocated while computing the
        # operation on [start, end) (0 when CUDA is unavailable).
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.synchronize()
            torch.cuda.reset_max_memory_allocated()
        try:
            tensor = candidate(start, end)
        except ValueError as e:
            # The candidate rejected the interval: the reference
            # computation must reject it too.
            try:
                true(start, end)
            except ValueError:
                return 0
            else:
                pytest.fail(str(e))
        try:
            true_tensor = true(start, end)
        except ValueError as e:
            pytest.fail(str(e))
        h.diff(tensor, true_tensor)  # Test #3

        extra(true_tensor)  # Any extra tests

        if tensor.requires_grad:
            grad = torch.rand_like(tensor)
            tensor.backward(grad)
            path_grads = []
            for path in path_obj.path:
                if path.grad is None:
                    path_grads.append(None)
                else:
                    path_grads.append(path.grad.clone())
                    path.grad.zero_()
            true_tensor.backward(grad)
            for path, path_grad in zip(path_obj.path, path_grads):
                if path_grad is None:
                    assert (path.grad is None) or (path.grad.nonzero().numel() == 0)
                else:
                    h.diff(path.grad, path_grad)  # Test #4
                    path.grad.zero_()

            ctx = tensor.grad_fn
            assert type(ctx).__name__ == backward_name
            ref = weakref.ref(ctx)
            del ctx
            del tensor
            gc.collect()
            assert ref() is None  # Test #2

        if torch.cuda.is_available():
            torch.cuda.synchronize()
            return torch.cuda.max_memory_allocated()
        else:
            return 0

    # Computations involving the start or not operate differently, so we take the max over both
    memory_used = max(one_iteration(0, None), one_iteration(1, None))

    for start, end in _start_end(path_obj.size(1), extrarandom):
        # This one seems to be a bit inconsistent with how much memory is used on each run, so we give some
        # leeway by doubling
        assert one_iteration(start, end) <= 2 * memory_used  # Test #1
| 1.96875 | 2 |
samples/my_strategy_2.py | fredmontet/tars | 3 | 12766916 | <filename>samples/my_strategy_2.py
from tars import portfolios, traders, strategies
def func():
    """Build and return a PredictionStrategy trading the XETHZUSD pair,
    backed by a virtual portfolio funded with 1000 ZUSD."""
    initial_holdings = {'ZUSD': 1000}
    virtual_trader = traders.VirtualCryptoTrader(
        portfolios.VirtualPortfolio(initial_holdings))
    return strategies.PredictionStrategy(virtual_trader, 'XETHZUSD', 0.2)
| 1.820313 | 2 |
model/networks.py | FiveEyes/CycleGAN | 0 | 12766917 | import torch
import torchvision
from torch import nn
from torch import optim
from torch.nn import init
import torch.nn.functional as F
from torch.autograd import Variable
from torch import autograd
from torchvision import transforms, utils
import os
def init_weights(m):
    """Xavier-normal initialisation (gain 0.02) for conv/linear weights.

    Intended for per-module use (e.g. via ``nn.Module.apply``). Modules
    without a ``weight`` attribute, or whose class name contains neither
    'Conv' nor 'Linear', are left untouched.
    """
    layer_name = m.__class__.__name__
    is_target = ('Conv' in layer_name) or ('Linear' in layer_name)
    if hasattr(m, 'weight') and is_target:
        init.xavier_normal_(m.weight.data, gain=0.02)
def conv_block(in_channels, out_channels, kernel_size, stride, padding, bias=False):
    """Conv2d -> BatchNorm2d -> ReLU, packaged as one nn.Sequential."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel_size,
                  stride=stride, padding=padding, bias=bias),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(True),
    ]
    return nn.Sequential(*layers)
def convt_block(in_channels, out_channels, kernel_size, stride, padding, output_padding, bias=False):
    """ConvTranspose2d -> BatchNorm2d -> ReLU, packaged as one nn.Sequential."""
    upsample = nn.ConvTranspose2d(in_channels, out_channels, kernel_size,
                                  stride=stride, padding=padding,
                                  output_padding=output_padding, bias=bias)
    return nn.Sequential(upsample, nn.BatchNorm2d(out_channels), nn.ReLU(True))
class ResidualBlock(nn.Module):
    """Residual block ``x + F(x)`` with two reflection-padded 3x3 convs.

    The second conv's BatchNorm output is added to the input with no
    trailing ReLU, matching the standard CycleGAN residual design.
    """

    def __init__(self, in_channels):
        super(ResidualBlock, self).__init__()
        layers = [
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_channels, in_channels, 3, stride=1, padding=0),
            nn.BatchNorm2d(in_channels),
            nn.ReLU(True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_channels, in_channels, 3, stride=1, padding=0),
            nn.BatchNorm2d(in_channels),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # Identity shortcut; no activation after the addition.
        return x + self.model(x)
class Generator(nn.Module):
    """ResNet-style CycleGAN generator.

    Architecture: 7x7 conv stem -> two stride-2 downsampling conv blocks ->
    ``n_blocks`` residual blocks -> two stride-2 transposed-conv blocks ->
    7x7 conv + Tanh (outputs in [-1, 1]).

    Fixes vs. the previous revision:
    * ``n_blocks`` was accepted but silently ignored (nine residual blocks
      were hard-coded). It is now honoured; the default is 9 so that
      ``Generator()`` builds exactly the same network as before.
    * ``init_weights(self.model)`` was a no-op: ``nn.Sequential`` has no
      ``weight`` attribute and its class name matches neither 'Conv' nor
      'Linear'. The initialiser is now applied recursively via ``apply``.
    """

    def __init__(self, in_channels=3, out_channels=3, dim=64, n_blocks=9, use_bias=False):
        super(Generator, self).__init__()
        layers = [
            nn.ReflectionPad2d(3),
            conv_block(in_channels, dim, 7, stride=1, padding=0),
            # Downsampling: dim -> 2*dim -> 4*dim channels, spatial /4.
            conv_block(dim, dim * 2, 3, stride=2, padding=1, bias=use_bias),
            conv_block(dim * 2, dim * 4, 3, stride=2, padding=1, bias=use_bias),
        ]
        # Bottleneck of n_blocks residual blocks at 4*dim channels.
        layers.extend(ResidualBlock(dim * 4) for _ in range(n_blocks))
        layers += [
            # Upsampling mirrors the downsampling path.
            convt_block(dim * 4, dim * 2, 3, stride=2, padding=1, output_padding=1, bias=use_bias),
            convt_block(dim * 2, dim, 3, stride=2, padding=1, output_padding=1, bias=use_bias),
            nn.ReflectionPad2d(3),
            nn.Conv2d(dim, out_channels, 7, stride=1, padding=0),
            nn.Tanh(),
        ]
        self.model = nn.Sequential(*layers)
        self.model.apply(init_weights)

    def forward(self, x):
        """Translate images ``x``; output has the same spatial size when
        height and width are multiples of 4."""
        return self.model(x)
class Discriminator(nn.Module):
    """PatchGAN-style discriminator producing a spatial map of realness
    scores (one logit per receptive-field patch).

    Bug fix: the previous revision called ``init_weights(self.model)``,
    which was a no-op (``nn.Sequential`` has no ``weight`` attribute and
    matches neither 'Conv' nor 'Linear'); the initialiser is now applied
    recursively with ``apply``.
    """

    def __init__(self, in_channels=3, dim=64):
        super(Discriminator, self).__init__()
        kw = 4  # kernel size used by every conv in the stack
        self.model = nn.Sequential(
            nn.Conv2d(in_channels, dim, kw, stride=2, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(dim, dim * 2, kw, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(dim * 2),
            nn.LeakyReLU(0.2, True),
            # NOTE(review): the asymmetric paddings (1,2)/(2,1) below are
            # unusual -- confirm they match the intended output patch size.
            nn.Conv2d(dim * 2, dim * 4, kw, stride=2, padding=(1, 2), bias=False),
            nn.BatchNorm2d(dim * 4),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(dim * 4, dim * 8, kw, stride=1, padding=(2, 1), bias=False),
            nn.BatchNorm2d(dim * 8),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(dim * 8, 1, kw, stride=1, padding=1),
        )
        self.model.apply(init_weights)

    def forward(self, x):
        return self.model(x)
class CycleGANLoss(nn.Module):
    """Least-squares GAN objective used by CycleGAN.

    Compares discriminator predictions against a constant real (1.0) or
    fake (0.0) target with mean-squared error.

    Bug fix: the target labels were registered with hard-coded ``.cuda()``
    calls, which made construction fail on CPU-only machines. They are now
    plain buffers, so they follow the module's device via ``.to()``/
    ``.cuda()``.
    """

    def __init__(self):
        super(CycleGANLoss, self).__init__()
        self.register_buffer('real_label', torch.tensor(1.0))
        self.register_buffer('fake_label', torch.tensor(0.0))
        self.loss = nn.MSELoss()

    def __call__(self, pred, target_is_real):
        """Return MSE between ``pred`` and a constant target.

        Args:
            pred: discriminator output of any shape.
            target_is_real: compare against 1.0 if True, else 0.0.
        """
        if target_is_real:
            target_tensor = self.real_label
        else:
            target_tensor = self.fake_label
        return self.loss(pred, target_tensor.expand_as(pred))
def calculate_gradient_penalty(netD, real_images, fake_images):
    """WGAN-GP gradient penalty on random interpolates of real/fake images.

    Draws one mixing coefficient per sample, interpolates between the real
    and fake batches, and penalises the squared deviation of the gradient
    norm from 1, scaled by 10.

    Fixes vs. the previous revision: all hard-coded ``.cuda()`` calls are
    replaced by the device of ``real_images`` (the function now also runs
    on CPU), and the deprecated ``Variable`` wrapper is dropped in favour
    of ``requires_grad_``.

    Returns a scalar tensor that participates in the graph
    (``create_graph=True``), so it can be added to the discriminator loss.
    """
    device = real_images.device
    batch_size = real_images.size(0)
    # One coefficient per sample, broadcast over channel/height/width.
    eta = torch.rand(batch_size, 1, 1, 1, device=device)
    interpolated = eta * real_images + (1 - eta) * fake_images
    # Treat the interpolates as fresh leaves so gradients flow to them.
    interpolated = interpolated.detach().requires_grad_(True)
    # Discriminator scores of the interpolated examples.
    prob_interpolated = netD(interpolated)
    # Gradients of the scores with respect to the interpolates.
    gradients = autograd.grad(outputs=prob_interpolated, inputs=interpolated,
                              grad_outputs=torch.ones_like(prob_interpolated),
                              create_graph=True, retain_graph=True)[0]
    grad_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10.0
    return grad_penalty
| 2.59375 | 3 |
src/cv_evaluate_climate_metrics.py | joAschauer/evaluating_methods_for_reconstructing_large_gaps_in_historic_snow_depth_time_series | 0 | 12766918 | <reponame>joAschauer/evaluating_methods_for_reconstructing_large_gaps_in_historic_snow_depth_time_series<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 27 15:27:51 2021
@author: aschauer
"""
import os
import logging
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from cv_results_database import get_cv_results_as_df
import plotting_utils as pu
import scoring_utils as scu
from sklearn.metrics import r2_score, mean_squared_error
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
sns.set_color_codes(palette='deep')
sc = get_cv_results_as_df()
sc = sc.loc[sc['gap_type']=='LOWO']
sc = sc.rename(columns={'bias': 'BIAS'})
methods_used = [
'SingleStation_best_correlated_mean_ratio',
'Inverse distance squared',
'matiu vertical weighted_min_corr_-1.0',
'Elastic Net Regression',
'RandomForest_V3.5',
'SWE2HS_Snow17_shifted_dates']
plot_data = sc.loc[sc['fill_method'].isin(methods_used)].copy()
plot_data.replace(to_replace={'fill_method':pu.METHOD_NAMES}, inplace=True)
def calculate_scores(df):
    """Compute R2, RMSE, MAAPE and bias for each climate indicator.

    For every indicator (HSavg, HSmax, dHS1) the ``<indicator>_true`` and
    ``<indicator>_pred`` columns are extracted, rows with missing values
    dropped, and the four scores computed. Returns a pandas Series keyed
    ``r2_<indicator>``, ``rmse_<indicator>``, ``MAAPE_<indicator>``,
    ``bias_<indicator>``.
    """
    scores = {}
    for indicator in ('HSavg', 'HSmax', 'dHS1'):
        true_col = f'{indicator}_true'
        pred_col = f'{indicator}_pred'
        data = df.loc[:, [true_col, pred_col]].copy().dropna()
        y_true = data[true_col]
        y_pred = data[pred_col]
        scores[f'r2_{indicator}'] = r2_score(y_true, y_pred)
        scores[f'rmse_{indicator}'] = np.sqrt(mean_squared_error(y_true, y_pred))
        scores[f'MAAPE_{indicator}'] = scu._maape_score(y_true, y_pred)
        scores[f'bias_{indicator}'] = scu._bias_score(y_true, y_pred)
    return pd.Series(scores)
grouped_scores = (plot_data.groupby(['fill_method','station_grid'])
.apply(calculate_scores)
.reset_index()
)
for metric in ['rmse','r2','MAAPE','bias']:
for clim_indi in ['HSavg', 'HSmax', 'dHS1']:
sns.barplot(data=grouped_scores,
x='fill_method',
y=f'{metric}_{clim_indi}',
order=[pu.METHOD_NAMES[m] for m in methods_used],
hue = 'station_grid',
hue_order = ['full', 'only_target_stations'],
palette=['C1', 'C0']
)
plt.show() | 2.15625 | 2 |
member_manager/apps.py | 4sfc/django-member-manager | 0 | 12766919 | '''MemberManagerConfig class'''
from django.apps import AppConfig
class MemberManagerConfig(AppConfig):
    '''MemberManagerConfig configures the Member Manager Django app.'''
    # Dotted Python path of the application package.
    name = 'member_manager'
    # Human-readable name shown e.g. in the Django admin.
    verbose_name = 'Member Manager'
| 1.703125 | 2 |
Project - Open Spaces - Script.py | mckenzie-h/EMG722-OpenSpace | 0 | 12766920 | <filename>Project - Open Spaces - Script.py
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#Importing required libaries for code to run
get_ipython().run_line_magic('matplotlib', 'notebook')
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
from cartopy.feature import ShapelyFeature
import cartopy.crs as ccrs
import cartopy.feature as cf
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import fiona
import os
from shapely.geometry import Point, LineString, Polygon
plt.ion() # makes the plotting of the map interactive
# generate matplotlib handles to create a legend of the features we put in our map.
def generate_handles(labels, colors, edge='k', alpha=1):
    """Create one legend patch per label, cycling through the colour list.

    Returns a list of unit Rectangle patches (one per label) suitable for
    passing to ``ax.legend``.
    """
    num_colors = len(colors)  # cycle when there are more labels than colours
    return [mpatches.Rectangle((0, 0), 1, 1,
                               facecolor=colors[idx % num_colors],
                               edgecolor=edge, alpha=alpha)
            for idx in range(len(labels))]
#Create variable for open spaces dataset and load data
openspace_data = gpd.read_file(r'C:\Users\angel\Programming\Project\Data\NS_GreenspaceSite.shp')
#Create variable for boundary dataset and load data, this example uses Glasgow as an area of interest
boundary_data = gpd.read_file(r'C:\Users\angel\Programming\Project\Data\GlasgowBoundary.shp')
#Create variable for roads dataset and load data
road_data = gpd.read_file(r'C:\Users\angel\Programming\Project\Data\NS_RoadLink.shp')
#Create Variable for glasgow postcode data
postcode_g = gpd.read_file (r'C:\Users\angel\Programming\Project\Data\g_postcode_data.shp')
# In[2]:
#display open space dataset table
openspace_data
# In[3]:
#Display Boundary dataset table
boundary_data
# In[4]:
#Display road dataset table
road_data
# In[5]:
#display postcode dataset table
postcode_g
# In[6]:
# Check co-ordinate reference systems for openspace layers
openspace_data.crs
# In[7]:
# Check co-ordinate reference systems for boundary layer
boundary_data.crs
# In[8]:
# Check co-ordinate reference systems for roads layer
road_data.crs
# In[9]:
#check co-ordinate reference systsem for postcode layer
postcode_g.crs
# In[10]:
#Clip the open space dataset to the extent of the project boundary layer, using geopandas clip function
# Clip data
openspace_glasgow = gpd.clip(openspace_data, boundary_data)
# Ignore missing/empty geometries
openspace_glasgow = openspace_glasgow[~openspace_glasgow.is_empty]
#Print number of rows in new clipped dataset compared to original dataset. Ensures that clip has worked.
print("The clipped data has fewer polygons (represented by rows):",
openspace_glasgow.shape, openspace_data.shape)
#export clipped open space dataset into ESRI shapefile into data folder
openspace_glasgow.to_file('C:\\Users\\angel\\Programming\\Project\\Data\\openspace_glasgow.shp', driver = 'ESRI Shapefile')
# In[11]:
#Clip the roads dataset to the extent of the project boundary layer, using geopandas clip function
# Clip data
road_glasgow = gpd.clip(road_data, boundary_data)
# Ignore missing/empty geometries
road_glasgow = road_glasgow[~road_glasgow.is_empty]
#Print number of rows in new clipped dataset compared to original dataset. Ensures that clip has worked.
print("The clipped data has fewer line sections (represented by rows):",
road_glasgow.shape, road_data.shape)
#export clipped road dataset into ESRI Shapefile into data folder
road_glasgow.to_file('C:\\Users\\angel\\Programming\\Project\\Data\\road_glasgow.shp', driver = 'ESRI Shapefile')
# In[12]:
# identify the number of open space functions in the dataset to identify how many colours are required for symboloisation
num_openspace = len(openspace_glasgow.function.unique())
print('Number of unique Open Space Types: {}'.format(num_openspace))
print(openspace_glasgow['function'].unique())
# In[13]:
# Identify the number of unique road types in the roads datasetto identify how many colours are required for symboloisation
num_road = len(road_glasgow.function.unique())
print('Number of unique road classes: {}'.format(num_road))
# In[14]:
myFig = plt.figure(figsize=(16, 8)) # create a figure of size 10x10 (representing the page size in inches)
myCRS = ccrs.UTM(30) # create a Universal Transverse Mercator reference system to transform our data.
# be sure to fill in XX above with the correct number for the area we're working in.
ax = plt.axes(projection=ccrs.Mercator()) # finally, create an axes object in the figure, using a Mercator
# projection, where we can actually plot our data.
# In[15]:
# first, we just add the outline of glasgow city council using cartopy's ShapelyFeature
outline_feature = ShapelyFeature(boundary_data['geometry'], myCRS, edgecolor='yellow', facecolor='w')
xmin, ymin, xmax, ymax = boundary_data.total_bounds
ax.add_feature(outline_feature) # add the features we've created to the map.
# using the boundary of the shapefile features, zoom the map to our area of interest
ax.set_extent([xmin, xmax, ymin, ymax], crs=myCRS) # because total_bounds gives output as xmin, ymin, xmax, ymax,
#add gridlines to the map, turning off the top and rightside labels
gridlines = ax.gridlines(draw_labels=True, color='black', alpha=0.6, linestyle='--')
gridlines.right_labels =False # turn off the right side labels
gridlines.top_labels =False # turn off the top labels
myFig # re-display the figure here.
# In[16]:
#add title to map figure
plt.title('Glasgow Open Spaces')
# In[17]:
#create colours for the open space types - for this dataset 10 colours need to be identified
openspace_colors = ['lightgreen','palevioletred', 'crimson', 'dimgrey', 'lime', 'darkorchid', 'darkorange', 'hotpink', 'indigo', 'aqua' ]
# get a list of unique names for the Open Space type
openspace_types = list(openspace_glasgow.function.unique())
# sort the open space types alphabetically by name
openspace_types.sort()
# add the open spaces data to the map
for i, openspace in enumerate(openspace_types):
feat = ShapelyFeature(openspace_glasgow['geometry'][openspace_glasgow['function'] == openspace], myCRS,
edgecolor='black',
facecolor=openspace_colors[i],
linewidth=1,
alpha=0.25)
ax.add_feature(feat)
# In[18]:
#add roads layer to map
road_colors = ['darkslategrey', 'navy', 'silver', 'darkmagenta', 'sienna', 'darkred', 'darkgoldenrod', 'olive']
# get a list of unique road types from the function attribute for the roads dataset
road_types = list(road_glasgow.function.unique())
# sort the open space types alphabetically by name
road_types.sort()
# add the road data to the map
for i, road in enumerate(road_types):
road_feat = ShapelyFeature(road_glasgow['geometry'][road_glasgow['function'] == road], myCRS,
edgecolor='black',
facecolor=road_colors[i],
linewidth=0.25)
ax.add_feature(road_feat)
# In[19]:
# generate a list of handles for the openspace dataset
openspace_handles = generate_handles(openspace_types, openspace_colors, alpha=0.25)
# generate a list of handles for the road dataset
road_handles = generate_handles(road_types, road_colors, alpha=0.25)
#generate handle for boundary data
#boundary_handle = [mpatches.Patch([], [], edgecolor='yellow')]
#ax.legend() takes a list of handles and a list of labels corresponding to the objects you want to add to the legend
handles = openspace_handles + road_handles #+ boundary_handle
labels = openspace_types + road_types #+ 'Glasgow Boundary'
leg = ax.legend(handles, labels, title='Legend', title_fontsize=12,
fontsize=10, loc='upper right',bbox_to_anchor=(1.5, 1), frameon=True, framealpha=1)
myFig #show the updated figure
# In[20]:
#save the map as a png file
myFig.savefig('Glasgow_OpenSpace_Map.png', dpi=300)
# In[21]:
##Now we can see the map of the relevent openspace and road data within the Glasgow City Council Area, analysis can be carried out to interegate the data and find out more about the openspaces and roads within Glasgow
# Create a new column within the openspace glasgow called area and populate it with the area in m2 for each
# Area / 1000 to get total area in km2
openspace_glasgow['area km'] = openspace_glasgow.area/1000
# In[22]:
#Display the openspace glasgow table with the added areas column
openspace_glasgow
# In[23]:
#run the a groupby with count operation on the glasgow road layer to identify the number of each type of road in the glasgow area
openspace_groupcount = openspace_glasgow.groupby('function')['function'].count()
#Display the open space group with count by table
openspace_groupcount
# In[24]:
#run the a group by operation on the glasgow open space layer to identify the number of each type of open space in the glasgow area
openspace_group = openspace_glasgow.groupby('function')
#Display the open space group by table
openspace_group
# In[25]:
#Print to show user that group
print('Groupby Successful')
# In[26]:
#for key values in the open space groups create indivadual tables
for key, values in openspace_group:
openspace_type = values
openspace_type.head()
#display a sample table of the openspace table seperated by key values
# In[27]:
#Create individual shapefiles for the types of openspace data.
#determind output folder location
outFolder = r'C:\Users\angel\Programming\Project\Data'
# Create a new folder called 'results' (if does not exist already) to cretae the folder use os.makedirs() function
resultFolder = os.path.join(outFolder, 'results')
if not os.path.exists(resultFolder):
os.makedirs(resultFolder)
# Iterate over the key values in the open space group to create seperate shapefiles for each
for key, values in openspace_group:
# Format the filename (replace spaces with underscores)
outName = "%s.shp" % key.replace(" ", "_")
# Print some information for the user
print("Processing: %s" % key)
# Create an output path
outpath = os.path.join(resultFolder, outName)
# Export the data
values.to_file(outpath)
#print finishing statement to ensure shapefiles have be saved
print('Shapefile Saved')
# In[28]:
#display the length of each road section in the road dataset
road_glasgow.length
# In[29]:
# Create a new column within the road_glasgow called length and populate it with the length in meters for each
road_glasgow['length - m'] = road_glasgow.length
# In[30]:
#display the updated road glasgow with the length field included in the table
road_glasgow
# In[31]:
#run the a group by operation on the glasgow open space layer to identify the number of each type of open space in the glasgow area
road_groupcount = road_glasgow.groupby('function')['function'].count()
#Display the open space group with count by table
road_groupcount
# In[32]:
#run the a group by operation on the glasgow road layer to identify the number of each type of open space in the glasgow area
road_group = road_glasgow.groupby('function')
#Display the open space group by table
road_group
print('Groupby Successful')
# In[33]:
#Create individual shapefiles for the types of road data.
#determind output folder location
outFolder = r'C:\Users\angel\Programming\Project\Data'
# Create a new folder called 'Results' (if does not exist) to that folder using os.makedirs() function
resultFolder = os.path.join(outFolder, 'results')
if not os.path.exists(resultFolder):
os.makedirs(resultFolder)
# Iterate over the
for key, values in road_group:
# Format the filename (replace spaces with underscores)
outName = "%s.shp" % key.replace(" ", "_")
# Print some information for the user
print("Processing: %s" % key)
# Create an output path
outpath = os.path.join(resultFolder, outName)
# Export the data
values.to_file(outpath)
#print finishing statement to ensure shapefiles have be saved
print('Shapefiles Saved')
# In[34]:
#Call in the new shapefiles which are going to be use in the analysis
#public spaces
openspace_public = gpd.read_file(r'C:\Users\angel\Programming\Project\Data\results\Public_Park_Or_Garden.shp')
#playing field
openspace_field = gpd.read_file(r'C:\Users\angel\Programming\Project\Data\results\Playing_Field.shp')
#play space
openspace_play = gpd.read_file(r'C:\Users\angel\Programming\Project\Data\results\Play_Space.shp')
# In[35]:
#calcualte the total area of public Gardens or Parks within the Glasgow City Council Boundary
total_publicarea = openspace_public['area km'].sum()
print ('The total area -km2- for all Public Gardens of Parks within Glasgow City is:', total_publicarea)
# In[36]:
#calcualte the total area of public Gardens or Parks within the Glasgow City Council Boundary
total_playarea = openspace_play['area km'].sum()
print('The total area -km2- for all Play Spaces within Glasgow City is:',total_playarea)
# In[37]:
#calcualte the total area of public Gardens or Parks within the Glasgow City Council Boundary
total_fieldarea = openspace_field['area km'].sum()
print('The total area -km2- for all Playing Fields within Glasgow City is:',total_fieldarea)
# In[38]:
#buffer open space polygons
#create 100m buffer around the public open spaces
openpublic_300 = openspace_public.buffer(300)
#create 100m buffer around playing fields
openfield_300 = openspace_field.buffer(300)
#create 100m buffer aroung play spaces
openplay_300 = openspace_play.buffer(300)
#save 100m buffer of open space public to file
openpublic_300.to_file('C:\\Users\\angel\\Programming\\Project\\Data\\openpublic_100m_buffer.shp', driver = 'ESRI Shapefile')
#save 100m buffer of open space public to file
openfield_300.to_file('C:\\Users\\angel\\Programming\\Project\\Data\\openfield_100m_buffer.shp', driver = 'ESRI Shapefile')
#save 100m buffer of open space public to file
openplay_300.to_file('C:\\Users\\angel\\Programming\\Project\\Data\\openplay_100m_buffer.shp', driver = 'ESRI Shapefile')
#print statement to ensure save has worked
print ('Save Complete')
# In[39]:
# clip g_postcode_data to glasgow city council boundary
# Clip data
postcode_glasgow = gpd.clip(postcode_g, boundary_data)
# Ignore missing/empty geometries
postcode_glasgow = postcode_glasgow[~postcode_glasgow.is_empty]
#save glasgow postcode data to file
postcode_glasgow.to_file('C:\\Users\\angel\\Programming\\Project\\Data\\postcode_glasgow.shp', driver = 'ESRI Shapefile')
# In[40]:
#Calcualate the total number of postcodes within Glasgow City Council
postcode_count = postcode_glasgow['Field1'].count()
print('The total number of postcodes within GLasgow City Council is:',postcode_count)
# In[41]:
#Calculate the population within Glasgow City Council
total_pop = postcode_count *13 *2.7 # 13 is the average households within a poualation area, 2.7 is the average houshold sive within glasgow city council.
print ('The approximate Population within Glasgow City Council is:', total_pop)
# In[42]:
#clip postcode data to those inside the 100m buffer of the open space
# Clip data
postcode_play300 = gpd.clip(postcode_glasgow, openplay_300)
# Ignore missing/empty geometries
postcode_play300 = postcode_play300[~postcode_play300.is_empty]
#save glasgow postcode data to file
postcode_play300.to_file('C:\\Users\\angel\\Programming\\Project\\Data\\postcode_glasgow.shp', driver = 'ESRI Shapefile')
# In[43]:
#Calcualate the total number of postcodes within Glasgow City Council taht fall within 100m of a playspace
postcode_playcount = postcode_play300['Field1'].count()
print('The total number of postcodes within 300m of a play space within GLasgow City Council is:',postcode_playcount)
# In[44]:
#Calculate the approximate population within 300m of a play space
play300_pop = postcode_playcount *13 *2.5
print('The approx Population within 300m of a Play Space is:', play300_pop)
# In[45]:
#clip postcode data to those inside the 100m buffer of the open space
# Clip data
postcode_field300 = gpd.clip(postcode_glasgow, openfield_300)
# Ignore missing/empty geometries
postcode_field300 = postcode_field300[~postcode_field300.is_empty]
#save glasgow postcode data to file
postcode_field300.to_file('C:\\Users\\angel\\Programming\\Project\\Data\\postcode_glasgow.shp', driver = 'ESRI Shapefile')
# In[46]:
#Calcualate the total number of postcodes within Glasgow City Council
postcode_fieldcount = postcode_field300['Field1'].count()
print('The total number of postcodes within 300m of a Playing field is:', postcode_fieldcount)
# In[47]:
#Calculate the approximate population within 300m of a playing field
field300_pop = postcode_fieldcount *13 *2.5
print('The approximate Population within 300m of a Playing Field is:', field300_pop)
# In[48]:
#clip postcode data to those inside the 100m buffer of the open space
# Clip data
postcode_public300 = gpd.clip(postcode_glasgow, openpublic_300)
# Ignore missing/empty geometries
postcode_public300 = postcode_public300[~postcode_public300.is_empty]
#save glasgow postcode data to file
postcode_public300.to_file('C:\\Users\\angel\\Programming\\Project\\Data\\postcode_glasgow.shp', driver = 'ESRI Shapefile')
# In[49]:
#Calcualate the total number of postcodes within Glasgow City Council
postcode_publiccount = postcode_public300['Field1'].count()
print('The total number of postcodes within 300m of a Public Park or Garden is:',postcode_publiccount)
# In[50]:
#Calculate the approximate population within 300m of a public park or garden
public300_pop = postcode_publiccount *13 *2.5
print('The approximate Population within 300m of a Playing Field is:', public300_pop)
| 3.203125 | 3 |
setup.py | GuoJing/c3po-grpc-gateway | 1 | 12766921 | <filename>setup.py
from c3po import __version__
from setuptools import setup, find_packages
setup(
name='c3po',
version=__version__,
description='A light weight Python gRPC transport layer gateway with tornado named c3po.',
author='GuoJing',
author_email='<EMAIL>',
license='MIT',
url='https://github.com/qiajigou/c3po-grpc-gateway',
zip_safe=False,
packages=find_packages(exclude=['examples', 'tests']),
)
| 1.195313 | 1 |
wifi_attendance/views.py | elvinzeng/wifi-attendance | 14 | 12766922 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from django.db.models.aggregates import Min, Max
from django.http import HttpResponse
from django.shortcuts import render
from django.views.generic import View
import qrcode
from cStringIO import StringIO
import uuid
from mobile_scanner.models import OnlineHistory
from users.models import VerificationToken
import datetime
__author__ = "<NAME>"
class HomeView(View):
    """
    Home page view.

    Authenticated users see their per-day first/last online times;
    anonymous visitors get a QR-based authentication page.
    """
    def get(self, request):
        if request.user.is_authenticated():
            # NOTE(review): the username appears to hold the device MAC
            # address (it is matched against OnlineHistory.mac) -- confirm.
            # Earliest and latest record per day, newest day first.
            histories = OnlineHistory.objects.filter(mac=request.user.username)\
                .values('date').annotate(min_time=Min('time'), max_time=Max('time')).order_by("-date")
            return render(request, "index.html", locals())
        else:
            # Mint a one-off token and keep it in the session; QRView
            # embeds the same token in the QR code so a mobile scan can
            # be tied back to this browser session.
            verification_token = str(uuid.uuid4())
            request.session["verification_token"] = verification_token
            return render(request, "authentication.html", locals())
class StaffOnlineHistoryView(View):
    """
    Staff online history.

    Lists, per staff member and day, the earliest and latest online
    record; restricted to users holding the
    ``mobile_scanner.view_staffonlinehistory`` permission.
    """
    def get(self, request):
        if request.user.has_perm("mobile_scanner.view_staffonlinehistory"):
            # Earliest/latest record per (day, user), newest day first.
            histories = OnlineHistory.objects.filter()\
                .values('date', 'user__last_name', 'user__first_name').annotate(min_time=Min('time'), max_time=Max('time')).order_by("-date")
            return render(request, "staff.html", locals())
        else:
            # Chinese for "no access permission", shown on the denial page.
            msg = "无访问权限"
            return render(request, "msg.html", locals())
class QRView(View):
    """
    QR Code View.

    Renders a PNG QR code encoding a short-lived authorization URL for
    the requesting session.
    """
    def get(self, request):
        host = request.META['HTTP_HOST']
        # Reuse the token HomeView stored in the session when present;
        # otherwise mint a fresh one for this request.
        token = request.session.get("verification_token", str(uuid.uuid4()))
        verification_token = VerificationToken()
        verification_token.token = token
        # The token is only valid for one minute.
        verification_token.expire_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
        verification_token.save()
        # URL the mobile client will open after scanning the code.
        data = "http://" + host + "/authorize?token=" + token
        img = qrcode.make(data)
        buf = StringIO()
        img.save(buf)
        image_stream = buf.getvalue()
        # Serve the in-memory image directly as the response body.
        response = HttpResponse(image_stream, content_type="image/png")
        return response
| 2.15625 | 2 |
CommandRecognition/model.py | c-z-h123/https-github.com-Ryuk17-SpeechAlgorithms | 338 | 12766923 | <reponame>c-z-h123/https-github.com-Ryuk17-SpeechAlgorithms<filename>CommandRecognition/model.py
"""
@FileName: model.py
@Description: Implement model
@Author: Ryuk
@CreateDate: 2020/05/12
@LastEditTime: 2020/05/12
@LastEditors: Please set LastEditors
@Version: v0.1
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class basicBlock(nn.Module):
    """ResNet basic residual block (two 3x3 convs).

    ``expansion`` is 1: the block outputs ``out_channels`` channels, with
    the spatial size reduced by ``stride`` (applied on the second conv).
    """
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super(basicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Projection shortcut whenever the residual branch changes spatial
        # size or channel count; identity otherwise.
        # Bug fix: the channel comparison previously read
        # ``in_channels != self.expansion * in_channels`` (always False for
        # expansion == 1), so stride-1 blocks that change the channel count
        # crashed with a shape mismatch in forward().
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != self.expansion * out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, self.expansion * out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * out_channels)
            )

    def forward(self, input):
        x = F.relu(self.bn1(self.conv1(input)))
        x = self.bn2(self.conv2(x))
        x += self.shortcut(input)
        x = F.relu(x)
        return x
class bottleneckBlock(nn.Module):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convs).

    Outputs ``expansion * out_channels`` channels, with the spatial size
    reduced by ``stride``.

    Fixes vs. the previous revision:
    * an identity shortcut is now always defined (forward() previously
      raised AttributeError when no projection was needed);
    * the 3x3 conv uses padding=1 so the residual branch matches the
      shortcut spatially;
    * the stride is applied once (on the 3x3 conv) instead of on both
      conv1 and conv2, which downsampled the branch by stride**2 while
      the shortcut only downsampled by stride.
    """
    expansion = 4

    def __init__(self, in_channels, out_channels, stride=1):
        super(bottleneckBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(out_channels, self.expansion * out_channels, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * out_channels)
        # Projection shortcut only when shapes change; identity otherwise.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_channels != self.expansion * out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, self.expansion * out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * out_channels)
            )

    def forward(self, input):
        x = F.relu(self.bn1(self.conv1(input)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))
        x += self.shortcut(input)
        x = F.relu(x)
        return x
class Resnet(nn.Module):
    """Generic ResNet classifier assembled from a residual block class.

    Args:
        block: residual block class exposing an ``expansion`` attribute.
        num_blocks: number of blocks in each of the four stages.
        num_classes: size of the final classification layer (default 6).
    """

    def __init__(self, block, num_blocks, num_classes=6):
        super(Resnet, self).__init__()
        self.in_channels = 64
        # Stem: single-channel input (e.g. a spectrogram) -> 64 feature maps.
        self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, out_channels, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        stage = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_channels, out_channels, block_stride))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        # Global 4x4 average pooling, flatten, then classify.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def ResNet18():
    """Build an 18-layer ResNet (basic blocks, stage sizes [2, 2, 2, 2])."""
    return Resnet(basicBlock, [2, 2, 2, 2])
def ResNet152():
    """Build a 152-layer ResNet (bottleneck blocks, stage sizes [3, 8, 36, 3])."""
    return Resnet(bottleneckBlock, [3, 8, 36, 3])
def main():
    """Smoke test: run ResNet18 on a random (1, 1, 50, 32) input and print the logits."""
    x = torch.randn(1, 1, 50, 32)
    net = ResNet18()
    print(net(x))
main() | 2.765625 | 3 |
reports/srp/code/symbol_classifier_test.py | klawr/deepmech | 1 | 12766924 | from os.path import join
import cv2
import numpy as np
from numpy.random import uniform
from sys import exit
import tensorflow as tf
model_path = join('models', 'symbol_classifier', 'model.h5')
model = tf.keras.models.load_model(model_path)
path = join('data', 'raw', 'n', '1.jpeg')
image_name = "data"
drawing = False
pt1_x , pt1_y = None , None
img = None
color = None
thickness = None
def draw(event, x, y, r1, r2):
    """OpenCV mouse callback: freehand drawing plus prediction triggers.

    Left-button drag draws strokes on the shared canvas; right-button
    release classifies the current canvas and starts a fresh one;
    middle-button release just resets the canvas.
    ``r1``/``r2`` are the callback's flags/param arguments (unused here).
    """
    global pt1_x, pt1_y, drawing, img, color
    if event==cv2.EVENT_LBUTTONDOWN:
        # Start a stroke at the press position.
        drawing=True
        pt1_x,pt1_y=x,y
    elif event==cv2.EVENT_LBUTTONUP:
        drawing=False
        cv2.line(img,(pt1_x,pt1_y),(x,y),color=color,thickness=thickness)
    elif event==cv2.EVENT_MOUSEMOVE:
        if drawing==True:
            # Connect consecutive mouse positions while the button is held.
            cv2.line(img,(pt1_x,pt1_y),(x,y),color=color,thickness=thickness)
            pt1_x,pt1_y=x,y
    elif event==cv2.EVENT_RBUTTONUP:
        # Encode the canvas as JPEG and print the predicted symbol label,
        # then reset for the next drawing.
        image = tf.convert_to_tensor(np.asarray(img, np.uint8), np.uint8)
        tensor = tf.io.encode_jpeg(image)
        print(predict(tensor))
        new_image()
    elif event==cv2.EVENT_MBUTTONUP:
        new_image()
def new_image():
    """Reset the 512x512 canvas with a random background and stroke style.

    Randomly picks white-on-dark or black-on-light strokes, a random
    stroke thickness and a uniform random background intensity.
    """
    global img, color, thickness
    # 1 -> white strokes, 0 -> black strokes.
    w_on_b = round(uniform())
    # NOTE(review): thickness can reach 260 px, which fills much of the
    # 512 px canvas -- confirm this range is intended.
    thickness = 5 + round(uniform(0, 255))
    img = np.ones((512,512,3), np.uint8)
    # Uniform grey background intensity in [0, 255].
    img *= round(uniform(0, 255))
    color = (255,255,255) if w_on_b else (0,0,0)
def predict(image):
    """Classify a JPEG-encoded canvas; returns one of 'n', 'o', 'x'."""
    labels = ['n', 'o', 'x']
    # Decode to a single-channel float image, resize to the model's 32x32
    # input, and add the batch/channel axes.
    blob = tf.io.decode_jpeg(image, channels=1)
    blob = tf.image.convert_image_dtype(blob, tf.float32)
    blob = tf.image.resize(blob, (32, 32))
    blob = tf.reshape(blob, (1, 32, 32, 1))
    scores = list(model.predict(blob, steps=1)[0])
    return labels[scores.index(max(scores))]
# Initialise the canvas, wire up the mouse callback, then run the UI loop.
new_image()
cv2.namedWindow(image_name)
cv2.setMouseCallback(image_name, draw)
while(1):
    cv2.imshow(image_name, img)
    # ESC (key code 27) quits.
    if cv2.waitKey(1)&0xFF==27:
        break
cv2.destroyAllWindows()
| 2.453125 | 2 |
import numpy as np
import os
import os.path
from rechtschreib import correct
# Root directory whose files will be spell-checked in place.
folder="../../write/data/"
def fixstring(qq, bef=None):
    """Spell-correct the free text of `qq` while passing markup through.

    Characters are accumulated into a chunk; the chunk is flushed whenever a
    delimiter (`stopat`, '>', '#', or end of input) is reached.  Chunks that
    are short (< 5 chars), inside a '<...>' tag, inside a '#...#' span, or
    flushed on a markup delimiter are copied verbatim; everything else is
    passed through `correct`.  `bef` is context forwarded to `correct`.
    """
    intags = False   # currently inside a '<...>' tag
    inhash = False   # currently inside a '#...#' span
    ac = ""          # chunk accumulator
    ret = ""
    stopat = [".", "(", ")", ">", "\n"]
    lq = len(qq)
    for ii, zw in enumerate(qq):
        if zw == "<": intags = True
        if zw == "#" and inhash == False: inhash = True
        ac += zw
        if zw == ">": intags = False
        # NOTE(review): for zw == "#" this toggle fires right after the one
        # above, so `inhash` never persists past the '#' itself — this looks
        # suspicious, but the original behavior is preserved as-is.
        if zw == "#" and inhash == True: inhash = False
        if len(ret) > 5:
            # An opening '<note ' tag carries free text: drop the in-tag state.
            if ret[-6:] == "<note ":
                intags = False
        if zw in stopat or zw == ">" or (zw == "#" and inhash == False) or (ii == lq - 1):
            if len(ac) == 0: continue
            if len(ac) < 5 or intags or inhash or zw == ">" or (zw == "#" and not inhash):
                # Too short, or markup: keep the chunk verbatim.
                ret += ac
                ac = ""
                continue
            ac = correct(ac, bef)
            ret += ac
            ac = ""
    return ret
def noignorefix2(q, befstr=None):
    """Spell-correct `q` while leaving '#...#' spans untouched.

    Text before each span is run through `fixstring`; each span (delimiters
    included) is copied verbatim; trailing text after the last span is
    corrected as well.
    """
    fix1 = "#"
    fix2 = "#"
    ret = ""
    while fix1 in q:
        i1 = q.find(fix1)
        # Correct the plain text before the opening delimiter.
        ret += fixstring(q[:i1], befstr)
        q = q[i1:]
        # Find the matching *closing* delimiter after the opening one.
        # (Bug fix: searching from 0 re-found the opening '#' itself, so the
        # protected content leaked into the corrected text on the next pass.)
        i2 = q.find(fix2, len(fix1))
        if i2 < 0:
            # Unterminated span: keep the remainder verbatim rather than
            # correcting text that was meant to be protected (the old
            # `continue` here never made progress and looped forever).
            ret += q
            return ret
        end = i2 + len(fix2)
        ret += q[:end]   # protected span, copied as-is
        q = q[end:]
    if len(q) > 0:
        ret += fixstring(q, befstr)
    return ret
def noignorefix(q, befstr=None):
    """Spell-correct `q` while leaving '<ignore>...</ignore>' blocks untouched.

    Text outside the ignore blocks is handed to `noignorefix2` (which in turn
    protects '#...#' spans); the blocks themselves, tags included, are copied
    verbatim.
    """
    fix1 = "<ignore>"
    fix2 = "</ignore>"
    ret = ""
    while fix1 in q:
        i1 = q.find(fix1)
        # Correct the plain text before the opening tag.
        ret += noignorefix2(q[:i1], befstr)
        q = q[i1:]
        i2 = q.find(fix2)
        if i2 < 0:
            # Unterminated <ignore>: keep the remainder verbatim instead of
            # looping forever (the old `continue` here never made progress).
            ret += q
            return ret
        end = i2 + len(fix2)
        ret += q[:end]   # ignored block, copied as-is
        q = q[end:]
    if len(q) > 0:
        ret += noignorefix2(q, befstr)
    return ret
def correctfile(q):
    """Read file `q`, spell-correct its contents, and write it back in place."""
    print("correcting file", q)
    with open(q, "r") as handle:
        original = handle.read()
    fixed = noignorefix(original, "working on " + q)
    with open(q, "w") as handle:
        handle.write(fixed)
# Walk the data tree and spell-correct every file in place.
for dirpath, dirnames, filenames in os.walk(folder):
    for filename in filenames:
        # os.path.join handles separators portably (the original glued paths
        # together with a literal "/"); the pointless list copy is gone too.
        correctfile(os.path.join(dirpath, filename))
tests/test_choices.py | SamSchott/travertino | 0 | 12766926 | <reponame>SamSchott/travertino
from unittest import TestCase
from unittest.mock import Mock
from travertino.colors import NAMED_COLOR, rgb
from travertino.constants import TOP, GOLDENROD, REBECCAPURPLE
from travertino.declaration import Choices, BaseStyle
class PropertyChoiceTests(TestCase):
    """Exercise `Choices` validation for each kind of allowed value.

    Every test builds a fresh `BaseStyle` subclass with a single validated
    `prop` and checks which assignments are accepted, what they coerce to,
    and the exact error message for an invalid assignment.
    """

    def assert_property(self, obj, value, check_mock=True):
        """Assert `obj.prop` == `value` and (unless `check_mock` is False)
        that the change was propagated through `obj.apply` exactly once."""
        self.assertEqual(obj.prop, value)

        if check_mock:
            obj.apply.assert_called_once_with('prop', value)
            obj.apply.reset_mock()

    def assert_invalid_value(self, obj, value, valid):
        """Assert that assigning `value` to `obj.prop` raises ValueError
        carrying the standard message listing the `valid` choices."""
        with self.assertRaises(ValueError) as ctx:
            obj.prop = value
        self.assertEqual(
            str(ctx.exception),
            "Invalid value '%s' for property 'prop'; Valid values are: %s" % (
                value, valid)
        )

    def _make_object(self, choices, initial):
        """Create a fresh BaseStyle subclass with one validated `prop` and
        return an instance whose `apply` hook is a Mock."""
        class MyObject(BaseStyle):
            def __init__(self):
                self.apply = Mock()

        MyObject.validated_property('prop', choices=choices, initial=initial)
        return MyObject()

    def test_none(self):
        """Choices(None): only None (or the string 'none') is accepted."""
        obj = self._make_object(Choices(None), initial=None)
        self.assertIsNone(obj.prop)

        with self.assertRaises(ValueError):
            obj.prop = 10
        with self.assertRaises(ValueError):
            obj.prop = 3.14159
        with self.assertRaises(ValueError):
            obj.prop = REBECCAPURPLE
        with self.assertRaises(ValueError):
            obj.prop = '#112233'
        with self.assertRaises(ValueError):
            obj.prop = 'a'
        with self.assertRaises(ValueError):
            obj.prop = 'b'

        obj.prop = None
        self.assert_property(obj, None, check_mock=False)

        obj.prop = 'none'
        self.assert_property(obj, None, check_mock=False)

        # Check the error message
        self.assert_invalid_value(obj, 'invalid', 'none')

    def test_allow_string(self):
        """Choices(string=True): string-typed values are accepted verbatim."""
        obj = self._make_object(Choices(string=True), initial='start')
        self.assertEqual(obj.prop, 'start')

        with self.assertRaises(ValueError):
            obj.prop = 10
        with self.assertRaises(ValueError):
            obj.prop = 3.14159

        obj.prop = REBECCAPURPLE
        self.assert_property(obj, 'rebeccapurple')

        obj.prop = '#112233'
        self.assert_property(obj, '#112233')

        obj.prop = 'a'
        self.assert_property(obj, 'a')

        obj.prop = 'b'
        self.assert_property(obj, 'b')

        with self.assertRaises(ValueError):
            obj.prop = None

        # 'none' is just another string here, not the None sentinel.
        obj.prop = 'none'
        self.assert_property(obj, 'none')

        # Check the error message
        self.assert_invalid_value(obj, 99, '<string>')

    def test_allow_integer(self):
        """Choices(integer=True): ints accepted; floats are coerced to int."""
        obj = self._make_object(Choices(integer=True), initial=0)
        self.assertEqual(obj.prop, 0)

        obj.prop = 10
        self.assert_property(obj, 10)

        # This is an odd case; Python happily rounds floats to integers.
        # It's more trouble than it's worth to correct this.
        obj.prop = 3.14159
        self.assert_property(obj, 3)

        with self.assertRaises(ValueError):
            obj.prop = REBECCAPURPLE
        with self.assertRaises(ValueError):
            obj.prop = '#112233'
        with self.assertRaises(ValueError):
            obj.prop = 'a'
        with self.assertRaises(ValueError):
            obj.prop = 'b'
        with self.assertRaises(ValueError):
            obj.prop = None
        with self.assertRaises(ValueError):
            obj.prop = 'none'

        # Check the error message
        self.assert_invalid_value(obj, 'invalid', '<integer>')

    def test_allow_number(self):
        """Choices(number=True): numeric values are accepted as floats."""
        obj = self._make_object(Choices(number=True), initial=0)
        self.assertEqual(obj.prop, 0)

        obj.prop = 10
        self.assert_property(obj, 10.0)

        obj.prop = 3.14159
        self.assert_property(obj, 3.14159)

        with self.assertRaises(ValueError):
            obj.prop = REBECCAPURPLE
        with self.assertRaises(ValueError):
            obj.prop = '#112233'
        with self.assertRaises(ValueError):
            obj.prop = 'a'
        with self.assertRaises(ValueError):
            obj.prop = 'b'
        with self.assertRaises(ValueError):
            obj.prop = None
        with self.assertRaises(ValueError):
            obj.prop = 'none'

        # Check the error message
        self.assert_invalid_value(obj, 'invalid', '<number>')

    def test_allow_color(self):
        """Choices(color=True): named colors and hex strings become color
        objects."""
        obj = self._make_object(Choices(color=True), initial='goldenrod')
        self.assertEqual(obj.prop, NAMED_COLOR[GOLDENROD])

        with self.assertRaises(ValueError):
            obj.prop = 10
        with self.assertRaises(ValueError):
            obj.prop = 3.14159

        obj.prop = REBECCAPURPLE
        self.assert_property(obj, NAMED_COLOR[REBECCAPURPLE])

        obj.prop = '#112233'
        self.assert_property(obj, rgb(0x11, 0x22, 0x33))

        with self.assertRaises(ValueError):
            obj.prop = 'a'
        with self.assertRaises(ValueError):
            obj.prop = 'b'
        with self.assertRaises(ValueError):
            obj.prop = None
        with self.assertRaises(ValueError):
            obj.prop = 'none'

        # Check the error message
        self.assert_invalid_value(obj, 'invalid', '<color>')

    def test_values(self):
        """An explicit value list ('a', 'b', None) accepts only those values."""
        obj = self._make_object(Choices('a', 'b', None), initial='a')
        self.assertEqual(obj.prop, 'a')

        with self.assertRaises(ValueError):
            obj.prop = 10
        with self.assertRaises(ValueError):
            obj.prop = 3.14159
        with self.assertRaises(ValueError):
            obj.prop = REBECCAPURPLE
        with self.assertRaises(ValueError):
            obj.prop = '#112233'

        obj.prop = None
        self.assert_property(obj, None)

        obj.prop = 'a'
        self.assert_property(obj, 'a')

        obj.prop = 'none'
        self.assert_property(obj, None)

        obj.prop = 'b'
        self.assert_property(obj, 'b')

        # Check the error message
        self.assert_invalid_value(obj, 'invalid', 'a, b, none')

    def test_multiple_choices(self):
        """Explicit values combined with number=True and color=True."""
        obj = self._make_object(
            Choices('a', 'b', None, number=True, color=True),
            initial=None)

        obj.prop = 10
        self.assert_property(obj, 10.0)

        obj.prop = 3.14159
        self.assert_property(obj, 3.14159)

        obj.prop = REBECCAPURPLE
        self.assert_property(obj, NAMED_COLOR[REBECCAPURPLE])

        obj.prop = '#112233'
        self.assert_property(obj, rgb(0x11, 0x22, 0x33))

        obj.prop = None
        self.assert_property(obj, None)

        obj.prop = 'a'
        self.assert_property(obj, 'a')

        obj.prop = 'none'
        self.assert_property(obj, None)

        obj.prop = 'b'
        self.assert_property(obj, 'b')

        # Check the error message
        self.assert_invalid_value(
            obj, 'invalid', 'a, b, none, <number>, <color>')

    def test_string_symbol(self):
        """A symbolic constant can be set via its (lowercased) string value
        and is normalized back to the canonical constant object."""
        obj = self._make_object(Choices(TOP, None), initial=None)

        # Set a symbolic value using the string value of the symbol
        # We can't just use the string directly, though - that would
        # get optimized by the compiler. So we create a string and
        # transform it into the value we want.
        val = 'TOP'
        obj.prop = val.lower()

        # Both equality and instance checking should work.
        self.assertEqual(obj.prop, TOP)
        self.assertIs(obj.prop, TOP)
| 2.765625 | 3 |
import numpy as np
import g2o
class MotionModel(object):
    """Damped constant-velocity motion model for predicting camera poses.

    Linear and angular velocities are re-estimated from consecutive pose
    updates; `predict_pose` extrapolates them forward in time, attenuated
    by the damping factor `damp`.
    """

    # NOTE(review): the mutable defaults below are evaluated once at class
    # definition time; safe only if callers never mutate them in place —
    # confirm.
    def __init__(self,
            timestamp=None,
            initial_position=np.zeros(3),
            initial_orientation=g2o.Quaternion(),
            initial_covariance=None):

        self.timestamp = timestamp              # time of the last pose update
        self.position = initial_position
        self.orientation = initial_orientation
        self.covariance = initial_covariance    # pose covariance

        self.v_linear = np.zeros(3)             # linear velocity
        self.v_angular_angle = 0                # angular speed
        self.v_angular_axis = np.array([1, 0, 0])   # rotation axis

        self.initialized = False                # True after the first update_pose
        # damping factor
        self.damp = 0.95

    def current_pose(self):
        '''
        Get the current camera pose.

        Returns a (g2o.Isometry3d, covariance) pair.
        '''
        return (g2o.Isometry3d(self.orientation, self.position),
            self.covariance)

    def predict_pose(self, timestamp):
        '''
        Predict the next camera pose.

        Extrapolates the pose to `timestamp` with the damped constant-velocity
        assumption; before the first update it just returns the current pose.
        '''
        if not self.initialized:
            return (g2o.Isometry3d(self.orientation, self.position),
                self.covariance)

        dt = timestamp - self.timestamp

        # Rotation accumulated over dt, attenuated by the damping factor.
        delta_angle = g2o.AngleAxis(
            self.v_angular_angle * dt * self.damp,
            self.v_angular_axis)
        delta_orientation = g2o.Quaternion(delta_angle)

        position = self.position + self.v_linear * dt * self.damp
        orientation = self.orientation * delta_orientation

        return (g2o.Isometry3d(orientation, position), self.covariance)

    def update_pose(self, timestamp,
            new_position, new_orientation, new_covariance=None):
        '''
        Update the motion model when given a new camera pose.

        Re-estimates linear and angular velocities from the difference to the
        previous pose, then stores the new pose as the current one.
        '''
        if self.initialized:
            dt = timestamp - self.timestamp
            assert dt != 0

            v_linear = (new_position - self.position) / dt
            self.v_linear = v_linear

            # Relative rotation from the previous to the new orientation.
            delta_q = self.orientation.inverse() * new_orientation
            delta_q.normalize()

            delta_angle = g2o.AngleAxis(delta_q)
            angle = delta_angle.angle()
            axis = delta_angle.axis()

            # Keep the rotation on the short arc (angle <= pi).
            if angle > np.pi:
                axis = axis * -1
                angle = 2 * np.pi - angle

            self.v_angular_axis = axis
            self.v_angular_angle = angle / dt

        self.timestamp = timestamp
        self.position = new_position
        self.orientation = new_orientation
        self.covariance = new_covariance
        self.initialized = True

    def apply_correction(self, correction):  # corr: g2o.Isometry3d or matrix44
        '''
        Reset the model given a new camera pose.
        Note: This method will be called when it happens an abrupt change in the pose (LoopClosing)
        '''
        if not isinstance(correction, g2o.Isometry3d):
            correction = g2o.Isometry3d(correction)

        current = g2o.Isometry3d(self.orientation, self.position)
        current = current * correction

        self.position = current.position()
        self.orientation = current.orientation()

        # Re-express the velocity estimates using the correction's rotation.
        self.v_linear = (
            correction.inverse().orientation() * self.v_linear)
        self.v_angular_axis = (
            correction.inverse().orientation() * self.v_angular_axis)
from nonbonded.backend.database.models.targets.targets import OptimizationTarget
# __all__ must contain *names* (strings); listing the class object itself
# makes `from ... import *` raise a TypeError.
__all__ = ["OptimizationTarget"]
| 1.070313 | 1 |
"""Added organization table
Revision ID: ea281d3f1673
Revises: <KEY>
Create Date: 2021-11-04 15:04:47.282526
"""
from uuid import uuid4
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'ea281d3f1673'
# NOTE(review): '<KEY>' looks like a redacted/placeholder revision id —
# restore the real parent revision before running migrations.
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def new_uuid() -> str:
    """Return a random 32-character hex string (used as a primary-key default)."""
    token = uuid4()
    return token.hex
def upgrade():
    """Create the `organization` table.

    `id` is a 32-char hex uuid (generated by `new_uuid`) used as the
    primary key; `name` must be unique and non-null.
    """
    op.create_table(
        'organization',
        sa.Column('id', sa.String(32), primary_key=True, default=new_uuid),
        sa.Column('name', sa.String(128), unique=True, nullable=False),
    )
def downgrade():
    """Drop the `organization` table (reverses `upgrade`)."""
    op.drop_table('organization')
| 1.359375 | 1 |
modules/old/generators.py | jperezvisaires/tfg-intphys | 1 | 12766930 | <filename>modules/old/generators.py
import numpy as np
import h5py
from skimage.io import imread
from skimage.transform import rescale
import matplotlib.pyplot as plt
from tensorflow.keras.utils import Sequence, to_categorical
class SegmentationObjectDataGenerator(Sequence):
    """Keras `Sequence` yielding (image, object-mask) batches from HDF5.

    X: 8-bit frames scaled to [0, 1]; Y: binary mask that is 1 where the
    label map equals 3 (presumably the tracked object class — confirm
    against the dataset labeling) and 0 elsewhere.
    """

    def __init__(self,
                 list_samples,
                 targets,
                 path_hdf5,
                 batch_size=32,
                 dim=(288, 288),
                 scale=1,
                 num_channels=3,
                 num_classes=1,
                 shuffle=True,
                 first=True):
        """Store configuration and initialise the (shuffled) sample order.

        `list_samples` are HDF5 dataset keys; `targets` maps each sample key
        to its mask key.  NOTE(review): `first` is stored but never read here.
        """
        self.dim = dim
        self.path_hdf5 = path_hdf5
        self.scale = scale
        self.batch_size = batch_size
        self.targets = targets
        self.list_samples = list_samples
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.shuffle = shuffle
        self.first = first
        self.on_epoch_end()

    def __len__(self):
        """Number of full batches per epoch (trailing remainder is dropped)."""
        return int(np.floor(len(self.list_samples) / self.batch_size))

    def __getitem__(self, index):
        """Build and return batch `index` as (X, Y)."""
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_samples_temp = [self.list_samples[i] for i in indexes]
        return self.__data_generation(list_samples_temp)

    def on_epoch_end(self):
        """Re-shuffle the sample order (if enabled) after every epoch."""
        self.indexes = np.arange(len(self.list_samples))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __int_to_float(self, array, bits):
        """Map `bits`-wide integer data to float32 in [0, 1]."""
        return array.astype(np.float32) / (2**bits - 1)

    def __mask_oneshot(self, mask_array):
        """Binarize the label map: 1 where the label equals 3, else 0."""
        oneshot_array = np.empty(mask_array.shape)
        oneshot_array[mask_array == 3] = 1
        oneshot_array[mask_array != 3] = 0
        return oneshot_array

    def __rescale(self, array):
        """Spatially rescale by `self.scale` (nearest-neighbor), if needed."""
        if self.scale != 1:
            array = rescale(image=array,
                            scale=self.scale,
                            order=0,
                            preserve_range=True,
                            multichannel=True,
                            anti_aliasing=False)
        return array

    def __data_generation(self, list_samples_temp):
        """Read one batch from the HDF5 file and post-process it."""
        X = np.empty((self.batch_size, *self.dim, self.num_channels))
        Y = np.empty((self.batch_size, *self.dim, 1))
        # Open the HDF5 file once per batch instead of once per sample.
        with h5py.File(self.path_hdf5, "r") as f:
            for i, sample in enumerate(list_samples_temp):
                X[i,] = f[sample][:]
                Y[i,] = f[self.targets[sample]][:]
        X = self.__int_to_float(X, 8)
        Y = self.__mask_oneshot(Y)
        X = self.__rescale(X)
        Y = self.__rescale(Y)
        return X, Y
class SegmentationOccluderDataGenerator(Sequence):
    """Keras `Sequence` yielding (image, occluder-mask) batches from HDF5.

    Identical to `SegmentationObjectDataGenerator` except that the mask is
    1 where the label map equals 4 (presumably the occluder class — confirm
    against the dataset labeling) and 0 elsewhere.
    """

    def __init__(self,
                 list_samples,
                 targets,
                 path_hdf5,
                 batch_size=32,
                 dim=(288, 288),
                 scale=1,
                 num_channels=3,
                 num_classes=1,
                 shuffle=True,
                 first=True):
        """Store configuration and initialise the (shuffled) sample order.

        NOTE(review): `first` is stored but never read here.
        """
        self.dim = dim
        self.path_hdf5 = path_hdf5
        self.scale = scale
        self.batch_size = batch_size
        self.targets = targets
        self.list_samples = list_samples
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.shuffle = shuffle
        self.first = first
        self.on_epoch_end()

    def __len__(self):
        """Number of full batches per epoch (trailing remainder is dropped)."""
        return int(np.floor(len(self.list_samples) / self.batch_size))

    def __getitem__(self, index):
        """Build and return batch `index` as (X, Y)."""
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_samples_temp = [self.list_samples[i] for i in indexes]
        return self.__data_generation(list_samples_temp)

    def on_epoch_end(self):
        """Re-shuffle the sample order (if enabled) after every epoch."""
        self.indexes = np.arange(len(self.list_samples))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __int_to_float(self, array, bits):
        """Map `bits`-wide integer data to float32 in [0, 1]."""
        return array.astype(np.float32) / (2**bits - 1)

    def __mask_oneshot(self, mask_array):
        """Binarize the label map: 1 where the label equals 4, else 0."""
        oneshot_array = np.empty(mask_array.shape)
        oneshot_array[mask_array == 4] = 1
        oneshot_array[mask_array != 4] = 0
        return oneshot_array

    def __rescale(self, array):
        """Spatially rescale by `self.scale` (nearest-neighbor), if needed."""
        if self.scale != 1:
            array = rescale(image=array,
                            scale=self.scale,
                            order=0,
                            preserve_range=True,
                            multichannel=True,
                            anti_aliasing=False)
        return array

    def __data_generation(self, list_samples_temp):
        """Read one batch from the HDF5 file and post-process it."""
        X = np.empty((self.batch_size, *self.dim, self.num_channels))
        Y = np.empty((self.batch_size, *self.dim, 1))
        # Open the HDF5 file once per batch instead of once per sample.
        with h5py.File(self.path_hdf5, "r") as f:
            for i, sample in enumerate(list_samples_temp):
                X[i,] = f[sample][:]
                Y[i,] = f[self.targets[sample]][:]
        X = self.__int_to_float(X, 8)
        Y = self.__mask_oneshot(Y)
        X = self.__rescale(X)
        Y = self.__rescale(Y)
        return X, Y
class DepthDataGenerator(Sequence):
    """Keras `Sequence` yielding (image, depth-map) batches from HDF5.

    X: 8-bit frames scaled to [0, 1]; Y: 16-bit depth maps scaled to [0, 1].
    """

    def __init__(self,
                 list_samples,
                 targets,
                 path_hdf5,
                 batch_size=32,
                 dim=(288, 288),
                 scale=1,
                 num_channels=3,
                 shuffle=True,
                 first=True):
        """Store configuration and initialise the (shuffled) sample order.

        NOTE(review): `first` is stored but never read here.
        """
        self.dim = dim
        self.path_hdf5 = path_hdf5
        self.scale = scale
        self.batch_size = batch_size
        self.targets = targets
        self.list_samples = list_samples
        self.num_channels = num_channels
        self.shuffle = shuffle
        self.first = first
        self.on_epoch_end()

    def __len__(self):
        """Number of full batches per epoch (trailing remainder is dropped)."""
        return int(np.floor(len(self.list_samples) / self.batch_size))

    def __getitem__(self, index):
        """Build and return batch `index` as (X, Y)."""
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_samples_temp = [self.list_samples[i] for i in indexes]
        return self.__data_generation(list_samples_temp)

    def on_epoch_end(self):
        """Re-shuffle the sample order (if enabled) after every epoch."""
        self.indexes = np.arange(len(self.list_samples))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __int_to_float(self, array, bits):
        """Map `bits`-wide integer data to float32 in [0, 1]."""
        return array.astype(np.float32) / (2**bits - 1)

    def __rescale(self, array):
        """Spatially rescale by `self.scale` (nearest-neighbor), if needed."""
        if self.scale != 1:
            array = rescale(image=array,
                            scale=self.scale,
                            order=0,
                            preserve_range=True,
                            multichannel=True,
                            anti_aliasing=False)
        return array

    def __data_generation(self, list_samples_temp):
        """Read one batch from the HDF5 file and post-process it."""
        X = np.empty((self.batch_size, *self.dim, self.num_channels))
        Y = np.empty((self.batch_size, *self.dim, 1))
        # Open the HDF5 file once per batch instead of once per sample.
        with h5py.File(self.path_hdf5, "r") as f:
            for i, sample in enumerate(list_samples_temp):
                X[i,] = f[sample][:]
                Y[i,] = f[self.targets[sample]][:]
        X = self.__int_to_float(X, 8)
        Y = self.__int_to_float(Y, 16)
        X = self.__rescale(X)
        Y = self.__rescale(Y)
        return X, Y
class PredictionConvLSTMDataGenerator(Sequence):
    """Keras `Sequence` yielding frame sequences for a ConvLSTM predictor.

    X: (batch, input_frames, 144, 144, C) past frames; Y: (batch, 144, 144,
    C) next frame.  Inputs are label maps divided by 4 (labels 0..4 mapped
    into [0, 1]).  NOTE(review): `dim` is stored but the arrays are
    hard-coded to 144x144, and the two mask helpers plus `__int_to_float`
    are never called here — confirm whether they are still needed.
    """

    def __init__(self,
                 list_samples,
                 targets,
                 path_hdf5,
                 input_frames=3,
                 batch_size=32,
                 dim=(288, 288),
                 scale=1,
                 num_channels=3,
                 shuffle=True,
                 first=True):
        """Store configuration and initialise the (shuffled) sample order.

        Each entry of `list_samples` is a sequence of `input_frames` HDF5
        keys; `targets` maps the first key to the next-frame key.
        NOTE(review): `first` is stored but never read here.
        """
        self.dim = dim
        self.input_frames = input_frames
        self.path_hdf5 = path_hdf5
        self.scale = scale
        self.batch_size = batch_size
        self.targets = targets
        self.list_samples = list_samples
        self.num_channels = num_channels
        self.shuffle = shuffle
        self.first = first
        self.on_epoch_end()

    def __len__(self):
        """Number of full batches per epoch (trailing remainder is dropped)."""
        return int(np.floor(len(self.list_samples) / self.batch_size))

    def __getitem__(self, index):
        """Build and return batch `index` as (X, Y)."""
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_samples_temp = [self.list_samples[i] for i in indexes]
        return self.__data_generation(list_samples_temp)

    def on_epoch_end(self):
        """Re-shuffle the sample order (if enabled) after every epoch."""
        self.indexes = np.arange(len(self.list_samples))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __int_to_float(self, array, bits):
        """Map `bits`-wide integer data to float32 in [0, 1]."""
        return array.astype(np.float32) / (2**bits - 1)

    def __mask_oneshot_object(self, mask_array):
        """Soft-binarize: 1 where the label equals 3, else 0.5."""
        oneshot_array = np.empty(mask_array.shape)
        oneshot_array[mask_array == 3] = 1
        oneshot_array[mask_array != 3] = 0.5
        return oneshot_array

    def __mask_oneshot_occluder(self, mask_array):
        """Binarize: 1 where the label equals 4, else 0."""
        oneshot_array = np.empty(mask_array.shape)
        oneshot_array[mask_array == 4] = 1
        oneshot_array[mask_array != 4] = 0
        return oneshot_array

    def __rescale(self, array):
        """Spatially rescale by `self.scale` (bilinear), if needed."""
        if self.scale != 1:
            array = rescale(image=array,
                            scale=self.scale,
                            order=1,
                            preserve_range=True,
                            multichannel=True,
                            anti_aliasing=False)
        return array

    def __data_generation(self, list_samples_temp):
        """Read one batch from the HDF5 file and post-process it."""
        X = np.empty((self.batch_size, self.input_frames, *(144, 144), self.num_channels))
        Y = np.empty((self.batch_size, *(144, 144), self.num_channels))
        Z = np.empty((self.batch_size, *(144, 144), self.num_channels * self.input_frames))
        # Open the HDF5 file once per batch instead of once per sample.
        with h5py.File(self.path_hdf5, "r") as f:
            for i, sample in enumerate(list_samples_temp):
                array_list = [self.__rescale(f[sample[j]][:] / 4)
                              for j in range(self.input_frames)]
                Z[i,] = np.concatenate(array_list, axis=-1)
                # NOTE(review): reshaping the channel-stacked (144, 144, C*T)
                # buffer into (T, 144, 144, C) reinterprets the memory layout
                # rather than splitting it frame by frame — confirm intended.
                X[i,] = np.reshape(Z[i,], (self.input_frames, *(144, 144), self.num_channels))
                target = self.targets[sample[0]]
                Y[i,] = self.__rescale(f[target][:] / 4)
        return X, Y
class PredictionShortDataGenerator(Sequence):
    """Keras `Sequence` for short-horizon frame prediction.

    X: the last `memory_frames` (default 3) 8-bit frames stacked along the
    channel axis and scaled to [0, 1]; Y: the next frame.
    """

    def __init__(self,
                 list_samples,
                 targets,
                 path_hdf5,
                 memory_frames=3,
                 batch_size=32,
                 dim=(288, 288),
                 scale=1,
                 num_channels=3,
                 shuffle=True,
                 first=True):
        """Store configuration and initialise the (shuffled) sample order.

        Each entry of `list_samples` is a sequence of `memory_frames` HDF5
        keys; `targets` maps the first key to the next-frame key.
        NOTE(review): `first` is stored but never read here.
        """
        self.dim = dim
        self.memory_frames = memory_frames
        self.path_hdf5 = path_hdf5
        self.scale = scale
        self.batch_size = batch_size
        self.targets = targets
        self.list_samples = list_samples
        self.num_channels = num_channels
        self.shuffle = shuffle
        self.first = first
        self.on_epoch_end()

    def __len__(self):
        """Number of full batches per epoch (trailing remainder is dropped)."""
        return int(np.floor(len(self.list_samples) / self.batch_size))

    def __getitem__(self, index):
        """Build and return batch `index` as (X, Y)."""
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_samples_temp = [self.list_samples[i] for i in indexes]
        return self.__data_generation(list_samples_temp)

    def on_epoch_end(self):
        """Re-shuffle the sample order (if enabled) after every epoch."""
        self.indexes = np.arange(len(self.list_samples))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __int_to_float(self, array, bits):
        """Map `bits`-wide integer data to float32 in [0, 1]."""
        return array.astype(np.float32) / (2**bits - 1)

    def __rescale(self, array):
        """Spatially rescale by `self.scale` (bilinear, anti-aliased), if needed."""
        if self.scale != 1:
            array = rescale(image=array,
                            scale=self.scale,
                            order=1,
                            preserve_range=True,
                            multichannel=True,
                            anti_aliasing=True)
        return array

    def __data_generation(self, list_samples_temp):
        """Read one batch from the HDF5 file and post-process it."""
        X = np.empty((self.batch_size, *self.dim, self.num_channels * self.memory_frames))
        Y = np.empty((self.batch_size, *self.dim, self.num_channels))
        # Open the HDF5 file once per batch instead of once per sample.
        with h5py.File(self.path_hdf5, "r") as f:
            for i, sample in enumerate(list_samples_temp):
                array_list = [f[sample[j]][:] for j in range(self.memory_frames)]
                X[i,] = np.concatenate(array_list, axis=-1)
                Y[i,] = f[self.targets[sample[0]]][:]
        X = self.__int_to_float(X, 8)
        Y = self.__int_to_float(Y, 8)
        X = self.__rescale(X)
        Y = self.__rescale(Y)
        return X, Y
class PredictionLongDataGenerator(Sequence):
    """Keras `Sequence` for long-horizon frame prediction.

    Identical to `PredictionShortDataGenerator` except that the default
    history length is `memory_frames=5`.
    """

    def __init__(self,
                 list_samples,
                 targets,
                 path_hdf5,
                 memory_frames=5,
                 batch_size=32,
                 dim=(288, 288),
                 scale=1,
                 num_channels=3,
                 shuffle=True,
                 first=True):
        """Store configuration and initialise the (shuffled) sample order.

        NOTE(review): `first` is stored but never read here.
        """
        self.dim = dim
        self.memory_frames = memory_frames
        self.path_hdf5 = path_hdf5
        self.scale = scale
        self.batch_size = batch_size
        self.targets = targets
        self.list_samples = list_samples
        self.num_channels = num_channels
        self.shuffle = shuffle
        self.first = first
        self.on_epoch_end()

    def __len__(self):
        """Number of full batches per epoch (trailing remainder is dropped)."""
        return int(np.floor(len(self.list_samples) / self.batch_size))

    def __getitem__(self, index):
        """Build and return batch `index` as (X, Y)."""
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_samples_temp = [self.list_samples[i] for i in indexes]
        return self.__data_generation(list_samples_temp)

    def on_epoch_end(self):
        """Re-shuffle the sample order (if enabled) after every epoch."""
        self.indexes = np.arange(len(self.list_samples))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __int_to_float(self, array, bits):
        """Map `bits`-wide integer data to float32 in [0, 1]."""
        return array.astype(np.float32) / (2**bits - 1)

    def __rescale(self, array):
        """Spatially rescale by `self.scale` (bilinear, anti-aliased), if needed."""
        if self.scale != 1:
            array = rescale(image=array,
                            scale=self.scale,
                            order=1,
                            preserve_range=True,
                            multichannel=True,
                            anti_aliasing=True)
        return array

    def __data_generation(self, list_samples_temp):
        """Read one batch from the HDF5 file and post-process it."""
        X = np.empty((self.batch_size, *self.dim, self.num_channels * self.memory_frames))
        Y = np.empty((self.batch_size, *self.dim, self.num_channels))
        # Open the HDF5 file once per batch instead of once per sample.
        with h5py.File(self.path_hdf5, "r") as f:
            for i, sample in enumerate(list_samples_temp):
                array_list = [f[sample[j]][:] for j in range(self.memory_frames)]
                X[i,] = np.concatenate(array_list, axis=-1)
                Y[i,] = f[self.targets[sample[0]]][:]
        X = self.__int_to_float(X, 8)
        Y = self.__int_to_float(Y, 8)
        X = self.__rescale(X)
        Y = self.__rescale(Y)
        return X, Y
class PredictionOpticDataGenerator(Sequence):
    """Keras `Sequence` for prediction where the target keeps the history.

    X: the last `memory_frames` 8-bit frames stacked along the channel axis;
    Y: the same stack with the next frame appended (hence
    `num_channels * (memory_frames + 1)` channels).
    """

    def __init__(self,
                 list_samples,
                 targets,
                 path_hdf5,
                 memory_frames=5,
                 batch_size=10,
                 dim=(288, 288),
                 scale=1,
                 num_channels=3,
                 shuffle=True,
                 first=True):
        """Store configuration and initialise the (shuffled) sample order.

        NOTE(review): `first` is stored but never read here.
        """
        self.dim = dim
        self.memory_frames = memory_frames
        self.path_hdf5 = path_hdf5
        self.scale = scale
        self.batch_size = batch_size
        self.targets = targets
        self.list_samples = list_samples
        self.num_channels = num_channels
        self.shuffle = shuffle
        self.first = first
        self.on_epoch_end()

    def __len__(self):
        """Number of full batches per epoch (trailing remainder is dropped)."""
        return int(np.floor(len(self.list_samples) / self.batch_size))

    def __getitem__(self, index):
        """Build and return batch `index` as (X, Y)."""
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        list_samples_temp = [self.list_samples[i] for i in indexes]
        return self.__data_generation(list_samples_temp)

    def on_epoch_end(self):
        """Re-shuffle the sample order (if enabled) after every epoch."""
        self.indexes = np.arange(len(self.list_samples))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __int_to_float(self, array, bits):
        """Map `bits`-wide integer data to float32 in [0, 1]."""
        return array.astype(np.float32) / (2**bits - 1)

    def __rescale(self, array):
        """Spatially rescale by `self.scale` (bilinear, anti-aliased), if needed."""
        if self.scale != 1:
            array = rescale(image=array,
                            scale=self.scale,
                            order=1,
                            preserve_range=True,
                            multichannel=True,
                            anti_aliasing=True)
        return array

    def __data_generation(self, list_samples_temp):
        """Read one batch from the HDF5 file and post-process it."""
        X = np.empty((self.batch_size, *self.dim, self.num_channels * self.memory_frames))
        Y = np.empty((self.batch_size, *self.dim, self.num_channels + self.num_channels * self.memory_frames))
        # Open the HDF5 file once per batch instead of once per sample.
        with h5py.File(self.path_hdf5, "r") as f:
            for i, sample in enumerate(list_samples_temp):
                array_list = [f[sample[j]][:] for j in range(self.memory_frames)]
                X[i,] = np.concatenate(array_list, axis=-1)
                # The target frame is appended so Y = history + next frame.
                array_list.append(f[self.targets[sample[0]]][:])
                Y[i,] = np.concatenate(array_list, axis=-1)
        X = self.__int_to_float(X, 8)
        Y = self.__int_to_float(Y, 8)
        X = self.__rescale(X)
        Y = self.__rescale(Y)
        return X, Y
nidm/nidm-results/test/TestProvStoreLinks.py | khelm/nidm | 14 | 12766931 | <reponame>khelm/nidm
#!/usr/bin/env python
'''Test thats link to example files on the Prov Store are up to date
@author: <NAME> <<EMAIL>>, <NAME>
@copyright: University of Warwick 2014
'''
import unittest
from nidmresults.test.test_commons import *
import logging
import re
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Two directory levels above this test file (i.e. the nidm-results root).
RELPATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class TestProvStoreLinks(unittest.TestCase):
    """Check that the Prov Store URLs recorded in each example's README
    still match the local Turtle files."""

    def __init__(self, *args, **kwargs):
        """Collect, for every example, its local .ttl path and the Prov
        Store document URL scraped from the neighbouring README.md."""
        super(TestProvStoreLinks, self).__init__(*args, **kwargs)
        self.provstore_url = dict()
        # self.ttl_from_provn_file_url = dict()
        self.ttl_file = dict()

        for example_file in example_filenames:
            # Read ttl
            ttl_file = os.path.join(RELPATH, example_file)

            # Get corresponding turtle
            # ttl_from_provn_file_url = get_turtle(provn_file)
            # self.ttl_from_provn_file_url[example_file] = \
            #     ttl_from_provn_file_url
            self.ttl_file[example_file] = ttl_file
            #provn_file.replace(".provn", ".ttl")

            # Read README
            readme_file = os.path.join(
                RELPATH, os.path.dirname(example_file), 'README.md')
            readme_fid = open(readme_file)
            readme_txt = readme_fid.read()
            readme_fid.close()

            # First Prov Store document URL mentioned in the README, if any.
            provstore_url_index = re.search(
                "https://provenance.ecs.soton.ac.uk/store/documents/[^/]*/",
                readme_txt)

            # Get corresponding turtle on Prov Store
            if provstore_url_index:
                # Drop the trailing '/' and request the Turtle serialisation.
                provstore_url = readme_txt[
                    provstore_url_index.start():provstore_url_index.end()-1]\
                    + ".ttl"
            else:
                provstore_url = None

            # Save URL to Prov Store document
            self.provstore_url[example_file] = provstore_url

    def setUp(self):
        logger.info("Test: TestProvStoreLinks")

    def test_provstore_links(self):
        """Compare each local Turtle file with its Prov Store copy and
        fail with the full list of mismatches/missing pieces."""
        error_msg = list()
        for example_file in example_filenames:
            if self.provstore_url[example_file]:
                logger.info('\tProv store URL: '+self.provstore_url[
                    example_file])
                found_difference = compare_ttl_documents(
                    self.ttl_file[example_file],
                    self.provstore_url[example_file])
                if found_difference:
                    error_msg.append(
                        example_file +
                        ": Prov store link outdated, please update README.md u\
sing nidm/nidm-results/scripts/UpdateExampleReadmes.py")
            else:
                error_msg.append(
                    example_file+': No document URL found in README.')

            if self.ttl_file[example_file]:
                if os.path.isfile(self.ttl_file[example_file]):
                    # We keep this here (even now that provn is not stored
                    # anymore) so that if we want to introduce other
                    # serialisations we can re-use this code
                    # found_difference = compare_ttl_documents(
                    #     self.ttl_from_provn_file_url[example_file],
                    #     self.ttl_file[example_file])
                    found_difference = False
                    if found_difference:
                        error_msg.append(
                            example_file +
                            ": Provn file outdated, please update using nidm/n\
idm-results/scripts/UpdateExampleReadmes.py")
                else:
                    error_msg.append(example_file+": No turtle file.")
            else:
                # NOTE(review): 'turle' typo kept as-is (runtime message).
                error_msg.append(example_file+': No turle file.')

        # Raise errors
        if error_msg:
            raise Exception("\n".join(error_msg))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 2.15625 | 2 |
jsonrpc/__init__.py | yosida95/python-jsonrpc | 0 | 12766932 | <reponame>yosida95/python-jsonrpc
# -*- coding: utf-8 -*-
from .client import (
Error,
ServerProxy,
)
from .transport import (
TCPSocketTransport,
UnixDomainSocketTransport,
)
__all__ = [
# .client
'Error',
'ServerProxy',
# .transport
'TCPSocketTransport',
'UnixDomainSocketTransport',
]
| 1.671875 | 2 |
third_party/universal-ctags/ctags/Units/parser-python.r/python2-arglists.d/input.py | f110/wing | 4 | 12766933 | <filename>third_party/universal-ctags/ctags/Units/parser-python.r/python2-arglists.d/input.py
#!/usr/bin/env python2
def func01():
pass
def func02(a):
pass
def func03(a, b = 2):
pass
def func04(a = 1, b = 2):
pass
def func05(*args):
pass
def func06(**kwargs):
pass
def func07(*args, **kwargs):
pass
def func08(a, b = 2, *args):
pass
def func09(a = [1, 2], b = 2, **kwargs):
pass
def func10(a = (1, 2), b = 2, *args, **kwargs):
pass
def func11(a = {1:1, 2:1}, b = 2, *args, **kwargs):
pass
# Python2 only
def func12((a1, a2)):
pass
def func13((a1, a2), b):
pass
def func14((a1, a2) = (1, 2), b = 2):
pass
def func15((a1, a2), *args):
pass
lamb01 = lambda: 0
lamb02 = lambda a: 0
lamb03 = lambda a, b: 0
lamb04 = lambda a, b = 2: 0
lamb05 = lambda *args: 0
lamb06 = lambda *args, **kwargs: 0
lamb07 = lambda a, *args, **kwargs: 0
lamb08 = lambda a = 1, *args, **kwargs: 0
lamb09 = lambda a = [1, 2], *args, **kwargs: 0
lamb10 = lambda a = (1, 2), *args, **kwargs: 0
lamb11 = lambda a = {1:1, 2:1}, *args, **kwargs: 0
lamb12 = lambda a = lambda:0, *args, **kwargs: a
# Python2 only
lamb13 = lambda (a1, a2): 0
lamb14 = lambda (a1, a2), b: 0
lamb15 = lambda (a1, a2) = (1, 2), b = 2: 0
lamb16 = lambda (a1, a2), *args: 0
| 2.609375 | 3 |
mathy_envs/envs/__init__.py | mathy/mathy_envs | 1 | 12766934 | from .binomial_distribute import BinomialDistribute
from .complex_simplify import ComplexSimplify
from .poly_combine_in_place import PolyCombineInPlace
from .poly_commute_like_terms import PolyCommuteLikeTerms
from .poly_grouping import PolyGroupLikeTerms
from .poly_haystack_like_terms import PolyHaystackLikeTerms
from .poly_simplify import PolySimplify
from .poly_simplify_blockers import PolySimplifyBlockers
MATHY_BUILTIN_ENVS = [
BinomialDistribute,
ComplexSimplify,
PolySimplifyBlockers,
PolyCombineInPlace,
PolyCommuteLikeTerms,
PolyGroupLikeTerms,
PolyHaystackLikeTerms,
PolySimplify,
]
| 1.164063 | 1 |
flair/training_files/ag_news/train_ag_news.py | AshishMahendra/flair | 1 | 12766935 | <filename>flair/training_files/ag_news/train_ag_news.py
from flair.models import TARSClassifier
from flair.data import Corpus
from flair.datasets import SentenceDataset
from flair.trainers import ModelTrainer
import pickle
import time,logging
log = logging.getLogger("flair")
import argparse
train_ds=[]
test_ds=[]
def load_data():
    """Load pickled AG News sentence lists into module-level datasets.

    Reads 'train_data_agnews' and 'test_data_agnews' from the working
    directory, wraps them in flair SentenceDataset objects, and stores
    them in the module globals train_ds / test_ds.
    """
    global train_ds, test_ds
    # Use context managers so the pickle files are closed promptly
    # (the original leaked both file handles). Also dropped the never
    # assigned `dev_ds` global and the redundant list re-initialisations.
    with open('train_data_agnews', 'rb') as file:
        train = pickle.load(file)
    print("Loaded Train Data")
    with open('test_data_agnews', 'rb') as file:
        test = pickle.load(file)
    print("Loaded Test Data")
    train_ds = SentenceDataset(train)
    test_ds = SentenceDataset(test)
    print(train_ds[0])
    print(test_ds[0])
def train_module(model,fine_tune,ff_dim,nhead):
    """Fine-tune a TARS classifier on the AG News corpus loaded by load_data().

    ff_dim/nhead only affect the artifact directory name here.
    NOTE(review): the `model` and `fine_tune` parameters are currently
    unused inside the function body — confirm whether they should select
    the base model / training mode.
    """
    global train_ds,test_ds
    load_data()
    corpus = Corpus(train=train_ds,test=test_ds)
    start_time= time.time()
    # 1. load base TARS
    tars = TARSClassifier()#.load("tars-base")
    # print(tars)
    print(f"\n\nTime taken to load the model : {time.time()-start_time}\n\n")
    # 2. make the model aware of the desired set of labels from the new corpus
    tars.add_and_switch_to_new_task("ag_news_data", label_dictionary=corpus.make_label_dictionary(label_type="ag_news_data"),label_type="ag_news_data")
    # 3. initialize the text classifier trainer with your corpus
    trainer = ModelTrainer(tars, corpus)
    start_time= time.time()
    # start_perf_test()
    # 4. train model
    data=trainer.train(base_path=f'taggers/agnews_full_bert_big_head_{ff_dim}_{nhead}', # path to store the model artifact
                  learning_rate=0.02,
                  mini_batch_size=16,
                  max_epochs=2,
                  shuffle=True,
                  monitor_train=False,
                  train_with_dev =True,
                  embeddings_storage_mode="cuda")
    # stop_perf_test()
    log.info(f"\n\nTime taken to complete the model training : {time.time()-start_time}\n\n")
    log.info(data)
if __name__ == "__main__":
# Initialize parser
parser = argparse.ArgumentParser()
# Adding optional argument
parser.add_argument("-m", "--model", help = "TARS/BERT", default="BERT")
parser.add_argument("-ft", "--fine_tune", help = "Train the model (True/False)", type=bool,default=False)
parser.add_argument("-dim", "--ffdim", help = "Feedforward Dimension Size (2048/1024/512/256)",type=int, default=2048)
parser.add_argument("-nh", "--nhead", help = "Feedforward attention head numbers (8/4/2)", default=8,type=int)
# Read arguments from command line
args = parser.parse_args()
#print(args.model,args.fine_tune,args.ffdim,args.nhead)
train_module(args.model,args.fine_tune,args.ffdim,args.nhead) | 2.28125 | 2 |
measurements/migrations/0003_auto_20200707_1237.py | JakubWolak/blood_pressure_monitor | 0 | 12766936 | # Generated by Django 3.0.8 on 2020-07-07 10:37
from django.db import migrations, models
import measurements.validators
class Migration(migrations.Migration):
    """Auto-generated migration: attach min/max validators and defaults to
    the measurement vital-sign fields (diastolic/systolic pressure, pulse).
    """
    dependencies = [
        ('measurements', '0002_auto_20200706_1258'),
    ]
    operations = [
        migrations.AlterField(
            model_name='measurement',
            name='diastolic_pressure',
            field=models.SmallIntegerField(default=80, validators=[measurements.validators.max_diastolic_pressure, measurements.validators.min_diastolic_pressure], verbose_name='Ciśnienie rozkurczowe'),
        ),
        migrations.AlterField(
            model_name='measurement',
            name='pulse',
            field=models.SmallIntegerField(default=60, validators=[measurements.validators.max_pulse, measurements.validators.min_pulse], verbose_name='Tętno'),
        ),
        migrations.AlterField(
            model_name='measurement',
            name='systolic_pressure',
            field=models.SmallIntegerField(default=120, validators=[measurements.validators.max_systolic_pressure, measurements.validators.min_systolic_pressure], verbose_name='Ciśnienie skurczowe'),
        ),
    ]
| 1.46875 | 1 |
satori.core/satori/core/entities/Role.py | Cloud11665/satori-git | 4 | 12766937 | <reponame>Cloud11665/satori-git
# vim:ts=4:sts=4:sw=4:expandtab
import logging
from django.db import models, DatabaseError
from satori.core.dbev import Events
from satori.core.models import Entity
@ExportModel
class Role(Entity):
    """Model. Base for authorization "levels".
    """
    # One-to-one link back to the Entity base row (multi-table inheritance).
    parent_entity = models.OneToOneField(Entity, parent_link=True, related_name='cast_role')
    name = models.CharField(max_length=256)
    sort_field = models.CharField(max_length=256)
    # Role membership graph: a role's children are its members, wired
    # through the RoleMapping join model.
    children = models.ManyToManyField('self', related_name='parents', through='RoleMapping', symmetrical=False)
    class ExportMeta(object):
        # Fields exposed through the export layer and the right required
        # to read them.
        fields = [('name', 'VIEW'), ('sort_field', 'VIEW')]
    class RightsMeta(object):
        # Declared rights and their inheritance chain (VIEW <- EDIT <- MANAGE).
        rights = ['EDIT']
        inherit_VIEW = ['EDIT']
        inherit_EDIT = ['MANAGE']
    @classmethod
    def inherit_rights(cls):
        # Extend the base inheritance table so MANAGE implies EDIT and
        # EDIT implies VIEW on the same object.
        inherits = super(Role, cls).inherit_rights()
        cls._inherit_add(inherits, 'EDIT', 'id', 'MANAGE')
        cls._inherit_add(inherits, 'VIEW', 'id', 'EDIT')
        return inherits
    def __str__(self): #TODO
        return self.name
    def get_members(self):
        # All direct members (child roles) of this role.
        return self.children.all()
    def add_member(self, member):
        # Idempotent: get_or_create avoids duplicate membership rows.
        RoleMapping.objects.get_or_create(parent=self, child=member)[0].save()
    def delete_member(self, member):
        # Removing a non-member is a no-op by design.
        try:
            RoleMapping.objects.get(parent=self, child=member).delete()
        except RoleMapping.DoesNotExist:
            pass
class RoleEvents(Events):
    # Database-event wiring for Role: fire on inserts/updates of `name`;
    # deletions carry no field payload.
    model = Role
    on_insert = on_update = ['name']
    on_delete = []
| 1.898438 | 2 |
GGProject/workflow/views.py | VarenTechInternship/greeterguru | 0 | 12766938 | <reponame>VarenTechInternship/greeterguru
from django.http import Http404, HttpResponse
from rest_framework import status
from django.views.generic import View
from rest_framework.permissions import IsAdminUser, AllowAny
from django.views.generic import View
from django.shortcuts import render
from scripts import adminOptions
# View for synchronizing web database with active directory
class UpdateAD(View):
    """Admin-only view that synchronizes the web database with Active Directory."""

    def get(self, request):
        # Only superusers may see the sync page.
        if request.user.is_superuser:
            return render(request, "update_ad.html")
        return HttpResponse(status=405)

    def post(self, request):
        # Enforce the same superuser check as GET: previously ANY user
        # could trigger a full AD sync by POSTing directly.
        if not request.user.is_superuser:
            return HttpResponse(status=405)
        adminOptions.populate()
        return HttpResponse(status=status.HTTP_202_ACCEPTED)
# View for the admin option to set the authentication level
class AuthFactor(View):
    """Render the page for choosing the authentication level."""
    def get(self, request):
        # No state is read or written here; the options page is static.
        return render(request, "auth_options.html")
| 2 | 2 |
scripts/build_aln_store.py | wharvey31/project-diploid-assembly | 0 | 12766939 | <gh_stars>0
#!/usr/bin/env python3
import os
import argparse
import pandas as pd
def parse_command_line():
    """Parse CLI options for building the alignment store.

    Options: --bed-folder/-b (folder walked for *.bed contig alignments),
    --chrom-sizes/-c (chromosome sizes table), --sample-table/-s (sample
    annotation TSV), --output/-o (HDF5 output path).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--bed-folder',
        '-b',
        dest='bed_folder',
        type=str,
    )
    parser.add_argument(
        '--chrom-sizes',
        '-c',
        dest='chrom_sizes',
        type=str
    )
    parser.add_argument(
        '--sample-table',
        '-s',
        type=str,
        dest='sample_table'
    )
    parser.add_argument(
        '--output',
        '-o',
        dest='output',
        type=str
    )
    args = parser.parse_args()
    return args
def load_tabular_data(file_path, header='infer', names=None):
    """Read a tab-separated file into a DataFrame.

    If explicit column *names* are given, the file's first row is treated
    as a header line and discarded in favour of *names*; otherwise the
    *header* argument is forwarded to pandas unchanged.
    """
    if names is None:
        return pd.read_csv(file_path, sep='\t', header=header)
    return pd.read_csv(file_path, sep='\t', names=names, header=0)
def load_chromosome_table(file_path):
    """Load a chrom-sizes file, keeping only chromosome name and length."""
    table = load_tabular_data(file_path, header=None)
    # Keep the first two columns; extra columns in the input are dropped.
    table = table[table.columns[:2]]
    table.columns = ['chrom', 'size']
    # Sizes are stored compactly as int32 in the HDF5 store.
    table['size'] = table['size'].astype('int32')
    return table
def load_sample_table(file_path):
    """Load the tab-separated sample annotation table as-is."""
    sample_table = load_tabular_data(file_path)
    # ? TODO ?
    # drop "skip" samples...
    return sample_table
def extract_sample_info(file_name):
    """Derive (sample, read technology, haplotype) from a BED file name.

    Raises ValueError when the platform tag or haplotype tag cannot be
    recognised in the file name.
    """
    sample = file_name.split('_')[0]

    tech = None
    for marker, label in (('pbsq2-clr', 'CLR'), ('pbsq2-ccs', 'HiFi')):
        if marker in file_name:
            tech = label
            break
    if tech is None:
        raise ValueError('Cannot determine read type: {}'.format(file_name))

    hap = None
    for marker, label in (('h1-un', 'H10'), ('h2-un', 'H20')):
        if marker in file_name:
            hap = label
            break
    if hap is None:
        raise ValueError('Cannot determine haplotype: {}'.format(file_name))
    return sample, tech, hap
def load_contig_alignments(bed_folder, sample_table):
    """Collect contig alignment BED files under bed_folder.

    Returns a list of (store_key, DataFrame) pairs, where store_key is the
    HDF5 group path super_pop/sample/sex/technology/haplotype and the
    DataFrame holds the BED columns with compact integer dtypes.
    """
    # The sample table may name its sample column either way.
    if 'sample' in sample_table:
        sample_column = 'sample'
    elif 'individual' in sample_table:
        sample_column = 'individual'
    else:
        raise ValueError('Cannot identify sample column in table: {}'.format(sample_table.head()))
    # Map BED strand symbols to small signed integers ('.' = unknown).
    orientation_map = {
        '+': 1,
        '-': -1,
        '.': 0
    }
    store_values = []
    for root, dirs, files in os.walk(bed_folder, followlinks=False):
        files = [f for f in files if f.endswith('.bed')]
        for f in files:
            sample, technology, haplotype = extract_sample_info(f)
            # Each sample must resolve to exactly one annotation row.
            super_pop = sample_table.loc[sample_table[sample_column] == sample, 'super_population']
            assert len(super_pop) == 1, 'Multi super pop: {}'.format(super_pop)
            sex = sample_table.loc[sample_table[sample_column] == sample, 'sex']
            assert len(sex) == 1, 'Multi sample sex: {}'.format(sex)
            sex = sex.values[0]
            assert sex in ['male', 'female'], 'Unexpected sample sex: {}'.format(sex)
            super_pop = super_pop.values[0]
            store_key = os.path.join(super_pop, sample, sex, technology, haplotype)
            # header=0 here skips the BED's first row in favour of these names.
            aln = load_tabular_data(
                os.path.join(root, f),
                0,
                ['chrom', 'start', 'end', 'contig_name', 'mapq', 'orientation']
            )
            # Downcast columns to save space in the HDF5 store.
            aln['orientation'] = aln['orientation'].apply(lambda x: orientation_map[x])
            aln['orientation'] = aln['orientation'].astype('int8')
            aln['start'] = aln['start'].astype('int32')
            aln['end'] = aln['end'].astype('int32')
            aln['mapq'] = aln['mapq'].astype('int16')
            store_values.append((store_key, aln))
    return store_values
def main():
    """Build the HDF5 alignment store from BED files and metadata tables."""
    args = parse_command_line()
    chrom_table = load_chromosome_table(args.chrom_sizes)
    sample_table = load_sample_table(args.sample_table)
    contig_alignments = load_contig_alignments(
        args.bed_folder,
        sample_table
    )
    # mode='w' truncates any existing store; blosc/9 for compact output.
    with pd.HDFStore(args.output, mode='w', complevel=9, complib='blosc') as hdf:
        hdf.put(os.path.join('metadata', 'chrom'), chrom_table, format='fixed')
        hdf.put(os.path.join('metadata', 'sample'), sample_table, format='fixed')
        for k, v in contig_alignments:
            hdf.put(k, v, format='fixed')
    return
if __name__ == '__main__':
main()
| 2.71875 | 3 |
tests/system/test_examples.py | McCrearyD/simple_sagemaker | 16 | 12766940 | <reponame>McCrearyD/simple_sagemaker
import logging
import os
import shutil
import subprocess
import sys
from time import time
from .compare_outputs import isAsExpected
file_path = os.path.split(__file__)[0]
examples_path = os.path.abspath(os.path.join(file_path, "..", "..", "examples"))
sys.path.append(examples_path)
def _internalTestExample(caplog, tmp_path, runner):
    """Run a single example's `runner` and diff its output directory
    against the example's expected_output (also searched one level up).

    The project name gets a time + python-version suffix so concurrent
    CI runs do not collide.
    """
    caplog.set_level(logging.INFO)
    print("Temp path:", tmp_path)
    print("Running", runner, runner.__name__, runner.__module__)
    example_path = os.path.dirname(runner.__code__.co_filename)
    output_path = os.path.join(tmp_path, os.path.split(example_path)[-1], "output")
    # remove current local output
    shutil.rmtree(output_path, ignore_errors=True)
    # prefix/suffix for project name
    py_version_string = f"py{sys.version_info.major}{sys.version_info.minor}"
    time_string = int(time())
    postfix = f"-{time_string}-{py_version_string}"
    prefix = "tests/"
    sm_project = runner(postfix=postfix, prefix=prefix, output_path=output_path)
    # (removed a redundant `sm_project = sm_project` no-op assignment)
    # sm_project.cleanFolder()
    expected_path = os.path.join(example_path, "expected_output")
    # check for expected_output also one level up
    if not os.path.isdir(expected_path):
        expected_path = os.path.join(os.path.dirname(example_path), "expected_output")
    assert isAsExpected(output_path, expected_path)
def _internalTestCli(test_path, caplog, tmp_path):
    """Run an example's run.sh CLI script and diff its output directory
    against the example's expected_output.
    """
    caplog.set_level(logging.INFO)
    print("Temp path:", tmp_path)
    print("Running cli:", test_path)
    output_path = os.path.join(tmp_path, test_path, "output")
    # remove current local output
    shutil.rmtree(output_path, ignore_errors=True)
    # prefix/suffix for project name
    py_version_string = f"py{sys.version_info.major}{sys.version_info.minor}"
    time_string = int(time())
    postfix = f"-{time_string}-{py_version_string}"
    prefix = "tests/"
    run_shell = os.path.join(examples_path, test_path, "run.sh")
    # check=True makes a non-zero exit of run.sh fail the test immediately.
    subprocess.run(
        [run_shell, output_path, prefix, postfix, "--cs --force_running"], check=True
    )
    expected_path = os.path.join(examples_path, test_path, "expected_output")
    assert isAsExpected(output_path, expected_path)
# The skip_ prefix keeps this test out of pytest collection (disabled).
def skip_test_cli_multi(caplog, tmp_path):
    _internalTestCli("cli_multi", caplog, tmp_path)
def test_readme_examples(caplog, tmp_path):
    _internalTestCli("readme_examples", caplog, tmp_path)
def test_processing_cli_examples(caplog, tmp_path):
    _internalTestCli("processing_cli", caplog, tmp_path)
# The following tests import each example's runner lazily so a broken
# example only fails its own test.
def test_multiple_tasks(caplog, tmp_path):
    from multiple_tasks.example import runner
    _internalTestExample(caplog, tmp_path, runner)
def test_single_file_tasks(caplog, tmp_path):
    from single_file.example import runner
    _internalTestExample(caplog, tmp_path, runner)
def test_single_task(caplog, tmp_path):
    from single_task.example import runner
    _internalTestExample(caplog, tmp_path, runner)
| 1.875 | 2 |
raspberrypi/news.py | jbrucepayne/pitoys | 0 | 12766941 | <reponame>jbrucepayne/pitoys
import sys
import subprocess
# TODO: Find and download better news source
def get_news():
    """Return a hard-coded placeholder news headline."""
    headline = (
        "Breaking News! Area man converts old rotary telephone "
        "to internet connected super-device!"
    )
    return headline
| 1.929688 | 2 |
custodian/lib/python3.7/site-packages/ldap3/__init__.py | farrukh90/custodian | 1 | 12766942 | <reponame>farrukh90/custodian
"""
"""
# Created on 2013.05.15
#
# Author: <NAME>
#
# Copyright 2013 - 2020 <NAME>
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from types import GeneratorType
# authentication
ANONYMOUS = 'ANONYMOUS'
SIMPLE = 'SIMPLE'
SASL = 'SASL'
NTLM = 'NTLM'
# SASL MECHANISMS
EXTERNAL = 'EXTERNAL'
DIGEST_MD5 = 'DIGEST-MD5'
KERBEROS = GSSAPI = 'GSSAPI'
PLAIN = 'PLAIN'
AUTO_BIND_DEFAULT = 'DEFAULT' # binds connection when using "with" context manager
AUTO_BIND_NONE = 'NONE' # same as False, no bind is performed
AUTO_BIND_NO_TLS = 'NO_TLS' # same as True, bind is performed without tls
AUTO_BIND_TLS_BEFORE_BIND = 'TLS_BEFORE_BIND' # start_tls is performed before bind
AUTO_BIND_TLS_AFTER_BIND = 'TLS_AFTER_BIND' # start_tls is performed after bind
# server IP dual stack mode
IP_SYSTEM_DEFAULT = 'IP_SYSTEM_DEFAULT'
IP_V4_ONLY = 'IP_V4_ONLY'
IP_V6_ONLY = 'IP_V6_ONLY'
IP_V4_PREFERRED = 'IP_V4_PREFERRED'
IP_V6_PREFERRED = 'IP_V6_PREFERRED'
# search scope
BASE = 'BASE'
LEVEL = 'LEVEL'
SUBTREE = 'SUBTREE'
# search alias
DEREF_NEVER = 'NEVER'
DEREF_SEARCH = 'SEARCH'
DEREF_BASE = 'FINDING_BASE'
DEREF_ALWAYS = 'ALWAYS'
# search attributes
ALL_ATTRIBUTES = '*'
NO_ATTRIBUTES = '1.1' # as per RFC 4511
ALL_OPERATIONAL_ATTRIBUTES = '+' # as per RFC 3673
# modify type
MODIFY_ADD = 'MODIFY_ADD'
MODIFY_DELETE = 'MODIFY_DELETE'
MODIFY_REPLACE = 'MODIFY_REPLACE'
MODIFY_INCREMENT = 'MODIFY_INCREMENT'
# client strategies
SYNC = 'SYNC'
SAFE_SYNC = 'SAFE_SYNC'
ASYNC = 'ASYNC'
LDIF = 'LDIF'
RESTARTABLE = 'RESTARTABLE'
REUSABLE = 'REUSABLE'
MOCK_SYNC = 'MOCK_SYNC'
MOCK_ASYNC = 'MOCK_ASYNC'
ASYNC_STREAM = 'ASYNC_STREAM'
# get rootDSE info
NONE = 'NO_INFO'
DSA = 'DSA'
SCHEMA = 'SCHEMA'
ALL = 'ALL'
OFFLINE_EDIR_8_8_8 = 'EDIR_8_8_8'
OFFLINE_EDIR_9_1_4 = 'EDIR_9_1_4'
OFFLINE_AD_2012_R2 = 'AD_2012_R2'
OFFLINE_SLAPD_2_4 = 'SLAPD_2_4'
OFFLINE_DS389_1_3_3 = 'DS389_1_3_3'
# server pooling
FIRST = 'FIRST'
ROUND_ROBIN = 'ROUND_ROBIN'
RANDOM = 'RANDOM'
# Hashed password
HASHED_NONE = 'PLAIN'
HASHED_SHA = 'SHA'
HASHED_SHA256 = 'SHA256'
HASHED_SHA384 = 'SHA384'
HASHED_SHA512 = 'SHA512'
HASHED_MD5 = 'MD5'
HASHED_SALTED_SHA = 'SALTED_SHA'
HASHED_SALTED_SHA256 = 'SALTED_SHA256'
HASHED_SALTED_SHA384 = 'SALTED_SHA384'
HASHED_SALTED_SHA512 = 'SALTED_SHA512'
HASHED_SALTED_MD5 = 'SALTED_MD5'
if str is not bytes: # Python 3
NUMERIC_TYPES = (int, float)
INTEGER_TYPES = (int, )
else:
NUMERIC_TYPES = (int, long, float)
INTEGER_TYPES = (int, long)
# types for string and sequence
if str is not bytes: # Python 3
STRING_TYPES = (str, )
SEQUENCE_TYPES = (set, list, tuple, GeneratorType, type(dict().keys())) # dict.keys() is a iterable memoryview in Python 3
else: # Python 2
try:
from future.types.newstr import newstr
except ImportError:
pass
STRING_TYPES = (str, unicode)
SEQUENCE_TYPES = (set, list, tuple, GeneratorType)
# centralized imports # must be at the end of the __init__.py file
from .version import __author__, __version__, __email__, __description__, __status__, __license__, __url__
from .utils.config import get_config_parameter, set_config_parameter
from .core.server import Server
from .core.connection import Connection
from .core.tls import Tls
from .core.pooling import ServerPool
from .abstract.objectDef import ObjectDef
from .abstract.attrDef import AttrDef
from .abstract.attribute import Attribute, WritableAttribute, OperationalAttribute
from .abstract.entry import Entry, WritableEntry
from .abstract.cursor import Reader, Writer
from .protocol.rfc4512 import DsaInfo, SchemaInfo
| 1.4375 | 1 |
GBM/GBMSlidingwindows_V2/GBMSlidingWindows_QualityControl/RawMeanColorMap.py | joshlyman/TextureAnalysis | 1 | 12766943 |
import csv
import matplotlib.pyplot as plt
# Input texture-map CSVs and output folder (hard-coded local paths).
# NOTE(review): rawmeanafterNormFile is defined but never used below —
# presumably a second plot was planned; confirm.
rawmeanbeforeNormFile = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/GBM_SlidingWindow_TextureMap/CE_slice22_T2_ROI_Texture_Map.csv'
rawmeanafterNormFile ='/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/addYlabel/GBM_SlidingWindow_TextureMap/CE_slice22_T2_ROI_Texture_Map.csv'
saveDir = '/Users/yanzhexu/Desktop/Research/Sliding box GBM/MyAlgorithm_V2/Test/'
with open(rawmeanbeforeNormFile,'r') as featuresfile:
    # Skip the header line before handing the file to csv.reader.
    featuresfile.readline()
    rowFile = csv.reader(featuresfile, delimiter=',')
    xlist = list()
    ylist = list()
    rawmeanlist = list()
    for row in rowFile:
        # Keep only SPGRC rows; columns 2/3 are coordinates and column 42
        # is the raw-mean feature value — TODO confirm against the CSV header.
        if row[0] =='SPGRC':
            xlist.append(int(row[2]))
            ylist.append(int(row[3]))
            rawmeanlist.append(float(row[42]))
# Scatter the raw-mean values as a jet-colored map and save to disk.
cm = plt.cm.get_cmap('jet')
plt.scatter(xlist, ylist, c=rawmeanlist, cmap=cm)
plt.colorbar()
plt.savefig(saveDir + 'Raw Mean Before Normalization.png')
plt.cla()
plt.close()
# plt.show()
| 2.40625 | 2 |
setup.py | mugiseyebrows/mugi-sync | 0 | 12766944 | <reponame>mugiseyebrows/mugi-sync
from setuptools import setup, find_packages
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
packages = find_packages(),
name = 'mugisync',
version='0.0.7',
author="<NAME>",
author_email="<EMAIL>",
url='https://github.com/mugiseyebrows/mugi-sync',
description='File synchronization utility',
long_description = long_description,
install_requires = ['eventloop','colorama'],
entry_points={
'console_scripts': [
'mugisync = mugisync:main'
]
},
) | 1.234375 | 1 |
load_assets.py | pedroivoal/Dessoft-PF | 0 | 12766945 | <reponame>pedroivoal/Dessoft-PF
def load_assets():
    """Load all game images and sounds into a dict of assets.

    NOTE(review): relies on module-level `pygame` plus the size constants
    (largura, altura, largura_aviao, ...) being defined by the importing
    module — none are visible here; confirm.
    """
    assets = {}
    # game background
    assets['background'] = pygame.image.load(r'img\spacebg.jpg').convert()
    assets['background']= pygame.transform.scale(assets['background'],(largura,altura))
    # start screen
    assets['tela_init'] = pygame.image.load(r'img\screen_start1.png').convert()
    assets['tela_init']= pygame.transform.scale(assets['tela_init'],(largura,altura))
    # game-over screen
    assets['tela_fin'] = pygame.image.load(r'img\screen_gameover1.png').convert()
    assets['tela_fin']= pygame.transform.scale(assets['tela_fin'],(largura,altura))
    # victory screen
    assets['tela_fin2'] = pygame.image.load(r'img\screen_final1.png').convert()
    assets['tela_fin2']= pygame.transform.scale(assets['tela_fin2'],(largura,altura))
    # UFO (enemy) image
    assets['image_aviao'] = pygame.image.load(r'img\ufo2.png').convert_alpha()
    assets['image_aviao'] = pygame.transform.scale(assets['image_aviao'],(largura_aviao,altura_aviao))
    # player ship image 1
    assets['image_nave'] = pygame.image.load(r'img\ITS1.png').convert_alpha()
    assets['image_nave'] = pygame.transform.scale(assets['image_nave'],(largura_nave,altura_nave))
    # player ship image 2
    assets['image_nave2'] = pygame.image.load(r'img\ITS2.png').convert_alpha()
    assets['image_nave2'] = pygame.transform.scale(assets['image_nave2'],(largura_nave,altura_nave2))
    anim_explosao_av = []
    anim_explosao_nav = []
    # 72x72 explosion frames (for the UFO)
    for i in range(9):
        # animation frame files
        animacao = 'regularExplosion0{}.png'.format(i)
        img = pygame.image.load(animacao).convert()
        img = pygame.transform.scale(img, (72, 72))
        anim_explosao_av.append(img)
    assets["anim_explosao_av"] = anim_explosao_av
    # 150x150 explosion frames (for the player ship)
    for i in range(9):
        # animation frame files
        animacao = 'regularExplosion0{}.png'.format(i)
        img = pygame.image.load(animacao).convert()
        img = pygame.transform.scale(img, (150, 150))
        anim_explosao_nav.append(img)
    assets["anim_explosao_nav"] = anim_explosao_nav
    # load the game sounds
    # NOTE(review): successive pygame.mixer.music.load calls replace the
    # previously loaded track, so only 'endmusic.mp3' remains loaded after
    # this function — confirm this is intended.
    pygame.mixer.music.load(r'som\music.mp3')
    pygame.mixer.music.load(r'som\musicgame.mp3')
    pygame.mixer.music.load(r'som\endmusic.mp3')
    pygame.mixer.music.set_volume(0.2)
    assets['explosao'] = pygame.mixer.Sound(r'som\explosao.mp3')
    return assets
| 2.390625 | 2 |
dataset.py | seujung/KoGPT2-LoRA-summarization | 0 | 12766946 | <reponame>seujung/KoGPT2-LoRA-summarization
import os
import glob
import torch
import ast
import numpy as np
import pandas as pd
from tqdm import tqdm, trange
from torch.utils.data import Dataset, DataLoader
MASK = '<unused0>'
SUMMARY = '<unused1>'
BOS = '</s>'
EOS = '</s>'
PAD = '<pad>'
class KoGPTSummaryDataset(Dataset):
    """Torch Dataset building GPT language-model inputs for summarization.

    Each item is <bos> article tokens <summary-token> summary tokens <eos>,
    with labels masked (-100) over the article part so loss is computed on
    the summary only.
    """
    def __init__(self, file, tok, max_len,
                 bos_token=BOS, eos_token=EOS,
                 pad_token=PAD, mask_token=MASK,
                 summary_token = SUMMARY,
                 ignore_index = -100
                 ):
        super().__init__()
        # tok: tokenizer with encode() and pad_token_id (e.g. KoGPT2's).
        self.tok = tok
        self.max_len = max_len
        # Expects a TSV with 'news' and 'summary' columns.
        self.docs = pd.read_csv(file, sep='\t')
        self.len = self.docs.shape[0]
        self.bos_token = bos_token
        self.eos_token = eos_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.summary_token = summary_token
        self.ignore_index = ignore_index
    def add_padding_data(self, inputs, pad_index):
        # Right-pad (or truncate) a token list to exactly max_len.
        if len(inputs) < self.max_len:
            pad = [pad_index] *(self.max_len - len(inputs))
            inputs = inputs + pad
        else:
            inputs = inputs[:self.max_len]
        return inputs
    def __getitem__(self, idx):
        instance = self.docs.iloc[idx]
        article = self.tok.encode(self.bos_token) + self.tok.encode(instance['news'])
        len_article = len(article)
        summary = self.tok.encode(self.summary_token) + self.tok.encode(instance['summary']) + self.tok.encode(self.eos_token)
        len_summary = len(summary)
        context = article + summary
        # If the pair is too long, truncate the ARTICLE from the right so
        # the full summary is always kept.
        if len(context) > self.max_len:
            additional_len = len(context) - self.max_len
            article = article[:-additional_len]
            len_article = len(article)
            context = article + summary
        # Loss is ignored (-100) on the article; the summary is shifted by
        # one token (next-token prediction).
        # NOTE(review): labels contributes len_summary-1 tokens while mask
        # flags len_summary positions — confirm the intended alignment.
        labels = [-100] * len_article + summary[1:]
        mask = [0] * len_article + [1] * len_summary + [0] * (self.max_len - len_article - len_summary)
        if len(context) < self.max_len:
            context = self.add_padding_data(context, self.tok.pad_token_id)
        if len(labels) < self.max_len:
            labels = self.add_padding_data(labels, -100)
        return {'input': np.array(context, dtype=np.int_),
                'mask': np.array(mask, dtype=np.int_),
                'label': np.array(labels, dtype=np.int_)}
    def __len__(self):
        return self.len
| 2.484375 | 2 |
1 - Algorithms/2 - Strings/Super Reduced String.py | Terence-Guan/Python.HackerRank | 88 | 12766947 | <gh_stars>10-100
from collections import deque
import re
# Iteratively delete adjacent equal character pairs until the string
# stabilises. Fixed: the pattern must be a RAW string — "(.)\1" contains
# the control character \x01 instead of a backreference, so the original
# loop never removed anything.
prev, curr = "", "aaabccddd"
while prev != curr:
    prev, curr = curr, re.sub(r"(.)\1", "", curr)
print(curr)
def reduce(s):
    """Print the super-reduced form of s (adjacent equal pairs removed).

    Stack-based single pass: each character cancels the stack top when
    equal, which removes pairs transitively. Prints 'Empty String' when
    everything cancels. (Name kept for callers, though it shadows
    functools.reduce.)
    """
    d = deque()
    for c in s:
        if d and d[-1] == c:
            d.pop()
        else:
            d.append(c)  # fixed: original called d.append() with no argument
    print(*d if d else "Empty String", sep="")
sa = ["aaabccddd", "abd", "baab"]
| 2.984375 | 3 |
Day4/program2.py | CAG2Mark/Advent-Of-Code-Solutions | 0 | 12766948 | count = 0
fields = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"]
hex_not = "1234567890abcdef"
ecls = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
def check_pprt(pprt):
    """Validate a passport dict per AoC 2020 Day 4 part 2 rules.

    Increments the module-level `count` when valid and returns the
    validity flag (new, backward-compatible return value). The `fields`
    list deliberately omits the optional 'cid' field.
    """
    global count
    flag = all(f in pprt for f in fields)
    if flag:
        try:
            byr = int(pprt["byr"])
            flag = flag and 1920 <= byr <= 2002
            iyr = int(pprt["iyr"])
            flag = flag and 2010 <= iyr <= 2020
            eyr = int(pprt["eyr"])
            flag = flag and 2020 <= eyr <= 2030
            hgt = int(pprt["hgt"][:-2])
            unit = pprt["hgt"][-2:]
            # fixed: the unit was never validated, so e.g. "66qq" passed
            # (any non-"cm" suffix was treated as inches).
            flag = flag and unit in ("cm", "in")
            lwr, upr = (150, 193) if unit == "cm" else (59, 76)
            flag = flag and lwr <= hgt <= upr
            hcl = pprt["hcl"]
            # fixed: require exactly '#' + 6 hex digits (length was unchecked,
            # so "#abc" passed).
            flag = flag and len(hcl) == 7 and hcl[0] == "#"
            flag = flag and all(ch in hex_not for ch in hcl[1:])
            flag = flag and pprt["ecl"] in ecls
            # fixed: pid must be 9 characters AND all digits.
            flag = flag and len(pprt["pid"]) == 9 and pprt["pid"].isdigit()
        except (ValueError, IndexError, KeyError, TypeError):
            # narrowed from a bare `except:` which also swallowed e.g.
            # KeyboardInterrupt
            flag = False
    count = count + flag
    return flag
pprt = dict()
while True:
try:
ln = input()
if not ln.strip():
check_pprt(pprt)
pprt = dict()
continue
data = ln.split(" ")
for d in data:
d_spl = d.split(":")
pprt[d_spl[0]] = d_spl[1]
except EOFError:
check_pprt(pprt)
break
print(count) | 2.859375 | 3 |
helper.py | Khushmeet/char-conv-classification | 1 | 12766949 | import numpy as np
import string
import pandas as pd
from keras.preprocessing.sequence import pad_sequences
char_limit = 1014
def get_data(path):
    """Load an AG-News style CSV and return (texts, one-hot labels).

    The CSV has three unnamed columns (label, title, text); the middle
    column is discarded. Labels are integers 1-4, one-hot encoded in that
    order; texts are lower-cased. A label outside 1-4 now raises instead
    of being silently bucketed into class 4 as the old if/elif chain did.
    """
    df = pd.read_csv(path, names=['one', 'second', 'third'])
    df = df.drop('second', axis=1)
    inputs = []
    labels = []
    for label, text in df.values:
        inputs.append(text.lower())
        one_hot = [0.0, 0.0, 0.0, 0.0]
        one_hot[int(label) - 1] = 1.0
        labels.append(one_hot)
    return inputs, np.array(labels, dtype=np.float32)
def create_vocab_set(inputs):
    """Build the word vocabulary over whitespace-tokenised texts.

    Returns (vocab, vocab_size, word2idx) where word2idx maps each word
    to a unique integer index.
    """
    vocab = set()
    for text in inputs:
        vocab.update(text.split(' '))
    word2idx = {word: idx for idx, word in enumerate(vocab)}
    return vocab, len(vocab), word2idx
def _encode_text(s, word2idx):
vec = []
for i in s.split(' '):
vec.append(word2idx[i])
return np.array(vec)
def get_encoded_text(text, word2idx, sent_limit):
    """Encode every text to word indices, zero-padded to sent_limit."""
    encoded = [_encode_text(single_text, word2idx) for single_text in text]
    encoded = pad_sequences(encoded, maxlen=sent_limit, value=0.)
    return np.array(encoded)
def batch_gen(encoded_text, labels, batch_size):
    """Yield successive (inputs, labels) mini-batches of size batch_size.

    The final batch may be smaller. (Fixed: the original yield line had
    stray dataset-metadata text fused onto it, which was a syntax error.)
    """
    for ii in range(0, len(encoded_text), batch_size):
        x = encoded_text[ii:ii + batch_size]
        y = labels[ii:ii + batch_size]
        yield (x, y)
examples/state_machine_examples/global_timers/global_timer_example_pwm_loop_no_events.py | ckarageorgkaneen/pybpod-api | 1 | 12766950 | <gh_stars>1-10
% Example state matrix: 3 global timers in "loop mode" blink port 1-3 LEDs at different rates.
% The timers are triggered in the first state. Next, the state machine goes into a state
% where it waits for two events:
% 1. Port1In momentarily enters a state that stops global timer 3. Port 3 will stop blinking.
% 2. Exits the state machine.
sma = NewStateMachine;
sma = SetGlobalTimer(sma, 'TimerID', 1, 'Duration', 0.1, 'OnsetDelay', 1,...
'Channel', 'PWM1', 'PulseWidthByte', 255, 'PulseOffByte', 0,...
'Loop', 1, 'SendGlobalTimerEvents', 0, 'LoopInterval', 0.1);
sma = SetGlobalTimer(sma, 'TimerID', 2, 'Duration', 0.2, 'OnsetDelay', 1,...
'Channel', 'PWM2', 'PulseWidthByte', 255, 'PulseOffByte', 0,...
'Loop', 1, 'SendGlobalTimerEvents', 0, 'LoopInterval', 0.2);
sma = SetGlobalTimer(sma, 'TimerID', 3, 'Duration', 0.3, 'OnsetDelay', 1,...
'Channel', 'PWM3', 'PulseWidthByte', 255, 'PulseOffByte', 0,...
'Loop', 1, 'SendGlobalTimerEvents', 0, 'LoopInterval', 0.3);
sma = AddState(sma, 'Name', 'TimerTrig1', ...
'Timer', 0,...
'StateChangeConditions', {'Tup', 'WaitForPoke'},...
'OutputActions', {'GlobalTimerTrig', '111'}); % Use binary strings to specify multiple timers (integers specify only one)
sma = AddState(sma, 'Name', 'WaitForPoke', ...
'Timer', 0,...
'StateChangeConditions', {'Port1In', 'StopGlobalTimer', 'Port2In', '>exit'},...
'OutputActions', {});
sma = AddState(sma, 'Name', 'StopGlobalTimer', ...
'Timer', 0,...
'StateChangeConditions', {'Tup', 'WaitForPoke'},...
'OutputActions', {'GlobalTimerCancel', 3});
| 2.375 | 2 |