max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
lib/preprocess.py | S-Hun/news-keyword-analysis | 1 | 12764751 | <gh_stars>1-10
import re
import json
import pandas as pd
from collections import Counter
from konlpy.tag import Okt
import os
import configparser
def getData(srcText, data_path):
    """Load crawled Naver news items for *srcText*.

    Reads ``<data_path>/<srcText>_naver_news.json`` and returns the parsed
    JSON object (typically a list of article dicts).
    """
    fileName = srcText + "_naver_news"
    # Use a context manager so the file handle is always closed
    # (the original left the handle open after read()).
    with open(data_path + '/' + fileName + '.json', 'r', encoding='utf-8') as f:
        return json.load(f)
def extractText(data):
    """Concatenate the title and description of each news item into one string.

    Non-word characters are replaced with spaces so the result can be fed
    straight into a tokenizer.
    """
    message = ''
    for item in data:
        if 'title' in item:
            # Bug fix: the original appended '' (a no-op), so the last word of
            # one field fused with the first word of the next; append a space.
            message = message + re.sub(r'[^\w]', ' ', item['title']) + ' '
        if 'description' in item:
            message = message + re.sub(r'[^\w]', ' ', item['description']) + ' '
    return message
def nounTagging(message):
    """Extract nouns from *message* using the Okt (Open Korean Text) tagger."""
    return Okt().nouns(message)
def getConfig(config_path):
    """Read and validate an INI configuration file.

    Falls back to ``.\\config.ini`` in the current directory when
    *config_path* is None.  Returns a ConfigParser on success, or None when
    the file is missing or any configured value is blank.
    """
    if config_path is None:  # idiom fix: was `== None`
        config_path = '.\\config.ini'
        print(os.getcwd())
    if not os.path.isfile(config_path):  # idiom fix: was `!= True`
        print("ERR: \"%s\" config file is missing." % (config_path))
        return None
    config = configparser.ConfigParser()
    config.read(config_path, encoding='utf-8')
    # Reject configs containing empty values so callers can assume every
    # key they read is populated.
    for section in config.sections():
        for key in config[section]:
            if config[section][key] == '':
                print("ERR: config \"%s\" value is blank." % (key))
                return None
    return config
def wordCount(data_path='.', config_path=None):
    """Count the most common nouns for every configured search word.

    For each word in the config's ``[API] search_words``, reads
    ``<word>_naver_news.json`` from *data_path* and writes the 100 most
    common multi-character nouns to ``<word>_count.json`` next to it.
    Returns silently when the config cannot be loaded.
    """
    config = getConfig(config_path)
    if config is None:  # idiom fix: was `== None`
        return
    for srcText in config['API']['search_words'].split():
        message = extractText(getData(srcText, data_path))
        count = Counter(nounTagging(message))
        # Keep only nouns longer than one character among the 100 most common.
        word_count = {tag: counts for tag, counts in count.most_common(100)
                      if len(str(tag)) > 1}
        out_path = data_path + '/' + srcText + '_count.json'
        with open(out_path, 'w', encoding='utf8') as outfile:
            outfile.write(json.dumps(word_count, indent=4, ensure_ascii=False))
        print(out_path + ' SAVED')
Unet/utils.py | mcchran/segmentations | 3 | 12764752 | <filename>Unet/utils.py<gh_stars>1-10
#
# Utilities file
#
from keras.callbacks import EarlyStopping
from glob import glob
import os, numpy as np
from keras.preprocessing import image
from PIL import Image
from config import DEBUG_LEVEL
class meetExpectations(EarlyStopping):
    '''
    Applies an early stopping when a metric threshold is reached.

    Training stops once at least `min_epochs` epochs have completed AND the
    monitored metric (`monitor`, inherited from EarlyStopping) reaches
    `threshold`.
    TODO: now the metric applies only to the val_acc but this must be defined though the parameter "monitor"
    '''
    def __init__(self, threshold, min_epochs, **kwargs):
        super(meetExpectations, self).__init__(**kwargs)
        self.threshold = threshold    # metric value that triggers the stop
        self.min_epochs = min_epochs  # never stop before this many epochs

    def on_epoch_end(self, epoch, logs=None):
        current = logs.get(self.monitor)  # value of the monitored metric
        if current is None:
            # Bug fix: the original called `super.warnings.warn`, which raises
            # AttributeError on the `super` builtin; warn via the warnings module.
            import warnings
            warnings.warn(
                'Early stopping conditioned on metric `%s` '
                'which is not available. Available metrics are: %s' %
                (self.monitor, ','.join(list(logs.keys()))), RuntimeWarning
            )
            return
        if (epoch >= self.min_epochs) & (current >= self.threshold):
            self.stopped_epoch = epoch
            print("The model now stops because of reaching the val_acc threshold, in epoch: ", epoch)
            self.model.stop_training = True
class adaptLearningRate(EarlyStopping):
    '''
    Applies updates learning rate to adapt to steep slopes.
    '''
    # Placeholder: no behaviour implemented yet, so instantiating this class
    # behaves exactly like the plain keras EarlyStopping callback.
    #TODO: to be done
    pass
def load_image(path, target_size=(1024, 1024)):
    '''
    Load the image at *path*, resized to *target_size*, and return it as a
    numpy array.
    '''
    pil_img = image.load_img(path, target_size=target_size)
    arr = image.img_to_array(pil_img)
    # Release the underlying file handle when PIL exposes one.
    if hasattr(pil_img, 'close'):
        pil_img.close()
    return arr
def store_image(img, dst):
    '''
    Store *img* to *dst* as an 8-bit image file.
    '''
    # Bug fix: the original compared `type(img)` against the *string*
    # "numpy.ndarray", which is always unequal, so the conversion ran
    # unconditionally.  Use isinstance instead.
    if not isinstance(img, np.ndarray):
        img = np.asarray(img)
    result = Image.fromarray(img.astype(np.uint8))
    result.save(dst)
def greyscale(x):
    '''
    Convert an RGB image of shape (H, W, 3) to a single-channel luminance
    image scaled to [0, 1], using the weights 0.21 R + 0.72 G + 0.07 B.
    '''
    x = x.astype(int)
    red, green, blue = x[:, :, :1], x[:, :, 1:2], x[:, :, -1:]
    luminance = (0.21 * red) + (0.72 * green) + (0.07 * blue)
    return (luminance / 255).squeeze()
def resize_img(img, min_size=256, fill_color=(0, 0, 0)):
    '''
    Fit *img* into a square canvas while respecting its aspect ratio.

    The image is shrunk (never enlarged) to fit inside min_size x min_size,
    then centred on a square canvas of side max(min_size, w, h) filled with
    *fill_color*.

    Parameters:
    -----------
    img: np.ndarray typed image
    min_size: integer, minimum side of the output square
    fill_color: RGB color used to pad the canvas

    Returns:
    -----------
    np.ndarray typed square image
    '''
    pil_im = Image.fromarray(img.astype('uint8'))
    # thumbnail() resizes in place and preserves the aspect ratio.
    pil_im.thumbnail((min_size, min_size), Image.ANTIALIAS)
    width, height = pil_im.size
    side = max(min_size, width, height)
    canvas = Image.new('RGB', (side, side), fill_color)
    canvas.paste(pil_im, ((side - width) // 2, (side - height) // 2))
    return np.array(canvas)
def get_segment_crop(img, tol=0, mask=None, tile_shape=None):
    '''
    Crop *img* to the bounding region where *mask* is active.

    If no mask is provided, an ad-hoc mask is derived by thresholding the
    greyscaled image at *tol*.  If *tile_shape* is set, the cropped rows and
    columns are extended so each cropped dimension is an exact multiple of
    the tile size (for a later tiling step).

    Params:
    -------
    img: np.ndarray
    tol: int, threshold used to generate an ad hoc mask
    mask: segmentation mask aligned with img (or None)
    tile_shape: (rows, cols) unit area the crop must be a multiple of

    Returns:
    --------
    cropped image: np.ndarray
    '''
    # ================ Debugging routine follows =============
    if DEBUG_LEVEL > 1:
        assert type(img) == np.ndarray
        # Bug fix: `if mask:` raises ValueError for numpy arrays (ambiguous
        # truth value); test against None explicitly.
        if mask is not None:
            assert mask.shape[0] == img.shape[0], "Image, mask rows mismatch"
            assert mask.shape[1] == img.shape[1], "Image, mask cols mismatch"
    # ========================================================
    if mask is None:
        img = greyscale(img)
        mask = img > tol
    else:
        mask = greyscale(mask)
        mask = mask > 0
    if tile_shape:  # pad the crop so it tiles evenly
        # boolean vectors: rows/columns on which the mask is active anywhere
        img_rows = mask.any(1)
        img_cols = mask.any(0)
        num_of_missing_rows = np.where(img_rows)[0].shape[0] % tile_shape[0]
        num_of_missing_cols = np.where(img_cols)[0].shape[0] % tile_shape[1]
        if num_of_missing_rows > 0:
            num_of_missing_rows = tile_shape[0] - num_of_missing_rows
        if num_of_missing_cols > 0:
            # Bug fix: the original used tile_shape[0] here, padding columns
            # by the ROW tile size instead of the column tile size.
            num_of_missing_cols = tile_shape[1] - num_of_missing_cols
        # furthest active row and column; extend activation past them
        f_row = np.where(img_rows)[0].max()
        f_col = np.where(img_cols)[0].max()
        for i in range(0, num_of_missing_rows + 1):
            img_rows[f_row + i] = True
        for i in range(0, num_of_missing_cols + 1):
            img_cols[f_col + i] = True
        cropped_img = img[np.ix_(img_rows, img_cols)]
        return cropped_img
    else:
        return img[np.ix_(mask.any(1), mask.any(0))]
def generate_paths(input_example, output_example):
    '''
    This is a "smart" function to understand the data organization layout
    Hint: the assumed structure is as indicated below
    1. inps: <root_path>/<identifier><suffix>
    2. outs: <root_path>/<identifier><separator><attribute><suffix>
    for outs the <separator><attribute> are optional to exist

    Returns (input_root, input_suffix, output_root, output_suffix,
    attribute_prefix) inferred from one example input path and one example
    output (mask) path.  NOTE(review): the attribute detection globs the
    real filesystem under output_root -- results depend on the cwd; confirm
    callers always pass resolvable paths.
    '''
    def lcs (S, T):
        '''
        returns the longest common substring for the given strings s, t
        '''
        # Classic dynamic programme over an (m+1) x (n+1) counter table.
        m = len(S)
        n = len(T)
        counter = [[0]*(n+1) for x in range(m+1)]
        longest = 0
        lcs_set = set()
        for i in range(m):
            for j in range(n):
                if S[i] == T[j]:
                    c = counter[i][j] + 1
                    counter[i+1][j+1] = c
                    if c > longest:
                        lcs_set = set()
                        longest = c
                        lcs_set.add(S[i-c+1:i+1])
                    elif c == longest:
                        lcs_set.add(S[i-c+1:i+1])
        # NOTE(review): if several distinct substrings share the maximal
        # length they are ALL concatenated in arbitrary set order; confirm
        # callers only rely on the single-winner case.
        return "".join(lcs_set)

    # check if input and output paths are relative or absolute
    if input_example[0] == "/":
        input_is_absolute = True
    else:
        input_is_absolute = False
    if output_example[0] == "/":
        output_is_absolute = True
    else:
        output_is_absolute = False
    # deflate the input example into root / name / suffix
    input_deflated = input_example.split("/")
    input_root = os.path.join(*input_deflated[:-1])
    if input_is_absolute:
        # os.path.join drops the leading slash; restore it
        input_root = "/"+input_root
    input_suffix = "."+input_deflated[-1].split(".")[-1]
    input_name = input_deflated[-1].split('.')[0]
    # same decomposition for the output example
    output_deflated = output_example.split("/")
    output_root = os.path.join(*output_deflated[:-1])
    if output_is_absolute:
        output_root = "/"+output_root
    output_name = output_deflated[-1].split(".")[0]
    output_suffix = "." + output_deflated[-1].split(".")[-1]
    # the shared stem between input and output file names identifies a sample
    identifier = lcs(input_name, output_name)
    mask_sep = output_name.split(identifier)[-1]
    if mask_sep != "" : # if there is more info in mask add it to the suffix
        output_suffix = mask_sep + output_suffix
    # TODO: now investigate the directory to check for more information
    mask_paths = glob(os.path.join(output_root, identifier + "*"))
    if len(mask_paths) > 1: # we have more classes and attributes
        #TODO:
        # strip directories and suffixes, keep only the per-file attribute part
        mask_paths = list(map(lambda x: x.split("/")[-1].split(".")[0], mask_paths))
        mask_paths = list(map(lambda x: x.split(identifier)[-1], mask_paths))
        attribute_prefix = lcs(mask_paths[0], mask_paths[1])
    else:
        attribute_prefix = ""
    #check if input_root or output_root are relative or absolutes
    return input_root, input_suffix, output_root, output_suffix, attribute_prefix
def pretty_print(message, *kwords):
    '''
    Debug-aware print wrapper.

    Params:
    -------
    message: string, debugger message
    kwords: other things we need to print along with the message

    Returns:
    -------
    nothing --> prints on the screen only when DEBUG_LEVEL == 2
    '''
    if DEBUG_LEVEL != 2:
        return
    print(message, *kwords)
| 2.40625 | 2 |
Resources/Code/open-grok/opengrok-1.1-rc41/tools/src/main/python/mirror.py | briancabbott/xtrax | 2 | 12764753 | <reponame>briancabbott/xtrax
#!/usr/bin/env python3
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# See LICENSE.txt included in this distribution for the specific
# language governing permissions and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at LICENSE.txt.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
#
"""
This script performs mirroring of single OpenGrok project.
It is intended to work on Unix systems.
"""
import argparse
import os
import sys
import filelock
from filelock import Timeout
import logging
from logging.handlers import RotatingFileHandler
import tempfile
from repofactory import get_repository
from utils import is_exe, check_create_dir, get_int, diff_list
from hook import run_hook
from readconfig import read_config
from opengrok import get_repos, get_config_value, get_repo_type
import re
major_version = sys.version_info[0]
if (major_version < 3):
print("Need Python 3, you are running {}".format(major_version))
sys.exit(1)
__version__ = "0.2"
if __name__ == '__main__':
    ret = 0           # overall exit status: set to 1 when any repo fails to sync
    output = []
    dirs_to_process = []

    # "constants" -- property names recognised in the configuration file
    HOOK_TIMEOUT_PROPERTY = 'hook_timeout'
    CMD_TIMEOUT_PROPERTY = 'command_timeout'
    IGNORED_REPOS_PROPERTY = 'ignored_repos'
    PROXY_PROPERTY = 'proxy'
    COMMANDS_PROPERTY = 'commands'
    DISABLED_PROPERTY = 'disabled'
    HOOKDIR_PROPERTY = 'hookdir'
    HOOKS_PROPERTY = 'hooks'
    LOGDIR_PROPERTY = 'logdir'
    PROJECTS_PROPERTY = 'projects'

    # ---- command-line arguments ----
    parser = argparse.ArgumentParser(description='project mirroring')
    parser.add_argument('project')
    parser.add_argument('-D', '--debug', action='store_true',
                        help='Enable debug prints')
    parser.add_argument('-c', '--config',
                        help='config file in JSON/YAML format')
    parser.add_argument('-U', '--uri', default='http://localhost:8080/source',
                        help='uri of the webapp with context path')
    parser.add_argument('-b', '--batch', action='store_true',
                        help='batch mode - will log into a file')
    parser.add_argument('-B', '--backupcount', default=8,
                        help='how many log files to keep around in batch mode')
    args = parser.parse_args()

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig()

    logger = logging.getLogger(os.path.basename(sys.argv[0]))

    # ---- load and sanity-check the global configuration ----
    if args.config:
        config = read_config(logger, args.config)
        if config is None:
            logger.error("Cannot read config file from {}".format(args.config))
            sys.exit(1)
    else:
        config = {}

    # Reject any top-level config key that is not a known tunable.
    GLOBAL_TUNABLES = [HOOKDIR_PROPERTY, PROXY_PROPERTY, LOGDIR_PROPERTY,
                       COMMANDS_PROPERTY, PROJECTS_PROPERTY,
                       HOOK_TIMEOUT_PROPERTY, CMD_TIMEOUT_PROPERTY]
    diff = diff_list(config.keys(), GLOBAL_TUNABLES)
    if diff:
        logger.error("unknown global configuration option(s): '{}'"
                     .format(diff))
        sys.exit(1)

    # Make sure the log directory exists.
    logdir = config.get(LOGDIR_PROPERTY)
    if logdir:
        check_create_dir(logger, logdir)

    uri = args.uri
    if not uri:
        logger.error("uri of the webapp not specified")
        sys.exit(1)
    logger.debug("Uri = {}".format(uri))

    # The source root is fetched from the running OpenGrok webapp.
    source_root = get_config_value(logger, 'sourceRoot', uri)
    if not source_root:
        logger.error("Cannot get the sourceRoot config value")
        sys.exit(1)
    logger.debug("Source root = {}".format(source_root))

    # ---- find per-project configuration (exact name first, then regex) ----
    project_config = None
    projects = config.get(PROJECTS_PROPERTY)
    if projects:
        if projects.get(args.project):
            project_config = projects.get(args.project)
        else:
            # Keys under 'projects' may be regular expressions matching
            # multiple project names; the first match wins.
            for proj in projects.keys():
                try:
                    pattern = re.compile(proj)
                except re.error:
                    logger.error("Not a valid regular expression: {}".
                                 format(proj))
                    continue

                if pattern.match(args.project):
                    logger.debug("Project '{}' matched pattern '{}'".
                                 format(args.project, proj))
                    project_config = projects.get(proj)
                    break

    hookdir = config.get(HOOKDIR_PROPERTY)
    if hookdir:
        logger.debug("Hook directory = {}".format(hookdir))

    # ---- global timeouts (may be overridden per project below) ----
    command_timeout = get_int(logger, "command timeout",
                              config.get(CMD_TIMEOUT_PROPERTY))
    if command_timeout:
        logger.debug("Global command timeout = {}".format(command_timeout))

    hook_timeout = get_int(logger, "hook timeout",
                           config.get(HOOK_TIMEOUT_PROPERTY))
    if hook_timeout:
        logger.debug("Global hook timeout = {}".format(hook_timeout))

    # ---- apply per-project overrides ----
    prehook = None
    posthook = None
    use_proxy = False
    ignored_repos = None
    if project_config:
        logger.debug("Project '{}' has specific (non-default) config".
                     format(args.project))

        # Quick sanity check.
        KNOWN_PROJECT_TUNABLES = [DISABLED_PROPERTY, CMD_TIMEOUT_PROPERTY,
                                  HOOK_TIMEOUT_PROPERTY, PROXY_PROPERTY,
                                  IGNORED_REPOS_PROPERTY, HOOKS_PROPERTY]
        diff = diff_list(project_config.keys(), KNOWN_PROJECT_TUNABLES)
        if diff:
            logger.error("unknown project configuration option(s) '{}' "
                         "for project {}".format(diff, args.project))
            sys.exit(1)

        project_command_timeout = get_int(logger, "command timeout for "
                                          "project {}".format(args.project),
                                          project_config.
                                          get(CMD_TIMEOUT_PROPERTY))
        if project_command_timeout:
            command_timeout = project_command_timeout
            logger.debug("Project command timeout = {}".
                         format(command_timeout))

        project_hook_timeout = get_int(logger, "hook timeout for "
                                       "project {}".format(args.project),
                                       project_config.
                                       get(HOOK_TIMEOUT_PROPERTY))
        if project_hook_timeout:
            hook_timeout = project_hook_timeout
            logger.debug("Project hook timeout = {}".
                         format(hook_timeout))

        ignored_repos = project_config.get(IGNORED_REPOS_PROPERTY)
        if ignored_repos:
            if type(ignored_repos) is not list:
                logger.error("{} for project {} is not a list".
                             format(IGNORED_REPOS_PROPERTY, args.project))
                sys.exit(1)
            logger.debug("has ignored repositories: {}".
                         format(ignored_repos))

        # Validate the pre/post hook scripts before doing any work.
        hooks = project_config.get(HOOKS_PROPERTY)
        if hooks:
            if not hookdir:
                logger.error("Need to have '{}' in the configuration "
                             "to run hooks".format(HOOKDIR_PROPERTY))
                sys.exit(1)

            if not os.path.isdir(hookdir):
                logger.error("Not a directory: {}".format(hookdir))
                sys.exit(1)

            for hookname in hooks:
                if hookname == "pre":
                    prehook = hookpath = os.path.join(hookdir, hooks['pre'])
                    logger.debug("pre-hook = {}".format(prehook))
                elif hookname == "post":
                    posthook = hookpath = os.path.join(hookdir, hooks['post'])
                    logger.debug("post-hook = {}".format(posthook))
                else:
                    logger.error("Unknown hook name {} for project {}".
                                 format(hookname, args.project))
                    sys.exit(1)

                if not is_exe(hookpath):
                    logger.error("hook file {} does not exist or not "
                                 "executable".format(hookpath))
                    sys.exit(1)

        if project_config.get(PROXY_PROPERTY):
            if not config.get(PROXY_PROPERTY):
                logger.error("global proxy setting is needed in order to"
                             "have per-project proxy")
                sys.exit(1)
            logger.debug("will use proxy")
            use_proxy = True

    if not ignored_repos:
        ignored_repos = []

    # Log messages to dedicated log file if running in batch mode.
    if args.batch:
        if not logdir:
            logger.error("The logdir property is required in batch mode")
            sys.exit(1)

        logfile = os.path.join(logdir, args.project + ".log")
        logger.debug("Switching logging to the {} file".
                     format(logfile))
        logging.shutdown()

        # Remove the existing handler so that logger can be reconfigured.
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)

        logging.basicConfig(filename=logfile, filemode='a',
                            level=logging.DEBUG if args.debug
                            else logging.INFO)
        logger = logging.getLogger(os.path.basename(sys.argv[0]))
        handler = RotatingFileHandler(logfile, maxBytes=0,
                                      backupCount=args.backupcount)
        formatter = logging.Formatter("%(asctime)s - %(levelname)s: "
                                      "%(message)s", '%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(formatter)
        handler.doRollover()
        #
        # Technically, adding a handler to the logger is not necessary
        # since log rotation is done above using doRollover() however
        # it is done anyway in case the handler changes to use implicit
        # rotation in the future.
        #
        logger.addHandler(handler)

    # We want this to be logged to the log file (if any).
    if project_config:
        if project_config.get(DISABLED_PROPERTY):
            logger.info("Project {} disabled, exiting".
                        format(args.project))
            sys.exit(2)

    # ---- main work, guarded by a per-project file lock ----
    lock = filelock.FileLock(os.path.join(tempfile.gettempdir(),
                                          args.project + "-mirror.lock"))
    try:
        with lock.acquire(timeout=0):
            proxy = config.get(PROXY_PROPERTY) if use_proxy else None

            if prehook:
                logger.info("Running pre hook")
                if run_hook(logger, prehook,
                            os.path.join(source_root, args.project), proxy,
                            hook_timeout) != 0:
                    logger.error("pre hook failed")
                    logging.shutdown()
                    sys.exit(1)

            #
            # If one of the repositories fails to sync, the whole project sync
            # is treated as failed, i.e. the program will return 1.
            #
            for repo_path in get_repos(logger, args.project, uri):
                logger.debug("Repository path = {}".format(repo_path))

                if repo_path in ignored_repos:
                    logger.info("repository {} ignored".format(repo_path))
                    continue

                repo_type = get_repo_type(logger, repo_path, uri)
                if not repo_type:
                    logger.error("cannot determine type of {}".
                                 format(repo_path))
                    continue
                logger.debug("Repository type = {}".format(repo_type))

                repo = get_repository(logger,
                                      source_root + repo_path,
                                      repo_type,
                                      args.project,
                                      config.get(COMMANDS_PROPERTY),
                                      proxy,
                                      None,
                                      command_timeout)
                if not repo:
                    logger.error("Cannot get repository for {}".
                                 format(repo_path))
                    ret = 1
                else:
                    logger.info("Synchronizing repository {}".
                                format(repo_path))
                    if repo.sync() != 0:
                        logger.error("failed to sync repository {}".
                                     format(repo_path))
                        ret = 1

            if posthook:
                logger.info("Running post hook")
                if run_hook(logger, posthook,
                            os.path.join(source_root, args.project), proxy,
                            hook_timeout) != 0:
                    logger.error("post hook failed")
                    logging.shutdown()
                    sys.exit(1)
    except Timeout:
        # Another mirror run for this project already holds the lock.
        logger.warning("Already running, exiting.")
        sys.exit(1)

    logging.shutdown()
    sys.exit(ret)
| 1.695313 | 2 |
interactions/tests/test_models.py | Ins-V/wc_crm | 0 | 12764754 | from django.test import TestCase
from django.urls import reverse
from interactions.models import Interaction
from interactions.tests.factories import InteractionFactory
class InteractionModelTestCase(TestCase):
    """Testing the interaction model class."""

    def test_create_interaction(self):
        # Persist one interaction through the factory, then compare it field
        # by field with what was actually stored in the database.
        interaction_from_factory = InteractionFactory()
        interaction_from_db = Interaction.objects.first()

        self.assertEqual(Interaction.objects.count(), 1)
        self.assertEqual(interaction_from_factory.project, interaction_from_db.project)
        self.assertEqual(interaction_from_factory.channel, interaction_from_db.channel)
        self.assertEqual(interaction_from_factory.manager, interaction_from_db.manager)
        self.assertEqual(interaction_from_factory.description, interaction_from_db.description)
        self.assertEqual(interaction_from_factory.evaluation, interaction_from_db.evaluation)
        # get_absolute_url() must resolve to the detail view for this pk.
        self.assertEqual(
            reverse('interaction:detail', kwargs={'pk': interaction_from_factory.pk}),
            interaction_from_db.get_absolute_url()
        )
        # __str__ embeds the company name and the primary key.
        self.assertEqual(
            f"Взаимодействие с компанией {interaction_from_factory.project.company.name} #{interaction_from_factory.pk}",
            interaction_from_db.__str__()
        )
| 2.6875 | 3 |
bert-distillation/bert_distill/data/dataset.py | distillbert/code | 0 | 12764755 | from torchtext.data import Field, TabularDataset, Iterator
from torchtext.vocab import Vectors
import torch
from .base import allennlp_tokenize, basic_tokenize, uniform_unk_init, space_tokenize, \
bert_tokenize, gpt2_tokenize
_REGISTRY = {}
class RegisteredDataset(TabularDataset):
    # Base class that auto-registers every subclass in the module-level
    # _REGISTRY under the lower-cased `name` class keyword, so datasets can
    # be looked up by name via find_dataset()/list_datasets().
    def __init_subclass__(cls, name):
        _REGISTRY[name.lower()] = cls
def list_field_mappings(field_tgt, vocab):
    """Return (source_index, target_index) pairs for every word of *vocab*
    that also appears in *field_tgt*'s vocabulary."""
    return [
        (vocab.stoi[word], field_tgt.vocab.stoi[word])
        for word in vocab.stoi
        if word in field_tgt.vocab.stoi
    ]
def replace_embeds(embeds_tgt, embeds_src, field_mappings):
    """Copy embedding rows from *embeds_src* into *embeds_tgt* following the
    (source_index, target_index) pairs in *field_mappings*."""
    src_data = embeds_src.weight.data
    tgt_data = embeds_tgt.weight.data
    for src_idx, tgt_idx in field_mappings:
        tgt_data[tgt_idx] = src_data[src_idx]
class SST2Dataset(RegisteredDataset, name="sst2"):
    """SST-2 sentiment TSV dataset (binary) with teacher logits columns for
    distillation."""
    N_CLASSES = 2
    TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
    LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
    LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)

    @staticmethod
    def sort_key(ex):
        # Bucket examples by sentence length for efficient batching.
        return len(ex.sentence)

    @classmethod
    def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
        """Build (train, dev, test) splits; the test split is served without
        the label and teacher-logits fields."""
        fields = [("label", cls.LABEL_FIELD), ("sentence", cls.TEXT_FIELD), ("logits_0", cls.LOGITS_0),
                  ("logits_1", cls.LOGITS_1)]
        train_ds, dev_ds, test_ds = super(SST2Dataset, cls).splits(
            folder_path, train=train, validation=dev, test=test, format="tsv",
            fields=fields, skip_header=True)
        del test_ds.fields["logits_0"]
        del test_ds.fields["logits_1"]
        del test_ds.fields["label"]
        return train_ds, dev_ds, test_ds

    @classmethod
    def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
              unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
        """Build vocabulary over all splits and return per-split iterators."""
        if vectors is None:
            vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
        train, val, test = cls.splits(path, train=train, dev=dev, test=test)
        cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
        # Removed a dead debug branch: sort_within_batch was hard-coded to
        # False, making its loud print statement unreachable.
        return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
                               sort_within_batch=False, device=device, sort=False)
class CoLADataset(RegisteredDataset, name="cola"):
    """CoLA (grammatical acceptability, binary) TSV dataset with teacher
    logits columns for distillation."""
    N_CLASSES = 2
    TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
    LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
    LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)

    @staticmethod
    def sort_key(ex):
        # Bucket examples by sentence length for efficient batching.
        return len(ex.sentence)

    @classmethod
    def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
        # Columns: label, sentence, then the two teacher logits.
        fields = [("label", cls.LABEL_FIELD), ("sentence", cls.TEXT_FIELD), ("logits_0", cls.LOGITS_0),
                  ("logits_1", cls.LOGITS_1)]
        return super(CoLADataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
                                              fields=fields, skip_header=True)

    @classmethod
    def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
              unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
        # Build the vocabulary over all splits, then return per-split iterators.
        if vectors is None:
            vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
        train, val, test = cls.splits(path, train=train, dev=dev, test=test)
        cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
        return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
                               sort_within_batch=False, device=device, sort=False)
class STSDataset(RegisteredDataset, name="sts"):
    """STS-B sentence-pair similarity TSV dataset (regression on a single
    float score)."""
    N_CLASSES = 1
    TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
    SCORE = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)

    @staticmethod
    def sort_key(ex):
        # Bucket examples by the first sentence's length.
        return len(ex.sentence1)

    @classmethod
    def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
        # Columns: similarity score followed by the sentence pair.
        fields = [("score", cls.SCORE), ("sentence1", cls.TEXT_FIELD), ("sentence2", cls.TEXT_FIELD)]
        return super(STSDataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
                                             fields=fields, skip_header=True)

    @classmethod
    def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
              unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
        # Build the vocabulary over all splits, then return per-split iterators.
        if vectors is None:
            vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
        train, val, test = cls.splits(path, train=train, dev=dev, test=test)
        cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
        return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
                               sort_within_batch=False, device=device, sort=False)
class MRPCDataset(RegisteredDataset, name="mrpc"):
    """MRPC paraphrase TSV dataset (sentence pairs, binary) with teacher
    logits columns for distillation."""
    N_CLASSES = 2
    TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
    LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
    LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)

    @staticmethod
    def sort_key(ex):
        # Bug fix: MRPC examples carry "sentence1"/"sentence2" fields (see
        # splits below); the original referenced ex.question1, which would
        # raise AttributeError whenever sorting was used.
        return len(ex.sentence1)

    @classmethod
    def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
        # Columns: label, the sentence pair, then the two teacher logits.
        fields = [("label", cls.LABEL_FIELD), ("sentence1", cls.TEXT_FIELD), ("sentence2", cls.TEXT_FIELD),
                  ("logits_0", cls.LOGITS_0), ("logits_1", cls.LOGITS_1)]
        return super(MRPCDataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
                                              fields=fields, skip_header=True)

    @classmethod
    def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
              unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
        """Build vocabulary over all splits and return per-split iterators."""
        if vectors is None:
            vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
        train, val, test = cls.splits(path, train=train, dev=dev, test=test)
        cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
        return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
                               sort_within_batch=False, device=device, sort=False)
class QQBDataset(RegisteredDataset, name="qqb"):
    """Quora Question Pairs (duplicate detection, binary) TSV dataset with
    teacher logits columns for distillation."""
    N_CLASSES = 2
    LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
    TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
    LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)

    @staticmethod
    def sort_key(ex):
        # Bucket examples by the first question's length.
        return len(ex.question1)

    @classmethod
    def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
        # Columns: is_duplicate label, the question pair, then teacher logits.
        fields = [("is_duplicate", cls.LABEL_FIELD), ("question1", cls.TEXT_FIELD), ("question2", cls.TEXT_FIELD),
                  ("logits_0", cls.LOGITS_0), ("logits_1", cls.LOGITS_1)]
        return super(QQBDataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
                                             fields=fields, skip_header=True)

    @classmethod
    def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
              unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
        # Build the vocabulary over all splits, then return per-split iterators.
        if vectors is None:
            vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
        train, val, test = cls.splits(path, train=train, dev=dev, test=test)
        cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
        return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
                               sort_within_batch=False, device=device, sort=False)
class QNLIDataset(RegisteredDataset, name="qnli"):
    """QNLI question/sentence entailment TSV dataset (binary) with teacher
    logits columns for distillation."""
    N_CLASSES = 2
    TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize, include_lengths=True)
    LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
    LOGITS = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)

    @staticmethod
    def sort_key(ex):
        # Bug fix: QNLI examples carry a "question" field (see splits below);
        # the original referenced ex.question1, which would raise
        # AttributeError whenever sorting was used.
        return len(ex.question)

    @classmethod
    def splits(cls, folder_path, train="train.tsv", dev="dev.tsv", test="test.tsv"):
        # Columns: row index, question, sentence, label, then teacher logits.
        fields = [("index", cls.LABEL_FIELD), ("question", cls.TEXT_FIELD), ("sentence", cls.TEXT_FIELD), ("label", cls.LABEL_FIELD),
                  ("logits_0", cls.LOGITS), ("logits_1", cls.LOGITS)]
        return super(QNLIDataset, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
                                              fields=fields, skip_header=True)

    @classmethod
    def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
              unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev.tsv", test="test.tsv"):
        """Build vocabulary over all splits and return per-split iterators."""
        if vectors is None:
            vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
        train, val, test = cls.splits(path, train=train, dev=dev, test=test)
        cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
        return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
                               sort_within_batch=False, device=device, sort=False)
class MNLIDataset_MisMatch(RegisteredDataset, name="mnli_mismatch"):
    """MNLI mismatched-genre NLI TSV dataset (3 classes) with three teacher
    logits columns for distillation."""
    N_CLASSES = 3
    # NOTE(review): unlike the other datasets, TEXT_FIELD here omits
    # include_lengths=True -- confirm whether that is intentional.
    TEXT_FIELD = Field(batch_first=True, tokenize=basic_tokenize)
    LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True)
    LOGITS_0 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    LOGITS_1 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
    LOGITS_2 = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)

    @staticmethod
    def sort_key(ex):
        # Bucket examples by the premise's length.
        return len(ex.sentence1)

    @classmethod
    def splits(cls, folder_path, train="train.tsv", dev="dev_mismatched.tsv", test="test_mismatched.tsv"):
        # Columns: gold label, the sentence pair, then one logit per class.
        fields = [("gold_label", cls.LABEL_FIELD), ("sentence1", cls.TEXT_FIELD), ("sentence2", cls.TEXT_FIELD),
                  ("logits_0", cls.LOGITS_0), ("logits_1", cls.LOGITS_1), ("logits_2", cls.LOGITS_2)]
        return super(MNLIDataset_MisMatch, cls).splits(folder_path, train=train, validation=dev, test=test, format="tsv",
                                                       fields=fields, skip_header=True)

    @classmethod
    def iters(cls, path, vectors_name, vectors_cache, batch_size=64, vectors=None,
              unk_init=uniform_unk_init(), device="cuda:0", train="train.tsv", dev="dev_mismatched.tsv", test="test_mismatched.tsv"):
        # Build the vocabulary over all splits, then return per-split iterators.
        if vectors is None:
            vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init)
        train, val, test = cls.splits(path, train=train, dev=dev, test=test)
        cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors)
        return Iterator.splits((train, val, test), batch_size=batch_size, repeat=False,
                               sort_within_batch=False, device=device, sort=False)
def find_dataset(name):
    """Look up a registered dataset class by its registry name."""
    return _REGISTRY[name]
def list_datasets():
    """Return the names of all registered dataset classes."""
    return list(_REGISTRY)
| 2.28125 | 2 |
pipeline/util/generate-makefile.py | goodb/go-site | 0 | 12764756 | <gh_stars>0
#!/usr/bin/env python3
"""
Generates a Makefile based on metadata descriptions of source files.
"""
__author__ = 'cjm'
import argparse
import yaml
from json import dumps
SKIP = ["goa_pdb", "goa_uniprot_gcrp"]
ONLY_GAF = ["goa_uniprot_all"]
def main():
    """Read dataset metadata YAML files named on the command line and print
    Makefile rules for every dataset group to stdout."""
    parser = argparse.ArgumentParser(description='GO Metadata'
                                     '',
                                     formatter_class=argparse.RawTextHelpFormatter)
    print("## AUTOGENERATED MAKEFILE\n")
    parser.add_argument('files', nargs='*')
    args = parser.parse_args()

    # Collect every dataset artifact declared in the metadata files.
    artifacts = []
    artifacts_by_dataset = {}
    for fn in args.files:
        # `with` guarantees the handle is closed even on parse errors, and
        # safe_load avoids executing arbitrary YAML tags (plain yaml.load
        # without a Loader is deprecated/unsafe).
        with open(fn, 'r') as f:
            obj = yaml.safe_load(f)
        artifacts.extend(obj['datasets'])

    # Group the artifacts by dataset id, skipping unsupported ones.
    for a in artifacts:
        if 'source' not in a:
            # TODO
            print("## WARNING: no source for: {}".format(a['id']))
            continue
        ds = a['dataset']
        if ds == 'paint':
            print("## WARNING: Skipping PAINT: {}".format(a['id']))
            # TODO
            continue
        if ds == 'rnacentral':
            print("## WARNING: Skipping RNAC: {}".format(a['id']))
            # TODO
            continue
        artifacts_by_dataset.setdefault(ds, []).append(a)

    for (ds, alist) in artifacts_by_dataset.items():
        generate_targets(ds, alist)

    # Aggregate phony targets covering every dataset.
    targets = [all_files(ds) for ds in artifacts_by_dataset.keys()]
    rule('all_targets', targets)
    simple_ds_list = [ds for (ds, data) in artifacts_by_dataset.items() if not skip_source(ds, data)]
    simple_targets = [all_files(ds) for ds in simple_ds_list]
    rule('all_targets_simple', simple_targets, comments='Excludes aggregated (goa_uniprot)')
    # for now, do not do ttl on goa_uniprot_all
    ttl_targets = [all_ttl(ds) for ds in simple_ds_list]
    rule('all_targets_ttl', ttl_targets, comments='RDF targets. Excludes aggregated (goa_uniprot)')
def generate_targets(ds, alist):
    """
    Generate makefile targets for a dataset, e.g. sgd, goa_human_rna
    Note any dataset can have multiple artifacts associated with it: gaf, gpad, gpi, ...
    """
    types = [a['type'] for a in alist]
    # Section banner comment in the emitted Makefile.
    print("## --------------------")
    print("## {}".format(ds))
    print("## --------------------\n")
    # Without a gaf or gpad artifact there is nothing to build: emit a stub.
    if 'gaf' not in types and 'gpad' not in types:
        print("# Metadata incomplete\n")
        rule(all_files(ds))
        return
    if ds in SKIP:
        # TODO move to another config file for 'skips'
        print("# Skipping\n")
        rule(all_files(ds))
        return
    # If any item has the aggregate field, then we just want to pass it through and not run
    # all the targets
    is_ds_aggregated = any([("aggregates" in item) for item in alist])
    ds_targets = [targetdir(ds)]
    if ds not in ONLY_GAF:
        ds_targets += [gzip(filtered_gaf(ds)), gzip(filtered_gpad(ds)), gzip(gpi(ds)), owltools_gafcheck(ds)]
    # Aggregated datasets only get the gzipped GAF (overrides the list above).
    if is_ds_aggregated:
        ds_targets = [targetdir(ds), gzip(filtered_gaf(ds))]
    rule(all_files(ds), ds_targets)
    # The ttl_all_<ds> target additionally builds the TTL, except for
    # GAF-only datasets where it is identical to all_<ds>.
    ds_all_ttl = ds_targets + [ttl(ds)]
    if ds in ONLY_GAF:
        ds_all_ttl = ds_targets
    rule(all_ttl(ds), ds_all_ttl)
    rule(targetdir(ds),[],
         'mkdir -p '+targetdir(ds))
    # for now we assume everything comes from a GAF
    if 'gaf' in types:
        # NOTE(review): this unpacking assumes exactly one gaf artifact per
        # dataset; multiple gaf entries would raise ValueError — confirm.
        [gaf] = [a for a in alist if a['type']=='gaf']
        url = gaf['source']
        # GAF from source
        rule(src_gaf_zip(ds),[targetdir(ds)],
             'wget --retry-connrefused --waitretry=10 -t 10 --no-check-certificate {url} -O $@.tmp && mv $@.tmp $@ && touch $@'.format(url=url))
def skip_source(ds, data):
    """Return True when *ds* should be excluded from the simple target lists:
    either it is in the SKIP list or it has neither a gaf nor a gpad artifact."""
    artifact_types = {item['type'] for item in data}
    return ds in SKIP or not ({'gaf', 'gpad'} & artifact_types)
def create_targetdir(ds):
    """Name of the per-dataset directory-creation target."""
    return 'create_targetdir_{}'.format(ds)
def targetdir(ds):
    """Directory that holds every generated file for dataset *ds*."""
    return 'target/groups/{}/'.format(ds)
def all_files(ds):
    """Phony target that builds all artifacts for *ds*."""
    return 'all_{}'.format(ds)
def all_ttl(ds):
    """Phony target that builds all artifacts for *ds*, TTL included."""
    return 'ttl_all_{}'.format(ds)
def src_gaf_zip(ds):
    """Path of the gzipped source GAF downloaded for *ds*."""
    return targetdir(ds) + ds + '-src.gaf.gz'
def src_gaf(ds):
    """Path of the unpacked source GAF for *ds*."""
    return targetdir(ds) + ds + '-src.gaf'
def filtered_gaf(ds):
    """Path of the filtered GAF for *ds*."""
    return targetdir(ds) + ds + '.gaf'
def filtered_data(fmt, ds):
    """Path of the filtered artifact of type *fmt* for *ds*."""
    return targetdir(ds) + ds + '.' + fmt
def noiea_gaf(ds):
    """Path of the IEA-stripped GAF for *ds*."""
    return targetdir(ds) + ds + '_noiea.gaf'
def filtered_gpad(ds):
    """Path of the filtered GPAD for *ds*."""
    return targetdir(ds) + ds + '.gpad'
def ttl(ds):
    """Path of the CAM TTL output for *ds*."""
    return targetdir(ds) + ds + '_cam.ttl'
def inferred_ttl(ds):
    """Path of the inferred TTL output for *ds*."""
    return targetdir(ds) + ds + '_inferred.ttl'
def owltools_gafcheck(ds):
    """Path of the owltools check report for *ds*."""
    return targetdir(ds) + ds + '-owltools-check.txt'
def gpi(ds):
    """Path of the GPI output for *ds*."""
    return targetdir(ds) + ds + '.gpi'
def gzip(f):
    """Append the .gz suffix (note: name shadows the stdlib ``gzip`` module)."""
    return f + '.gz'
def rule(target, dependencies=None, executable=None, comments=None):
    """Print a single Makefile rule to stdout.

    :param target: the rule's target name
    :param dependencies: list of dependency names (joined with spaces) or a
        pre-joined string; defaults to no dependencies
    :param executable: optional recipe string; each line is emitted tab-indented
    :param comments: optional '## ...' comment line emitted above the rule
    """
    # BUGFIX: the original used a mutable default argument (dependencies=[]);
    # None with an explicit fallback keeps identical behavior for all callers.
    if dependencies is None:
        dependencies = []
    if comments is not None:
        print("## {}".format(comments))
    if isinstance(dependencies, list):
        dependencies = " ".join(dependencies)
    print("{}: {}".format(target, dependencies))
    if executable is not None:
        for line in executable.split("\n"):
            print("\t{}".format(line))
    print()
if __name__ == "__main__":
main()
| 2.453125 | 2 |
plat.py | hsoft/hscommon | 5 | 12764757 | <gh_stars>1-10
# Created On: 2011/09/22
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
# Yes, I know, there's the 'platform' unit for this kind of stuff, but the thing is that I got a
# crash on startup once simply for importing this module and since then I don't trust it. One day,
# I'll investigate the cause of that crash further.
import sys
# Platform flags derived from sys.platform; at most one is True per OS.
# ISLINUX uses startswith so that values like 'linux2' (old CPython) match.
ISWINDOWS = sys.platform == 'win32'
ISOSX = sys.platform == 'darwin'
ISLINUX = sys.platform.startswith('linux')
ptf/scripts/helpers/target_helpers/python_term.py | sommercharles/fprime | 1 | 12764758 | <reponame>sommercharles/fprime
# This script will create two PTYs. One through a forkpty call
# that creates a copy of this script on a new PTY for control/shell
# PTY control. The second is in raw mode and just transfers bytes
# to the first one, allowing the TTY layer of the first to perform
# all the canonical translations
import os, sys, select, re, time, termios, fcntl
import subprocess
import optparse
import time
# ---------------------------------------------------------------------------
# Option parsing: everything after the first positional argument is the
# command (and its arguments) to run inside the new PTY.
# ---------------------------------------------------------------------------
parser = optparse.OptionParser()
parser.disable_interspersed_args()
parser.add_option("-s", "--symbolic-link", dest="symlink",
                  help="The name of a symbolic link to create, pointing to the user's TTY", default="", type="string", action="store")
parser.add_option("-l", "--logfile", dest="logfile",
                  help="The path to a log file that this program will store all traffic between the PTY and contained process.", default="", type="string", action="store")
(options, args) = parser.parse_args()
# Indices into the attribute list returned by termios.tcgetattr().
IFLAG = 0
OFLAG = 1
LFLAG = 3
# Keys identifying the two master PTYs:
#   PROC - master side of the PTY the child process runs on
#   EXT  - master side of the raw, externally visible PTY
PROC = 0
EXT = 1
masters = dict()
slaves = dict()
symlink = ""
log_fd = None
pid, masters[PROC] = os.forkpty()
if pid == 0:  # Child
    # The current process is replaced by the desired process with this call.
    ret = os.execvp(args[0], args)
    os._exit(ret)
else:  # Parent
    old = dict()
    new = dict()
    try:
        masters[EXT], slaves[EXT] = os.openpty()
        print("Access this terminal at ", os.ttyname(slaves[EXT]))
        # BUGFIX: the original compared strings with `is not ""` (identity,
        # not equality) here and in the cleanup below; use != instead.
        if options.symlink != "":
            if os.path.exists(options.symlink):
                print("Symbolic link '%s' already exists!" % options.symlink)
                symlink = ""
            else:
                os.symlink(os.ttyname(slaves[EXT]), options.symlink)
                symlink = options.symlink
        # Remember the original attributes (restored on exit) and build
        # modified copies to install.
        for key in masters:
            old[key] = termios.tcgetattr(masters[key])
            new[key] = termios.tcgetattr(masters[key])
        # Configure Process's PTY connection: canonical mode with echo plus
        # CR/NL translation, so the child sees a conventional terminal.
        new[PROC][IFLAG] &= ~(termios.IGNCR | termios.INLCR)
        new[PROC][IFLAG] |= termios.ICRNL
        new[PROC][OFLAG] |= termios.ONLCR
        new[PROC][LFLAG] |= termios.ICANON | termios.ECHO | termios.ECHOE | termios.ECHOK | termios.ECHOKE | termios.ECHONL
        new[PROC][LFLAG] &= ~(termios.ECHOPRT)
        # Configure External PTY as RAW: no translation, no echo — bytes pass
        # through untouched and the PROC side does all canonical handling.
        new[EXT][IFLAG] &= ~(termios.IGNCR | termios.INLCR | termios.ICRNL)
        new[EXT][OFLAG] &= ~(termios.ONLCR)
        new[EXT][LFLAG] &= ~(termios.ECHOPRT | termios.ICANON | termios.ECHO | termios.ECHOE | termios.ECHOK | termios.ECHOKE | termios.ECHONL)
        # Set attributes for all master PTYs
        for key in masters:
            termios.tcsetattr(masters[key], termios.TCSADRAIN, new[key])
        # Define data routing: bytes read from one master go to the other.
        transfers = {masters[EXT]: [masters[PROC]], masters[PROC]: [masters[EXT]]}
        # Open a logfile if we should
        if options.logfile != "":
            log_fd = open(options.logfile, "w")
        need_timetag = True
        keep_running = True
        while keep_running:
            readlist, writelist, exceptlist = select.select(list(transfers.keys()), [], list(transfers.keys()))
            for src_fd in readlist:
                # BUGFIX: os.read() returns *bytes* on Python 3. The original
                # stored it in a variable named `str` (shadowing the builtin)
                # and then mixed it with text strings in the logging code,
                # which raised TypeError when logging was enabled.
                data = b""
                try:
                    data = os.read(src_fd, 1024)
                except Exception:
                    print("Connected process has terminated.")
                    keep_running = False
                if len(data) > 0:
                    # Log traffic coming from the child process, prefixing
                    # every line with a UTC timetag.
                    if log_fd and (src_fd == masters[PROC]):
                        logstr = data.decode("utf-8", errors="replace")
                        timetag = time.strftime("[%Y%m%dT%H%M%S] ", time.gmtime())
                        # If we owed a timetag from last time, emit it now
                        if need_timetag:
                            log_fd.write(timetag)
                        # If the chunk ends with a newline, strip it and owe
                        # the timetag to the next chunk instead.
                        if logstr.endswith("\n"):
                            logstr = logstr[:-1]
                            need_timetag = True
                        else:
                            need_timetag = False
                        # Write the data, timetagging every embedded newline.
                        log_fd.write(logstr.replace("\n", "\n" + timetag))
                        # Put back the newline removed above (removed earlier
                        # to keep it from triggering the replace()).
                        if need_timetag:
                            log_fd.write("\n")
                    for dest_fd in transfers[src_fd]:
                        os.write(dest_fd, data)
            for src_fd in exceptlist:
                print("I GOT A EXCEPTIONAL EVENT ON FD %d" % src_fd)
    except KeyboardInterrupt:
        print("Buuuuuhhh bye!")
    except Exception as e:
        print("Recorded an unexpected exception!")
        print(e)
    # Cleanup: close the log, restore terminal attributes, close every PTY fd
    # and remove the symlink if one was created.
    if log_fd is not None:
        log_fd.close()
    for key in masters:
        if masters[key] > 0:
            termios.tcsetattr(masters[key], termios.TCSANOW, old[key])
            os.close(masters[key])
    for key in slaves:
        if slaves[key] > 0:
            os.close(slaves[key])
    if symlink != "":
        os.remove(symlink)
| 2.515625 | 3 |
tests/test_node.py | glennlaughlin/ariadne-relay | 0 | 12764759 | <reponame>glennlaughlin/ariadne-relay
from typing import Dict
from ariadne import InterfaceType, make_executable_schema, QueryType
from graphql import graphql_sync
from graphql_relay import to_global_id
from graphql_relay.utils import base64
from ariadne_relay import NodeInterfaceType, RelayObjectType, RelayQueryType
from .conftest import Foo, Qux
def test_node_instance_resolver(
    type_defs: str,
    foo_nodes: Dict[str, Foo],
    foo_type: RelayObjectType,
    query_type: RelayQueryType,
    node_query: str,
    node_interface_type: InterfaceType,
) -> None:
    """Every Foo node is resolvable through the node root field by its global ID."""
    schema = make_executable_schema(
        type_defs,
        foo_type,
        query_type,
        node_interface_type,
    )
    for obj in foo_nodes.values():
        type_name = obj.__class__.__name__
        global_id = to_global_id(type_name, str(obj.id))
        result = graphql_sync(schema, node_query, variable_values={"id": global_id})
        assert result.errors is None
        assert result.data == {
            "node": {
                "__typename": type_name,
                "id": global_id,
            }
        }
def test_node_interface_instance_resolver(
    type_defs: str,
    baz_interface_type: NodeInterfaceType,
    qux_nodes: Dict[str, Qux],
    qux_type: RelayObjectType,
    query_type: RelayQueryType,
    node_query: str,
    node_interface_type: InterfaceType,
) -> None:
    """Qux nodes resolve through a global ID built from the Baz interface name,
    but not through their own concrete type name."""
    schema = make_executable_schema(
        type_defs,
        query_type,
        baz_interface_type,
        qux_type,
        node_interface_type,
    )
    for obj in qux_nodes.values():
        global_id = to_global_id("Baz", str(obj.id))
        result = graphql_sync(schema, node_query, variable_values={"id": global_id})
        assert result.errors is None
        assert result.data == {
            "node": {
                "__typename": obj.__class__.__name__,
                "id": global_id,
            }
        }
        # a NodeInterfaceType instance resolver does not
        # propagate to types that implement the interface
        invalid_global_id = to_global_id("Qux", str(obj.id))
        result = graphql_sync(
            schema,
            node_query,
            variable_values={"id": invalid_global_id},
        )
        assert result.errors is None
        assert result.data == {"node": None}
def test_node_typename_resolver(
    type_defs: str,
    baz_interface_type: NodeInterfaceType,
    qux_nodes: Dict[str, Qux],
    qux_type: RelayObjectType,
    query_type: RelayQueryType,
    qux_connection_query: str,
) -> None:
    """Connection edges report the concrete __typename while their IDs are
    built from the Baz interface name."""
    schema = make_executable_schema(type_defs, query_type, baz_interface_type, qux_type)
    result = graphql_sync(schema, qux_connection_query)
    assert result.errors is None
    assert result.data == {
        "quxes": {
            "edges": [
                {
                    "node": {
                        "__typename": obj.__class__.__name__,
                        "id": to_global_id("Baz", str(obj.id)),
                    },
                }
                for obj in qux_nodes.values()
            ],
        },
    }
def _assert_node_query_returns_none(
    type_defs: str,
    query_type: QueryType,
    node_query: str,
    node_interface_type: InterfaceType,
    global_id: str,
) -> None:
    """Build a schema from the given pieces and assert that resolving
    *global_id* through the node field yields None without errors."""
    schema = make_executable_schema(type_defs, query_type, node_interface_type)
    result = graphql_sync(schema, node_query, variable_values={"id": global_id})
    assert result.errors is None
    assert result.data == {"node": None}


def test_non_node_typename(
    type_defs: str,
    query_type: QueryType,
    node_query: str,
    node_interface_type: InterfaceType,
) -> None:
    """A global ID naming a type that is not a node resolves to None."""
    _assert_node_query_returns_none(
        type_defs, query_type, node_query, node_interface_type, to_global_id("Bar", "bar")
    )


def test_invalid_encoding(
    type_defs: str,
    query_type: QueryType,
    node_query: str,
    node_interface_type: InterfaceType,
) -> None:
    """A global ID that is not valid base64 resolves to None."""
    _assert_node_query_returns_none(
        type_defs, query_type, node_query, node_interface_type, "invalid"
    )


def test_missing_separator(
    type_defs: str,
    query_type: QueryType,
    node_query: str,
    node_interface_type: InterfaceType,
) -> None:
    """A decoded global ID without the ':' separator resolves to None."""
    _assert_node_query_returns_none(
        type_defs, query_type, node_query, node_interface_type, base64("foo")
    )


def test_missing_typename(
    type_defs: str,
    query_type: QueryType,
    node_query: str,
    node_interface_type: InterfaceType,
) -> None:
    """A global ID with an empty type-name part resolves to None."""
    _assert_node_query_returns_none(
        type_defs, query_type, node_query, node_interface_type, base64(":bar")
    )


def test_missing_id(
    type_defs: str,
    query_type: QueryType,
    node_query: str,
    node_interface_type: InterfaceType,
) -> None:
    """A global ID with an empty id part resolves to None."""
    _assert_node_query_returns_none(
        type_defs, query_type, node_query, node_interface_type, base64("foo:")
    )


def test_empty_global_id(
    type_defs: str,
    query_type: QueryType,
    node_query: str,
    node_interface_type: InterfaceType,
) -> None:
    """An empty global ID resolves to None."""
    _assert_node_query_returns_none(
        type_defs, query_type, node_query, node_interface_type, ""
    )
| 1.976563 | 2 |
indep_node_alarm.py | chandrodaya-net/indep_node_alarm | 0 | 12764760 | <reponame>chandrodaya-net/indep_node_alarm<filename>indep_node_alarm.py
import json
import time
import requests # sudo pip3 install requests
from pdpyras import APISession
import subprocess
import shutil
import config
import sys
from utils import create_logger
import traceback
# import socket
# print(socket.gethostname())
# Module-wide logger writing to config.log_file_path at config.log_level
# (the final True presumably also echoes to the console — see utils.create_logger).
logger = create_logger(config.log_file_path, __name__ , config.log_level, True)
def send_msg_to_telegram(msg):
    """Send *msg* to the configured Telegram chat via the Bot API.

    Failures are logged and swallowed so an unreachable Telegram never
    interrupts the monitoring loop.
    """
    try:
        url = "https://api.telegram.org/bot" + str(config.telegram_token) + "/sendMessage"
        # BUGFIX: pass chat_id/text as query parameters so requests
        # URL-encodes the message; the original concatenated the raw message
        # into the URL, corrupting requests containing spaces, '&', '#', etc.
        params = {"chat_id": config.telegram_chat_id, "text": str(msg)}
        requests.get(url, params=params, timeout=1)
    except Exception:
        error_msg = traceback.format_exc()
        logger.error(error_msg)
def loop():
    """Main monitoring loop.

    Every ``config.height_increasing_time_period`` seconds: check free disk
    space, that the local Tendermint-style node's block height is advancing,
    and how many recent blocks lack a precommit from our validator; then
    raise a PagerDuty incident / Telegram alarm, or send an OK heartbeat.
    """
    logger.info("start loop")
    # Initial reference height; 0 if the local RPC endpoint is unreachable.
    # NOTE(review): the bare except also hides JSON/KeyError problems.
    try:
        status = json.loads(requests.get("http://localhost:26657/status").text)
        last_height = int(status["result"]["sync_info"]["latest_block_height"])
    except:
        last_height = 0
    logger.info("last_height={}".format(last_height))
    while True:
        logger.info(" ********************** sleep: {}s ****************************".format(config.height_increasing_time_period))
        sys.stdout.flush()
        time.sleep(config.height_increasing_time_period)
        #cmd = "sudo <chain deamon> version --long > /home/ubuntu/ansible/<chain deamon>_version.out"
        #subprocess.check_output(cmd, shell=True)
        #cmd = "sudo <chaincli deamon> version --long > /home/ubuntu/ansible/<chaincli deamon>_version.out"
        #subprocess.check_output(cmd, shell=True)
        #cmd = "sudo python3 /home/ubuntu/ansible/indep_node_alarm_check.py"
        #subprocess.check_output(cmd, shell=True)
        alarm = False
        alarm_content = ""
        alarm_title = ""
        total, used, free = shutil.disk_usage("/")
        # Refresh the height; on failure keep the old one so the
        # "height stucked" check below fires.
        try:
            current_height = int(json.loads(requests.get("http://localhost:26657/status").text)["result"]["sync_info"]["latest_block_height"])
        except:
            current_height = last_height
        logger.info("check disk space")
        # Alarm when less than 10 GiB remains on the root filesystem.
        if (free//(2**30)) < 10:
            alarm = True
            alarm_title = "disk free 9GB"
            alarm_content = config.node_name + ": " + alarm_title
        logger.info("check height stucked")
        if current_height == last_height:
            alarm = True
            alarm_title = "height stucked!"
            alarm_content = config.node_name + ": " + alarm_title
        else:
            height_start = last_height+1
            height_end = current_height+1
            logger.info("check missing count in range({}, {})".format(height_start, height_end))
            missing_block_cnt = 0
            # Count blocks in the new range whose commit has no signature
            # from our validator address.
            for height in range(height_start, height_end):
                precommit_match = False
                logger.debug(" Height={}: missing_block_cnt={} """.format(height, missing_block_cnt))
                try:
                    precommits = json.loads(requests.get("http://localhost:26657/commit?height=" + str(height)).text)["result"]["signed_header"]["commit"]["signatures"]
                    for precommit in precommits:
                        try:
                            validator_address = precommit["validator_address"]
                        except:
                            validator_address = ""
                        if validator_address == config.my_validator_address:
                            precommit_match = True
                            break
                    if precommit_match == False:
                        missing_block_cnt += 1
                except:
                    # Commit endpoint unreachable / malformed: treat the
                    # chain daemon as dead.
                    alarm = True
                    alarm_title = "chain daemon dead!"
                    alarm_content = config.node_name + ": " + alarm_title
            if missing_block_cnt >= config.missing_block_trigger:
                alarm = True
                alarm_title = "missing blocks >= {}".format(config.missing_block_trigger)
                alarm_content = """{node}: missing blocks={mb} >= {mbt}
                counting from height in range({height_start}, {height_end})""".format(node=config.node_name,
                    mb=missing_block_cnt,
                    mbt=config.missing_block_trigger,
                    height_start=height_start,
                    height_end=height_end)
        logger.info("alarm={}".format(alarm))
        if alarm:
            # Raise a high-urgency PagerDuty incident when enabled, then
            # always notify Telegram.
            if config.pd_notification:
                session = APISession(config.pd_api_key, default_from=config.pd_default_email)
                payload = {
                    "type": "incident",
                    "title": alarm_title,
                    "service": {"id": config.pd_service_id, "type": "service_reference"},
                    "urgency": "high",
                    "body": {
                        "type": "incident_body",
                        "details": alarm_content
                    }
                }
                session.rpost("incidents", json=payload)
                logger.info("""send alert to pageyduty:
                alarm_content: {}""".format(alarm_content))
            logger.info("alarm msg to telegram")
            send_msg_to_telegram(alarm_content)
        else:
            logger.info("Ok msg to telegram")
            msg = "{}: status OK!".format(config.node_name)
            send_msg_to_telegram(msg)
        last_height = current_height
if __name__ == "__main__":
loop()
| 2.21875 | 2 |
wonk/policy.py | aminohealth/wonk | 103 | 12764761 | <filename>wonk/policy.py
"""Manage AWS policies."""
import json
import pathlib
import re
from typing import Dict, List, Tuple
from xdg import xdg_cache_home
from wonk import aws, exceptions, optimizer
from wonk.constants import MAX_MANAGED_POLICY_SIZE
from wonk.models import Policy, Statement, canonicalize_resources, smallest_json, to_set
# On-disk cache root for fetched policy documents (under the XDG cache dir).
POLICY_CACHE_DIR = xdg_cache_home() / "com.amino.wonk" / "policies"
def minify(policies: List[Policy]) -> List[Statement]:
    """Reduce the input policies to the minimal set of functionally identical
    equivalents.

    Alternates action-grouping and resource-grouping passes until neither
    makes progress, since merging actions can enable further resource merges
    and vice versa.
    """
    internal_statements: List[Statement] = []
    for policy in policies:
        internal_statements.extend(policy.statements)
    # BUGFIX: the original loop cleared its continue-flag whenever *either*
    # pass reported no change, so it could exit right after a pass that did
    # change the statements — stopping short of the fixed point. Iterate
    # until a full round leaves everything unchanged.
    changed = True
    while changed:
        actions_changed, internal_statements = grouped_actions(internal_statements)
        resources_changed, internal_statements = grouped_resources(internal_statements)
        changed = actions_changed or resources_changed
    return internal_statements
def grouped_actions(statements: List[Statement]) -> Tuple[bool, List[Statement]]:
    """Merge similar policies' actions.

    Returns (changed, statements) where the statements' actions have been
    combined whenever they share the same action-grouping key.
    """
    merged: Dict[str, Statement] = {}
    any_changed = False
    for statement in statements:
        key = statement.grouping_for_actions()
        existing = merged.get(key)
        if existing is None:
            # First statement seen for this group: keep it as-is.
            merged[key] = statement
            continue
        combined = existing.action_value | statement.action_value
        if combined != existing.action_value:
            any_changed = True
            merged[key] = existing.replace(action_value=combined)
    return any_changed, list(merged.values())
def grouped_resources(statements: List[Statement]) -> Tuple[bool, List[Statement]]:
    """Merge similar policies' resources.

    Returns (changed, statements) where the statements' resources have been
    combined whenever they share the same resource-grouping key.
    """
    merged: Dict[str, Statement] = {}
    any_changed = False
    for statement in statements:
        key = statement.grouping_for_resources()
        existing = merged.get(key)
        if existing is None:
            # First statement seen for this group: keep it as-is.
            merged[key] = statement
            continue
        combined = canonicalize_resources(
            to_set(existing.resource_value) | to_set(statement.resource_value)
        )
        if combined != existing.resource_value:
            any_changed = True
            merged[key] = existing.replace(resource_value=combined)
    return any_changed, list(merged.values())
def combine(policies: List[Policy]) -> List[Policy]:
    """Combine policy files into the smallest possible set of outputs.

    Returns a single policy when the minified statements fit inside one
    managed-policy document, otherwise bin-packs the statements into several
    policies, each under MAX_MANAGED_POLICY_SIZE.
    """
    new_policy = Policy(statements=minify(policies))
    # Simplest case: we're able to squeeze everything into a single file. This is the ideal.
    try:
        new_policy.render()
    except exceptions.UnshrinkablePolicyError:
        pass
    else:
        return [new_policy]
    # Well, that didn't work. Now we need to split the policy into several documents. Subtract the
    # length of the tightest packaging of the policy "envelope" from the maximum size, then
    # subtract the number of statements[1] (because we might have to glue the results together
    # with commas). This is how much room we have to pack statements into.
    #
    # [1] Why "len(statements) - 2"? Because you can glue n statements together with n-1 commas,
    # and it's guaranteed that we can fit at most n-1 statements into a single document because if
    # we could fit all n then we wouldn't have made it to this point in the program. And yes, this
    # is exactly the part of the program where we start caring about every byte.
    minimum_possible_policy_size = len(str(Policy(statements=[])))
    max_number_of_commas = len(new_policy.statements) - 2
    max_statement_size = (
        MAX_MANAGED_POLICY_SIZE - minimum_possible_policy_size - max_number_of_commas
    )
    packed_list = []
    for statement in new_policy.statements:
        packed = str(statement)
        if len(packed) <= max_statement_size:
            packed_list.append(packed)
            continue
        # A single statement can itself be too big for one document; split it.
        for statement_dict in statement.split(max_statement_size):
            packed_list.append(smallest_json(statement_dict))
    statement_sets = optimizer.pack_statements(packed_list, max_statement_size, 10)
    policies = []
    for statement_set in statement_sets:
        # The splitting process above might have resulted in this policy having multiple statements
        # that could be merged back together. The easiest way to handle this is to create a new
        # policy as-is, then group its statements together into *another* new, optimized policy,
        # and emit that one.
        unmerged_policy = Policy(
            statements=[Statement(json.loads(statement)) for statement in statement_set]
        )
        merged_policy = Policy(statements=minify([unmerged_policy]))
        policies.append(merged_policy)
    return policies
def policy_set_pattern(policy_set: str) -> re.Pattern:
    """Return a regexp matching the policy set's name (its final path
    component followed by an underscore and a numeric suffix)."""
    base_name = policy_set.rpartition("/")[2]
    return re.compile(r"^" + base_name + r"_\d+$")
def write_policy_set(output_dir: pathlib.Path, base_name: str, policies: List[Policy]):
    """Write the packed sets, return the names of the files written, and collect garbage.

    Files are written as ``<base_name>_<i>.json`` under *output_dir*; any
    pre-existing files for the same policy set are deleted first. Raises
    TooManyPoliciesError if more than 10 pre-existing files match.
    """
    # Get the list of existing files for this policy set so that we can delete them later. First,
    # get a list of candidates with Path.glob() because that's faster and easier than getting a
    # list of _every_ file and filtering it with Python. Then use a regular expression to match
    # each candidate so that policy set "foo" doesn't unintentionally delete policy set "foo_bar"'s
    # files.
    pattern = policy_set_pattern(base_name)
    cleanup = {
        candidate
        for candidate in output_dir.glob(f"{base_name}_*")
        if pattern.match(candidate.stem)
    }
    if len(cleanup) > 10:
        # Wonk only creates at most 10 policies for a policy set. If we've found more than 10
        # matches then something's gone awry, like the policy set is "*" or such. Either way, pull
        # the plug and refuse to delete them.
        raise exceptions.TooManyPoliciesError(base_name, len(cleanup))
    # For consistency, delete all of the pre-existing files before we start so we can't be left
    # with a mix of old and new files.
    for old in cleanup:
        old.unlink()
    # Write each of the files that file go into this policy set, and create a list of the filenames
    # we've written.
    output_filenames = []
    for i, policy in enumerate(policies, 1):
        output_path = output_dir / f"{base_name}_{i}.json"
        output_filenames.append(str(output_path))
        output_path.write_text(policy.render())
    return output_filenames
def make_cache_file(name: str, version: str) -> pathlib.Path:
    """Return the path to the document's cache file, creating the per-policy
    cache directory if it does not yet exist."""
    directory = POLICY_CACHE_DIR / name
    directory.mkdir(parents=True, exist_ok=True)
    return directory / "{}.json".format(version)
def fetch(client, arn: str, force: bool = False) -> str:
    """Return the contents of the policy.

    The document for the policy's current version is cached on disk, keyed by
    policy name and version; pass force=True to bypass the cache and
    re-download (the fresh copy is then written back to the cache).
    """
    current_version = aws.get_policy_version(client, arn)
    cache_file = make_cache_file(aws.name_for(arn), current_version)
    policy_doc = None
    try:
        if not force:
            policy_doc = cache_file.read_text()
    except FileNotFoundError:
        # Cache miss: fall through to fetching from AWS below.
        pass
    if policy_doc is None:
        policy_doc = aws.get_policy(client, arn, current_version)
        cache_file.write_text(policy_doc)
    return policy_doc
| 2.5 | 2 |
src/main/1.launch_spiders.py | Peter-32/scrapy2 | 0 | 12764762 | <reponame>Peter-32/scrapy2
import os
import re
import scrapy
import shutil
import scrapy
from bs4 import BeautifulSoup
from scrapy.crawler import CrawlerProcess
# Start from a clean scrape: remove any previous ./html output directory and
# recreate it, plus the processed-data directory.
# ignore_errors=True covers "directory does not exist" (and other failures)
# that the original swallowed with a bare try/except: pass.
shutil.rmtree("html", ignore_errors=True)
os.mkdir("html")
# makedirs(exist_ok=True) replaces the racy exists()-then-mkdir check and
# also creates missing parent directories.
os.makedirs("../../data/processed", exist_ok=True)
# Customize Spiders
class ExplorationSpider(scrapy.Spider):
    """Crawl the Guild Wars wiki 'Explorable area' index and save each linked
    area page as a local HTML file under html/."""
    name = "exploration"
    start_urls = ["https://wiki.guildwars.com/wiki/Explorable_area"]

    def parse(self, response):
        """Collect area links between the Battle Isles and Verdant Cascades
        anchors, drop region-name entries, and follow each unique link."""
        next_pages = response.css('a::attr(href)').getall()
        # Keep only the slice of links between the first and last area anchor.
        next_pages = next_pages[next_pages.index('/wiki/Battle_Isles'):next_pages.index('/wiki/Verdant_Cascades')+1]
        # Exclude region/campaign names that are not individual areas.
        next_pages = [x for x in next_pages if x not in ["Charr Homelands", "Depths of Tyria", "Far Shiverpeaks", "Tarnished Coast", "Istan", "Kourna", "Vabbi", "The Desolation", "Realm of Torment", "Shing Jea Island", "Kaineng City", "Echovald Forest", "The Jade Sea", "Ascalon", "Northern Shiverpeaks", "Kryta", "Maguuma Jungle", "Crystal Desert", "Southern Shiverpeaks", "Ring of Fire Islands", "The Mists", "Battle Isles", "The Mists", "Ascalon"]]
        next_pages = ["https://wiki.guildwars.com" + x for x in next_pages]
        next_pages = list(set(next_pages))
        for href in next_pages:
            yield response.follow(href, self.parse_new_page)

    def parse_new_page(self, response):
        """Save the raw response body under html/, de-duplicating filenames
        with a zero-padded counter."""
        page = response.url.split("/")[-2]
        filename = 'html/{}_{}.html'.format(self.name, page)
        i = 2
        while os.path.exists(filename):
            filename = "html/{}_{}_{}.html".format(self.name, page, str(i).zfill(3))
            i += 1
        with open(filename, 'wb') as f:
            f.write(response.body)
        self.log('Saved file {}'.format(filename))
class PropheciesSpider(scrapy.Spider):
    """Crawl the Prophecies missions/primary-quests list and save each linked
    page as a local HTML file under html/."""
    name = "prophecies"
    start_urls = ['https://wiki.guildwars.com/wiki/List_of_Prophecies_missions_and_primary_quests']

    def parse(self, response):
        """Follow the wiki links between The Great Northern Wall and The
        Titan Source anchors (the Prophecies quest range)."""
        next_pages = response.css('a::attr(href)').getall()
        next_pages = next_pages[next_pages.index('/wiki/The_Great_Northern_Wall'):next_pages.index('/wiki/The_Titan_Source')+1]
        next_pages = [x for x in next_pages if "/wiki/" in x]
        next_pages = ["https://wiki.guildwars.com" + x for x in next_pages]
        next_pages = list(set(next_pages))
        for href in next_pages:
            yield response.follow(href, self.parse_new_page)

    def parse_new_page(self, response):
        """Save the raw response body under html/, de-duplicating filenames
        with a zero-padded counter."""
        page = response.url.split("/")[-2]
        filename = 'html/{}_{}.html'.format(self.name, page)
        i = 2
        while os.path.exists(filename):
            filename = "html/{}_{}_{}.html".format(self.name, page, str(i).zfill(3))
            i += 1
        with open(filename, 'wb') as f:
            f.write(response.body)
        self.log('Saved file {}'.format(filename))
class FactionsSpider(scrapy.Spider):
    """Crawl the Factions missions/primary-quests list and save each linked
    page as a local HTML file under html/."""
    name = "factions"
    start_urls = ['https://wiki.guildwars.com/wiki/List_of_Factions_missions_and_primary_quests']

    def parse(self, response):
        """Follow the wiki links between Minister Cho's Estate and The Deep
        anchors (the Factions quest range)."""
        next_pages = response.css('a::attr(href)').getall()
        next_pages = next_pages[next_pages.index('/wiki/Minister_Cho%27s_Estate'):next_pages.index('/wiki/The_Deep')+1]
        next_pages = [x for x in next_pages if "/wiki/" in x]
        next_pages = ["https://wiki.guildwars.com" + x for x in next_pages]
        next_pages = list(set(next_pages))
        for href in next_pages:
            yield response.follow(href, self.parse_new_page)

    def parse_new_page(self, response):
        """Save the raw response body under html/, de-duplicating filenames
        with a zero-padded counter."""
        page = response.url.split("/")[-2]
        filename = 'html/{}_{}.html'.format(self.name, page)
        i = 2
        while os.path.exists(filename):
            filename = "html/{}_{}_{}.html".format(self.name, page, str(i).zfill(3))
            i += 1
        with open(filename, 'wb') as f:
            f.write(response.body)
        self.log('Saved file {}'.format(filename))
class NightfallSpider(scrapy.Spider):
    """Crawl the Nightfall missions/primary-quests list and save each linked
    page as a local HTML file under html/."""
    name = "nightfall"
    start_urls = ['https://wiki.guildwars.com/wiki/List_of_Nightfall_missions_and_primary_quests']

    def parse(self, response):
        """Follow the wiki links between Into Chahbek Village and The Ebony
        Citadel of Mallyx anchors (the Nightfall quest range)."""
        next_pages = response.css('a::attr(href)').getall()
        next_pages = next_pages[next_pages.index('/wiki/Into_Chahbek_Village'):next_pages.index('/wiki/The_Ebony_Citadel_of_Mallyx')+1]
        next_pages = [x for x in next_pages if "/wiki/" in x]
        next_pages = ["https://wiki.guildwars.com" + x for x in next_pages]
        next_pages = list(set(next_pages))
        for href in next_pages:
            yield response.follow(href, self.parse_new_page)

    def parse_new_page(self, response):
        """Save the raw response body under html/, de-duplicating filenames
        with a zero-padded counter."""
        page = response.url.split("/")[-2]
        filename = 'html/{}_{}.html'.format(self.name, page)
        i = 2
        while os.path.exists(filename):
            filename = "html/{}_{}_{}.html".format(self.name, page, str(i).zfill(3))
            i += 1
        with open(filename, 'wb') as f:
            f.write(response.body)
        self.log('Saved file {}'.format(filename))
class EOTNSpider(scrapy.Spider):
    """Crawl the Eye of the North missions/primary-quests list and save each
    linked page as a local HTML file under html/."""
    name = "eotn"
    # BUGFIX: the original start_urls pointed at the *Factions* list page
    # (apparent copy/paste), on which the EotN anchors used in parse() below
    # do not occur — the .index() calls would raise ValueError.
    # NOTE(review): confirm this is the exact EotN list-page title on the wiki.
    start_urls = ['https://wiki.guildwars.com/wiki/List_of_Eye_of_the_North_missions_and_primary_quests']

    def parse(self, response):
        """Follow the wiki links between The Beginning of the End and Glint's
        Challenge anchors (the Eye of the North quest range)."""
        next_pages = response.css('a::attr(href)').getall()
        next_pages = next_pages[next_pages.index('/wiki/The_Beginning_of_the_End'):next_pages.index('/wiki/Glint%27s_Challenge')+1]
        next_pages = [x for x in next_pages if "/wiki/" in x]
        next_pages = ["https://wiki.guildwars.com" + x for x in next_pages]
        next_pages = list(set(next_pages))
        for href in next_pages:
            yield response.follow(href, self.parse_new_page)

    def parse_new_page(self, response):
        """Save the raw response body under html/, de-duplicating filenames
        with a zero-padded counter."""
        page = response.url.split("/")[-2]
        filename = 'html/{}_{}.html'.format(self.name, page)
        i = 2
        while os.path.exists(filename):
            filename = "html/{}_{}_{}.html".format(self.name, page, str(i).zfill(3))
            i += 1
        with open(filename, 'wb') as f:
            f.write(response.body)
        self.log('Saved file {}'.format(filename))
# Launch spiders
# Run all five spiders inside one Twisted reactor; start() blocks until
# every crawl has finished.
process = CrawlerProcess()
process.crawl(ExplorationSpider)
process.crawl(PropheciesSpider)
process.crawl(FactionsSpider)
process.crawl(NightfallSpider)
process.crawl(EOTNSpider)
process.start()
| 2.8125 | 3 |
generate_set.py | WilliamCSA04/sigver_wiwd | 0 | 12764763 | <reponame>WilliamCSA04/sigver_wiwd<filename>generate_set.py
import random
import os
from process_helper import filter_genuine, filter_forgery, filter_by_text, remove_invalid_files
def split_into_train_test(array, genuine_user, dataset_path, genuine_train_quantity, forgery_train_quantity, random_train_quantity):
    """Build the train/test split for one user.

    :param array: list of all user folder names in the dataset
    :param genuine_user: folder name of the user being processed
    :param dataset_path: base path containing the user folders
    :param genuine_train_quantity: genuine signatures to put in the train set
    :param forgery_train_quantity: forgeries to put in the train set
    :param random_train_quantity: random (other users' genuine) signatures per user for training
    :return: [[train_set, test_set], train_classification_list] where test_set
        is [genuine_test, forgery_test, random_test]
    """
    print("user: " + genuine_user)
    signature_images = os.listdir(dataset_path+genuine_user) #Get images from folder
    # NOTE(review): on Python 3, filter() returns an iterator; the list
    # comprehension below consumes it exactly once, so this is safe here.
    signature_images = filter(remove_invalid_files, signature_images)
    signature_images = [dataset_path + genuine_user + file for file in signature_images]
    #Split genuine signature for train and test
    genuine_signature_images = get_images_splited(signature_images, genuine_train_quantity, filter_genuine)
    genuine_signature_images_for_train = genuine_signature_images[0]
    genuine_signature_images_for_test = genuine_signature_images[1]
    #Split forgery signature for train and test
    forgery_signature_images = get_images_splited(signature_images, forgery_train_quantity, filter_forgery)
    forgery_signature_images_for_train = forgery_signature_images[0]
    forgery_signature_images_for_test = forgery_signature_images[1]
    #Split random signature for train and test
    array.remove(genuine_user) #Removing genuine_user to avoid get a invalid random signature
    random_signature_images = get_random_signatures(array, dataset_path, random_train_quantity)
    random_signature_images_for_train = random_signature_images[0]
    random_signature_images_for_test = random_signature_images[1]
    #Merge lists to create train and test set
    train_set = genuine_signature_images_for_train + forgery_signature_images_for_train + random_signature_images_for_train
    test_set = [genuine_signature_images_for_test, forgery_signature_images_for_test, random_signature_images_for_test]
    #Creating classification list
    number_of_genuines_for_train = len(genuine_signature_images_for_train)
    number_of_forgeries_and_randoms_for_train = len(forgery_signature_images_for_train) + len(random_signature_images_for_train)
    train_classification_list = generate_classes_list(number_of_genuines_for_train, number_of_forgeries_and_randoms_for_train)
    array.append(genuine_user) #Removing genuine_user to avoid get a invalid random signature
    return [[train_set, test_set], train_classification_list]
def generate_classes_list(number_of_genuine, number_of_forgery_and_random):
    """Build the binary label list for a training set.

    Genuine samples are labelled 1, forged/random samples 0, with all the
    1s first — matching the order produced by split_into_train_test.

    Args:
        number_of_genuine: count of genuine samples (labelled 1).
        number_of_forgery_and_random: count of negative samples (labelled 0).

    Returns:
        list[int]: [1] * number_of_genuine followed by
        [0] * number_of_forgery_and_random.
    """
    # List repetition replaces the two manual append loops of the original.
    return [1] * number_of_genuine + [0] * number_of_forgery_and_random
def get_random_signatures(folders, dataset_path, number_for_train):
    """Collect "random" negatives: genuine signatures from other users.

    For each folder in *folders*, lists that user's valid genuine signature
    images, shuffles them, and puts the first *number_for_train* into the
    train pool and the remainder into the test pool.

    Args:
        folders: user folder names to draw from (genuine user excluded by
            the caller).
        dataset_path: base dataset path.
            NOTE(review): joined by plain concatenation — assumed to end
            with a path separator; confirm against the config.
        number_for_train: per-user number of images for the train pool.

    Returns:
        [train_paths, test_paths] as two flat lists of file paths.
    """
    random_signatures_for_train = []
    random_signatures_for_test = []
    for folder in folders:
        path = dataset_path + folder
        signature_images = os.listdir(dataset_path + folder)
        signature_images = filter(remove_invalid_files, signature_images)
        signature_images = filter(filter_genuine, signature_images)
        # The comprehension materialises the lazy filters into a list,
        # which random.shuffle below requires.
        signature_images = [path + file for file in signature_images]
        random.shuffle(signature_images)
        random_signatures_for_train = random_signatures_for_train + signature_images[:number_for_train]
        random_signatures_for_test = random_signatures_for_test + signature_images[number_for_train:]
    return [random_signatures_for_train, random_signatures_for_test]
def get_images_splited(signature_images, number_for_train, filter_function):
    """Filter signature paths, shuffle them, and split into train/test.

    Args:
        signature_images: iterable of image file paths.
        number_for_train: how many of the shuffled, filtered paths go to
            the train split; the rest go to the test split.
        filter_function: predicate selecting which paths to keep.

    Returns:
        [train_paths, test_paths].
    """
    # list() is required on Python 3: filter() returns a lazy iterator and
    # random.shuffle needs a mutable sequence (the original raised TypeError).
    signature_images = list(filter(filter_function, signature_images))
    random.shuffle(signature_images)
    signature_images_for_train = signature_images[:number_for_train]
    signature_images_for_test = signature_images[number_for_train:]
    return [signature_images_for_train, signature_images_for_test]
Example/ExampleModel.py | djamal2727/Main-Bearing-Analytical-Model | 0 | 12764764 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 20:41:43 2020
@author: djamal
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
import sys
sys.path.append('C:/Users/DJAMAL/Documents/GitHub/Jamal_NREL2020')
#External Module
import MainBearing_Analytical_Model
import rwtparameters
from datetime import datetime
# Turbine / drivetrain characteristics for the NREL 5 MW reference turbine.
Parameters = rwtparameters.RWTParameters()
FF_timestep, g, m_gr, m_s, m_rh, rho, L_gr, L_g, L_s, L_r, L_h, C1, e1, X1, Y1, C2, e2, X2 = Parameters.RWT_5MW()

# Instantiate the main-bearing analytical model with the turbine parameters.
MainBearingCalc = MainBearing_Analytical_Model.MainBearing_Analytical_Model(
    FF_timestep = FF_timestep,
    m_s = m_s,
    m_gr = m_gr,
    m_rh = m_rh,
    g = g,
    L_gr = L_gr,
    L_g = L_g,
    L_s = L_s,
    L_r = L_r,
    L_h = L_h,
    rho = rho,
    )

# Load channel inputs from a FAST binary output file.
# NOTE(review): hard-coded absolute path — only valid on the author's machine.
file = "/Users/DJAMAL/Documents/GitHub/Jamal_NREL2020/Example/5MWFastData.outb"
data, ChanName, info = MainBearingCalc.load_binary_output(file)
# Channel indices below are assumed to match the 5 MW FAST output layout —
# TODO confirm against ChanName before reuse with other files.
rot_speed = data[:,7] #translate rotor speed to planet speed (rpm)
torque = data[:,5] * 1E3 # in N-m
RotThrust = data[:,6] * 1E3 # in N
m_y = data[:,8] * 1E3 # in N-m
m_z = data[:,9] * 1E3 # in N-m
f_y = data[:,10] * 1E3 # in N
f_z = data[:,11] * 1E3 # in N
startTime = datetime.now()  # wall-clock timer for the run-time report below

# Compute bearing forces, plot them, and derive L10 bearing-life estimates.
f_r1, f_r2, f_a1, f_total1 = MainBearingCalc.MB_forces(rho,torque, RotThrust, m_y, m_z, f_y, f_z, rot_speed, X1, Y1, X2)
MainBearingCalc.plot_loads(f_r1, f_a1, f_total1, f_r2, "Radial Force on MB1", "Axial Force on MB1", "Resultant Force on MB1","Radial Force on MB2", "Time (s)", "Load (N-m)" )
L101, L10_total_MB1 = MainBearingCalc.L10_Calc(rot_speed, f_total1, C1, e1)
L102, L10_total_MB2 = MainBearingCalc.L10_Calc(rot_speed, f_r2, C2, e2)

# Report bearing life in hours and years, plus total processing time.
print('MB1 L10 Calculated: ', L10_total_MB1, "hours or", L10_total_MB1/24/365 , "years" )
print('MB2 L10 Calculated: ', L10_total_MB2, "hours or", L10_total_MB2/24/365 , "years" )
print('Run Time: ', datetime.now() - startTime)
| 2.28125 | 2 |
app/belinsky/routes/observability.py | riZZZhik/word_finder | 0 | 12764765 | """Belinsky observability blueprint."""
import os
from flask import Blueprint
from healthcheck import HealthCheck
from healthcheck.security import safe_dict
from prometheus_client import CollectorRegistry, generate_latest, multiprocess
from ..database import get_all
from ..models import User
# Create healthcheck function
def check_database() -> tuple[bool, str]:
    """Report database availability for the healthcheck endpoint."""
    # Issue a trivial query; any exception propagates and fails the check.
    _ = get_all(User)
    return (True, "Belinsky database is ok")
# Create observability function
def metrics_prometheus() -> tuple[bytes, int]:
    """Render multiprocess Prometheus metrics as a (payload, status) pair."""
    collector_registry = CollectorRegistry()
    # Aggregate per-worker metric files into the fresh registry.
    multiprocess.MultiProcessCollector(collector_registry)
    payload = generate_latest(collector_registry)
    return payload, 200
# Create environment function
def environment() -> tuple[dict, int]:
    """Return the process environment with sensitive entries masked."""
    masked = safe_dict(os.environ, ["key", "token", "pass", "credentials"])
    return masked, 200
def create_blueprint_observability() -> Blueprint:
    """Assemble the observability blueprint: healthcheck, environment, metrics."""
    blueprint = Blueprint("observability", __name__)

    # Healthcheck endpoint backed by the database probe.
    health = HealthCheck()
    health.add_check(check_database)
    blueprint.add_url_rule("/healthcheck", "healthcheck", view_func=health.run)

    # Masked environment dump.
    blueprint.add_url_rule("/environment", "environment", view_func=environment)

    # Prometheus scrape endpoint.
    blueprint.add_url_rule(
        "/metrics/prometheus", "prometheus", view_func=metrics_prometheus
    )

    return blueprint
| 2.3125 | 2 |
mil_benchmark/utils.py | AntonValk/BagGraph-Graph-MIL | 8 | 12764766 | <filename>mil_benchmark/utils.py
import numpy as np
from numpy import matlib
from scipy.stats import t
import scipy.sparse as sp
from math import sqrt
from statistics import stdev
def normalize(mx):
    """Symmetrically normalize a matrix: D^{-1/2} @ mx @ D^{-1/2}.

    D is the diagonal matrix of row sums; rows summing to zero are left
    untouched (their inverse is forced to 0 instead of inf).
    """
    degrees = np.array(mx.sum(1))
    inv_degrees = np.power(degrees, -1).flatten()
    inv_degrees[np.isinf(inv_degrees)] = 0.
    d_inv_sqrt = np.diag(np.sqrt(inv_degrees))
    return d_inv_sqrt.dot(mx).dot(d_inv_sqrt)
def accuracy(labels, output):
    """Fraction of binary predictions (output > 0.5) matching labels."""
    predictions = (output > 0.5).type_as(labels)
    num_correct = predictions.eq(labels).double().sum()
    return num_correct / len(labels)
def compute_distance(embed):
    """Compute the squared Euclidean distance matrix of row embeddings.

    dist[i, j] = ||embed[i]||^2 + ||embed[j]||^2 - 2 <embed[i], embed[j]>,
    floored at 1e-8 (which also clamps the zero diagonal) for numerical
    stability in downstream sqrt/log operations.

    Args:
        embed: (N, D) array of row vectors.

    Returns:
        (N, N) array of clamped squared distances.
    """
    gram = np.dot(embed, np.transpose(embed))
    sq_norms = np.diag(gram)
    # Broadcasting replaces the deprecated np.matlib.repmat of the original.
    dist = sq_norms[np.newaxis, :] + sq_norms[:, np.newaxis] - 2 * gram
    dist[dist < 1e-8] = 1e-8
    return dist
def estimate_graph(gamma, epsilon, dist, max_iter, k, r):
    """Learn a graph adjacency matrix from a pairwise-distance matrix.

    Iteratively solves for edge weights over a candidate edge set built
    from each node's k*r nearest neighbours, using a primal-dual style
    update scheme.

    Args:
        gamma: step size of the iterative updates.
        epsilon: relative-change tolerance for the stopping criterion.
        dist: (N, N) pairwise distance matrix.
            NOTE: modified in place — the diagonal is masked via
            ``dist += 1e10 * np.eye(N)`` before any copy is made.
        max_iter: maximum number of solver iterations.
        k: target node degree used to calibrate the distance scale theta.
        r: over-sampling factor; each node contributes k*r candidate edges.

    Returns:
        (N, N) symmetric adjacency matrix with unit diagonal and edge
        weights rescaled so the largest weight is 1.
    """
    np.random.seed(0)  # fixed seed: deterministic random initialisation
    N = dist.shape[0]
    # Mask the diagonal so self-distances never enter the neighbour lists.
    dist += 1e10 * np.eye(N)
    # Candidate degree: each node keeps its k*r nearest neighbours.
    deg_exp = np.minimum(int(N-1), int(k * r))
    dist_sort_col_idx = np.argsort(dist, axis=0)
    dist_sort_col_idx = np.transpose(dist_sort_col_idx[0:deg_exp, :])
    dist_sort_row_idx = np.matlib.repmat(np.arange(N).reshape(N, 1), 1, deg_exp)
    dist_sort_col_idx = np.reshape(dist_sort_col_idx, int(N * deg_exp)).astype(int)
    dist_sort_row_idx = np.reshape(dist_sort_row_idx, int(N * deg_exp)).astype(int)
    # De-duplicate symmetric pairs: sort each (i, j) pair, then unique rows.
    dist_idx = np.zeros((int(N * deg_exp), 2)).astype(int)
    dist_idx[:, 0] = dist_sort_col_idx
    dist_idx[:, 1] = dist_sort_row_idx
    dist_idx = np.sort(dist_idx, axis=1)
    dist_idx = np.unique(dist_idx, axis=0)
    dist_sort_col_idx = dist_idx[:, 0]
    dist_sort_row_idx = dist_idx[:, 1]
    num_edges = len(dist_sort_col_idx)
    # Random initialisation of edge weights w and per-node variables d.
    w_init = np.random.uniform(0, 1, size=(num_edges, 1))
    d_init = k * np.random.uniform(0, 1, size=(N, 1))
    w_current = w_init
    d_current = d_init
    # Calibrate the distance scaling theta from per-node k-NN statistics,
    # averaging the per-node lower/upper bounds (NaN/inf entries dropped).
    dist_sorted = np.sort(dist, axis=0)
    B_k = np.sum(dist_sorted[0:k, :], axis=0)
    dist_sorted_k = dist_sorted[k-1, :]
    dist_sorted_k_plus_1 = dist_sorted[k, :]
    theta_lb = 1 / np.sqrt(k * dist_sorted_k_plus_1 ** 2 - B_k * dist_sorted_k_plus_1)
    theta_lb = theta_lb[~np.isnan(theta_lb)]
    theta_lb = theta_lb[~np.isinf(theta_lb)]
    theta_lb = np.mean(theta_lb)
    theta_ub = 1 / np.sqrt(k * dist_sorted_k ** 2 - B_k * dist_sorted_k)
    theta_ub = theta_ub[~np.isnan(theta_ub)]
    theta_ub = theta_ub[~np.isinf(theta_ub)]
    if len(theta_ub) > 0:
        theta_ub = np.mean(theta_ub)
    else:
        # All upper-bound entries were NaN/inf: fall back to the lower bound.
        theta_ub = theta_lb
    theta = (theta_lb + theta_ub) / 2
    dist = theta * dist
    # z: scaled distances of the candidate edges, as a column vector.
    z = dist[dist_sort_row_idx, dist_sort_col_idx]
    z.shape = (num_edges, 1)
    for iter in range(max_iter):  # NOTE(review): `iter` shadows the builtin
        # print('Graph inference epoch : ' + str(iter))
        # Forward step on w and d (S maps edge values to node sums).
        St_times_d = d_current[dist_sort_row_idx] + d_current[dist_sort_col_idx]
        y_current = w_current - gamma * (2 * w_current + St_times_d)
        adj_current = np.zeros((N, N))
        adj_current[dist_sort_row_idx, dist_sort_col_idx] = np.squeeze(w_current)
        adj_current = adj_current + np.transpose(adj_current)
        S_times_w = np.sum(adj_current, axis=1)
        S_times_w.shape = (N, 1)
        y_bar_current = d_current + gamma * S_times_w
        # Proximal steps: soft-threshold the edge variables, closed-form
        # update for the node variables.
        p_current = np.maximum(0, np.abs(y_current) - 2 * gamma * z)
        p_bar_current = (y_bar_current - np.sqrt(y_bar_current * y_bar_current + 4 * gamma)) / 2
        St_times_p_bar = p_bar_current[dist_sort_row_idx] + p_bar_current[dist_sort_col_idx]
        q_current = p_current - gamma * (2 * p_current + St_times_p_bar)
        p_matrix_current = np.zeros((N, N))
        p_matrix_current[dist_sort_row_idx, dist_sort_col_idx] = np.squeeze(p_current)
        p_matrix_current = p_matrix_current + np.transpose(p_matrix_current)
        S_times_p = np.sum(p_matrix_current, axis=1)
        S_times_p.shape = (N, 1)
        q_bar_current = p_bar_current + gamma * S_times_p
        # Combine forward and proximal results into the next iterate.
        w_updated = np.abs(w_current - y_current + q_current)
        d_updated = np.abs(d_current - y_bar_current + q_bar_current)
        # Stop once both relative changes fall below epsilon.
        if (np.linalg.norm(w_updated - w_current) / np.linalg.norm(w_current) < epsilon) and \
                (np.linalg.norm(d_updated - d_current) / np.linalg.norm(d_current) < epsilon):
            break
        else:
            w_current = w_updated
            d_current = d_updated
    upper_tri_index = np.triu_indices(N, k=1)
    z = dist[upper_tri_index[0], upper_tri_index[1]]
    z.shape = (int(N * (N - 1) / 2), 1)
    # Rescale so the strongest edge weight equals 1 (z scaled to match).
    z = z * np.max(w_current)
    w_current = w_current / np.max(w_current)
    # Symmetrise and add self-loops to form the final adjacency matrix.
    inferred_graph = np.zeros((N, N))
    inferred_graph[dist_sort_row_idx, dist_sort_col_idx] = np.squeeze(w_current)
    inferred_graph = inferred_graph + np.transpose(inferred_graph) + np.eye(N)
    return inferred_graph
def MAP_inference(x, num_neib, r):
    """Infer a graph over the rows of *x* via distance-based graph learning.

    Args:
        x: (N, D) array of embeddings, one node per row.
        num_neib: target neighbour count, coerced to int.
        r: over-sampling factor forwarded to estimate_graph.

    Returns:
        (N, N) symmetric adjacency matrix produced by estimate_graph with
        the fixed hyper-parameters gamma=0.01, epsilon=0.001, max_iter=1000.
    """
    # The original also bound an unused local N = x.shape[0]; removed.
    k = int(num_neib)
    dist = compute_distance(x)
    inferred_graph = estimate_graph(0.01, 0.001, dist, 1000, k, r)
    return inferred_graph
| 2.4375 | 2 |
main.py | thisischandanmishra/flight-information | 2 | 12764767 | <reponame>thisischandanmishra/flight-information
import os
from flask import Flask
from src.conf.config import SQLALCHEMY_DATABASE_URI
from src.conf.routes import generate_routes
from src.database.database import db
def create_app():
    """Build and configure the Flask application.

    Registers the routes, binds SQLAlchemy, and creates the schema when the
    database does not exist yet.
    """
    app = Flask(__name__)
    app.config['DEBUG'] = True
    app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    generate_routes(app)
    db.init_app(app)
    # NOTE(review): os.path.exists() is called on the database *URI*, not a
    # filesystem path; this only works if SQLALCHEMY_DATABASE_URI is really a
    # plain file path — confirm against src.conf.config.
    if not os.path.exists(SQLALCHEMY_DATABASE_URI):
        db.app = app
        db.create_all()
    return app
if __name__ == '__main__':
    # Development entry point: serve on localhost:5000 with auto-reload.
    app = create_app()
    app.run(port=5000, host='localhost', use_reloader=True)
| 2.453125 | 2 |
pymoon/core/utils/extension_checker.py | hassanMuhamad/pymoon | 1 | 12764768 | <reponame>hassanMuhamad/pymoon
# !TODO:
# - Module that checks the file extension
# @entry: file Object || image Object
# return a flag
| 1.375 | 1 |
scripts/append_album_to_clementine.py | Mdlkxzmcp/various_python | 0 | 12764769 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 21:25:42 2018
@author: mdlkxzmcp
@version: 0.1.0
"""
import sys, getopt, os, re, dbus
def displayHelpMessage():
    """Print the command-line usage/help text to stdout."""
    # NOTE(review): the text advertises "--play" but getopt below actually
    # accepts "--play_now" as the long option.
    print("""
    Usage: append_album_to_clementine.py [options]
    Assumes: Clementine is running.
    Options:
    -h, --help show this help message and exit.
    -p, --play [default: False] Used to play the first track
    from the appended list.
    -i PATH, --input=PATH
    [default: cwd] Supply a path to the album(s)
    you would wish to append to the current
    Clementine playlist.""")
def createTrackList(folder_path):
    """Recursively collect audio file paths under *folder_path*.

    Args:
        folder_path: root directory to walk.

    Returns:
        list[str]: paths whose name ends in .mp3/.ogg/.flac/.aac, built as
        ``subdir + os.sep + filename`` (same as the original).
    """
    # Raw string fixes the invalid "\." escape of the original (a
    # SyntaxWarning on modern Python); compiling once hoists the regex
    # out of the per-file loop.
    audio_pattern = re.compile(r"\.(mp3|ogg|flac|aac)$")
    track_list = []
    for subdir, _dirs, files in os.walk(folder_path):
        for file_name in files:
            file_path = subdir + os.sep + file_name
            if audio_pattern.search(file_path):
                track_list.append(file_path)
    return track_list
def appendAlbum(folder_path=None, play_now=False):
    """Append every audio track found under *folder_path* to Clementine.

    Fixes two defects of the original: the duplicated consecutive ``def``
    lines (a syntax error), and the ``folder_path=os.getcwd()`` default,
    which froze the directory at import time instead of call time.

    Args:
        folder_path: album directory; defaults to the current working
            directory at call time.
        play_now: when True, start playback of the first appended track.
    """
    if folder_path is None:
        folder_path = os.getcwd()
    track_list = createTrackList(folder_path)
    interface = createInterface()
    last_track_in_playlist_id = interface.GetLength()
    # TODO: use the AfterTrack value from the TrackAdded signal instead of
    # assuming tracks land at consecutive playlist positions.
    for offset, track in enumerate(track_list):
        # Only the very first track honours play_now; the rest are queued.
        interface.AddTrack(track, last_track_in_playlist_id + offset,
                           play_now if offset == 0 else False)
        # TODO: wait for a TrackAdded signal before appending the next track.
def createInterface():
    """Return a D-Bus MediaPlayer interface to Clementine's track list."""
    bus = dbus.SessionBus()
    tracklist_obj = bus.get_object('org.mpris.MediaPlayer2.clementine', '/Tracklist')
    return dbus.Interface(tracklist_obj, dbus_interface='org.freedesktop.MediaPlayer')
def main():
    """Parse command-line options and append the chosen album.

    Fixes two defects of the original loop-driven dispatch: with no
    arguments the script did nothing (the help text promises the current
    working directory as the default), and ``-i PATH -p`` ignored the play
    flag because appendAlbum was invoked before ``-p`` was processed.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hpi:", ["help", "play_now", "input="])
    except getopt.GetoptError as err:
        print(err)
        displayHelpMessage()
        sys.exit(2)
    play_now = False
    folder_path = None
    # First pass: collect all options so flag order does not matter.
    for opt, arg in opts:
        if opt in ('-h', "--help"):
            displayHelpMessage()
            sys.exit()
        elif opt in ('-p', '--play_now'):
            play_now = True
        elif opt in ('-i', '--input'):
            folder_path = arg
    # Act once every flag is known; fall back to the documented cwd default.
    if folder_path is None:
        appendAlbum(play_now=play_now)
    else:
        appendAlbum(folder_path, play_now)
if __name__ == "__main__":
    # Script entry point.
    main()
| 2.640625 | 3 |
Dicts.py | SIEM-Tools/ArcSight | 4 | 12764770 | service_dict = {
'NetworkService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'ViewerConfigurationService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'getViewerConfigurationIfNewer':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'DashboardService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getDataMonitorDataIfNewer':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getDashboardIfNewer':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'getDependentResourceIDsForResourceId':{},
'deleteByLocalId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getDataMonitorDatasIfNewer':{},
'getServiceMinorVersion':{},
},
'DataMonitorQoSService':{
'disableQoSConstraintsOnDM':{},
'enableQoSConstraintsOnDM':{},
},
'DrilldownService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'PortletService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'QueryService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getQuerySessionID':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getQuerySQL':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'getDependentResourceIDsForResourceId':{},
'deleteByLocalId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'ConnectorService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getAllAgents':{},
'getNamesAndAliases':{},
'getReverseRelationshipsOfThisAndParents':{},
'getAllRunningAgentIDs':{},
'getESMVersion':{},
'getAllAgentIDs':{},
'addRelationship':{},
'containsDirectMemberByName':{},
'getAgentByName':{},
'getAllStoppedAgentIDs':{},
'getResourcesReferencePages':{},
'getReverseRelationshipsOfParents':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'sendCommand':{},
'getReferencePages':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'containsDirectMemberByName1':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'getConnectorExecStatus':{},
'getPersonalAndSharedResourceRoots':{},
'checkImportStatus':{},
'hasReadPermission':{},
'containsDirectMemberByNameOrAlias1':{},
'getSourcesWithThisTargetByRelationship':{},
'getDeadAgentIDs':{},
'getAgentsByIDs':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'delete':{},
'insertResource':{},
'insertResources':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'initiateImportConfiguration':{},
'getTargetsByRelationship':{},
'getPersonalGroup':{},
'getTargetsByRelationshipCount':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getDevicesForAgents':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getAgentParameterDescriptor':{},
'initiateExportConnectorConfiguration':{},
'deleteResource':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAgentIDsByOperationalStatusType':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getPersonalResourceRoots':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getAgentParameterDescriptors':{},
'getResourcesWithVisibilityToUsers':{},
'findAll':{},
'getResourcesByNameSafely':{},
'getLiveAgentIDs':{},
'getRelationshipsOfParents':{},
'getAgentByID':{},
'hasReverseRelationship':{},
'getCommandsList':{},
'getChildNamesAndAliases':{},
'getParameterGroups':{},
'update':{},
'getAllPausedAgentIDs':{},
'getRelationshipsOfThisAndParents':{},
'updateConnector':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'getSourcesWithThisTargetByACLRelationship':{},
'getAllPathsToRootAsStrings':{},
'executeCommand':{},
'loadAdditional':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'initiateDownloadFile':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getServiceMinorVersion':{},
},
'QueryViewerService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'getMatrixDataForDrilldown':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'getDependentResourceIDsForResourceId':{},
'deleteByLocalId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getMatrixData':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'DrilldownListService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getDrilldownList':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'CaseService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getCaseEventIDs':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getCasesGroupID':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'getChildNamesAndAliases':{},
'containsDirectMemberByName1':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'getDependentResourceIDsForResourceId':{},
'deleteByLocalId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'deleteAllCaseEvents':{},
'insertResource':{},
'insertResources':{},
'addCaseEvents':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'deleteCaseEvents':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'getCaseEventsTimeSpan':{},
'getSystemCasesGroupID':{},
'insert':{},
'getServiceMinorVersion':{},
'getEventExportStatus':{},
},
'ArchiveReportService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getDefaultArchiveReportByURI':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'poll':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'getAllPathsToRoot':{},
'resolveRelationship':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'initDefaultArchiveReportDownloadWithOverwrite':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getDefaultArchiveReportById':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'archiveReport':{},
'getServiceMajorVersion':{},
'initDefaultArchiveReportDownloadByURI':{},
'hasReverseRelationship':{},
'getChildNamesAndAliases':{},
'containsDirectMemberByName1':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'getDependentResourceIDsForResourceId':{},
'deleteByLocalId':{},
'initDefaultArchiveReportDownloadById':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'initDefaultArchiveReportDownloadByIdASync':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'ActiveListService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'addEntries':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'getEntries':{},
'hasReverseRelationship':{},
'getChildNamesAndAliases':{},
'containsDirectMemberByName1':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'getDependentResourceIDsForResourceId':{},
'deleteByLocalId':{},
'clearEntries':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'deleteEntries':{},
'insertResource':{},
'insertResources':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'InternalService':{
'newGroupAttributesParameter':{},
'newReportDrilldownDefinition':{},
'newEdge':{},
'newHierarchyMapGroupByHolder':{},
'newActiveChannelDrilldownDefinition':{},
'newMatrixData':{},
'newIntrospectableFieldListParameter':{},
'newGraphData':{},
'newGeoInfoEventGraphNode':{},
'newIntrospectableFieldListHolder':{},
'newGroupAttributeEntry':{},
'newDashboardDrilldownDefinition':{},
'newListWrapper':{},
'newFontHolder':{},
'newEventGraph':{},
'newPropertyHolder':{},
'newQueryViewerDrilldownDefinition':{},
'newGraph':{},
'newFilterFields':{},
'newFontParameter':{},
'newGeographicInformation':{},
'newNode':{},
'newHierarchyMapGroupByParameter':{},
'newErrorCode':{},
'newEventGraphNode':{},
},
'SecurityEventService':{
'getServiceMajorVersion':{},
'getSecurityEventsWithTimeout':{},
'getSecurityEvents':{},
'getServiceMinorVersion':{},
'getSecurityEventsByProfile':{},
},
'GraphService':{
'createSourceTargetGraphFromEventList':{},
'createSourceTargetGraph':{},
'createSourceEventTargetGraph':{},
'getServiceMajorVersion':{},
'getServiceMinorVersion':{},
'createSourceEventTargetGraphFromEventList':{},
},
'GroupService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'childAttributesChanged':{},
'getTargetsByRelationshipForSourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'getReverseRelationshipsOfThisAndParents':{},
'getESMVersion':{},
'addRelationship':{},
'containsDirectMemberByName':{},
'getResourcesReferencePages':{},
'getReverseRelationshipsOfParents':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'isParentOf':{},
'insertGroup':{},
'getAllChildren':{},
'getReferencePages':{},
'getGroupChildCount':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNames':{},
'removeChild':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'addChild':{},
'getServiceMajorVersion':{},
'containsDirectMemberByName1':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'getPersonalAndSharedResourceRoots':{},
'hasReadPermission':{},
'containsDirectMemberByNameOrAlias1':{},
'getSourcesWithThisTargetByRelationship':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'delete':{},
'insertResource':{},
'insertResources':{},
'getAllChildIDCount':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'getTargetsByRelationship':{},
'getPersonalGroup':{},
'getTargetsByRelationshipCount':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'deleteResource':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'containsDirectMemberByNameOrAlias':{},
'removeChildren':{},
'getTargetsWithRelationshipTypeForResource':{},
'getPersonalResourceRoots':{},
'getMetaGroup':{},
'getChildrenByType':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'findAll':{},
'addChildren':{},
'getResourcesByNameSafely':{},
'getRelationshipsOfParents':{},
'getChildResourcesByType':{},
'hasReverseRelationship':{},
'getChildNamesAndAliases':{},
'update':{},
'hasChildWithNameOrAlias':{},
'getRelationshipsOfThisAndParents':{},
'getChildIDByChildNameOrAlias':{},
'containsResourcesRecursively':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'isGroup':{},
'getSourcesWithThisTargetByACLRelationship':{},
'getAllPathsToRootAsStrings':{},
'updateGroup':{},
'loadAdditional':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'getGroupByURI':{},
'isDisabled':{},
'getAllChildIDs':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getGroupByID':{},
'getServiceMinorVersion':{},
},
'ResourceService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'UserResourceService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'addUserPreferenceById':{},
'getNamesAndAliases':{},
'getReverseRelationshipsOfThisAndParents':{},
'getESMVersion':{},
'addRelationship':{},
'containsDirectMemberByName':{},
'getResourcesReferencePages':{},
'getReverseRelationshipsOfParents':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'getReferencePages':{},
'getUserModificationFlag':{},
'getAllUsers':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'create':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'recordSuccessfulLoginFor':{},
'updateUserPreferencesByName':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'changePassword':{},
'containsDirectMemberByName1':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'getCurrentUser':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'hasReadPermission':{},
'containsDirectMemberByNameOrAlias1':{},
'getUserByName':{},
'getSessionProfile':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'delete':{},
'getRootUserGroup':{},
'updateUserPreferencesById':{},
'getAllUserPreferencesForUserByName':{},
'insertResource':{},
'insertResources':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'getFeatureAvailabilities':{},
'isFeatureAvailable':{},
'getTargetsByRelationship':{},
'increaseFailedLoginAttemptsFor':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getPersonalGroup':{},
'getTargetsByRelationshipCount':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'isAdministrator':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getPersonalResourceRoots':{},
'getResourceByName':{},
'hasXPermission':{},
'addModuleConfigForUserById':{},
'findAllIds':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getUserPreferenceForUserByName':{},
'getAllUserPreferencesForUserById':{},
'getModuleConfigForUserByName':{},
'getResourcesWithVisibilityToUsers':{},
'getUserPreferenceForUserById':{},
'findAll':{},
'getResourcesByNameSafely':{},
'getRelationshipsOfParents':{},
'getServerDefaultLocale':{},
'hasReverseRelationship':{},
'getChildNamesAndAliases':{},
'updateUser':{},
'update':{},
'updateModuleConfigForUserById':{},
'getRelationshipsOfThisAndParents':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'checkPassword':{},
'updateModuleConfigForUserByName':{},
'getResourceIfModified':{},
'addModuleConfigForUserByName':{},
'getSourcesWithThisTargetByACLRelationship':{},
'getAllPathsToRootAsStrings':{},
'getRootUserGroupID':{},
'loadAdditional':{},
'resetFailedLoginAttemptsFor':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getRootUserId':{},
'getModuleConfigForUserById':{},
'addUserPreferenceByName':{},
'getServiceMinorVersion':{},
},
'FileResourceService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'initiateUpload':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'initiateDownloadByUUID':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getUploadStatus':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'getDependentResourceIDsForResourceId':{},
'deleteByLocalId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'InfoService':{
'getActivationDateMillis':{},
'getPropertyByEncodedKey':{},
'isTrial':{},
'hasErrors':{},
'getWebServerUrl':{},
'getWebAdminRelUrlWithOTP':{},
'getWebAdminRelUrl':{},
'isPatternDiscoveryEnabled':{},
'getExpirationDate':{},
'getWebServerUrlWithOTP':{},
'isLicenseValid':{},
'getErrorMessage':{},
'getManagerVersionString':{},
'expires':{},
'isSessionListsEnabled':{},
'setLicensed':{},
'getProperty':{},
'isPartitionArchiveEnabled':{},
'getStatusString':{},
'getServiceMajorVersion':{},
'getCustomerName':{},
'getCustomerNumber':{},
'getServiceMinorVersion':{},
},
'SecurityEventIntrospectorService':{
'getTimeConstraintFields':{},
'hasField':{},
'convertLabelToName':{},
'getFields':{},
'getGroupNames':{},
'getServiceMajorVersion':{},
'getFieldsByFilter':{},
'hasFieldName':{},
'getFieldByName':{},
'getGroupDisplayName':{},
'getServiceMinorVersion':{},
'getRelatedFields':{},
},
'ConAppService':{
'getPathToConApp':{},
'getServiceMajorVersion':{},
'getServiceMinorVersion':{},
},
'FieldSetService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'ReportService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'ManagerAuthenticationService':{
'getServiceMajorVersion':{},
'getOTP':{},
'getServiceMinorVersion':{},
},
'ManagerSearchService':{
'search':{},
'search1':{},
},
'DataMonitorService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getDataMonitorIfNewer':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getEnabledResourceIDs':{},
'getAllUnassignedResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
'ServerConfigurationService':{
'getTargetsWithRelationshipTypeForResourceById':{},
'getTargetsByRelationshipForSourceId':{},
'getSourcesWithThisTargetByRelationshipCount':{},
'deleteResource':{},
'getSourceURIWithThisTargetByRelatiobnshipForResourceId':{},
'getAllAttachmentOnlyResourceIDs':{},
'getNamesAndAliases':{},
'containsDirectMemberByNameOrAlias':{},
'getTargetsWithRelationshipTypeForResource':{},
'getReverseRelationshipsOfThisAndParents':{},
'getPersonalResourceRoots':{},
'getESMVersion':{},
'getResourceByName':{},
'hasXPermission':{},
'findAllIds':{},
'addRelationship':{},
'getReverseRelationshipsOfParents':{},
'getResourcesReferencePages':{},
'containsDirectMemberByName':{},
'getTargetsAsURIByRelationshipForSourceId':{},
'hasWritePermission':{},
'deleteResources':{},
'resolveRelationship':{},
'getAllPathsToRoot':{},
'getResourcesWithVisibilityToUsers':{},
'getReferencePages':{},
'findAll':{},
'getExclusivelyDependentResources':{},
'getAllowedUserTypes':{},
'getResourcesByNameSafely':{},
'getResourcesByNames':{},
'getSourceURIWithThisTargetByRelatiobnship':{},
'getRelationshipsOfParents':{},
'getMetaGroupID':{},
'isValidResourceID':{},
'getServiceMajorVersion':{},
'hasReverseRelationship':{},
'containsDirectMemberByName1':{},
'getChildNamesAndAliases':{},
'getResourcesByIds':{},
'getResourceTypesVisibleToUsers':{},
'update':{},
'getRelationshipsOfThisAndParents':{},
'getPersonalAndSharedResourceRoots':{},
'getSourcesWithThisTargetByRelationship':{},
'containsDirectMemberByNameOrAlias1':{},
'hasReadPermission':{},
'copyResourceIntoGroup':{},
'deleteByLocalId':{},
'getDependentResourceIDsForResourceId':{},
'getResourceIfModified':{},
'findById':{},
'getSourcesWithThisTargetByRelationshipForResourceId':{},
'getSourcesWithThisTargetByACLRelationship':{},
'delete':{},
'getAllPathsToRootAsStrings':{},
'loadAdditional':{},
'insertResource':{},
'insertResources':{},
'getAllUnassignedResourceIDs':{},
'getEnabledResourceIDs':{},
'getSourceURIWithThisTargetByRelationship':{},
'getResourceById':{},
'updateACLForResourceById':{},
'getCorruptedResources':{},
'unloadAdditional':{},
'isDisabled':{},
'getTargetsAsURIByRelationship':{},
'updateResources':{},
'deleteByUUID':{},
'getTargetsByRelationship':{},
'getSourceURIWithThisTargetByRelationshipForResourceId':{},
'getTargetsByRelationshipCount':{},
'getPersonalGroup':{},
'findByUUID':{},
'resetState':{},
'insert':{},
'getServiceMinorVersion':{},
},
} | 1.460938 | 1 |
tcam.py | luinaudt/python_lib | 0 | 12764771 | import sys
import math
import warnings
import logging
class tcam:
    """A simple software model of a TCAM (ternary content-addressable memory).

    Entries are (key, mask, priority, value, address) tuples.  Mask bits set
    to 1 are wildcard ("don't care") bits during a lookup.
    """
    def __init__(self,entryWidth, priWidth=8, addrWidth=int(math.log2(sys.maxsize)), valueWidth=32, size=sys.maxsize):
        """
        entryWidth : width in bits of the entry
        priWidth : Width of the priority in bits
        addrWidth : width of addresses in bits
        valueWidth : width of the associated value in bit
        size : max number of entries
        """
        if math.log2(size) > addrWidth :
            warnings.warn("addr width can't represents the size of table")
        self.MaxEntries=size
        self.PriorityWidth=priWidth
        self.AddrWidth=addrWidth
        self.ValueWidth=valueWidth
        self.EntryWidth=entryWidth
        self.Content=[]
    def insert(self,key, mask, pri, val, addr=None):
        """Insert an entry.

        key : key to look up
        mask : wildcard mask of the entry (1 = don't-care bit)
        pri : priority (higher wins when several entries match)
        val : result value
        addr : position inside the TCAM : optional

        Raises MemoryError when the table is full, ValueError when any
        field is out of range for its configured bit width.
        """
        line=(key,mask,pri,val,addr)
        if len(self.Content) >= self.MaxEntries:
            raise MemoryError("memory full content {} not inserted".format(line))
        if key > 2**self.EntryWidth-1 or key < 0:
            raise ValueError("inserted key {} too large".format(key))
        if mask > 2**self.EntryWidth-1 or mask < 0:
            raise ValueError("inserted mask {} too large".format(mask))
        if pri > 2**self.PriorityWidth-1 or pri < 0:
            raise ValueError("inserted priority key {} too large".format(pri))
        if val > 2**self.ValueWidth-1 or val < 0:
            raise ValueError("inserted value {} too large".format(val))
        self.Content.append(line)
        logging.info("content {} inserted".format(line))
    def search(self,val):
        """Return the value associated to ``val`` with the highest priority.

        If two matches have the same priority the first one found wins.
        Returns None when nothing matches.
        TODO: better algorithm for search (currently a linear scan).
        """
        res=(0,-1)
        for (key,mask,pri,resO,_) in self.Content:
            # Compare only the non-wildcard bits (mask bits cleared).
            if (key & ~mask) == (val & ~mask) and res[1]<pri:
                res = (resO, pri)
        if res==(0,-1):
            return None
        else:
            return res[0]
    def deleteAddr(self, addr):
        """Delete the first entry stored with address ``addr``.

        Raises ValueError when no entry carries that address.
        """
        for i, (_,_,_,_,elem) in enumerate(self.Content):
            if addr==elem:
                del self.Content[i]
                return
        raise ValueError("Address {} is not present".format(addr))
    def deleteKM(self, key, mask):
        """Delete every entry whose (key, mask) pair matches exactly.

        Bug fix: the previous implementation deleted items from
        ``self.Content`` while iterating over it, which silently skipped the
        element following each deletion.  The list is now rebuilt instead.

        Raises ValueError when no entry matches.
        """
        kept = [entry for entry in self.Content
                if not (entry[0] == key and entry[1] == mask)]
        if len(kept) == len(self.Content):
            raise ValueError("pair (key,mask): ({}, {}) not found".format(key,mask))
        self.Content = kept
    def __str__(self):
        """Render the table: one line per entry, wildcard bits shown as '*'."""
        ret=[]
        printFormat='{{0:0{0}b}}'.format(self.EntryWidth)
        ret.append("number of entries {}".format(len(self.Content)))
        for (key,mask,pri,res,_) in self.Content:
            l=list(printFormat.format(key))
            # Replace every bit position where the mask is 1 by '*'.
            for i, bit in enumerate(printFormat.format(mask)):
                if bit == '1':
                    l[i]='*'
            ret.append("key : {}".format("".join(l)))
            ret.append("priority : {0}, result : {1}".format(pri,res))
        return "\n".join(ret)
    def __len__(self):
        """Return the number of entries currently stored."""
        return len(self.Content)
| 3.359375 | 3 |
month01/all_code/day17/exercise02.py | chaofan-zheng/tedu-python-demo | 4 | 12764772 | # Exercise: encapsulate the data from the three lists below into a list of Student objects
class Student:
    """Plain data holder for one student record."""

    def __init__(self, name="", age=0, sex=""):
        """
        :param name: student's name
        :param age: student's age in years
        :param sex: student's sex
        """
        self.name = name
        self.age = age
        self.sex = sex
# Source data: three parallel lists (name, age, sex) -- one student per index.
list_student_name = ["悟空", "八戒", "白骨精"]
list_student_age = [28, 25, 36]
list_student_sex = ["男", "男", "女"]
list_students = []
for item in zip(list_student_name, list_student_age, list_student_sex):
    # each tuple, e.g. ('悟空', 28, '男'), becomes one Student object
    # equivalent to: stu = Student(item[0], item[1], item[2])
    stu = Student(*item)
    list_students.append(stu)
# Inspect the data in the list (e.g. while debugging)
print(list_students)
| 4.34375 | 4 |
tests/test_feature_engineering.py | tvdboom/ATOM | 62 | 12764773 | # coding: utf-8
"""
Automated Tool for Optimized Modelling (ATOM)
Author: Mavs
Description: Unit tests for feature_engineering.py
"""
# Standard packages
import pandas as pd
import pytest
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import f_regression
# Own modules
from atom.feature_engineering import (
FeatureExtractor,
FeatureGenerator,
FeatureSelector,
)
from atom.utils import to_df
from .utils import X_bin, y_bin, X_class, y_class, X_reg, y_reg, X10_str, X10_dt
# Test FeatureExtractor ============================================ >>
def test_invalid_encoding_type():
"""Assert that an error is raised when encoding_type is invalid."""
with pytest.raises(ValueError, match=r".*the encoding_type parameter.*"):
FeatureExtractor(encoding_type="invalid").transform(X10_dt)
def test_invalid_features():
"""Assert that an error is raised when features are invalid."""
with pytest.raises(ValueError, match=r".*an attribute of pd.Series.dt.*"):
FeatureExtractor(features="invalid").transform(X10_dt)
def test_wrongly_converted_columns_are_ignored():
"""Assert that columns converted unsuccessfully are skipped."""
extractor = FeatureExtractor()
X = extractor.transform(X10_str)
assert "Feature 3" in X.columns
def test_datetime_features_are_used():
"""Assert that datetime64 features are used as is."""
X = to_df(X10_dt.copy())
X["Feature 3"] = pd.to_datetime(X["Feature 3"])
extractor = FeatureExtractor(features="day")
X = extractor.transform(X)
assert "Feature 3_day" in X.columns
assert "Feature 3" not in X.columns
def test_wrongly_converted_features_are_ignored():
"""Assert that wrongly converted features are ignored."""
extractor = FeatureExtractor(features=["tz", "is_leap_year", "day"])
X = extractor.transform(X10_dt)
assert "Feature 2_tz" not in X.columns # Not pd.Series.dt
def test_ordinal_features():
"""Assert that ordinal features are created."""
extractor = FeatureExtractor(features="day")
X = extractor.transform(X10_dt)
assert "Feature 3_day" in X.columns
assert "Feature 3" not in X.columns
def test_order_features():
"""Assert that the new features are in the order provided."""
extractor = FeatureExtractor()
X = extractor.transform(X10_dt)
assert X.columns.get_loc("Feature 3_day") == 2
assert X.columns.get_loc("Feature 3_month") == 3
assert X.columns.get_loc("Feature 3_year") == 4
@pytest.mark.parametrize("fxs", [
("microsecond", "%f"),
("second", "%S"),
("hour", "%H"),
("weekday", "%d/%m/%Y"),
("day", "%d/%m/%Y"),
("dayofyear", "%d/%m/%Y"),
("month", "%d/%m/%Y"),
("quarter", "%d/%m/%Y"),
])
def test_all_cyclic_features(fxs):
"""Assert that all cyclic columns create two features."""
extractor = FeatureExtractor(features=fxs[0], fmt=fxs[1], encoding_type="cyclic")
X = extractor.transform(X10_dt)
assert any(X.columns.str.contains(f"{fxs[0]}_cos"))
assert X.shape[1] == 4 + 1 # 2 new and og is dropped
def test_features_are_not_dropped():
"""Assert that features are kept when drop_columns=False."""
extractor = FeatureExtractor(drop_columns=False)
X = extractor.transform(X10_dt)
assert "Feature 3" in X.columns
# Test FeatureGenerator ============================================ >>
def test_n_features_parameter_negative():
"""Assert that an error is raised when n_features is negative."""
generator = FeatureGenerator(n_features=-2)
with pytest.raises(ValueError, match=r".*should be >0.*"):
generator.fit(X_bin, y_bin)
def test_population_parameter():
    """Assert that an error is raised when population is invalid."""
    generator = FeatureGenerator(strategy="gfg", population=30)
    # Context-manager form matches the rest of this file and pins the
    # exception to the fit call rather than the whole helper invocation.
    with pytest.raises(ValueError):
        generator.fit(X_reg, y_reg)
def test_generations_parameter():
    """Assert that an error is raised when generations is invalid."""
    generator = FeatureGenerator(strategy="gfg", generations=0)
    # Use the context-manager form of pytest.raises for consistency.
    with pytest.raises(ValueError):
        generator.fit(X_bin, y_bin)
def test_n_features_parameter_not_one_percent():
"""Assert that the n_features parameter is within 1% of population."""
generator = FeatureGenerator(strategy="gfg", n_features=23, population=200)
with pytest.raises(ValueError, match=r".*should be <1%.*"):
generator.fit(X_bin, y_bin)
def test_strategy_parameter():
"""Assert that the strategy parameter is either "DFS", "GFG" or "genetic"."""
generator = FeatureGenerator(strategy="invalid")
with pytest.raises(ValueError, match=r".*should be either 'dfs'.*"):
generator.fit(X_bin, y_bin)
def test_operators_parameter():
"""Assert that all operators are valid."""
generator = FeatureGenerator("GFG", n_features=None, operators=("div", "invalid"))
with pytest.raises(ValueError, match=r".*value in the operators.*"):
generator.fit(X_bin, y_bin)
def test_n_features_above_maximum():
"""Assert that n_features becomes maximum if more than maximum for "DFS"."""
generator = FeatureGenerator(n_features=1000, operators="log", random_state=1)
X = generator.fit_transform(X_bin, y_bin)
assert X.shape[1] == 60 # 30 og + 30 log
def test_genetic_non_improving_features():
"""Assert that the code doesn't fail if there are no new improving features."""
generator = FeatureGenerator(
strategy="gfg",
generations=5,
population=300,
operators="sqrt",
random_state=1,
)
_ = generator.fit_transform(X_reg, y_reg)
assert generator.genetic_features is None
def test_attribute_genetic_features():
"""Assert that the genetic_features attribute is created."""
generator = FeatureGenerator(
strategy="gfg",
generations=3,
population=200,
random_state=1,
)
_ = generator.fit_transform(X_bin, y_bin)
assert not generator.genetic_features.empty
def test_genetic_maximum_features():
"""Assert that the features are 1% of the population for n_features=None."""
generator = FeatureGenerator(
strategy="gfg",
n_features=None,
generations=4,
population=400,
random_state=1,
)
X = generator.fit_transform(X_bin, y_bin)
assert X.shape[1] == X_bin.shape[1] + 4
def test_updated_dataset():
"""Assert that the feature set contains the new features."""
generator = FeatureGenerator(
strategy="gfg",
n_features=1,
generations=4,
population=1000,
random_state=1,
)
X = generator.fit_transform(X_bin, y_bin)
assert X.shape[1] == X_bin.shape[1] + 1
generator = FeatureGenerator(strategy="dfs", n_features=None, random_state=1)
X = generator.fit_transform(X_bin, y_bin)
assert X.shape[1] > X_bin.shape[1]
# Test FeatureSelector ============================================= >>
def test_unknown_strategy_parameter():
    """Assert that an error is raised when strategy is unknown."""
    selector = FeatureSelector(strategy="invalid")
    # Use the context-manager form of pytest.raises for consistency.
    with pytest.raises(ValueError):
        selector.fit(X_reg, y_reg)
def test_solver_parameter_empty_univariate():
    """Assert that an error is raised when solver is None for univariate."""
    selector = FeatureSelector(strategy="univariate")
    # Use the context-manager form of pytest.raises for consistency.
    with pytest.raises(ValueError):
        selector.fit(X_reg, y_reg)
def test_raise_unknown_solver_univariate():
"""Assert that an error is raised when the solver is unknown."""
selector = FeatureSelector(strategy="univariate", solver="invalid")
pytest.raises(ValueError, selector.fit, X_reg, y_reg)
def test_solver_auto_PCA():
"""Assert that the solver is set to "auto" when None."""
selector = FeatureSelector(strategy="PCA", solver=None)
selector.fit(X_bin, y_bin)
assert selector._solver == "auto"
def test_solver_parameter_empty_SFM():
    """Assert that an error is raised when solver is None for SFM strategy."""
    selector = FeatureSelector(strategy="SFM", solver=None)
    # Use the context-manager form of pytest.raises for consistency.
    with pytest.raises(ValueError):
        selector.fit(X_reg, y_reg)
def test_goal_attribute():
    """Assert that the goal is deduced from the model's name."""
    # One case per task type: (solver name, X, y, expected goal).
    cases = [
        ("LGB_class", X_bin, y_bin, "class"),
        ("LGB_reg", X_reg, y_reg, "reg"),
    ]
    for solver_name, X, y, expected_goal in cases:
        selector = FeatureSelector(strategy="SFM", solver=solver_name)
        selector.fit(X, y)
        assert selector.goal == expected_goal
def test_solver_parameter_invalid_value():
    """Assert that an error is raised when solver is unknown."""
    selector = FeatureSelector(strategy="RFE", solver="invalid")
    # Use the context-manager form of pytest.raises for consistency.
    with pytest.raises(ValueError):
        selector.fit(X_reg, y_reg)
def test_n_features_parameter():
    """Assert that an error is raised when n_features is invalid."""
    selector = FeatureSelector(strategy="SFM", solver="XGB_reg", n_features=0)
    # Use the context-manager form of pytest.raises for consistency.
    with pytest.raises(ValueError):
        selector.fit(X_reg, y_reg)
def test_max_frac_repeated_parameter():
    """Assert that an error is raised when max_frac_repeated is invalid."""
    selector = FeatureSelector(strategy=None, max_frac_repeated=1.1)
    # Use the context-manager form of pytest.raises for consistency.
    with pytest.raises(ValueError):
        selector.fit(X_reg, y_reg)
def test_max_correlation_parameter():
    """Assert that an error is raised when max_correlation is invalid."""
    selector = FeatureSelector(strategy=None, max_correlation=-0.2)
    # Use the context-manager form of pytest.raises for consistency.
    with pytest.raises(ValueError):
        selector.fit(X_reg, y_reg)
def test_error_y_is_None():
    """Assert that an error is raised when y is None for some strategies."""
    selector = FeatureSelector(strategy="univariate", solver=f_regression, n_features=9)
    # Use the context-manager form of pytest.raises for consistency.
    with pytest.raises(ValueError):
        selector.fit(X_reg)
def test_remove_low_variance():
"""Assert that the remove_low_variance function works as intended."""
X = X_bin.copy()
X["invalid"] = 3 # Add column with minimum variance
selector = FeatureSelector(max_frac_repeated=1.0)
X = selector.fit_transform(X)
assert X.shape[1] == X_bin.shape[1]
def test_collinear_attribute():
"""Assert that the collinear attribute is created."""
selector = FeatureSelector(max_correlation=0.6)
assert hasattr(selector, "collinear")
def test_remove_collinear():
"""Assert that the remove_collinear function works as intended."""
selector = FeatureSelector(max_correlation=0.9)
X = selector.fit_transform(X_bin)
assert X.shape[1] == 20 # Originally 30
def test_univariate_strategy_custom_solver():
"""Assert that the univariate strategy works for a custom solver."""
selector = FeatureSelector("univariate", solver=f_regression, n_features=9)
X = selector.fit_transform(X_reg, y_reg)
assert X.shape[1] == 9
assert set(selector.feature_importance) == set(X.columns)
def test_PCA_strategy():
"""Assert that the PCA strategy works as intended."""
selector = FeatureSelector(strategy="PCA", n_features=0.7)
X = selector.fit_transform(X_bin)
assert X.shape[1] == 21
def test_PCA_components():
"""Assert that the PCA strategy creates components instead of features."""
selector = FeatureSelector(strategy="PCA")
X = selector.fit_transform(X_bin)
assert "Component 1" in X.columns
def test_SFM_prefit_invalid_estimator():
"""Assert that an error is raised for an invalid estimator in SFM."""
selector = FeatureSelector(
strategy="SFM",
solver=ExtraTreesClassifier(random_state=1).fit(X_class, y_class),
n_features=8,
random_state=1,
)
with pytest.raises(ValueError, match=r".*different columns than X.*"):
selector.fit(X_bin, y_bin)
def test_SFM_strategy_not_threshold():
"""Assert that if threshold is not specified, SFM selects n_features features."""
selector = FeatureSelector(
strategy="SFM",
solver=ExtraTreesClassifier(random_state=1),
n_features=16,
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 16
def test_SFM_invalid_solver():
"""Assert that an error is raised when solver is invalid."""
selector = FeatureSelector(strategy="SFM", solver="invalid", n_features=5)
with pytest.raises(ValueError, match=r".*Unknown model.*"):
selector.fit_transform(X_bin, y_bin)
def test_SFM_strategy_fitted_solver():
"""Assert that the SFM strategy works when the solver is already fitted."""
selector = FeatureSelector(
strategy="SFM",
solver=ExtraTreesClassifier(random_state=1).fit(X_bin, y_bin),
n_features=7,
random_state=1,
)
X = selector.fit_transform(X_bin)
assert X.shape[1] == 7
assert set(selector.feature_importance) == set(X.columns)
def test_SFM_strategy_not_fitted_solver():
"""Assert that the SFM strategy works when the solver is not fitted."""
selector = FeatureSelector(
strategy="SFM", solver=ExtraTreesClassifier(random_state=1), n_features=5
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 5
assert set(selector.feature_importance) == set(X.columns)
def test_RFE_strategy():
"""Assert that the RFE strategy works as intended."""
selector = FeatureSelector(
strategy="RFE",
solver=ExtraTreesClassifier(random_state=1),
n_features=13,
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 13
assert set(selector.feature_importance) == set(X.columns)
def test_RFECV_strategy_before_pipeline_classification():
"""Assert that the RFECV strategy works before a fitted pipeline."""
selector = FeatureSelector(
strategy="RFECV",
solver="RF_class",
n_features=None,
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 17
assert set(selector.feature_importance) == set(X.columns)
def test_RFECV_strategy_before_pipeline_regression():
"""Assert that the RFECV strategy works before a fitted pipeline."""
selector = FeatureSelector("RFECV", solver="RF_reg", n_features=16, random_state=1)
X = selector.fit_transform(X_reg, y_reg)
assert X.shape[1] == 10
assert set(selector.feature_importance) == set(X.columns)
def test_SFS_strategy():
"""Assert that the SFS strategy works."""
selector = FeatureSelector("SFS", solver="RF_reg", n_features=6, cv=3, random_state=1)
X = selector.fit_transform(X_reg, y_reg)
assert X.shape[1] == 6
def test_kwargs_parameter_threshold():
"""Assert that the kwargs parameter works as intended (add threshold)."""
selector = FeatureSelector(
strategy="SFM",
solver=ExtraTreesClassifier(random_state=1),
n_features=21,
threshold="mean",
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 10
def test_kwargs_parameter_tol():
"""Assert that the kwargs parameter works as intended (add tol)."""
selector = FeatureSelector(
strategy="PCA", solver="arpack", tol=0.001, n_features=12, random_state=1
)
X = selector.fit_transform(X_bin)
assert X.shape[1] == 12
def test_kwargs_parameter_scoring():
"""Assert that the kwargs parameter works as intended (add scoring acronym)."""
selector = FeatureSelector(
strategy="RFECV",
solver="rf_class",
scoring="auc",
n_features=12,
random_state=1,
)
X = selector.fit_transform(X_bin, y_bin)
assert X.shape[1] == 14
| 2.6875 | 3 |
test/conftest.py | larsewi/cfbs | 0 | 12764774 | <gh_stars>0
import os
import pytest
@pytest.fixture(scope="function")
def chdir(request):
    """Temporarily change into a subdirectory of the test folder.

    The target subdirectory name is supplied through indirect
    parametrization (``request.param``).  After the test, the working
    directory is restored to the directory pytest was invoked from.
    NOTE(review): ``request.config.invocation_dir`` is deprecated in
    recent pytest versions (``invocation_params.dir`` replaces it) --
    confirm the pinned pytest version before upgrading.
    """
    os.chdir(os.path.join(os.path.dirname(__file__), request.param))
    yield
    os.chdir(request.config.invocation_dir)
| 2.109375 | 2 |
gb_to_fasta/gb_to_fasta.py | willpatterson/ncbi_parser | 0 | 12764775 | from Bio import Entrez, SeqIO
import argparse
def gb_to_fasta(db_name, id_name, out_fasta):
    """Fetch a GenBank record from NCBI and write its features as FASTA.

    db_name   -- NCBI database to fetch from (e.g. "nucleotide")
    id_name   -- record identifier to fetch
    out_fasta -- path of the FASTA file to write

    Each feature (except the whole-record "source" feature) becomes one
    FASTA entry named "<type>_<location>" with its extracted sequence.
    """
    Entrez.email = "<EMAIL>"
    handle = Entrez.efetch(db=db_name, id=id_name, rettype="gb", retmode='text')
    try:
        genome = SeqIO.read(handle, 'genbank')
    finally:
        # Fix: the network handle was previously never closed (leak).
        handle.close()
    with open(out_fasta, "w") as ofasta:
        for feature in genome.features:
            if feature.type == "source":
                # Skip before extracting: the "source" feature spans the
                # whole record, so extracting it first was wasted work.
                continue
            ofasta.write(">{}_{}\n".format(feature.type, feature.location))
            ofasta.write("{}\n".format(str(feature.extract(genome.seq))))
def main():
    """Command-line entry point: parse arguments and run the conversion."""
    parser = argparse.ArgumentParser(description="Downloads a gb file from NCBI and converts it to fasta format")
    parser.add_argument("db_name", help="NCBI Database to download from")
    parser.add_argument("id_name", help="Species ID to download from")
    parser.add_argument("out_fasta", help="Name of the output fasta file")
    args = parser.parse_args()
    gb_to_fasta(args.db_name, args.id_name, args.out_fasta)
if __name__ == "__main__":
main()
| 2.8125 | 3 |
BlochSolver/QuantumSolvers/numerics/numerical_methods.py | prz3m37/RandomBenchmarking | 0 | 12764776 | <reponame>prz3m37/RandomBenchmarking
import numpy as np
from BlochSolver.QuantumSolvers.rotations import rotation_handler as rh
class NumericalMethods:
    """Class-level numerical helpers for gradient-based pulse optimization.

    All configuration lives in class attributes, so ``load_numerical_settings``
    must be called before any gradient method is used.
    NOTE(review): the gradient expressions (overlap of a backward operator
    with ``1j*dt*[H, forward]`` terms, identity Hamiltonian for the trailing
    ``idn_num`` steps) look like a GRAPE-style scheme -- confirm against the
    solver that consumes them.
    """

    n_shape = None  # NOTE(review): read by the gradient methods but never assigned here; presumably set by the solver -- confirm
    dt = None  # single pulse duration (settings["pulse_time"])
    h_bar = None  # reduced Planck constant (settings["h_bar"])
    h_k = None  # control Hamiltonian passed to load_numerical_settings
    idn_num = None  # number of trailing identity steps (num_settings["identities"])
    j_max = None  # detuning corresponding to num_settings["e_max"]
    j_min = None  # detuning corresponding to num_settings["e_min"]

    @classmethod
    def load_numerical_settings(cls, control_hamiltonian: np.array, settings: dict, num_settings: dict) -> None:
        """Cache solver configuration on the class.

        settings must provide "pulse_time" and "h_bar"; num_settings must
        provide "identities", "e_min" and "e_max".  The e_min/e_max energies
        are converted to detuning bounds via the RotationHandler.
        """
        cls.dt = settings["pulse_time"]
        cls.h_bar = settings["h_bar"]
        cls.idn_num = num_settings["identities"]
        cls.h_k = control_hamiltonian
        cls.j_min = rh.RotationHandler.get_pulse_detuning(num_settings["e_min"])
        cls.j_max = rh.RotationHandler.get_pulse_detuning(num_settings["e_max"])
        return

    @classmethod
    def get_inverse_matrix(cls, matrix: np.array):
        """Return the matrix inverse of ``matrix``."""
        return np.linalg.inv(matrix)

    @classmethod
    def get_commutator(cls, operator_a: np.array, operator_b: np.array):
        """Return the commutator [A, B] = AB - BA."""
        return np.dot(operator_a, operator_b) - np.dot(operator_b, operator_a)

    @classmethod
    def get_matrix_product(cls, operator_a: np.array, operator_b: np.array):
        """Return the Hilbert-Schmidt inner product Tr(A^dagger B)."""
        return np.trace(np.dot(np.conj(operator_a.T), operator_b))

    @classmethod
    def get_gradient(cls, back_operators: np.array, forward_operators: np.array):
        """Per-pulse gradient: -Re Tr(B_k^dagger, 1j*dt*[H, F_k]).

        The control Hamiltonian h_k is used for the first
        ``n_shape - idn_num`` steps and the identity for the trailing
        identity steps.
        """
        grad = np.array(
            [-1 * cls.get_matrix_product(back_op, 1j * cls.dt * cls.get_commutator(cls.h_k, fwd_op))
             if k < cls.n_shape - cls.idn_num
             else -1 * cls.get_matrix_product(back_op, 1j * cls.dt * cls.get_commutator(rh.RotationHandler.idn, fwd_op))
             for k, (back_op, fwd_op) in enumerate(zip(back_operators, forward_operators))])
        return np.real(grad)

    @classmethod
    def get_penalty_gradient(cls, backward_operators: np.array, forward_operators: np.array, detunings: np.array):
        """Same as get_gradient, minus a per-pulse out-of-range penalty term."""
        penalty_gradient = np.array(
            [-1 * cls.get_matrix_product(back_op, 1j * cls.dt * cls.get_commutator(cls.h_k, fwd_op)) -
             cls.__get_penalty(detunning) if k < cls.n_shape - cls.idn_num
             else
             -1 * cls.get_matrix_product(back_op,
                                         1j * cls.dt * cls.get_commutator(rh.RotationHandler.idn,
                                                                          fwd_op)) -
             cls.__get_penalty(detunning)
             for k, (back_op, fwd_op, detunning) in enumerate(zip(backward_operators, forward_operators, detunings))])
        return np.real(penalty_gradient)

    @classmethod
    def get_propagator_gradient(cls, backward_propagator: np.array, forward_propagator: np.array):
        """Gradient variant using plain operator products instead of commutators."""
        propagator_gradient = np.array(
            [-1 * cls.get_matrix_product(back_prop, 1j * cls.dt * rh.RotationHandler.get_dot_product(cls.h_k, fwd_prop))
             if k < cls.n_shape - cls.idn_num else
             -1 * cls.get_matrix_product(back_prop, 1j * cls.dt *
                                         rh.RotationHandler.get_dot_product(rh.RotationHandler.idn, fwd_prop))
             for k, (back_prop, fwd_prop) in enumerate(zip(backward_propagator, forward_propagator))])
        return np.real(propagator_gradient)

    @classmethod
    def get_penalty_propagator_gradient(cls, backward_propagator: np.array,
                                        forward_propagator: np.array, detunings: np.array):
        """Same as get_propagator_gradient, minus the out-of-range penalty term."""
        propagator_gradient = np.array(
            [-1 * cls.get_matrix_product(back_prop, 1j * cls.dt * rh.RotationHandler.get_dot_product(cls.h_k, fwd_prop))
             - cls.__get_penalty(detunning) if k < cls.n_shape - cls.idn_num else
             -1 * cls.get_matrix_product(back_prop, 1j * cls.dt *
                                         rh.RotationHandler.get_dot_product(rh.RotationHandler.idn, fwd_prop))
             - cls.__get_penalty(detunning) for k, (back_prop, fwd_prop, detunning)
             in enumerate(zip(backward_propagator, forward_propagator, detunings))])
        return np.real(propagator_gradient)

    @classmethod
    def __get_penalty(cls, j: float):
        """Sixth-power penalty for detunings outside [j_min, j_max]; 0 inside."""
        if j > cls.j_max:
            return (j - cls.j_max) ** 6  # np.log(np.abs(j-(1+cls.j_max)))
        elif j < cls.j_min:
            return (cls.j_min - j) ** 6  # np.log(-(j - (1+cls.j_min)))
        else:
            return 0

    @classmethod
    def get_density_operator(cls, vector_a: np.array):
        """Return the density operator |a><a| for a state vector."""
        return np.outer(vector_a, np.conj(vector_a))

    @classmethod
    def get_hermit_sequence(cls, operator_sequence: np.array):
        """Return the element-wise Hermitian conjugate of an operator sequence."""
        return np.array([np.conj(operator.T) for operator in operator_sequence])
| 2.109375 | 2 |
script/cal_overlap.py | zeta1999/SpinNet | 166 | 12764777 | <reponame>zeta1999/SpinNet
import os
from os.path import exists, join
import pickle
import numpy as np
import open3d
import cv2
import time
class ThreeDMatch(object):
    """
    Given point cloud fragments and corresponding pose in '{root}'.
    1. Save the aligned point cloud pts in '{savepath}/3DMatch_{downsample}_points.pkl'
    2. Calculate the overlap ratio and save in '{savepath}/3DMatch_{downsample}_overlap.pkl'
    3. Save the ids of anchor keypoints and positive keypoints in '{savepath}/3DMatch_{downsample}_keypts.pkl'
    """

    def __init__(self, root, savepath, split, downsample):
        """
        root       : dataset root containing one folder per scene
        savepath   : directory where the generated .pkl files are written
        split      : dataset split name, used to read 'scene_list_{split}.txt'
        downsample : voxel size used both for downsampling and as the
                     matching distance threshold
        """
        self.root = root
        self.savepath = savepath
        self.split = split
        self.downsample = downsample
        # dict: from id to pts.
        self.pts = {}
        # dict: from id_id to overlap_ratio
        self.overlap_ratio = {}
        # dict: from id_id to anc_keypts id & pos_keypts id
        self.keypts_pairs = {}

        with open(os.path.join(root, f'scene_list_{split}.txt')) as f:
            scene_list = f.readlines()
        self.ids_list = []
        self.scene_to_ids = {}
        for scene in scene_list:
            scene = scene.replace("\n", "")
            self.scene_to_ids[scene] = []
            # Each scene holds one or more 'seq-*' folders of .ply fragments.
            for seq in sorted(os.listdir(os.path.join(self.root, scene))):
                if not seq.startswith('seq'):
                    continue
                scene_path = os.path.join(self.root, scene + f'/{seq}')
                ids = [scene + f"/{seq}/" + str(filename.split(".")[0]) for filename in os.listdir(scene_path) if
                       filename.endswith('ply')]
                # Sort fragments numerically by their trailing index.
                ids = sorted(ids, key=lambda x: int(x.split("_")[-1]))
                self.ids_list += ids
                self.scene_to_ids[scene] += ids
                print(f"Scene {scene}, seq {seq}: num ply: {len(ids)}")
        print(f"Total {len(scene_list)} scenes, {len(self.ids_list)} point cloud fragments.")
        self.idpair_list = []
        self.load_all_ply(downsample)
        self.cal_overlap(downsample)

    def load_ply(self, data_dir, ind, downsample, aligned=True):
        """Load '{ind}.ply', voxel-downsample it and, when aligned=True,
        transform it by the ground-truth pose in '{ind}.pose.npy'."""
        pcd = open3d.io.read_point_cloud(join(data_dir, f'{ind}.ply'))
        pcd = open3d.geometry.PointCloud.voxel_down_sample(pcd, voxel_size=downsample)
        if aligned is True:
            matrix = np.load(join(data_dir, f'{ind}.pose.npy'))
            pcd.transform(matrix)
        return pcd

    def load_all_ply(self, downsample):
        """Load every fragment into self.pts, using a pickle cache on disk."""
        pts_filename = join(self.savepath, f'3DMatch_{self.split}_{downsample:.3f}_points.pkl')
        if exists(pts_filename):
            with open(pts_filename, 'rb') as file:
                self.pts = pickle.load(file)
            print(f"Load pts file from {self.savepath}")
            return
        self.pts = {}
        for i, anc_id in enumerate(self.ids_list):
            anc_pcd = self.load_ply(self.root, anc_id, downsample=downsample, aligned=True)
            points = np.array(anc_pcd.points)
            print(len(points))
            self.pts[anc_id] = points
            print('processing ply: {:.1f}%'.format(100 * i / len(self.ids_list)))
        with open(pts_filename, 'wb') as file:
            pickle.dump(self.pts, file)

    def get_matching_indices(self, anc_pts, pos_pts, search_voxel_size, K=None):
        """Return Nx2 (anchor_idx, positive_idx) pairs whose nearest-neighbor
        distance is below `search_voxel_size`.

        Matching uses an OpenCV brute-force L2 matcher over the raw float32
        xyz coordinates.  K is unused; kept for backward compatibility.
        """
        match_inds = []
        bf_matcher = cv2.BFMatcher(cv2.NORM_L2)
        match = bf_matcher.match(anc_pts, pos_pts)
        for match_val in match:
            if match_val.distance < search_voxel_size:
                match_inds.append([match_val.queryIdx, match_val.trainIdx])
        return np.array(match_inds)

    def cal_overlap(self, downsample):
        """Compute pairwise overlap ratios per scene and cache them on disk.

        Pairs with overlap > 0.30 also get their matching keypoint index
        pairs recorded in self.keypts_pairs.
        """
        overlap_filename = join(self.savepath, f'3DMatch_{self.split}_{downsample:.3f}_overlap.pkl')
        keypts_filename = join(self.savepath, f'3DMatch_{self.split}_{downsample:.3f}_keypts.pkl')
        if exists(overlap_filename) and exists(keypts_filename):
            # Fix: removed a leftover `import pdb; pdb.set_trace()` that
            # halted execution whenever the cached files were reloaded.
            with open(overlap_filename, 'rb') as file:
                self.overlap_ratio = pickle.load(file)
                print(f"Reload overlap info from {overlap_filename}")
            with open(keypts_filename, 'rb') as file:
                self.keypts_pairs = pickle.load(file)
                print(f"Reload keypts info from {keypts_filename}")
            return

        t0 = time.time()
        for scene, scene_ids in self.scene_to_ids.items():
            scene_overlap = {}
            print(f"Begin processing scene {scene}")
            # Only pairs within the same scene are compared.
            for i in range(0, len(scene_ids)):
                anc_id = scene_ids[i]
                for j in range(i + 1, len(scene_ids)):
                    pos_id = scene_ids[j]
                    anc_pts = self.pts[anc_id].astype(np.float32)
                    pos_pts = self.pts[pos_id].astype(np.float32)

                    try:
                        matching_01 = self.get_matching_indices(anc_pts, pos_pts, self.downsample)
                    except BaseException as e:
                        print(f"Something wrong with get_matching_indices {e} for {anc_id}, {pos_id}")
                        matching_01 = np.array([])
                    overlap_ratio = len(matching_01) / len(anc_pts)
                    scene_overlap[f'{anc_id}@{pos_id}'] = overlap_ratio
                    if overlap_ratio > 0.30:
                        self.keypts_pairs[f'{anc_id}@{pos_id}'] = matching_01.astype(np.int32)
                        self.overlap_ratio[f'{anc_id}@{pos_id}'] = overlap_ratio
                        print(f'\t {anc_id}, {pos_id} overlap ratio: {overlap_ratio}')
                print('processing {:s} ply: {:.1f}%'.format(scene, 100 * i / len(scene_ids)))
            print('Finish {:s}, Done in {:.1f}s'.format(scene, time.time() - t0))

        with open(overlap_filename, 'wb') as file:
            pickle.dump(self.overlap_ratio, file)
        with open(keypts_filename, 'wb') as file:
            pickle.dump(self.keypts_pairs, file)
| 2.359375 | 2 |
object_database/service_manager/Codebase.py | npang1/nativepython | 0 | 12764778 | # Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import object_database
from object_database import Schema, Indexed, Index, core_schema, SubscribeLazilyByDefault
from typed_python import *
import threading
from object_database.service_manager.ServiceSchema import service_schema
from typed_python.Codebase import Codebase as TypedPythonCodebase
#singleton state objects for the codebase cache
_codebase_lock = threading.Lock()
_codebase_cache = {}
_codebase_instantiation_dir = None
def setCodebaseInstantiationDirectory(directory, forceReset=False):
    """Called at program invocation to specify where we can instantiate codebases.

    The location may be set only once per process; a second call with a
    different directory triggers the assertion below.  Passing
    forceReset=True clears both the stored directory and the codebase cache
    before applying the new value.

    NOTE(review): the early-return compares the *raw* ``directory`` against
    the stored value, but the stored value is ``os.path.abspath(directory)``
    -- calling twice with the same relative path would hit the assertion.
    Confirm callers always pass absolute paths.
    """
    with _codebase_lock:
        global _codebase_instantiation_dir
        global _codebase_cache

        if forceReset:
            _codebase_instantiation_dir = None
            _codebase_cache = {}

        if _codebase_instantiation_dir == directory:
            return

        assert _codebase_instantiation_dir is None, "Can't modify the codebase instantiation location. (%s != %s)" % (
            _codebase_instantiation_dir,
            directory
        )

        _codebase_instantiation_dir = os.path.abspath(directory)
@service_schema.define
@SubscribeLazilyByDefault
class File:
    """Content-addressed file blob stored in the object database.

    Instances are deduplicated by the hex digest of their contents and are
    subscribed lazily, so ``contents`` is only fetched on demand.
    """
    # hex digest of `contents`; indexed so create() can deduplicate
    hash = Indexed(str)
    # raw file text
    contents = str

    @staticmethod
    def create(contents):
        """Return the existing File with this content hash, or create one."""
        hash = sha_hash(contents).hexdigest

        f = File.lookupAny(hash=hash)
        if f:
            return f
        else:
            return File(hash=hash, contents=contents)
@service_schema.define
@SubscribeLazilyByDefault
class Codebase:
    """A content-addressed snapshot of a project's source files.

    Deduplicated by the hash of its file mapping.  ``instantiate`` writes
    the files to disk (under the process-wide instantiation directory) and
    imports them as a TypedPythonCodebase, caching the result per hash.
    """
    hash = Indexed(str)

    #filename (at root of project import) to contents
    files = ConstDict(str, service_schema.File)

    @staticmethod
    def createFromRootlevelPath(rootPath):
        """Build a Codebase from the project rooted at `rootPath`."""
        return Codebase.createFromRootlevelPath.__wrapped__ if False else Codebase.createFromCodebase(
            TypedPythonCodebase.FromRootlevelPath(rootPath)
        )

    @staticmethod
    def createFromCodebase(codebase:TypedPythonCodebase):
        """Build a Codebase from an in-memory TypedPythonCodebase."""
        return Codebase.createFromFiles(codebase.filesToContents)

    @staticmethod
    def createFromFiles(files):
        """Build (or look up) a Codebase from a {path: contents-or-File} mapping."""
        assert files
        # Wrap raw strings in content-addressed File objects.
        files = {k: File.create(v) if not isinstance(v, File) else v for k,v in files.items()}
        hashval = sha_hash(files).hexdigest

        c = Codebase.lookupAny(hash=hashval)
        if c:
            return c

        return Codebase(hash=hashval, files=files)

    def instantiate(self, module_name=None):
        """Instantiate a codebase on disk and load it.

        Returns the loaded TypedPythonCodebase, or the named module within
        it when `module_name` is given.  Results are cached per hash under
        the module-level lock; setCodebaseInstantiationDirectory must have
        been called first (asserted below).
        """
        with _codebase_lock:
            assert _codebase_instantiation_dir is not None

            if self.hash not in _codebase_cache:
                try:
                    if not os.path.exists(_codebase_instantiation_dir):
                        os.makedirs(_codebase_instantiation_dir)
                except Exception as e:
                    # Best effort: Instantiate below will fail loudly if the
                    # directory really could not be created.
                    logging.getLogger(__name__).warn(
                        "Exception trying to make directory '%s'", _codebase_instantiation_dir)
                    logging.getLogger(__name__).warn(
                        "Exception: %s", e)

                disk_path = os.path.join(_codebase_instantiation_dir, self.hash)

                #preload the files, since they're lazy.
                object_database.current_transaction().db().requestLazyObjects(set(self.files.values()))

                fileContents = {fpath: file.contents for fpath, file in self.files.items()}

                _codebase_cache[self.hash] = TypedPythonCodebase.Instantiate(fileContents, disk_path)

            if module_name is None:
                return _codebase_cache[self.hash]

            return _codebase_cache[self.hash].getModuleByName(module_name)
| 1.84375 | 2 |
host/load.py | flowswitch/phison | 0 | 12764779 | <reponame>flowswitch/phison
"""load code"""
import sys
import PyScsi as drv
import Phison as ph
if len(sys.argv)!=2:
sys.exit("Load and start PRAM code file\nUsage: %s <file>" % (sys.argv[0]))
disk = ph.FindDrive()
if not disk:
sys.exit("No Phison devices found !")
drv.err_mode = drv.err_mode_raise
drv.open(disk)
print "Loading..."
ph.LoadBurner(sys.argv[1])
drv.close()
| 2.3125 | 2 |
insurance_claims/soa_app_data/soa_app_data.py | mlatcl/fbp-vs-oop | 6 | 12764780 | import requests
from insurance_claims.record_types import *
base_url = 'http://127.0.0.1:5000/'
class App():
    """Thin HTTP client that orchestrates the insurance-claims pipeline
    by calling the SOA endpoints exposed under ``base_url``."""

    def evaluate(self, save_dataset=False):
        """Run the full pipeline and return the parsed claim payouts."""
        valued = self._calculate_claims_value()
        value_classes = self._classify_claims_value(valued)
        complexity_classes = self._classify_claims_complexity(value_classes)
        self._update_claims_complexity(complexity_classes)
        payouts = self._calculate_payments(complexity_classes)
        if save_dataset:
            self._save_claims(payouts)
        return self.get_outputs(payouts)

    # Client to calculate claims value
    def _calculate_claims_value(self):
        response = requests.post(base_url + 'claim-request/calculate_claims_value', json={})
        return response.json()

    # Client to classify claims by value
    def _classify_claims_value(self, claims):
        response = requests.post(base_url + 'claim-request/classify_claims_value', json=claims)
        return response.json()

    # Client to classify claims by complexity
    def _classify_claims_complexity(self, classified_claims_value):
        response = requests.post(base_url + 'claim-request/classify_claims_complexity',
                                 json=classified_claims_value)
        return response.json()

    # Client to update claims by complexity (response body intentionally ignored)
    def _update_claims_complexity(self, classified_claims_complexity):
        requests.post(base_url + 'claim-request/update_claims_complexity',
                      json=classified_claims_complexity)

    # Client to calculate payments
    def _calculate_payments(self, classified_claims_complexity):
        response = requests.post(base_url + 'claim-request/calculate_payments',
                                 json=classified_claims_complexity)
        return response.json()

    # Client to save claims
    def _save_claims(self, claim_payouts):
        response = requests.post(base_url + 'claim-request/save_claims', json=claim_payouts)
        return response.json()

    def add_data(self, input_records):
        """Push raw input records to the claims service."""
        self._add_claims_requests(input_records)

    # Client to add claims data
    def _add_claims_requests(self, input_records):
        if len(input_records) > 0:
            payload = [record for record in input_records]
            requests.post(base_url + 'claim-request/add_claims', json=payload)
            # print(response.json())

    # Parsing data for main program
    def get_outputs(self, claim_payouts):
        return self._parse_claim_payouts(claim_payouts)

    # Parses payouts into ClaimPayout record objects
    def _parse_claim_payouts(self, claim_payouts):
        return [ClaimPayout.from_dict(entry) for entry in claim_payouts]
if __name__ == "__main__":
app = App()
| 2.46875 | 2 |
manage.py | afh/yabab | 1 | 12764781 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os  # NOTE(review): appears unused here -- confirm before removing
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from yabab import app, db
# Load deployment settings from the app_conf module.
app.config.from_object('app_conf')
# Wire up Flask-Migrate and expose its commands under the "db" subcommand.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
| 1.835938 | 2 |
pychron/pipeline/plot/panels/spectrum_panel.py | ael-noblegas/pychron | 0 | 12764782 | # ===============================================================================
# Copyright 2013 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Event
# ============= standard library imports ========================
from pychron.pipeline.plot.panels.figure_panel import FigurePanel
from pychron.pipeline.plot.plotter.spectrum import Spectrum
# ============= local library imports ==========================
from pychron.processing.analysis_graph import SpectrumGraph
class SpectrumPanel(FigurePanel):
    """Figure panel that hosts Spectrum figures on a SpectrumGraph."""
    _graph_klass = SpectrumGraph
    _figure_klass = Spectrum

    # make_alternate_figure_event = Event
    def _handle_figure_event(self, new):
        # new is a tuple whose first element names the event kind.
        kind = new[0]
        if kind == 'alternate_figure':
            groups = [fig.analysis_group for fig in self.figures]
            self.figure_event = (kind, new[1], groups)
        elif kind == 'tag':
            # Collect analyses that are not plateau steps of their group.
            tagged = []
            for fig in self.figures:
                group = fig.analysis_group
                for an in group.analyses:
                    if not group.get_is_plateau_step(an):
                        tagged.append(an)
            self.figure_event = ('tag', tagged)

    def _get_init_xlimits(self):
        return (None, 0, 100)

    def _make_graph_hook(self, g):
        g.on_trait_change(self._handle_figure_event, 'figure_event')
# ============= EOF =============================================
| 1.640625 | 2 |
models/__init__.py | ybCliff/VideoCaptioning | 3 | 12764783 |
from .joint_representation import Joint_Representaion_Learner
from .seq2seq import Seq2Seq
from .rnn import Hierarchical_Encoder#Encoder_Baseline, LSTM_Decoder
from .bert import BertEncoder, BertDecoder, NVADecoder, DirectDecoder, APDecoder, SignalDecoder, Signal3Decoder, Signal2Decoder, NVDecoder, MSDecoder, ARDecoder_with_attribute_generation, BeamDecoder
from .bert_pytorch import BertDecoder as BD
from .decoder import LSTM_Decoder, LSTM_GCC_Decoder, LSTM_Decoder_2stream, Top_Down_Decoder
from .encoder import Encoder_Baseline, Progressive_Encoder, SVD_Encoder, Input_Embedding_Layer, Semantics_Enhanced_IEL, HighWay_IEL, Encoder_HighWay, LEL
from .rnn import ENSEMBLE_Decoder
import torch
import torch.nn as nn
def get_preEncoder(opt, input_size):
    """Build the optional pre-encoder stage.

    Returns a (pre_encoder, output_size) pair: pre_encoder is None when
    neither 'use_preEncoder' nor 'use_SEIEL' is set, and output_size lists
    the per-modality feature dimensions after the stage (a fresh list).
    """
    pre_encoder = None
    out_dims = list(input_size)
    if opt.get('use_preEncoder', False):
        modality = opt['modality']
        iel_dim = opt.get('dim_iel', opt['dim_hidden'])
        pem = opt.get('preEncoder_modality', '')
        if pem:
            # Only the modalities named in pem are embedded; the rest skip.
            skip = [1] * len(modality)
            for ch in pem:
                idx = modality.index(ch)
                skip[idx] = 0
                out_dims[idx] = opt['dim_hidden']
        else:
            skip = [0] * len(modality)
            out_dims = [iel_dim for _ in input_size]
        pre_encoder = Input_Embedding_Layer(
            input_size=input_size,
            hidden_size=iel_dim,
            skip_info=skip,
            name=modality.upper()
        )
    if opt.get('use_SEIEL', False):
        # SEIEL takes precedence over the plain embedding layer above.
        out_dims = [opt['num_factor']] * len(input_size)
        pre_encoder = Semantics_Enhanced_IEL(
            input_size=input_size,
            semantics_size=opt['dim_s'],
            nf=opt['num_factor'],
            name=opt['modality'],
            multiply=opt.get('SEIEL_multiply', False)
        )
    return pre_encoder, out_dims
def get_encoder(opt, input_size, mapping, modality):
    """Build the encoder selected by opt['encoder_type'].

    input_size : per-modality feature dimensions (post pre-encoder).
    mapping    : modality char -> raw feature dimension.
    modality   : lower-cased modality string, e.g. 'ima'.
    """
    hidden_size = [opt['dim_hidden']] * len(modality)
    if opt['encoder_type'] == 'IPE':
        if opt.get('MLP', False):
            from .rnn import MLP
            encoder = MLP(sum(input_size), opt['dim_hidden'], 'a' in modality)
        elif opt.get('MSLSTM', False):
            from .rnn import Encoder_Baseline
            encoder = Encoder_Baseline(input_size=input_size, output_size=hidden_size, name=modality.upper(), encoder_type='mslstm')
        else:
            encoder = Hierarchical_Encoder(input_size = input_size, hidden_size = hidden_size, opt = opt)
    elif opt['encoder_type'] == 'IEL':
        encoder = HighWay_IEL(
            input_size=input_size,
            hidden_size=hidden_size,
            name=modality.upper(),
            dropout=opt['encoder_dropout']
        )
    elif opt['encoder_type'] == 'LEL':
        encoder = LEL(
            input_size=input_size,
            hidden_size=hidden_size,
            name=modality.upper(),
            dropout=opt['encoder_dropout']
        )
    elif opt['encoder_type'] == 'MME':
        # NOTE(review): MultiModalEncoder is not among this module's visible
        # imports -- confirm it is in scope before selecting 'MME'.
        encoder = MultiModalEncoder(
            input_size=input_size,
            hidden_size=opt['dim_hidden'],
            dropout=opt['encoder_dropout'],
            name=opt['modality'].upper(),
            multimodal_fusion_type=opt.get('multimodal_fusion_type', 'mean'),
            num_heads=opt.get('num_heads', 8),
            att_dropout=opt.get('att_dropout', 0.0),
            with_layernorm=opt.get('with_norm', True),
            shared_layernorm=opt.get('shared_layernorm', False),
            with_residual=opt.get('with_residual', True),
            pivot_idx=0,
            include_pivot=opt.get('include_pivot', False),
            n_frames=opt['n_frames'],
            watch=opt.get('mm_watch', 1)
        )
    elif opt['encoder_type'] == 'GRU':
        if opt.get('use_chain', False):
            encoder = Progressive_Encoder(
                input_size=input_size,
                output_size=hidden_size,
                opt=opt,
                return_gate_info=opt.get('return_gate_info', False)
            )
        else:
            # For each modality, collect the indices of the modalities that
            # act as its auxiliaries (per opt['auxiliary_for_<char>']).
            auxiliary_pos = []
            for char in modality:
                auxiliary_for_this_input = opt.get('auxiliary_for_%s'%char, '')
                pos = []
                for c in auxiliary_for_this_input:
                    pos.append(modality.index(c))
                auxiliary_pos.append(pos)
            skip_info = opt.get('skip_info', [])
            if not len(skip_info):
                # Default: no modality is skipped; cache back into opt so
                # later stages (e.g. the joint learner) see the same value.
                skip_info = [0] * len(modality)
                opt['skip_info'] = skip_info
            from models.encoder import Encoder_Baseline_TwoStream
            if opt.get('two_stream', False):
                E = Encoder_Baseline_TwoStream
                if 'a' in modality:
                    hidden_size[modality.index('a')] = opt.get('dim_hidden_a', opt['dim_hidden'])
            else:
                E = Encoder_Baseline
                #E = Encoder_HighWay
            if opt.get('use_svd', False):
                encoder = SVD_Encoder(
                    input_size=input_size,
                    output_size=hidden_size,
                    name=modality.upper(),
                    auxiliary_pos=auxiliary_pos,
                    skip_info=skip_info,
                    return_gate_info=opt.get('return_gate_info', False),
                    num_factor=opt['num_factor']
                )
            else:
                encoder = E(
                    input_size=input_size,
                    output_size=hidden_size,
                    name=modality.upper(),
                    auxiliary_pos=auxiliary_pos,
                    skip_info=skip_info,
                    return_gate_info=opt.get('return_gate_info', False),
                    opt=opt
                )
    else:
        # Fallback: BERT-style encoder, single modality only.
        assert len(modality) == 1
        encoder = BertEncoder(feats_size = mapping[modality], config = opt)
    return encoder
def get_joint_representation_learner(opt):
    """Create the joint-representation learner, sizing its input feature
    list to match what the selected encoder emits."""
    modality = opt['modality'].lower()
    dim = opt['dim_hidden']
    enc_type = opt['encoder_type']
    if enc_type == 'GRU':
        if opt.get('use_chain', False):
            if opt.get('chain_both'):
                sizes = [dim, dim]
            else:
                sizes = [dim]
        elif (opt['multi_scale_context_attention'] and not opt.get('query_all', False)) \
                or opt.get('addition', False) \
                or opt.get('gated_sum', False) \
                or opt.get('temporal_concat', False):
            # Fused variants collapse all modalities into one stream.
            sizes = [dim]
        elif opt.get('two_stream', False):
            sizes = [dim]
            if 'a' in opt['modality']:
                sizes.append(opt.get('dim_hidden_a', dim))
        else:
            # One stream per non-skipped modality; doubled when bidirectional.
            width = dim * 2 if opt.get('bidirectional', False) else dim
            sizes = [width] * (len(modality) - sum(opt['skip_info']))
    elif enc_type in ('IEL', 'LEL'):
        sizes = [dim] * len(modality)
    else:
        sizes = [dim]
    return Joint_Representaion_Learner(sizes, opt)
def get_decoder(opt):
    """Build the decoder selected by opt['decoder_type'] / opt['method'].

    NOTE(review): if decoder_type is neither LSTM/ENSEMBLE/ARFormer and
    opt['method'] matches none of the branches below, `decoder` is never
    bound and the return raises UnboundLocalError -- confirm all callers
    pass a supported combination.
    """
    if opt['decoder_type'] == 'LSTM':
        # LSTM family: variant chosen by boolean flags, checked in order.
        if opt.get('decoder_gcc', False):
            decoder = LSTM_GCC_Decoder(opt)
        elif opt.get('two_stream', False):
            decoder = LSTM_Decoder_2stream(opt)
        elif opt.get('top_down', False):
            decoder = Top_Down_Decoder(opt)
        else:
            decoder = LSTM_Decoder(opt)
    elif opt['decoder_type'] == 'ENSEMBLE':
        decoder = ENSEMBLE_Decoder(opt)
    elif opt['decoder_type'] == 'ARFormer':
        # Autoregressive transformer decoder.
        #decoder = BD(config=opt)
        if opt['method'] == 'ag':
            decoder = ARDecoder_with_attribute_generation(config=opt)
        else:
            decoder = BertDecoder(config=opt)
    else:
        # Non-autoregressive variants, dispatched on opt['method'].
        if opt['method'] == 'mp':
            decoder = BertDecoder(config=opt)
        elif opt['method'] == 'nva':
            decoder = NVADecoder(config=opt)
        elif opt['method'] == 'direct':
            decoder = DirectDecoder(config=opt)
        elif opt['method'] == 'ap':
            decoder = APDecoder(config=opt)
        elif opt['method'] == 'signal':
            decoder = SignalDecoder(config=opt)
        elif opt['method'] == 'signal3':
            decoder = Signal3Decoder(config=opt)
        elif opt['method'] == 'signal2':
            decoder = Signal2Decoder(config=opt)
        elif opt['method'] == 'nv':
            decoder = NVDecoder(config=opt)
        elif opt['method'] == 'ms':
            decoder = MSDecoder(config=opt)
    return decoder
def get_beam_decoder(opt, embedding):
    """Return a BeamDecoder when beam decoding is enabled, else None."""
    if not opt.get('use_beam_decoder', False):
        return None
    return BeamDecoder(opt, embedding)
def get_model(opt):
    """Assemble the full Seq2Seq captioning model from the option dict."""
    modality = opt['modality'].lower()
    # Raw feature dimension for each supported modality character.
    dim_of = {
        'i': opt['dim_i'],
        'm': opt['dim_m'],
        'a': opt['dim_a']
    }
    for ch in modality:
        assert ch in dim_of
    input_size = [dim_of[ch] for ch in modality]
    preEncoder, input_size = get_preEncoder(opt, input_size)
    encoder = get_encoder(opt, input_size, dim_of, modality)
    if opt.get('intra_triplet', False) or opt['encoder_type'] == 'MME':
        joint_representation_learner = None
    else:
        joint_representation_learner = get_joint_representation_learner(opt)
    if len(opt['crit']) == 1:
        # only the main task: language generation
        if not opt.get('use_beam_decoder', False) and not opt.get('use_rl', False):
            assert opt['crit'][0] == 'lang'
    have_auxiliary_tasks = sum(1 for item in opt['crit'] if item not in ['lang', 'tag'])
    # NOTE(review): Auxiliary_Task_Predictor is not among this module's
    # visible imports -- confirm it is in scope before enabling aux tasks.
    auxiliary_task_predictor = Auxiliary_Task_Predictor(opt) if have_auxiliary_tasks else None
    decoder = get_decoder(opt)
    tgt_word_prj = nn.Linear(opt["dim_hidden"], opt["vocab_size"], bias=False)
    beam_decoder = get_beam_decoder(opt, decoder.embedding)
    return Seq2Seq(
        preEncoder = preEncoder,
        encoder = encoder,
        joint_representation_learner = joint_representation_learner,
        auxiliary_task_predictor = auxiliary_task_predictor,
        decoder = decoder,
        tgt_word_prj = tgt_word_prj,
        beam_decoder = beam_decoder,
        opt = opt
    )
| 2.03125 | 2 |
testsuite/tests/NA17-007__Ada_runtime_units/run_test.py | AdaCore/style_checker | 2 | 12764784 | <gh_stars>1-10
def test_a_cohama_adb(style_checker):
    """a-cohama.adb must pass the style checker with no output."""
    style_checker.set_year(2006)
    result = style_checker.run_style_checker('trunk/gnat', 'a-cohama.adb')
    style_checker.assertEqual(result.status, 0, result.image)
    style_checker.assertRunOutputEmpty(result)
def test_a_cohamb_adb(style_checker):
    """a-cohamb.adb must be rejected for its missing copyright notice."""
    result = style_checker.run_style_checker('trunk/gnat', 'a-cohamb.adb')
    style_checker.assertNotEqual(result.status, 0, result.image)
    expected = """\
a-cohamb.adb: Copyright notice missing, must occur before line 24
"""
    style_checker.assertRunOutputEqual(result, expected)
def test_a_cohata_ads(style_checker):
    """a-cohata.ads must pass the style checker with no output."""
    style_checker.set_year(2006)
    result = style_checker.run_style_checker('trunk/gnat', 'a-cohata.ads')
    style_checker.assertEqual(result.status, 0, result.image)
    style_checker.assertRunOutputEmpty(result)
def test_a_except_ads(style_checker):
    """a-except.ads must be flagged for an out-of-date copyright year."""
    style_checker.set_year(2006)
    result = style_checker.run_style_checker('trunk/gnat', 'a-except.ads')
    style_checker.assertNotEqual(result.status, 0, result.image)
    expected = """\
a-except.ads:9: Copyright notice must include current year (found 2005, expected 2006)
"""
    style_checker.assertRunOutputEqual(result, expected)
def test_exceptions_ads(style_checker):
    """exceptions.ads must be flagged for an out-of-date copyright year."""
    style_checker.set_year(2006)
    result = style_checker.run_style_checker('trunk/gnat', 'exceptions.ads')
    style_checker.assertNotEqual(result.status, 0, result.image)
    expected = """\
exceptions.ads:9: Copyright notice must include current year (found 2005, expected 2006)
"""
    style_checker.assertRunOutputEqual(result, expected)
def test_a_zttest_ads(style_checker):
    """a-zttest.ads must pass the style checker with no output."""
    result = style_checker.run_style_checker('trunk/gnat', 'a-zttest.ads')
    style_checker.assertEqual(result.status, 0, result.image)
    style_checker.assertRunOutputEmpty(result)
def test_directio_ads(style_checker):
    """directio.ads must pass the style checker with no output."""
    result = style_checker.run_style_checker('trunk/gnat', 'directio.ads')
    style_checker.assertEqual(result.status, 0, result.image)
    style_checker.assertRunOutputEmpty(result)
def test_i_c_ads(style_checker):
    """i-c.ads must pass the style checker with no output."""
    result = style_checker.run_style_checker('trunk/gnat', 'i-c.ads')
    style_checker.assertEqual(result.status, 0, result.image)
    style_checker.assertRunOutputEmpty(result)
def test_s_taprop_linux_adb(style_checker):
    """s-taprop-linux.adb must pass the style checker with no output."""
    style_checker.set_year(2010)
    result = style_checker.run_style_checker('trunk/gnat', 's-taprop-linux.adb')
    style_checker.assertEqual(result.status, 0, result.image)
    style_checker.assertRunOutputEmpty(result)
| 2.40625 | 2 |
examples/yolo/yolo/__main__.py | surround-deakin-team/surround | 0 | 12764785 | import logging
from .wrapper import PipelineWrapper
import os
import json
logging.basicConfig(level=logging.INFO)
def main():
    """Run the wrapped pipeline once and persist its textual output."""
    wrapper = PipelineWrapper()
    config = wrapper.get_config()
    result = wrapper.run(json.dumps({"data": "hello"}))
    out_path = os.path.join(config["output_path"], "output.txt")
    with open(out_path, 'w') as handle:
        handle.write(result["output"])
    logging.info(result)
if __name__ == "__main__":
main()
| 2.4375 | 2 |
deuce/tests/test_files_model.py | BenjamenMeyer/deuce | 0 | 12764786 | <reponame>BenjamenMeyer/deuce
from deuce.tests import V1Base
from deuce.model import File
class TestFilesModel(V1Base):
    """Exercise construction and attribute access of the File model."""

    def setUp(self):
        super(TestFilesModel, self).setUp()
        # Work against a dedicated vault/file pair for this test class.
        vault = 'files_vault_test'
        file_id = 'My_Testing_File_Id'
        self._file = File('test_projectd_id', vault, file_id)

    def tests(self):
        # Touch each property so the accessors are exercised.
        for value in (self._file.file_id, self._file.vault_id, self._file.finalized):
            pass
| 2.25 | 2 |
Section 5/sha1.py | PacktPublishing/Hands-On-Cryptography-for-Python-Developers | 4 | 12764787 | <filename>Section 5/sha1.py
import hashlib
import struct
def hmac(message):
    """Oracle MAC: sha1(key || message) with a fixed secret key.

    This construction is deliberately insecure -- it is the target of the
    length-extension attack demonstrated below.  The bytes literal keeps
    Python 2 behaviour identical while also accepting bytes on Python 3.
    """
    return hashlib.sha1(b"keykeykey1" + message).digest()
class SHA1LengthExtension:
    """SHA-1 engine that can be seeded from an existing digest.

    Seeding the compression state from a known digest allows a classic
    length-extension attack against MACs of the form sha1(key || message):
    given only the digest, the known message, and the key *length*, we can
    forge a valid digest for message || glue-padding || new_data.

    NOTE(review): process() compresses a single 64-byte chunk only, so the
    data passed to extend() must fit into one padded block (<= ~55 bytes).
    """

    digest_size = 20  # SHA-1 emits a 160-bit (20-byte) digest
    block_size = 64   # SHA-1 compresses 512-bit (64-byte) blocks

    def __init__(self, digest, data, key_len):
        """Seed the state for a message of the form key || data.

        digest  -- 20-byte digest previously emitted by the oracle
        data    -- the known part of the message the oracle hashed
        key_len -- length in bytes of the unknown secret prefix
        """
        # Recover the five 32-bit state words from the known digest.
        self.h0 = struct.unpack(b'>I', digest[0:4])[0]
        self.h1 = struct.unpack(b'>I', digest[4:8])[0]
        self.h2 = struct.unpack(b'>I', digest[8:12])[0]
        self.h3 = struct.unpack(b'>I', digest[12:16])[0]
        self.h4 = struct.unpack(b'>I', digest[16:20])[0]
        # set the length of the unknown key
        self.key_len = key_len
        # Zero placeholders stand in for the unknown key bytes.  BUGFIX:
        # bytes literal (identical on Python 2) so the class also works
        # with bytes input on Python 3.
        self.message = b"\x00" * self.key_len + data
        self.total_message_length = 0
        # Pad once so self.message mirrors exactly what the oracle hashed
        # (key + data + glue padding); this also reveals the message we
        # must later feed the oracle for verification.
        self.message = self.append(self.message)

    def extend(self, new_data):
        """Append *new_data* after the original padded message.

        Returns (forged_digest, forged_message): the oracle will hash
        key || forged_message to forged_digest.
        """
        # The running bit length must account for the key plus the known
        # message, rounded up to a whole 64-byte block (self.message is
        # already padded, so this equals len(self.message)).
        self.total_message_length = -(int(-len(self.message) // 64)) * 64
        chunk = self.append(new_data)
        return self.process(chunk), self.message[self.key_len:] + new_data

    def append(self, message):
        """Return *message* with standard SHA-1 padding attached."""
        self.total_message_length += len(message)
        message_length = len(message)
        message += b'\x80'  # mandatory 1-bit terminator
        # Zero-fill so the 8-byte length field ends the 64-byte block.
        message += b'\x00' * ((56 - (message_length + 1) % 64) % 64)
        message_bit_length = self.total_message_length * 8
        message += struct.pack(b'>Q', message_bit_length)
        return message

    def process(self, chunk):
        """Run the SHA-1 compression function over one 64-byte chunk and
        return the resulting 20-byte digest."""
        w = [0] * 80
        for i in range(16):
            w[i] = struct.unpack(b'>I', chunk[i * 4:i * 4 + 4])[0]
        # message expansion
        for i in range(16, 80):
            w[i] = self.rotate_left(
                w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16], 1
            )
        a = self.h0
        b = self.h1
        c = self.h2
        d = self.h3
        e = self.h4
        # BUGFIX: was xrange (Python 2 only), inconsistent with the range
        # calls above; range behaves identically here on both versions.
        for i in range(80):
            if 0 <= i <= 19:
                f = d ^ (b & (c ^ d))
                k = 0x5a827999
            elif 20 <= i <= 39:
                f = b ^ c ^ d
                k = 0x6eD9eba1
            elif 40 <= i <= 59:
                f = (b & c) | (b & d) | (c & d)
                k = 0x8F1bbcdc
            elif 60 <= i <= 79:
                f = b ^ c ^ d
                k = 0xca62C1D6
            a, b, c, d, e = ((
                self.rotate_left(a, 5) + f + e + k + w[i]) & 0xffffffff,
                a,
                self.rotate_left(b, 30),
                c,
                d
            )
        h0 = struct.pack(b'>I', (self.h0 + a) & 0xffffffff)
        h1 = struct.pack(b'>I', (self.h1 + b) & 0xffffffff)
        h2 = struct.pack(b'>I', (self.h2 + c) & 0xffffffff)
        h3 = struct.pack(b'>I', (self.h3 + d) & 0xffffffff)
        h4 = struct.pack(b'>I', (self.h4 + e) & 0xffffffff)
        return h0 + h1 + h2 + h3 + h4

    def rotate_left(self, n, b):
        """Rotate the 32-bit integer *n* left by *b* bits."""
        return ((n << b) | (n >> (32 - b))) & 0xffffffff
if __name__ == '__main__':
    # NOTE(review): this demo is Python 2 only -- the "hex" codec and
    # str arguments below do not exist / type-check on Python 3.
    # the length-extension with sha1 constants set as initial digest
    # reduces to normal sha1, i.e., using the algorithm for length extension,
    # we can compute normal sha1 digests.
    s = SHA1LengthExtension(
        "67452301efcdab8998badcfe10325476c3d2e1f0".decode("hex"), "", 0
    )
    # this is a test to make sure the length-extension code works as
    # it should.
    assert(
        s.process(s.append("hello")) == hashlib.sha1("hello").digest()
    )
    # first, we compute the bad hmac using the key+message concatentation
    # construction, i.e., we find the initial constants for sha1.
    digest = hmac("hello")
    # feeding the unknown key length to the length extension, along with
    # the known message (note, the key length can be bruteforced if given
    # access to an oracle, which usually is the case in reality)
    s = SHA1LengthExtension(digest, "hello", 10)
    # extend the hmac with additional data and verify against oracle
    new_digest, m = s.extend("file")
    assert(hmac(m) == new_digest)
| 3.21875 | 3 |
moviepy/audio/AudioClip.py | antiboredom/moviepy | 1 | 12764788 | import sys
from copy import copy
import numpy as np
from moviepy.audio.io.ffmpeg_audiowriter import ffmpeg_audiowrite
from moviepy.decorators import requires_duration
from moviepy.Clip import Clip
# On Python 2, alias range to the lazy xrange so loops over large ranges
# do not materialize a full list; Python 3's range is already lazy.
if sys.version_info < (3,):
    range = xrange
class AudioClip(Clip):
    """Base class for audio clips.

    See ``SoundClip`` and ``CompositeSoundClip`` for usable classes.

    An AudioClip is a Clip with a ``get_frame`` attribute of
    the form `` t -> [ f_t ]`` for mono sound and
    ``t-> [ f1_t, f2_t ]`` for stereo sound (the arrays are Numpy arrays).

    The `f_t` are floats between -1 and 1. These bounds can be
    trespassed without problems (the program will put the
    sound back into the bounds at conversion time, without much impact).

    Parameters
    -----------

    get_frame
      A function `t-> frame at time t`. The frame does not mean much
      for a sound, it is just a float. What 'makes' the sound are
      the variations of that float in the time.

    nchannels
      Number of channels (one or two for mono or stereo).

    Examples
    ---------

    >>> # Plays the note A (a sine wave of frequency 404HZ)
    >>> import numpy as np
    >>> gf = lambda t : 2*[ np.sin(404 * 2 * np.pi * t) ]
    >>> clip = AudioClip().set_get_frame(gf)
    >>> clip.set_duration(5).preview()

    """

    def __init__(self, get_frame = None):
        Clip.__init__(self)
        if get_frame:
            self.get_frame = get_frame
            frame0 = self.get_frame(0)
            # An iterable frame carries one float per channel; a bare
            # float means mono.
            if hasattr(frame0, '__iter__'):
                self.nchannels = len(list(frame0))
            else:
                self.nchannels = 1

    @requires_duration
    def to_soundarray(self,tt=None,fps=None, nbytes=2):
        """
        Transforms the sound into an array that can be played by pygame
        or written in a wav file. See ``AudioClip.preview``.

        Parameters
        ------------

        fps
          Frame rate of the sound for the conversion.
          44100 for top quality.

        nbytes
          Number of bytes to encode the sound: 1 for 8bit sound,
          2 for 16bit, 4 for 32bit sound.

        """
        if tt is None:
            # Sample the whole clip at the requested rate.
            tt = np.arange(0,self.duration, 1.0/fps)
        #print tt.max() - tt.min(), tt.min(), tt.max()
        snd_array = self.get_frame(tt)
        # Clamp to (-0.99, 0.99) before integer quantization so the scaled
        # values cannot overflow the target integer type.
        snd_array = np.maximum(-0.99,
                               np.minimum(0.99,snd_array))
        # Map the requested byte width to the matching numpy integer dtype.
        inttype = {1:'int8',2:'int16', 4:'int32'}[nbytes]
        return (2**(8*nbytes-1)*snd_array).astype(inttype)

    @requires_duration
    def to_audiofile(self,filename, fps=44100, nbytes=2,
                     buffersize=2000, codec='libvorbis',
                     bitrate=None, verbose=True):
        """Write the clip to an audio file through ffmpeg.

        codecs = { 'libmp3lame': 'mp3',
                   'libvorbis':'ogg',
                   'libfdk_aac':'m4a',
                   'pcm_s16le':'wav',
                   'pcm_s32le': 'wav'}
        """
        return ffmpeg_audiowrite(self,filename, fps, nbytes, buffersize,
                                 codec, bitrate, verbose)
class AudioArrayClip(AudioClip):
    """
    An audio clip made from a sound array.

    Parameters
    -----------

    array
      A Numpy array representing the sound, of size Nx1 for mono,
      Nx2 for stereo.

    fps
      Frames per second : speed at which the sound is supposed to be
      played.

    """

    def __init__(self, array, fps):

        Clip.__init__(self)
        self.array = array
        self.fps = fps
        self.duration = 1.0 * len(array) / fps

        def get_frame(t):
            """ complicated, but must be able to handle the case where t
            is a list of the form sin(t) """
            if isinstance(t, np.ndarray):
                array_inds = (self.fps*t).astype(int)
                # BUGFIX: use >= 0 so the first sample (index 0) is kept;
                # the scalar branch below already treats index 0 as valid.
                in_array = (array_inds >= 0) & (array_inds < len(self.array))
                # BUGFIX: allocate with the array's real channel count
                # instead of hard-coding stereo, so mono (Nx1) input keeps
                # a consistent shape (assumes a 2-D array per the docstring).
                result = np.zeros((len(t), self.array.shape[1]))
                result[in_array] = self.array[array_inds[in_array]]
                return result
            else:
                i = int(self.fps * t)
                # Out-of-range times produce silence with the right shape.
                if i < 0 or i >= len(self.array):
                    return 0*self.array[0]
                else:
                    return self.array[i]
        self.get_frame = get_frame
        self.nchannels = len(list(self.get_frame(0)))
class CompositeAudioClip(AudioClip):
    """ Clip made by composing several AudioClips.

    An audio clip made by putting together several audio clips.

    Parameters
    ------------

    clips
      List of audio clips, which may start playing at different times or
      together. If all have their ``duration`` attribute set, the
      duration of the composite clip is computed automatically.

    """

    def __init__(self, clips):
        Clip.__init__(self)
        self.clips = clips
        ends = [c.end for c in self.clips]
        self.nchannels = max([c.nchannels for c in self.clips])
        # Only derive duration/end when every sub-clip has an end time.
        if not any([(e is None) for e in ends]):
            self.duration = max(ends)
            self.end = max(ends)

        def get_frame(t):
            # NOTE(review): marked "buggy" by the original author -- the
            # mixing below is unverified; treat with care.
            played_parts = [c.is_playing(t) for c in self.clips]
            # Mask each clip's frame by whether it is playing at time t.
            sounds= [c.get_frame(t - c.start)*np.array([part]).T
                     for c,part in zip(self.clips, played_parts)
                     if (part is not False) ]
            # Zero baseline shaped for either scalar or vectorized t.
            if isinstance(t,np.ndarray):
                zero = np.zeros((len(t),self.nchannels))
            else:
                zero = np.zeros(self.nchannels)
            return zero + sum(sounds)
        self.get_frame = get_frame
| 3.109375 | 3 |
src/wavestate/utilities/arguments/pyargparse.py | wavestate/wavestate-utilities | 0 | 12764789 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import argparse
from ..utilities.strings import padding_remove
def kwdict_argparse(ap, kwdict, groups_kw=None):
    """Populate the argparse parser *ap* from a keyword-description dict.

    Parameters
    ----------
    ap : argparse.ArgumentParser
        Parser to add arguments to; returned for chaining.
    kwdict : dict
        Maps option name -> description dict.  "AP*" keys control argparse
        behaviour (APtype, APaction, APdefault, ...); "about" supplies the
        help text; "APgroup"/"APpriority" control grouping and ordering.
    groups_kw : dict or None
        Maps group name -> description dict ("APpriority" orders groups,
        "about" becomes the argparse group description).
    """
    if groups_kw is None:  # BUGFIX: avoid a shared mutable default argument
        groups_kw = {}
    groups = dict()
    groups_prio = dict()
    # Explicitly declared group priorities.
    for gname, gdict in groups_kw.items():
        APpriority = gdict.get("APpriority", None)
        if APpriority is not None:
            groups_prio[gname] = APpriority
    # Bucket options into their groups; a group inherits the smallest
    # priority among its members unless one was declared above.
    for hname, hdict in kwdict.items():
        if hdict.get("APignore", False):
            continue
        group = hdict.get("APgroup", None)
        groups.setdefault(group, set()).add(hname)
        gprio = groups_prio.get(group, float("inf"))
        hprio = hdict.get("APpriority", float("inf"))
        if hprio < gprio:
            gprio = hprio
        groups_prio[group] = gprio
    gnames = list(groups_prio.keys())
    # BUGFIX: the previous key mixed tuples with floats and compared None
    # with str, both of which raise TypeError on Python 3 when priorities
    # tie; this key is total while preserving the original ordering
    # (the None-named group sorts before named groups at equal priority).
    gnames.sort(key=lambda g: (groups_prio[g], g is not None, g or ""))
    for gname in gnames:
        # BUGFIX: a group declared in groups_kw but used by no option used
        # to raise KeyError here; treat it as empty instead.
        hset = groups.get(gname, set())
        gkw = dict()
        if gname is None:
            # Ungrouped options attach directly to the parser.
            apG = ap
        else:
            gdict = groups_kw.get(gname, None)
            if gdict is not None:
                ghelp = gdict.get("about", None)
                if ghelp is not None:
                    gkw["description"] = padding_remove(ghelp)
            apG = ap.add_argument_group(gname, **gkw)
        hlist = list(hset)
        hlist.sort(key=lambda h: (kwdict[h].get("APpriority", float("inf")), h))
        for hname in hlist:
            hdict = kwdict[hname]
            # NOTE(review): looks up hdict with the option's own name as
            # key -- presumably an aliasing hook; confirm before changing.
            name = hdict.get(hname, hname)
            APshort = hdict.get("APshort", None)
            APlong = "--{}".format(name)
            APflags = list(hdict.get("APflags", [APlong]))
            if APshort is not None:
                APflags = [APshort] + APflags
            APhide = hdict.get("APhide", False)
            if APhide:
                helptext = argparse.SUPPRESS
            else:
                # Escape % so argparse's %-formatting does not choke.
                helptext = hdict.get("about", "<needs doc>").replace("%", "%%")
                helptext = padding_remove(helptext)
            APkw = dict(
                dest=name,
                help=helptext,
            )
            # Forward each optional argparse keyword only when present.
            # BUGFIX: removed the dead, typo'd lookup of "APchoices " which
            # would have injected an invalid "choices " (trailing space)
            # keyword; the correct APchoices handling below remains.
            APaction = hdict.get("APaction", None)
            if APaction is not None:
                APkw["action"] = APaction
            APtype = hdict.get("APtype", None)
            if APtype is not None:
                APkw["type"] = APtype
            APrequired = hdict.get("APrequired", None)
            if APrequired is not None:
                APkw["required"] = APrequired
            APnargs = hdict.get("APnargs", None)
            if APnargs is not None:
                APkw["nargs"] = APnargs
            APmetavar = hdict.get("APmetavar", None)
            if APmetavar is not None:
                APkw["metavar"] = APmetavar
            APchoices = hdict.get("APchoices", None)
            if APchoices is not None:
                APkw["choices"] = APchoices
            APconst = hdict.get("APconst", None)
            if APconst is not None:
                APkw["const"] = APconst
            APdefault = hdict.get("APdefault", None)
            if APdefault is not None:
                APkw["default"] = APdefault
            else:
                # SUPPRESS keeps unset options out of the namespace.
                APkw["default"] = argparse.SUPPRESS
            APpositional = hdict.get("APpositional", False)
            if not APpositional:
                apG.add_argument(*APflags, **APkw)
                # add the aliases to parse, but with their help suppressed
                for aname in hdict.get("aliases", []):
                    APkw["help"] = argparse.SUPPRESS
                    apG.add_argument("--{}".format(aname), **APkw)
                for aname in hdict.get("aliases_bad", []):
                    APkw["help"] = argparse.SUPPRESS
                    apG.add_argument("--{}".format(aname), **APkw)
            else:
                # Positional arguments take the dest as their name.
                dest = APkw.pop("dest")
                apG.add_argument(dest, **APkw)
    return ap
| 2.15625 | 2 |
shell/myReadLine.py | utep-cs-systems-courses/os-shell-LailaMRomero | 0 | 12764790 | <reponame>utep-cs-systems-courses/os-shell-LailaMRomero<gh_stars>0
#! /usr/bin/env python3
#! /usr/bin/env python3
from os import read
ibuf = "" # Input buffer,
sbuf = "" # String buffer
sbufLength = 0 # String buffer length
currChar = 0 # Index of current char in sbuf
def getChar():
    """Return the next character from stdin, refilling the buffer as
    needed.  Returns '' once end-of-input is reached."""
    global ibuf
    global sbuf
    global sbufLength
    global currChar
    if currChar == sbufLength: # If we reached the end of sbuf, get a new line and reset values
        ibuf = read(0, 100) # Read up to 100 bytes from stdin (file descriptor 0)
        sbuf = ibuf.decode()
        sbufLength = len(sbuf) # The length of the string
        currChar = 0
        if sbufLength == 0: # If we reached the end of the input then it would return nothing
            return ''
    char = sbuf[currChar]
    currChar += 1
    return char
def readLine():
    """Accumulate characters from getChar() into one line.

    Returns the line including its trailing newline, or the partial line
    without a newline when end-of-input is reached first.
    """
    line = ""
    ch = getChar()
    # Stop on newline or on the empty string that signals EOF.
    while ch not in ('\n', ''):
        line += ch
        ch = getChar()
    if ch == '\n':
        line += '\n'
    return line
def main():
    # NOTE(review): readLine's return value is discarded, so this reads one
    # line and does nothing visible -- confirm whether output was intended.
    readLine()
# Script entry point.
if '__main__' == __name__:
    main()
| 3.515625 | 4 |
swamp/search/tests/test_searchtarget.py | rigdenlab/SWAMP | 2 | 12764791 | import os
import swamp
import unittest
import joblib
from operator import itemgetter
from swamp.utils import remove, create_tempfile
from swamp.search.searchtarget import SearchTarget
# Minimal TOPCONS output fixture. In the topology string: i = inside,
# o = outside, M = membrane-spanning residue.
TOPCONS_DUMY = """TOPCONS predicted topology:
iiiiiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMooooooMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMooooooMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMoooooooooooMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMoooooooMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiiiiiiiiiiiiiMMMMMMMMMMMMMMMMMMMMMoMMMMMMMMMMMMMMMMMMMMMiiiiiiiiiiiiiiiiiiii
"""
PDB_DUMY = """CRYST1 73.330 73.330 163.520 90.00 90.00 90.00 P 41 2 2 8
REMARK 465
REMARK 465 MISSING RESIDUES
REMARK 465 THE FOLLOWING RESIDUES WERE NOT LOCATED IN THE
REMARK 465 EXPERIMENT. (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN
REMARK 465 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)
REMARK 465
REMARK 465 M RES C SSSEQI
REMARK 465 MET A -4
REMARK 465 VAL A -3
REMARK 465 ALA A -2
REMARK 465 ALA A -1
REMARK 465 SER A 0
REMARK 465 MET A 1
REMARK 465 GLY A 98
REMARK 465 LYS A 99
REMARK 465 HIS A 212
REMARK 465 LYS A 215
ATOM 760 N VAL A 100 17.668 61.385 96.142 1.00 36.12 N
ANISOU 760 N VAL A 100 4189 5832 3703 370 -20 96 N
ATOM 761 CA VAL A 100 16.510 62.175 95.720 1.00 34.76 C
ANISOU 761 CA VAL A 100 3981 5676 3550 300 62 84 C
ATOM 762 C VAL A 100 16.924 63.214 94.641 1.00 39.15 C
ANISOU 762 C VAL A 100 4461 6274 4139 307 77 -9 C
ATOM 763 O VAL A 100 16.205 63.379 93.656 1.00 38.11 O
ANISOU 763 O VAL A 100 4288 6134 4059 275 108 -15 O
ATOM 764 CB VAL A 100 15.715 62.769 96.916 1.00 37.75 C
ANISOU 764 CB VAL A 100 4379 6111 3852 257 129 130 C
ATOM 765 CG1 VAL A 100 14.623 63.727 96.450 1.00 36.89 C
ANISOU 765 CG1 VAL A 100 4216 6025 3776 215 217 110 C
ATOM 766 CG2 VAL A 100 15.112 61.661 97.786 1.00 38.05 C
ANISOU 766 CG2 VAL A 100 4485 6113 3858 228 124 244 C
ATOM 767 N GLY A 101 18.105 63.825 94.809 1.00 36.09 N
ANISOU 767 N GLY A 101 4052 5944 3718 343 50 -70 N
ATOM 768 CA GLY A 101 18.670 64.791 93.867 1.00 34.68 C
ANISOU 768 CA GLY A 101 3805 5805 3566 340 63 -145 C
ATOM 769 C GLY A 101 18.998 64.193 92.514 1.00 37.41 C
ANISOU 769 C GLY A 101 4110 6137 3967 361 26 -177 C
ATOM 770 O GLY A 101 18.818 64.843 91.481 1.00 35.74 O
ANISOU 770 O GLY A 101 3843 5954 3784 335 57 -198 O
ATOM 771 N VAL A 102 19.463 62.931 92.513 1.00 34.96 N
ANISOU 771 N VAL A 102 3830 5784 3671 410 -36 -177 N
ATOM 772 CA VAL A 102 19.819 62.187 91.297 1.00 34.18 C
ANISOU 772 CA VAL A 102 3699 5666 3623 436 -67 -233 C
ATOM 773 C VAL A 102 18.531 61.710 90.593 1.00 37.41 C
ANISOU 773 C VAL A 102 4118 6023 4073 373 -37 -212 C
ATOM 774 O VAL A 102 18.409 61.831 89.370 1.00 35.53 O
ANISOU 774 O VAL A 102 3822 5829 3850 347 -28 -263 O
ATOM 775 CB VAL A 102 20.820 61.047 91.624 1.00 38.45 C
ANISOU 775 CB VAL A 102 4268 6161 4180 528 -137 -249 C
ATOM 776 CG1 VAL A 102 21.126 60.185 90.399 1.00 38.40 C
ANISOU 776 CG1 VAL A 102 4237 6117 4236 561 -155 -331 C
ATOM 777 CG2 VAL A 102 22.111 61.608 92.229 1.00 37.89 C
ANISOU 777 CG2 VAL A 102 4155 6188 4054 582 -172 -273 C
ATOM 778 N ILE A 103 17.542 61.236 91.381 1.00 34.36 N
ANISOU 778 N ILE A 103 3794 5569 3692 337 -19 -133 N
ATOM 779 CA ILE A 103 16.260 60.794 90.844 1.00 33.66 C
ANISOU 779 CA ILE A 103 3704 5449 3636 259 10 -110 C
ATOM 780 C ILE A 103 15.544 61.966 90.187 1.00 37.70 C
ANISOU 780 C ILE A 103 4134 6061 4131 214 61 -102 C
ATOM 781 O ILE A 103 15.031 61.813 89.070 1.00 37.84 O
ANISOU 781 O ILE A 103 4097 6118 4163 169 63 -129 O
ATOM 782 CB ILE A 103 15.417 60.020 91.896 1.00 37.07 C
ANISOU 782 CB ILE A 103 4214 5798 4074 220 22 -17 C
ATOM 783 CG1 ILE A 103 16.062 58.633 92.170 1.00 37.34 C
ANISOU 783 CG1 ILE A 103 4330 5703 4156 266 -32 -15 C
ATOM 784 CG2 ILE A 103 13.920 59.876 91.451 1.00 37.66 C
ANISOU 784 CG2 ILE A 103 4258 5883 4167 115 67 14 C
ATOM 785 CD1 ILE A 103 15.598 57.949 93.432 1.00 47.42 C
ANISOU 785 CD1 ILE A 103 5694 6900 5425 246 -28 105 C
ATOM 786 N LEU A 104 15.594 63.153 90.831 1.00 33.68 N
ANISOU 786 N LEU A 104 3611 5596 3591 231 101 -71 N
ATOM 787 CA LEU A 104 14.977 64.376 90.307 1.00 33.36 C
ANISOU 787 CA LEU A 104 3499 5623 3552 211 157 -47 C
ATOM 788 C LEU A 104 15.511 64.746 88.917 1.00 34.07 C
ANISOU 788 C LEU A 104 3518 5780 3648 214 139 -90 C
ATOM 789 O LEU A 104 14.708 65.012 88.027 1.00 32.90 O
ANISOU 789 O LEU A 104 3303 5692 3505 182 158 -56 O
ATOM 790 CB LEU A 104 15.136 65.530 91.310 1.00 33.69 C
ANISOU 790 CB LEU A 104 3558 5667 3576 234 207 -34 C
ATOM 791 CG LEU A 104 14.360 66.816 91.054 1.00 38.02 C
ANISOU 791 CG LEU A 104 4051 6243 4152 231 283 6 C
ATOM 792 CD1 LEU A 104 12.849 66.546 90.864 1.00 37.78 C
ANISOU 792 CD1 LEU A 104 3981 6236 4138 203 317 79 C
ATOM 793 CD2 LEU A 104 14.564 67.790 92.218 1.00 40.00 C
ANISOU 793 CD2 LEU A 104 4341 6468 4390 248 340 -14 C
ATOM 794 N VAL A 105 16.858 64.715 88.727 1.00 31.14 N
ANISOU 794 N VAL A 105 3147 5419 3264 251 101 -159 N
ATOM 795 CA VAL A 105 17.526 64.969 87.443 1.00 30.96 C
ANISOU 795 CA VAL A 105 3053 5478 3231 250 87 -207 C
ATOM 796 C VAL A 105 17.042 63.944 86.411 1.00 35.03 C
ANISOU 796 C VAL A 105 3543 6022 3746 218 59 -243 C
ATOM 797 O VAL A 105 16.709 64.332 85.295 1.00 35.34 O
ANISOU 797 O VAL A 105 3507 6161 3761 183 71 -233 O
ATOM 798 CB VAL A 105 19.074 64.920 87.574 1.00 35.01 C
ANISOU 798 CB VAL A 105 3565 6008 3728 296 51 -284 C
ATOM 799 CG1 VAL A 105 19.755 64.761 86.208 1.00 34.91 C
ANISOU 799 CG1 VAL A 105 3479 6090 3696 293 33 -351 C
ATOM 800 CG2 VAL A 105 19.596 66.143 88.285 1.00 34.55 C
ANISOU 800 CG2 VAL A 105 3509 5958 3662 295 82 -268 C
ATOM 801 N GLY A 106 17.024 62.660 86.802 1.00 31.90 N
ANISOU 801 N GLY A 106 3210 5538 3374 226 26 -284 N
ATOM 802 CA GLY A 106 16.603 61.553 85.954 1.00 32.79 C
ANISOU 802 CA GLY A 106 3316 5644 3498 184 4 -348 C
ATOM 803 C GLY A 106 15.165 61.656 85.488 1.00 38.66 C
ANISOU 803 C GLY A 106 4015 6444 4230 96 28 -295 C
ATOM 804 O GLY A 106 14.840 61.246 84.373 1.00 39.30 O
ANISOU 804 O GLY A 106 4041 6603 4287 40 16 -354 O
ATOM 805 N CYS A 107 14.292 62.202 86.336 1.00 34.66 N
ANISOU 805 N CYS A 107 3520 5918 3732 83 64 -190 N
ATOM 806 CA CYS A 107 12.871 62.327 86.029 1.00 33.96 C
ANISOU 806 CA CYS A 107 3372 5898 3635 11 89 -126 C
ATOM 807 C CYS A 107 12.559 63.546 85.180 1.00 36.79 C
ANISOU 807 C CYS A 107 3622 6399 3958 17 112 -65 C
ATOM 808 O CYS A 107 11.462 63.650 84.641 1.00 34.93 O
ANISOU 808 O CYS A 107 3306 6262 3703 -34 121 -13 O
ATOM 809 CB CYS A 107 12.047 62.300 87.309 1.00 34.50 C
ANISOU 809 CB CYS A 107 3487 5894 3726 0 126 -44 C
ATOM 810 SG CYS A 107 12.085 60.707 88.159 1.00 39.40 S
ANISOU 810 SG CYS A 107 4227 6357 4386 -36 99 -71 S
ATOM 811 N CYS A 108 13.515 64.471 85.058 1.00 35.29 N
ANISOU 811 N CYS A 108 3424 6224 3761 77 122 -62 N
ATOM 812 CA CYS A 108 13.303 65.682 84.256 1.00 35.81 C
ANISOU 812 CA CYS A 108 3398 6402 3805 88 148 20 C
ATOM 813 C CYS A 108 13.248 65.386 82.748 1.00 39.76 C
ANISOU 813 C CYS A 108 3808 7060 4239 38 113 -8 C
ATOM 814 O CYS A 108 13.805 64.369 82.295 1.00 39.23 O
ANISOU 814 O CYS A 108 3760 7001 4146 8 73 -131 O
ATOM 815 CB CYS A 108 14.373 66.725 84.577 1.00 35.44 C
ANISOU 815 CB CYS A 108 3377 6313 3777 143 174 28 C
ATOM 816 SG CYS A 108 14.063 67.645 86.106 1.00 38.80 S
ANISOU 816 SG CYS A 108 3867 6613 4263 189 240 86 S
ATOM 817 N PRO A 109 12.626 66.278 81.941 1.00 36.74 N
ANISOU 817 N PRO A 109 3324 6811 3825 33 128 102 N
ATOM 818 CA PRO A 109 12.651 66.072 80.487 1.00 36.54 C
ANISOU 818 CA PRO A 109 3203 6973 3707 -19 92 82 C
ATOM 819 C PRO A 109 14.051 66.355 79.917 1.00 39.90 C
ANISOU 819 C PRO A 109 3632 7433 4096 -2 88 25 C
ATOM 820 O PRO A 109 14.984 66.720 80.652 1.00 38.31 O
ANISOU 820 O PRO A 109 3500 7110 3946 47 109 3 O
ATOM 821 CB PRO A 109 11.626 67.086 79.984 1.00 38.91 C
ANISOU 821 CB PRO A 109 3395 7399 3989 -7 112 256 C
ATOM 822 CG PRO A 109 11.709 68.211 80.953 1.00 43.16 C
ANISOU 822 CG PRO A 109 3985 7792 4623 78 173 354 C
ATOM 823 CD PRO A 109 11.921 67.533 82.291 1.00 38.62 C
ANISOU 823 CD PRO A 109 3527 7039 4109 85 182 254 C
ATOM 824 N GLY A 110 14.178 66.209 78.606 1.00 36.96 N
ANISOU 824 N GLY A 110 3172 7251 3622 -51 63 1 N
ATOM 825 CA GLY A 110 15.412 66.500 77.896 1.00 36.79 C
ANISOU 825 CA GLY A 110 3125 7311 3543 -47 66 -43 C
ATOM 826 C GLY A 110 15.754 67.977 77.871 1.00 39.68 C
ANISOU 826 C GLY A 110 3468 7678 3930 -11 108 116 C
ATOM 827 O GLY A 110 14.932 68.832 78.237 1.00 37.90 O
ANISOU 827 O GLY A 110 3235 7404 3761 20 137 269 O
ATOM 828 N GLY A 111 16.997 68.255 77.492 1.00 37.57 N
ANISOU 828 N GLY A 111 3192 7452 3630 -14 120 73 N
ATOM 829 CA GLY A 111 17.524 69.610 77.421 1.00 38.32 C
ANISOU 829 CA GLY A 111 3273 7535 3751 -3 166 207 C
ATOM 830 C GLY A 111 17.587 70.139 76.005 1.00 45.38 C
ANISOU 830 C GLY A 111 4057 8656 4530 -51 169 312 C
ATOM 831 O GLY A 111 17.838 69.373 75.069 1.00 44.83 O
ANISOU 831 O GLY A 111 3925 8771 4339 -97 138 211 O
ATOM 832 N THR A 112 17.360 71.460 75.849 1.00 44.03 N
ANISOU 832 N THR A 112 3865 8469 4397 -38 211 517 N
ATOM 833 CA THR A 112 17.408 72.178 74.572 1.00 46.06 C
ANISOU 833 CA THR A 112 4021 8928 4552 -78 221 676 C
ATOM 834 C THR A 112 18.755 71.978 73.857 1.00 50.30 C
ANISOU 834 C THR A 112 4519 9608 4985 -142 226 577 C
ATOM 835 O THR A 112 18.772 71.719 72.654 1.00 51.95 O
ANISOU 835 O THR A 112 4629 10072 5039 -195 206 587 O
ATOM 836 CB THR A 112 17.000 73.648 74.806 1.00 62.92 C
ANISOU 836 CB THR A 112 6172 10938 6798 -35 277 915 C
ATOM 837 OG1 THR A 112 15.573 73.736 74.755 1.00 69.65 O
ANISOU 837 OG1 THR A 112 6981 11815 7666 17 260 1043 O
ATOM 838 CG2 THR A 112 17.610 74.618 73.802 1.00 64.44 C
ANISOU 838 CG2 THR A 112 6303 11251 6931 -82 311 1083 C
ATOM 839 N ALA A 113 19.873 72.042 74.608 1.00 44.39 N
ANISOU 839 N ALA A 113 3836 8720 4311 -139 253 467 N
ATOM 840 CA ALA A 113 21.222 71.889 74.071 1.00 44.43 C
ANISOU 840 CA ALA A 113 3795 8854 4234 -190 266 364 C
ATOM 841 C ALA A 113 21.413 70.625 73.231 1.00 47.29 C
ANISOU 841 C ALA A 113 4088 9428 4452 -211 225 186 C
ATOM 842 O ALA A 113 22.263 70.622 72.346 1.00 49.14 O
ANISOU 842 O ALA A 113 4242 9859 4571 -263 243 147 O
ATOM 843 CB ALA A 113 22.253 71.957 75.192 1.00 44.42 C
ANISOU 843 CB ALA A 113 3868 8672 4339 -172 285 250 C
ATOM 844 N SER A 114 20.592 69.580 73.459 1.00 42.02 N
ANISOU 844 N SER A 114 3449 8728 3789 -181 179 77 N
ATOM 845 CA SER A 114 20.643 68.338 72.676 1.00 42.09 C
ANISOU 845 CA SER A 114 3406 8907 3678 -208 146 -113 C
ATOM 846 C SER A 114 20.318 68.595 71.190 1.00 48.23 C
ANISOU 846 C SER A 114 4059 9997 4271 -285 144 -27 C
ATOM 847 O SER A 114 20.812 67.864 70.334 1.00 49.03 O
ANISOU 847 O SER A 114 4094 10295 4241 -326 141 -189 O
ATOM 848 CB SER A 114 19.694 67.294 73.251 1.00 42.07 C
ANISOU 848 CB SER A 114 3468 8782 3735 -182 104 -217 C
ATOM 849 OG SER A 114 18.338 67.653 73.046 1.00 40.48 O
ANISOU 849 OG SER A 114 3237 8626 3517 -204 85 -61 O
ATOM 850 N ASN A 115 19.509 69.644 70.892 1.00 45.56 N
ANISOU 850 N ASN A 115 3683 9708 3919 -299 148 228 N
ATOM 851 CA ASN A 115 19.145 70.032 69.522 1.00 47.16 C
ANISOU 851 CA ASN A 115 3760 10222 3938 -367 140 364 C
ATOM 852 C ASN A 115 20.388 70.481 68.749 1.00 52.79 C
ANISOU 852 C ASN A 115 4408 11108 4543 -421 186 376 C
ATOM 853 O ASN A 115 20.575 70.064 67.608 1.00 53.35 O
ANISOU 853 O ASN A 115 4376 11476 4418 -488 179 307 O
ATOM 854 CB ASN A 115 18.077 71.140 69.504 1.00 44.25 C
ANISOU 854 CB ASN A 115 3369 9833 3610 -341 138 667 C
ATOM 855 CG ASN A 115 16.800 70.842 70.247 1.00 49.80 C
ANISOU 855 CG ASN A 115 4113 10392 4415 -288 102 684 C
ATOM 856 OD1 ASN A 115 16.532 69.712 70.684 1.00 42.29 O
ANISOU 856 OD1 ASN A 115 3203 9378 3486 -290 71 477 O
ATOM 857 ND2 ASN A 115 15.969 71.870 70.401 1.00 36.54 N
ANISOU 857 ND2 ASN A 115 2419 8656 2807 -236 113 942 N
ATOM 858 N VAL A 116 21.239 71.306 69.392 1.00 50.44 N
ANISOU 858 N VAL A 116 4165 10635 4366 -402 236 447 N
ATOM 859 CA VAL A 116 22.505 71.841 68.853 1.00 52.16 C
ANISOU 859 CA VAL A 116 4326 10981 4511 -463 290 468 C
ATOM 860 C VAL A 116 23.544 70.715 68.752 1.00 54.64 C
ANISOU 860 C VAL A 116 4613 11384 4763 -465 293 166 C
ATOM 861 O VAL A 116 24.294 70.673 67.783 1.00 55.35 O
ANISOU 861 O VAL A 116 4601 11734 4694 -529 323 124 O
ATOM 862 CB VAL A 116 23.071 73.022 69.706 1.00 56.89 C
ANISOU 862 CB VAL A 116 4998 11337 5279 -457 344 609 C
ATOM 863 CG1 VAL A 116 24.071 73.845 68.896 1.00 58.16 C
ANISOU 863 CG1 VAL A 116 5080 11672 5348 -553 403 725 C
ATOM 864 CG2 VAL A 116 21.955 73.923 70.239 1.00 56.78 C
ANISOU 864 CG2 VAL A 116 5052 11120 5402 -409 344 840 C
ATOM 865 N MET A 117 23.610 69.824 69.766 1.00 49.13 N
ANISOU 865 N MET A 117 4004 10473 4191 -388 266 -33 N
ATOM 866 CA MET A 117 24.552 68.701 69.767 1.00 48.45 C
ANISOU 866 CA MET A 117 3899 10430 4078 -358 268 -314 C
ATOM 867 C MET A 117 24.254 67.711 68.648 1.00 52.17 C
ANISOU 867 C MET A 117 4293 11152 4378 -393 252 -474 C
ATOM 868 O MET A 117 25.194 67.215 68.028 1.00 52.50 O
ANISOU 868 O MET A 117 4257 11371 4319 -405 284 -646 O
ATOM 869 CB MET A 117 24.629 68.000 71.128 1.00 49.33 C
ANISOU 869 CB MET A 117 4128 10247 4370 -260 239 -453 C
ATOM 870 CG MET A 117 25.150 68.879 72.238 1.00 52.66 C
ANISOU 870 CG MET A 117 4613 10460 4935 -235 258 -352 C
ATOM 871 SD MET A 117 26.786 69.586 71.949 1.00 59.01 S
ANISOU 871 SD MET A 117 5326 11401 5696 -287 317 -356 S
ATOM 872 CE MET A 117 26.451 71.275 72.409 1.00 55.97 C
ANISOU 872 CE MET A 117 4993 10867 5406 -350 352 -71 C
ATOM 873 N THR A 118 22.954 67.453 68.369 1.00 48.22 N
ANISOU 873 N THR A 118 3801 10684 3838 -416 208 -425 N
ATOM 874 CA THR A 118 22.510 66.589 67.261 1.00 49.35 C
ANISOU 874 CA THR A 118 3864 11087 3801 -477 189 -573 C
ATOM 875 C THR A 118 22.875 67.211 65.915 1.00 54.31 C
ANISOU 875 C THR A 118 4351 12081 4202 -570 221 -475 C
ATOM 876 O THR A 118 23.249 66.476 64.997 1.00 55.68 O
ANISOU 876 O THR A 118 4443 12501 4211 -616 237 -677 O
ATOM 877 CB THR A 118 21.012 66.325 67.336 1.00 50.39 C
ANISOU 877 CB THR A 118 4022 11182 3942 -497 131 -513 C
ATOM 878 OG1 THR A 118 20.724 65.778 68.612 1.00 43.18 O
ANISOU 878 OG1 THR A 118 3238 9934 3233 -419 110 -590 O
ATOM 879 CG2 THR A 118 20.537 65.369 66.270 1.00 47.96 C
ANISOU 879 CG2 THR A 118 3636 11134 3454 -578 107 -697 C
ATOM 880 N TYR A 119 22.760 68.554 65.795 1.00 51.06 N
ANISOU 880 N TYR A 119 3914 11704 3783 -598 236 -166 N
ATOM 881 CA TYR A 119 23.117 69.271 64.566 1.00 53.65 C
ANISOU 881 CA TYR A 119 4114 12372 3900 -691 271 -16 C
ATOM 882 C TYR A 119 24.628 69.129 64.296 1.00 59.16 C
ANISOU 882 C TYR A 119 4757 13180 4541 -710 338 -173 C
ATOM 883 O TYR A 119 25.034 68.875 63.162 1.00 59.37 O
ANISOU 883 O TYR A 119 4665 13547 4347 -783 366 -255 O
ATOM 884 CB TYR A 119 22.679 70.751 64.632 1.00 55.51 C
ANISOU 884 CB TYR A 119 4355 12551 4185 -703 279 366 C
ATOM 885 CG TYR A 119 23.166 71.571 63.454 1.00 60.61 C
ANISOU 885 CG TYR A 119 4880 13516 4633 -800 322 559 C
ATOM 886 CD1 TYR A 119 22.548 71.478 62.209 1.00 63.65 C
ANISOU 886 CD1 TYR A 119 5144 14271 4768 -873 293 637 C
ATOM 887 CD2 TYR A 119 24.279 72.401 63.569 1.00 62.92 C
ANISOU 887 CD2 TYR A 119 5170 13763 4972 -833 393 656 C
ATOM 888 CE1 TYR A 119 23.026 72.193 61.109 1.00 66.91 C
ANISOU 888 CE1 TYR A 119 5441 15003 4978 -968 334 824 C
ATOM 889 CE2 TYR A 119 24.753 73.132 62.484 1.00 65.74 C
ANISOU 889 CE2 TYR A 119 5416 14418 5143 -936 440 843 C
ATOM 890 CZ TYR A 119 24.127 73.022 61.255 1.00 76.19 C
ANISOU 890 CZ TYR A 119 6624 16110 6213 -999 411 933 C
ATOM 891 OH TYR A 119 24.601 73.748 60.192 1.00 84.36 O
ANISOU 891 OH TYR A 119 7548 17454 7051 -1105 458 1138 O
ATOM 892 N LEU A 120 25.443 69.243 65.355 1.00 56.13 N
ANISOU 892 N LEU A 120 4451 12528 4349 -644 363 -228 N
ATOM 893 CA LEU A 120 26.885 69.097 65.249 1.00 57.63 C
ANISOU 893 CA LEU A 120 4579 12807 4510 -648 422 -379 C
ATOM 894 C LEU A 120 27.279 67.646 64.978 1.00 60.25 C
ANISOU 894 C LEU A 120 4879 13229 4783 -599 423 -735 C
ATOM 895 O LEU A 120 28.129 67.411 64.125 1.00 61.78 O
ANISOU 895 O LEU A 120 4956 13698 4819 -639 477 -862 O
ATOM 896 CB LEU A 120 27.592 69.691 66.475 1.00 57.46 C
ANISOU 896 CB LEU A 120 4636 12495 4700 -601 440 -323 C
ATOM 897 CG LEU A 120 27.363 71.209 66.696 1.00 63.48 C
ANISOU 897 CG LEU A 120 5431 13154 5533 -661 461 9 C
ATOM 898 CD1 LEU A 120 27.684 71.611 68.123 1.00 62.41 C
ANISOU 898 CD1 LEU A 120 5407 12675 5630 -606 459 21 C
ATOM 899 CD2 LEU A 120 28.144 72.063 65.682 1.00 68.31 C
ANISOU 899 CD2 LEU A 120 5923 14043 5990 -782 529 157 C
ATOM 1976 N ALA A 270 17.395 71.560 89.366 1.00 35.38 N
ANISOU 1976 N ALA A 270 3622 5894 3926 228 397 -64 N
ATOM 1977 CA ALA A 270 17.973 72.447 90.379 1.00 35.08 C
ANISOU 1977 CA ALA A 270 3634 5791 3903 205 443 -136 C
ATOM 1978 C ALA A 270 16.885 73.370 90.924 1.00 38.15 C
ANISOU 1978 C ALA A 270 4048 6088 4360 230 542 -100 C
ATOM 1979 O ALA A 270 16.822 73.575 92.129 1.00 38.62 O
ANISOU 1979 O ALA A 270 4161 6109 4402 225 578 -172 O
ATOM 1980 CB ALA A 270 19.128 73.259 89.793 1.00 35.65 C
ANISOU 1980 CB ALA A 270 3680 5870 3995 157 449 -165 C
ATOM 1981 N ALA A 271 15.997 73.864 90.048 1.00 34.42 N
ANISOU 1981 N ALA A 271 3528 5596 3955 265 584 11 N
ATOM 1982 CA ALA A 271 14.866 74.726 90.424 1.00 34.94 C
ANISOU 1982 CA ALA A 271 3597 5575 4102 317 682 61 C
ATOM 1983 C ALA A 271 13.844 73.999 91.306 1.00 38.84 C
ANISOU 1983 C ALA A 271 4101 6101 4557 346 691 53 C
ATOM 1984 O ALA A 271 13.371 74.567 92.293 1.00 38.92 O
ANISOU 1984 O ALA A 271 4146 6047 4593 369 773 7 O
ATOM 1985 CB ALA A 271 14.180 75.270 89.179 1.00 35.46 C
ANISOU 1985 CB ALA A 271 3588 5648 4238 361 705 211 C
ATOM 1986 N LEU A 272 13.492 72.752 90.934 1.00 35.78 N
ANISOU 1986 N LEU A 272 3680 5809 4104 337 616 91 N
ATOM 1987 CA LEU A 272 12.516 71.931 91.663 1.00 35.50 C
ANISOU 1987 CA LEU A 272 3648 5811 4029 343 620 101 C
ATOM 1988 C LEU A 272 13.015 71.552 93.053 1.00 39.08 C
ANISOU 1988 C LEU A 272 4184 6252 4414 316 617 5 C
ATOM 1989 O LEU A 272 12.231 71.522 93.998 1.00 40.15 O
ANISOU 1989 O LEU A 272 4334 6392 4529 325 673 0 O
ATOM 1990 CB LEU A 272 12.119 70.681 90.845 1.00 34.66 C
ANISOU 1990 CB LEU A 272 3494 5793 3881 317 540 155 C
ATOM 1991 CG LEU A 272 11.233 70.919 89.606 1.00 38.06 C
ANISOU 1991 CG LEU A 272 3821 6290 4350 336 544 263 C
ATOM 1992 CD1 LEU A 272 10.998 69.623 88.830 1.00 36.79 C
ANISOU 1992 CD1 LEU A 272 3620 6223 4134 283 461 273 C
ATOM 1993 CD2 LEU A 272 9.888 71.527 89.980 1.00 39.50 C
ANISOU 1993 CD2 LEU A 272 3951 6476 4583 389 629 334 C
ATOM 1994 N ALA A 273 14.325 71.313 93.181 1.00 35.62 N
ANISOU 1994 N ALA A 273 3787 5818 3930 284 555 -67 N
ATOM 1995 CA ALA A 273 14.977 70.980 94.446 1.00 35.61 C
ANISOU 1995 CA ALA A 273 3851 5833 3846 260 535 -149 C
ATOM 1996 C ALA A 273 15.034 72.213 95.357 1.00 41.32 C
ANISOU 1996 C ALA A 273 4608 6511 4582 252 627 -233 C
ATOM 1997 O ALA A 273 14.832 72.086 96.566 1.00 41.96 O
ANISOU 1997 O ALA A 273 4729 6623 4590 240 654 -281 O
ATOM 1998 CB ALA A 273 16.385 70.473 94.176 1.00 35.68 C
ANISOU 1998 CB ALA A 273 3867 5876 3813 240 441 -195 C
ATOM 1999 N ALA A 274 15.306 73.400 94.773 1.00 37.99 N
ANISOU 1999 N ALA A 274 4172 6014 4250 253 679 -252 N
ATOM 2000 CA ALA A 274 15.361 74.677 95.495 1.00 38.62 C
ANISOU 2000 CA ALA A 274 4289 6011 4372 241 781 -348 C
ATOM 2001 C ALA A 274 13.976 75.054 96.014 1.00 40.49 C
ANISOU 2001 C ALA A 274 4522 6213 4650 298 887 -328 C
ATOM 2002 O ALA A 274 13.870 75.527 97.136 1.00 42.06 O
ANISOU 2002 O ALA A 274 4764 6399 4817 285 958 -435 O
ATOM 2003 CB ALA A 274 15.902 75.778 94.584 1.00 39.90 C
ANISOU 2003 CB ALA A 274 4439 6076 4645 227 814 -342 C
ATOM 2004 N ALA A 275 12.917 74.808 95.219 1.00 35.17 N
ANISOU 2004 N ALA A 275 3785 5545 4034 358 895 -198 N
ATOM 2005 CA ALA A 275 11.534 75.127 95.578 1.00 35.50 C
ANISOU 2005 CA ALA A 275 3792 5576 4119 425 993 -162 C
ATOM 2006 C ALA A 275 10.891 74.133 96.544 1.00 41.37 C
ANISOU 2006 C ALA A 275 4539 6428 4750 407 987 -168 C
ATOM 2007 O ALA A 275 10.161 74.562 97.441 1.00 41.44 O
ANISOU 2007 O ALA A 275 4550 6440 4754 435 1088 -219 O
ATOM 2008 CB ALA A 275 10.682 75.243 94.323 1.00 35.94 C
ANISOU 2008 CB ALA A 275 3756 5631 4267 491 994 -11 C
ATOM 2009 N HIS A 276 11.151 72.806 96.365 1.00 37.48 N
ANISOU 2009 N HIS A 276 4048 6022 4172 360 877 -116 N
ATOM 2010 CA HIS A 276 10.514 71.766 97.173 1.00 36.82 C
ANISOU 2010 CA HIS A 276 3970 6029 3991 331 867 -88 C
ATOM 2011 C HIS A 276 11.327 71.079 98.243 1.00 39.86 C
ANISOU 2011 C HIS A 276 4431 6468 4246 274 814 -146 C
ATOM 2012 O HIS A 276 10.741 70.470 99.133 1.00 40.60 O
ANISOU 2012 O HIS A 276 4537 6634 4256 250 835 -122 O
ATOM 2013 CB HIS A 276 9.784 70.774 96.275 1.00 37.80 C
ANISOU 2013 CB HIS A 276 4031 6199 4131 322 810 33 C
ATOM 2014 CG HIS A 276 8.828 71.460 95.363 1.00 41.63 C
ANISOU 2014 CG HIS A 276 4421 6676 4720 382 864 104 C
ATOM 2015 ND1 HIS A 276 7.594 71.905 95.817 1.00 44.10 N
ANISOU 2015 ND1 HIS A 276 4674 7023 5060 428 967 129 N
ATOM 2016 CD2 HIS A 276 8.988 71.835 94.071 1.00 43.54 C
ANISOU 2016 CD2 HIS A 276 4613 6894 5037 410 830 160 C
ATOM 2017 CE1 HIS A 276 7.034 72.512 94.787 1.00 44.28 C
ANISOU 2017 CE1 HIS A 276 4609 7036 5180 492 987 207 C
ATOM 2018 NE2 HIS A 276 7.819 72.471 93.702 1.00 44.20 N
ANISOU 2018 NE2 HIS A 276 4602 6997 5195 478 903 236 N
"""
CONPRED_DUMMY = """PFRMAT RR
TARGET 536987
AUTHOR RaptorX-Contact
METHOD deep dilated residual networks (one variant of deep CNN). Consult <EMAIL> for details.
MODEL 1
MVAASMNILSKISSFIGKTFSLWAALFAAAAFFAPDTFKWAGPYIPWLLG
IIMFGMGLTLKPSDFDILFKHPKVVIIGVIAQFAIMPATAWCLSKLLNLP
AEIAVGVILVGCCPGGTASNVMTYLARGNVALSVAVTSVSTLTSPLLTPA
IFLMLAGEMLEIQAAGMLMSIVKMVLLPIVLGLIVHKVLGSKTEKLTDAL
PLVSVAAIVLIIGAVVGASKGKIMESGLLIFAVVVLHNGIGYLLGFFAAK
WTGLPYDAQKALTIEVGMQNSGLAAALAAAHFAAAPVVAVPGALFSVWHN
ISGSLLATYWAAKAGKHKKPLDRAGSENLYFQ
53 178 0 8 0.9999614
57 182 0 8 0.9999346
58 182 0 8 0.9999014
54 181 0 8 0.9998163
54 182 0 8 0.9997769
54 178 0 8 0.9996910
249 259 0 8 0.9989253
58 185 0 8 0.9979285
58 186 0 8 0.9977884
249 262 0 8 0.9974785
94 104 0 8 0.9972718
123 133 0 8 0.9972159
57 179 0 8 0.9963613
246 263 0 8 0.9962631
50 178 0 8 0.9946589
106 288 0 8 0.9932054
57 183 0 8 0.9925978
123 261 0 8 0.9922032
102 288 0 8 0.9917381
27 212 0 8 0.9908113
103 291 0 8 0.9907801
75 136 0 8 0.9905434
31 216 0 8 0.9904293
89 240 0 8 0.9902470
27 213 0 8 0.9900678
110 292 0 8 0.9887912
85 244 0 8 0.9886514
90 108 0 8 0.9883336
109 278 0 8 0.9877242
94 107 0 8 0.9875522
78 262 0 8 0.9875078
48 207 0 8 0.9874308
74 262 0 8 0.9874212
28 216 0 8 0.9870313
245 263 0 8 0.9866461
78 136 0 8 0.9865698
106 291 0 8 0.9861109
79 139 0 8 0.9859405
133 265 0 8 0.9857825
77 252 0 8 0.9857346
109 274 0 8 0.9857225
110 295 0 8 0.9855377
81 248 0 8 0.9851450
81 266 0 8 0.9848748
74 258 0 8 0.9841593
106 292 0 8 0.9837796
31 213 0 8 0.9835263
68 135 0 8 0.9834397
48 211 0 8 0.9833449
113 274 0 8 0.9828007
52 207 0 8 0.9818235
128 261 0 8 0.9814836
90 107 0 8 0.9814461
119 265 0 8 0.9814367
105 288 0 8 0.9791791
271 296 0 8 0.9788657
90 111 0 8 0.9781752
31 217 0 8 0.9776807
53 175 0 8 0.9772123
77 262 0 8 0.9764582
129 258 0 8 0.9764170
234 298 0 8 0.9763948
133 261 0 8 0.9759184
79 140 0 8 0.9759070
55 182 0 8 0.9758528
246 259 0 8 0.9756561
27 209 0 8 0.9746038
234 295 0 8 0.9741930
112 148 0 8 0.9737659
102 287 0 8 0.9732612
132 258 0 8 0.9728087
82 266 0 8 0.9718467
242 263 0 8 0.9710815
245 266 0 8 0.9700539
91 108 0 8 0.9698529
75 139 0 8 0.9698042
48 210 0 8 0.9697683
24 212 0 8 0.9695854
107 233 0 8 0.9683198
136 262 0 8 0.9669924
107 291 0 8 0.9663849
79 136 0 8 0.9657449
94 108 0 8 0.9650769
125 307 0 8 0.9650706
77 248 0 8 0.9650462
120 133 0 8 0.9647374
93 233 0 8 0.9635152
51 207 0 8 0.9634590
"""
class SearchTargetTestCase(unittest.TestCase):
    """Integration-style tests for :class:`SearchTarget`.

    Fixtures (contact prediction, TOPCONS topology prediction and a dummy
    PDB) are written to tempfiles, and each search is rooted under the CCP4
    scratch directory given by the ``CCP4_SCR`` environment variable.
    """

    def test_1(self):
        """Constructor wiring, target splitting and search-script creation."""
        pdb_fname = create_tempfile(PDB_DUMY)
        self.addCleanup(remove, pdb_fname)
        conpred_fname = create_tempfile(CONPRED_DUMMY)
        self.addCleanup(remove, conpred_fname)
        topcons_fname = create_tempfile(TOPCONS_DUMY)
        self.addCleanup(remove, topcons_fname)
        # NOTE(review): pdb_fname is created above but never passed on;
        # target_pdb_benchmark receives the raw PDB string (PDB_DUMY) —
        # confirm whether the tempfile path was intended here instead.
        search = SearchTarget(workdir=os.path.join(os.environ['CCP4_SCR'], 'test'), conpred=conpred_fname,
                              sspred=topcons_fname, target_pdb_benchmark=PDB_DUMY, queue_environment='environ',
                              platform='local', queue_name='queue', n_contacts_threshold=0)
        self.addCleanup(remove, os.path.join(os.environ['CCP4_SCR'], 'test'))
        self.assertTrue(os.path.isdir(os.path.join(os.environ['CCP4_SCR'], 'test')))
        self.assertEqual(search.search_header, """**********************************************************************
***************** SWAMP SEARCH *****************
**********************************************************************
""")
        self.assertEqual(os.path.join(os.environ['CCP4_SCR'], 'test', "tmp_cmap_{}.map"), search._tmp_cmap)
        self.assertEqual(os.path.join(os.environ['CCP4_SCR'], 'test', "search_{}"), search._search_workdir)
        self.assertIsNone(search._tmp_pdb)
        search.target.split()
        self.assertFalse(search.target.error)
        self.assertEqual(swamp.FRAG_MAPALIGN_DB, search.template_library)
        self.assertEqual('mapalign', search.library_format)
        self.assertDictEqual({'directory': os.path.join(os.environ['CCP4_SCR'], 'test'), 'shell': '/bin/bash',
                              'name': 'swamp', 'queue': 'queue', 'environment': 'environ', 'processes': 1},
                             search._other_task_info)
        self.assertListEqual(["SUBTRGT_RANK", "SUBTRGT_ID", "N_CON_MAP_A", "MAP_A", "MAP_B", "CON_SCO", "GAP_SCO",
                              "TOTAL_SCO", "ALI_LEN", "QSCORE", "RMSD", "SEQ_ID", "N_ALIGN"], search._column_reference)
        self.assertIsNone(search.scripts)
        self.assertIsNone(search.search_pickle_dict)
        search._create_scripts()
        self.assertEqual(12, len(search.scripts))
        # One result pickle is registered per subtarget pair search.
        self.assertListEqual(
            ['%s/search_1/search_1_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_10/search_10_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_11/search_11_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_12/search_12_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_2/search_2_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_3/search_3_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_4/search_4_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_5/search_5_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_6/search_6_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_7/search_7_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_8/search_8_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test'),
             '%s/search_9/search_9_results.pckl' % os.path.join(os.environ['CCP4_SCR'], 'test')],
            sorted(list(search.search_pickle_dict.keys())))

    def test_2(self):
        """Result recovery, ranking and searchmodel combination."""
        pdb_fname = create_tempfile(PDB_DUMY)
        self.addCleanup(remove, pdb_fname)
        conpred_fname = create_tempfile(CONPRED_DUMMY)
        self.addCleanup(remove, conpred_fname)
        topcons_fname = create_tempfile(TOPCONS_DUMY)
        self.addCleanup(remove, topcons_fname)
        search = SearchTarget(workdir=os.path.join(os.environ['CCP4_SCR'], 'test_2'), conpred=conpred_fname,
                              sspred=topcons_fname, platform='sge', n_contacts_threshold=0,
                              alignment_algorithm_name='mapalign')
        self.addCleanup(remove, os.path.join(os.environ['CCP4_SCR'], 'test_2'))
        search.target.split()
        self.assertDictEqual({'directory': os.path.join(os.environ['CCP4_SCR'], 'test_2'), 'shell': '/bin/bash',
                              'name': 'swamp', 'max_array_size': 1}, search._other_task_info)
        search._create_scripts()
        # Fabricate one deterministic result pickle per search directory so
        # that recover_results() has known data to collect.
        for idx, pickle in enumerate(sorted(search.search_pickle_dict.keys())):
            if not os.path.isdir(os.path.dirname(pickle)):
                os.makedirs(os.path.dirname(pickle))
            self.addCleanup(remove, os.path.dirname(pickle))
            joblib.dump([["MAP_A_%s" % idx, "MAP_B_%s" % idx, "CON_SCO_%s" % idx, "GAP_SCO_%s" % idx,
                          "TOTAL_SCO_%s" % idx, "ALI_LEN_%s" % idx, "QSCORE_%s" % idx, "RMSD_%s" % idx,
                          "SEQ_ID_%s" % idx, "N_ALIGN_%s" % idx]],
                        pickle)
        self.assertIsNone(search.results)
        search.results = search.recover_results()
        # Recovered rows are [rank, subtarget id, n contacts, <pickle fields...>].
        self.assertListEqual([[1, '2_6', 11, 'MAP_A_0', 'MAP_B_0', 'CON_SCO_0', 'GAP_SCO_0', 'TOTAL_SCO_0', 'ALI_LEN_0',
                               'QSCORE_0', 'RMSD_0', 'SEQ_ID_0', 'N_ALIGN_0'],
                              [2, '1_7', 8, 'MAP_A_4', 'MAP_B_4', 'CON_SCO_4', 'GAP_SCO_4', 'TOTAL_SCO_4', 'ALI_LEN_4',
                               'QSCORE_4', 'RMSD_4', 'SEQ_ID_4', 'N_ALIGN_4'],
                              [3, '4_9', 7, 'MAP_A_5', 'MAP_B_5', 'CON_SCO_5', 'GAP_SCO_5', 'TOTAL_SCO_5', 'ALI_LEN_5',
                               'QSCORE_5', 'RMSD_5', 'SEQ_ID_5', 'N_ALIGN_5'],
                              [4, '3_5', 6, 'MAP_A_6', 'MAP_B_6', 'CON_SCO_6', 'GAP_SCO_6', 'TOTAL_SCO_6', 'ALI_LEN_6',
                               'QSCORE_6', 'RMSD_6', 'SEQ_ID_6', 'N_ALIGN_6'],
                              [5, '2_7', 5, 'MAP_A_7', 'MAP_B_7', 'CON_SCO_7', 'GAP_SCO_7', 'TOTAL_SCO_7', 'ALI_LEN_7',
                               'QSCORE_7', 'RMSD_7', 'SEQ_ID_7', 'N_ALIGN_7'],
                              [6, '3_4', 4, 'MAP_A_8', 'MAP_B_8', 'CON_SCO_8', 'GAP_SCO_8', 'TOTAL_SCO_8', 'ALI_LEN_8',
                               'QSCORE_8', 'RMSD_8', 'SEQ_ID_8', 'N_ALIGN_8'],
                              [7, '3_8', 3, 'MAP_A_9', 'MAP_B_9', 'CON_SCO_9', 'GAP_SCO_9', 'TOTAL_SCO_9', 'ALI_LEN_9',
                               'QSCORE_9', 'RMSD_9', 'SEQ_ID_9', 'N_ALIGN_9'],
                              [8, '4_10', 3, 'MAP_A_10', 'MAP_B_10', 'CON_SCO_10', 'GAP_SCO_10', 'TOTAL_SCO_10',
                               'ALI_LEN_10', 'QSCORE_10', 'RMSD_10', 'SEQ_ID_10', 'N_ALIGN_10'],
                              [9, '4_5', 2, 'MAP_A_11', 'MAP_B_11', 'CON_SCO_11', 'GAP_SCO_11', 'TOTAL_SCO_11',
                               'ALI_LEN_11', 'QSCORE_11', 'RMSD_11', 'SEQ_ID_11', 'N_ALIGN_11'],
                              [10, '8_10', 2, 'MAP_A_1', 'MAP_B_1', 'CON_SCO_1', 'GAP_SCO_1', 'TOTAL_SCO_1',
                               'ALI_LEN_1', 'QSCORE_1', 'RMSD_1', 'SEQ_ID_1', 'N_ALIGN_1'],
                              [11, '4_8', 1, 'MAP_A_2', 'MAP_B_2', 'CON_SCO_2', 'GAP_SCO_2', 'TOTAL_SCO_2', 'ALI_LEN_2',
                               'QSCORE_2', 'RMSD_2', 'SEQ_ID_2', 'N_ALIGN_2'],
                              [12, '9_10', 1, 'MAP_A_3', 'MAP_B_3', 'CON_SCO_3', 'GAP_SCO_3', 'TOTAL_SCO_3',
                               'ALI_LEN_3', 'QSCORE_3', 'RMSD_3', 'SEQ_ID_3', 'N_ALIGN_3']],
                             sorted(search.results, key=itemgetter(0)))
        # Overwrite the CON_SCO column with the contact count so rank()
        # has numeric scores to sort on.
        for result in search.results:
            result[5] = result[2]
        search._make_dataframe(search.results)
        search.rank(consco_threshold=0)
        self.assertListEqual([11, 8, 7, 6, 5, 4, 3, 3, 2, 2, 1, 1], search.ranked_searchmodels.consco.tolist())
        search.rank(consco_threshold=0, combine_searchmodels=True)
        self.assertListEqual([4.416666666666667], search.ranked_searchmodels.consco.tolist())
| 1.992188 | 2 |
python/models_intruderNet/model38.py | xilodyne/IntruderNet | 0 | 12764792 | <filename>python/models_intruderNet/model38.py
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense
from keras.models import Sequential
from keras.utils import multi_gpu_model
class ModelIntruderNet38:
    """IntruderNet CNN classifier, model variant 38.

    Factory for a Keras ``Sequential`` network operating on 640x480 RGB
    frames: seven Conv2D/MaxPooling2D stages with (mostly) doubling filter
    counts, global average pooling, four dropout-regularised dense layers
    and a two-unit sigmoid output.
    """
    def __init__(self):
        ''' Constructor for this class. '''
    def get_epochs(self) -> int:
        """Return the number of training epochs for this model variant."""
        # The commented values below are earlier tuning choices kept for reference.
        #epochs = 1
        #epochs = 3
        #epochs = 10
        #epochs = 50
        #epochs = 100#
        epochs = 150
        #epochs = 500
        return epochs
    def get_modelname(self) -> str:
        """Return the save-file basename, e.g. ``mod38_multi_5.i150``."""
        save_name = "mod38_multi_5.i" + str(self.get_epochs())
        return save_name
    def get_model(self):
        """Build and return the (uncompiled) Sequential network."""
        model = Sequential()
        # Convolutional feature extractor: filter count grows 2 -> 128 while
        # each MaxPooling2D halves the spatial resolution.
        model.add(Conv2D(filters=2, kernel_size=2, padding='same', activation='relu', input_shape=(640, 480, 3)))
        model.add(MaxPooling2D(pool_size=2))
        model.add(Conv2D(filters=4, kernel_size=2, padding='valid', activation='relu'))
        model.add(MaxPooling2D(pool_size=2))
        model.add(Conv2D(filters=8, kernel_size=2, padding='valid', activation='relu'))
        model.add(MaxPooling2D(pool_size=2))
        model.add(Conv2D(filters=16, kernel_size=3, padding='valid', activation='relu'))
        model.add(MaxPooling2D(pool_size=2))
        model.add(Conv2D(filters=32, kernel_size=3, padding='valid', activation='relu'))
        model.add(MaxPooling2D(pool_size=2))
        model.add(Conv2D(filters=64, kernel_size=3, padding='valid', activation='relu'))
        model.add(MaxPooling2D(pool_size=2))
        model.add(Conv2D(filters=128, kernel_size=3, padding='valid', activation='relu'))
        model.add(MaxPooling2D(pool_size=2))
        # Collapse the remaining spatial grid to one vector per filter map.
        model.add(GlobalAveragePooling2D(data_format=None))
        # Classifier head: shrinking dense layers, each followed by dropout.
        model.add(Dense(2048, activation='relu'))
        model.add(Dropout(0.25))
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.25))
        model.add(Dense(512, activation='relu'))
        model.add(Dropout(0.25))
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.25))
        # Two sigmoid outputs (intruder / no-intruder), matched by the
        # binary_crossentropy loss used in the compile helpers below.
        model.add(Dense(2, activation='sigmoid'))
        return model
    def get_model_compiled_single_gpu(self):
        """Return the network compiled for a single GPU / CPU."""
        model = self.get_model()
        # model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
        return model
    def get_model_compiled_multi_gpu(self):
        """Return the network wrapped for data-parallel multi-GPU training."""
        # Replicates `model` on 8 GPUs.
        # This assumes that your machine has 8 available GPUs.
        # NOTE(review): the comment above mentions 8 GPUs but gpus=2 is used
        # below — confirm the intended GPU count.
        model = self.get_model()
        parallel_model = multi_gpu_model(model, gpus=2)
        parallel_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
        #parallel_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
        # parallel_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        return parallel_model
| 2.59375 | 3 |
# linktitle/routes.py
import frappe
from frappe.utils import cstr, unique
@frappe.whitelist()
def title_field(doctype, name):
    """Return the display title for record *name* of *doctype*; falls back to
    the record name when the doctype declares no title field."""
    meta = frappe.get_meta(doctype)
    if not meta.title_field:
        return name
    return frappe.db.get_value(doctype, name, meta.title_field or 'name')
# perf/utils.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
import threading
import sys
import os
import stat
import shutil
import fcntl
import termios
import struct
import copy
import signal
import time
import fcntl
from functools import partial
this = sys.modules[__name__]  # handle on this module, used to hold mutable module-level state
this.__progress_running = False  # True while a transient progress line is still on screen
this.__last_progress = ""  # last progress message, used to suppress consecutive duplicates
TTY = sys.stdout.isatty() and (str(os.environ.get('CI', 'false')) == 'false')  # interactive terminal and not running under CI
def interrupt_stdout() -> None:
    """Finish a pending progress line so the next output starts on a fresh line."""
    if not this.__progress_running:
        return
    sys.stdout.write('\n')
    sys.stdout.flush()
    this.__progress_running = False
def debug(msg) -> None:
    """Print *msg* with a grey "debug" prefix.

    A string is printed on one line; any other sized iterable (e.g. a list of
    lines) is printed with the prefix on the first item and a blank
    continuation prefix on the rest.
    """
    this.__progress_running = False
    if isinstance(msg, str):
        sys.stdout.write('\033[97m debug | \033[0m{0}\033[K\n'.format(msg))
        sys.stdout.flush()
    # collections.abc.Iterable: the bare `collections.Iterable` alias was
    # deprecated since 3.3 and removed in Python 3.10, which made this branch
    # raise AttributeError.
    elif isinstance(msg, collections.abc.Iterable) and len(msg):
        sys.stdout.write('\033[97m debug | \033[0m{0}\033[K\n'.format(msg[0]))
        for chunk in msg[1:]:
            sys.stdout.write('\033[97m | \033[0m{0}\033[K\n'.format(chunk))
        sys.stdout.flush()
def info(msg) -> None:
    """Log an informational message with a magenta "info" prefix."""
    this.__progress_running = False
    sys.stdout.write(f'\033[95m info | \033[0m{msg}\033[K\n')
    sys.stdout.flush()
def progress(msg) -> None:
    """Show a one-line progress message; consecutive duplicates are suppressed."""
    if this.__last_progress == msg:
        return
    this.__last_progress = msg
    body = f'\033[94m | {msg.rstrip()}\033[K'
    if TTY:
        # Interactive terminal: rewrite the same line via carriage return.
        this.__progress_running = True
        sys.stdout.write(body + '\r')
    else:
        # Non-interactive output (e.g. CI): emit a normal log line.
        sys.stdout.write(body + '\n')
    sys.stdout.flush()
def error(msg) -> None:
    """Log an error message with a red "! error" prefix.

    Fix: the clear-to-end-of-line sequence was written as ``[K`` without the
    leading ESC (``\\033``); every sibling logger here emits ``\\033[K``.
    """
    this.__progress_running = False
    sys.stdout.write('\033[91m! error | {0}\033[0m\033[K\n'.format(msg))
    sys.stdout.flush()
def success(msg) -> None:
    """Log a success message with a green "pass" prefix."""
    this.__progress_running = False
    sys.stdout.write(f'\033[92m pass | {msg}\033[0m\033[K\n')
    sys.stdout.flush()
def warn(msg) -> None:
    """Log a warning message with a yellow "warn" prefix."""
    this.__progress_running = False
    sys.stdout.write(f'\033[93m warn | {msg}\033[0m\033[K\n')
    sys.stdout.flush()
class timeit():
    """Time a labelled operation, usable as a context manager or called
    directly with a function to run (``timeit("job")(fn, *args)``)."""

    def __init__(self, label):
        self.__label = label

    def __call__(self, f, *args, **kwargs):
        # Run *f* immediately, timing it exactly like the context-manager path.
        self.__enter__()
        result = f(*args, **kwargs)
        self.__exit__()
        return result

    def __enter__(self):
        self.ts = time.time()
        sys.stdout.write('\033[95m info | \033[0mstarting {0}\033[K\n'.format(self.__label))
        sys.stdout.flush()

    # Fix: defaults make the bare self.__exit__() call in __call__ valid; the
    # original signature required three positional arguments, so using the
    # instance as a callable raised TypeError. The with-statement still passes
    # (exc_type, exc_value, traceback) positionally as before.
    def __exit__(self, exception_type=None, exception_value=None, traceback=None):
        if exception_type == KeyboardInterrupt:
            # Reset terminal colors and skip timing output on Ctrl-C.
            sys.stdout.write('\033[0m')
            sys.stdout.flush()
            return
        te = time.time()
        sys.stdout.write('\033[90m {0} took {1}\033[0m\n'.format(self.__label, human_readable_duration((te - self.ts)*1e3)))
        sys.stdout.flush()
def human_readable_duration(ms):
    """Render a millisecond count like "1 h 2 m", keeping at most the two
    largest non-zero units; sub-millisecond values render as "0 ms"."""
    if ms < 1:
        return "0 ms"
    seconds, ms = divmod(ms, 1e3)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    parts = []
    for amount, unit in ((int(hours), 'h'), (int(minutes), 'm'),
                         (int(seconds), 's'), (int(ms), 'ms')):
        if amount > 0:
            parts.extend((str(amount), unit))
    # Cap at 4 tokens == 2 "amount unit" pairs, largest units first.
    return ' '.join(parts[:4])
class with_deadline():
    """Decorator/context manager that aborts the wrapped call with TimeoutError
    after *timeout* seconds, using SIGALRM (so: Unix, main thread only)."""

    def __init__(self, timeout=None):
        # Only plain ints are accepted (signal.alarm takes whole seconds).
        if not isinstance(timeout, int):
            raise ValueError("invalid timeout")
        self.__timeout = timeout
        self.__ready = False  # becomes True once the decorated function has been captured
        self.__fn = lambda *args: None  # placeholder until decoration happens

    def __get__(self, instance, *args):
        # Descriptor hook so the decorator also works on methods: bind the
        # instance as the first argument of the eventual call.
        return partial(self.__call__, instance)

    def __call__(self, *args, **kwargs):
        # First call is the decoration step and receives the target function;
        # subsequent calls invoke it under the alarm.
        if not self.__ready:
            self.__fn = args[0]
            self.__ready = True
            return self
        with self:
            return self.__fn(*args, **kwargs)

    def __enter__(self):
        def handler(signum, frame):
            raise TimeoutError()
        # Install the handler and start the countdown.
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(self.__timeout)

    def __exit__(self, *args):
        # Cancel any pending alarm on the way out (normal return or exception).
        signal.alarm(0)
| 2.25 | 2 |
# code/main.py
from functions import *
from imports import *
# Best-effort cleanup of helper scripts from a previous run, then fetch a
# fresh copy of the updater from the project repository.
if os.path.exists("setup.py"):
    os.remove("setup.py")
if os.path.exists("updater.py"):
    os.remove("updater.py")
wget.download("https://raw.githubusercontent.com/Samthebest999/AI-Friend/main/code/updater.py")
def update():
    """Compare the local version.toml against the repository copy and hand off
    to updater.py (then exit) when they differ.

    Fixes over the original: the local file is only read if it exists (the
    original read it before the existence check and crashed on a fresh
    install), file handles are closed via ``with`` instead of leaking, and the
    two mutually-exclusive ``if`` comparisons are collapsed into one branch.
    """
    current_toml = None
    if os.path.exists("version.toml"):
        with open("version.toml", "r") as f:
            current_toml = f.read()
        os.remove("version.toml")
    vtu = "https://raw.githubusercontent.com/Samthebest999/AI-Friend/main/code/version.toml"
    wget.download(vtu)
    with open("version.toml", "r") as f:
        new_open_toml = f.read()
    if current_toml != new_open_toml:
        working_dir = os.getcwd()
        os.system(working_dir + "\\python\\python.exe updater.py")
        quit(say_stuff("Updating...", False))
def recognize_speech():
    """Capture one utterance from the default microphone and store the Google
    Speech API transcript in the module-global *users_text*."""
    r = sr.Recognizer()
    mic = sr.Microphone()
    with mic as source:
        # Fix: calibrate for ambient noise BEFORE listening. The original
        # called adjust_for_ambient_noise() after listen(), which cannot
        # affect audio that was already captured.
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)
    global users_text
    users_text = r.recognize_google(audio)
update()  # check for a newer release once at startup
# TODO GET A BETTER NAME FOR THE PROGRAM!
class VoiceRecogError(Exception):
    """Raised when microphone capture or speech recognition fails."""
class AI:
    # NOTE(review): everything below runs in the CLASS BODY, i.e. at import /
    # class-definition time, not on instantiation — the voice loop starts as
    # soon as this module defines the class. Presumably intentional as a
    # quick-and-dirty main loop; confirm before refactoring.
    stuff = 0  # loop flag; never changed, so the loop below runs forever
    ouns = open("UserNameStuff.txt", "r")  # stored user name
    rouns = ouns.read()
    ouns.close()
    while stuff == 0:
        say_stuff("Hi, " + rouns, False)
        try:
            recognize_speech()  # fills the module-global users_text
        except:
            # Any failure (no mic, noise, unrecognized speech) ends the loop
            # with a spoken + raised error.
            raise VoiceRecogError(say_stuff(
                "Microphone error, Possible reasons: Too much background noise, microphone off, couldn't hear you, "
                "your accent, or just program couldn't recognize your voice",
                False), "Microphone error! Possible reasons: Too much background noise, Your microphone is off, "
                        "Something blocking your microphone? "
                        "or"
                        "your accent, Maybe I couldn't recognize your voice?")
        user_input = users_text
        # Dispatch on the recognized phrase.
        if user_input == "help":
            help_dict = {"Hi": "Says \"Hello\" back to you", "browser": "Asks you which website you would like to "
                                                                       "open, then opens it"}
            help_menu = """Hi Welcome To The Help Menu!\n""" + str(help_dict)
            print(help_menu)
            say_stuff(help_menu, False)
        if user_input == "hi":
            say_stuff("Hello, " + rouns, False)
        if user_input == "browser":
            # Launch the bundled browser helper with the bundled interpreter.
            working_dir = os.getcwd()
            os.system(working_dir + "\\python\\python.exe browser.py")
        if user_input == "update":
            update()
| 2.671875 | 3 |
# gui/serializers.py
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """REST serializer for Django auth users (hyperlinked representation)."""
    class Meta:
        model = User
        fields = ('url', 'username', 'email', 'groups')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """REST serializer for Django auth groups (hyperlinked representation)."""
    class Meta:
        model = Group
        fields = ('url', 'name')
class BookSerializer(serializers.ModelSerializer):
    """Serialize every Book field; related objects are expanded one level (depth=1)."""
    #class BookSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Book
        fields = '__all__'
        depth = 1
class TagSerializer(serializers.ModelSerializer):
    """Serialize every Tag field."""
    #class TagSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Tag
        fields = '__all__'
class CategorySerializer(serializers.ModelSerializer):
    """Serialize every Category field."""
    #class CategorySerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = Category
        fields = '__all__'
| 2.078125 | 2 |
# gen-py/airr/ttypes.py
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
    from thrift.protocol import fastbinary  # optional C accelerator for (de)serialization
except:
    fastbinary = None  # fall back to the pure-Python protocol implementation
class Origin(object):
    """Thrift-generated enum: subject origin, with name/value lookup tables."""
    HUMAN = 0
    ANIMAL = 1
    SYNTHETIC = 2

    _VALUES_TO_NAMES = {
        0: "HUMAN",
        1: "ANIMAL",
        2: "SYNTHETIC",
    }

    _NAMES_TO_VALUES = {
        "HUMAN": 0,
        "ANIMAL": 1,
        "SYNTHETIC": 2,
    }
class Sex(object):
    """Thrift-generated enum: subject sex, with name/value lookup tables."""
    MALE = 0
    FEMALE = 1

    _VALUES_TO_NAMES = {
        0: "MALE",
        1: "FEMALE",
    }

    _NAMES_TO_VALUES = {
        "MALE": 0,
        "FEMALE": 1,
    }
class Locus(object):
    """Thrift-generated enum: immune receptor locus, with name/value lookup tables."""
    IGH = 0
    IGK = 1
    IGL = 2
    TRB = 3
    TRA = 4
    TRG = 5
    TRD = 6

    _VALUES_TO_NAMES = {
        0: "IGH",
        1: "IGK",
        2: "IGL",
        3: "TRB",
        4: "TRA",
        5: "TRG",
        6: "TRD",
    }

    _NAMES_TO_VALUES = {
        "IGH": 0,
        "IGK": 1,
        "IGL": 2,
        "TRB": 3,
        "TRA": 4,
        "TRG": 5,
        "TRD": 6,
    }
class Study(object):
    """
    Attributes:
     - id
     - title
     - labName
     - correspondence
     - doi
    """

    # Generated by the Thrift compiler (0.9.3); do not hand-edit the wire logic.
    # NOTE(review): __repr__ uses dict.iteritems(), which exists only on Python 2.

    # (field id, field type, name, type args, default), indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'id', None, None, ),  # 1
        (2, TType.STRING, 'title', None, None, ),  # 2
        (3, TType.STRING, 'labName', None, None, ),  # 3
        (4, TType.STRING, 'correspondence', None, None, ),  # 4
        (5, TType.STRING, 'doi', None, None, ),  # 5
    )

    def __init__(self, id=None, title=None, labName=None, correspondence=None, doi=None,):
        self.id = id
        self.title = title
        self.labName = labName
        self.correspondence = correspondence
        self.doi = doi

    def read(self, iprot):
        # Fast path: decode with the C accelerator when the transport supports it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.title = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.labName = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.correspondence = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRING:
                    self.doi = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: encode with the C accelerator when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Study')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id)
            oprot.writeFieldEnd()
        if self.title is not None:
            oprot.writeFieldBegin('title', TType.STRING, 2)
            oprot.writeString(self.title)
            oprot.writeFieldEnd()
        if self.labName is not None:
            oprot.writeFieldBegin('labName', TType.STRING, 3)
            oprot.writeString(self.labName)
            oprot.writeFieldEnd()
        if self.correspondence is not None:
            oprot.writeFieldBegin('correspondence', TType.STRING, 4)
            oprot.writeString(self.correspondence)
            oprot.writeFieldEnd()
        if self.doi is not None:
            oprot.writeFieldBegin('doi', TType.STRING, 5)
            oprot.writeString(self.doi)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.id)
        value = (value * 31) ^ hash(self.title)
        value = (value * 31) ^ hash(self.labName)
        value = (value * 31) ^ hash(self.correspondence)
        value = (value * 31) ^ hash(self.doi)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Subject(object):
    """
    Attributes:
     - id
     - origin
     - sex
     - age
    """

    # Generated by the Thrift compiler (0.9.3); do not hand-edit the wire logic.
    # NOTE(review): __repr__ uses dict.iteritems(), which exists only on Python 2.

    # (field id, field type, name, type args, default), indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'id', None, None, ),  # 1
        (2, TType.I32, 'origin', None, None, ),  # 2
        (3, TType.I32, 'sex', None, None, ),  # 3
        (4, TType.I32, 'age', None, None, ),  # 4
    )

    def __init__(self, id=None, origin=None, sex=None, age=None,):
        self.id = id
        self.origin = origin
        self.sex = sex
        self.age = age

    def read(self, iprot):
        # Fast path: decode with the C accelerator when the transport supports it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.origin = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I32:
                    self.sex = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.age = iprot.readI32()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: encode with the C accelerator when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Subject')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id)
            oprot.writeFieldEnd()
        if self.origin is not None:
            oprot.writeFieldBegin('origin', TType.I32, 2)
            oprot.writeI32(self.origin)
            oprot.writeFieldEnd()
        if self.sex is not None:
            oprot.writeFieldBegin('sex', TType.I32, 3)
            oprot.writeI32(self.sex)
            oprot.writeFieldEnd()
        if self.age is not None:
            oprot.writeFieldBegin('age', TType.I32, 4)
            oprot.writeI32(self.age)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.id)
        value = (value * 31) ^ hash(self.origin)
        value = (value * 31) ^ hash(self.sex)
        value = (value * 31) ^ hash(self.age)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Sample(object):
    """
    Attributes:
     - id
     - type
     - source
     - dt
    """

    # Generated by the Thrift compiler (0.9.3); do not hand-edit the wire logic.
    # NOTE(review): __repr__ uses dict.iteritems(), which exists only on Python 2.

    # (field id, field type, name, type args, default), indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'id', None, None, ),  # 1
        (2, TType.STRING, 'type', None, None, ),  # 2
        (3, TType.STRING, 'source', None, None, ),  # 3
        (4, TType.STRING, 'dt', None, None, ),  # 4
    )

    def __init__(self, id=None, type=None, source=None, dt=None,):
        self.id = id
        self.type = type
        self.source = source
        self.dt = dt

    def read(self, iprot):
        # Fast path: decode with the C accelerator when the transport supports it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.type = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.source = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.dt = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: encode with the C accelerator when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Sample')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id)
            oprot.writeFieldEnd()
        if self.type is not None:
            oprot.writeFieldBegin('type', TType.STRING, 2)
            oprot.writeString(self.type)
            oprot.writeFieldEnd()
        if self.source is not None:
            oprot.writeFieldBegin('source', TType.STRING, 3)
            oprot.writeString(self.source)
            oprot.writeFieldEnd()
        if self.dt is not None:
            oprot.writeFieldBegin('dt', TType.STRING, 4)
            oprot.writeString(self.dt)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.id)
        value = (value * 31) ^ hash(self.type)
        value = (value * 31) ^ hash(self.source)
        value = (value * 31) ^ hash(self.dt)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class Chain(object):
    """
    Attributes:
     - id
     - ntSeq
     - locus
     - vAllele
     - dAllele
     - jAllele
     - alignment
     - ntCDR3
    """

    # Generated by the Thrift compiler (0.9.3); do not hand-edit the wire logic.
    # NOTE(review): __repr__ uses dict.iteritems(), which exists only on Python 2.

    # (field id, field type, name, type args, default), indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'id', None, None, ),  # 1
        (2, TType.STRING, 'ntSeq', None, None, ),  # 2
        (3, TType.I32, 'locus', None, None, ),  # 3
        (4, TType.STRING, 'vAllele', None, None, ),  # 4
        (5, TType.STRING, 'dAllele', None, None, ),  # 5
        (6, TType.STRING, 'jAllele', None, None, ),  # 6
        (7, TType.STRING, 'alignment', None, None, ),  # 7
        (8, TType.STRING, 'ntCDR3', None, None, ),  # 8
    )

    def __init__(self, id=None, ntSeq=None, locus=None, vAllele=None, dAllele=None, jAllele=None, alignment=None, ntCDR3=None,):
        self.id = id
        self.ntSeq = ntSeq
        self.locus = locus
        self.vAllele = vAllele
        self.dAllele = dAllele
        self.jAllele = jAllele
        self.alignment = alignment
        self.ntCDR3 = ntCDR3

    def read(self, iprot):
        # Fast path: decode with the C accelerator when the transport supports it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.id = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.ntSeq = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.I32:
                    self.locus = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.STRING:
                    self.vAllele = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 5:
                if ftype == TType.STRING:
                    self.dAllele = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 6:
                if ftype == TType.STRING:
                    self.jAllele = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 7:
                if ftype == TType.STRING:
                    self.alignment = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 8:
                if ftype == TType.STRING:
                    self.ntCDR3 = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: encode with the C accelerator when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('Chain')
        if self.id is not None:
            oprot.writeFieldBegin('id', TType.STRING, 1)
            oprot.writeString(self.id)
            oprot.writeFieldEnd()
        if self.ntSeq is not None:
            oprot.writeFieldBegin('ntSeq', TType.STRING, 2)
            oprot.writeString(self.ntSeq)
            oprot.writeFieldEnd()
        if self.locus is not None:
            oprot.writeFieldBegin('locus', TType.I32, 3)
            oprot.writeI32(self.locus)
            oprot.writeFieldEnd()
        if self.vAllele is not None:
            oprot.writeFieldBegin('vAllele', TType.STRING, 4)
            oprot.writeString(self.vAllele)
            oprot.writeFieldEnd()
        if self.dAllele is not None:
            oprot.writeFieldBegin('dAllele', TType.STRING, 5)
            oprot.writeString(self.dAllele)
            oprot.writeFieldEnd()
        if self.jAllele is not None:
            oprot.writeFieldBegin('jAllele', TType.STRING, 6)
            oprot.writeString(self.jAllele)
            oprot.writeFieldEnd()
        if self.alignment is not None:
            oprot.writeFieldBegin('alignment', TType.STRING, 7)
            oprot.writeString(self.alignment)
            oprot.writeFieldEnd()
        if self.ntCDR3 is not None:
            oprot.writeFieldBegin('ntCDR3', TType.STRING, 8)
            oprot.writeString(self.ntCDR3)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.id)
        value = (value * 31) ^ hash(self.ntSeq)
        value = (value * 31) ^ hash(self.locus)
        value = (value * 31) ^ hash(self.vAllele)
        value = (value * 31) ^ hash(self.dAllele)
        value = (value * 31) ^ hash(self.jAllele)
        value = (value * 31) ^ hash(self.alignment)
        value = (value * 31) ^ hash(self.ntCDR3)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class FindCdr3Req(object):
    """
    Attributes:
     - cdr3
     - locus
     - studyIds
    """

    # Generated by the Thrift compiler (0.9.3); do not hand-edit the wire logic.
    # NOTE(review): Python 2 only -- read() uses xrange and __repr__ uses
    # dict.iteritems(), both removed in Python 3.

    # (field id, field type, name, type args, default), indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'cdr3', None, None, ),  # 1
        (2, TType.I32, 'locus', None, None, ),  # 2
        (3, TType.LIST, 'studyIds', (TType.STRING,None), None, ),  # 3
    )

    def __init__(self, cdr3=None, locus=None, studyIds=None,):
        self.cdr3 = cdr3
        self.locus = locus
        self.studyIds = studyIds

    def read(self, iprot):
        # Fast path: decode with the C accelerator when the transport supports it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.cdr3 = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.locus = iprot.readI32()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.LIST:
                    self.studyIds = []
                    (_etype3, _size0) = iprot.readListBegin()
                    for _i4 in xrange(_size0):
                        _elem5 = iprot.readString()
                        self.studyIds.append(_elem5)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: encode with the C accelerator when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('FindCdr3Req')
        if self.cdr3 is not None:
            oprot.writeFieldBegin('cdr3', TType.STRING, 1)
            oprot.writeString(self.cdr3)
            oprot.writeFieldEnd()
        if self.locus is not None:
            oprot.writeFieldBegin('locus', TType.I32, 2)
            oprot.writeI32(self.locus)
            oprot.writeFieldEnd()
        if self.studyIds is not None:
            oprot.writeFieldBegin('studyIds', TType.LIST, 3)
            oprot.writeListBegin(TType.STRING, len(self.studyIds))
            for iter6 in self.studyIds:
                oprot.writeString(iter6)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # cdr3 and locus are required by the IDL; studyIds is optional.
        if self.cdr3 is None:
            raise TProtocol.TProtocolException(message='Required field cdr3 is unset!')
        if self.locus is None:
            raise TProtocol.TProtocolException(message='Required field locus is unset!')
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.cdr3)
        value = (value * 31) ^ hash(self.locus)
        value = (value * 31) ^ hash(self.studyIds)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class FindCdr3Resp(object):
    """
    Attributes:
     - found
     - specificities
    """

    # Generated by the Thrift compiler (0.9.3); do not hand-edit the wire logic.
    # NOTE(review): Python 2 only -- read() uses xrange and __repr__ uses
    # dict.iteritems(), both removed in Python 3.

    # (field id, field type, name, type args, default), indexed by field id.
    thrift_spec = (
        None,  # 0
        (1, TType.BOOL, 'found', None, None, ),  # 1
        (2, TType.LIST, 'specificities', (TType.STRING,None), None, ),  # 2
    )

    def __init__(self, found=None, specificities=None,):
        self.found = found
        self.specificities = specificities

    def read(self, iprot):
        # Fast path: decode with the C accelerator when the transport supports it.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.BOOL:
                    self.found = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.specificities = []
                    (_etype10, _size7) = iprot.readListBegin()
                    for _i11 in xrange(_size7):
                        _elem12 = iprot.readString()
                        self.specificities.append(_elem12)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: encode with the C accelerator when available.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('FindCdr3Resp')
        if self.found is not None:
            oprot.writeFieldBegin('found', TType.BOOL, 1)
            oprot.writeBool(self.found)
            oprot.writeFieldEnd()
        if self.specificities is not None:
            oprot.writeFieldBegin('specificities', TType.LIST, 2)
            oprot.writeListBegin(TType.STRING, len(self.specificities))
            for iter13 in self.specificities:
                oprot.writeString(iter13)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # found is required by the IDL; specificities is optional.
        if self.found is None:
            raise TProtocol.TProtocolException(message='Required field found is unset!')
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(self.found)
        value = (value * 31) ^ hash(self.specificities)
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
| 1.914063 | 2 |
# lib/software_update.py
from Configuration import config
from helper import wifi_lock, led_lock
import machine
import pycom
import time
from network import WLAN
from OTA import WiFiOTA
def software_update(logger):
    """
    Connects to the wlan and fetches updates from a server. After having applied the patches successfully, it reboots
    the device.
    :param logger: status logger
    :type logger: LoggerFactory object
    """
    with wifi_lock:
        try:
            logger.info("Over the Air update started")
            led_lock.acquire(1)  # disable all other indicator LEDs
            pycom.rgbled(0x555500)  # Yellow LED
            # Get credentials from configuration.
            ssid = config.get_config("SSID")
            # Fix: this line had been mangled to "<PASSWORD>.get_config(...)" by a
            # credential scrubber (a syntax error); the password comes from the
            # same config store as the SSID.
            password = config.get_config("wifi_password")
            server_ip = config.get_config("server")
            port = int(config.get_config("port"))
            logger.info("SSID: " + str(ssid))
            logger.info("server_ip: " + str(server_ip))
            logger.info("port: " + str(port))
            version = config.get_config("code_version")
            # Perform OTA update
            ota = WiFiOTA(logger, ssid, password, server_ip, port, version)
            # Turn off WiFi to save power
            w = WLAN()
            w.deinit()
            logger.info("connecting...")
            ota.connect()
            if ota.update():
                new_version = str(ota.get_current_version())  # get updated version
                config.save_config({"code_version": str(new_version)})  # save new version to config on SD
                logger.info(
                    "Successfully updated the device from version {} to version {}".format(version, new_version))
        except Exception:
            # Best-effort: log, flash red, then fall through to cleanup/reboot.
            logger.exception("Failed to update the device")
            pycom.rgbled(0x550000)  # turn LED to RED
            time.sleep(3)
        finally:
            # Turn off update mode
            config.save_config({"update": False})
            # Turn off indicator LED
            pycom.rgbled(0x000000)
            led_lock.release()
            # Reboot the device to apply patches
            logger.info("rebooting...")
            machine.reset()
| 2.765625 | 3 |
### Stupid, but extensible spam detector

# Substrings whose presence (case-insensitive) in an email or username flags
# the request as spam. Extend this list to add new rules at runtime.
STOP_SPAM_WORDS = ["Loan", "Lender", ".trade", ".bid", ".men", ".win"]


def is_spam(request):
    """Return True when the request's 'email' or 'username' form field
    contains any stop word (case-insensitive); otherwise False.

    The original duplicated the same check once per field; this iterates the
    fields instead, keeping the word list dynamic so it stays extensible.
    """
    for field in ("email", "username"):
        if field in request.form:
            value = request.form[field].upper()
            if any(word.upper() in value for word in STOP_SPAM_WORDS):
                return True
    return False
| 2.890625 | 3 |
# examples/tensorflow-adversarial-patches/src/generate_patch.py
#!/usr/bin/env python
# This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
import os
from pathlib import Path
from typing import Dict, List
import click
import mlflow
import numpy as np
import structlog
from prefect import Flow, Parameter
from prefect.utilities.logging import get_logger as get_prefect_logger
from structlog.stdlib import BoundLogger
from mitre.securingai import pyplugs
from mitre.securingai.sdk.utilities.contexts import plugin_dirs
from mitre.securingai.sdk.utilities.logging import (
StderrLogStream,
StdoutLogStream,
attach_stdout_stream_handler,
clear_logger_handlers,
configure_structlog,
set_logging_level,
)
# Import paths under which the Dioptra plugin collections are registered.
_CUSTOM_PLUGINS_IMPORT_PATH: str = "securingai_custom"
_PLUGINS_IMPORT_PATH: str = "securingai_builtins"
# Name/function pairs for the image-distance metrics reported for adversarial
# examples; "func" is a callable name resolved elsewhere (presumably by the
# metrics plugin -- TODO confirm against the plugin registry).
DISTANCE_METRICS: List[Dict[str, str]] = [
    {"name": "l_infinity_norm", "func": "l_inf_norm"},
    {"name": "l_1_norm", "func": "l_1_norm"},
    {"name": "l_2_norm", "func": "l_2_norm"},
    {"name": "cosine_similarity", "func": "paired_cosine_similarities"},
    {"name": "euclidean_distance", "func": "paired_euclidean_distances"},
    {"name": "manhattan_distance", "func": "paired_manhattan_distances"},
    {"name": "wasserstein_distance", "func": "paired_wasserstein_distances"},
]
# Module-level structlog logger.
LOGGER: BoundLogger = structlog.stdlib.get_logger()
def _map_norm(ctx, param, value):
norm_mapping: Dict[str, float] = {"inf": np.inf, "1": 1, "2": 2}
processed_norm: float = norm_mapping[value]
return processed_norm
def _coerce_comma_separated_ints(ctx, param, value):
return tuple(int(x.strip()) for x in value.split(","))
def _coerce_int_to_bool(ctx, param, value):
return bool(int(value))
@click.command()
@click.option(
    "--data-dir",
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, resolve_path=True, readable=True
    ),
    help="Root directory for NFS mounted datasets (in container)",
)
@click.option(
    "--image-size",
    type=click.STRING,
    callback=_coerce_comma_separated_ints,
    help="Dimensions for the input images",
)
@click.option(
    "--adv-tar-name",
    type=click.STRING,
    default="adversarial_patch.tar.gz",
    help="Name to give to tarfile artifact containing patches",
)
@click.option(
    "--adv-data-dir",
    type=click.STRING,
    default="adv_patches",
    help="Directory for saving adversarial patches",
)
@click.option(
    "--model-name",
    type=click.STRING,
    help="Name of model to load from registry",
)
@click.option(
    "--model-version",
    type=click.STRING,
    help="Version of model to load from registry",
)
@click.option(
    "--rotation-max",
    type=click.FLOAT,
    help="The maximum rotation applied to random patches. \
    The value is expected to be in the range `[0, 180]` ",
    default=22.5,
)
@click.option(
    "--scale-min",
    type=click.FLOAT,
    help="The minimum scaling applied to random patches. \
    The value should be in the range `[0, 1]`, but less than `scale_max` ",
    default=0.1,
)
@click.option(
    "--scale-max",
    type=click.FLOAT,
    help="The maximum scaling applied to random patches. \
    The value should be in the range `[0, 1]`, but larger than `scale_min.` ",
    default=1.0,
)
@click.option(
    "--learning-rate",
    type=click.FLOAT,
    help="The learning rate of the patch attack optimization procedure. ",
    default=5.0,
)
@click.option(
    "--max-iter",
    type=click.INT,
    help=" The number of patch optimization steps. ",
    default=500,
)
@click.option(
    "--patch-target",
    type=click.INT,
    help=" The target class index of the generated patch. Negative numbers will generate randomized id labels.",
    default=-1,
)
@click.option(
    "--num-patch",
    type=click.INT,
    help=" The number of patches generated. Each adversarial image recieves one patch. ",
    default=1,
)
@click.option(
    "--num-patch-gen-samples",
    type=click.INT,
    help=" The number of sample images used to generate each patch. ",
    default=10,
)
@click.option(
    "--imagenet-preprocessing",
    type=click.BOOL,
    help="If true, initializes model with Imagenet image preprocessing settings.",
    default=False,
)
@click.option(
    "--seed",
    type=click.INT,
    help="Set the entry point rng seed",
    default=-1,
)
def patch_attack(
    data_dir,
    image_size,
    adv_tar_name,
    adv_data_dir,
    rotation_max,
    scale_min,
    scale_max,
    learning_rate,
    max_iter,
    patch_target,
    num_patch,
    num_patch_gen_samples,
    model_name,
    model_version,
    imagenet_preprocessing,
    seed,
    patch_shape=None,
):
    """MLflow CLI entry point: generate adversarial patches for a registered
    model and upload the resulting patch artifacts to the active run.

    ``patch_shape`` is not exposed as a CLI option; it defaults to None and
    lets the attack derive the shape from the input images.
    """
    # Log every effective parameter before the run starts so the experiment
    # configuration is reconstructable from the job logs.
    LOGGER.info(
        "Execute MLFlow entry point",
        entry_point="gen_patch",
        data_dir=data_dir,
        image_size=image_size,
        adv_tar_name=adv_tar_name,
        adv_data_dir=adv_data_dir,
        model_name=model_name,
        model_version=model_version,
        patch_target=patch_target,
        num_patch=num_patch,
        num_patch_gen_samples=num_patch_gen_samples,
        rotation_max=rotation_max,
        scale_min=scale_min,
        scale_max=scale_max,
        learning_rate=learning_rate,
        max_iter=max_iter,
        imagenet_preprocessing=imagenet_preprocessing,
        seed=seed,
    )

    # Three-channel images are treated as 8-bit RGB in [0, 255]; anything
    # else is assumed to be already scaled to [0, 1].
    # NOTE(review): assumes image_size is (height, width, channels) — confirm.
    clip_values: Tuple[float, float] = (0, 255) if image_size[2] == 3 else (0, 1)

    # With ImageNet preprocessing the wrapped classifier rescales internally,
    # so raw pixel values pass through unchanged; otherwise scale to [0, 1].
    if imagenet_preprocessing:
        rescale = 1.0
    else:
        rescale = 1.0 / 255

    with mlflow.start_run() as active_run:  # noqa: F841
        flow: Flow = init_gen_patch_flow()
        state = flow.run(
            parameters=dict(
                testing_dir=Path(data_dir),
                image_size=image_size,
                rescale=rescale,
                clip_values=clip_values,
                adv_tar_name=adv_tar_name,
                # Patches are written under the job's working directory.
                adv_data_dir=(Path.cwd() / adv_data_dir).resolve(),
                model_name=model_name,
                model_version=model_version,
                patch_target=patch_target,
                num_patch=num_patch,
                num_patch_gen_samples=num_patch_gen_samples,
                rotation_max=rotation_max,
                scale_min=scale_min,
                scale_max=scale_max,
                learning_rate=learning_rate,
                max_iter=max_iter,
                patch_shape=patch_shape,
                imagenet_preprocessing=imagenet_preprocessing,
                seed=seed,
            )
        )

    return state
def init_gen_patch_flow() -> Flow:
    """Build the Prefect flow for adversarial patch generation.

    The flow wires together: rng seeding, TensorFlow initialization, output
    directory creation, classifier loading from the model registry, the
    patch attack itself, and upload of the patches as a tarball artifact.
    """
    # NOTE(review): the flow name below appears copy-pasted from the Fast
    # Gradient Method entry point; consider renaming to describe this attack.
    with Flow("Fast Gradient Method") as flow:
        # Declare all flow parameters up front; values are supplied by
        # flow.run(parameters=...) in patch_attack().
        (
            testing_dir,
            image_size,
            rescale,
            clip_values,
            adv_tar_name,
            adv_data_dir,
            model_name,
            model_version,
            rotation_max,
            scale_min,
            scale_max,
            learning_rate,
            max_iter,
            patch_target,
            num_patch,
            num_patch_gen_samples,
            imagenet_preprocessing,
            patch_shape,
            seed,
        ) = (
            Parameter("testing_dir"),
            Parameter("image_size"),
            Parameter("rescale"),
            Parameter("clip_values"),
            Parameter("adv_tar_name"),
            Parameter("adv_data_dir"),
            Parameter("model_name"),
            Parameter("model_version"),
            Parameter("rotation_max"),
            Parameter("scale_min"),
            Parameter("scale_max"),
            Parameter("learning_rate"),
            Parameter("max_iter"),
            Parameter("patch_target"),
            Parameter("num_patch"),
            Parameter("num_patch_gen_samples"),
            Parameter("imagenet_preprocessing"),
            Parameter("patch_shape"),
            Parameter("seed"),
        )
        # Derive deterministic per-concern sub-seeds from the entry-point seed.
        seed, rng = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.random", "rng", "init_rng", seed=seed
        )
        tensorflow_global_seed = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.random", "sample", "draw_random_integer", rng=rng
        )
        # dataset_seed is only recorded via log_parameters below.
        dataset_seed = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.random", "sample", "draw_random_integer", rng=rng
        )
        init_tensorflow_results = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.backend_configs",
            "tensorflow",
            "init_tensorflow",
            seed=tensorflow_global_seed,
        )
        make_directories_results = pyplugs.call_task(
            f"{_PLUGINS_IMPORT_PATH}.artifacts",
            "utils",
            "make_directories",
            dirs=[adv_data_dir],
        )
        # Record the effective seeds on the MLflow run.
        log_mlflow_params_result = pyplugs.call_task(  # noqa: F841
            f"{_PLUGINS_IMPORT_PATH}.tracking",
            "mlflow",
            "log_parameters",
            parameters=dict(
                entry_point_seed=seed,
                tensorflow_global_seed=tensorflow_global_seed,
                dataset_seed=dataset_seed,
            ),
        )
        # Load the registered classifier (requires TensorFlow to be set up).
        keras_classifier = pyplugs.call_task(
            f"{_CUSTOM_PLUGINS_IMPORT_PATH}.custom_patch_plugins",
            "registry_art",
            "load_wrapped_tensorflow_keras_classifier",
            name=model_name,
            version=model_version,
            clip_values=clip_values,
            imagenet_preprocessing=imagenet_preprocessing,
            upstream_tasks=[init_tensorflow_results],
        )
        # Run the patch attack; output directory must exist first.
        patch_dir = pyplugs.call_task(
            f"{_CUSTOM_PLUGINS_IMPORT_PATH}.custom_patch_plugins",
            "attacks_patch",
            "create_adversarial_patches",
            data_dir=testing_dir,
            keras_classifier=keras_classifier,
            adv_data_dir=adv_data_dir,
            image_size=image_size,
            rescale=rescale,
            patch_target=patch_target,
            num_patch=num_patch,
            num_patch_samples=num_patch_gen_samples,
            rotation_max=rotation_max,
            scale_min=scale_min,
            scale_max=scale_max,
            learning_rate=learning_rate,
            max_iter=max_iter,
            patch_shape=patch_shape,
            upstream_tasks=[make_directories_results],
        )
        # Bundle the generated patches and upload them as a run artifact.
        log_evasion_dataset_result = pyplugs.call_task(  # noqa: F841
            f"{_PLUGINS_IMPORT_PATH}.artifacts",
            "mlflow",
            "upload_directory_as_tarball_artifact",
            source_dir=adv_data_dir,
            tarball_filename=adv_tar_name,
            upstream_tasks=[patch_dir],
        )
    return flow
if __name__ == "__main__":
    # Configure structured logging for the job before invoking the CLI.
    log_level: str = os.getenv("AI_JOB_LOG_LEVEL", default="INFO")
    # Any non-empty value of AI_JOB_LOG_AS_JSON switches to JSON log output.
    as_json: bool = True if os.getenv("AI_JOB_LOG_AS_JSON") else False

    clear_logger_handlers(get_prefect_logger())
    attach_stdout_stream_handler(as_json)
    set_logging_level(log_level)
    configure_structlog()

    # Put plugin directories on the import path and route stdout/stderr
    # through the structured log streams for the duration of the run.
    with plugin_dirs(), StdoutLogStream(as_json), StderrLogStream(as_json):
        _ = patch_attack()
| 1.6875 | 2 |
manage/config.py | augustand/kervice | 1 | 12764801 | <reponame>augustand/kervice<filename>manage/config.py
import logging
class Config(object):
    """Environment-specific configuration factory.

    Each accessor (``dev``, ``stage``, ``pro``) returns the shared settings
    dict with the logging level adjusted for that environment.
    """

    def __init__(self):
        # Base settings; only the log level varies between environments.
        self.__cfg = {
            "log": {
                "format": "PID %(process)d %(asctime)s %(levelname)-5s %(threadName)-10s [%(lineno)d]%(name)-15s %(message)s",
                "datefmt": "%Y-%m-%d %H:%M:%S",
                "filename": "",
                "level": logging.DEBUG
            },
        }

    def __with_level(self, level):
        # All accessors share one dict; set the level and hand it back.
        self.__cfg["log"]["level"] = level
        return self.__cfg

    def dev(self):
        """Return the development-environment configuration."""
        return self.__with_level(logging.DEBUG)

    def stage(self):
        """Return the staging/test-environment configuration."""
        return self.__with_level(logging.DEBUG)

    def pro(self):
        """Return the production-environment configuration."""
        return self.__with_level(logging.ERROR)
| 2.453125 | 2 |
tests/test_inlinequeryresultlocation.py | ehsanbarkhordar/botcup | 1 | 12764802 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import (InputTextMessageContent, InlineQueryResultLocation, InlineKeyboardButton,
InlineQueryResultVoice, InlineKeyboardMarkup)
@pytest.fixture(scope='class')
def inline_query_result_location():
    """Class-scoped fixture building an InlineQueryResultLocation from the
    constants defined on TestInlineQueryResultLocation below."""
    return InlineQueryResultLocation(
        TestInlineQueryResultLocation.id,
        TestInlineQueryResultLocation.latitude,
        TestInlineQueryResultLocation.longitude,
        TestInlineQueryResultLocation.title,
        live_period=TestInlineQueryResultLocation.live_period,
        thumb_url=TestInlineQueryResultLocation.thumb_url,
        thumb_width=TestInlineQueryResultLocation.thumb_width,
        thumb_height=TestInlineQueryResultLocation.thumb_height,
        input_message_content=TestInlineQueryResultLocation.input_message_content,
        reply_markup=TestInlineQueryResultLocation.reply_markup)
class TestInlineQueryResultLocation(object):
    """Tests for telegram.InlineQueryResultLocation construction,
    serialization and equality semantics."""

    # Expected attribute values shared by the fixture and the assertions.
    id = 'id'
    type = 'location'
    latitude = 0.0
    longitude = 1.0
    title = 'title'
    live_period = 70
    thumb_url = 'thumb url'
    thumb_width = 10
    thumb_height = 15
    input_message_content = InputTextMessageContent('input_message_content')
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])

    def test_expected_values(self, inline_query_result_location):
        """Every constructor argument is exposed as an attribute unchanged."""
        assert inline_query_result_location.id == self.id
        assert inline_query_result_location.type == self.type
        assert inline_query_result_location.latitude == self.latitude
        assert inline_query_result_location.longitude == self.longitude
        assert inline_query_result_location.title == self.title
        assert inline_query_result_location.live_period == self.live_period
        assert inline_query_result_location.thumb_url == self.thumb_url
        assert inline_query_result_location.thumb_width == self.thumb_width
        assert inline_query_result_location.thumb_height == self.thumb_height
        assert (inline_query_result_location.input_message_content.to_dict()
                == self.input_message_content.to_dict())
        assert inline_query_result_location.reply_markup.to_dict() == self.reply_markup.to_dict()

    def test_to_dict(self, inline_query_result_location):
        """to_dict() round-trips every attribute into a plain dict."""
        inline_query_result_location_dict = inline_query_result_location.to_dict()
        assert isinstance(inline_query_result_location_dict, dict)
        assert inline_query_result_location_dict['id'] == inline_query_result_location.id
        assert inline_query_result_location_dict['type'] == inline_query_result_location.type
        assert (inline_query_result_location_dict['latitude']
                == inline_query_result_location.latitude)
        assert (inline_query_result_location_dict['longitude']
                == inline_query_result_location.longitude)
        assert inline_query_result_location_dict['title'] == inline_query_result_location.title
        assert (inline_query_result_location_dict['live_period']
                == inline_query_result_location.live_period)
        assert (inline_query_result_location_dict['thumb_url']
                == inline_query_result_location.thumb_url)
        assert (inline_query_result_location_dict['thumb_width']
                == inline_query_result_location.thumb_width)
        assert (inline_query_result_location_dict['thumb_height']
                == inline_query_result_location.thumb_height)
        assert (inline_query_result_location_dict['input_message_content']
                == inline_query_result_location.input_message_content.to_dict())
        assert (inline_query_result_location_dict['reply_markup']
                == inline_query_result_location.reply_markup.to_dict())

    def test_equality(self):
        """Equality/hash are driven by the result id (and type), not by the
        coordinate payload: a == c below despite differing latitude."""
        a = InlineQueryResultLocation(self.id, self.longitude, self.latitude, self.title)
        b = InlineQueryResultLocation(self.id, self.longitude, self.latitude, self.title)
        c = InlineQueryResultLocation(self.id, 0, self.latitude, self.title)
        d = InlineQueryResultLocation('', self.longitude, self.latitude, self.title)
        e = InlineQueryResultVoice(self.id, '', '')

        assert a == b
        assert hash(a) == hash(b)
        assert a is not b

        assert a == c
        assert hash(a) == hash(c)

        assert a != d
        assert hash(a) != hash(d)

        assert a != e
        assert hash(a) != hash(e)
| 2.28125 | 2 |
setup.py | SebastianSemper/chefkoch | 0 | 12764803 | # -*- coding: utf-8 -*-
# Copyright 2019 <NAME>
# https://www.tu-ilmenau.de/it-ems/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Setup script for installation of chefkoch package
Usecases:
- install package system-wide on your machine (needs su privileges)
EXAMPLE: 'python setup.py install'
- install package for your local user only (no privileges needed)
EXAMPLE: 'python setup.py install --user'
'''
# import modules
import platform
import sys
import os
import re
import subprocess
from distutils import sysconfig
def WARNING(string):
    """Print *string* prefixed with a red ANSI ``WARNING:`` tag."""
    tag = "\033[91mWARNING:\033[0m %s"
    print(tag % (string,))
def INFO(string):
    """Print *string* prefixed with a cyan ANSI ``INFO:`` tag."""
    tag = "\033[96mINFO:\033[0m %s"
    print(tag % (string,))
# load setup and extensions from setuptools. If that fails, try distutils
try:
from setuptools import setup, Extension
except ImportError:
WARNING("Could not import setuptools.")
raise
# global package constants
packageName = 'chefkoch'
packageVersion = '0.0' # provide a version tag as fallback
fullVersion = packageVersion
strVersionFile = "%s/version.py" %(packageName)
VERSION_PY = """
# -*- coding: utf-8 -*-
# This file carries the module's version information which will be updated
# during execution of the installation script, setup.py. Distribution tarballs
# contain a pre-generated copy of this file.
__version__ = '%s'
"""
##############################################################################
### function and class declaration section. DO NOT PUT SCRIPT CODE IN BETWEEN
##############################################################################
def getCurrentVersion():
    """Determine the package version and store it in the module globals.

    Resolution order: a manual `.version` override file, then `git describe`
    on the source tree. Updates ``fullVersion`` (complete tag, possibly with
    commit hash / -dirty suffix) and ``packageVersion`` (numeric part only).
    """
    global packageVersion
    global fullVersion

    # check if there is a manual version override
    if os.path.isfile(".version"):
        with open(".version", "r") as f:
            stdout = f.read().split('\n')[0]
        print("Override of version string to '%s' (from .version file )" % (
            stdout))
        fullVersion = stdout
    else:
        # check if source directory is a git repository
        if not os.path.exists(".git"):
            print(("Installing from something other than a Git repository; " +
                   "Version file '%s' untouched.") % (strVersionFile))
            return

        # fetch current tag and commit description from git
        try:
            p = subprocess.Popen(
                ["git", "describe", "--tags", "--dirty", "--always"],
                stdout=subprocess.PIPE
            )
        except EnvironmentError:
            print("Not a git repository; Version file '%s' not touched." % (
                strVersionFile))
            return

        stdout = p.communicate()[0].strip()
        # NOTE(review): `stdout is not str` compares an object to a type and
        # is always True, so decode() runs unconditionally; probably meant
        # `not isinstance(stdout, str)` — confirm intent before changing.
        if stdout is not str:
            stdout = stdout.decode()

        if p.returncode != 0:
            print(("Unable to fetch version from git repository; " +
                   "leaving version file '%s' untouched.") % (strVersionFile))
            return

        fullVersion = stdout

    # output results to version string, extract package version number from
    # `fullVersion` as this string might also contain additional tags (e.g.
    # commit hashes or `-dirty` flags from git tags)
    versionMatch = re.match(r"[.+\d+]+\d*[abr]\d*", fullVersion)
    if versionMatch:
        packageVersion = versionMatch.group(0)
        print("Fetched package version number from git tag (%s)." % (
            packageVersion))
# determine requirements for install and setup
def checkRequirement(lstRequirements, importName, requirementName):
    """Append *requirementName* to *lstRequirements* when needed.

    The requirement is added when the package cannot be imported, or
    unconditionally when an installation wheel is being built. Already
    installed packages are otherwise left alone, so setup never upgrades
    (and potentially breaks) an existing installation.
    """
    building_wheel = 'bdist_wheel' in sys.argv[1:]
    try:
        __import__(importName)
        installed = True
    except ImportError:
        installed = False
    if (not installed) or building_wheel:
        lstRequirements.append(requirementName)
def doc_opts():
    """Provide a ``build_doc`` setup target for generating the sphinx docs.

    Returns a command class when sphinx is importable, otherwise an empty
    dict so setup() simply registers no extra command.
    """
    try:
        from sphinx.setup_command import BuildDoc
    except ImportError:
        # Sphinx is not installed: no documentation target is offered.
        return {}

    class OwnDoc(BuildDoc):
        # Thin subclass kept as an extension point for doc-build tweaks.
        def __init__(self, *args, **kwargs):
            super(OwnDoc, self).__init__(*args, **kwargs)

    return OwnDoc
##############################################################################
### The actual script. KEEP THE `import filter` ALIVE AT ALL TIMES
##############################################################################
if __name__ == '__main__':
    # Fetch the version from git (or a .version override) and refresh the
    # generated version file so installed copies carry the right version.
    getCurrentVersion()

    # make sure there exists a version.py file in the project
    with open(strVersionFile, "w") as f:
        f.write(VERSION_PY % (fullVersion))
    print("Set %s to '%s'" % (strVersionFile, fullVersion))

    # Read the long description from README.md.
    # CAUTION: Python 2 and 3 need different open() calls for UTF-8 text.
    fileName = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'README.md'
    )
    f = (open(fileName, 'r') if sys.version_info < (3, 0)
         else open(fileName, 'r', encoding='utf-8'))
    longDescription = f.read()
    f.close()

    print("Building %s v%s" % (
        packageName,
        packageVersion
    ))

    # check if all requirements are met prior to actually calling setup()
    setupRequires = []
    installRequires = []
    checkRequirement(setupRequires, 'setuptools', 'setuptools>=18.0')
    checkRequirement(installRequires, 'dask', 'dask>=1.0.0')
    checkRequirement(installRequires, 'six', 'six')
    checkRequirement(installRequires, 'dask-jobqueue', 'dask-jobqueue>=0.4.1')

    print("Requirements for setup: %s" % (setupRequires))
    print("Requirements for install: %s" % (installRequires))

    # everything's set. Fire in the hole.
    setup(
        name=packageName,
        version=packageVersion,
        description=('A compute cluster cuisine for distributed scientific ' +
                     'computing in python'),
        long_description=longDescription,
        author='<NAME>, EMS group TU Ilmenau',
        author_email='<EMAIL>',
        # Fixed mojibake in the homepage URL ('ćhefkoch' -> 'chefkoch').
        url='https://ems-tu-ilmenau.github.io/chefkoch/',
        license='Apache Software License',
        classifiers=[
            'Development Status :: 1 - Planning',
            'Intended Audience :: Developers',
            'Intended Audience :: Education',
            'Intended Audience :: Science/Research',
            'Intended Audience :: Information Technology',
            'License :: OSI Approved :: Apache Software License',
            'Natural Language :: English',
            'Operating System :: Microsoft :: Windows',
            'Operating System :: POSIX :: Linux',
            'Operating System :: MacOS :: MacOS X',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Topic :: Scientific/Engineering',
            'Topic :: Software Development',
            'Topic :: System :: Distributed Computing'
        ],
        keywords=('compute cluster HPC LSF dask parallel computing ' +
                  'scheduler framework'),
        setup_requires=setupRequires,
        install_requires=installRequires,
        packages=[
            'chefkoch'
        ],
        cmdclass={'build_doc': doc_opts()},
        command_options={
            'build_doc': {
                'project': ('setup.py', packageName),
                'version': ('setup.py', packageVersion),
                'release': ('setup.py', fullVersion),
                'copyright': ('setup.py', '2019, ' + packageName)
            }}
    )
| 1.726563 | 2 |
doctor/email_info.py | JuliasBright/SendMoney | 1 | 12764804 | <filename>doctor/email_info.py
# SMTP settings for outgoing mail (Gmail, STARTTLS submission port).
# NOTE(review): credentials must never be hardcoded in source control —
# load EMAIL_HOST_USER/EMAIL_HOST_PASSWORD from environment variables or a
# secrets store instead.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = '<EMAIL>'
EMAIL_HOST_PASSWORD = '<PASSWORD>@'
EMAIL_USE_TLS = True
examples/srx/srx_nat_proxyarp.py | cro/py-junos-eznc | 0 | 12764805 | # for debugging ...
import pdb
from pprint import pprint as pp
from lxml import etree
# for the example ...
from jnpr.eznc import Netconf
from jnpr.eznc.resources.srx.nat import NatProxyArp
from jnpr.eznc.utils import Config
# create a junos device and open a connection
# Create a junos device and open a NETCONF connection to it.
jdev = Netconf(user='jeremy', password='<PASSWORD>', host='vsrx_cyan')
jdev.open()

# Create a config utility object bound to the open session.
cu = Config(jdev)

# Select a proxy-arp entry, using direct resource access:
# (interface, address) identifies the NAT proxy-arp resource.
entry = NatProxyArp(jdev, ('ge-0/0/1.124', '198.18.11.5'))


def doit():
    """Demo: create the proxy-arp entry if it is absent, show the resulting
    candidate-config diff, then roll the candidate back (no commit)."""
    if not entry.exists:
        print "creating entry"
        # touch=True writes the resource container without extra leaf values.
        entry.write(touch=True)
        print cu.diff()
        # Expected diff output:
        # [edit security]
        # +    nat {
        # +        proxy-arp {
        # +            interface ge-0/0/1.124 {
        # +                address {
        # +                    198.18.11.5/32;
        # +                }
        # +            }
        # +        }
        # +    }
        print "rollback...."
        cu.rollback()
    else:
        print "entry exists"


doit()
app.py | beyondchan28/API-Client---GoGoDot-Blog | 1 | 12764806 | <filename>app.py
from flask import Flask
from flask.templating import render_template
import requests
import json
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
    """Render the landing page with resources fetched from the blog API."""
    api_url = "http://localhost:8000/resource/"
    request_headers = {
        'Content-Type': "application/json",
        'Authorization': "<KEY>",
    }
    response = requests.request("GET", api_url, headers=request_headers)
    payload = json.loads(response.content)
    return render_template('index.html', data=payload)
@app.route('/news', methods=['GET'])
def news():
    """Render the news page with articles fetched from the news API."""
    api_url = "http://localhost:5000/news/"
    request_headers = {
        'Content-Type': "application/json",
        'Authorization': "<KEY>",
    }
    response = requests.request("GET", api_url, headers=request_headers)
    payload = json.loads(response.content)
    return render_template('news.html', data=payload)
Practicals03/Example01.py | MichalKyjovsky/NPRG065_Programing_in_Python | 0 | 12764807 | #! /usr/bin/env Python3
# Demonstrate that a nested for-loop and a double comprehension build the
# same list of (i, j, i*j) multiplication-table triples.
list_of_tuples = []
for first in range(1, 11):
    for second in range(1, 11):
        list_of_tuples.append((first, second, first * second))

list_of_tuples_comprehension = [
    (i, j, i * j) for i in range(1, 11) for j in range(1, 11)
]

print(list_of_tuples)
print('************')
print(list_of_tuples_comprehension)
| 4.03125 | 4 |
ppmessage/db/sqlpgsql.py | gamert/ppmessage | 6 | 12764808 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# <NAME>, <EMAIL>.
# All rights reserved
#
# db/sqlpgsql.py
#
from .sqlnone import SqlNone
from ppmessage.core.constant import SQL
from ppmessage.core.singleton import singleton
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy import create_engine
import logging
import traceback
class SqlInstance(SqlNone):
    """PostgreSQL SQL backend built on SQLAlchemy + psycopg2."""

    def __init__(self, _db_config):
        # Connection parameters come from the ppmessage db config dict.
        self.pgsql_config = _db_config
        self.db_name = self.pgsql_config.get("db_name")
        self.db_pass = self.pgsql_config.get("db_pass")
        self.db_user = self.pgsql_config.get("db_user")
        self.db_host = self.pgsql_config.get("db_host")
        self.db_port = self.pgsql_config.get("db_port")
        super(SqlInstance, self).__init__()
        return

    def name(self):
        """Return the backend identifier (SQL.PGSQL)."""
        return SQL.PGSQL

    def createEngine(self):
        """Lazily create and cache the SQLAlchemy engine for this database.

        Subsequent calls return the already-created engine.
        """
        db_string = "postgresql+psycopg2://%s:%s@%s:%s/%s" % (
            self.db_user,
            self.db_pass,
            self.db_host,
            self.db_port,
            self.db_name
        )
        # NOTE(review): self.dbengine is presumably initialized to None by
        # SqlNone.__init__ — confirm in the base class.
        if self.dbengine == None:
            self.dbengine = create_engine(db_string, echo_pool=True)
        # it will create a thread local session for every single web request
        return self.dbengine
| 2.46875 | 2 |
data-cleaning/parse_xml2dom.py | RoderickLi/python-snippet | 6 | 12764809 | # see ()[https://stackoverflow.com/a/40749716]
from xml.dom.minidom import parseString
html_string = """
<!DOCTYPE html>
<html><head><title>title</title></head><body><p>test</p></body></html>
"""
# extract the text value of the document's <p> tag:
doc = parseString(html_string)
paragraph = doc.getElementsByTagName("p")[0]
content = paragraph.firstChild.data
print(content)
# This would raise an exception on common HTML entities such as or ®.
| 3.140625 | 3 |
mgba_gamedata/gb/__init__.py | mgba-emu/gamedata | 2 | 12764810 | from mgba_gamedata.registry import Platform, Game
class GB(Platform):
    """Game Boy platform entry in the mgba game-data registry."""
    pass
class GBGame(Game):
    """Base class for Game Boy titles; binds game entries to the GB platform."""
    platform = GB
| 1.585938 | 2 |
Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/data/logonasuser.py | bidhata/EquationGroupLeaks | 9 | 12764811 | <reponame>bidhata/EquationGroupLeaks<gh_stars>1-10
from ops.data import OpsClass, OpsField, DszObject, DszCommandObject, cmd_definitions
import dsz
# Register the 'logonasuser' command schema once per process.
if ('logonasuser' not in cmd_definitions):
    # A 'logon' result carries the integer token handle and the string alias.
    dszlogonasuser = OpsClass('logon', {'handle': OpsField('handle', dsz.TYPE_INT), 'alias': OpsField('alias', dsz.TYPE_STRING)}, DszObject)
    logonasusercommand = OpsClass('logonasuser', {'logon': dszlogonasuser}, DszCommandObject)
    cmd_definitions['logonasuser'] = logonasusercommand
eden/integration/hg/sparse_test.py | jmswen/eden | 0 | 12764812 | <reponame>jmswen/eden
#!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from eden.integration.lib import hgrepo
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
@hg_test
class SparseTest(EdenHgTestCase):
def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
repo.write_file("a_file.txt", "")
repo.commit("first commit")
def test_sparse(self) -> None:
"""Verify that we show a reasonable error if someone has managed
to load the sparse extension, rather than an ugly stack trace"""
for sub in [
"clear",
"cwd",
"delete",
"disable",
"enable",
"exclude",
"explain",
"files someprofile",
"importrules",
"include",
"show",
"list",
"refresh",
"reset",
]:
with self.assertRaises(hgrepo.HgError) as context:
self.hg("--config", "extensions.sparse=", "sparse", *sub.split())
self.assertIn(
"don't need sparse profiles",
# pyre-fixme[16]: `_E` has no attribute `stderr`.
context.exception.stderr.decode("utf-8", errors="replace"),
)
| 1.648438 | 2 |
app/recipe/tests/test_recipe_api.py | xWaterBottlex/recipe-app-api | 0 | 12764813 | <filename>app/recipe/tests/test_recipe_api.py<gh_stars>0
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPE_URL = reverse('recipe:recipe-list')
def detail_url(recipe_id):
    """Build the URL for a single recipe's detail endpoint."""
    return reverse('recipe:recipe-detail', args=(recipe_id,))
def sample_tag(user, name='Main course'):
    """Create and return a sample Tag owned by *user*."""
    return Tag.objects.create(name=name, user=user)
def sample_ingredient(user, name='Cinnamon'):
    """Create and return a sample Ingredient owned by *user*."""
    return Ingredient.objects.create(name=name, user=user)
def sample_recipe(user, **params):
    """Create and return a sample Recipe; defaults are overridable via kwargs."""
    fields = {
        'title': 'sample recipe',
        'time_minutes': 10,
        'price': 0.4,
    }
    fields.update(params)
    return Recipe.objects.create(user=user, **fields)
class PublicRecipeApiTest(APITestCase):
    """Tests for unauthenticated access to the recipe API."""

    def test_auth_required(self):
        """Test that authentication is required"""
        res = self.client.get(RECIPE_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTest(APITestCase):
    """Tests for authenticated recipe API access."""

    def setUp(self):
        # Authenticate every request in this suite as a freshly created user.
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_recipes(self):
        """Test retrieving the list of recipes"""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPE_URL)
        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_recipes_limited_to_user(self):
        """Test that returned recipes are limited to the authenticated user"""
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            'test123123'
        )
        # One recipe per user; only the authenticated user's should be listed.
        sample_recipe(user=user2)
        sample_recipe(user=self.user)
        res = self.client.get(RECIPE_URL)
        recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data, serializer.data)

    def test_view_recipe_detail(self):
        """Test viewing a recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tag.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        url = detail_url(recipe.id)
        res = self.client.get(url)
        serializer = RecipeDetailSerializer(recipe)
        self.assertEqual(res.data, serializer.data)

    def test_create_basic_recipe(self):
        """Test creating a recipe"""
        payload = {
            'title': 'Vada pav',
            'time_minutes': 30,
            'price': 5.00,
            'user': self.user
        }
        res = self.client.post(RECIPE_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(title=res.data['title'])
        # Every field sent in the payload should be persisted verbatim.
        for key in payload.keys():
            self.assertEqual(payload[key], getattr(recipe, key))

    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag1 = sample_tag(self.user, name='Vegan')
        tag2 = sample_tag(self.user, name='Desert')
        payload = {
            'title': 'Avocado lime cheesecake',
            'tag': [tag1.id, tag2.id],
            'time_minutes': 60,
            'price': 0.6
        }
        res = self.client.post(RECIPE_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(title=res.data['title'])
        tags = recipe.tag.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag1, tags)
        self.assertIn(tag2, tags)

    def test_create_recipe_with_ingredients(self):
        """Test that you can create a recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user,
                                        name='Prawns')
        ingredient2 = sample_ingredient(user=self.user,
                                        name='Ginger')
        payload = {
            'title': 'Thai prawn red curry',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 20,
            'price': 7.00
        }
        res = self.client.post(RECIPE_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(title=res.data['title'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)
| 2.5 | 2 |
formulas/confluent_hypergeometric.py | pascalmolin/fungrim | 0 | 12764814 | from .expr import *
# Topic page layout: symbol list followed by themed sections, each section
# referencing entries by their Fungrim IDs.
def_Topic(
    Title("Confluent hypergeometric functions"),
    Entries(
        "316533",
        "f565f5",
        "512bea",
        "cee331",
        "d6add6",
        "1b9cc5",
        "b9cc75",
    ),
    Section("Hypergeometric series"),
    Entries(
        "4c41ad",
        "0a0aec",
        "a61f01",
        "dec042",
        "70111e",
    ),
    Section("Differential equations"),
    Entries(
        "06f229",
        "bb5d67",
    ),
    Section("Kummer's transformation"),
    Entries(
        "be533c",
        "a047eb",
        "9d3147",
    ),
    Section("Connection formulas"),
    Entries(
        "c8fcc7",  # ustar as u
        "4cf1e9",  # ustar as 2f0
        "f7f84e",  # 1f1r as ustar
        "6cf802",  # u as 1f1
        "18ef23",  # u as 1f1, integer
        "2df3e3",  # 0f1 as 1f1
        "325a0e",  # 0f1 as J
        "00dfd1",  # 0f1 as I
    ),
    Section("Asymptotic expansions"),
    Entries(
        "d1b3b5",
        "99f69c",
        "876844",
        "279e4f",
        "461a54",
        "7b91b4",
    ),
)

# --- Symbol definitions ---

make_entry(ID("316533"),
    SymbolDefinition(Hypergeometric0F1, Hypergeometric0F1(a,z), "Confluent hypergeometric limit function"))

make_entry(ID("f565f5"),
    SymbolDefinition(Hypergeometric0F1Regularized, Hypergeometric0F1Regularized(a,z), "Regularized confluent hypergeometric limit function"))

make_entry(ID("512bea"),
    SymbolDefinition(Hypergeometric1F1, Hypergeometric1F1(a,b,z), "Kummer confluent hypergeometric function"))

make_entry(ID("cee331"),
    SymbolDefinition(Hypergeometric1F1Regularized, Hypergeometric1F1Regularized(a,b,z), "Regularized Kummer confluent hypergeometric function"))

make_entry(ID("d6add6"),
    SymbolDefinition(HypergeometricU, HypergeometricU(a,b,z), "Tricomi confluent hypergeometric function"))

make_entry(ID("1b9cc5"),
    SymbolDefinition(HypergeometricUStar, HypergeometricUStar(a,b,z), "Scaled Tricomi confluent hypergeometric function"))

make_entry(ID("b9cc75"),
    SymbolDefinition(Hypergeometric2F0, Hypergeometric2F0(a,b,z), "Tricomi confluent hypergeometric function, alternative notation"))

# --- Hypergeometric series ---

make_entry(ID("4c41ad"),
    Formula(Equal(Hypergeometric0F1(a,z), Sum(1/RisingFactorial(a,k) * (z**k / Factorial(k)), Tuple(k, 0, Infinity)))),
    Variables(a,z),
    Assumptions(And(Element(a,SetMinus(CC,ZZLessEqual(0))), Element(z,CC))))

make_entry(ID("0a0aec"),
    Formula(Equal(Hypergeometric0F1Regularized(a,z), Sum(1/GammaFunction(a+k) * (z**k / Factorial(k)), Tuple(k, 0, Infinity)))),
    Variables(a,z),
    Assumptions(And(Element(a,CC), Element(z,CC))))

make_entry(ID("a61f01"),
    Formula(Equal(Hypergeometric1F1(a,b,z), Sum(RisingFactorial(a,k)/RisingFactorial(b,k) * (z**k / Factorial(k)), Tuple(k, 0, Infinity)))),
    Variables(a,b,z),
    Assumptions(And(Element(a,CC), Element(b, SetMinus(CC, ZZLessEqual(0))), Element(z,CC))))

# Terminating series for a nonpositive-integer upper parameter.
make_entry(ID("dec042"),
    Formula(Equal(Hypergeometric1F1(-n,b,z), Sum(RisingFactorial(-n,k)/RisingFactorial(b,k) * (z**k / Factorial(k)), Tuple(k, 0, n)))),
    Variables(n,b,z),
    #Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(b, SetMinus(CC, ZZBetween(-n+1, 0))), Element(z,CC))))
    Assumptions(And(Element(n, ZZGreaterEqual(0)), Element(b, CC), Not(And(Element(b, ZZLessEqual(0)), Greater(b, -n))), Element(z,CC))))

make_entry(ID("70111e"),
    Formula(Equal(Hypergeometric1F1Regularized(a,b,z), Sum(RisingFactorial(a,k)/GammaFunction(b+k) * (z**k / Factorial(k)), Tuple(k, 0, Infinity)))),
    Variables(a,b,z),
    Assumptions(And(Element(a,CC), Element(b, CC), Element(z,CC))))

# --- Kummer's transformation ---

make_entry(ID("be533c"),
    Formula(Equal(Hypergeometric1F1(a,b,z), Exp(z) * Hypergeometric1F1(b-a, b, -z))),
    Variables(a, b, z),
    Assumptions(And(Element(a, CC), Element(b, SetMinus(CC, ZZLessEqual(0))), Element(z, CC))))

make_entry(ID("a047eb"),
    Formula(Equal(Hypergeometric1F1Regularized(a,b,z), Exp(z) * Hypergeometric1F1Regularized(b-a, b, -z))),
    Variables(a, b, z),
    Assumptions(And(Element(a, CC), Element(b, CC), Element(z, CC))))

make_entry(ID("9d3147"),
    Formula(Equal(HypergeometricU(a,b,z), z**(1-b) * HypergeometricU(1+a-b, 2-b, z))),
    Variables(a, b, z),
    Assumptions(And(Element(a, CC), Element(b, CC), Element(z, CC), Unequal(z, 0))))

# --- Differential equations (general solutions via free constants C, D) ---

make_entry(ID("06f229"),
    Formula(Where(Equal(z * Derivative(y(z), Tuple(z, z, 2)) + (b-z) * Derivative(y(z), Tuple(z, z, 1)) - a*y(z), 0), Equal(y(z),
        C*Hypergeometric1F1Regularized(a,b,z) + D*HypergeometricU(a,b,z)))),
    Variables(z, a, b, C, D),
    Assumptions(And(Element(a, CC), Element(b, CC), Element(z, CC), Element(C, CC), Element(D, CC),
        Or(Equal(D, 0), Unequal(z, 0), Element(-a, ZZGreaterEqual(0))))))

make_entry(ID("bb5d67"),
    Formula(Where(Equal(z * Derivative(y(z), Tuple(z, z, 2)) + a * Derivative(y(z), Tuple(z, z, 1)) - y(z), 0), Equal(y(z),
        C*Hypergeometric0F1Regularized(a,z) + D*z**(1-a)*Hypergeometric0F1Regularized(2-a,z)))),
    Variables(z, a, C, D),
    Assumptions(And(Element(a, CC), Element(z, CC), Element(C, CC), Element(D, CC),
        Or(Equal(D, 0), Unequal(z, 0), Element(1-a, ZZGreaterEqual(0))))))
make_entry(ID("c8fcc7"),
Formula(Equal(HypergeometricUStar(a,b,z), z**a * HypergeometricU(a,b,z))),
Variables(a, b, z),
Assumptions(And(Element(a, CC), Element(b, CC), Element(z, CC), Unequal(z, 0))))
make_entry(ID("4cf1e9"),
Formula(Equal(HypergeometricUStar(a,b,z), Hypergeometric2F0(a, a-b+1, -(1/z)))),
Variables(a, b, z),
Assumptions(And(Element(a, CC), Element(b, CC), Element(z, CC), Unequal(z, 0))))
# todo: requires reciprocal gamma function
make_entry(ID("f7f84e"),
Formula(Equal(Hypergeometric1F1Regularized(a,b,z),
Div((-z)**(-a), GammaFunction(b-a)) * HypergeometricUStar(a,b,z) + Div(z**(a-b) * Exp(z), GammaFunction(a)) * HypergeometricUStar(b-a, b, -z))),
Variables(a, b, z),
Assumptions(And(Element(a, CC), Element(b, CC), Element(z, CC), Unequal(z, 0))))
make_entry(ID("6cf802"),
Formula(Equal(HypergeometricU(a,b,z),
GammaFunction(1-b) / GammaFunction(a-b+1) * Hypergeometric1F1(a,b,z) + GammaFunction(b-1)/GammaFunction(a) * z**(1-b) * Hypergeometric1F1(a-b+1, 2-b, z))),
Variables(a, b, z),
Assumptions(And(Element(a, CC), Element(b, CC), Element(z, CC), Unequal(z, 0), NotElement(b, ZZ))))
make_entry(ID("18ef23"),
Formula(Equal(HypergeometricU(a,n,z),
ComplexLimit(
GammaFunction(1-b) / GammaFunction(a-b+1) * Hypergeometric1F1(a,b,z) + GammaFunction(b-1)/GammaFunction(a) * z**(1-b) * Hypergeometric1F1(a-b+1, 2-b, z),
b, n))),
Variables(a, n, z),
Assumptions(And(Element(a, CC), Element(n, ZZ), Element(z, CC), Unequal(z, 0))))
make_entry(ID("2df3e3"),
Formula(Equal(Hypergeometric0F1(a,z), Exp(-(2*Sqrt(z))) * Hypergeometric1F1(a-Div(1,2), 2*a-1, 4*Sqrt(z)))),
Variables(a, z),
Assumptions(And(Element(a, CC), Element(z, CC), NotElement(a, ZZLessEqual(0)))))
make_entry(ID("325a0e"),
Formula(Equal(Hypergeometric0F1Regularized(a,z), (-z)**((1-a)/2) * BesselJ(a-1, 2*Sqrt(-z)))),
Variables(a, z),
Assumptions(And(Element(a, CC), Element(z, CC), Unequal(z, 0))))
make_entry(ID("00dfd1"),
Formula(Equal(Hypergeometric0F1Regularized(a,z), z**((1-a)/2) * BesselI(a-1, 2*Sqrt(z)))),
Variables(a, z),
Assumptions(And(Element(a, CC), Element(z, CC), Unequal(z, 0))))
# Asymptotic expansion of U* with an explicit remainder term, plus three
# rigorous bounds on that remainder (following DLMF section 13.7).
make_entry(ID("d1b3b5"),
    Formula(Equal(HypergeometricUStar(a,b,z),
        Sum(RisingFactorial(a,k) * RisingFactorial(a-b+1,k) / (Factorial(k) * (-z)**k), Tuple(k, 0, n-1))
        + HypergeometricUStarRemainder(n,a,b,z))),
    Variables(a,b,z,n),
    Assumptions(And(Element(a,CC), Element(b,CC), Element(z,CC), Unequal(z,0), Element(n,ZZGreaterEqual(0)))))
make_entry(ID("99f69c"),
    SymbolDefinition(HypergeometricUStarRemainder, HypergeometricUStarRemainder(n,a,b,z), "Error term in asymptotic expansion of Tricomi confluent hypergeometric function"))
# The remainder vanishes as |z| -> infinity along any ray.
make_entry(ID("876844"),
    Formula(Equal(ComplexLimit(Abs(HypergeometricUStarRemainder(n,a,b, Exp(ConstI*theta) * z)), z, Infinity), 0)),
    Variables(a,b,theta,n),
    Assumptions(And(Element(a,CC), Element(b,CC), Element(theta, RR), Element(n,ZZGreaterEqual(1)))))
# Remainder bound valid in the right half-plane Re(z) > |b - 2a|.
make_entry(ID("279e4f"),
    Formula(Where(LessEqual(Abs(HypergeometricUStarRemainder(n,a,b,z)),
            Abs((RisingFactorial(a,n) * RisingFactorial(a-b+1,n)) / (Factorial(n) * z**n)) *
            (2 / (1 - sigma)) * Exp((2 * rho) / ((1 - sigma) * Abs(z)))),
        Equal(sigma, Abs(b-2*a)/Abs(z)),
        Equal(rho, Abs(a**2-a*b+b/2) + sigma*(1+sigma/4)/(1-sigma)**(2)))),
    Variables(a,b,z,n),
    Assumptions(And(Element(a,CC), Element(b,CC), Element(z,CC), Unequal(z,0), Element(n,ZZGreaterEqual(0)), Greater(Re(z), Abs(b-2*a)))),
    References("DLMF section 13.7, https://dlmf.nist.gov/13.7"))
# Weaker bound valid on a larger region (also covering |Im(z)| > |b - 2a|).
make_entry(ID("461a54"),
    Formula(Where(LessEqual(Abs(HypergeometricUStarRemainder(n,a,b,z)),
            Abs((RisingFactorial(a,n) * RisingFactorial(a-b+1,n)) / (Factorial(n) * z**n)) *
            (2 * Sqrt(1 + Div(1,2)*ConstPi*n) / (1 - sigma)) * Exp((ConstPi * rho) / ((1 - sigma) * Abs(z)))),
        Equal(sigma, Abs(b-2*a)/Abs(z)),
        Equal(rho, Abs(a**2-a*b+b/2) + sigma*(1+sigma/4)/(1-sigma)**(2)))),
    Variables(a,b,z,n),
    Assumptions(And(Element(a,CC), Element(b,CC), Element(z,CC), Unequal(z,0), Element(n,ZZGreaterEqual(0)), Or(Greater(Abs(Im(z)), Abs(b-2*a)), Greater(Re(z), Abs(b-2*a))))),
    References("DLMF section 13.7, https://dlmf.nist.gov/13.7"))
# Bound valid whenever |z| > 2|b - 2a| (any direction).
make_entry(ID("7b91b4"),
    Formula(Where(LessEqual(Abs(HypergeometricUStarRemainder(n,a,b,z)),
            Abs((RisingFactorial(a,n) * RisingFactorial(a-b+1,n)) / (Factorial(n) * z**n)) *
            ((2 * C(n)) / (1 - tau) * Exp(2 * C(1) * rho / ((1 - tau) * Abs(z))))),
        Equal(sigma, Abs(b-2*a)/Abs(z)),
        Equal(nu, 1+2*sigma**2),
        Equal(tau, nu * sigma),
        Equal(rho, Abs(a**2-a*b+b/2) + tau*(1+tau/4)/(1-tau)**(2)),
        Equal(C(m), (Sqrt(1+ConstPi*m/2) + sigma*nu**2*m) * nu**m))),
    Variables(a,b,z,n),
    Assumptions(And(Element(a,CC), Element(b,CC), Element(z,CC), Unequal(z,0), Element(n,ZZGreaterEqual(0)), Greater(Abs(z), 2*Abs(b-2*a)))),
    References("DLMF section 13.7, https://dlmf.nist.gov/13.7"))
| 1.945313 | 2 |
music/class_/audioa/d/base/_key.py | jedhsu/music | 0 | 12764815 | """
*D Key*
A key of D.
"""
from abc import ABCMeta
__all__ = ["D_Key"]
class D_Key(metaclass=ABCMeta):
    """Abstract base class representing the musical key of D.

    The original assigned the Python-2 ``__metaclass__`` class attribute,
    which Python 3 silently ignores -- so the class was an ordinary
    (non-ABC) class. ``metaclass=ABCMeta`` restores the intended abstract
    base class behaviour under Python 3 while keeping the class
    instantiable (it declares no abstract methods).
    """
| 2.375 | 2 |
validation_strategy.py | jodsche/recsys2018-1 | 0 | 12764816 | import joblib
import numpy as np
import pandas as pd
# Fix the RNG so the validation split is reproducible across runs.
np.random.seed(0)
df_tracks = pd.read_hdf('df_data/df_tracks.hdf')
df_playlists = pd.read_hdf('df_data/df_playlists.hdf')
df_playlists_info = pd.read_hdf('df_data/df_playlists_info.hdf')
df_playlists_test = pd.read_hdf('df_data/df_playlists_test.hdf')
df_playlists_test_info = pd.read_hdf('df_data/df_playlists_test_info.hdf')
# Group training playlist ids by playlist length (num_tracks).
num_tracks = df_playlists_info.groupby('num_tracks').pid.apply(np.array)
validation_playlists = {}
# For each playlist length occurring in the test set, sample twice as many
# training playlists of that length (one half for each validation fold).
for i, j in df_playlists_test_info.num_tracks.value_counts().reset_index().values:
    validation_playlists[i] = np.random.choice(num_tracks.loc[i], 2 * j, replace=False)
val1_playlist = {}
val2_playlist = {}
# Seed sizes present in the test set (number of visible tracks per playlist).
for i in [0, 1, 5, 10, 25, 100]:
    val1_playlist[i] = []
    val2_playlist[i] = []
    value_counts = df_playlists_test_info.query('num_samples==@i').num_tracks.value_counts()
    for j, k in value_counts.reset_index().values:
        # Consume the reserved pool: first k playlists go to fold 1,
        # the next k to fold 2.
        val1_playlist[i] += list(validation_playlists[j][:k])
        validation_playlists[j] = validation_playlists[j][k:]
        val2_playlist[i] += list(validation_playlists[j][:k])
        validation_playlists[j] = validation_playlists[j][k:]
# Build boolean masks of held-out (playlist, track) rows: all tracks of
# 0-seed playlists, and every track at position >= i for i-seed playlists.
val1_index = df_playlists.pid.isin(val1_playlist[0])
val2_index = df_playlists.pid.isin(val2_playlist[0])
for i in [1, 5, 10, 25, 100]:
    val1_index = val1_index | (df_playlists.pid.isin(val1_playlist[i]) & (df_playlists.pos >= i))
    val2_index = val2_index | (df_playlists.pid.isin(val2_playlist[i]) & (df_playlists.pos >= i))
train = df_playlists[~(val1_index | val2_index)]
val1 = df_playlists[val1_index]
val2 = df_playlists[val2_index]
val1_pids = np.hstack([val1_playlist[i] for i in val1_playlist])
val2_pids = np.hstack([val2_playlist[i] for i in val2_playlist])
train = pd.concat([train, df_playlists_test])
# Persist the split for the downstream training/evaluation scripts.
train.to_hdf('df_data/train.hdf', key='abc')
val1.to_hdf('df_data/val1.hdf', key='abc')
val2.to_hdf('df_data/val2.hdf', key='abc')
joblib.dump(val1_pids, 'df_data/val1_pids.pkl')
joblib.dump(val2_pids, 'df_data/val2_pids.pkl')
| 2.4375 | 2 |
returns/interfaces/specific/reader.py | thecoblack/returns | 0 | 12764817 | """
This module is special.
``Reader`` does not produce ``ReaderBasedN`` interface as other containers.
Because ``Reader`` can be used with two or three type arguments:
- ``RequiresContext[value, env]``
- ``RequiresContextResult[value, error, env]``
Because the second type argument changes its meaning
based on the used ``KindN`` instance,
we need to have two separate interfaces for two separate use-cases:
- ``ReaderBased2`` is used for types where the second type argument is ``env``
- ``ReaderBased3`` is used for types where the third type argument is ``env``
We also have two methods and two pointfree helpers
for ``bind_context`` composition: one for each interface.
Furthermore, ``Reader`` cannot have ``ReaderBased1`` type,
because we need both ``value`` and ``env`` types at all cases.
See also:
https://github.com/dry-python/returns/issues/485
"""
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar
from returns.interfaces import container, iterable
from returns.primitives.hkt import Kind2, Kind3
if TYPE_CHECKING:
from returns.context import RequiresContext, NoDeps # noqa: WPS433
_FirstType = TypeVar('_FirstType')
_SecondType = TypeVar('_SecondType')
_ThirdType = TypeVar('_ThirdType')
_UpdatedType = TypeVar('_UpdatedType')
_ValueType = TypeVar('_ValueType')
_ErrorType = TypeVar('_ErrorType')
_EnvType = TypeVar('_EnvType')
_ReaderBased2Type = TypeVar('_ReaderBased2Type', bound='ReaderBased2')
_ReaderBased3Type = TypeVar('_ReaderBased3Type', bound='ReaderBased3')
class ReaderBased2(
    container.Container2[_FirstType, _SecondType],
    iterable.Iterable2[_FirstType, _SecondType],
):
    """
    Reader interface for ``Kind2`` based types.
    It has two type arguments and treats the second type argument as env type.
    """
    @abstractmethod
    def __call__(self, deps: _SecondType) -> _FirstType:
        """Calls the reader with the env to get the result back."""
    @property
    @abstractmethod
    def empty(self: _ReaderBased2Type) -> 'NoDeps':
        """Is required to call ``Reader`` with explicit empty argument."""
    @abstractmethod
    def bind_context(
        self: _ReaderBased2Type,
        function: Callable[
            [_FirstType],
            'RequiresContext[_UpdatedType, _SecondType]',
        ],
    ) -> Kind2[_ReaderBased2Type, _UpdatedType, _SecondType]:
        """Allows to apply a wrapped function over a ``Reader`` container."""
    @abstractmethod
    def modify_env(
        self: _ReaderBased2Type,
        function: Callable[[_UpdatedType], _SecondType],
    ) -> Kind2[_ReaderBased2Type, _FirstType, _UpdatedType]:
        """Transforms the environment before calling the container."""
    @classmethod
    @abstractmethod
    def ask(
        cls: Type[_ReaderBased2Type],
    ) -> Kind2[_ReaderBased2Type, _SecondType, _SecondType]:
        """Returns the dependencies inside the container."""
    @classmethod
    @abstractmethod
    def from_context(
        cls: Type[_ReaderBased2Type],  # noqa: N805
        inner_value: 'RequiresContext[_ValueType, _EnvType]',
    ) -> Kind2[_ReaderBased2Type, _ValueType, _EnvType]:
        """Unit method to create new containers from successful ``Reader``."""
class ReaderBased3(
    container.Container3[_FirstType, _SecondType, _ThirdType],
    iterable.Iterable3[_FirstType, _SecondType, _ThirdType],
):
    """
    Reader interface for ``Kind3`` based types.
    It has three type arguments and treats the third type argument as env type.
    The second type argument is not used here.
    """
    @abstractmethod
    def __call__(self, deps: _ThirdType) -> Any:
        """
        Calls the reader with the env to get the result back.
        Returns ``Any``, because we cannot know in advance
        what combination of ``_FirstType`` and ``_SecondType`` would be used.
        It can be ``Union[_FirstType, _SecondType]`` or ``Tuple`` or ``Result``.
        Or any other type.
        """
    @property
    @abstractmethod
    def empty(self: _ReaderBased3Type) -> 'NoDeps':
        """Is required to call ``Reader`` with explicit empty argument."""
    @abstractmethod
    def bind_context(
        self: _ReaderBased3Type,
        function: Callable[
            [_FirstType],
            'RequiresContext[_UpdatedType, _ThirdType]',
        ],
    ) -> Kind3[_ReaderBased3Type, _UpdatedType, _SecondType, _ThirdType]:
        """Allows to apply a wrapped function over a ``Reader`` container."""
    @abstractmethod
    def modify_env(
        self: _ReaderBased3Type,
        function: Callable[[_UpdatedType], _ThirdType],
    ) -> Kind3[_ReaderBased3Type, _FirstType, _SecondType, _UpdatedType]:
        """Transforms the environment before calling the container."""
    @classmethod
    @abstractmethod
    def ask(
        cls: Type[_ReaderBased3Type],
    ) -> Kind3[_ReaderBased3Type, _ThirdType, _SecondType, _ThirdType]:
        """Returns the dependencies inside the container."""
    @classmethod
    @abstractmethod
    def from_context(
        cls: Type[_ReaderBased3Type],  # noqa: N805
        inner_value: 'RequiresContext[_ValueType, _EnvType]',
    ) -> Kind3[_ReaderBased3Type, _ValueType, _SecondType, _EnvType]:
        """Unit method to create new containers from successful ``Reader``."""
| 2.25 | 2 |
confu/builds/__init__.py | ararslan/confu | 13 | 12764818 | from confu.builds.base import Build
from confu.builds.unix import UnixBuild
from confu.builds.pnacl import PNaClBuild
from confu.builds.emscripten import EmscriptenBuild
from confu.builds.module import Module, ModuleCollection
from confu.builds.deps import DependencyCollection
| 0.960938 | 1 |
tgpy/api.py | tm-a-t/TGPy | 14 | 12764819 | <filename>tgpy/api.py
import logging
from typing import Any, Callable
class API:
    """Registry of TGPy extension points.

    Holds the user-visible ``variables`` and ``constants`` namespaces and an
    ordered list of source-code transformers that are applied to code before
    it is executed.
    """

    # (name, transformer) pairs, applied in registration order.
    code_transformers: list[tuple[str, Callable[[str], str]]]
    # Mutable values exposed to executed code.
    variables: dict[str, Any]
    # Values exposed to executed code that are not meant to change.
    constants: dict[str, Any]

    def __init__(self) -> None:
        self.variables = {}
        self.constants = {}
        self.code_transformers = []

    def add_code_transformer(self, name: str, transformer: Callable[[str], str]) -> None:
        """Register *transformer* under *name*; transformers run in FIFO order."""
        self.code_transformers.append((name, transformer))

    def _apply_code_transformers(self, code: str) -> str:
        """Pipe *code* through every registered transformer, in order.

        Logs (with traceback) and re-raises the first transformer failure.
        """
        for _, transformer in self.code_transformers:
            try:
                code = transformer(code)
            except Exception:
                # logger.exception already attaches the traceback, so the
                # original's redundant exc_info=True is dropped; lazy
                # %-formatting avoids building the message eagerly.
                logger = logging.getLogger(__name__)
                logger.exception(
                    'Error while applying code transformer %s', transformer
                )
                raise
        return code


api = API()
| 2.546875 | 3 |
paper_pictures_AirPollutionData.py | KirstieJane/bocpdms | 21 | 12764820 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 8 17:03:07 2018
@author: jeremiasknoblauch
Description: Plots pics from Air Pollution Data London
"""
import csv
import numpy as np
from Evaluation_tool import EvaluationTool
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import datetime
import matplotlib
#ensure that we have type 1 fonts (for ICML publishing guiedlines)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
"""""STEP 1: DATA TRANSFORMATIONS"""""
# --- user flags controlling the data transformations below ---
normalize = True
deseasonalize_2h = True
deseasonalize_day = True #only one of the two deseasonalizations should be chosen
shortened, shortened_to = False, 500
daily_avg = True
# Daily averaging collapses the 2h grid, so 2h deseasonalization is disabled.
if daily_avg:
    deseasonalize_2h = False
data_dir = ("//Users//jeremiasknoblauch//Documents//OxWaSP//BOCPDMS//" +
            "//Code//SpatialBOCD//Data//AirPollutionData")
cp_type = "CongestionChargeData"
dist_file_road = (data_dir + "//" + cp_type + "//" +
                  "RoadDistanceMatrix_")
dist_file_euclid = (data_dir + "//" + cp_type + "//" +
                    "EuclideanDistanceMatrix_")
results_file = ("/Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//Code//" +
                "SpatialBOCD//Paper//AirPollutionData//" +
                "results_daily.txt")
res_path = ("/Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//Code//" +
            "SpatialBOCD//Paper//AirPollutionData//")
frequency = "2h" #2h, daily (=every 15 min),
mode = "bigger" #bigger, smaller (bigger contains more filled-in values)
# Identifiers of the monitoring stations used in each setting.
if mode == "bigger":
    stationIDs = ["BT1", "BX1", "BX2", "CR2", "CR4",
                  "EA1", "EA2", "EN1", "GR4", "GR5",
                  "HG1", "HG2", "HI0", "HI1", "HR1",
                  "HS2", "HV1", "HV3", "KC1", "KC2",
                  "LH2", "MY1", "RB3", "RB4", "TD0",
                  "TH1", "TH2", "WA2", "WL1"]
elif mode == "smaller":
    stationIDs = ["BT1", "BX2", "CR2", "EA2", "EN1", "GR4",
                  "GR5", "HG1", "HG2", "HI0", "HR1", "HV1",
                  "HV3", "KC1", "LH2", "RB3", "TD0", "WA2"]
num_stations = len(stationIDs)
"""STEP 1: Read in distances"""
"""STEP 1.1: Read in road distances (as strings)"""
pw_distances_road = []
station_IDs = []
count = 0
with open(dist_file_road + mode + ".csv") as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        pw_distances_road += row
"""STEP 1.2: Read in euclidean distances (as strings)"""
pw_distances_euclid = []
station_IDs = []
count = 0
with open(dist_file_euclid + mode + ".csv") as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        pw_distances_euclid += row
"""STEP 1.3: Convert both distance lists to floats and matrices"""
pw_d_r, pw_d_e = [], []
for r,e in zip(pw_distances_road, pw_distances_euclid):
    pw_d_r.append(float(r))
    pw_d_e.append(float(e))
pw_distances_road = np.array(pw_d_r).reshape(num_stations, num_stations)
pw_distances_euclid = np.array(pw_d_e).reshape(num_stations, num_stations)
"""STEP 2: Convert distance matrices to nbhs"""
# Distance bands defining concentric neighbourhood rings around each station.
cutoffs = [0.0, 10.0, 20.0, 30.0, 40.0, 100.0]
num_nbhs = len(cutoffs) - 1
"""STEP 2.1: road distances"""
road_nbhs = []
for location in range(0, num_stations):
    location_nbh = []
    for i in range(0, num_nbhs):
        # Stations whose road distance falls strictly inside this band.
        larger_than, smaller_than = cutoffs[i], cutoffs[i+1]
        indices = np.intersect1d(
                np.where(pw_distances_road[location,:] > larger_than),
                np.where(pw_distances_road[location,:] < smaller_than)).tolist()
        location_nbh.append(indices.copy())
    road_nbhs.append(location_nbh.copy())
"""STEP 2.2: euclidean distances"""
euclid_nbhs =[]
for location in range(0, num_stations):
    location_nbh = []
    for i in range(0, num_nbhs):
        larger_than, smaller_than = cutoffs[i], cutoffs[i+1]
        indices = np.intersect1d(
                np.where(pw_distances_euclid[location,:] > larger_than),
                np.where(pw_distances_euclid[location,:] < smaller_than)).tolist()
        location_nbh.append(indices.copy())
    euclid_nbhs.append(location_nbh.copy())
"""STEP 3: Read in station data for each station"""
station_data = []
for id_ in stationIDs:
    file_name = (data_dir + "//" + cp_type + "//" +
                 id_ + "_081702-081703_" + frequency + ".txt")
    """STEP 3.1: Read in raw data"""
    #NOTE: Skip the header
    data_raw = []
    count = 0
    with open(file_name) as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            if count > 0:
                data_raw += row
            count += 1
    """STEP 3.2: Convert to floats"""
    #NOTE: We have row names, so skip every second
    dat = []
    for entry in data_raw:
        dat += [float(entry)]
    """STEP 3.3: Append to station_data list"""
    station_data.append(dat.copy())
"""STEP 4: Format the station data into a matrix"""
# data is T x num_stations: one column of observations per station.
T, S1, S2 = len(station_data[0]), num_stations, 1
data = np.zeros((T, num_stations))
for i in range(0, num_stations):
    data[:,i] = np.array(station_data[i])
intercept_priors = np.mean(data,axis=0)
hyperpar_opt = "caron"
"""STEP 5: Transformation if necessary"""
if shortened:
    T = shortened_to
    data = data[:T,:]
if daily_avg:
    """average 12 consecutive values until all have been processed"""
    # 12 two-hour observations make up one day, so T shrinks by a factor 12.
    new_data = np.zeros((int(T/12), num_stations))
    for station in range(0, num_stations):
        new_data[:, station] = np.mean(data[:,station].
                reshape(int(T/12), 12),axis=1)
    data= new_data
    T = data.shape[0]
# Weekday deseasonalization on the 2h grid (only when no daily averaging).
if deseasonalize_day:
    if deseasonalize_2h:
        print("CAREFUL! You want to deseasonalize twice, so deseasonalizing " +
              "was aborted!")
    elif not daily_avg:
        mean_day = np.zeros((7, num_stations))
        #deseasonalize
        for station in range(0, num_stations):
            """get the daily average. Note that we have 12 obs/day for a year"""
            for day in range(0, 7):
                # Boolean mask selecting all 2h observations of this weekday.
                selection_week = [False]*day + [True]*12 + [False]*(6-day)
                selection = (selection_week * int(T/(7*12)) +
                             selection_week[:(T-int(T/(7*12))*7*12)])
                mean_day[day, station] = np.mean(data[selection,station])
                data[selection,station] = (data[selection,station] -
                                           mean_day[day, station])
# Weekday deseasonalization on the (already averaged) daily grid.
if deseasonalize_day and daily_avg:
    mean_day = np.zeros((7, num_stations))
    #deseasonalize
    for station in range(0, num_stations):
        """get the daily average. Note that we have 12 obs/day for a year"""
        #Also note that T will already have changed to the #days
        for day in range(0, 7):
            selection_week = [False]*day + [True] + [False]*(6-day)
            selection = (selection_week * int(T/7) +
                         selection_week[:(T-int(T/7)*7)])
            mean_day[day, station] = np.mean(data[selection,station])
            data[selection,station] = (data[selection,station] -
                                       mean_day[day, station])
    T = data.shape[0]
# Remove the average of each 2h-slot-of-the-week instead (finer seasonality).
if deseasonalize_2h:
    if deseasonalize_day:
        print("CAREFUL! You want to deseasonalize twice, so deseasonalizing " +
              "was aborted!")
    else:
        mean_2h = np.zeros((12*7, num_stations))
        for station in range(0, num_stations):
            """get the average for each 2h-interval for each weekday"""
            for _2h in range(0, 12*7):
                selection_2h = [False]*_2h + [True] + [False]*(12*7-1-_2h)
                selection = (selection_2h * int(T/(7*12)) +
                             selection_2h[:(T-int(T/(7*12))*7*12)])
                mean_2h[_2h, station] = np.mean(data[selection,station])
                data[selection,station] = (data[selection,station] -
                                           mean_2h[_2h, station])
# Standardize each station's series to zero mean and unit variance.
if normalize:
    data = (data - np.mean(data, axis=0))/np.sqrt(np.var(data,axis=0))
    intercept_priors = np.mean(data,axis=0)
"""""STEP 2: READ RESULTS"""""
# Load the stored BOCPDMS run and extract the MAP segmentation / model labels.
EvT = EvaluationTool()
EvT.build_EvaluationTool_via_results(results_file)
segmentation = EvT.results[EvT.names.index("MAP CPs")][-2]
model_labels = EvT.results[EvT.names.index("model labels")]
num_models = len(np.union1d(model_labels, model_labels))
relevant_models = np.union1d([seg[1] for seg in segmentation],[seg[1] for seg in segmentation])
#mods = [8,11,13,17,18]
all_models = [e for e in range(0, len(model_labels))] #np.linspace(0, len(model_labels)-1, len(model_labels), dtype = int)
"""Get dates"""
def perdelta(start, end, delta, date_list):
    """Append to *date_list* every value from *start* (inclusive) up to
    *end* (exclusive), stepping by *delta*.

    Works for any types supporting ``<`` comparison and ``+`` with *delta*
    (used here with ``datetime.date`` and ``datetime.timedelta``).
    Mutates *date_list* in place; returns ``None``.
    """
    curr = start
    while curr < end:
        date_list.append(curr)
        curr += delta
# Build one date per (daily-averaged) observation: 2002-08-17 .. 2003-08-17.
all_dates = []
#start_year, start_month, start_day, start_hour = 2002, 8, 17, 0
#start_datetime = datetime.datetime(year = 2002, month = 8, day = 17, hour = 0)
#stop_datetime = datetime.datetime(year=2003, month = 8, day = 18, hour = 0)
#perdelta(start_datetime, stop_datetime, datetime.timedelta(hours = 2), all_dates)
start_year, start_month, start_day, start_hour = 2002, 8, 17, 0
# NOTE(review): the header does both `import datetime` and
# `from datetime import datetime`, so the name `datetime` here is the
# *class*, not the module -- `datetime.date(...)` / `datetime.timedelta(...)`
# may not resolve as intended; verify which import the original run used.
start_datetime = datetime.date(year = 2002, month = 8, day = 17)
stop_datetime = datetime.date(year=2003, month = 8, day = 18)
perdelta(start_datetime, stop_datetime, datetime.timedelta(days = 1), all_dates)
"""""STEP 3: Plot"""""
# Figure layout: (1) raw NOX series, (2) model-posterior trace, (3) log Bayes
# factor between the two models in `mod`; a vertical red line marks the
# congestion-charge introduction (2003-02-17).
index_selection = [0,5,9,13,17,21,30]
#location, color
true_CPs = [[datetime.date(year = 2003, month = 2, day = 17), "red", 4.0]]
#paper: height_ratio, num_subplots = [4,3,5],3
#poster: height_ratio, num_subplots = [4,3,4],3
height_ratio, num_subplots = [4,3,5],3
#paper: ylabel_coords = [-0.085, 0.5]
#poster: [-0.06, 0.5]
ylabel_coords = [-0.085, 0.5]
#paper: figsize = (8,5) #for poster: 12,5
fig, ax_array = plt.subplots(num_subplots, sharex = True,
                             gridspec_kw = {'height_ratios':height_ratio},
                             figsize=(8,5))
plt.subplots_adjust(hspace = .2, left = None, bottom = None, right = None, top = None)
# Drop the first few observations so the plots start after burn-in.
off = 5
time_range = np.linspace(10,T-2, T-2-off,dtype = int)
all_dates = all_dates[-len(time_range):]
# Panel 1: raw (transformed) series of all 29 stations.
fig_1 = EvT.plot_raw_TS(data[-len(time_range):,:].reshape(len(time_range), 29), all_dates = all_dates, ax = ax_array[0],
                        time_range = time_range,
                        custom_colors_series = ["black"]*10,
                        ylab_fontsize = 14,
                        yticks_fontsize = 14,
                        ylab = "NOX",
                        xlab=None,
                        ylabel_coords = ylabel_coords,
                        true_CPs = true_CPs)
# The two competing models whose posteriors / Bayes factor are shown below.
mod = [17,21]
# Panel 2: model-posterior trace for the two selected models.
EvT.plot_model_posterior(indices=mod, #mods, #mods, #relevant_models,
                         plot_type = "trace", #"MAPVariance1_trace",
                         #y_axis_labels = [str(e) for e in all_models],#[#"AR(1)",
                         #"M(5+)", "M(6)",
                         # "M(6+)",
                         # "M(7)", "M(7+)"],#relevant_models],
                         time_range = time_range,
                         y_axis_labels = [],
                         log_format=False, aspect = 'auto',
                         show_MAP_CPs = False,
                         #start_plot = 2002.75, stop_plot = 2003.75,
                         custom_colors = ["blue", "orange"], # ["orange"], #custom_colors_models,
                         ax = ax_array[1] ,#ax_array[1], #None, #ax_array[1],
                         xlab = None, #ylab = None, #trace",period_time_list = None,
                         number_offset = 1.0, #datetime.timedelta(days = 1),#0.75,
                         number_fontsize = 20,
                         period_line_thickness = 7.0,
                         xlab_fontsize = 14, ylab_fontsize = 14,
                         xticks_fontsize = 14, yticks_fontsize = 14,
                         ylabel_coords = ylabel_coords,
                         #ylab = None, #"Model posterior max",
                         #period_time_list = [
                         #    [datetime.datetime(year = 2003, month = 2, day = 17, hour = 0),
                         #     datetime.datetime(year = 2003, month = 2, day = 18, hour = 0)]],
                         #label_list = [["1"]],
                         #window_len = int(12*7*1),
                         period_time_list = None, #[[datetime.datetime(year = 2003, month = 2, day = 17, hour = 0),
                         #datetime.datetime(year = 2003, month = 2, day = 18, hour = 0)]],
                         label_list = None, #[["1"]],
                         SGV = True,
                         log_det = True,
                         all_dates = all_dates,
                         true_CPs = true_CPs)
# Panel 3: smoothed log Bayes factor between the two models.
fig_4 = EvT.plot_model_posterior(indices=mod, #mods, #mods, #relevant_models,
                                 plot_type = "BF", #"MAPVariance1_trace",
                                 #y_axis_labels = [str(e) for e in all_models],#[#"AR(1)",
                                 #"M(5+)", "M(6)",
                                 # "M(6+)",
                                 # "M(7)", "M(7+)"],#relevant_models],
                                 time_range = time_range,
                                 log_format=True, aspect = 'auto',
                                 show_MAP_CPs = False,
                                 #start_plot = 2002.7, stop_plot = 2003.7,
                                 custom_colors = ["green"], #custom_colors_models,
                                 ax = ax_array[2], xlab = None, ylab = "log(BF)", #trace",
                                 period_time_list = None,
                                 label_list =None,
                                 number_offset = 0.75,
                                 number_fontsize = 20,
                                 period_line_thickness = 7.0,
                                 xlab_fontsize = 14, ylab_fontsize = 14,
                                 xticks_fontsize = 14, yticks_fontsize = 14,
                                 ylabel_coords = ylabel_coords,
                                 window_len = int(12*7*2),
                                 SGV = False,
                                 log_det = True,
                                 all_dates = all_dates,
                                 true_CPs = true_CPs
                                 )
fig.savefig(res_path + "APData.pdf")
sakt/__init__.py | scaomath/kaggle-riiid-test | 0 | 12764821 | <gh_stars>0
from .sakt import *
# from .train_sakt_final import * | 1.078125 | 1 |
Vargi_Bots/ros_packages/pkg_task5/scripts/node_t5_ur5_1_package_pick.py | ROBODITYA/Eyantra-2021-Vargi-Bots | 1 | 12764822 | #!/usr/bin/env python
''' This node is used for controlling the ur5_1 arm and conveyor belt. '''
import sys
import math
import datetime
from datetime import datetime
import yaml
import rospy
import rospkg
import moveit_commander
import moveit_msgs.msg
import actionlib
from std_srvs.srv import Empty
from std_msgs.msg import String
from pkg_vb_sim.srv import vacuumGripper
from pkg_task5.srv import camera_packages
from pkg_ros_iot_bridge.msg import msgRosIotAction
# Message Class that is used by ROS Actions internally
from pkg_ros_iot_bridge.msg import msgRosIotGoal
# Message Class that is used for Action Goal Messages
from pkg_ros_iot_bridge.msg import msgRosIotResult
# Message Class that is used for Action Result Messages
from pkg_ros_iot_bridge.msg import msgRosIotFeedback
# # Message Class that is used for Action Feedback Messages
from pkg_ros_iot_bridge.msg import msgMqttSub
# Message Class for MQTT Subscription Messages
''' Class to initiate the pick & place process. '''
class Ur5PickPlace:
# Constructor
    def __init__(self):
        ''' Set up arm poses, the ROS node, MoveIt!, and the ROS-IoT action client. '''
        self._original_orders = []
        self.HigherPriorityOrder = []
        # Joint-angle targets (radians) for the arm's rest pose and the
        # pose above the conveyor belt.
        self.ur5_1_home_pose = [math.radians(-90), math.radians(-90), math.radians(0),
                                math.radians(-90), math.radians(-90), math.radians(90)]
        self.ur5_1_conveyor_pose = [math.radians(7.8), math.radians(-139.4), math.radians(-57.6),
                                    math.radians(-72.8), math.radians(89.9), math.radians(7.8)]
        # Joint-angle targets for each of the 12 shelf slots, indexed pkg<row><col>.
        ur5_1_pkg00 = [math.radians(-55.8), math.radians(-67.0), math.radians(1.2),
                       math.radians(-114.1), math.radians(-121.3), math.radians(90)]
        ur5_1_pkg01 = [math.radians(-118.9), math.radians(-85.6), math.radians(18.7),
                       math.radians(-113.1), math.radians(-61.0), math.radians(90.0)]
        ur5_1_pkg02 = [math.radians(55.7), math.radians(-117.0), math.radians(5.4),
                       math.radians(-68.4), math.radians(124.2), math.radians(90)]
        ur5_1_pkg10 = [math.radians(-55.1), math.radians(-96.9), math.radians(82.6),
                       math.radians(-165.7), math.radians(-124.8), math.radians(90)]
        ur5_1_pkg11 = [math.radians(-122.7), math.radians(-116.5), math.radians(95.9),
                       math.radians(-159.3), math.radians(-57.2), math.radians(90.0)]
        ur5_1_pkg12 = [math.radians(54.4), math.radians(-84.5), math.radians(-83.6),
                       math.radians(-9.3), math.radians(126.7), math.radians(90)]
        ur5_1_pkg20 = [math.radians(-55.09), math.radians(-96.44), math.radians(87.31),
                       math.radians(9.035), math.radians(125.49), math.radians(90)]
        ur5_1_pkg21 = [math.radians(116.01), math.radians(-61.96), math.radians(-129.27),
                       math.radians(10.33), math.radians(62.64), math.radians(90)]
        ur5_1_pkg22 = [math.radians(55.5), math.radians(-85.8), math.radians(-114.2),
                       math.radians(20.8), math.radians(124.7), math.radians(90.0)]
        ur5_1_pkg30 = [math.radians(-55.08), math.radians(-91.64), math.radians(117.76),
                       math.radians(-26.22), math.radians(125.48), math.radians(90.0)]
        ur5_1_pkg31 = [math.radians(-121.6), math.radians(-115.9), math.radians(135.1),
                       math.radians(-19.2), math.radians(58.3), math.radians(90)]
        ur5_1_pkg32 = [math.radians(-160.73), math.radians(-92.61), math.radians(118.27),
                       math.radians(-25.89), math.radians(19.84), math.radians(90)]
        # Names of packages and their respective bins in gazebo
        self.packages_name_position = {"packagen00":ur5_1_pkg00, "packagen01":ur5_1_pkg01,
                                       "packagen02":ur5_1_pkg02, "packagen10":ur5_1_pkg10,
                                       "packagen11":ur5_1_pkg11, "packagen12":ur5_1_pkg12,
                                       "packagen20":ur5_1_pkg20, "packagen21":ur5_1_pkg21,
                                       "packagen22":ur5_1_pkg22, "packagen30":ur5_1_pkg30,
                                       "packagen31":ur5_1_pkg31, "packagen32":ur5_1_pkg32}
        # Initialize ROS Node (the sleep gives Gazebo/other nodes time to start).
        rospy.init_node('node_t5_ur5_1_package_pick', anonymous=True)
        rospy.sleep(15)
        self.publish_orders = rospy.Publisher('/Orders_to_ship', String, queue_size=10)
        # Wait for the 2D-camera service that reports package colours.
        rospy.wait_for_service('/2Dcamera_packages_type')
        # Load variables for moveit!
        self._robot_ns = '/' + "ur5_1"
        self._planning_group = "manipulator"
        self._commander = moveit_commander.roscpp_initialize(sys.argv)
        self._robot = moveit_commander.RobotCommander(robot_description=self._robot_ns + "/robot_description",
                                                      ns=self._robot_ns)
        self._scene = moveit_commander.PlanningSceneInterface(ns=self._robot_ns)
        self._group = moveit_commander.MoveGroupCommander(self._planning_group, robot_description=self._robot_ns + "/robot_description",
                                                          ns=self._robot_ns)
        self._display_trajectory_publisher = rospy.Publisher(self._robot_ns + '/move_group/display_planned_path',
                                                             moveit_msgs.msg.DisplayTrajectory, queue_size=2)
        self._exectute_trajectory_client = actionlib.SimpleActionClient(self._robot_ns + '/execute_trajectory',
                                                                        moveit_msgs.msg.ExecuteTrajectoryAction)
        self._exectute_trajectory_client.wait_for_server()
        rospy.set_param('/ur5_1_vacuum_gripper_service', False)
        self._planning_frame = self._group.get_planning_frame()
        self._eef_link = self._group.get_end_effector_link()
        self._group_names = self._robot.get_group_names()
        self._computed_plan = ''
        self._curr_state = self._robot.get_current_state()
        self._group.set_planning_time(99)
        rp = rospkg.RosPack()
        self._pkg_path = rp.get_path('pkg_task5')
        self._file_path = self._pkg_path + '/config/saved_trajectories/'
        rospy.loginfo("Package Path: {}".format(self._file_path))
        # NOTE(review): this line is labelled "Planning Group" but prints the
        # planning *frame* -- confirm which was intended.
        rospy.loginfo('\033[94m' + "Planning Group: {}".format(self._planning_frame) + '\033[0m')
        rospy.loginfo('\033[94m' + "End Effector Link: {}".format(self._eef_link) + '\033[0m')
        rospy.loginfo('\033[94m' + "Group Names: {}".format(self._group_names) + '\033[0m')
        rospy.loginfo('\033[94m' + " >>> Ur5Moveit init done." + '\033[0m')
        ## MQTT Client
        # Initialize Action Client
        self._ac = actionlib.ActionClient('/action_ros_iot',
                                          msgRosIotAction)
        param_config_iot = rospy.get_param('config_pyiot')
        # Store the ROS Topic to get the start message from bridge action server
        self._param_order_topic = param_config_iot['mqtt']['sub_cb_ros_topic']
        self._config_mqtt_pub_topic = param_config_iot['mqtt']['topic_pub']
        # Subscribe to the desired topic and attach a callback function to it.
        rospy.Subscriber(self._param_order_topic, msgMqttSub, self.func_callback_orders)
        # Dictionary to store all the goal handles
        self._goal_handles = {}
        self._orders = []
        self._package_colours = None
        # Wait for Action Server that will use the action - '/action_iot_ros' to start
        self._ac.wait_for_server()
        rospy.loginfo("Action server up, we can send goals.")
    ''' Get all data from incoming orders. '''
    def func_callback_orders(self, msg):
        """ROS subscriber callback for incoming order messages.

        Parses the order payload, logs it to the "IncomingOrders" spreadsheet
        via the MQTT bridge, assigns a package of the matching colour plus its
        shelf location, and queues the enriched order for processing.

        :param msg: msgMqttSub whose ``message`` field is the string repr of a
            dict with keys order_id, order_time, item, city, lon, lat.
        """
        rospy.loginfo('***Order received:'+ msg.message)
        # SECURITY NOTE(review): eval() on data received over MQTT executes
        # arbitrary code if the broker/topic is not trusted; consider
        # ast.literal_eval instead.
        order_msg = eval(msg.message)
        # Item -> [priority code, colour, cost] lookup table.
        self._order_type = {'Medicine':['HP', 'Red', '450'],
                            'Clothes':['LP', 'Green', '150'],
                            'Food':['MP', 'Yellow', '250']}
        order_id = order_msg['order_id']
        order_time = order_msg['order_time']
        order_item = order_msg['item']
        order_priority = self._order_type[order_item][0]
        order_city = order_msg['city']
        order_lon = order_msg['lon']
        order_lat = order_msg['lat']
        order_cost = self._order_type[order_item][2]
        # Row pushed to the "IncomingOrders" sheet via the ROS-IoT bridge.
        info = {'id':'IncomingOrders', 'Team Id':'VB#693', 'Unique Id':'RRCneYRC',
                'Order ID':order_id, 'Order Date and Time': order_time,
                'Item':order_item, 'Priority':order_priority,
                'Order Quantity':'1', 'City':order_city, 'Longitude':order_lon,
                "Latitude":order_lat, 'Cost':order_cost}
        message = str(info)
        goal_handle = self.send_goal_to_mqtt_client("spreadsheet", "pub",
                                                    self._config_mqtt_pub_topic, message)
        self._goal_handles['Order'] = goal_handle
        # Map priority to a package colour and consume one package + shelf slot.
        if info["Priority"] == 'HP':
            info["color"] = 'red'
            info["package_name"] = self.assignName('red')
            info["location_on_shelf"] = self.assignLoc(info["package_name"])
        elif info["Priority"] == 'MP':
            info["color"] = 'yellow'
            info["package_name"] = self.assignName('yellow')
            info["location_on_shelf"] = self.assignLoc(info["package_name"])
        else:
            info["color"] = 'green'
            info["package_name"] = self.assignName('green')
            info["location_on_shelf"] = self.assignLoc(info["package_name"])
        self._orders.append(info)
        rospy.loginfo('******Orders Received******* :')
        # NOTE(review): self._original_orders is not initialised in the visible
        # __init__ (only self._orders is); confirm it is created elsewhere or
        # this raises AttributeError on the first order.
        self._original_orders.append(info)
''' Assigns package names to the prioritized order. '''
def assignName(self, curr_color):
for k in sorted(self._package_colours):
if self._package_colours[k] == curr_color:
val = k
self._package_colours.pop(k)
return val
''' Assigns package names to the prioritized order. '''
def assignLoc(self, pkgName):
val = self.packages_name_position[pkgName]
self.packages_name_position.pop(pkgName)
return val
''' Function to prioritize incoming order using insertion sort algorithm. '''
def func_prioritize_orders(self):
orders = self._orders
l = len(orders)
priority_to_value = {"LP":1, "MP":2, "HP":3}
for i in range(l):
pos = i
while pos > 0 and priority_to_value[orders[pos]['Priority']] > priority_to_value[orders[pos-1]['Priority']]:
orders[pos-1], orders[pos] = orders[pos], orders[pos-1]
pos -= 1
return orders
    ''' Function to get detected packages. '''
    def camera1_callback(self):
        """Query the 2D-camera service once and cache the detected package
        colours in self._package_colours (dict: package name -> colour).
        Returns nothing; results are only stored on self.
        """
        get_packages_type = rospy.ServiceProxy('/2Dcamera_packages_type', camera_packages)
        try:
            self.get_packages = get_packages_type(True)
            # The service returns the colour map serialised as a Python
            # literal string; eval() trusts the service output is well-formed.
            self._package_colours = eval(self.get_packages.pack_type)
        except rospy.ServiceException as exc:
            print "Service did not process request: " + str(exc)
    '''This function will be called when there is a change of state
    in the Action Client State Machine. '''
    def on_transition(self, goal_handle):
        """Transition callback for goals sent via send_goal_to_mqtt_client.

        Looks up which tracked goal handle changed state, logs its comm
        state / goal status, and on completion logs whether it succeeded.
        """
        # from on_goal() to on_transition(). goal_handle generated by send_goal() is used here.
        result = msgRosIotResult()
        index = 0
        # Reverse lookup: find the dictionary key this goal handle was stored under.
        for i in self._goal_handles:
            if self._goal_handles[i] == goal_handle:
                index = i
                break
        rospy.loginfo("Transition Callback. Client Goal Handle #: " + str(index))
        rospy.loginfo("Comm. State: " + str(goal_handle.get_comm_state()))
        rospy.loginfo("Goal Status: " + str(goal_handle.get_goal_status()))
        # Comm State - Monitors the State Machine of the Client which is different from Server's
        # Comm State = 2 -> Active
        # Comm State = 3 -> Wating for Result
        # Comm State = 7 -> Done
        # if (Comm State == ACTIVE)
        if goal_handle.get_comm_state() == 2:
            rospy.loginfo(str(index) + ": Goal just went active.")
        # if (Comm State == DONE)
        if goal_handle.get_comm_state() == 7:
            rospy.loginfo(str(index) + ": Goal is DONE")
            rospy.loginfo(goal_handle.get_terminal_state())
            # get_result() gets the result produced by the Action Server
            result = goal_handle.get_result()
            rospy.loginfo(result.flag_success)
            if result.flag_success == True:
                rospy.loginfo("Goal successfully completed. Client Goal Handle #: " + str(index))
            else:
                rospy.loginfo("Goal failed. Client Goal Handle #: " + str(index))
''' This function is used to send Goals to MQtt client. '''
def send_goal_to_mqtt_client(self, arg_protocol, arg_mode, arg_topic, arg_message):
# Create a Goal Message object
goal = msgRosIotGoal()
goal.protocol = arg_protocol
goal.mode = arg_mode
goal.topic = arg_topic
goal.message = arg_message
rospy.loginfo("Sending to mqtt client")
# self.on_transition - It is a function pointer to a function which will be called when
# there is a change of state in the Action Client State Machine
goal_handle = self._ac.send_goal(goal,
self.on_transition,
None)
return goal_handle
    ''' Function to attach box to UR5_1 vacuum gripper. '''
    def attach_box(self, current_package, timeout=4):
        """Attach *current_package* in the planning scene and switch the
        UR5_1 vacuum gripper on via its service, retrying once on failure.

        The '/ur5_1_vacuum_gripper_service' ROS parameter is used as a
        cooperative busy flag shared with the UR5_2 / conveyor nodes.
        NOTE(review): *timeout* is accepted but never used; self._planning_group
        is not initialised in the visible __init__ -- confirm upstream.
        """
        touch_links = self._robot.get_link_names(self._planning_group)
        self._scene.attach_box(self._eef_link, current_package, touch_links=touch_links)
        # Wait while other consumers of the gripper services are busy.
        if rospy.get_param('/ur5_2_vacuum_gripper_service') == True or rospy.get_param('/conveyor_belt_service') == True:
            rospy.loginfo_once("Waiting for Service")
            rospy.sleep(1)
        rospy.set_param('/ur5_1_vacuum_gripper_service', True)
        try:
            rospy.wait_for_service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1')
            self.attach = rospy.ServiceProxy('eyrc/vb/ur5/activate_vacuum_gripper/ur5_1',
                                             vacuumGripper)
            self.attach(True)
            rospy.set_param('/ur5_1_vacuum_gripper_service', False)
        except rospy.ServiceException, e:
            print "Service call failed: %s" % e
            print "Trying to reconnect service"
            # Single retry: wait for the service again and repeat the call.
            rospy.wait_for_service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1')
            rospy.set_param('/ur5_1_vacuum_gripper_service', True)
            self.attach = rospy.ServiceProxy('eyrc/vb/ur5/activate_vacuum_gripper/ur5_1',
                                             vacuumGripper)
            self.attach(True)
            rospy.set_param('/ur5_1_vacuum_gripper_service', False)
    ''' Function to detach box from UR5_1 vacuum gripper. '''
    def detach_remove_box(self, current_package, timeout=4):
        """Detach *current_package* from the end effector, switch the vacuum
        gripper off (one retry on service failure), and finally remove the
        box from the planning scene.

        NOTE(review): *timeout* is accepted but never used.
        """
        # Wait while other consumers of the gripper services are busy.
        if rospy.get_param('/ur5_2_vacuum_gripper_service') == True or rospy.get_param('/conveyor_belt_service') == True:
            rospy.loginfo_once("Waiting for Service")
            rospy.sleep(1)
        rospy.set_param('/ur5_1_vacuum_gripper_service', True)
        self._scene.remove_attached_object(self._eef_link, name=current_package)
        try:
            rospy.wait_for_service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1')
            self.attach = rospy.ServiceProxy('eyrc/vb/ur5/activate_vacuum_gripper/ur5_1',
                                             vacuumGripper)
            self.attach(False)
            rospy.set_param('/ur5_1_vacuum_gripper_service', False)
        except rospy.ServiceException, e:
            print "Service call failed: %s" % e
            print "Trying again to connect service"
            # Single retry: wait for the service again and repeat the call.
            rospy.wait_for_service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1')
            rospy.set_param('/ur5_1_vacuum_gripper_service', True)
            self.attach = rospy.ServiceProxy('eyrc/vb/ur5/activate_vacuum_gripper/ur5_1',
                                             vacuumGripper)
            self.attach(False)
            rospy.set_param('/ur5_1_vacuum_gripper_service', False)
        self._scene.remove_world_object(current_package)
''' Set UR5_1 joint angles. '''
def set_joint_angles(self, arg_list_joint_angles):
self._group.set_joint_value_target(arg_list_joint_angles)
self._computed_plan = self._group.plan()
self._group.get_current_joint_values()
rospy.loginfo('\033[94m' + ">>> Current Joint Values:" + '\033[0m')
flag_plan = self._group.go(wait=True)
self._group.stop()
if flag_plan == True:
rospy.loginfo(
'\033[94m' + ">>> set_joint_angles() Success" + '\033[0m')
else:
rospy.logerr(
'\033[94m' + ">>> set_joint_angles() Failed." + '\033[0m')
return flag_plan
''' Function to set the angles until it reaches max attempt. '''
def hard_set_joint_angles(self, arg_list_joint_angles, arg_max_attempts):
number_attempts = 0
flag_success = False
while ((number_attempts <= arg_max_attempts) and (flag_success is False)):
number_attempts += 1
flag_success = self.set_joint_angles(arg_list_joint_angles)
rospy.logwarn("attempts: {}".format(number_attempts))
''' Function to play saved trajectories. '''
def moveit_play_planned_path_from_file(self, arg_file_path, arg_file_name):
file_path = arg_file_path + arg_file_name
list_joint_values = self._group.get_current_joint_values()
with open(file_path, 'r') as file_open:
loaded_plan = yaml.load(file_open)
ret = self._group.execute(loaded_plan)
return ret
''' Function to play saved trajectories until it reaches max attempt. '''
def moveit_hard_play_planned_path_from_file(self, arg_file_path, arg_file_name, arg_max_attempts):
number_attempts = 0
flag_success = False
while ((number_attempts <= arg_max_attempts) and (flag_success is False)):
number_attempts += 1
flag_success = self.moveit_play_planned_path_from_file(arg_file_path, arg_file_name)
rospy.logwarn("attempts: {}".format(number_attempts))
return True
    ''' UR5_1 robot arm pick and place routine. '''
    def robot_pick_place(self, _current_package, package_location, conveyor_location, ordersheet):
        """Pick one package from the shelf, drop it on the conveyor, then
        report the dispatch to the spreadsheet and to the orders topic.

        :param _current_package: package name (used to pick the saved
            'conveyor_<name>.yaml' trajectory).
        :param package_location: shelf joint configuration to pick from.
        :param conveyor_location: NOTE(review): accepted but never used.
        :param ordersheet: order dict produced by func_callback_orders.
        """
        self.hard_set_joint_angles(package_location, 7)
        self.attach_box(_current_package)
        rospy.sleep(0.5)
        # Replay the pre-recorded shelf-to-conveyor trajectory for this package.
        self.moveit_hard_play_planned_path_from_file(self._file_path,
                                                     'conveyor_'+_current_package+'.yaml', 5)
        self.detach_remove_box(_current_package)
        rospy.sleep(0.5)
        dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        # Row pushed to the "OrdersDispatched" sheet via the MQTT bridge.
        info = {'id':'OrdersDispatched', 'Team Id':'VB#693', 'Unique Id':'RRCneYRC',
                'Order ID':ordersheet['Order ID'],
                'City':ordersheet['City'], 'Item':ordersheet['Item'],
                'Priority':ordersheet['Priority'],
                'Dispatch Quantity':'1', 'Cost':ordersheet['Cost'],
                'Dispatch Status':'YES', 'Dispatch Date and Time': dt_string}
        message = str(info)
        goal_handle = self.send_goal_to_mqtt_client("spreadsheet", "pub",
                                                    self._config_mqtt_pub_topic, message)
        self._goal_handles['Order Dispatched'] = goal_handle
        # NOTE(review): self.publish_orders is not created in the visible
        # __init__; confirm the publisher is set up elsewhere.
        self.publish_orders.publish(message)
# Destructor
def __del__(self):
moveit_commander.roscpp_shutdown()
rospy.loginfo(
'\033[94m' + "Object of class Ur5Moveit Deleted." + '\033[0m')
''' Main Function. '''
def main():
    """Entry point: build the UR5 wrapper, read the camera once, move to the
    home pose, then serve queued orders (highest priority first) until ROS
    shuts down."""
    # Create UR5 object (Gazebo/Rviz must already be initializing).
    ur5_1 = Ur5PickPlace()
    # Populates ur5_1._package_colours from the 2D camera; the call returns
    # nothing useful, so the old unused `pkg_names` binding was dropped.
    ur5_1.camera1_callback()
    # Initially move the robot to home position
    ur5_1.set_joint_angles(ur5_1.ur5_1_conveyor_pose)
    while not rospy.is_shutdown():
        # Busy-poll the order queue filled by the MQTT subscriber callback.
        if len(ur5_1._orders) != 0:
            ur5_1.func_prioritize_orders()
            curr_order = ur5_1._orders.pop(0)
            ur5_1.robot_pick_place(curr_order['package_name'], curr_order['location_on_shelf'],
                                   ur5_1.ur5_1_conveyor_pose, curr_order)
if __name__ == '__main__':
    main()
| 2.296875 | 2 |
tests/utils.py | MattSegal/cjob | 1 | 12764823 | <filename>tests/utils.py
import os
import boto3
def settings_factory(**overrides):
    """Return a zero-argument callable producing a settings dict.

    Keyword arguments override the built-in defaults; each call to the
    returned function yields a fresh dict.
    """
    defaults = {
        "AWS_REGION": "ap-southeast-2",
        "AWS_PROFILE": "default",
        "EC2_INSTANCE_TYPE": "r5.2xlarge",
        "EC2_KEY_FILE_PATH": "~/.ssh/testkey.pem",
    }

    def get_settings():
        merged = dict(defaults)
        merged.update(overrides)
        return merged

    return get_settings
def create_test_instance(client, name, **overrides):
    """Launch a single EC2 test instance tagged with *name* and return its id.

    Extra keyword arguments override the baked-in run_instances defaults.
    """
    name_tag = [{"Key": "Name", "Value": name}]
    params = dict(
        MaxCount=1,
        MinCount=1,
        ImageId="ami-076a5bf4a712000ed",
        InstanceType="r5.2xlarge",
        SecurityGroupIds=[],
        KeyName="zzz",
        InstanceInitiatedShutdownBehavior="terminate",
        TagSpecifications=[
            {
                "ResourceType": "instance",
                "Tags": name_tag,
            }
        ],
    )
    params.update(overrides)
    response = client.run_instances(**params)
    return response["Instances"][0]["InstanceId"]
bioplus/wrappers.py | benjschiller/seriesoftubes | 1 | 12764824 | <reponame>benjschiller/seriesoftubes
# this module wraps the built-in module random
# and provides some useful random sequence generators, etc.
import random
import itertools
def random_seq(n=1, GC=0.5):
    '''Return a string of n random nucleotides.  GC is the probability of a
    G or C at each position; the probability of an A or T is 1 - GC.
    Validation of n and GC is delegated to random_seq_generator.
    '''
    # str.join consumes the generator directly; no intermediate list needed.
    return ''.join(random_seq_generator(n, GC))
def random_seq_generator(n=1, GC=0.5):
    '''
    Yield n random nucleotides, each 'G' or 'C' with probability GC and
    'A' or 'T' otherwise.

    Fixes three defects in the original: (1) a single random.random() value
    was frozen with itertools.repeat, so every position received the same
    base; (2) the stream was infinite, so consuming it (as random_seq does
    via join) never terminated; (3) the range validation mis-grouped its
    boolean test ("not GC >= 0 and GC <= 1").

    :param n: positive int, number of nucleotides to yield.
    :param GC: float strictly between 0 and 1 (matching the inputs the
        original accepted).
    :raises ValueError: for any other n or GC.
    '''
    myError = ValueError('randomN requires a positive integer n (default = 1) and a probability GC (float 0.0 to 1.0)')
    if not type(GC) == float or not 0.0 < GC < 1.0:
        raise myError
    if not type(n) == int or n < 1:
        raise myError

    def generate():
        for _ in range(n):
            # Fresh random draw per position.
            if random.random() < GC:
                yield random.choice(['G', 'C'])
            else:
                yield random.choice(['A', 'T'])

    return generate()
| 3.390625 | 3 |
src/modules/trainer.py | tkosht/forecaster | 0 | 12764825 | <filename>src/modules/trainer.py
import datetime
import numpy
import torch
import torch.optim as optim
import mlflow
import pickle
from torch.utils.tensorboard import SummaryWriter
from typing import Tuple
from .util.items import Items
from .dataset.batcher import BatchMaker
from .dataset.dateset import Tsr
class Trainer(object):
    """Drives (pre)training of a forecasting model: batching, the optimizer
    closure, quantile prediction logging to TensorBoard, and MLflow model
    persistence."""

    def __init__(self, model, optimizer, params: Items):
        """
        :param model: forecasting model exposing loss_train / loss_pretrain /
            calc_loss and (for pretraining) a ``pretrain`` attribute.
        :param optimizer: torch optimizer (closure-style step supported).
        :param params: Items with batch_size, quantiles, log_interval,
            save_interval.
        """
        self.model = model
        self.optimizer = optimizer
        self.params = params
        self.loss_train = None
        self.loss_valid = None
        self.train_model = model  # switch model if pretrain or train
        # setup tensorboard writer; run directory is keyed by a timestamp
        experiment_id = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        logdir = f"result/{experiment_id}"
        writer = SummaryWriter(log_dir=logdir)
        self.writer = writer
        self.experiment_id = experiment_id

    def write_graph(self, trainset):
        # Trace the model once on the training tensors for TensorBoard's graph tab.
        self.writer.add_graph(self.model, (trainset.ti, trainset.tc, trainset.kn))

    def get_quantile(self, x: Tsr, alpha: float):
        """Pick the slice of *x* for quantile *alpha* (must be an exact member
        of params.quantiles; argmax of the equality mask gives its index)."""
        idx = (numpy.array(self.params.quantiles) == alpha).argmax()
        return x[:, idx, :][..., 0]  # just to get a first dim

    def do_pretrain(self, dataset, epochs: int = 500):
        """Run the pretraining objective, then register the pretrained model."""
        self.train_model = self.model.pretrain
        self._do_train(dataset, epochs)
        self._save_model(model_title=f"pretrained_model_{self.model.name}")

    def do_train(self, dataset, epochs: int = 500):
        """Run the main training objective, then register as 'latest_model'."""
        self.train_model = self.model
        self._do_train(dataset, epochs)
        self._save_model(model_title="latest_model")

    def _get_train_mode(self):
        # "train" iff train_model is the model itself; otherwise pretraining.
        mode = "train" if self.train_model == self.model else "pretrain"
        return mode

    def _do_train(self, dataset, epochs: int = 500):
        """Shared epoch/batch loop used by both do_pretrain and do_train."""
        train_mode = self._get_train_mode()
        print(f"##### {train_mode} #####")
        ti, tc, kn, tg = (
            dataset.trainset.ti,
            dataset.trainset.tc,
            dataset.trainset.kn,
            dataset.trainset.tg,
        )
        batch = BatchMaker(bsz=self.params.batch_size)
        # train loop
        n_steps = -1
        losses = []
        # # epoch loop
        for idx in range(epochs):
            # Reshuffle sample order each epoch.
            shuffle = numpy.random.permutation(range(len(ti)))
            _ti, _tc, _kn, _tg = ti[shuffle], tc[shuffle], kn[shuffle], tg[shuffle]
            # # batch loop -- note the first stream batches the UNshuffled ti
            # (bti_org below), while the rest use the shuffled tensors.
            for bdx, bch in enumerate(
                zip(
                    batch(ti),
                    batch(_ti),
                    batch(_tc),
                    batch(_kn),
                    batch(_tg),
                )
            ):
                (bti_org, bti, btc, bkn, btg) = bch
                n_steps += 1
                def closure():
                    # Closure handed to optimizer.step(); computes the loss,
                    # logs it, and backpropagates.
                    model_params = dict(batch=bch)
                    self.optimizer.zero_grad()
                    if train_mode == "pretrain":
                        loss = self.model.loss_pretrain(bti, btc, bkn, **model_params)
                    else:
                        loss = self.model.loss_train(bti, btc, bkn, btg, **model_params)
                    losses.append(loss.item())
                    # NOTE(review): this assumes the optimizer calls the
                    # closure exactly once per step (true for Adam/SGD;
                    # LBFGS may call it several times and would trip this).
                    assert len(losses) == n_steps + 1
                    self.loss_train = loss.item()  # keep latest loss
                    k = self.params.log_interval
                    # Smooth with the mean of the last k steps once available.
                    _loss = loss.item() if n_steps < k else numpy.mean(losses[-k:])
                    self.writer.add_scalar(
                        f"{train_mode}/loss/step/train", _loss, n_steps
                    )
                    # logging progress (every log_interval epochs, first batch only)
                    if idx % self.params.log_interval == 0 and idx > 0 and bdx == 0:
                        mean_loss = numpy.mean(losses[-k:])
                        print(
                            f"{train_mode}/loss[{idx:03d}][{bdx:03d}][{n_steps:05d}]",
                            mean_loss,
                        )
                        # prediction with trainset
                        loss_train = self._predict(
                            idx, ti, tc, kn, tg, pred_mode="train"
                        )
                        self.loss_train = loss_train.item()
                        # prediction with testset
                        testset = dataset.create_testset()
                        loss_valid = self._predict(
                            idx,
                            testset.ti,
                            testset.tc,
                            testset.kn,
                            testset.tg,
                            pred_mode="valid",
                        )
                        self.loss_valid = loss_valid.item()
                    loss.backward()
                    return loss
                self.optimizer.step(closure)
            # Periodic MLflow snapshot of the model.
            if idx % self.params.save_interval == 0 and idx > 0:
                mlflow.pytorch.log_model(
                    self.model, f"models.{idx:05d}", pickle_module=pickle
                )

    def _predict(
        self, idx, ti: Tsr, tc: Tsr, kn: Tsr, tg: Tsr, pred_mode="train"
    ) -> Tsr:
        """Evaluate the model on the FIRST batch of the given tensors (no
        gradients), log predictions/loss to TensorBoard, return the loss."""
        batch = BatchMaker(bsz=self.params.batch_size)
        bti, btc, bkn, btg = (
            next(batch(ti)),
            next(batch(tc)),
            next(batch(kn)),
            next(batch(tg)),
        )
        with torch.no_grad():
            pred = self.model(bti, btc, bkn)
            loss = self.model.calc_loss(pred, btg)
            preds = self._make_predictions(pred, btg)
            self._write_log2tb(idx, preds, loss, pred_mode)
        return loss

    def _make_predictions(self, y_pred, tg) -> Tuple[Tsr, Tsr, Tsr, Tsr]:
        """Split the quantile axis out of y_pred and return the median,
        10%/90% bands, and the matching targets (last time step)."""
        pred = y_pred.view(-1, len(self.params.quantiles), self.model.args.dim_out)
        p = self.get_quantile(pred, alpha=0.5)
        p10 = self.get_quantile(pred, alpha=0.1)
        p90 = self.get_quantile(pred, alpha=0.9)
        t = tg[:, -1, :][..., 0]
        return p, p10, p90, t

    def _write_log2tb(self, idx, preds, loss, pred_type="train") -> None:
        """Write per-sample prediction bands and the epoch loss to TensorBoard."""
        train_mode = self._get_train_mode()
        for n, (y0, yL, yH, t0) in enumerate(zip(*preds)):
            dct_pred = dict(p=y0, p10=yL, p90=yH, t=t0)
            self.writer.add_scalars(
                f"{train_mode}/prediction/epoch_{idx:03d}/{pred_type}",
                dct_pred,
                n,
            )
        self.writer.add_scalar(
            f"{train_mode}/loss/interval/{pred_type}", loss.item(), idx
        )

    def finalize(self, args):
        """Flush hyperparameters and final losses, then close the writer."""
        self._log_experiments(args)

    def _log_experiments(self, args):
        # experiment log: hparams + the last recorded train/valid losses
        hparams = dict(
            experiment_id=self.experiment_id,
            model=args.model,
            max_epoch=args.max_epoch,
            optimizer=str(self.optimizer),
        )
        self.writer.add_hparams(
            hparams,
            {
                "hparam/loss/train": self.loss_train,
                "hparam/loss/valid": self.loss_valid,
            },
        )
        self.writer.close()

    def _save_model(self, model_title="latest_model"):
        # Register the current model in the MLflow model registry.
        mlflow.pytorch.log_model(
            self.model,
            "models",
            registered_model_name=model_title,
            pickle_module=pickle,
        )
def get_args():
    """Parse command-line options for the toy training run."""
    import argparse
    parser = argparse.ArgumentParser(description="Model Toy Test")
    parser.add_argument("--model", type=str, default="cyclic",
                        choices=["cyclic", "trend", "recent", "full"])
    parser.add_argument("--max-epoch-pretrain", type=int, default=300)
    parser.add_argument("--max-epoch", type=int, default=30 * 1000)
    parser.add_argument("--log-interval", type=int, default=100)
    parser.add_argument("--save-interval", type=int, default=10000)
    parser.add_argument("--resume", action="store_true", default=False)
    return parser.parse_args()
if __name__ == "__main__":
from .model.model import Cyclic, Trend
from .dataset.dateset import DatesetToy
args = get_args()
# setup device
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
print("device:", device)
# create toy dataset
B, W, Dout = 64, 14, 1
toydataset = DatesetToy(Dout, W, args.model, device=device)
trainset = toydataset.create_trainset()
if args.resume:
def search_latest_model_info():
from mlflow.tracking import MlflowClient
mlf_client = MlflowClient()
for m in mlf_client.list_registered_models():
if m.name == "latest_model":
return m
return None
reg_info = search_latest_model_info()
assert reg_info is not None
info_latest = dict(reg_info)["latest_versions"][0]
uri = f"models:/latest_model/{info_latest.version}"
print(f"loading the latest model ... [{uri}]")
model = mlflow.pytorch.load_model(uri)
print("loading done.")
# recreate toydataset corresponding to the model
args.model = model.name
toydataset = DatesetToy(Dout, W, args.model, device=device)
trainset = toydataset.create_trainset()
else:
# setup model
dims = (trainset.ti.shape[-1], trainset.tc.shape[-1], trainset.kn.shape[-1])
modeller = dict(cyclic=Cyclic, trend=Trend)
model = modeller[args.model](
dim_ins=dims,
dim_out=trainset.tg.shape[-1],
ws=trainset.ti.shape[1],
dim_emb=8,
n_heads=4,
n_layers=1,
k=5,
)
model.to(device)
print("model.args:", model.name, model.args)
# setup optimizer
# optimizer = optim.LBFGS(model.parameters(), lr=0.8) # Newton
# optimizer = optim.SGD(model.parameters(), lr=0.001)
optimizer = optim.Adam(model.parameters(), lr=0.01)
params = Items(is_readonly=True).setup(
dict(
batch_size=B,
quantiles=[0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95],
log_interval=args.log_interval,
save_interval=args.save_interval,
)
)
trainer = Trainer(model, optimizer, params)
# graph to tensorboard
trainer.write_graph(trainset)
# pretrain
trainer.do_pretrain(toydataset, epochs=args.max_epoch_pretrain)
# train
trainer.do_train(toydataset, epochs=args.max_epoch)
# finalize
trainer.finalize(args)
| 2.34375 | 2 |
shor/operations.py | jywyq/shor | 0 | 12764826 | from shor.layers import _BaseLayer
class _Operation(_BaseLayer):
    """Abstract base class for quantum computing operations (non-gate layers)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def to_gates(self):
        """Operations decompose to no gates by default."""
        return []
class Measure(_Operation):
    """Measurement operation along a given axis; measures qubit 0 when no
    target bits are supplied."""

    def __init__(self, *bits, axis='z', **kwargs):
        targets = bits if bits else [0]
        self.bits = targets
        super().__init__(axis=axis, **kwargs)
| 2.53125 | 3 |
prototype/3DO_FFI_bindings/external_direct_optimization/test-suite/python_3/variance.py | recursion-ninja/PCG | 0 | 12764827 | #! usr/bin/env python
from math import sqrt
# Compare per-run timing files from the plain-C implementation and POY,
# printing the per-pair differences plus mean / expectation / variance stats.
for run in range(6):
    # Context managers guarantee both files are closed even on the
    # short-run `continue` below (the original leaked both handles there).
    with open("../data/times_just_C_run_{}.txt".format(run)) as c_file, \
         open("../data/times_poy_processor_{}.txt".format(run)) as poy_file:
        C_times_str = c_file.readlines()
        POY_times_str = poy_file.readlines()
    if len(C_times_str) < 11 or len(POY_times_str) < 11:
        continue
    # Each line ends with a time value; the final line holds the average
    # and is excluded from the statistics below.
    C_times = [int(float(line.split()[-1])) for line in C_times_str]
    POY_times = [int(float(line.split()[-1])) for line in POY_times_str]
    variance = 0
    mean = 0
    expectation = 0
    POY_count = 0
    C_count = 0
    print("Run {}:".format(run))
    for c, poy in zip(C_times[:-1], POY_times[:-1]):  # last line is the average
        print("{:>7}{:>7}{:>7}".format(c, poy, c - poy))
        mean += abs(c - poy)
        if c < poy:
            C_count += 1
        else:
            POY_count += 1
    # Divide by the actual number of compared pairs (10 for well-formed
    # files) instead of the original hard-coded 10.
    n_pairs = min(len(C_times), len(POY_times)) - 1
    mean /= n_pairs
    for c, poy in zip(C_times[:-1], POY_times[:-1]):
        difference = abs(c - poy)
        variance += (difference - mean) * (difference - mean)
        expectation += c - poy
    expectation /= n_pairs
    variance /= n_pairs
    print("\nC is faster: {:>8}".format(C_count))
    print("Poy is faster: {:>8}".format(POY_count))
    print("Mean time diff: {:>8}".format(int(mean)))
    print("Expectation: {:>8}".format(int(expectation)))
    print("Variance: {:>8}".format(int(variance)))
    print("Std. Dev: {:>8}".format(int(sqrt(variance))))
    print()
pytorch_lit/shared_params.py | lipovsek/PyTorch-LIT | 151 | 12764828 | <filename>pytorch_lit/shared_params.py
from torch.nn import Parameter
from .memory import Memory
class SharedParameterUtil:
    """Globally monkeypatches torch.nn.Parameter.__new__ so that every
    Parameter created with data is backed by a slice of one shared memory
    slot instead of its own storage.  State is process-global: hijack and
    reset must be paired, and only one memory key can be active at a time.
    """
    _isHijacked = False   # whether Parameter.__new__ is currently replaced
    _memory = None        # key passed to Memory.obtain for the shared slot
    _mainNew = None       # original Parameter.__new__, kept for restoration

    @staticmethod
    def _shared_new(cls, data=None, requires_grad=True):
        """Replacement for Parameter.__new__: redirect `data` into the
        shared slot.  Parameters created without data keep the default path.
        """
        if data is None:
            return SharedParameterUtil._mainNew(cls, data, requires_grad)
        mShape = data.shape
        # Flat element count of the requested tensor.
        fSize = 1
        for i in mShape:
            fSize *= i
        # NOTE(review): Memory.obtain is assumed to return a tensor-like
        # buffer large enough for fSize elements -- confirm in .memory.
        sharedSlot = Memory.obtain(SharedParameterUtil._memory)
        # Take the first fSize elements of the slot as a view with the
        # requested shape; the incoming `data` values are NOT copied in.
        nT = sharedSlot.reshape(-1)[:fSize]
        nT = nT.reshape(mShape)
        return SharedParameterUtil._mainNew(cls, nT, requires_grad)

    @staticmethod
    def hijackParameters(memoryKey):
        """Install the shared-memory __new__ for the given memory key.

        :raises RuntimeError: if already hijacked (call resetParameters first).
        """
        if SharedParameterUtil._isHijacked:
            raise RuntimeError("already hijacked, reset first")
        SharedParameterUtil._mainNew = Parameter.__new__
        SharedParameterUtil._isHijacked = True
        SharedParameterUtil._memory = memoryKey
        Parameter.__new__ = SharedParameterUtil._shared_new

    @staticmethod
    def resetParameters(resetMemory=False):
        """Restore the original Parameter.__new__; no-op when not hijacked.

        :param resetMemory: also release the shared memory slot.
        """
        if not SharedParameterUtil._isHijacked:
            return
        Parameter.__new__ = SharedParameterUtil._mainNew
        SharedParameterUtil._isHijacked = False
        SharedParameterUtil._mainNew = None
        if resetMemory:
            Memory.deallocKey(SharedParameterUtil._memory)
            SharedParameterUtil._memory = None
| 2.453125 | 2 |
ferenda/sources/legal/se/kommitte.py | redhog/ferenda | 18 | 12764829 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
from . import SwedishLegalStore, Trips
class KommitteStore(SwedishLegalStore):
    """Maps committee basefiles to path fragments and back."""

    def basefile_to_pathfrag(self, basefile):
        # "Ju 2012:01" -> "Ju/2012/01": both separators become "/".
        frag = basefile.replace(" ", "/")
        return frag.replace(":", "/")

    def pathfrag_to_basefile(self, pathfrag):
        # "Ju/2012/01" -> "Ju 2012:01": first "/" becomes a space, the
        # remaining one becomes ":".
        partial = pathfrag.replace("/", " ", 1)
        return partial.replace("/", ":")
class Kommitte(Trips):
    """Downloader/parser for Swedish committee descriptions served by the
    'komm' Trips application."""
    documentstore_class = KommitteStore
    alias = "komm"
    app = "komm"
    base = "KOMM"
    download_params = [{'maxpage': 101, 'app': app, 'base': base}]
    # Matches identifiers like "Ju 2012:01".
    basefile_regex = "(?P<basefile>\w+ \d{4}:\w+)$"
    re_basefile = re.compile(r'(\w+ \d{4}:\w+)', re.UNICODE)
    def parse_from_soup(self, soup, basefile):
        """Extract the raw text of the last <pre> element of the page.

        NOTE(review): this method appears unfinished -- it only prints the
        extracted text (debug leftover) and returns None; the RDF output
        sketched in the comment below is never produced.
        """
        pre = soup.findAll("pre")[-1]
        text = ''.join(pre.findAll(text=True))
        print(text)
        # End result something like this
        #
        # <http://rinfo.lagrummet.se/komm/a/1991:03> a :Kommittebeskrivning
        #     dcterms:identifier "A 1991:03" ;
        #     :tillkalladAr "1991" ;
        #     :lopnummer "03";
        #     :kommittestatus "Avslutad";
        #     :avslutadAr "1993";
        #     :departement <http://rinfo.lagrummet.se/publ/org/Arbetsmarknadsdepartementet>;
        #     :kommittedirektiv <http://rinfo.lagrummet.se/publ/dir/1991:75> ,
        #         <http://rinfo.lagrummet.se/publ/dir/1992:33> ,
        #     :betankanden <http://rinfo.lagrummet.se/publ/bet/sou/1993:81> .
        #
        # <http://rinfo.lagrummet.se/publ/bet/sou/1993:81> dcterms:title "Översyn av arbetsmiljölagen";
tests/test_input.py | skurmedel/wordsalad | 0 | 12764830 | from wordsalad.input import split_germanic, group_words
import unittest
class TestTokenisation(unittest.TestCase):
    """Unit tests for split_germanic tokenisation behaviour."""

    def test_split_germanic_punctuation_treated_like_one_word(self):
        tokens = list(split_germanic("abc. def.,"))
        self.assertListEqual(["abc", ".", "def", ".", ","], tokens)

    def test_split_germanic_fails_on_empty_whitespace(self):
        with self.assertRaises(ValueError):
            list(split_germanic("A high powered mutant", whitespace=""))

    def test_split_germanic_whitespace_not_a_string(self):
        tokens = list(split_germanic("ab1c d1ef", whitespace=1))
        self.assertListEqual(["ab", "c d", "ef"], tokens)

    def test_split_germanic_start_words(self):
        collected = []
        list(split_germanic("Hello my name is <NAME>. How very nice to meet you! :) What is your name?",
                            start_words=collected, punctuation=".!", sentence_end=".?!"))
        self.assertEqual(["Hello", "How", ":)"], collected)

    def test_split_germanic(self):
        # (input, expected token list) pairs covering footnotes, ellipses
        # and bracketed phrases.
        cases = [
            ("Swiss cheese is a type of dairy product.[5]",
             ["Swiss", "cheese", "is", "a", "type", "of", "dairy", "product", ".", "[", "5", "]"]),
            ("Who are you... he said.",
             ["Who", "are", "you", ".", ".", ".", "he", "said", "."]),
            ("A list of (approved) items follows:",
             ["A", "list", "of", "(", "approved", ")", "items", "follows", ":"]),
        ]
        for sample, expected in cases:
            self.assertListEqual(expected, list(split_germanic(sample)))
class TestGroupWords(unittest.TestCase):
    """Unit tests for group_words chunking behaviour."""

    def test_group_words_words_must_be_iterable(self):
        with self.assertRaises(TypeError):
            list(group_words(1))

    def test_group_words_size_must_be_int(self):
        with self.assertRaises(ValueError):
            list(group_words([1, 2], size="abc"))

    def test_group_words_fills_with_empty(self):
        grouped = group_words([1, 2, 3, 4, 5, 6, 7], size=3, empty="E")
        self.assertIsNotNone(grouped)
        self.assertEqual(list(grouped),
                         [(1, 2, 3), (4, 5, 6), (7, "E", "E")])

    def test_group_words_empty_sequence_gives_empty(self):
        self.assertEqual(list(group_words([])), [])

    def test_group_words_size_larger_than_1(self):
        with self.assertRaises(ValueError):
            list(group_words([1, 2, 3], size=1))
Validation/Tools/scripts/simpleEdmComparison.py | trackerpro/cmssw | 1 | 12764831 | <reponame>trackerpro/cmssw
#! /usr/bin/env python
from __future__ import print_function
import inspect
import itertools
import logging
import optparse
import pprint
import random
import sys
import ROOT
from DataFormats.FWLite import Events, Handle
# Maps a short product-kind name to the concrete EDM C++ type names to try,
# scalar form first and then the vector variant.
typeMap = { 'double' : ['double', 'vector<double>'],
            'int' : ['int', 'vector<int>'],}
class ProductNotFoundError(RuntimeError):
    """Raised when the requested product/label is absent from a file."""
    pass
def compareEvents(event1, event2, handleName, label, options):
    """Compare one product between two parallel events.

    Fetches the product identified by (handleName, label) from both events
    and counts element-wise mismatches (optionally blurring file-1 values
    for debugging).

    :return: (elements compared, mismatches) tuple.
    :raises RuntimeError: if the two events differ in run/lumi/event.
    :raises ProductNotFoundError: if the product is missing in either file.
    """
    # Is it a vector of objects or object (funky ROOT buffer for single value)
    isSimpleObject = (handleName.find('vector') == -1)
    # Compare run, lumi, event
    aux1 = event1.eventAuxiliary()
    aux2 = event2.eventAuxiliary()
    rle1 = (aux1.run(), aux1.luminosityBlock(), aux1.event())
    rle2 = (aux2.run(), aux2.luminosityBlock(), aux2.event())
    logging.debug("Comparing RLE #'s %s and %s" % (rle1, rle2))
    if rle1 != rle2:
        raise RuntimeError("Run/Lumi/Events don't match: %s vs %s" % (rle1, rle2))

    handle1 = Handle(handleName)
    handle2 = Handle(handleName)
    if event1.getByLabel(label, handle1) and event2.getByLabel(label, handle2):
        objects1 = handle1.product()
        # BUG FIX: the original read handle1.product() twice, so file 1 was
        # always compared against itself and real differences went unseen.
        objects2 = handle2.product()
    else:
        raise ProductNotFoundError("Product %s %s not found." % (handleName, label))

    if isSimpleObject:
        val1 = objects1[0]
        val2 = objects2[0]
        if options.blurRate and options.blur and random.random() < options.blurRate:
            # Additive blur for scalars (the vector branch below blurs
            # multiplicatively); kept as-is for output compatibility.
            val1 += (random.random() - 0.5) * options.blur
        if val1 != val2:
            logging.error("Mismatch %s and %s in %s" % (val1, val2, aux2.event()))
            return (1, 1)
        else:
            logging.debug("Match of %s in %s" % (objects1[0], aux2.event()))
            return (1, 0)
    else:
        count = 0
        mismatch = 0
        # izip_longest pads the shorter collection with None, so a length
        # difference shows up as mismatches instead of being ignored.
        for val1, val2 in itertools.izip_longest(objects1, objects2):
            count += 1
            if options.blurRate and options.blur and random.random() < options.blurRate:
                # Relative (multiplicative) blur for vector elements.
                val1 += (random.random() - 0.5) * options.blur * val1
            if val1 != val2:
                mismatch += 1
                logging.error("Comparison problem %s != %s" % (val1, val2))
        logging.debug("Compared %s elements" % count)
        return (count, mismatch)
if __name__ == "__main__":
###################
## Setup Options ##
###################
random.seed()
logging.basicConfig(level=logging.INFO)
parser = optparse.OptionParser("usage: %prog [options] config.txt file1.root file2.root\nVisit https://twiki.cern.ch/twiki/bin/view/CMS/SWGuidePhysicsToolsEdmOneToOneComparison\nfor full documentation.")
modeGroup = optparse.OptionGroup (parser, "Mode Conrols")
tupleGroup = optparse.OptionGroup (parser, "Tuple Controls")
optionsGroup = optparse.OptionGroup (parser, "Options")
modeGroup.add_option ('--compare', dest='compare', action='store_true',
help='Compare tuple1 to tuple2')
tupleGroup.add_option ('--numEvents', dest='numEvents', type='int',
default=1e9,
help="number of events for first and second file")
tupleGroup.add_option ('--label', dest='label', type='string',
action='append',
help="Change label ('tuple^object^label')")
optionsGroup.add_option ('--blur1', dest='blur', type='float',
default=0.05,
help="Randomly changes values by 'BLUR' " +\
"from tuple1. For debugging only.")
optionsGroup.add_option ('--blurRate', dest='blurRate', type='float',
default=0.00,
help="Rate at which objects will be changed. " + \
"(%default default)")
parser.add_option_group (modeGroup)
parser.add_option_group (tupleGroup)
parser.add_option_group (optionsGroup)
(options, args) = parser.parse_args()
if len(args) != 3:
parser.error("Too many or too few arguments")
options.config = args[0]
options.file1 = args[1]
options.file2 = args[2]
# Parse object name and label out of Charles format
tName, objName, lName = options.label[0].split('^')
label = lName.split(',')
ROOT.gROOT.SetBatch()
ROOT.gSystem.Load("libFWCoreFWLite.so")
ROOT.gSystem.Load("libDataFormatsFWLite.so")
ROOT.FWLiteEnabler.enable()
chain1 = Events ([options.file1], forceEvent=True)
chain2 = Events ([options.file2], forceEvent=True)
if chain1.size() != chain1.size():
raise RuntimeError("Files have different #'s of events")
numEvents = min(options.numEvents, chain1.size())
# Parameters to this script are the same regardless if the
# product is double or vector<double> so have to try both
productsCompared = 0
totalCount = 0
mismatches = 0
for handleName in typeMap[objName]:
try:
chain1.toBegin()
chain2.toBegin()
logging.info("Testing identity for handle=%s, label=%s" % (handleName, label))
# Use itertools to iterate over lists in ||
for ev1, ev2, count in itertools.izip(chain1, chain2, xrange(numEvents)):
evCount, evMismatch = compareEvents(event1=ev1, event2=ev2, handleName=handleName, label=label, options=options)
totalCount += evCount
mismatches += evMismatch
logging.info("Compared %s events" % (count+1))
productsCompared += 1
# Try to reproduce the output that Charles's summary script is expecting
plagerDict = {'eventsCompared' : count+1}
plagerDict.update({'count_%s' % objName : totalCount})
if mismatches:
plagerDict.update({objName: {'_var' : {handleName:mismatches}}})
print("Summary")
pprint.pprint(plagerDict)
except ProductNotFoundError:
logging.info("No product found for handle=%s, label=%s" % (handleName, label))
logging.info("Total products compared: %s, %s/%s" % (productsCompared, mismatches, totalCount))
if not productsCompared:
print("Plager compatible message: not able to get any products")
sys.exit()
| 2.296875 | 2 |
src/models/__init__.py | NoSyu/CDMM-B | 1 | 12764832 | <gh_stars>1-10
from .cdmmb import *
| 1.078125 | 1 |
wedding/views.py | markbackhouse/django-wedding-website | 0 | 12764833 | <reponame>markbackhouse/django-wedding-website
from django.conf import settings
from django.shortcuts import render
from guests.save_the_date import SAVE_THE_DATE_CONTEXT_MAP
def home(request):
    """Render the wedding landing page with save-the-date art and the reply address."""
    context = {
        "save_the_dates": SAVE_THE_DATE_CONTEXT_MAP,
        "support_email": settings.DEFAULT_WEDDING_REPLY_EMAIL,
    }
    return render(request, "home.html", context=context)
| 1.84375 | 2 |
vmrunner/vmrunner.py | Mattlk13/include-OS | 0 | 12764834 | <reponame>Mattlk13/include-OS
import os
import sys
import subprocess
import thread
import threading
import time
import re
import linecache
import traceback
import validate_vm
import signal
import psutil
from prettify import color
INCLUDEOS_HOME = None
# Tag prepended to all log lines produced by this module.
nametag = "<VMRunner>"
# Locate the IncludeOS installation: prefer $INCLUDEOS_PREFIX, fall back to
# /usr/local, and fail hard when neither points to an existing directory.
if "INCLUDEOS_PREFIX" not in os.environ:
    def_home = "/usr/local"
    print color.WARNING("WARNING:"), "Environment varialble INCLUDEOS_PREFIX is not set. Trying default", def_home
    if not os.path.isdir(def_home): raise Exception("Couldn't find INCLUDEOS_PREFIX")
    INCLUDEOS_HOME= def_home
else:
    INCLUDEOS_HOME = os.environ['INCLUDEOS_PREFIX']
# Directory of this package; used to locate bundled files such as the JSON schema.
package_path = os.path.dirname(os.path.realpath(__file__))
# The end-of-transmission character
EOT = chr(4)
# Exit codes used by this program
exit_codes = {"SUCCESS" : 0,
              "PROGRAM_FAILURE" : 1,
              "TIMEOUT" : 66,
              "VM_PANIC" : 67,
              "CALLBACK_FAILED" : 68,
              "BUILD_FAIL" : 69,
              "ABORT" : 70,
              "VM_EOT" : 71 }
def get_exit_code_name (exit_code):
    """Map a numeric exit code back to its symbolic name, or 'UNKNOWN ERROR'."""
    matches = (name for name, code in exit_codes.iteritems() if code == exit_code)
    return next(matches, "UNKNOWN ERROR")
# We want to catch the exceptions from callbacks, but still tell the test writer what went wrong
def print_exception():
    """Print the currently handled exception (max 10 frames) to stdout."""
    etype, value, tb = sys.exc_info()
    traceback.print_exception(etype, value, tb, limit=10, file=sys.stdout)
# Sink for subprocess stderr while probing for sudo access.
devnull = open(os.devnull, 'w')
# Check for prompt-free sudo access
def have_sudo():
    """Return True when passwordless sudo is available; raise otherwise.

    Fix: the original compared check_output()'s return value (the command's
    stdout) to 0 and discarded the result - only the raised exception ever
    mattered, so the meaningless comparison is removed.
    """
    try:
        subprocess.check_output(["sudo", "-n", "whoami"], stderr = devnull)
    except Exception:
        raise Exception("Sudo access required")
    return True
# Run a command, pretty print output, throw on error
def cmd(cmdlist):
res = subprocess.check_output(cmdlist)
for line in res.rstrip().split("\n"):
print color.SUBPROC(line)
def abstract():
    """Raise to signal that a subclass must override the calling method."""
    message = "Abstract class method called. Use a subclass"
    raise Exception(message)
# Hypervisor base / super class
# (It seems to be recommended for "new style classes" to inherit object)
class hypervisor(object):
    """Abstract interface implemented by concrete hypervisor backends (e.g. qemu)."""
    def __init__(self, config):
        self._config = config;
    # Boot a VM, returning a hypervisor handle for reuse
    def boot(self):
        abstract()
    # Stop the VM booted by boot
    def stop(self):
        abstract()
    # Read a line of output from vm
    def readline(self):
        abstract()
    # Verify that the hypervisor is available
    def available(self, config_data = None):
        abstract()
    # Wait for this VM to exit
    def wait(self):
        abstract()
    # Poll for the VM's exit status without blocking
    def poll(self):
        abstract()
    # A descriptive name
    def name(self):
        abstract()
# Qemu Hypervisor interface
class qemu(hypervisor):
    """Hypervisor backend that boots IncludeOS images in qemu-system-x86_64."""
    def __init__(self, config):
        super(qemu, self).__init__(config)
        self._proc = None        # subprocess.Popen handle for the running qemu
        self._stopped = False    # set once stop() has been issued
        self._sudo = False       # True when qemu was launched through sudo
        # Pretty printing
        self._nametag = "<" + type(self).__name__ + ">"
        self.INFO = color.INFO(self._nametag)
    def name(self):
        """Descriptive name of this hypervisor."""
        return "Qemu"
    def drive_arg(self, filename, drive_type="virtio", drive_format="raw", media_type="disk"):
        """Build the qemu '-drive' argument pair for one disk image."""
        return ["-drive","file="+filename+",format="+drive_format+",if="+drive_type+",media="+media_type]
    def net_arg(self, backend, device, if_name = "net0", mac="c0:01:0a:00:00:2a"):
        """Build the qemu '-device'/'-netdev' argument list for one NIC."""
        qemu_ifup = INCLUDEOS_HOME+"/includeos/scripts/qemu-ifup"
        # FIXME: this needs to get removed
        names = {"virtio" : "virtio-net", "vmxnet" : "vmxnet3", "vmxnet3" : "vmxnet3"}
        return ["-device", names[device]+",netdev="+if_name+",mac="+mac,
                "-netdev", backend+",id="+if_name+",script="+qemu_ifup]
    def kvm_present(self):
        """Return True when the host CPU advertises hardware virtualization (vmx/svm)."""
        command = "egrep -m 1 '^flags.*(vmx|svm)' /proc/cpuinfo"
        try:
            subprocess.check_output(command, shell = True)
            print self.INFO, "KVM ON"
            return True
        except Exception as err:
            # egrep exits non-zero when nothing matches, which raises here
            print self.INFO, "KVM OFF"
            return False
    # Start a process and preserve in- and output pipes
    # Note: if the command failed, we can't know until we have exit status,
    # but we can't wait since we expect no exit. Checking for program start error
    # is therefore deferred to the callee
    def start_process(self, cmdlist):
        if cmdlist[0] == "sudo": # and have_sudo():
            print color.WARNING("Running with sudo")
            self._sudo = True
        # Start a subprocess
        self._proc = subprocess.Popen(cmdlist,
                                      stdout = subprocess.PIPE,
                                      stderr = subprocess.PIPE,
                                      stdin = subprocess.PIPE)
        print self.INFO, "Started process PID ",self._proc.pid
        return self._proc
    def get_error_messages(self):
        """Return the process' stderr output if it has exited, else None."""
        if self._proc.poll():
            data, err = self._proc.communicate()
            return err
    def boot(self, multiboot, kernel_args = "", image_name = None):
        """Assemble the qemu command line from the JSON config and start the VM."""
        self._stopped = False
        # Use provided image name if set, otherwise try to find it in json-config
        if not image_name:
            image_name = self._config["image"]
        # multiboot - e.g. boot with '-kernel' and no bootloader
        if multiboot:
            # TODO: Remove .img-extension from vm.json in tests to avoid this hack
            if (image_name.endswith(".img")):
                image_name = image_name.split(".")[0]
            kernel_args = ["-kernel", image_name, "-append", kernel_args]
            disk_args = []
            print self.INFO, "Booting", image_name, "directly without bootloader (multiboot / -kernel args)"
        else:
            kernel_args = []
            disk_args = self.drive_arg(image_name, "ide")
            print self.INFO, "Booting", image_name, "with a bootable disk image"
        if "bios" in self._config:
            kernel_args.extend(["-bios", self._config["bios"]])
        if "drives" in self._config:
            for disk in self._config["drives"]:
                disk_args += self.drive_arg(disk["file"], disk["type"], disk["format"], disk["media"])
        net_args = []
        i = 0
        if "net" in self._config:
            for net in self._config["net"]:
                net_args += self.net_arg(net["backend"], net["device"], "net"+str(i), net["mac"])
                i+=1
        mem_arg = []
        if "mem" in self._config:
            mem_arg = ["-m", str(self._config["mem"])]
        vga_arg = ["-nographic" ]
        if "vga" in self._config:
            vga_arg = ["-vga", str(self._config["vga"])]
        # TODO: sudo is only required for tap networking and kvm. Check for those.
        command = ["sudo", "qemu-system-x86_64"]
        if self.kvm_present(): command.append("--enable-kvm")
        command += kernel_args
        command += disk_args + net_args + mem_arg + vga_arg
        print self.INFO, "command:"
        print color.DATA(" ".join(command))
        try:
            self.start_process(command)
        except Exception as e:
            print self.INFO,"Starting subprocess threw exception:", e
            raise e
    def stop(self):
        """Terminate the qemu process (and its children when launched via sudo)."""
        signal = "-SIGTERM"
        # Don't try to kill twice
        if self._stopped:
            self.wait()
            return self
        else:
            self._stopped = True
        if self._proc and self._proc.poll() == None :
            if not self._sudo:
                print self.INFO,"Stopping child process (no sudo required)"
                self._proc.terminate()
            else:
                # Find and terminate all child processes, since parent is "sudo"
                parent = psutil.Process(self._proc.pid)
                children = parent.children()
                print self.INFO, "Stopping", self._config["image"], "PID",self._proc.pid, "with", signal
                for child in children:
                    print self.INFO," + child process ", child.pid
                    # The process might have gotten an exit status by now so check again to avoid negative exit
                    if (not self._proc.poll()):
                        subprocess.call(["sudo", "kill", signal, str(child.pid)])
        # Wait for termination (avoids the need to reset the terminal etc.)
        self.wait()
        return self
    def wait(self):
        """Block until the qemu process has exited (no-op when never started)."""
        if (self._proc): self._proc.wait()
        return self
    def read_until_EOT(self):
        """Consume stdout until the EOT character (0x04) or process exit."""
        chars = ""
        while (not self._proc.poll()):
            char = self._proc.stdout.read(1)
            if char == chr(4):
                return chars
            chars += char
        return chars
    def readline(self):
        """Read one line of VM output; raises once the process has exited."""
        if self._proc.poll():
            raise Exception("Process completed")
        return self._proc.stdout.readline()
    def writeline(self, line):
        """Write a line (newline appended) to the VM's stdin."""
        if self._proc.poll():
            raise Exception("Process completed")
        return self._proc.stdin.write(line + "\n")
    def poll(self):
        """Non-blocking exit-status check; None while the VM is still running."""
        return self._proc.poll()
# VM class
class vm:
    """Drives one IncludeOS VM: build, boot, monitor output and map events to exit codes."""
    def __init__(self, config, hyper = qemu):
        self._exit_status = 0
        self._exit_msg = ""
        self._config = config
        # Default event handlers; user code can replace them via the on_* methods.
        self._on_success = lambda(line) : self.exit(exit_codes["SUCCESS"], nametag + " All tests passed")
        self._on_panic = self.panic
        self._on_timeout = self.timeout
        # Map of regex pattern -> callback, matched against every output line.
        self._on_output = {
            "PANIC" : self._on_panic,
            "SUCCESS" : self._on_success }
        assert(issubclass(hyper, hypervisor))
        self._hyper = hyper(config)
        self._timeout_after = None
        self._timer = None
        self._on_exit_success = lambda : None
        self._on_exit = lambda : None
        # Remember the test's source directory; exit() chdirs back here.
        self._root = os.getcwd()
    def stop(self):
        """Stop the hypervisor and cancel any pending timeout timer."""
        self._hyper.stop().wait()
        if self._timer:
            self._timer.cancel()
        return self
    def wait(self):
        """Block until timer and hypervisor have finished; return the exit status."""
        if hasattr(self, "_timer") and self._timer:
            self._timer.join()
        self._hyper.wait()
        return self._exit_status
    def poll(self):
        """Non-blocking exit-status check, delegated to the hypervisor."""
        return self._hyper.poll()
    def exit(self, status, msg):
        """Record *status*, stop the VM and run callbacks; exits the process on failure."""
        self._exit_status = status
        self.stop()
        print color.INFO(nametag),"Exit called with status", self._exit_status, "(",get_exit_code_name(self._exit_status),")"
        print color.INFO(nametag),"Calling on_exit"
        # Change back to test source
        os.chdir(self._root)
        self._on_exit()
        if status == 0:
            # Print success message and return to caller
            print color.SUCCESS(msg)
            print color.INFO(nametag),"Calling on_exit_success"
            return self._on_exit_success()
        # Print fail message and exit with appropriate code
        print color.EXIT_ERROR(get_exit_code_name(status), msg)
        sys.exit(status)
    # Default timeout event
    def timeout(self):
        """Record TIMEOUT and stop the hypervisor (runs on the timer thread)."""
        print color.INFO("<timeout>"), "VM timed out"
        # Note: we have to stop the VM since the main thread is blocking on vm.readline
        #self.exit(exit_codes["TIMEOUT"], nametag + " Test timed out")
        self._exit_status = exit_codes["TIMEOUT"]
        self._exit_msg = "vmrunner timed out after " + str(self._timeout_after) + " seconds"
        self._hyper.stop().wait()
    # Default panic event
    def panic(self, panic_line):
        """Dump the VM's remaining output and exit with VM_PANIC."""
        panic_reason = self._hyper.readline()
        print color.INFO(nametag), "VM signalled PANIC. Reading until EOT (", hex(ord(EOT)), ")"
        print color.VM(panic_reason),
        remaining_output = self._hyper.read_until_EOT()
        for line in remaining_output.split("\n"):
            print color.VM(line)
        self.exit(exit_codes["VM_PANIC"], panic_reason)
    # Events - subscribable
    def on_output(self, output, callback):
        """Register *callback* for output lines matching the regex *output*."""
        self._on_output[ output ] = callback
    def on_success(self, callback):
        # Chain the user callback in front of the default success handler.
        self._on_output["SUCCESS"] = lambda(line) : [callback(line), self._on_success(line)]
    def on_panic(self, callback):
        # Chain the user callback in front of the default panic handler.
        self._on_output["PANIC"] = lambda(line) : [callback(line), self._on_panic(line)]
    def on_timeout(self, callback):
        self._on_timeout = callback
    def on_exit_success(self, callback):
        self._on_exit_success = callback
    def on_exit(self, callback):
        self._on_exit = callback
    # Read a line from the VM's standard out
    def readline(self):
        return self._hyper.readline()
    # Write a line to VM stdout
    def writeline(self, line):
        return self._hyper.writeline(line)
    # Make using GNU Make
    def make(self, params = []):
        """Build the service with 'make'.  NOTE: mutable default kept for compatibility."""
        print color.INFO(nametag), "Building with 'make' (params=" + str(params) + ")"
        make = ["make"]
        make.extend(params)
        cmd(make)
        return self
    # Call cmake
    def cmake(self, args = []):
        """Configure and build in ./build with cmake, installing back into the test dir."""
        print color.INFO(nametag), "Building with cmake (%s)" % args
        # install dir:
        INSTDIR = os.getcwd()
        # create build directory
        try:
            os.makedirs("build")
        except OSError as err:
            if err.errno!=17: # Errno 17: File exists
                self.exit(exit_codes["BUILD_FAIL"], "could not create build directory")
        # go into build directory
        # NOTE: The test gets run from here
        os.chdir("build")
        # build with prefix = original path
        cmake = ["cmake", "..", "-DCMAKE_INSTALL_PREFIX:PATH=" + INSTDIR]
        cmake.extend(args)
        try:
            cmd(cmake)
            # if everything went well, build with make and install
            return self.make()
        except Exception as e:
            print "Excetption while building: ", e
            self.exit(exit_codes["BUILD_FAIL"], "building with cmake failed")
    # Clean cmake build folder
    def clean(self):
        print color.INFO(nametag), "Cleaning cmake build folder"
        subprocess.call(["rm","-rf","build"])
    # Boot the VM and start reading output. This is the main event loop.
    def boot(self, timeout = 60, multiboot = True, kernel_args = "booted with vmrunner", image_name = None):
        """Boot the VM and pump its stdout through the registered event handlers.

        Returns self on success; otherwise exit() terminates the process with
        the appropriate code from exit_codes.
        """
        # This might be a reboot
        self._exit_status = None
        self._timeout_after = timeout
        # Start the timeout thread
        if (timeout):
            print color.INFO(nametag),"setting timeout to",timeout,"seconds"
            self._timer = threading.Timer(timeout, self._on_timeout)
            self._timer.start()
        # Boot via hypervisor
        try:
            self._hyper.boot(multiboot, kernel_args, image_name)
        except Exception as err:
            print color.WARNING("Exception raised while booting ")
            if (timeout): self._timer.cancel()
            self.exit(exit_codes["CALLBACK_FAILED"], str(err))
        # Start analyzing output
        while self._hyper.poll() == None and not self._exit_status:
            try:
                line = self._hyper.readline()
            except Exception as e:
                print color.WARNING("Exception thrown while waiting for vm output")
                break
            if line:
                # Special case for end-of-transmission
                if line == EOT:
                    if not self._exit_status: self._exit_status = exit_codes["VM_EOT"]
                    break
                if line.startswith(" [ Kernel ] service exited with status"):
                    self._exit_status = int(line.split(" ")[-1].rstrip())
                    self._exit_msg = "Service exited"
                    break
                else:
                    print color.VM(line.rstrip())
            else:
                pass
                # TODO: Add event-trigger for EOF?
            # Fire every registered callback whose pattern matches this line.
            for pattern, func in self._on_output.iteritems():
                if re.search(pattern, line):
                    try:
                        res = func(line)
                    except Exception as err:
                        print color.WARNING("Exception raised in event callback: ")
                        print_exception()
                        res = False
                        self.stop()
                    # NOTE: It can be 'None' without problem
                    if res == False:
                        self._exit_status = exit_codes["CALLBACK_FAILED"]
                        self.exit(self._exit_status, " Event-triggered test failed")
        # If the VM process didn't exit by now we need to stop it.
        if (self.poll() == None):
            self.stop()
        # We might have an exit status, e.g. set by a callback noticing something wrong with VM output
        if self._exit_status:
            self.exit(self._exit_status, self._exit_msg)
        # Process might have ended prematurely
        elif self.poll():
            self.exit(self._hyper.poll(), self._hyper.get_error_messages())
        # If everything went well we can return
        return self
print color.HEADER("IncludeOS vmrunner loading VM configs")
# Validate the vm.json files in the working directory against the bundled schema.
schema_path = package_path + "/vm.schema.json"
print color.INFO(nametag), "Validating JSON according to schema ",schema_path
validate_vm.load_schema(schema_path)
validate_vm.has_required_stuff(".")
# Fallback spec used when no JSON configuration is found.
default_spec = {"image" : "service.img"}
# Provide a list of VM's with validated specs
vms = []
if validate_vm.valid_vms:
    print color.INFO(nametag), "Loaded VM specification(s) from JSON"
    for spec in validate_vm.valid_vms:
        print color.INFO(nametag), "Found VM spec: "
        print color.DATA(spec.__str__())
        vms.append(vm(spec))
else:
    print color.WARNING(nametag), "No VM specification JSON found, trying default config"
    vms.append(vm(default_spec))
# Handler for SIGINT
def handler(signum, frame):
print
print color.WARNING("Process interrupted - stopping vms")
for vm in vms:
try:
vm.exit(exit_codes["ABORT"], "Process terminated by user")
except Exception as e:
print color.WARNING("Forced shutdown caused exception: "), e
raise e
signal.signal(signal.SIGINT, handler)
| 2.078125 | 2 |
hypothesis/summary/train.py | JoeriHermans/hypothesis | 45 | 12764835 | r"""Summary objects at the end of training procedures."""
import numpy as np
import pickle
import torch
class TrainingSummary:
    """Holds the models and loss curves produced by one training procedure."""

    def __init__(self,
                 model_best,
                 model_final,
                 epochs,
                 epoch_best,
                 losses_train,
                 losses_test=None,
                 identifier=None):
        self.identifier = identifier
        self.epochs = epochs
        self.model_best = model_best
        self.model_final = model_final
        self.epoch_best = epoch_best
        self.losses_train = losses_train
        self.losses_test = losses_test

    def _as_dict(self):
        # Keys define the on-disk format; keep in sync with load().
        return {
            "identifier": self.identifier,
            "best_model": self.model_best,
            "final_model": self.model_final,
            "epochs": self.epochs,
            "best_epoch": self.epoch_best,
            "training_losses": self.losses_train,
            "testing_losses": self.losses_test}

    def save(self, path):
        """Serialize this summary to *path* with torch.save."""
        torch.save(self._as_dict(), path)

    def load(self, path):
        """Restore this summary in-place from a file written by save()."""
        summary = torch.load(path)
        self.identifier = summary["identifier"]
        self.model_best = summary["best_model"]
        self.model_final = summary["final_model"]
        self.epochs = summary["epochs"]
        self.epoch_best = summary["best_epoch"]
        self.losses_train = summary["training_losses"]
        self.losses_test = summary["testing_losses"]

    def test_losses_available(self):
        """True when a non-empty test-loss history was recorded."""
        losses = self.losses_test
        return losses is not None and len(losses) > 0

    def identifier_available(self):
        """True when this summary carries an identifier."""
        return self.identifier is not None

    def num_epochs(self):
        return self.epochs

    def best_epoch(self):
        return self.epoch_best

    def best_model(self):
        return self.model_best

    def final_model(self):
        return self.model_final

    def test_losses(self, log=False):
        """Test-loss curve, optionally log-transformed."""
        return np.log(self.losses_test) if log else self.losses_test

    def train_losses(self, log=False):
        """Training-loss curve, optionally log-transformed."""
        return np.log(self.losses_train) if log else self.losses_train

    def __str__(self):
        parts = []
        if self.identifier_available():
            parts.append("Identifier:\t\t{}\n".format(self.identifier))
        parts.append("Total epochs:\t\t{}\n".format(self.epochs))
        parts.append("Best training loss:\t{}\n".format(self.losses_train.min()))
        parts.append("Final training loss:\t{}".format(self.losses_train[-1]))
        if self.test_losses_available():
            parts.append("\nBest testing loss:\t{}\n".format(self.losses_test.min()))
            parts.append("Best test epoch:\t{}\n".format(self.epoch_best))
            parts.append("Final test loss:\t{}".format(self.losses_test[-1]))
        return "".join(parts)
| 2.6875 | 3 |
python/helpers/tests/generator3_tests/data/SkeletonGeneration/segmentation_fault_handling/sigsegv.py | tgodzik/intellij-community | 2 | 12764836 | <filename>python/helpers/tests/generator3_tests/data/SkeletonGeneration/segmentation_fault_handling/sigsegv.py
import ctypes
# Deliberately read from address 0 (NULL) so the interpreter dies with SIGSEGV.
# This file is a test fixture for generator3's segmentation-fault handling.
ctypes.string_at(0)
| 1.03125 | 1 |
2020/07/day7.py | jscpeterson/advent-of-code-2020 | 0 | 12764837 | TARGET_BAG = 'shiny gold'
def get_data(filepath):
    """Read *filepath* and return its lines with surrounding whitespace stripped."""
    with open(filepath) as handle:
        return [line.strip() for line in handle]
def parse_rule(rule):
    """Parse one rule line into (container colour, {contained colour: count})."""
    words = rule.split(' ')
    container = '%s %s' % (words[0], words[1])
    contents = {}
    if words[4:] != ['no', 'other', 'bags.']:
        # Contained bags come in groups of four words: "<n> <adj> <colour> bag(s)[,.]"
        for start in range(4, len(words), 4):
            count, adjective, colour = words[start], words[start + 1], words[start + 2]
            contents['%s %s' % (adjective, colour)] = int(count)
    return container, contents
def parse_bag_rules(inputs):
    """Build {container colour: contents dict} from all rule lines."""
    return dict(parse_rule(line) for line in inputs)
def bag_contains_target(bag_rules, bag_to_search, target_bag):
    """Return True if *bag_to_search* directly or transitively contains *target_bag*.

    BUG FIX: the original returned False as soon as the FIRST contained bag
    failed to lead to the target, never examining the remaining contents; it
    also fell off the end (returning None) for bags with no contents.  Now
    every contained bag is checked and the function always returns a bool.
    """
    for bag in bag_rules[bag_to_search]:
        if bag == target_bag:
            return True
        if bag_contains_target(bag_rules, bag, target_bag):
            return True
    return False
def solve(inputs):
    """Part 1: count bag colours that can eventually contain TARGET_BAG."""
    bag_rules = parse_bag_rules(inputs)
    return sum(1 for bag in bag_rules if bag_contains_target(bag_rules, bag, TARGET_BAG))
def search_bags(bag_rules, bag_to_search, num_bags):
    """Count *num_bags* copies of this bag plus everything packed inside them."""
    total = num_bags
    for inner_bag, inner_count in bag_rules[bag_to_search].items():
        total += num_bags * search_bags(bag_rules, inner_bag, inner_count)
    return total
def solve2(inputs):
    """Part 2: total number of bags required inside one TARGET_BAG."""
    # Subtract one because the recursive count includes the target bag itself.
    return search_bags(parse_bag_rules(inputs), TARGET_BAG, 1) - 1
# Self-checks against the worked examples from the puzzle text, then the answers.
assert solve(get_data('test')) == 4
print('Part 1: %d' % solve(get_data('input')))
assert solve2(get_data('test')) == 32
assert solve2(get_data('test2')) == 126
print('Part 2: %d' % solve2(get_data('input')))
| 3.3125 | 3 |
mbed_flasher/flashers/__init__.py | bridadan/mbed-flasher | 0 | 12764838 | <filename>mbed_flasher/flashers/__init__.py
#!/usr/bin/env python
"""
Copyright 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mbed_flasher.flashers.FlasherMbed import FlasherMbed as mbed_flasher
from mbed_flasher.flashers.FlasherJLink import FlasherJLink as jlink_flasher
from mbed_flasher.flashers.FlasherST import FlasherSTLink as st_flasher
# disable Invalid constant name warning, not a const
# pylint: disable=C0103
AvailableFlashers = []
# Order matters since JLinkExe flash is preferred for JLink boards
if jlink_flasher.is_executable_installed():
    AvailableFlashers.append(jlink_flasher)
if st_flasher.is_executable_installed():
    AvailableFlashers.append(st_flasher)
# The generic mbed (mass-storage) flasher needs no external tool: always available.
AvailableFlashers.append(mbed_flasher)
| 1.65625 | 2 |
pages/mixins.py | rangertaha/django-boilerplate-pages | 0 | 12764839 | <gh_stars>0
#-*- coding:utf-8 -*-
"""
"""
import logging
import random
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic.list import ListView
from django.views.generic.edit import FormView
from django.views.generic.base import ContextMixin
from django.views.generic import View
from django.conf import settings
from .models import Section
logger = logging.getLogger(__name__)
class NavBarMixin(ContextMixin):
    """Adds the list of all sections to the template context (for the nav bar)."""

    def get_context_data(self, **kwargs):
        context = super(NavBarMixin, self).get_context_data(**kwargs)
        context.update({'sections': Section.objects.filter().distinct()})
        return context
class MenuMixin(ContextMixin):
    """Resolves the 'section' URL kwarg into a Section object in the context."""

    def get_context_data(self, **kwargs):
        context = super(MenuMixin, self).get_context_data(**kwargs)
        slug = self.kwargs.get('section', None)
        # NOTE(review): raises Section.DoesNotExist for a missing/unknown slug,
        # exactly as before - confirm that URLs always supply a valid section.
        context['section'] = Section.objects.get(slug=slug)
        return context
class PageMixin(NavBarMixin, MenuMixin):
    """Convenience mixin combining the nav-bar and menu context additions."""
"""
class ContactModalMixin(SuccessMessageMixin, FormView):
form_class = ContactForm
success_url = '/'
success_message = "Successfully sent message..."
def form_valid(self, form):
form.send_email()
return super(ContactModalMixin, self).form_valid(form)
class MetaMixin(ContextMixin):
def get_context_data(self, **kwargs):
context = super(MetaMixin, self).get_context_data(**kwargs)
context['title'] = random.choice(settings.TITLE)
context['description'] = random.choice(settings.DESCRIPTION)
context['keywords'] = random.choice(settings.KEYWORDS)
context['author'] = settings.AUTHOR
return context
class FooterMixin(ContextMixin):
def get_context_data(self, **kwargs):
context = super(FooterMixin, self).get_context_data(**kwargs)
context['footer_text'] = settings.FOOTER_TEXT
context['top_authors'] = Author.objects.filter()[:10]
context['top_licenses'] = License.objects.filter()[:10]
context['top_tools'] = Tool.objects.filter()[:10]
context['top_categories'] = Category.objects.filter()[:10]
context['top_sections'] = Section.objects.filter()[:10]
context['top_links'] = Link.objects.filter()[:10]
context['top_languages'] = ProgrammingLanguage.objects.filter()[:10]
return context
class NavBarMixin(ContextMixin):
def get_context_data(self, **kwargs):
context = super(NavBarMixin, self).get_context_data(**kwargs)
context['sections'] = Section.objects.filter(active=True, parent__isnull=True).distinct()
context['name'] = settings.BRAND_NAME
return context
class SiteMixin(FooterMixin, NavBarMixin):
def get_context_data(self, **kwargs):
context = super(SiteMixin, self).get_context_data(**kwargs)
section = self.kwargs.get('section', None)
category = self.kwargs.get('category', None)
tool = self.kwargs.get('tool', None)
if section:
context['section'] = Section.objects.get(slug=section)
if category:
context['category'] = Category.objects.get(slug=category)
if tool:
context['tool'] = tool
return context
"""
| 1.992188 | 2 |
djangoq_demo/order_reminder/models.py | forance/django-q | 0 | 12764840 | from decimal import Decimal
from django.db import models
# Create your models here.
class Orders(models.Model):
    """A sales order: shipping, amounts, payment terms and audit fields."""
    # External order reference; must be unique.
    order_id = models.CharField( max_length=200, blank=False, null=True, unique=True)
    ship_date = models.DateField(auto_now_add=False, auto_now=False, null=True)
    customer = models.CharField("Company Name",max_length=200, blank=False, null=True)
    currency= models.CharField(max_length=200, blank=False, null=True)
    creater = models.CharField(max_length=200, blank=False, null=True)
    order_amount= models.DecimalField(max_digits=10, decimal_places=2)
    # Amount received so far; zero until payments are recorded.
    paid_amount= models.DecimalField(max_digits=10, decimal_places=2, default=Decimal('0.00'))
    create_date = models.DateField(auto_now_add=False, auto_now=False, null=True)
    pay_term = models.CharField("Payment Term",max_length=200, blank=False, null=True)
    updater = models.CharField(max_length=200, blank=False, null=True)
    def __unicode__(self):
        """Human-readable representation (Python 2 / Django 1.x admin display)."""
        # return u'%s, %s' %(self.company.companyprofile_name, self.reference_id)
        #return u'%s' %(self.company.companyprofile_name)
        return u'%s, %s, %s $%s, %s' %(self.order_id, self.customer, self.currency, self.order_amount, self.creater)
| 2.359375 | 2 |
tests/utils.py | tdm-project/pykeyrock | 0 | 12764841 | #!/usr/bin/env python
#
# Copyright 2021 CRS4 - Center for Advanced Studies, Research and Development
# in Sardinia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from lorem_text import lorem
import random
def random_org_name(prefix='pykeyrock unittest', words=2):
    """Random organization name: '<prefix> <n lorem words>', capitalized."""
    name = "%s %s" % (prefix, lorem.words(words))
    return name.capitalize()
def random_org_description(words=5):
    """Random sentence-like organization description ending with a period."""
    text = lorem.words(words)
    return "%s." % text.capitalize()
def random_app_name(prefix='pykeyrock unittest', words=2):
    """Random application name: '<prefix> <n lorem words>', capitalized."""
    return ("%s %s" % (prefix, lorem.words(words))).capitalize()
def random_app_description(words=5):
    """Random application description ending with a period."""
    return "{}.".format(lorem.words(words).capitalize())
def random_role_name(prefix='pykeyrock unittest', words=2):
    """Random CamelCase role name built from the prefix plus *words* lorem words.

    Fix: the original ignored the *words* argument and always generated two
    lorem words; it now honours the parameter (default behaviour unchanged).
    """
    _w = prefix.split() + lorem.words(words).split()
    return ''.join(word.capitalize() for word in _w)
def random_user_email(prefix='pykeyrock_unittest'):
    """Random lowercase e-mail: '<prefix>_<login>@<domain>.<2-letter tld>'."""
    login, domain, tld = lorem.words(3).split()
    address = "%s_%s@%s.%s" % (prefix, login, domain, tld[0:2])
    return address.lower()
def random_user_password():
    """Random password: three lorem words joined by dots, lowercase."""
    parts = lorem.words(3).split()
    return '.'.join(parts).lower()
def random_user_name(prefix='pykeyrock', words=2):
    """Random user display name, capitalized."""
    return ("%s %s" % (prefix, lorem.words(words))).capitalize()
def random_permission_name(prefix='pykeyrock unittest', words=2):
    """Random permission name, capitalized."""
    return ("%s %s" % (prefix, lorem.words(words))).capitalize()
def random_permission_action(prefix='pykeyrock unittest', words=2):
    """Random HTTP verb; *prefix*/*words* are accepted only for API symmetry."""
    return random.choice(("GET", "POST", "PUT", "PATCH", "DELETE"))
def random_permission_resource(prefix='pykeyrock unittest', words=1):
    """Random resource path: prefix words plus lorem words, '/'-joined, lowercase."""
    segments = prefix.split() + lorem.words(words).split()
    return '/'.join(segment.lower() for segment in segments)
| 1.992188 | 2 |
news/admin.py | SIBSIND/PHPMYADMINWEBSITE | 31 | 12764842 | # -*- coding: UTF-8 -*-
# vim: set expandtab sw=4 ts=4 sts=4:
#
# phpMyAdmin web site
#
# Copyright (C) 2008 - 2016 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from django.contrib import admin
from news.models import Post, Planet
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for news posts."""
    list_display = ('title', 'date', 'author')
    list_filter = ('author',)
    date_hierarchy = 'date'
    # Pre-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}
    search_fields = ['title', 'slug']
    def save_model(self, request, obj, form, change):
        """Default the author to the logged-in admin user on first save."""
        if getattr(obj, 'author', None) is None:
            obj.author = request.user
        obj.save()
class PlanetAdmin(admin.ModelAdmin):
    """Admin configuration for aggregated (planet) posts."""
    list_display = ('title', 'date', 'url')
    date_hierarchy = 'date'
    search_fields = ['title', 'url']
# Register both models with the default admin site.
admin.site.register(Planet, PlanetAdmin)
admin.site.register(Post, PostAdmin)
| 2.265625 | 2 |
donut/modules/editor/__init__.py | rlin0/donut | 0 | 12764843 | <reponame>rlin0/donut
import flask
# Flask blueprint for the page-editor module; static assets are served under a
# module-specific URL prefix so they do not collide with other modules.
blueprint = flask.Blueprint(
    'editor',
    __name__,
    template_folder='templates',
    static_folder='static',
    static_url_path='/donut/modules/editor/static')
# Imported for side effects: attaches this module's routes to the blueprint.
import donut.modules.editor.routes
| 1.921875 | 2 |
hy-data-analysis-with-python-spring-2020/part03-e06_meeting_lines/test/test_meeting_lines.py | Melimet/DAP2020 | 0 | 12764844 | <filename>hy-data-analysis-with-python-spring-2020/part03-e06_meeting_lines/test/test_meeting_lines.py
#!/usr/bin/python3
import unittest
from unittest.mock import patch
import numpy as np
from tmc import points
from tmc.utils import load, get_out, patch_helper
module_name='src.meeting_lines'
# Load the student's function from the exercise module via the TMC helpers.
meeting_lines = load(module_name, 'meeting_lines')
# ph('name') expands to the fully qualified target used with unittest.mock.patch.
ph = patch_helper(module_name)
@points('p03-06.1')
class MeetingLines(unittest.TestCase):
    """Exercise tests for meeting_lines(a1, b1, a2, b2)."""
    def test_first(self):
        """The returned point must satisfy both line equations."""
        a1=1
        b1=4
        a2=3
        b2=2
        x, y = meeting_lines(a1,b1,a2,b2)
        self.assertAlmostEqual(y, a1*x + b1, msg="Meeting point in not on line a1=%i, b1=%i" % (a1,b1))
        self.assertAlmostEqual(y, a2*x + b2, msg="Meeting point in not on line a2=%i, b2=%i" % (a2,b2))
    def test_calls(self):
        """The solution must go through np.linalg.solve."""
        with patch(ph("np.linalg.solve"), wraps=np.linalg.solve) as psolve:
            a1=1
            b1=4
            a2=3
            b2=2
            meeting_lines(a1,b1,a2,b2)
            psolve.assert_called()
    def test_underdetermined(self):
        """Identical lines (infinitely many solutions) must raise LinAlgError."""
        a1=1
        b1=4
        p=(a1,b1,a1,b1)
        system="(a1=%i, b1=%i, a2=%i, b2=%i)" % p
        with self.assertRaises(np.linalg.linalg.LinAlgError,
                               msg="Under determined system %s should raise an exception!" % system):
            meeting_lines(*p)
    def test_inconsistent(self):
        """Parallel lines (no solution) must raise LinAlgError."""
        # NOTE(review): p reuses identical lines, same as test_underdetermined;
        # a truly inconsistent system would need a different intercept
        # (e.g. (a1, b1, a1, b1 + 1)) - confirm against the exercise spec.
        a1=1
        b1=4
        p=(a1,b1,a1,b1)
        system="(a1=%i, b1=%i, a2=%i, b2=%i)" % p
        with self.assertRaises(np.linalg.linalg.LinAlgError,
                               msg="Inconsistent system %s should raise an exception!" % system):
            meeting_lines(*p)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 2.421875 | 2 |
example/__init__.py | aeroworks-io/python-ddd | 1 | 12764845 | <filename>example/__init__.py
from faker import Faker
from dddpy import Entity, ID, Primitive
fake = Faker()
# Deterministic seed so generated example data is reproducible between runs.
fake.seed_instance(0)
class UserID(ID):
    """Typed identifier for User entities."""
    ...
class UserName(Primitive, str):
    """User's name"""
    @classmethod
    def __generate__(cls):
        """Produce a random (but seeded, hence reproducible) name via Faker."""
        return cls(fake.name())
class User(Entity):
    """Example entity: a user identified by UserID with a display name."""
    id: UserID
    name: UserName
| 2.875 | 3 |
muDIC/utils/utilities.py | diehlpk/muDIC | 7 | 12764846 | from functools import reduce
import scipy.ndimage as nd
import numpy as np
def convert_to_img_frame(img, node_position, mesh, borders, settings):
    """Crop *img* to the element's padded bounding box and shift the nodal
    coordinates into the cropped frame.

    Returns (image_frame, local_node_pos) where local_node_pos is a
    (2, n_nodes) array in cropped-image pixel coordinates.
    """
    local_node_pos = np.zeros((2, mesh.element_def.n_nodes), dtype=settings.precision)
    # Partition image
    image_frame = extract_subframe(img, borders, settings.pad)
    # Determine nodal positions in image frame coordinates
    # (borders rows are [xmin, xmax, ymin, ymax] as produced by find_element_borders)
    local_node_pos[0, :] = node_position[0] + settings.pad - borders[0, :]
    local_node_pos[1, :] = node_position[1] + settings.pad - borders[2, :]
    return image_frame, local_node_pos
def generate_edge_coordinates(seed):
    """Return (e, n) coordinates of the points on the edge of the unit square.

    *seed* points are placed along each axis; only the outer ring of the
    seed x seed grid is kept (seed*seed - (seed-2)**2 points).
    """
    seeding = np.linspace(0., 1., seed)
    es, ns = np.meshgrid(seeding, seeding)
    # Fix: np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
    mask = np.ones_like(es, dtype=bool)
    mask[1:-1, 1:-1] = 0
    return es[mask], ns[mask]
def find_element_borders(node_position, mesh, seed=20):
    """Integer bounding box of each element, sampled at *seed* points per edge.

    Returns a (4, n_elms) int array whose rows are [Xmin, Xmax, Ymin, Ymax].
    """
    e, n = generate_edge_coordinates(seed)
    N_at_borders = mesh.element_def.Nn(e.flatten(), n.flatten())
    # Find global coordinates of elements
    pixel_x = np.einsum("jk,k->j", N_at_borders, node_position[0])
    pixel_y = np.einsum("jk,k->j", N_at_borders, node_position[1])
    axis = None
    # [Xmin_Xmax,Ymin,Ymax,elm_nr]
    # Fix: np.int was removed in NumPy 1.24; the builtin int keeps the behaviour.
    borders = np.zeros((4, mesh.n_elms), dtype=int)
    borders[0, :] = np.min(pixel_x, axis=axis)
    borders[1, :] = np.max(pixel_x, axis=axis)
    borders[2, :] = np.min(pixel_y, axis=axis)
    borders[3, :] = np.max(pixel_y, axis=axis)
    return borders
def extract_subframe(img, borders, pad):
    """Slice out the sub-image covered by the first element's borders,
    expanded by *pad* pixels on every side.

    ``borders`` holds rows [x_min, x_max, y_min, y_max]; note that the
    y-range indexes the first image axis.
    """
    y_lo, y_hi = borders[2, 0] - pad, borders[3, 0] + pad
    x_lo, x_hi = borders[0, 0] - pad, borders[1, 0] + pad
    return img[y_lo:y_hi, x_lo:x_hi]
def find_borders(coord):
    """Return the integer bounds ``(floor(min(coord)), ceil(max(coord)))``."""
    lower = int(np.floor(np.min(coord)))
    upper = int(np.ceil(np.max(coord)))
    return lower, upper
def find_inconsistent(ep, ny):
    """Return sorted unique indices where ``ep`` or ``ny`` fall outside
    the valid element-coordinate domain [0, 1]."""
    bad_ep = np.where((ep > 1.) | (ep < 0.))[0]
    bad_ny = np.where((ny > 1.) | (ny < 0.))[0]
    return np.union1d(bad_ep, bad_ny)
def extract_points_from_image(image, coordinates):
    """Sample *image* at fractional pixel *coordinates* using cubic
    spline interpolation (scipy.ndimage.map_coordinates)."""
    sampled = nd.map_coordinates(image, coordinates, order=3, prefilter=True)
    return sampled
def image_coordinates(image):
    """Return the pixel-index grids of *image* from ``np.meshgrid``.

    NOTE(review): with meshgrid's default 'xy' indexing the returned
    arrays have shape ``(image.shape[1], image.shape[0])`` — confirm
    callers expect this transposed layout.
    """
    rows, cols = image.shape
    xs, ys = np.meshgrid(np.arange(rows), np.arange(cols))
    return xs, ys
src/voice_command/voice_recognition.py | Team-1-TTM4115/OfficePortal | 0 | 12764847 | import speech_recognition as sr
import re
# Maps transcribed number words (and digit strings) to integer values.
# Includes common speech-recognition homophones: "to"/"too" -> 2,
# "tree" -> 3, "for" -> 4.
TEXT_TO_NUMBER = {
    '0': 0,
    'zero': 0,
    '1': 1,
    'one': 1,
    '2': 2,
    'two': 2,
    'to': 2,
    'too': 2,
    '3': 3,
    'three': 3,
    'tree': 3,
    '4': 4,
    'four': 4,
    'for': 4,
    '5': 5,
    'five': 5,
}
def text_to_number(text, keyword):
    """Convert the spoken number that follows *keyword* in *text* to an int.

    Raises IndexError if *keyword* does not occur in *text*, and KeyError
    if the trailing word is not in TEXT_TO_NUMBER.
    """
    # Leftover debug print() calls removed — they wrote the raw transcript
    # and keyword to stdout on every call.
    text_num = get_number_from_text(text, keyword)
    return TEXT_TO_NUMBER[text_num]
def get_number_from_text(text, keyword):
    """Return the (stripped) text following the first occurrence of
    *keyword* in *text*.

    Raises IndexError if *keyword* does not occur in *text*.
    """
    # re.escape prevents regex metacharacters in the keyword from altering
    # the pattern: an unescaped keyword containing a group, e.g. "(a)",
    # would make findall return tuples instead of strings.
    return re.findall(r'%s(.+)' % re.escape(keyword), text)[0].strip()
class VoiceRecognition:
    """Thin wrapper around SpeechRecognition: records a short phrase from
    the default microphone and transcribes it with the Google Web Speech
    API."""

    def __init__(self):
        self.r = sr.Recognizer()
        self.mic = sr.Microphone()

    def recognize_command(self):
        """Listen for up to 3 seconds and return a result dict.

        On success: ``{"recording": <transcript>, "success": True}``.
        On failure: ``{"exception": <message>, "success": False}``.
        """
        try:
            with self.mic as source:
                # Calibrate for background noise, then capture the phrase.
                self.r.adjust_for_ambient_noise(source)
                audio = self.r.listen(source, phrase_time_limit=3)
                command = self.r.recognize_google(audio)
                return {"recording": command, "success": True}
        except sr.UnknownValueError:
            return {
                "exception": "Google Speech Recognition could not understand audio",
                "success": False,
            }
        except sr.RequestError as e:
            return {
                "exception": f"Could not request results from Google Speech Recognition service; {e}",
                "success": False,
            }
| 3.34375 | 3 |
call_api.py | jimwangzx/PhishBuster | 1 | 12764848 | from urllib.parse import urlparse
import requests
import re
def url_syntax(url_changes):
    """Return *url_changes* with an ``http://`` scheme prepended when the
    URL does not already start with ``http://`` or ``https://``.

    The previous implementation searched for "http" anywhere in the
    string, so URLs merely containing "http" in their path (e.g.
    "example.com/http-guide") were wrongly left without a scheme;
    checking the prefix fixes that.
    """
    if url_changes.startswith(("http://", "https://")):
        return url_changes
    return "http://" + url_changes
def api_call(inurl, seurl):
    """Query the PhishBuster API for a verdict on a suspected URL.

    ``inurl`` is the input URL suspected to be a phishing site (scheme
    optional); ``seurl`` is the domain name of the genuine site to
    compare against.

    Returns the decoded JSON response from the service.
    """
    # Normalise the input URL so urlparse can extract its network location.
    netloc = urlparse(url_syntax(inurl)).netloc
    endpoint = "https://phishbuster-web.herokuapp.com/api/" + netloc + '+' + seurl
    return requests.get(url=endpoint).json()
# Manual smoke test: a homograph-style URL checked against google.com.
if __name__ == '__main__':
    print(api_call('https://www.microsoft.com~@www.google.com/wsgrye/ruygfbryu/gijgnuf','google.com'))
| 3.515625 | 4 |
backend/projects/tests/test_models.py | LucasSantosGuedes/App-Gestao | 142 | 12764849 | import pytest
from mixer.backend.django import mixer
from projects.models import Project, ProjectMembership
from users.models import User
@pytest.mark.django_db
class TestProject:
    """Unit tests for the Project model."""

    def test_project_create(self):
        # A project stores the user it was created with as its owner.
        user = mixer.blend(User, username='test')
        proj = mixer.blend(Project, owner = user)
        assert proj.owner == user

    def test_project_str(self):
        # __str__ renders the project's title.
        proj = mixer.blend(Project)
        assert str(proj) == proj.title
@pytest.mark.django_db
class TestProjectMembers:
    """Unit tests for the ProjectMembership through-model."""

    def test_member(self):
        # Creating a membership links the user into project.members.
        proj = mixer.blend(Project)
        user = mixer.blend(User, username='test')
        mixer.blend(ProjectMembership, member=user, project=proj)
        assert proj.members.get(username='test') == user

    def test_proj_member_str(self):
        # __str__ combines the member's full name and the project title.
        pmem = mixer.blend(ProjectMembership)
        assert str(pmem) == f'{pmem.member.full_name} , {pmem.project.title}'
armstrong/core/arm_content/tests/video/backends/youtube.py | cirlabs/armstrong.core.arm_content | 0 | 12764850 | from ..._utils import *
from ....fields.video import EmbeddedVideo
from ....video.backends import helpers
from ....video.backends.youtube import YouTubeBackend
class YouTubeBackendTestCase(ArmContentTestCase):
    """Tests for the YouTube embedded-video backend.

    Cleanup applied: the width/height tests previously built unused
    ``expected`` string literals that were never asserted against (the
    assertions use assertRegexpMatches); those dead locals are removed.
    The duplicated random id/url construction now goes through the
    existing ``generate_random_url`` helper.  All assertions are
    unchanged.
    """

    def generate_random_url(self):
        """Return a (video_id, watch_url) pair for a random video id."""
        random_id = str(random.randint(100, 200))
        url = "http://youtube.com/watch?v=%s" % random_id
        return random_id, url

    def test_returns_tuple_with_url_as_first_value(self):
        random_id, url = self.generate_random_url()
        backend = YouTubeBackend()
        video = EmbeddedVideo(url, backend)
        self.assertEqual("http", video.url.scheme)
        self.assertEqual("youtube.com", video.url.netloc)

    def test_returns_tuple_with_id_as_second_value(self):
        random_id, url = self.generate_random_url()
        backend = YouTubeBackend()
        video = EmbeddedVideo(url, backend)
        self.assertEqual(random_id, video.id)

    def test_returns_the_expected_html_when_embed_is_called(self):
        random_id, url = self.generate_random_url()
        backend = YouTubeBackend()
        video = EmbeddedVideo(url, backend)
        expected = "".join([
            '<iframe title="YouTube video player" ',
            'width="640" height="390" ',
            'src="http://www.youtube.com/embed/%s" ',
            'frameborder="0" allowfullscreen></iframe>']) % random_id
        self.assertEqual(expected, backend.embed(video))

    def test_embed_width_can_be_set_with_a_kwarg(self):
        random_width = random.randint(1000, 2000)
        random_id, url = self.generate_random_url()
        backend = YouTubeBackend()
        video = EmbeddedVideo(url, backend)
        self.assertRegexpMatches(backend.embed(video, width=random_width),
                r'width="%d"' % random_width)

    def test_embed_height_can_be_set_with_a_kwarg(self):
        random_height = random.randint(1000, 2000)
        random_id, url = self.generate_random_url()
        backend = YouTubeBackend()
        video = EmbeddedVideo(url, backend)
        self.assertRegexpMatches(backend.embed(video, height=random_height),
                r'height="%d"' % random_height)

    def test_embed_width_and_height_can_be_strings(self):
        random_height = str(random.randint(1000, 2000))
        random_width = str(random.randint(1000, 2000))
        random_id, url = self.generate_random_url()
        backend = YouTubeBackend()
        video = EmbeddedVideo(url, backend)
        self.assertRegexpMatches(backend.embed(video, width=random_width),
                r'width="%s"' % random_width)
        self.assertRegexpMatches(backend.embed(video, height=random_height),
                r'height="%s"' % random_height)

    def test_height_defaults_to_configured_if_not_provided(self):
        random_height = random.randint(1000, 2000)
        # Fake out the settings module seen by the helpers so the
        # configured default height is a known random value.
        settings = fudge.Fake()
        settings.has_attr(ARMSTRONG_EMBED_VIDEO_HEIGHT=random_height)
        settings.has_attr(ARMSTRONG_EMBED_VIDEO_WIDTH="does not matter")
        with fudge.patched_context(helpers, 'settings', settings):
            random_id, url = self.generate_random_url()
            backend = YouTubeBackend()
            video = EmbeddedVideo(url, backend)
            self.assertRegexpMatches(backend.embed(video),
                    r'height="%s"' % random_height)

    def test_width_defaults_to_configured_if_not_provided(self):
        random_width = random.randint(1000, 2000)
        # Fake out the settings module seen by the helpers so the
        # configured default width is a known random value.
        settings = fudge.Fake()
        settings.has_attr(ARMSTRONG_EMBED_VIDEO_WIDTH=random_width)
        settings.has_attr(ARMSTRONG_EMBED_VIDEO_HEIGHT="does not matter")
        with fudge.patched_context(helpers, 'settings', settings):
            random_id, url = self.generate_random_url()
            backend = YouTubeBackend()
            video = EmbeddedVideo(url, backend)
            self.assertRegexpMatches(backend.embed(video),
                    r'width="%s"' % random_width)
| 2.578125 | 3 |