text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import json
import os
import pickle
import gym
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.env import MultiAgentEnv
from ray.rllib.env.base_env import _DUMMY_AGENT_ID
from ray.rllib.evaluation.episode import _flatten_action
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.tune.util import merge_dicts
EXAMPLE_USAGE = """
Example Usage via RLlib CLI:
rllib rollout /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
Example Usage via executable:
./rollout.py /tmp/ray/checkpoint_dir/checkpoint-0 --run DQN
--env CartPole-v0 --steps 1000000 --out rollouts.pkl
"""
# Note: if you use any custom models or envs, register them here first, e.g.:
#
# ModelCatalog.register_custom_model("pa_model", ParametricActionsModel)
# register_env("pa_cartpole", lambda _: ParametricActionCartpole(10))
def create_parser(parser_creator=None):
    """Build the CLI argument parser for rolling out a trained agent.

    Args:
        parser_creator: Optional callable used in place of
            ``argparse.ArgumentParser`` (useful for testing).

    Returns:
        A configured ``argparse.ArgumentParser``.
    """
    parser_creator = parser_creator or argparse.ArgumentParser
    parser = parser_creator(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Roll out a reinforcement learning agent "
        "given a checkpoint.",
        epilog=EXAMPLE_USAGE)
    parser.add_argument(
        "checkpoint", type=str, help="Checkpoint from which to roll out.")
    required_named = parser.add_argument_group("required named arguments")
    required_named.add_argument(
        "--run",
        type=str,
        required=True,
        # Fixed typo: "built-on" -> "built-in".
        help="The algorithm or model to train. This may refer to the name "
        "of a built-in algorithm (e.g. RLlib's DQN or PPO), or a "
        "user-defined trainable function or class registered in the "
        "tune registry.")
    required_named.add_argument(
        "--env", type=str, help="The gym environment to use.")
    parser.add_argument(
        "--no-render",
        default=False,
        # store_true is the idiomatic spelling of store_const/const=True.
        action="store_true",
        help="Suppress rendering of the environment.")
    parser.add_argument(
        "--steps",
        default=10000,
        # Parse as int up front; callers previously had to int() it themselves.
        type=int,
        help="Number of steps to roll out.")
    parser.add_argument("--out", default=None, help="Output filename.")
    parser.add_argument(
        "--config",
        default="{}",
        type=json.loads,
        help="Algorithm-specific configuration (e.g. env, hyperparams). "
        "Suppresses loading of configuration from checkpoint.")
    return parser
def run(args, parser):
    """Restore an agent from a checkpoint and roll it out.

    Loads the trainer config from params.pkl located next to (or one
    directory above) the checkpoint, merges in any --config overrides,
    then starts the rollout.
    """
    config = {}
    # Load configuration from file
    config_dir = os.path.dirname(args.checkpoint)
    config_path = os.path.join(config_dir, "params.pkl")
    if not os.path.exists(config_path):
        # Tune checkpoints place params.pkl one level above the checkpoint dir.
        config_path = os.path.join(config_dir, "../params.pkl")
    if not os.path.exists(config_path):
        if not args.config:
            raise ValueError(
                "Could not find params.pkl in either the checkpoint dir or "
                "its parent directory.")
    else:
        with open(config_path, "rb") as f:
            config = pickle.load(f)
    if "num_workers" in config:
        # Rollouts don't need the full training worker fleet; cap at 2.
        config["num_workers"] = min(2, config["num_workers"])
    # CLI --config values take precedence over the checkpointed config.
    config = merge_dicts(config, args.config)
    if not args.env:
        if not config.get("env"):
            parser.error("the following arguments are required: --env")
        args.env = config.get("env")
    ray.init()
    cls = get_agent_class(args.run)
    agent = cls(env=args.env, config=config)
    agent.restore(args.checkpoint)
    num_steps = int(args.steps)
    rollout(agent, args.env, num_steps, args.out, args.no_render)
class DefaultMapping(collections.defaultdict):
    """defaultdict variant whose default_factory receives the missing key."""

    def __missing__(self, key):
        produced = self.default_factory(key)
        self[key] = produced
        return produced
def default_policy_agent_mapping(unused_agent_id):
    """Map every agent to the single default policy (single-agent case)."""
    return DEFAULT_POLICY_ID
def rollout(agent, env_name, num_steps, out=None, no_render=True):
    """Roll out `agent` in its environment for up to `num_steps` steps.

    Supports both single-agent and multi-agent envs. When `out` is given,
    per-step transitions [obs, action, next_obs, reward, done] are pickled
    to that file as a list of episodes.
    """
    policy_agent_mapping = default_policy_agent_mapping
    if hasattr(agent, "workers"):
        env = agent.workers.local_worker().env
        multiagent = isinstance(env, MultiAgentEnv)
        if agent.workers.local_worker().multiagent:
            policy_agent_mapping = agent.config["multiagent"][
                "policy_mapping_fn"]
        policy_map = agent.workers.local_worker().policy_map
        state_init = {p: m.get_initial_state() for p, m in policy_map.items()}
        # A non-empty initial state marks the policy as recurrent (LSTM).
        use_lstm = {p: len(s) > 0 for p, s in state_init.items()}
        action_init = {
            p: _flatten_action(m.action_space.sample())
            for p, m in policy_map.items()
        }
    else:
        # No workers attached: fall back to a fresh gym env, default policy.
        env = gym.make(env_name)
        multiagent = False
        use_lstm = {DEFAULT_POLICY_ID: False}
    if out is not None:
        rollouts = []
    steps = 0
    # `num_steps or steps + 1` makes num_steps == 0 mean "run indefinitely".
    while steps < (num_steps or steps + 1):
        mapping_cache = {}  # in case policy_agent_mapping is stochastic
        if out is not None:
            rollout = []
        obs = env.reset()
        agent_states = DefaultMapping(
            lambda agent_id: state_init[mapping_cache[agent_id]])
        prev_actions = DefaultMapping(
            lambda agent_id: action_init[mapping_cache[agent_id]])
        prev_rewards = collections.defaultdict(lambda: 0.)
        done = False
        reward_total = 0.0
        while not done and steps < (num_steps or steps + 1):
            # Normalize the single-agent case to the multi-agent dict shape.
            multi_obs = obs if multiagent else {_DUMMY_AGENT_ID: obs}
            action_dict = {}
            for agent_id, a_obs in multi_obs.items():
                if a_obs is not None:
                    policy_id = mapping_cache.setdefault(
                        agent_id, policy_agent_mapping(agent_id))
                    p_use_lstm = use_lstm[policy_id]
                    if p_use_lstm:
                        # Recurrent policy: thread the RNN state through.
                        a_action, p_state, _ = agent.compute_action(
                            a_obs,
                            state=agent_states[agent_id],
                            prev_action=prev_actions[agent_id],
                            prev_reward=prev_rewards[agent_id],
                            policy_id=policy_id)
                        agent_states[agent_id] = p_state
                    else:
                        a_action = agent.compute_action(
                            a_obs,
                            prev_action=prev_actions[agent_id],
                            prev_reward=prev_rewards[agent_id],
                            policy_id=policy_id)
                    a_action = _flatten_action(a_action)  # tuple actions
                    action_dict[agent_id] = a_action
                    prev_actions[agent_id] = a_action
            action = action_dict
            # Unwrap back to a bare action for single-agent envs.
            action = action if multiagent else action[_DUMMY_AGENT_ID]
            next_obs, reward, done, _ = env.step(action)
            if multiagent:
                for agent_id, r in reward.items():
                    prev_rewards[agent_id] = r
            else:
                prev_rewards[_DUMMY_AGENT_ID] = reward
            if multiagent:
                # "__all__" signals episode termination for all agents.
                done = done["__all__"]
                reward_total += sum(reward.values())
            else:
                reward_total += reward
            if not no_render:
                env.render()
            if out is not None:
                rollout.append([obs, action, next_obs, reward, done])
            steps += 1
            obs = next_obs
        if out is not None:
            rollouts.append(rollout)
        print("Episode reward", reward_total)
    if out is not None:
        pickle.dump(rollouts, open(out, "wb"))
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run the rollout.
    parser = create_parser()
    args = parser.parse_args()
    run(args, parser)
|
{
"content_hash": "2fc22bc492858a32bd64063f91edb3ae",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 78,
"avg_line_length": 36.732394366197184,
"alnum_prop": 0.5878067484662577,
"repo_name": "ujvl/ray-ng",
"id": "f7ea31e966e25fb9da0b6f5256646ebd2078ce39",
"size": "7847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/rollout.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "349753"
},
{
"name": "C++",
"bytes": "6547"
},
{
"name": "CMake",
"bytes": "4927"
},
{
"name": "Makefile",
"bytes": "5285"
},
{
"name": "Python",
"bytes": "260095"
},
{
"name": "Shell",
"bytes": "6666"
}
],
"symlink_target": ""
}
|
import json, shutil, cPickle, os, csv
import numpy as np
import preprocessors.fft as fft
from pandas import read_csv
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from theano import config
from pandas import DataFrame
from cnn.conv_net import ConvNet
from utils.loader import load_test_data
from utils.config_name_creator import *
from utils.data_scaler import scale_across_time, scale_across_features
config.floatX = 'float32'
def minmax_rescale(probability):
    """Rescale probabilities into the open interval (0, 1) via min-max."""
    rescaler = MinMaxScaler(feature_range=(0.000000001, 0.999999999))
    return rescaler.fit_transform(probability)
def softmax_rescale(probability):
    """Standardize probabilities, then squash them through a sigmoid."""
    standardized = StandardScaler().fit_transform(probability)
    return 1.0 / (np.exp(-standardized) + 1.0)
def median_scaler(x):
    """Center x on its median, halve the spread, and shift to 0.5."""
    centered = x - np.median(x)
    return centered / 2.0 + 0.5
def merge_csv_data(submission_path, subjects, submission_name, scale=None):
    """Concatenate per-subject prediction CSVs into one submission file.

    `scale` selects an optional probability rescaling: 'minmax', 'softmax',
    'median', or None for raw values. The scale name is appended to the
    output file name.
    """
    submission_name += scale if scale else ''
    # 'wb' mode for csv.writer: Python 2 style file handling.
    with open(submission_path + '/' + submission_name + '.csv', 'wb') as f:
        writer = csv.writer(f)
        writer.writerow(['clip', 'preictal'])
    for subject in subjects:
        df = read_csv(submission_path + '/' + subject + '.csv')
        # Prefix clip ids with the subject name to make them globally unique.
        df['clip'] = [subject+'_'+i for i in df['clip']]
        if scale=='minmax':
            df['preictal'] = minmax_rescale(df.drop('clip', axis=1).values)
        elif scale =='softmax':
            df['preictal'] = softmax_rescale(df.drop('clip', axis=1).values)
        elif scale =='median':
            df['preictal'] = median_scaler(df.drop('clip', axis=1).values)
        # Re-open in append mode so each subject's rows land after the header.
        with open(submission_path + '/' + submission_name + '.csv', 'a') as f:
            df.to_csv(f, header=False, index=False)
def predict(subject, data_path, model_path, submission_path):
    """Run every trained model for `subject` and write one CSV per model."""
    patient_filenames = [filename for filename in os.listdir(model_path) if
                         subject in filename and filename.endswith('.pickle')]
    for filename in patient_filenames:
        print filename
        d = load_test_data(data_path, subject)
        x, id = d['x'], d['id']
        with open(model_path + '/' + filename, 'rb') as f:
            state_dict = cPickle.load(f)
        scalers = state_dict['scalers']
        # Scale the same way the model was trained (time axis vs feature axis).
        x, _ = scale_across_time(x, x_test=None, scalers=scalers) if state_dict['params']['scale_time'] \
            else scale_across_features(x, x_test=None, scalers=scalers)
        cnn = ConvNet(state_dict['params'])
        cnn.set_weights(state_dict['weights'])
        test_proba = cnn.get_test_proba(x)
        ans = zip(id, test_proba)
        df = DataFrame(data=ans, columns=['clip', 'preictal'])
        # Strip the '.pickle' extension so the CSV is named after the model.
        csv_name = '.'.join(filename.split('.')[:-1]) if '.' in filename else filename
        df.to_csv(submission_path + '/' + csv_name + '.csv', index=False, header=True)
def run_predictor():
    """Load SETTINGS.json, ensure FFT data exists, predict for all subjects."""
    with open('SETTINGS.json') as f:
        settings_dict = json.load(f)
    model_path = settings_dict['path']['model_path'] + '/' + create_cnn_model_name(settings_dict)
    data_path = settings_dict['path']['processed_data_path'] + '/' + create_fft_data_name(settings_dict)
    submission_path = model_path + '/submission'
    print submission_path
    # Regenerate FFT features if the processed data is missing.
    if not os.path.exists(data_path):
        fft.run_fft_preprocessor()
    if not os.path.exists(submission_path):
        os.makedirs(submission_path)
    # Snapshot the settings next to the predictions for reproducibility.
    shutil.copy2('SETTINGS.json', submission_path + '/SETTINGS.json')
    subjects = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4', 'Dog_5', 'Patient_1', 'Patient_2']
    #subjects = ['Dog_1', 'Dog_2', 'Dog_3', 'Dog_4']
    for subject in subjects:
        print '***********************', subject, '***************************'
        predict(subject, data_path, model_path, submission_path)
    # Merge per-subject CSVs into one submission per rescaling scheme.
    merge_csv_data(submission_path, subjects, submission_name='submission', scale='minmax')
    merge_csv_data(submission_path, subjects, submission_name='submission', scale='softmax')
    merge_csv_data(submission_path, subjects, submission_name='submission', scale='median')
    merge_csv_data(submission_path, subjects, submission_name='submission')
if __name__ == '__main__':
    # Script entry point.
    run_predictor()
|
{
"content_hash": "53967916c81a94d87bdec578351460e6",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 105,
"avg_line_length": 38.490566037735846,
"alnum_prop": 0.6323529411764706,
"repo_name": "IraKorshunova/kaggle-seizure-prediction",
"id": "2cd6c961ccfbe741eaa88d455cdf30774b3c5c3d",
"size": "4080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cnn/predict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99531"
}
],
"symlink_target": ""
}
|
from django.http import HttpResponse, Http404
from django.template import Context
from django.contrib.sites.models import Site
from listings.syndication.models import Feed
from listings.models import POSTING_ACTIVE
def display_feed(request, feed_url):
    """Render the syndication feed matching `feed_url` for the current site.

    Raises Http404 when no such feed exists for the site.
    """
    current_site = Site.objects.get_current()
    try:
        feed = current_site.feed_set.get(feed_url=feed_url)
    except Feed.DoesNotExist:
        raise Http404
    rendered = feed.get_template().render(
        Context({'ads': feed.ads.filter(status=POSTING_ACTIVE)}))
    return HttpResponse(rendered, content_type=feed.content_type)
|
{
"content_hash": "03840c7baeec84c9ec487cbb406d72b2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 81,
"avg_line_length": 34.94117647058823,
"alnum_prop": 0.7457912457912458,
"repo_name": "wtrevino/django-listings",
"id": "60fc9ca97ab6b601687dce3e3774c805a518e1e2",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "listings/syndication/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "5455"
},
{
"name": "Python",
"bytes": "113175"
}
],
"symlink_target": ""
}
|
import os
import shutil
import glob
from PIL import Image
def parser():
    """Copy student photo directories, keyed by enrollment number.

    Directories containing "-" are assumed to end with the enrollment
    number ("matricula") and are copied to the destination under that
    number; directories without one are logged to a text file.
    """
    rootDir = str(input("Informe o diretório de origem:\n"))
    destDir = str(input("Informe o diretório de destino:\n"))
    for dirName, subdirList, fileList in os.walk(rootDir):
        print("Diretório encontrado: %s" % dirName)
        # os.path.join keeps the glob pattern portable; the original
        # hard-coded a Windows backslash separator ("\*.jpg").
        has_image = len(glob.glob(os.path.join(dirName, "*.jpg")))
        if has_image != 0:
            dirMatr = dirName.find("-")
            if dirMatr != -1:
                matricula = dirName.split("-")[-1].strip()
                dirCopy = destDir + os.sep + matricula
                shutil.copytree(dirName, dirCopy)
                for fname in fileList:
                    print("\t%s" % fname)
            else:
                nomeAluno = dirName.split(os.sep)[-1].strip()
                # Context manager guarantees the log file is closed even on error.
                with open(destDir + os.sep + "alunos_sem_matricula.txt", "a") as arquivo:
                    arquivo.write(nomeAluno + "\n")
                print(nomeAluno, " - Não possue matrícula")
def renomearFotoPerfil():
    """Rename the first photo in each student directory to perfil.jpg.

    Directories ending with "_" are skipped. Empty directories are now
    skipped too (the original indexed fileList[0] unconditionally and
    crashed with IndexError on a directory without files).
    """
    diretorio = str(input("Informe o diretório:\n"))
    for dirName, subdirList, fileList in os.walk(diretorio):
        if dirName != diretorio and not dirName.endswith("_"):
            if not fileList:
                # Nothing to rename here.
                continue
            nomeFoto = dirName + os.sep + fileList[0]
            nomeFotoNovo = dirName + os.sep + "perfil.jpg"
            os.rename(nomeFoto, nomeFotoNovo)
            print("Origem - Arquivo: ", nomeFoto, ", Destino -> ", nomeFotoNovo)
def parserSemTraco():
    """Copy photo directories whose name has no '-' but ends in a number."""
    rootDir = str(input("Informe o diretório de origem:\n"))
    destDir = str(input("Informe o diretório de destino:\n"))
    for dirName, subdirList, fileList in os.walk(rootDir):
        print("Diretorio encontrado: %s" % dirName)
        # Portable glob; the original hard-coded a Windows backslash ("\*.jpg").
        has_image = len(glob.glob(os.path.join(dirName, "*.jpg")))
        if has_image != 0 and dirName.find("-") == -1:
            # Last space-separated token should be the enrollment number.
            matricula = dirName.split(" ")[-1]
            if matricula.isdigit():
                matricula = matricula.strip()
                dirCopy = destDir + os.sep + matricula
                shutil.copytree(dirName, dirCopy)
                for fname in fileList:
                    print("\t%s" % fname)
def redimensionarImagem():
    """Resize every file under the chosen directory to 1024x682, in place."""
    diretorio = str(input("Informe o diretório:\n"))
    width = 1024
    height = 682
    print("Padrão: Largura: ", width, ", Altura: ", height)
    for dirName, subdirList, fileList in os.walk(diretorio):
        if dirName != diretorio:
            qtdArquivos = len(fileList)
            print("Quantidade de arquivos: ", qtdArquivos)
            for file in fileList:
                # NOTE(review): assumes every file is an image PIL can open;
                # a non-image file here raises. Overwrites the original file.
                img = Image.open(dirName + os.sep + file)
                # NOTE(review): Image.ANTIALIAS is deprecated/removed in newer
                # Pillow (use Image.LANCZOS) -- confirm the pinned version.
                img = img.resize((width, height), Image.ANTIALIAS)
                img.save(dirName + os.sep + file)
                print(file," - redimensionado.")
def limparImagensReconhecimento():
    """Delete every file that is not a profile photo ('perfil' in its name)."""
    diretorio = str(input("Informe o diretório:\n"))
    for dirName, subdirList, fileList in os.walk(diretorio):
        if dirName == diretorio:
            continue
        qtd = len(fileList)
        print("Quantidade de arquivos: ", qtd)
        for nome in fileList:
            print("Analisando arquivo: ", nome)
            if nome.find("perfil") < 0:
                os.remove(dirName + os.sep + nome)
                print("Removido")
def parserNovo():
    """Copy each photo into a directory named after its enrollment number.

    The number is the last space-separated token of the file name, with
    the extension stripped. Destination directories are created on demand;
    an existing directory no longer aborts the run (the original os.mkdir
    raised FileExistsError when two files shared the same matricula).
    """
    rootDir = str(input("Informe o diretório de origem:\n"))
    destDir = str(input("Informe o diretório de destino:\n"))
    for dirName, subdirList, fileList in os.walk(rootDir):
        for fileName in fileList:
            matricula = fileName.split(" ")[-1].split(".")[0]
            if matricula.isdigit():
                matricula = matricula.strip()
                dirFile = dirName + os.sep + fileName
                dirCopy = destDir + os.sep + matricula
                # exist_ok tolerates repeated enrollment numbers.
                os.makedirs(dirCopy, exist_ok=True)
                shutil.copy(dirFile, dirCopy)
                print(fileName + " transferido com sucesso.")
def main():
    """Interactive menu dispatching to the selected tool.

    Uses int() instead of the original eval(): eval on raw user input
    executes arbitrary code (security issue). Non-numeric input is now
    treated like an unknown option (do nothing) instead of raising.
    """
    try:
        pergunta = int(input("Para acessar o parser, digite 1. \n Para acessar o parserSemTraço, digite 2. \n Para renomear as fotos de perfil, digite 3. \n Para redimensionar as fotos, digite 4. \n Para remover fotos de reconhecimento, digite 5. \n Para usar o parser novo, digite 6.\n"))
    except ValueError:
        return
    # Dispatch table replaces the if/elif ladder.
    opcoes = {
        1: parser,
        2: parserSemTraco,
        3: renomearFotoPerfil,
        4: redimensionarImagem,
        5: limparImagensReconhecimento,
        6: parserNovo,
    }
    acao = opcoes.get(pergunta)
    if acao is not None:
        acao()
if __name__ == "__main__":
    # Script entry point: show the interactive menu.
    main()
|
{
"content_hash": "2c5319e2000257091d3ed10256d3d985",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 286,
"avg_line_length": 38.61417322834646,
"alnum_prop": 0.550978792822186,
"repo_name": "LADOSSIFPB/nutrif",
"id": "097844d06fa6f73445a080c7af0e84ad1e47746e",
"size": "4918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nutrif-image-parser/parserByFile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "231534"
},
{
"name": "HTML",
"bytes": "6534305"
},
{
"name": "Java",
"bytes": "507491"
},
{
"name": "JavaScript",
"bytes": "1402876"
},
{
"name": "Python",
"bytes": "8425"
},
{
"name": "TSQL",
"bytes": "2780"
}
],
"symlink_target": ""
}
|
"""Hparams for model architecture and trainer."""
import ast
from collections import abc
import copy
from typing import Any, Dict, Text
import six
import tensorflow as tf
import yaml
def eval_str_fn(val):
    """Parse a string into a Python value; lowercase booleans come first,
    anything else goes through ast.literal_eval, falling back to the raw
    string when it is not a literal."""
    if val in {'true', 'false'}:
        return val == 'true'
    try:
        parsed = ast.literal_eval(val)
    except (ValueError, SyntaxError):
        return val
    return parsed
# pylint: disable=protected-access
class Config(object):
    """A config utility class.

    Wraps a (possibly nested) dict as an attribute-accessible config.
    Nested dicts become nested Configs; other values are deep-copied on
    assignment so two Configs never share mutable state.
    """

    def __init__(self, config_dict=None):
        self.update(config_dict)

    def __setattr__(self, k, v):
        # Dicts become nested Configs; everything else is deep-copied.
        self.__dict__[k] = Config(v) if isinstance(v, dict) else copy.deepcopy(v)

    def __getattr__(self, k):
        return self.__dict__[k]

    def __getitem__(self, k):
        return self.__dict__[k]

    def __repr__(self):
        return repr(self.as_dict())

    def __deepcopy__(self, memodict):
        return type(self)(self.as_dict())

    def __str__(self):
        try:
            return yaml.dump(self.as_dict(), indent=4)
        except TypeError:
            # Fall back for values yaml cannot serialize.
            return str(self.as_dict())

    def _update(self, config_dict, allow_new_keys=True):
        """Recursively update internal members."""
        if not config_dict:
            return
        for k, v in six.iteritems(config_dict):
            if k not in self.__dict__:
                if allow_new_keys:
                    self.__setattr__(k, v)
                else:
                    raise KeyError('Key `{}` does not exist for overriding. '.format(k))
            else:
                # Merge nested structures recursively; scalars overwrite.
                if isinstance(self.__dict__[k], Config) and isinstance(v, dict):
                    self.__dict__[k]._update(v, allow_new_keys)
                elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):
                    self.__dict__[k]._update(v.as_dict(), allow_new_keys)
                else:
                    self.__setattr__(k, v)

    def get(self, k, default_value=None):
        return self.__dict__.get(k, default_value)

    def update(self, config_dict):
        """Update members while allowing new keys."""
        self._update(config_dict, allow_new_keys=True)

    def keys(self):
        return self.__dict__.keys()

    def override(self, config_dict_or_str, allow_new_keys=False):
        """Update members while disallowing new keys.

        Accepts a dict, a 'k=v,k2=v2' string, or a path to a .yaml file.
        """
        if isinstance(config_dict_or_str, str):
            if not config_dict_or_str:
                # Empty override string: nothing to do.
                return
            elif '=' in config_dict_or_str:
                config_dict = self.parse_from_str(config_dict_or_str)
            elif config_dict_or_str.endswith('.yaml'):
                config_dict = self.parse_from_yaml(config_dict_or_str)
            else:
                raise ValueError(
                    'Invalid string {}, must end with .yaml or contains "=".'.format(
                        config_dict_or_str))
        elif isinstance(config_dict_or_str, dict):
            config_dict = config_dict_or_str
        else:
            raise ValueError('Unknown value type: {}'.format(config_dict_or_str))
        self._update(config_dict, allow_new_keys)

    def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]:
        """Parses a yaml file and returns a dictionary."""
        with tf.io.gfile.GFile(yaml_file_path, 'r') as f:
            config_dict = yaml.load(f, Loader=yaml.FullLoader)
            return config_dict

    def save_to_yaml(self, yaml_file_path):
        """Write a dictionary into a yaml file."""
        with tf.io.gfile.GFile(yaml_file_path, 'w') as f:
            yaml.dump(self.as_dict(), f, default_flow_style=False)

    def parse_from_str(self, config_str: Text) -> Dict[Any, Any]:
        """Parse a string like 'x.y=1,x.z=2' to nested dict {x: {y: 1, z: 2}}."""
        if not config_str:
            return {}
        config_dict = {}
        try:
            for kv_pair in config_str.split(','):
                if not kv_pair:  # skip empty string
                    continue
                key_str, value_str = kv_pair.split('=')
                key_str = key_str.strip()

                def add_kv_recursive(k, v):
                    """Recursively parse x.y.z=tt to {x: {y: {z: tt}}}."""
                    if '.' not in k:
                        if '*' in v:
                            # we reserve * to split arrays.
                            return {k: [eval_str_fn(vv) for vv in v.split('*')]}
                        return {k: eval_str_fn(v)}
                    pos = k.index('.')
                    return {k[:pos]: add_kv_recursive(k[pos + 1:], v)}

                def merge_dict_recursive(target, src):
                    """Recursively merge two nested dictionary."""
                    for k in src.keys():
                        if ((k in target and isinstance(target[k], dict) and
                             isinstance(src[k], abc.Mapping))):
                            merge_dict_recursive(target[k], src[k])
                        else:
                            target[k] = src[k]

                merge_dict_recursive(config_dict, add_kv_recursive(key_str, value_str))
            return config_dict
        except ValueError:
            raise ValueError('Invalid config_str: {}'.format(config_str))

    def as_dict(self):
        """Returns a dict representation."""
        config_dict = {}
        for k, v in six.iteritems(self.__dict__):
            if isinstance(v, Config):
                config_dict[k] = v.as_dict()
            else:
                config_dict[k] = copy.deepcopy(v)
        return config_dict
# pylint: enable=protected-access
def default_detection_configs():
    """Returns a default detection configs.

    Every field here can be overridden per model via Config.override; see
    efficientdet_model_param_dict below for the per-model values.
    """
    h = Config()
    # model name.
    h.name = 'efficientdet-d1'
    # activation type: see activation_fn in utils.py.
    h.act_type = 'swish'
    # input preprocessing parameters
    h.image_size = 640  # An integer or a string WxH such as 640x320.
    h.target_size = None
    h.input_rand_hflip = True
    h.jitter_min = 0.1
    h.jitter_max = 2.0
    h.autoaugment_policy = None
    h.grid_mask = False
    h.sample_image = None
    h.map_freq = 5  # AP eval frequency in epochs.
    # dataset specific parameters
    # TODO(tanmingxing): update this to be 91 for COCO, and 21 for pascal.
    h.num_classes = 90  # 1+ actual classes, 0 is reserved for background.
    h.seg_num_classes = 3  # segmentation classes
    h.heads = ['object_detection']  # 'object_detection', 'segmentation'
    h.skip_crowd_during_training = True
    h.label_map = None  # a dict or a string of 'coco', 'voc', 'waymo'.
    h.max_instances_per_image = 100  # Default to 100 for COCO.
    h.regenerate_source_id = False
    # model architecture
    h.min_level = 3
    h.max_level = 7
    h.num_scales = 3
    # ratio w/h: 2.0 means w=1.4, h=0.7. Can be computed with k-mean per dataset.
    h.aspect_ratios = [1.0, 2.0, 0.5]  # [[0.7, 1.4], [1.0, 1.0], [1.4, 0.7]]
    h.anchor_scale = 4.0
    # is batchnorm training mode
    h.is_training_bn = True
    # optimization
    h.momentum = 0.9
    h.optimizer = 'sgd'  # can be 'adam' or 'sgd'.
    h.learning_rate = 0.08  # 0.008 for adam.
    h.lr_warmup_init = 0.008  # 0.0008 for adam.
    h.lr_warmup_epoch = 1.0
    h.first_lr_drop_epoch = 200.0
    h.second_lr_drop_epoch = 250.0
    h.poly_lr_power = 0.9
    h.clip_gradients_norm = 10.0
    h.num_epochs = 300
    h.data_format = 'channels_last'
    # The default image normalization is identical to Cloud TPU ResNet.
    h.mean_rgb = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    h.stddev_rgb = [0.229 * 255, 0.224 * 255, 0.225 * 255]
    h.scale_range = False
    # classification loss
    h.label_smoothing = 0.0  # 0.1 is a good default
    # Behold the focal loss parameters
    h.alpha = 0.25
    h.gamma = 1.5
    # localization loss
    h.delta = 0.1  # regularization parameter of huber loss.
    # total loss = box_loss * box_loss_weight + iou_loss * iou_loss_weight
    h.box_loss_weight = 50.0
    h.iou_loss_type = None
    h.iou_loss_weight = 1.0
    # regularization l2 loss.
    h.weight_decay = 4e-5
    h.strategy = None  # 'tpu', 'gpus', None
    h.mixed_precision = False  # If False, use float32.
    h.loss_scale = None  # set to 2**16 enables dynamic loss scale
    h.model_optimizations = {}  # 'prune':{}
    # For detection.
    h.box_class_repeats = 3
    h.fpn_cell_repeats = 3
    h.fpn_num_filters = 88
    h.separable_conv = True
    h.apply_bn_for_resampling = True
    h.conv_after_downsample = False
    h.conv_bn_act_pattern = False
    h.drop_remainder = True  # drop remainder for the final batch eval.
    # For post-processing nms, must be a dict.
    h.nms_configs = {
        'method': 'gaussian',
        'iou_thresh': None,  # use the default value based on method.
        'score_thresh': 0.,
        'sigma': None,
        'pyfunc': False,
        'max_nms_inputs': 0,
        'max_output_size': 100,
    }
    h.tflite_max_detections = 100
    # version.
    h.fpn_name = None
    h.fpn_weight_method = None
    h.fpn_config = None
    h.batch_norm_trainable = True
    # No stochastic depth in default.
    h.survival_prob = None
    h.img_summary_steps = None
    h.lr_decay_method = 'cosine'
    h.moving_average_decay = 0.9998
    h.ckpt_var_scope = None  # ckpt variable scope.
    # If true, skip loading pretrained weights if shape mismatches.
    h.skip_mismatch = True
    h.backbone_name = 'efficientnet-b1'
    h.backbone_config = None
    h.var_freeze_expr = None
    # A temporary flag to switch between legacy and keras models.
    h.use_keras_model = True
    h.dataset_type = None
    h.positives_momentum = None
    h.grad_checkpoint = False
    # Parameters for the Checkpoint Callback.
    h.verbose = 1
    h.save_freq = 'epoch'
    return h
# Per-model overrides applied on top of default_detection_configs().
# Scaling pattern: larger models use a bigger backbone, input resolution,
# and more FPN filters/repeats.
efficientdet_model_param_dict = {
    'efficientdet-d0':
        dict(
            name='efficientdet-d0',
            backbone_name='efficientnet-b0',
            image_size=512,
            fpn_num_filters=64,
            fpn_cell_repeats=3,
            box_class_repeats=3,
        ),
    'efficientdet-d1':
        dict(
            name='efficientdet-d1',
            backbone_name='efficientnet-b1',
            image_size=640,
            fpn_num_filters=88,
            fpn_cell_repeats=4,
            box_class_repeats=3,
        ),
    'efficientdet-d2':
        dict(
            name='efficientdet-d2',
            backbone_name='efficientnet-b2',
            image_size=768,
            fpn_num_filters=112,
            fpn_cell_repeats=5,
            box_class_repeats=3,
        ),
    'efficientdet-d3':
        dict(
            name='efficientdet-d3',
            backbone_name='efficientnet-b3',
            image_size=896,
            fpn_num_filters=160,
            fpn_cell_repeats=6,
            box_class_repeats=4,
        ),
    'efficientdet-d4':
        dict(
            name='efficientdet-d4',
            backbone_name='efficientnet-b4',
            image_size=1024,
            fpn_num_filters=224,
            fpn_cell_repeats=7,
            box_class_repeats=4,
        ),
    'efficientdet-d5':
        dict(
            name='efficientdet-d5',
            backbone_name='efficientnet-b5',
            image_size=1280,
            fpn_num_filters=288,
            fpn_cell_repeats=7,
            box_class_repeats=4,
        ),
    'efficientdet-d6':
        dict(
            name='efficientdet-d6',
            backbone_name='efficientnet-b6',
            image_size=1280,
            fpn_num_filters=384,
            fpn_cell_repeats=8,
            box_class_repeats=5,
            fpn_weight_method='sum',  # Use unweighted sum for stability.
        ),
    'efficientdet-d7':
        dict(
            name='efficientdet-d7',
            backbone_name='efficientnet-b6',
            image_size=1536,
            fpn_num_filters=384,
            fpn_cell_repeats=8,
            box_class_repeats=5,
            anchor_scale=5.0,
            fpn_weight_method='sum',  # Use unweighted sum for stability.
        ),
    'efficientdet-d7x':
        dict(
            name='efficientdet-d7x',
            backbone_name='efficientnet-b7',
            image_size=1536,
            fpn_num_filters=384,
            fpn_cell_repeats=8,
            box_class_repeats=5,
            anchor_scale=4.0,
            max_level=8,
            fpn_weight_method='sum',  # Use unweighted sum for stability.
        ),
}

# Overrides shared by all -lite variants (mobile-friendly ops/normalization).
lite_common_param = dict(
    mean_rgb=127.0,
    stddev_rgb=128.0,
    act_type='relu6',
    fpn_weight_method='sum',
)

efficientdet_lite_param_dict = {
    # lite models are in progress and subject to changes.
    # mean_rgb and stddev_rgb are consistent with EfficientNet-Lite models in
    # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/lite/efficientnet_lite_builder.py#L28
    'efficientdet-lite0':
        dict(
            name='efficientdet-lite0',
            backbone_name='efficientnet-lite0',
            image_size=320,
            fpn_num_filters=64,
            fpn_cell_repeats=3,
            box_class_repeats=3,
            anchor_scale=3.0,
            **lite_common_param,
        ),
    'efficientdet-lite1':
        dict(
            name='efficientdet-lite1',
            backbone_name='efficientnet-lite1',
            image_size=384,
            fpn_num_filters=88,
            fpn_cell_repeats=4,
            box_class_repeats=3,
            anchor_scale=3.0,
            **lite_common_param,
        ),
    'efficientdet-lite2':
        dict(
            name='efficientdet-lite2',
            backbone_name='efficientnet-lite2',
            image_size=448,
            fpn_num_filters=112,
            fpn_cell_repeats=5,
            box_class_repeats=3,
            anchor_scale=3.0,
            **lite_common_param,
        ),
    'efficientdet-lite3':
        dict(
            name='efficientdet-lite3',
            backbone_name='efficientnet-lite3',
            image_size=512,
            fpn_num_filters=160,
            fpn_cell_repeats=6,
            box_class_repeats=4,
            **lite_common_param,
        ),
    'efficientdet-lite3x':
        dict(
            name='efficientdet-lite3x',
            backbone_name='efficientnet-lite3',
            image_size=640,
            fpn_num_filters=200,
            fpn_cell_repeats=6,
            box_class_repeats=4,
            anchor_scale=3.0,
            **lite_common_param,
        ),
    'efficientdet-lite4':
        dict(
            name='efficientdet-lite4',
            backbone_name='efficientnet-lite4',
            image_size=640,
            fpn_num_filters=224,
            fpn_cell_repeats=7,
            box_class_repeats=4,
            **lite_common_param,
        ),
}
def get_efficientdet_config(model_name='efficientdet-d1'):
    """Get the default config for EfficientDet based on model name."""
    config = default_detection_configs()
    for param_dict in (efficientdet_model_param_dict,
                       efficientdet_lite_param_dict):
        if model_name in param_dict:
            config.override(param_dict[model_name])
            return config
    raise ValueError('Unknown model name: {}'.format(model_name))
def get_detection_config(model_name):
    """Dispatch to the EfficientDet config builder by model-name prefix."""
    if not model_name.startswith('efficientdet'):
        raise ValueError('model name must start with efficientdet.')
    return get_efficientdet_config(model_name)
|
{
"content_hash": "bc4e720409f05b2fa0227977b2948d51",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 118,
"avg_line_length": 30.541052631578946,
"alnum_prop": 0.5863376301096023,
"repo_name": "tensorflow/examples",
"id": "bfa0d7e2ee297fb79a00a09b581700c7ef78bfa7",
"size": "15189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_examples/lite/model_maker/third_party/efficientdet/hparams_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "106227"
},
{
"name": "CMake",
"bytes": "1553"
},
{
"name": "CSS",
"bytes": "4746"
},
{
"name": "Dockerfile",
"bytes": "467"
},
{
"name": "HTML",
"bytes": "12491"
},
{
"name": "Java",
"bytes": "305092"
},
{
"name": "JavaScript",
"bytes": "24461"
},
{
"name": "Jupyter Notebook",
"bytes": "1733035"
},
{
"name": "Kotlin",
"bytes": "631463"
},
{
"name": "Objective-C",
"bytes": "14639"
},
{
"name": "Objective-C++",
"bytes": "14293"
},
{
"name": "Python",
"bytes": "1232357"
},
{
"name": "Ruby",
"bytes": "3744"
},
{
"name": "Shell",
"bytes": "41573"
},
{
"name": "Starlark",
"bytes": "17498"
},
{
"name": "Swift",
"bytes": "553535"
}
],
"symlink_target": ""
}
|
from flask_turboduck.auth import Auth
from app import app, db
from models import User
# Authentication wrapper for TurboDuck
auth = Auth(app, db, user_model=User)
|
{
"content_hash": "dbbf8926b081d7943cdd5afb080b975c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 38,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.7818181818181819,
"repo_name": "DommertTech/flask-turnkey",
"id": "911c4d016f82a2443d64c6cdd27c892eac674dab",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_turnkey/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2607"
},
{
"name": "HTML",
"bytes": "50115"
},
{
"name": "JavaScript",
"bytes": "6800"
},
{
"name": "Python",
"bytes": "167330"
}
],
"symlink_target": ""
}
|
import inspect
import types
import sys
from pkg_resources import get_distribution, DistributionNotFound
# Package metadata.
__title__ = 'implements'
__author__ = ('Kamil Sindi <ksindi@ksindi.com>, '
              'Praveen G Shirali <praveengshirali@gmail.com>')
__license__ = 'Apache License, Version 2.0'
try:
    # Resolve the installed package version from setuptools metadata.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed
    pass
__all__ = ['Interface', 'implements']
class Interface:
    """Marker base class from which interface definitions derive."""
    pass
def implements(interface_cls):
    """Class decorator: verify the decorated class implements `interface_cls`.

    Raises NotImplementedError listing every mismatch found across methods,
    properties, and attributes.
    """
    def _decorator(cls):
        verify_class_hierarchy(interface_cls, cls)
        problems = (list(verify_methods(interface_cls, cls)) +
                    list(verify_properties(interface_cls, cls)) +
                    list(verify_attributes(interface_cls, cls)))
        if problems:
            raise NotImplementedError(
                'Found {} errors in implementation:\n- {}\nwith {}'.format(
                    len(problems), '\n- '.join(problems), cls))
        return cls
    return _decorator
def get_mro(cls):
    """Return cls's method resolution order with the trailing `object` dropped."""
    mro = cls.mro()
    if mro[-1] is object:
        return mro[:-1]
    return mro
def verify_class_hierarchy(ifc, cls):
    """Raise ValueError when implementation and interface share any ancestry."""
    overlap = set(get_mro(ifc)).intersection(get_mro(cls))
    if len(overlap):
        raise ValueError(
            "Found {} common classes between the implementation and the "
            "interface. Expected none. The implementation class and any "
            "class in its class-hierarchy, must not inherit from the "
            "interface class, or any class from the interface hierarchy. "
            "Common classes: [{}]"
            "".format(len(overlap), ", ".join([str(s) for s in overlap]))
        )
def getobj_via_dict(cls, name):
    """Fetch *name* from the first class ``__dict__`` along ``cls.__mro__``.

    Unlike ``getattr`` this does not trigger the descriptor protocol, so
    ``classmethod``/``staticmethod`` wrappers come back unwrapped.  Returns
    None when no class in the MRO defines the attribute.
    """
    return next(
        (klass.__dict__[name] for klass in cls.__mro__ if name in klass.__dict__),
        None,
    )
def is_classmethod(obj):
    """True if *obj* is a classmethod wrapper (or, on 3.7+, a C-level
    classmethod descriptor such as ``dict.fromkeys``)."""
    if sys.version_info >= (3, 7):
        kinds = (classmethod, types.ClassMethodDescriptorType)
    else:
        kinds = classmethod
    return isinstance(obj, kinds)
def is_staticmethod(obj):
    """True if *obj* is a staticmethod wrapper or a builtin method object."""
    return isinstance(obj, staticmethod) or isinstance(obj, types.BuiltinMethodType)
def verify_method_type(method_typer, expected_type,
                       name, ifc_obj, cls_obj, ifc_name, cls_name):
    """Compare one method-kind trait between interface and implementation.

    Args:
        method_typer: single-argument predicate returning True when its
            argument matches the method kind being checked
        expected_type: human-readable name of that kind, used in the error
        name: attribute name under inspection
        ifc_obj: raw object fetched from the interface class dict
        cls_obj: raw object fetched from the implementation class dict
        ifc_name: interface class name
        cls_name: implementation class name

    Returns:
        A list with one error string when the interface declares the kind
        but the implementation does not match it; otherwise an empty list.
    """
    if not method_typer(ifc_obj):
        return []
    if method_typer(cls_obj):
        return []
    return [
        "'{}' must implement '{}' as {} as defined in interface '{}'"
        "".format(cls_name, name, expected_type, ifc_name)
    ]
def verify_methods(interface_cls, cls):
    """Return error strings for every interface method that the
    implementation is missing, implements as the wrong kind of method, or
    implements with a different signature."""
    def _is_method_like(member):
        return inspect.isfunction(member) or inspect.ismethod(member)

    # (predicate, human-readable kind) pairs checked for every method name.
    kind_checks = [
        (is_classmethod, "a classmethod"),
        (is_staticmethod, "a staticmethod"),
        (inspect.isasyncgenfunction, "an async genenerator-function"),
        (inspect.isgeneratorfunction, "a generator-function"),
        (inspect.iscoroutinefunction, "a coroutine-function")
    ]
    ifc_name = interface_cls.__name__
    cls_name = cls.__name__
    errors = []
    for name, method in inspect.getmembers(interface_cls, _is_method_like):
        expected_signature = inspect.signature(method)
        implementation = getattr(cls, name, None)
        actual_signature = None
        if implementation and callable(implementation):
            actual_signature = inspect.signature(implementation)
        # Fetch raw class-dict objects so classmethod/staticmethod wrappers
        # are visible (getattr would unwrap them).
        ifc_raw = getobj_via_dict(interface_cls, name)
        cls_raw = getobj_via_dict(cls, name)
        for checker, kind_label in kind_checks:
            errors.extend(
                verify_method_type(checker, kind_label,
                                   name, ifc_raw, cls_raw, ifc_name, cls_name)
            )
        if actual_signature != expected_signature:
            errors.append(
                "'{}' must implement method '{}{}' defined in interface '{}'"
                .format(cls_name, name, expected_signature, ifc_name)
            )
    return errors
def verify_properties(interface_cls, cls):
    """Return error strings for every property accessor (getter/setter/
    deleter) declared on the interface that the implementation is missing
    or implements with a different signature."""
    errors = []
    # Maps property slots to the human-readable accessor name used in errors.
    prop_attrs = dict(fget='getter', fset='setter', fdel='deleter')
    descriptors = inspect.getmembers(interface_cls, inspect.isdatadescriptor)
    for name, prop in descriptors:
        cls_prop = getattr(cls, name, None)
        for attr in prop_attrs:
            # instanceof doesn't work for class function comparison
            ifc_prop_obj = getattr(prop, attr, None)
            cls_prop_obj = getattr(cls_prop, attr, None)
            # Only accessors the interface actually defines are checked.
            if ifc_prop_obj:
                cls_name = cls.__name__
                ifc_name = interface_cls.__name__
                proptype = prop_attrs[attr]
                # -- verify presence and type of data-descriptors
                if type(ifc_prop_obj) != type(cls_prop_obj):
                    errors.append(
                        "'{}' must implement a {} for property '{}' defined "
                        "in interface '{}'"
                        "".format(cls_name, proptype, name, ifc_name)
                    )
                    # Signature check is meaningless when the accessor is
                    # missing or of the wrong type.
                    continue
                # -- verify signatures of data-descriptors
                ifc_prop_sig = inspect.signature(ifc_prop_obj)
                cls_prop_sig = None
                if callable(cls_prop_obj):
                    cls_prop_sig = inspect.signature(cls_prop_obj)
                if ifc_prop_sig != cls_prop_sig:
                    errors.append(
                        "'{}' must implement a {} for property '{}' with the "
                        "same signature as defined in interface '{}'"
                        "".format(cls_name, proptype, name, ifc_name)
                    )
    return errors
def verify_attributes(interface_cls, cls):
    """Return an error string for each non-callable class attribute that the
    interface declares but the implementation lacks."""
    missing = get_attributes(interface_cls) - get_attributes(cls)
    return [
        "'{}' must have class attribute '{}' defined in interface '{}'"
        .format(cls.__name__, attr, interface_cls.__name__)
        for attr in missing
    ]
def get_attributes(cls):
    """Collect the names of *cls*'s non-callable attributes, excluding
    everything a bare ``object`` subclass already carries."""
    baseline = set(dir(type('dummy', (object,), {})))
    return {name for name, value in inspect.getmembers(cls)
            if name not in baseline and not callable(value)}
|
{
"content_hash": "2cd703498dc1bbc3ff24ba21abd6d018",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 79,
"avg_line_length": 35.28846153846154,
"alnum_prop": 0.5816076294277929,
"repo_name": "ksindi/implements",
"id": "247df22a587eaf6a491d12c1b3edb4a953df3560",
"size": "7921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "implements.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1568"
},
{
"name": "Python",
"bytes": "16183"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the documents app's initial tables:
    Folder, File, Document and WebLink, each extending core.Object via a
    one-to-one ``object_ptr`` primary key."""
    def forwards(self, orm):
        """Create the four documents tables and emit create signals."""
        # Adding model 'Folder'
        db.create_table('documents_folder', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='child_set', null=True, to=orm['documents.Folder'])),
        ))
        db.send_create_signal('documents', ['Folder'])
        # Adding model 'File'
        db.create_table('documents_file', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('folder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['documents.Folder'])),
            ('content', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
        ))
        db.send_create_signal('documents', ['File'])
        # Adding model 'Document'
        db.create_table('documents_document', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('folder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['documents.Folder'])),
            ('body', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('documents', ['Document'])
        # Adding model 'WebLink'
        db.create_table('documents_weblink', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('folder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['documents.Folder'])),
            ('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ))
        db.send_create_signal('documents', ['WebLink'])
    def backwards(self, orm):
        """Drop the four documents tables (reverse of ``forwards``)."""
        # Deleting model 'Folder'
        db.delete_table('documents_folder')
        # Deleting model 'File'
        db.delete_table('documents_file')
        # Deleting model 'Document'
        db.delete_table('documents_document')
        # Deleting model 'WebLink'
        db.delete_table('documents_weblink')
    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in migration history; generated, do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.group': {
            'Meta': {'ordering': "['name']", 'object_name': 'Group'},
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
        },
        'core.object': {
            'Meta': {'object_name': 'Object'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'everybody_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everybody_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everybody_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']"}),
            'group_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
            'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}),
            'user_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'core.user': {
            'Meta': {'ordering': "['name']", 'object_name': 'User'},
            'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'documents.document': {
            'Meta': {'ordering': "['-last_updated']", 'object_name': 'Document', '_ormbases': ['core.Object']},
            'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['documents.Folder']"}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'documents.file': {
            'Meta': {'ordering': "['-last_updated']", 'object_name': 'File', '_ormbases': ['core.Object']},
            'content': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['documents.Folder']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
        },
        'documents.folder': {
            'Meta': {'object_name': 'Folder', '_ormbases': ['core.Object']},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['documents.Folder']"})
        },
        'documents.weblink': {
            'Meta': {'ordering': "['-last_updated']", 'object_name': 'WebLink', '_ormbases': ['core.Object']},
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['documents.Folder']"}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }
    complete_apps = ['documents']
|
{
"content_hash": "77ea38625d047b7e53add1ad8ac04981",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 203,
"avg_line_length": 71.8562874251497,
"alnum_prop": 0.5621666666666667,
"repo_name": "alejo8591/maker",
"id": "599ae52a2f18e45703a1345c7f5ac47999fbac44",
"size": "12130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "documents/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1578070"
},
{
"name": "Perl",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2863599"
},
{
"name": "Shell",
"bytes": "3561"
}
],
"symlink_target": ""
}
|
from enum import auto, Enum
from collections import defaultdict
from dataclasses import dataclass, field
from datetime import datetime
from typing import Dict, List, Optional
from rdr_service.model.questionnaire_response import QuestionnaireResponseAnswer, QuestionnaireResponseStatus
class ParticipantResponses:
    """A participant's questionnaire responses, keyed by response id."""
    def __init__(self):
        self.responses: Dict[int, 'Response'] = {}
        # Lazily-built cache for in_authored_order.
        self._responses_in_order = None
    @property
    def in_authored_order(self) -> List['Response']:
        """Responses sorted by authored time, cached after first access.

        Responses with no authored date sort first.  Fix: the previous
        fallback key ``authored_datetime or False`` raised TypeError under
        Python 3 whenever dated and undated responses were mixed (bool and
        datetime are not comparable); ``datetime.min`` keeps them orderable.
        NOTE(review): assumes naive datetimes — an aware authored_datetime
        would not compare with datetime.min; confirm upstream.
        """
        if not self._responses_in_order:
            self._responses_in_order = sorted(
                self.responses.values(),
                key=lambda response: response.authored_datetime or datetime.min
            )
        return self._responses_in_order
@dataclass
class Response:
    """A participant's answers to one questionnaire.

    ``answered_codes`` maps question code strings to the Answers recorded
    for that question.  NOTE(review): ``get_answers_for`` lower-cases its
    lookup key but ``has_answer_for`` does not, so the latter is
    case-sensitive — confirm callers always pass lower-cased codes there.
    """
    id: int  # questionnaire response id
    survey_code: str
    authored_datetime: datetime
    status: QuestionnaireResponseStatus
    answered_codes: Dict[str, List['Answer']] = field(default_factory=lambda: defaultdict(list))
    def has_answer_for(self, question_code_str):
        """Return True if at least one valid answer exists for the code."""
        return (
            question_code_str in self.answered_codes
            and any(answer.is_valid for answer in self.answered_codes[question_code_str])
        )
    def get_answers_for(self, question_code_str: str) -> Optional[List['Answer']]:
        """Return the valid answers for a code (case-insensitive lookup).

        Returns None when the code is None, unknown, or only has invalid
        answers.
        """
        if question_code_str is None:
            return None
        question_code_str = question_code_str.lower()
        # .get avoids inserting an empty entry into the defaultdict.
        answers = self.answered_codes.get(question_code_str)
        if not answers or not any(answer.is_valid for answer in answers):
            return None
        return [answer for answer in answers if answer.is_valid]
    def get_single_answer_for(self, question_code_str):
        """Return the single valid answer for a code, or None.

        Raises when multiple answers with distinct values are present.
        """
        answers = self.get_answers_for(question_code_str)
        if not answers:
            return None
        if len(answers) > 1 and len({answer.value for answer in answers}) > 1:
            # fixed typo in the error message ('responsed' -> 'response')
            raise Exception(f'Too many answers found for question "{question_code_str}" (response id {self.id})')
        else:
            return answers[0]
class DataType(Enum):
    """Tags which value* column of QuestionnaireResponseAnswer an Answer's
    string value was converted from (see ``Answer.from_db_model``)."""
    BOOLEAN = auto()
    CODE = auto()
    DATE = auto()
    DATETIME = auto()
    DECIMAL = auto()
    INTEGER = auto()
    STRING = auto()
    URI = auto()
@dataclass
class Answer:
    """One answer value, normalized to a string plus its source DataType."""
    id: int
    value: str
    data_type: DataType
    is_valid: bool = True
    @classmethod
    def from_db_model(cls, db_answer: QuestionnaireResponseAnswer):
        """Build an Answer from a QuestionnaireResponseAnswer row.

        The first populated value* column (checked in a fixed order) wins;
        raises when no value column is set.
        """
        if db_answer.valueBoolean is not None:
            answer_str = str(db_answer.valueBoolean)
            answer_type = DataType.BOOLEAN
        elif db_answer.valueCodeId is not None:
            # Code answers carry their display value on the related code row.
            answer_str = db_answer.code.value
            answer_type = DataType.CODE
        elif db_answer.valueDate is not None:
            answer_str = str(db_answer.valueDate)
            answer_type = DataType.DATE
        elif db_answer.valueDateTime is not None:
            answer_str = str(db_answer.valueDateTime)
            answer_type = DataType.DATETIME
        elif db_answer.valueDecimal is not None:
            answer_str = str(db_answer.valueDecimal)
            answer_type = DataType.DECIMAL
        elif db_answer.valueInteger is not None:
            answer_str = str(db_answer.valueInteger)
            answer_type = DataType.INTEGER
        elif db_answer.valueString is not None:
            answer_str = db_answer.valueString
            answer_type = DataType.STRING
        elif db_answer.valueUri is not None:
            answer_str = db_answer.valueUri
            answer_type = DataType.URI
        else:
            # fixed: the original message was missing its closing quote
            raise Exception(f'Unable to parse answer with id "{db_answer.questionnaireResponseAnswerId}"')
        return cls(
            id=db_answer.questionnaireResponseAnswerId,
            value=answer_str,
            data_type=answer_type
        )
|
{
"content_hash": "32000e31b6343c904e0a1e581b16321a",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 114,
"avg_line_length": 33.37931034482759,
"alnum_prop": 0.6309400826446281,
"repo_name": "all-of-us/raw-data-repository",
"id": "cd434df44df00c1adb54cdd9c1923cd380c428ff",
"size": "3872",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "rdr_service/domain_model/response.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
}
|
"""
minimize the total (weighted) travel cost for servicing
a set of customers from k facilities.
Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
import math
import random
from pyscipopt import Model, quicksum, multidict
def kmedian(I, J, c, k):
    """Build a k-median model: serve every customer in I from exactly k
    facilities drawn from J, minimizing total service cost.

    Parameters:
        - I: set of customers
        - J: set of potential facilities
        - c[i,j]: cost of servicing customer i from facility j
        - k: number of facilities to be used
    Returns a pyscipopt Model ready to be solved; the (assignment,
    facility-open) variable dicts are stashed on ``model.data``.
    """
    model = Model("k-median")
    assign, opened = {}, {}
    for j in J:
        opened[j] = model.addVar(vtype="B", name="y(%s)" % j)
        for i in I:
            assign[i, j] = model.addVar(vtype="B", name="x(%s,%s)" % (i, j))
    for i in I:
        # each customer is assigned to exactly one facility
        model.addCons(quicksum(assign[i, j] for j in J) == 1, "Assign(%s)" % i)
        for j in J:
            # assignment allowed only to an opened facility
            model.addCons(assign[i, j] <= opened[j], "Strong(%s,%s)" % (i, j))
    model.addCons(quicksum(opened[j] for j in J) == k, "Facilities")
    model.setObjective(
        quicksum(c[i, j] * assign[i, j] for i in I for j in J), "minimize")
    model.data = assign, opened
    return model
def distance(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2).

    Uses math.hypot, which avoids intermediate overflow/underflow compared
    with sqrt of a sum of squares.
    """
    return math.hypot(x2 - x1, y2 - y1)
def make_data(n, m, same=True):
    """Create a random k-median instance.

    With same=True, customers and facilities share the same max(n, m)
    points; otherwise n customer points are followed by m facility points.
    Returns (I, J, c, x, y) where c[i, j] is the Euclidean distance from
    customer i to facility j.
    """
    if same == True:
        I, J = range(n), range(m)
        point_count = max(m, n)
    else:
        I, J = range(n), range(n, n + m)
        point_count = n + m
    # positions of the points in the plane; x is drawn before y so the
    # random stream order matches across calls
    x = [random.random() for _ in range(point_count)]
    y = [random.random() for _ in range(point_count)]
    c = {(i, j): distance(x[i], y[i], x[j], y[j]) for i in I for j in J}
    return I, J, c, x, y
if __name__ == "__main__":
    import sys
    random.seed(67)
    # n customers, m candidate facilities drawn from the same random points
    n = 200
    m = n
    I,J,c,x_pos,y_pos = make_data(n,m,same=True)
    k = 20
    model = kmedian(I,J,c,k)
    # model.Params.Threads = 1
    model.optimize()
    # tolerance for reading binary variables back from the LP relaxation
    EPS = 1.e-6
    x,y = model.data
    edges = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > EPS]
    facilities = [j for j in y if model.getVal(y[j]) > EPS]
    print("Optimal value:",model.getObjVal())
    print("Selected facilities:", facilities)
    print("Edges:", edges)
    print("max c:", max([c[i,j] for (i,j) in edges]))
    try: # plot the result using networkx and matplotlib
        import networkx as NX
        import matplotlib.pyplot as P
        P.clf()
        G = NX.Graph()
        # partition nodes by role for per-color drawing below
        facilities = set(j for j in J if model.getVal(y[j]) > EPS)
        other = set(j for j in J if j not in facilities)
        client = set(i for i in I if i not in facilities and i not in other)
        G.add_nodes_from(facilities)
        G.add_nodes_from(client)
        G.add_nodes_from(other)
        for (i,j) in edges:
            G.add_edge(i,j)
        position = {}
        for i in range(len(x_pos)):
            position[i] = (x_pos[i],y_pos[i])
        NX.draw(G,position,with_labels=False,node_color="w",nodelist=facilities)
        NX.draw(G,position,with_labels=False,node_color="c",nodelist=other,node_size=50)
        NX.draw(G,position,with_labels=False,node_color="g",nodelist=client,node_size=50)
        P.show()
    except ImportError:
        print("install 'networkx' and 'matplotlib' for plotting")
{
"content_hash": "fd7babe82d88c895793393c946b29e87",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 96,
"avg_line_length": 31.12727272727273,
"alnum_prop": 0.572429906542056,
"repo_name": "SCIP-Interfaces/PySCIPOpt",
"id": "db191081061414463971c0c60682d4c673fb1bb8",
"size": "3491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/finished/kmedian.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "500394"
},
{
"name": "Shell",
"bytes": "1779"
}
],
"symlink_target": ""
}
|
from .repoclass import *
from .helpers import *
# Repository Eudat b2share Sandbox
HOST = "https://trng-b2share.eudat.eu/api" # api base url
ID = "b2share_sandbox"
LABEL = "b2share Sandbox"
class RepoClassEudat(Repo):
    """Shipper adapter for the EUDAT B2SHARE sandbox repository.

    Implements deposit creation, zip upload into a record's file bucket,
    and draft-metadata patching against the B2SHARE REST API.  Note that
    the module-level ``HOST`` already ends with ``/api``.
    """
    def get_host(self):
        """Return the API base URL (already includes the '/api' suffix)."""
        return str(HOST)
    def get_label(self):
        """Return the human-readable repository label."""
        return str(LABEL)
    def get_id(self):
        """Return the repository identifier used by the shipper."""
        return str(ID)
    def verify_token(self, token):
        """Check whether *token* grants API access.

        Returns True on HTTP 200 and False on 401; any other status falls
        through (returning None), and network errors propagate.
        """
        try:
            global HOST
            global ID
            # NOTE(review): the token is sent both in the URL and in
            # ``params`` — redundant, kept as-is since the server accepts it.
            r = requests.get(''.join((HOST, '/records', '?access_token=', token)), params={'access_token': token}, verify=True, timeout=3)
            status_note(['<', ID, '> token verification: ', xstr(r.status_code), ' ', xstr(r.reason)])
            if r.status_code == 200:
                return True
            elif r.status_code == 401:
                return False
        except:
            raise
    def create_depot(self, access_token):
        """Create a new draft record and return its id as a string."""
        global HOST
        try:
            headers = {"Content-Type": "application/json"}
            base_url = ''.join((HOST, "/records/?access_token=", access_token))
            # minimal test metadata accepted by the sandbox community
            d = {"titles": [{"title": "TestRest"}], "community": "e9b9792e-79fb-4b07-b6b4-b9c2bd06d095",
                 "open_access": True, "community_specific": {}}
            r = requests.post(base_url, data=json.dumps(d), headers=headers)
            status_note([xstr(r.status_code), ' ', xstr(r.reason)])
            status_note(['[debug] ', xstr(r.json())])  # debug
            status_note(['created depot <', xstr(r.json()['id']), '>'])
            return str(r.json()['id'])
        except:
            raise
    def add_zip_to_depot(self, deposition_id, zip_name, target_path, token):
        """Zip *target_path* in memory and upload it into the draft's bucket.

        Skips the upload (logging an error note) when the directory size
        exceeds ``env_max_dir_size_mb``.
        """
        global HOST
        try:
            fsum = files_dir_size(target_path)
            if fsum <= env_max_dir_size_mb:
                # retrieve the draft's file-bucket URL first:
                headers = {"Content-Type": "application/json"}
                r = requests.get(''.join((HOST, '/records/', deposition_id, '/draft?access_token=', token)),
                                 headers=headers)
                status_note([xstr(r.status_code), ' ', xstr(r.reason)])
                bucket_url = ''
                if r.status_code == 200:
                    if 'links' in r.json():
                        if 'bucket' in r.json()['links']:
                            bucket_url = r.json()['links']['bucket']
                            status_note(['using bucket <', bucket_url, '>'])
                else:
                    status_note(xstr(r.text))
                # upload file into bucket:
                headers = {"Content-Type": "application/octet-stream"}
                # build the zip entirely in memory, preserving paths relative
                # to target_path
                filelike = BytesIO()
                zipf = zipfile.ZipFile(filelike, 'w', zipfile.ZIP_DEFLATED)
                for root, dirs, files in os.walk(target_path):
                    for file in files:
                        zipf.write(os.path.join(root, file),
                                   arcname=os.path.relpath(os.path.join(root, file), target_path))
                zipf.close()
                filelike.seek(0)
                r = requests.put(''.join((bucket_url, '/', zip_name, '?access_token=', token)), data=filelike.read(),
                                 headers=headers)
                status_note([xstr(r.status_code), ' ', xstr(r.reason)])
                if r.status_code == 200:
                    status_note([xstr(r.status_code), ' uploaded file <', zip_name, '> to depot <', deposition_id, '> ',
                                 xstr(r.json()['checksum'])])
                else:
                    status_note(xstr(r.text))
            else:
                status_note("! error: file not found")
        except Exception as exc:
            # raise
            status_note(['! error: ', xstr(exc.args[0])])
    def update_md(self, record_id, my_md, access_token):
        """PATCH the draft record's metadata with a JSON-Patch document."""
        global HOST
        try:
            # fixed: HOST already ends with '/api'; the previous
            # ''.join((HOST, "/api/records/", ...)) hit '/api/api/records/'.
            base_url = ''.join((HOST, "/records/", record_id, "/draft?access_token=", access_token))
            # example patch document:
            # [{"op": "add", "path": "/keywords", "value": ["keyword1", "keyword2"]}]
            headers = {"Content-Type": "application/json-patch+json"}
            r = requests.patch(base_url, data=json.dumps(my_md), headers=headers)
            status_note([xstr(r.status_code), ' ', xstr(r.reason)])
            status_note(xstr(r.json()))
        except:
            raise
|
{
"content_hash": "340eb52213207a368574a4ef87ebee16",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 138,
"avg_line_length": 43.84905660377358,
"alnum_prop": 0.4961273666092943,
"repo_name": "o2r-project/o2r-shipper",
"id": "15e10dc92a5b87beb2c1b32be75e6ccc5c8425d3",
"size": "4648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "repos/repob2sharesandbox.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1799"
},
{
"name": "Python",
"bytes": "60999"
},
{
"name": "Shell",
"bytes": "417"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from wsgi.app import app
class Message(object):
    """ Represent a message to show to the user """
    def __init__(self, message, level):
        self.message = message  # text shown to the user
        self.level = level      # severity tag: success/info/warning/danger
    def __str__(self):
        # Templates render the bare text via str(); keep it unadorned.
        return self.message
    def __repr__(self):
        # Debug-friendly representation (new; __str__ is unchanged).
        return '{}({!r}, {!r})'.format(type(self).__name__, self.message, self.level)
def add_message(message, level):
    """Wrap *message* in a Message at *level* and queue it on the app."""
    app.add_message(Message(message, level))
def success(message):
    """Queue *message* at the 'success' level."""
    add_message(message, 'success')
def info(message):
    """Queue *message* at the 'info' level."""
    add_message(message, 'info')
def warning(message):
    """Queue *message* at the 'warning' level."""
    add_message(message, 'warning')
def danger(message):
    """Queue *message* at the 'danger' level."""
    add_message(message, 'danger')
|
{
"content_hash": "0592669ef9f104e3e4f88b03eaa0ce21",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 56,
"avg_line_length": 18.323529411764707,
"alnum_prop": 0.6581059390048154,
"repo_name": "Gagaro/PimpMyBot",
"id": "f7fd5218c50d2327c946c1834c84342a66f83131",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pimpmybot/wsgi/messages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10060"
},
{
"name": "HTML",
"bytes": "52024"
},
{
"name": "JavaScript",
"bytes": "3816"
},
{
"name": "Makefile",
"bytes": "235"
},
{
"name": "Python",
"bytes": "83160"
}
],
"symlink_target": ""
}
|
"""
ASN.1 type classes for the time stamp protocol (TSP). Exports the following
items:
- TimeStampReq()
- TimeStampResp()
Also adds TimeStampedData() support to asn1crypto.cms.ContentInfo(),
TimeStampedData() and TSTInfo() support to
asn1crypto.cms.EncapsulatedContentInfo() and some oids and value parsers to
asn1crypto.cms.CMSAttribute().
Other type classes are defined that help compose the types listed above.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from .algos import DigestAlgorithm
from .cms import (
CMSAttribute,
CMSAttributeType,
ContentInfo,
ContentType,
EncapsulatedContentInfo,
)
from .core import (
Any,
BitString,
Boolean,
Choice,
GeneralizedTime,
IA5String,
Integer,
ObjectIdentifier,
OctetString,
Sequence,
SequenceOf,
SetOf,
UTF8String,
)
from .crl import CertificateList
from .x509 import (
Attributes,
CertificatePolicies,
GeneralName,
GeneralNames,
)
# The structures in this file are based on https://tools.ietf.org/html/rfc3161,
# https://tools.ietf.org/html/rfc4998, https://tools.ietf.org/html/rfc5544,
# https://tools.ietf.org/html/rfc5035, https://tools.ietf.org/html/rfc2634
class Version(Integer):
    """INTEGER version field mapped to symbolic names v0..v5."""
    _map = {
        0: 'v0',
        1: 'v1',
        2: 'v2',
        3: 'v3',
        4: 'v4',
        5: 'v5',
    }
class MessageImprint(Sequence):
    """Digest algorithm plus the hash of the data being time-stamped."""
    _fields = [
        ('hash_algorithm', DigestAlgorithm),
        ('hashed_message', OctetString),
    ]
class Accuracy(Sequence):
    """Time-stamp accuracy as optional seconds/millis/micros components."""
    _fields = [
        ('seconds', Integer, {'optional': True}),
        ('millis', Integer, {'tag_type': 'implicit', 'tag': 0, 'optional': True}),
        ('micros', Integer, {'tag_type': 'implicit', 'tag': 1, 'optional': True}),
    ]
class Extension(Sequence):
    """Generic extension: OID, criticality flag and opaque value."""
    _fields = [
        ('extn_id', ObjectIdentifier),
        ('critical', Boolean, {'default': False}),
        ('extn_value', OctetString),
    ]
class Extensions(SequenceOf):
    """SEQUENCE OF Extension."""
    _child_spec = Extension
class TSTInfo(Sequence):
    """Payload of a time-stamp token: policy, message imprint, serial and
    generation time, with optional accuracy, nonce, TSA name and
    extensions."""
    _fields = [
        ('version', Version),
        ('policy', ObjectIdentifier),
        ('message_imprint', MessageImprint),
        ('serial_number', Integer),
        ('gen_time', GeneralizedTime),
        ('accuracy', Accuracy, {'optional': True}),
        ('ordering', Boolean, {'default': False}),
        ('nonce', Integer, {'optional': True}),
        ('tsa', GeneralName, {'tag_type': 'explicit', 'tag': 0, 'optional': True}),
        ('extensions', Extensions, {'tag_type': 'implicit', 'tag': 1, 'optional': True}),
    ]
class TimeStampReq(Sequence):
    """Client request: the message imprint to stamp, plus optional policy,
    nonce, certificate-request flag and extensions."""
    _fields = [
        ('version', Version),
        ('message_imprint', MessageImprint),
        ('req_policy', ObjectIdentifier, {'optional': True}),
        ('nonce', Integer, {'optional': True}),
        ('cert_req', Boolean, {'default': False}),
        ('extensions', Extensions, {'tag_type': 'implicit', 'tag': 0, 'optional': True}),
    ]
class PKIStatus(Integer):
    """Outcome code of a time-stamp request."""
    _map = {
        0: 'granted',
        1: 'granted_with_mods',
        2: 'rejection',
        3: 'waiting',
        4: 'revocation_warning',
        5: 'revocation_notification',
    }
class PKIFreeText(SequenceOf):
    """Human-readable status messages."""
    _child_spec = UTF8String
class PKIFailureInfo(BitString):
    """Named bit positions detailing why a request failed."""
    _map = {
        0: 'bad_alg',
        2: 'bad_request',
        5: 'bad_data_format',
        14: 'time_not_available',
        15: 'unaccepted_policy',
        16: 'unaccepted_extensions',
        17: 'add_info_not_available',
        25: 'system_failure',
    }
class PKIStatusInfo(Sequence):
    """Status code plus optional text and failure detail."""
    _fields = [
        ('status', PKIStatus),
        ('status_string', PKIFreeText, {'optional': True}),
        ('fail_info', PKIFailureInfo, {'optional': True}),
    ]
class TimeStampResp(Sequence):
    """TSA response: status info plus the time-stamp token.

    NOTE(review): 'time_stamp_token' is not marked optional here, yet a
    rejection response would carry no token — confirm against RFC 3161.
    """
    _fields = [
        ('status', PKIStatusInfo),
        ('time_stamp_token', ContentInfo),
    ]
class MetaData(Sequence):
_fields = [
('hash_protected', Boolean),
('file_name', UTF8String, {'optional': True}),
('media_type', IA5String, {'optional': True}),
('other_meta_data', Attributes, {'optional': True}),
]
class TimeStampAndCRL(SequenceOf):
_fields = [
('time_stamp', EncapsulatedContentInfo),
('crl', CertificateList, {'optional': True}),
]
class TimeStampTokenEvidence(SequenceOf):
    """RFC 5544 TimeStampTokenEvidence - a chain of TimeStampAndCRL entries."""
    _child_spec = TimeStampAndCRL
class DigestAlgorithms(SequenceOf):
    """SEQUENCE OF DigestAlgorithm identifiers (RFC 4998)."""
    _child_spec = DigestAlgorithm
class EncryptionInfo(Sequence):
    """
    RFC 4998 EncryptionInfo - algorithm OID plus opaque parameter value.
    """
    _fields = [
        ('encryption_info_type', ObjectIdentifier),
        ('encryption_info_value', Any),
    ]
class PartialHashtree(SequenceOf):
    """RFC 4998 PartialHashtree - one list of sibling hashes."""
    _child_spec = OctetString
class PartialHashtrees(SequenceOf):
    """SEQUENCE OF PartialHashtree (reduced Merkle hash tree)."""
    _child_spec = PartialHashtree
class ArchiveTimeStamp(Sequence):
    """
    RFC 4998 ArchiveTimeStamp - a timestamp over a reduced hash tree.
    """
    _fields = [
        ('digest_algorithm', DigestAlgorithm, {'tag_type': 'implicit', 'tag': 0, 'optional': True}),
        ('attributes', Attributes, {'tag_type': 'implicit', 'tag': 1, 'optional': True}),
        ('reduced_hashtree', PartialHashtrees, {'tag_type': 'implicit', 'tag': 2, 'optional': True}),
        ('time_stamp', ContentInfo),
    ]
class ArchiveTimeStampSequence(SequenceOf):
    """SEQUENCE OF ArchiveTimeStamp (renewal chain, RFC 4998)."""
    _child_spec = ArchiveTimeStamp
class EvidenceRecord(Sequence):
    """
    RFC 4998 EvidenceRecord - long-term archive evidence for content.
    """
    _fields = [
        ('version', Version),
        ('digest_algorithms', DigestAlgorithms),
        ('crypto_infos', Attributes, {'tag_type': 'implicit', 'tag': 0, 'optional': True}),
        ('encryption_info', EncryptionInfo, {'tag_type': 'implicit', 'tag': 1, 'optional': True}),
        ('archive_time_stamp_sequence', ArchiveTimeStampSequence),
    ]
class OtherEvidence(Sequence):
    """
    RFC 5544 OtherEvidence - extension point: OID plus opaque value.
    """
    _fields = [
        ('oe_type', ObjectIdentifier),
        ('oe_value', Any),
    ]
class Evidence(Choice):
    """RFC 5544 Evidence CHOICE - timestamp tokens, an ERS record, or other."""
    _alternatives = [
        ('tst_evidence', TimeStampTokenEvidence, {'tag_type': 'implicit', 'tag': 0}),
        ('ers_evidence', EvidenceRecord, {'tag_type': 'implicit', 'tag': 1}),
        ('other_evidence', OtherEvidence, {'tag_type': 'implicit', 'tag': 2}),
    ]
class TimeStampedData(Sequence):
    """
    RFC 5544 TimeStampedData - content (or a URI to it) plus its evidence.
    """
    _fields = [
        ('version', Version),
        ('data_uri', IA5String, {'optional': True}),
        ('meta_data', MetaData, {'optional': True}),
        ('content', OctetString, {'optional': True}),
        ('temporal_evidence', Evidence),
    ]
class IssuerSerial(Sequence):
    """
    RFC 2634 IssuerSerial - identifies a certificate by issuer and serial.
    """
    _fields = [
        ('issuer', GeneralNames),
        ('serial_number', Integer),
    ]
class ESSCertID(Sequence):
    """
    RFC 2634 ESSCertID - SHA-1 certificate hash with optional issuer/serial.
    """
    _fields = [
        ('cert_hash', OctetString),
        ('issuer_serial', IssuerSerial, {'optional': True}),
    ]
class ESSCertIDs(SequenceOf):
    """SEQUENCE OF ESSCertID."""
    _child_spec = ESSCertID
class SigningCertificate(Sequence):
    """
    RFC 2634 SigningCertificate attribute value.
    """
    _fields = [
        ('certs', ESSCertIDs),
        ('policies', CertificatePolicies, {'optional': True}),
    ]
class SetOfSigningCertificates(SetOf):
    """SET OF SigningCertificate, for use as a CMS attribute value."""
    _child_spec = SigningCertificate
class ESSCertIDv2(Sequence):
    """
    RFC 5035 ESSCertIDv2 - like ESSCertID but with a configurable hash
    algorithm that defaults to SHA-256.
    """
    _fields = [
        ('hash_algorithm', DigestAlgorithm, {'default': {'algorithm': 'sha256'}}),
        ('cert_hash', OctetString),
        ('issuer_serial', IssuerSerial, {'optional': True}),
    ]
class ESSCertIDv2s(SequenceOf):
    """SEQUENCE OF ESSCertIDv2."""
    _child_spec = ESSCertIDv2
class SigningCertificateV2(Sequence):
    """
    RFC 5035 SigningCertificateV2 attribute value.
    """
    _fields = [
        ('certs', ESSCertIDv2s),
        ('policies', CertificatePolicies, {'optional': True}),
    ]
class SetOfSigningCertificatesV2(SetOf):
    """SET OF SigningCertificateV2, for use as a CMS attribute value."""
    _child_spec = SigningCertificateV2
# Wire the timestamp structures into the generic CMS types: register the
# content-type OIDs and the signing-certificate CMS attributes defined by
# RFC 3161/5544 and RFC 2634/5035 so ContentInfo/CMSAttribute can decode them.
EncapsulatedContentInfo._oid_specs['tst_info'] = TSTInfo
EncapsulatedContentInfo._oid_specs['timestamped_data'] = TimeStampedData
ContentInfo._oid_specs['timestamped_data'] = TimeStampedData
ContentType._map['1.2.840.113549.1.9.16.1.4'] = 'tst_info'
ContentType._map['1.2.840.113549.1.9.16.1.31'] = 'timestamped_data'
CMSAttributeType._map['1.2.840.113549.1.9.16.2.12'] = 'signing_certificate'
CMSAttribute._oid_specs['signing_certificate'] = SetOfSigningCertificates
CMSAttributeType._map['1.2.840.113549.1.9.16.2.47'] = 'signing_certificate_v2'
CMSAttribute._oid_specs['signing_certificate_v2'] = SetOfSigningCertificatesV2
|
{
"content_hash": "b18fbebdcbdb964514d39b27bb53616d",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 101,
"avg_line_length": 26.15909090909091,
"alnum_prop": 0.6174754871540276,
"repo_name": "todaychi/hue",
"id": "ac73821bc912eef3b4b3eaf88fd2697093f21b5d",
"size": "8074",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/asn1crypto-0.22.0/asn1crypto/tsp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2717013"
},
{
"name": "C++",
"bytes": "199945"
},
{
"name": "CSS",
"bytes": "691188"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "23983570"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "5432201"
},
{
"name": "Lex",
"bytes": "39802"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "146585"
},
{
"name": "Mako",
"bytes": "3525679"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "45877726"
},
{
"name": "Roff",
"bytes": "16669"
},
{
"name": "Shell",
"bytes": "46975"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "353353"
}
],
"symlink_target": ""
}
|
import logging
from contextlib import contextmanager
from pathlib import Path
from typing import Dict, Set, Tuple
from airflow.configuration import conf
log = logging.getLogger(__name__)
# TODO(potiuk): change the tests to use a better approach, using Pytest fixtures rather than
# the `unit_test_mode` parameter. It's potentially disruptive so we should not do it **JUST** yet
def remove_all_configurations():
    """Blank out Airflow's in-memory configuration, returning what was there.

    The returned ``(sections, proxies)`` pair can be handed to
    ``restore_all_configurations`` to undo the removal.
    """
    previous_sections, previous_proxies = conf._sections, conf._proxies
    conf._sections, conf._proxies = {}, {}
    return previous_sections, previous_proxies
def restore_all_configurations(sections: Dict, proxies: Dict):
    """Reinstate configuration state captured by ``remove_all_configurations``."""
    conf._sections, conf._proxies = sections, proxies  # type: ignore
@contextmanager
def use_config(config: str):
    """
    Temporarily load the named deprecated test configuration template,
    restoring the previous configuration on exit.
    """
    saved = remove_all_configurations()
    template_path = Path(__file__).parents[1] / "config_templates" / config
    conf.read(str(template_path))
    try:
        yield
    finally:
        restore_all_configurations(*saved)
@contextmanager
def set_deprecated_options(deprecated_options: Dict[Tuple[str, str], Tuple[str, str, str]]):
    """
    Temporarily swap in the provided deprecated-options mapping on ``conf``,
    restoring the original mapping on exit.
    """
    saved = conf.deprecated_options
    conf.deprecated_options = deprecated_options
    try:
        yield
    finally:
        conf.deprecated_options = saved
@contextmanager
def set_sensitive_config_values(sensitive_config_values: Set[Tuple[str, str]]):
    """
    Temporarily swap in the provided sensitive-value set on ``conf``,
    restoring the original set on exit.
    """
    saved = conf.sensitive_config_values
    conf.sensitive_config_values = sensitive_config_values
    try:
        yield
    finally:
        conf.sensitive_config_values = saved
|
{
"content_hash": "205924724bad84ab6b2469f9d123f41f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 106,
"avg_line_length": 29.774193548387096,
"alnum_prop": 0.7009750812567714,
"repo_name": "danielvdende/incubator-airflow",
"id": "4728d33b4bd7dd716d28ca7148bf29383410564e",
"size": "2632",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/utils/test_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
from cloudkittyclient.common import base
class BaseAttributeMixin(object):
    """Shared helpers that resolve related resources from ``*_id`` attributes."""

    def _validate_attribute(self, attribute):
        # Build a kwargs dict only when the attribute carries a truthy value;
        # otherwise fall through and return None.
        value = getattr(self, attribute)
        if not value:
            return None
        return {attribute: value}

    def _get_resource(self, mgr, attribute):
        # Fetch the single related resource, or None when the id is unset.
        lookup = self._validate_attribute(attribute)
        if not lookup:
            return None
        return mgr(client=self.manager.client).get(**lookup)

    def _get_resources(self, mgr, attribute):
        # Best-effort fetch of the related resource list; any failure (or an
        # unset id) yields an empty list rather than an error.
        lookup = self._validate_attribute(attribute)
        if lookup:
            try:
                return mgr(client=self.manager.client).findall(**lookup)
            except Exception:
                pass
        return []
class ServiceMixin(BaseAttributeMixin):
    # Adds a lazy ``service`` accessor resolved through ``service_id``.
    @property
    def service(self):
        """The related Service resource, or None if ``service_id`` is unset."""
        return self._get_resource(ServiceManager, 'service_id')
class FieldMixin(BaseAttributeMixin):
    # Adds a lazy ``field`` accessor resolved through ``field_id``.
    @property
    def field(self):
        """The related Field resource, or None if ``field_id`` is unset."""
        return self._get_resource(FieldManager, 'field_id')
class GroupMixin(BaseAttributeMixin):
    # Adds a lazy ``group`` accessor resolved through ``group_id``.
    @property
    def group(self):
        """The related Group resource, or None if ``group_id`` is unset."""
        return self._get_resource(GroupManager, 'group_id')
class FieldsMixin(BaseAttributeMixin):
    # Subclasses override ``attribute`` with the id attribute to filter on.
    attribute = ''
    @property
    def fields(self):
        """All related Field resources (best-effort; may be empty)."""
        return self._get_resources(FieldManager, self.attribute)
class MappingsMixin(BaseAttributeMixin):
    # Subclasses override ``attribute`` with the id attribute to filter on.
    attribute = ''
    @property
    def mappings(self):
        """All related Mapping resources (best-effort; may be empty)."""
        return self._get_resources(MappingManager, self.attribute)
class ThresholdsMixin(BaseAttributeMixin):
    # Subclasses override ``attribute`` with the id attribute to filter on.
    attribute = ''
    @property
    def thresholds(self):
        """All related Threshold resources (best-effort; may be empty)."""
        return self._get_resources(ThresholdManager, self.attribute)
class Service(base.Resource, FieldsMixin, MappingsMixin, ThresholdsMixin):
    """A hashmap rating Service resource."""
    key = 'service'
    attribute = 'service_id'
    def __repr__(self):
        return "<hashmap.Service %s>" % self._info
class ServiceManager(base.CrudManager):
    """CRUD manager for hashmap Service resources."""
    resource_class = Service
    base_url = '/v1/rating/module_config/hashmap'
    key = 'service'
    collection_key = 'services'
class Field(base.Resource, ServiceMixin, MappingsMixin, ThresholdsMixin):
    """A hashmap rating Field resource."""
    key = 'field'
    attribute = 'field_id'
    def __repr__(self):
        return "<hashmap.Field %s>" % self._info
class FieldManager(base.CrudManager):
    """CRUD manager for hashmap Field resources."""
    resource_class = Field
    base_url = '/v1/rating/module_config/hashmap'
    key = 'field'
    collection_key = 'fields'
class Mapping(base.Resource, ServiceMixin, FieldMixin, GroupMixin):
    """A hashmap rating Mapping resource."""
    key = 'mapping'
    def __repr__(self):
        return "<hashmap.Mapping %s>" % self._info
class MappingManager(base.CrudManager):
    """CRUD manager for hashmap Mapping resources."""
    resource_class = Mapping
    base_url = '/v1/rating/module_config/hashmap'
    key = 'mapping'
    collection_key = 'mappings'
class Group(base.Resource, MappingsMixin, ThresholdsMixin):
    """A hashmap rating Group resource."""
    key = 'group'
    attribute = 'group_id'
    def __repr__(self):
        return "<hashmap.Group %s>" % self._info
    def delete(self, recursive=False):
        """Delete this group; cascade to its mappings/thresholds if recursive."""
        return self.manager.delete(group_id=self.group_id, recursive=recursive)
class GroupManager(base.CrudManager):
    """CRUD manager for hashmap Group resources."""
    resource_class = Group
    base_url = '/v1/rating/module_config/hashmap'
    key = 'group'
    collection_key = 'groups'

    def delete(self, group_id, recursive=False):
        """Delete a group, optionally cascading to related resources."""
        target = self.build_url(group_id=group_id)
        if recursive:
            target = target + "?recursive=True"
        return self._delete(target)
class Threshold(base.Resource, ServiceMixin, FieldMixin, GroupMixin):
    """A hashmap rating Threshold resource."""
    key = 'threshold'
    def __repr__(self):
        return "<hashmap.Threshold %s>" % self._info
class ThresholdManager(base.CrudManager):
    """CRUD manager for hashmap Threshold resources."""
    resource_class = Threshold
    base_url = '/v1/rating/module_config/hashmap'
    key = 'threshold'
    collection_key = 'thresholds'
|
{
"content_hash": "5d59236e05010bcd59a46ed184e24ca4",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 25.210884353741495,
"alnum_prop": 0.6548839719373988,
"repo_name": "muraliselva10/python-cloudkittyclient",
"id": "85f4d00249e2d0dcf77cf70b38d888ab227c1cae",
"size": "4313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudkittyclient/v1/rating/hashmap/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "219364"
}
],
"symlink_target": ""
}
|
"""SVG handling utilities."""
def svg2str(display_object, dpi=300):
    """
    Serialize a nilearn display object as an SVG string.
    """
    from io import StringIO

    buffer = StringIO()
    figure = display_object.frame_axes.figure
    figure.savefig(buffer, dpi=dpi, format="svg", facecolor="k", edgecolor="k")
    buffer.seek(0)
    return buffer.getvalue()
def combine_svg(svg_list, axis="vertical"):
    """
    Compose the input SVGs into one standalone SVG.

    Parameters
    ----------
    svg_list : list of str
        SVG documents to stack.
    axis : {"vertical", "horizontal"}
        Direction in which the panels are stacked.

    Raises
    ------
    ValueError
        If ``axis`` is not one of the supported values (previously this
        surfaced as a confusing NameError on ``totalsize``).
    """
    import numpy as np
    import svgutils.transform as svgt

    if axis not in ("vertical", "horizontal"):
        raise ValueError("axis must be 'vertical' or 'horizontal'")

    # Read all svg files and get roots
    svgs = [svgt.fromstring(f.encode("utf-8")) for f in svg_list]
    roots = [f.getroot() for f in svgs]

    # Query the size of each; width/height are strings like "600pt", so the
    # two-character unit suffix is stripped before conversion.
    sizes = [(int(f.width[:-2]), int(f.height[:-2])) for f in svgs]

    scales = [1.0] * len(svgs)
    if axis == "vertical":
        # Scale every panel so all widths match the first panel's width.
        if not all(size[0] == sizes[0][0] for size in sizes[1:]):
            ref_size = sizes[0]
            for i, els in enumerate(sizes):
                scales[i] = ref_size[0] / els[0]
        newsizes = [
            tuple(size) for size in np.array(sizes) * np.array(scales)[..., np.newaxis]
        ]
        # Total size: shared width, summed heights.
        totalsize = [newsizes[0][0], np.sum(newsizes, axis=0)[1]]
    else:  # horizontal
        # Scale every panel so all heights match the first panel's height.
        # BUG FIX: the original compared size[0] (widths) against the
        # reference *height*, so mismatched panels were never rescaled.
        if not all(size[1] == sizes[0][1] for size in sizes[1:]):
            ref_size = sizes[0]
            for i, els in enumerate(sizes):
                scales[i] = ref_size[1] / els[1]
        newsizes = [
            tuple(size) for size in np.array(sizes) * np.array(scales)[..., np.newaxis]
        ]
        # Total size: summed widths, shared height.
        totalsize = [np.sum(newsizes, axis=0)[0], newsizes[0][1]]

    # Compose the views panel: total size is the shared dimension of the
    # first panel plus the sum along the stacking direction.
    fig = svgt.SVGFigure(totalsize[0], totalsize[1])
    if axis == "vertical":
        yoffset = 0
        for i, r in enumerate(roots):
            size = newsizes[i]
            r.moveto(0, yoffset, scale=scales[i])
            yoffset += size[1]
            fig.append(r)
    else:
        xoffset = 0
        for i, r in enumerate(roots):
            size = newsizes[i]
            r.moveto(xoffset, 0, scale=scales[i])
            xoffset += size[0]
            fig.append(r)
    return fig
def extract_svg(display_object, dpi=300):
    """
    Strip the XML preamble from an SVG rendered with nilearn.

    NOTE(review): the slice also drops the closing ``</svg>`` tag --
    presumably so the fragment can be embedded/composed downstream; confirm
    before changing.
    """
    rendered = svg2str(display_object, dpi)
    begin = rendered.find("<svg ")
    end = rendered.rfind("</svg>")
    return rendered[begin:end]
|
{
"content_hash": "aa4e6f9d10ff925cbdf839512dea6dbf",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 87,
"avg_line_length": 31.183908045977013,
"alnum_prop": 0.5665315149281238,
"repo_name": "poldracklab/mriqc",
"id": "076ea35de93e90b5ab5f47f9ee9fd391b106c821",
"size": "3592",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mriqc/viz/svg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5560"
},
{
"name": "Dockerfile",
"bytes": "7384"
},
{
"name": "HTML",
"bytes": "16340"
},
{
"name": "JavaScript",
"bytes": "136130"
},
{
"name": "Jupyter Notebook",
"bytes": "280517"
},
{
"name": "Makefile",
"bytes": "1274"
},
{
"name": "Python",
"bytes": "473828"
},
{
"name": "Shell",
"bytes": "960"
},
{
"name": "Tcl",
"bytes": "2936"
}
],
"symlink_target": ""
}
|
"""Tests for jax_cfd.grids."""
# TODO(jamieas): Consider updating these tests using the `hypothesis` framework.
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jax_cfd.base import boundaries
from jax_cfd.base import grids
from jax_cfd.base import test_util
import numpy as np
class GridArrayTest(test_util.TestCase):
  """Tests for grids.GridArray: pytree support, offsets/grids, arithmetic."""

  def test_tree_util(self):
    """A GridArray round-trips through jax tree flatten/unflatten."""
    array = grids.GridArray(jnp.arange(3), offset=(0,), grid=grids.Grid((3,)))
    flat, treedef = jax.tree_flatten(array)
    roundtripped = jax.tree_unflatten(treedef, flat)
    self.assertArrayEqual(array, roundtripped)

  def test_consistent_offset(self):
    """consistent_offset returns the shared offset or raises on mismatch."""
    data = jnp.arange(3)
    grid = grids.Grid((3,))
    array_offset_0 = grids.GridArray(data, offset=(0,), grid=grid)
    array_offset_1 = grids.GridArray(data, offset=(1,), grid=grid)
    offset = grids.consistent_offset(array_offset_0, array_offset_0)
    self.assertEqual(offset, (0,))
    with self.assertRaises(grids.InconsistentOffsetError):
      grids.consistent_offset(array_offset_0, array_offset_1)

  def test_averaged_offset(self):
    """averaged_offset averages per-dimension offsets elementwise."""
    data = jnp.arange(3)
    grid = grids.Grid((3,))
    array_offset_0 = grids.GridArray(data, offset=(0,), grid=grid)
    array_offset_1 = grids.GridArray(data, offset=(1,), grid=grid)
    averaged_offset = grids.averaged_offset(array_offset_0, array_offset_1)
    self.assertEqual(averaged_offset, (0.5,))

  def test_control_volume_offsets(self):
    """control_volume_offsets shifts each axis by +0.5 in turn."""
    # BUG FIX: the original used jnp.arange(5, 5), which produces an *empty*
    # array rather than data matching the (5, 5) grid. Use a real 5x5 array.
    data = jnp.arange(25).reshape((5, 5))
    grid = grids.Grid((5, 5))
    array = grids.GridArray(data, offset=(0, 0), grid=grid)
    cv_offset = grids.control_volume_offsets(array)
    self.assertEqual(cv_offset, ((0.5, 0), (0, 0.5)))

  def test_consistent_grid(self):
    """consistent_grid returns the shared grid or raises on mismatch."""
    data = jnp.arange(3)
    offset = (0,)
    array_grid_3 = grids.GridArray(data, offset, grid=grids.Grid((3,)))
    array_grid_5 = grids.GridArray(data, offset, grid=grids.Grid((5,)))
    grid = grids.consistent_grid(array_grid_3, array_grid_3)
    self.assertEqual(grid, grids.Grid((3,)))
    with self.assertRaises(grids.InconsistentGridError):
      grids.consistent_grid(array_grid_3, array_grid_5)

  def test_add_sub_correctness(self):
    """+ and - on matching-offset/grid arrays operate elementwise."""
    values_1 = np.random.uniform(size=(5, 5))
    values_2 = np.random.uniform(size=(5, 5))
    offsets = (0.5, 0.5)
    grid = grids.Grid((5, 5))
    input_array_1 = grids.GridArray(values_1, offsets, grid)
    input_array_2 = grids.GridArray(values_2, offsets, grid)
    actual_sum = input_array_1 + input_array_2
    actual_sub = input_array_1 - input_array_2
    expected_sum = grids.GridArray(values_1 + values_2, offsets, grid)
    expected_sub = grids.GridArray(values_1 - values_2, offsets, grid)
    self.assertAllClose(actual_sum, expected_sum, atol=1e-7)
    self.assertAllClose(actual_sub, expected_sub, atol=1e-7)

  def test_add_sub_offset_raise(self):
    """+ and - reject operands whose offsets differ."""
    values_1 = np.random.uniform(size=(5, 5))
    values_2 = np.random.uniform(size=(5, 5))
    offset_1 = (0.5, 0.5)
    offset_2 = (0.5, 0.0)
    grid = grids.Grid((5, 5))
    input_array_1 = grids.GridArray(values_1, offset_1, grid)
    input_array_2 = grids.GridArray(values_2, offset_2, grid)
    with self.assertRaises(grids.InconsistentOffsetError):
      _ = input_array_1 + input_array_2
    with self.assertRaises(grids.InconsistentOffsetError):
      _ = input_array_1 - input_array_2

  def test_add_sub_grid_raise(self):
    """+ and - reject operands whose grids differ."""
    values_1 = np.random.uniform(size=(5, 5))
    values_2 = np.random.uniform(size=(5, 5))
    offset = (0.5, 0.5)
    grid_1 = grids.Grid((5, 5), domain=((0, 1), (0, 1)))
    grid_2 = grids.Grid((5, 5), domain=((-2, 2), (-2, 2)))
    input_array_1 = grids.GridArray(values_1, offset, grid_1)
    input_array_2 = grids.GridArray(values_2, offset, grid_2)
    with self.assertRaises(grids.InconsistentGridError):
      _ = input_array_1 + input_array_2
    with self.assertRaises(grids.InconsistentGridError):
      _ = input_array_1 - input_array_2

  def test_mul_div_correctness(self):
    """*, / work elementwise and with scalars, preserving offset/grid."""
    values_1 = np.random.uniform(size=(5, 5))
    values_2 = np.random.uniform(size=(5, 5))
    scalar = 3.1415
    offset = (0.5, 0.5)
    grid = grids.Grid((5, 5))
    input_array_1 = grids.GridArray(values_1, offset, grid)
    input_array_2 = grids.GridArray(values_2, offset, grid)
    actual_mul = input_array_1 * input_array_2
    array_1_times_scalar = input_array_1 * scalar
    expected_1_times_scalar = grids.GridArray(values_1 * scalar, offset, grid)
    actual_div = input_array_1 / 2.5
    expected_div = grids.GridArray(values_1 / 2.5, offset, grid)
    expected_mul = grids.GridArray(values_1 * values_2, offset, grid)
    self.assertAllClose(actual_mul, expected_mul, atol=1e-7)
    self.assertAllClose(
        array_1_times_scalar, expected_1_times_scalar, atol=1e-7)
    self.assertAllClose(actual_div, expected_div, atol=1e-7)

  def test_add_inplace(self):
    """+= accepts a raw array and adds it elementwise."""
    values_1 = np.random.uniform(size=(5, 5))
    values_2 = np.random.uniform(size=(5, 5))
    offsets = (0.5, 0.5)
    grid = grids.Grid((5, 5))
    array = grids.GridArray(values_1, offsets, grid)
    array += values_2
    expected = grids.GridArray(values_1 + values_2, offsets, grid)
    self.assertAllClose(array, expected, atol=1e-7)

  def test_jit(self):
    """GridArray can be passed through jax.jit as a pytree argument."""
    u = grids.GridArray(jnp.ones([10, 10]), (.5, .5), grids.Grid((10, 10)))

    def f(u):
      return u.data < 2.

    self.assertAllClose(f(u), jax.jit(f)(u))

  def test_applied(self):
    """grids.applied lifts an elementwise function to GridArrays."""
    grid = grids.Grid((10, 10))
    offset = (0.5, 0.5)
    u = grids.GridArray(jnp.ones([10, 10]), offset, grid)
    expected = grids.GridArray(-jnp.ones([10, 10]), offset, grid)
    actual = grids.applied(jnp.negative)(u)
    self.assertAllClose(expected, actual)
class GridVariableTest(test_util.TestCase):
  """Tests for grids.GridVariable: a GridArray paired with boundary conditions."""

  def test_constructor_and_attributes(self):
    # GridVariable should expose array/bc and pass through array attributes.
    with self.subTest('1d'):
      grid = grids.Grid((10,))
      data = np.zeros((10,), dtype=np.float32)
      array = grids.GridArray(data, offset=(0.5,), grid=grid)
      bc = boundaries.periodic_boundary_conditions(grid.ndim)
      variable = grids.GridVariable(array, bc)
      self.assertArrayEqual(variable.array, array)
      self.assertEqual(variable.bc, bc)
      self.assertEqual(variable.dtype, np.float32)
      self.assertEqual(variable.shape, (10,))
      self.assertArrayEqual(variable.data, data)
      self.assertEqual(variable.offset, (0.5,))
      self.assertEqual(variable.grid, grid)
    with self.subTest('2d'):
      grid = grids.Grid((10, 10))
      data = np.zeros((10, 10), dtype=np.float32)
      array = grids.GridArray(data, offset=(0.5, 0.5), grid=grid)
      bc = boundaries.periodic_boundary_conditions(grid.ndim)
      variable = grids.GridVariable(array, bc)
      self.assertArrayEqual(variable.array, array)
      self.assertEqual(variable.bc, bc)
      self.assertEqual(variable.dtype, np.float32)
      self.assertEqual(variable.shape, (10, 10))
      self.assertArrayEqual(variable.data, data)
      self.assertEqual(variable.offset, (0.5, 0.5))
      self.assertEqual(variable.grid, grid)
    with self.subTest('batch dim data'):
      # Leading axes beyond grid.ndim (here, a batch axis of 5) are allowed.
      grid = grids.Grid((10, 10))
      data = np.zeros((5, 10, 10), dtype=np.float32)
      array = grids.GridArray(data, offset=(0.5, 0.5), grid=grid)
      bc = boundaries.periodic_boundary_conditions(grid.ndim)
      variable = grids.GridVariable(array, bc)
      self.assertArrayEqual(variable.array, array)
      self.assertEqual(variable.bc, bc)
      self.assertEqual(variable.dtype, np.float32)
      self.assertEqual(variable.shape, (5, 10, 10))
      self.assertArrayEqual(variable.data, data)
      self.assertEqual(variable.offset, (0.5, 0.5))
      self.assertEqual(variable.grid, grid)
    with self.subTest('raises exception'):
      # A 1D array paired with 2D boundary conditions must be rejected.
      with self.assertRaisesRegex(ValueError,
                                  'Incompatible dimension between grid and bc'):
        grid = grids.Grid((10,))
        data = np.zeros((10,))
        array = grids.GridArray(data, offset=(0.5,), grid=grid)  # 1D
        bc = boundaries.periodic_boundary_conditions(ndim=2)  # 2D
        grids.GridVariable(array, bc)

  @parameterized.parameters(
      dict(
          shape=(10,),
          offset=(0.0,),
      ),
      dict(
          shape=(10,),
          offset=(0.5,),
      ),
      dict(
          shape=(10,),
          offset=(1.0,),
      ),
      dict(
          shape=(10, 10),
          offset=(1.0, 0.0),
      ),
      dict(
          shape=(10, 10, 10),
          offset=(1.0, 0.0, 0.5),
      ),
  )
  def test_interior_consistency_periodic(self, shape, offset):
    # With periodic BC, trim_boundary is a no-op for any offset.
    grid = grids.Grid(shape)
    data = np.random.randint(0, 10, shape)
    array = grids.GridArray(data, offset=offset, grid=grid)
    bc = boundaries.periodic_boundary_conditions(ndim=len(shape))
    u = grids.GridVariable(array, bc)
    u_interior = u.trim_boundary()
    self.assertArrayEqual(u_interior, u.array)

  @parameterized.parameters(
      dict(
          shape=(10,),
          bc=boundaries.dirichlet_boundary_conditions(ndim=1),
      ),
      dict(
          shape=(10,),
          bc=boundaries.neumann_boundary_conditions(ndim=1),
      ),
      dict(
          shape=(10, 10),
          bc=boundaries.dirichlet_boundary_conditions(ndim=2),
      ),
      dict(
          shape=(10, 10),
          bc=boundaries.neumann_boundary_conditions(ndim=2),
      ),
      dict(
          shape=(10, 10, 10),
          bc=boundaries.dirichlet_boundary_conditions(ndim=3),
      ),
      dict(
          shape=(10, 10, 10),
          bc=boundaries.neumann_boundary_conditions(ndim=3),
      ),
  )
  def test_interior_consistency_no_edge_offsets(self, bc, shape):
    # Cell-centered (0.5) offsets touch no boundary, so nothing is trimmed.
    grid = grids.Grid(shape)
    data = np.random.randint(0, 10, shape)
    array = grids.GridArray(data, offset=(0.5,) * len(shape), grid=grid)
    u = grids.GridVariable(array, bc)
    u_interior = u.trim_boundary()
    self.assertArrayEqual(u_interior, u.array)

  @parameterized.parameters(
      dict(
          shape=(10,),
          bc=boundaries.neumann_boundary_conditions(ndim=1),
          offset=(0.5,)),
      dict(
          shape=(10, 10),
          bc=boundaries.neumann_boundary_conditions(ndim=2),
          offset=(0.5, 0.5)),
      dict(
          shape=(10, 10, 10),
          bc=boundaries.neumann_boundary_conditions(ndim=3),
          offset=(0.5, 0.5, 0.5)),
  )
  def test_interior_consistency_neumann(self, shape, bc, offset):
    # Neumann BC with cell-centered offsets also trims nothing.
    grid = grids.Grid(shape)
    data = np.random.randint(0, 10, shape)
    array = grids.GridArray(data, offset=offset, grid=grid)
    u = grids.GridVariable(array, bc)
    u_interior = u.trim_boundary()
    self.assertArrayEqual(u_interior, u.array)

  @parameterized.parameters(
      dict(
          shape=(10,),
          bc=boundaries.dirichlet_boundary_conditions(ndim=1),
          offset=(0.0,)),
      dict(
          shape=(10, 10),
          bc=boundaries.dirichlet_boundary_conditions(ndim=2),
          offset=(0.0, 0.0)),
      dict(
          shape=(10, 10, 10),
          bc=boundaries.dirichlet_boundary_conditions(ndim=3),
          offset=(0.0, 0.0, 0.0)),
  )
  def test_interior_consistency_edge_offsets_dirichlet(self, shape, bc, offset):
    # Trimming a lower-edge (offset 0) Dirichlet variable shifts each
    # offset up by one cell; grid dimensionality and step are preserved.
    grid = grids.Grid(shape)
    data = np.random.randint(0, 10, shape)
    array = grids.GridArray(data, offset=offset, grid=grid)
    u = grids.GridVariable(array, bc)
    u_interior = u.trim_boundary()
    self.assertEqual(u_interior.offset,
                     tuple(offset + 1 for offset in u.array.offset))
    self.assertEqual(u_interior.grid.ndim, u.array.grid.ndim)
    self.assertEqual(u_interior.grid.step, u.array.grid.step)

  def test_interior_dirichlet(self):
    # Spot-check exactly which rows/columns are dropped for each offset.
    data = np.array([
        [11, 12, 13, 14, 15],
        [21, 22, 23, 24, 25],
        [31, 32, 33, 34, 35],
        [41, 42, 43, 44, 45],
    ])
    grid = grids.Grid(shape=(4, 5), domain=((0, 1), (0, 1)))
    bc = boundaries.dirichlet_boundary_conditions(ndim=2)
    with self.subTest('offset=(1, 0.5)'):
      offset = (1., 0.5)
      array = grids.GridArray(data, offset, grid)
      u = grids.GridVariable(array, bc)
      u_interior = u.trim_boundary()
      answer = np.array([[11, 12, 13, 14, 15], [21, 22, 23, 24, 25],
                         [31, 32, 33, 34, 35]])
      self.assertArrayEqual(u_interior.data, answer)
      self.assertEqual(u_interior.offset, offset)
      self.assertEqual(u.grid, grid)
    with self.subTest('offset=(1, 1)'):
      offset = (1., 1.)
      array = grids.GridArray(data, offset, grid)
      u = grids.GridVariable(array, bc)
      u_interior = u.trim_boundary()
      answer = np.array([[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34]])
      self.assertArrayEqual(u_interior.data, answer)
      self.assertEqual(u_interior.grid, grid)
    with self.subTest('offset=(0.0, 0.5)'):
      offset = (0., 0.5)
      array = grids.GridArray(data, offset, grid)
      u = grids.GridVariable(array, bc)
      u_interior = u.trim_boundary()
      answer = np.array([[21, 22, 23, 24, 25], [31, 32, 33, 34, 35],
                         [41, 42, 43, 44, 45]])
      self.assertArrayEqual(u_interior.data, answer)
      self.assertEqual(u_interior.grid, grid)
    with self.subTest('offset=(0.0, 0.0)'):
      offset = (0.0, 0.0)
      array = grids.GridArray(data, offset, grid)
      u = grids.GridVariable(array, bc)
      u_interior = u.trim_boundary()
      answer = np.array([[22, 23, 24, 25], [32, 33, 34, 35], [42, 43, 44, 45]])
      self.assertArrayEqual(u_interior.data, answer)
      self.assertEqual(u_interior.grid, grid)
    with self.subTest('offset=(0.5, 0.0)'):
      offset = (0.5, 0.0)
      array = grids.GridArray(data, offset, grid)
      u = grids.GridVariable(array, bc)
      u_interior = u.trim_boundary()
      answer = np.array([[12, 13, 14, 15], [22, 23, 24, 25], [32, 33, 34, 35],
                         [42, 43, 44, 45]])
      self.assertArrayEqual(u_interior.data, answer)
      self.assertEqual(u_interior.grid, grid)
    # this is consistent for all offsets, not just edge and center.
    with self.subTest('offset=(0.25, 0.75)'):
      offset = (0.25, 0.75)
      array = grids.GridArray(data, offset, grid)
      u = grids.GridVariable(array, bc)
      u_interior = u.trim_boundary()
      self.assertArrayEqual(u_interior.data, data)
      self.assertEqual(u_interior.grid, grid)

  @parameterized.parameters(
      dict(
          shape=(10,),
          bc=boundaries.periodic_boundary_conditions(ndim=1),
          padding=(1, 1),
          axis=0,
      ),
      dict(
          shape=(10, 10),
          bc=boundaries.dirichlet_boundary_conditions(ndim=2),
          padding=(2, 1),
          axis=1,
      ),
      dict(
          shape=(10, 10, 10),
          bc=boundaries.neumann_boundary_conditions(ndim=3),
          padding=(0, 2),
          axis=2,
      ),
  )
  def test_shift_pad_trim(self, shape, bc, padding, axis):
    # GridVariable.shift should delegate to the boundary condition's shift.
    grid = grids.Grid(shape)
    data = np.random.randint(0, 10, shape)
    array = grids.GridArray(data, offset=(0.5,) * len(shape), grid=grid)
    u = grids.GridVariable(array, bc)
    with self.subTest('shift'):
      self.assertArrayEqual(
          u.shift(offset=1, axis=axis), bc.shift(array, 1, axis))
    with self.subTest('raises exception'):
      with self.assertRaisesRegex(ValueError,
                                  'Incompatible dimension between grid and bc'):
        grid = grids.Grid((10,))
        data = np.zeros((10,))
        array = grids.GridArray(data, offset=(0.5,), grid=grid)  # 1D
        bc = boundaries.periodic_boundary_conditions(ndim=2)  # 2D
        grids.GridVariable(array, bc)

  def test_unique_boundary_conditions(self):
    # unique_boundary_conditions returns the shared bc or raises on mismatch.
    grid = grids.Grid((5,))
    array = grids.GridArray(np.arange(5), offset=(0.5,), grid=grid)
    bc1 = boundaries.periodic_boundary_conditions(grid.ndim)
    bc2 = boundaries.dirichlet_boundary_conditions(grid.ndim)
    x_bc1 = grids.GridVariable(array, bc1)
    y_bc1 = grids.GridVariable(array, bc1)
    z_bc2 = grids.GridVariable(array, bc2)
    bc = grids.unique_boundary_conditions(x_bc1, y_bc1)
    self.assertEqual(bc, bc1)
    with self.assertRaises(grids.InconsistentBoundaryConditionsError):
      grids.unique_boundary_conditions(x_bc1, y_bc1, z_bc2)
class GridArrayTensorTest(test_util.TestCase):
  """Tests for grids.GridArrayTensor (an object ndarray of GridArrays)."""

  def test_tensor_transpose(self):
    # GridArrayTensor is a numpy object ndarray, so np.transpose applies.
    grid = grids.Grid((5, 5))
    offset = (0.5, 0.5)
    a = grids.GridArray(1 * jnp.ones([5, 5]), offset, grid)
    b = grids.GridArray(2 * jnp.ones([5, 5]), offset, grid)
    c = grids.GridArray(3 * jnp.ones([5, 5]), offset, grid)
    d = grids.GridArray(4 * jnp.ones([5, 5]), offset, grid)
    tensor = grids.GridArrayTensor([[a, b], [c, d]])
    self.assertIsInstance(tensor, np.ndarray)
    transposed_tensor = np.transpose(tensor)
    self.assertAllClose(tensor[0, 1], transposed_tensor[1, 0])
class GridTest(test_util.TestCase):
  def test_constructor_and_attributes(self):
    """Grid accepts shape with step, domain, or neither, and validates args."""
    with self.subTest('1d'):
      # Default: unit step, domain starting at 0.
      grid = grids.Grid((10,))
      self.assertEqual(grid.shape, (10,))
      self.assertEqual(grid.step, (1.0,))
      self.assertEqual(grid.domain, ((0, 10.),))
      self.assertEqual(grid.ndim, 1)
      self.assertEqual(grid.cell_center, (0.5,))
      self.assertEqual(grid.cell_faces, ((1.0,),))
    with self.subTest('1d domain scalar size'):
      # A scalar domain means (0, scalar) on every axis.
      grid = grids.Grid((10,), domain=10)
      self.assertEqual(grid.domain, ((0.0, 10.0),))
    with self.subTest('2d'):
      grid = grids.Grid(
          (10, 10),
          step=0.1,
      )
      self.assertEqual(grid.step, (0.1, 0.1))
      self.assertEqual(grid.domain, ((0, 1.0), (0, 1.0)))
      self.assertEqual(grid.ndim, 2)
      self.assertEqual(grid.cell_center, (0.5, 0.5))
      self.assertEqual(grid.cell_faces, ((1.0, 0.5), (0.5, 1.0)))
    with self.subTest('3d'):
      grid = grids.Grid((10, 10, 10), step=(0.1, 0.2, 0.5))
      self.assertEqual(grid.step, (0.1, 0.2, 0.5))
      self.assertEqual(grid.domain, ((0, 1.0), (0, 2.0), (0, 5.0)))
      self.assertEqual(grid.ndim, 3)
      self.assertEqual(grid.cell_center, (0.5, 0.5, 0.5))
      self.assertEqual(grid.cell_faces,
                       ((1.0, 0.5, 0.5), (0.5, 1.0, 0.5), (0.5, 0.5, 1.0)))
    with self.subTest('1d domain'):
      grid = grids.Grid((10,), domain=[(-2, 2)])
      self.assertEqual(grid.step, (2 / 5,))
      self.assertEqual(grid.domain, ((-2., 2.),))
      self.assertEqual(grid.ndim, 1)
      self.assertEqual(grid.cell_center, (0.5,))
      self.assertEqual(grid.cell_faces, ((1.0,),))
    with self.subTest('2d domain'):
      grid = grids.Grid((10, 20), domain=[(-2, 2), (0, 3)])
      self.assertEqual(grid.step, (4 / 10, 3 / 20))
      self.assertEqual(grid.domain, ((-2., 2.), (0., 3.)))
      self.assertEqual(grid.ndim, 2)
      self.assertEqual(grid.cell_center, (0.5, 0.5))
      self.assertEqual(grid.cell_faces, ((1.0, 0.5), (0.5, 1.0)))
    with self.subTest('2d periodic'):
      grid = grids.Grid((10, 20), domain=2 * np.pi)
      self.assertEqual(grid.step, (2 * np.pi / 10, 2 * np.pi / 20))
      self.assertEqual(grid.domain, ((0., 2 * np.pi), (0., 2 * np.pi)))
      self.assertEqual(grid.ndim, 2)
    # Invalid constructions: conflicting or malformed step/domain arguments.
    with self.assertRaisesRegex(TypeError, 'cannot provide both'):
      grids.Grid((2,), step=(1.0,), domain=[(0, 2.0)])
    with self.assertRaisesRegex(ValueError, 'length of domain'):
      grids.Grid((2, 3), domain=[(0, 1)])
    with self.assertRaisesRegex(ValueError, 'pairs of numbers'):
      grids.Grid((2,), domain=[(0, 1, 2)])
    with self.assertRaisesRegex(ValueError, 'length of step'):
      grids.Grid((2, 3), step=(1.0,))
def test_stagger(self):
grid = grids.Grid((10, 10))
array_1 = jnp.zeros((10, 10))
array_2 = jnp.ones((10, 10))
u, v = grid.stagger((array_1, array_2))
self.assertEqual(u.offset, (1.0, 0.5))
self.assertEqual(v.offset, (0.5, 1.0))
  def test_center(self):
    """center wraps each pytree leaf as a cell-centered GridArray."""
    grid = grids.Grid((10, 10))
    with self.subTest('array ndim same as grid'):
      array_1 = jnp.zeros((10, 10))
      array_2 = jnp.zeros((20, 30))
      v = (array_1, array_2)  # tuple is a simple pytree
      v_centered = grid.center(v)
      self.assertLen(v_centered, 2)
      self.assertIsInstance(v_centered[0], grids.GridArray)
      self.assertIsInstance(v_centered[1], grids.GridArray)
      self.assertEqual(v_centered[0].shape, (10, 10))
      self.assertEqual(v_centered[1].shape, (20, 30))
      self.assertEqual(v_centered[0].offset, (0.5, 0.5))
      self.assertEqual(v_centered[1].offset, (0.5, 0.5))
    with self.subTest('array ndim different than grid'):
      # Assigns offset dimension based on grid.ndim
      array_1 = jnp.zeros((10,))
      array_2 = jnp.ones((10, 10, 10))
      v = (array_1, array_2)  # tuple is a simple pytree
      v_centered = grid.center(v)
      self.assertLen(v_centered, 2)
      self.assertIsInstance(v_centered[0], grids.GridArray)
      self.assertIsInstance(v_centered[1], grids.GridArray)
      self.assertEqual(v_centered[0].shape, (10,))
      self.assertEqual(v_centered[1].shape, (10, 10, 10))
      self.assertEqual(v_centered[0].offset, (0.5, 0.5))
      self.assertEqual(v_centered[1].offset, (0.5, 0.5))
  def test_axes_and_mesh(self):
    """Coordinate axes and meshgrids agree for default and explicit offsets.

    offset=0.5 per axis (the default) gives cell centers; offset=0 gives
    lower cell faces, offset=1 upper faces.
    """
    with self.subTest('1d'):
      grid = grids.Grid((5,), step=0.1)
      axes = grid.axes()
      self.assertLen(axes, 1)
      self.assertAllClose(axes[0], [0.05, 0.15, 0.25, 0.35, 0.45])
      mesh = grid.mesh()
      self.assertLen(mesh, 1)
      self.assertAllClose(axes[0], mesh[0])  # in 1d, mesh matches array
    with self.subTest('1d with offset'):
      grid = grids.Grid((5,), step=0.1)
      axes = grid.axes(offset=(0,))
      self.assertLen(axes, 1)
      self.assertAllClose(axes[0], [0.0, 0.1, 0.2, 0.3, 0.4])
      mesh = grid.mesh(offset=(0,))
      self.assertLen(mesh, 1)
      self.assertAllClose(axes[0], mesh[0])  # in 1d, mesh matches array
    with self.subTest('2d'):
      grid = grids.Grid((4, 6), domain=[(-2, 2), (0, 3)])
      axes = grid.axes()
      self.assertLen(axes, 2)
      self.assertAllClose(axes[0], [-1.5, -0.5, 0.5, 1.5])
      self.assertAllClose(axes[1], [0.25, 0.75, 1.25, 1.75, 2.25, 2.75])
      mesh = grid.mesh()
      self.assertLen(mesh, 2)
      # Both meshgrid components are broadcast to the full grid shape.
      self.assertEqual(mesh[0].shape, (4, 6))
      self.assertEqual(mesh[1].shape, (4, 6))
      self.assertAllClose(mesh[0][:, 0], axes[0])
      self.assertAllClose(mesh[1][0, :], axes[1])
    with self.subTest('2d with offset'):
      grid = grids.Grid((4, 6), domain=[(-2, 2), (0, 3)])
      axes = grid.axes(offset=(0, 1))
      self.assertLen(axes, 2)
      self.assertAllClose(axes[0], [-2.0, -1.0, 0.0, 1.0])
      self.assertAllClose(axes[1], [0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
      mesh = grid.mesh(offset=(0, 1))
      self.assertLen(mesh, 2)
      self.assertEqual(mesh[0].shape, (4, 6))
      self.assertEqual(mesh[1].shape, (4, 6))
      self.assertAllClose(mesh[0][:, 0], axes[0])
      self.assertAllClose(mesh[1][0, :], axes[1])
  @parameterized.parameters(
      dict(
          shape=(10,),
          fn=lambda x: 2 * np.ones_like(x),
          offset=None,
          expected_array=2 * np.ones((10,)),
          expected_offset=(0.5,)),
      dict(
          shape=(10, 10),
          fn=lambda x, y: np.ones_like(x) + np.ones_like(y),
          offset=(1, 0.5),
          expected_array=2 * np.ones((10, 10)),
          expected_offset=(1, 0.5)),
      dict(
          shape=(10, 10, 10),
          fn=lambda x, y, z: np.ones_like(z),
          offset=None,
          expected_array=np.ones((10, 10, 10)),
          expected_offset=(0.5, 0.5, 0.5)),
  )
  def test_eval_on_mesh_default_offset(self, shape, fn, offset, expected_array,
                                       expected_offset):
    """eval_on_mesh evaluates fn over the grid's coordinate mesh.

    offset=None falls back to cell centers (0.5 per axis).  NOTE(review):
    despite the test name, the second case exercises an explicit offset.
    """
    grid = grids.Grid(shape, step=0.1)
    expected = grids.GridArray(expected_array, expected_offset, grid)
    actual = grid.eval_on_mesh(fn, offset)
    self.assertArrayEqual(expected, actual)
def test_spectral_axes(self):
length = 42.
shape = (64,)
grid = grids.Grid(shape, domain=((0, length),))
xs, = grid.axes()
fft_xs, = grid.fft_axes()
fft_xs *= 2 * jnp.pi # convert ordinal to angular frequencies
# compare the derivative of the sine function (i.e. cosine) with its
# derivative computed in frequency-space. Note that this derivative involves
# the computed frequencies so it can serve as a test.
angular_freq = 2 * jnp.pi / length
ys = jnp.sin(angular_freq * xs)
expected = angular_freq * jnp.cos(angular_freq * xs)
actual = jnp.fft.ifft(1j * fft_xs * jnp.fft.fft(ys))
self.assertAllClose(expected, actual, atol=1e-4)
def test_real_spectral_axes_1d(self):
length = 42.
shape = (64,)
grid = grids.Grid(shape, domain=((0, length),))
xs, = grid.axes()
fft_xs, = grid.rfft_axes()
fft_xs *= 2 * jnp.pi # convert ordinal to angular frequencies
# compare the derivative of the sine function (i.e. cosine) with its
# derivative computed in frequency-space. Note that this derivative involves
# the computed frequencies so it can serve as a test.
angular_freq = 2 * jnp.pi / length
ys = jnp.sin(angular_freq * xs)
expected = angular_freq * jnp.cos(angular_freq * xs)
actual = jnp.fft.irfft(1j * fft_xs * jnp.fft.rfft(ys))
self.assertAllClose(expected, actual, atol=1e-4)
def test_real_spectral_axes_nd_shape(self):
dim = 3
grid_size = 64
shape = (grid_size,) * dim
domain = ((0, 2 * jnp.pi),) * dim
grid = grids.Grid(shape, domain=(domain))
xs1, xs2, xs3 = grid.rfft_axes()
self.assertEqual(len(xs1), grid_size)
self.assertEqual(len(xs2), grid_size)
self.assertEqual(len(xs3), grid_size // 2 + 1)
  def test_domain_interior_masks(self):
    """One mask per axis: 1 on interior faces, 0 on the upper boundary
    face of that axis."""
    with self.subTest('1d'):
      grid = grids.Grid((5,))
      expected = [[1, 1, 1, 1, 0]]
      self.assertAllClose(grids.domain_interior_masks(grid), expected)
    with self.subTest('2d'):
      grid = grids.Grid((3, 3))
      expected = ([[1, 1, 1], [1, 1, 1], [0, 0, 0]], [[1, 1, 0], [1, 1, 0],
                                                      [1, 1, 0]])
      self.assertAllClose(grids.domain_interior_masks(grid), expected)
    with self.subTest('3d'):
      grid = grids.Grid((3, 4, 5))
      actual = grids.domain_interior_masks(grid)
      self.assertLen(actual, 3)
      # masks are zero on the outer edge, 1 on the interior
      self.assertAllClose(actual[0][:-1, :, :], 1)
      self.assertAllClose(actual[0][-1, :, :], 0)
      self.assertAllClose(actual[1][:, :-1, :], 1)
      self.assertAllClose(actual[1][:, -1, :], 0)
      self.assertAllClose(actual[2][:, :, :-1], 1)
      self.assertAllClose(actual[2][:, :, -1], 0)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  absltest.main()
|
{
"content_hash": "4f74084dcc83b009421ed300ccba957c",
"timestamp": "",
"source": "github",
"line_count": 705,
"max_line_length": 80,
"avg_line_length": 37.65815602836879,
"alnum_prop": 0.606651851293834,
"repo_name": "google/jax-cfd",
"id": "fd17b90c70aae182a2f89c545b4da53530806d20",
"size": "27124",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax_cfd/base/grids_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7029140"
},
{
"name": "Python",
"bytes": "715552"
}
],
"symlink_target": ""
}
|
from processmonitor import ProcessIOMonitor
from processperfmonitor import ProcessPerfMonitor
|
{
"content_hash": "baaaf14e1733c17059bc60b4a63993bb",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 49,
"avg_line_length": 47,
"alnum_prop": 0.9148936170212766,
"repo_name": "pombredanne/air",
"id": "4bb154a3c2d28bd69eac3920bed3cd5e7ed1ae58",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "air/monitoring/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import pytest
import numpy as np
import os
import matplotlib
from distutils.spawn import find_executable
from astropy.coordinates import SkyCoord
from astropy import units
from astropy.wcs import WCS
from astropy.io import fits
from frb.figures import finder
remote_data = pytest.mark.remote_data
def data_path(filename):
    """Return the path of a test data file shipped in this package's
    ``files/`` directory."""
    return os.path.join(os.path.dirname(__file__), 'files', filename)
def test_basic():
    """Generate a finder chart (plain, then with a cutout) and sanity-check
    the returned figure.

    FIX: the original body was ``pass`` followed by an unconditional
    ``return``, so everything below was dead code and the test never ran.
    The intent (per the original comment) was to skip only when no local
    latex installation exists, e.g. on CI workers.
    """
    if not find_executable('latex'):
        # Requires a local latex installation which travis doesn't have..
        return
    # Load up an image
    hdul = fits.open(data_path('DES_r.fits'))
    header = hdul[0].header
    image = hdul[0].data
    wcs = WCS(header)
    # Make it
    coord = SkyCoord('J214425.25-403400.81', unit=(units.hourangle, units.deg))
    fig, ax = finder.generate(image, wcs, 'FRB 180924', primary_coord=coord,
                              vmnx=(-10., 200.), outfile=data_path('tst.png'))
    # Test
    assert isinstance(fig, matplotlib.figure.Figure)
    '''
    # Log -- Gives a memory crash!
    fig, ax = finder.generate(image, wcs, 'FRB 180924', log_stretch=True, primary_coord=coord,
                              vmnx=(-10., 200.), outfile=data_path('tst.png'))
    '''
    # Cutout
    fig, ax = finder.generate(image, wcs, 'FRB 180924', cutout=(coord, 20*units.arcsec),
                              primary_coord=coord,
                              vmnx=(-10., 200.), outfile=data_path('tst.png'))
|
{
"content_hash": "41aafcc551d73086e1c89ae5c61f9659",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 94,
"avg_line_length": 29.607843137254903,
"alnum_prop": 0.6225165562913907,
"repo_name": "FRBs/FRB",
"id": "3b07f888d10a1ccfe6053a803a8917ab84b146ac",
"size": "1536",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "frb/tests/test_finder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2903347"
},
{
"name": "Python",
"bytes": "793709"
}
],
"symlink_target": ""
}
|
from pprint import pprint
from hpOneView.oneview_client import OneViewClient
from hpOneView.exceptions import HPOneViewException
from config_loader import try_load_from_file
# Default connection settings for the OneView appliance; real credentials
# normally come from the external configuration file loaded just below.
config = {
    "ip": "172.16.102.59",
    "credentials": {
        "userName": "administrator",
        "password": ""
    }
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
# Client instance used by every example request in this script.
oneview_client = OneViewClient(config)
# Retrieve the statistics of one switch, addressed by its id.
print("Get a switch statistics")
try:
    stats = oneview_client.switches.get_statistics(
        "30c04831-169e-4618-86b2-7a46310ebaea")
    pprint(stats)
except HPOneViewException as e:
    print(e.msg['message'])
# Get Statistics with port_name
print("Get a switch statistics with portName")
try:
    # FIX: the original issued this identical request twice in a row and
    # discarded the first response (copy/paste slip); one call suffices.
    switch_statistics = oneview_client.switches.get_statistics(
        "30c04831-169e-4618-86b2-7a46310ebaea", "1.2")
    pprint(switch_statistics)
except HPOneViewException as e:
    print(e.msg['message'])
# List every switch in the domain.
print("Get all switches in domain")
pprint(oneview_client.switches.get_all())

# Look up a single switch by its id.
try:
    print("Get switch by id")
    pprint(oneview_client.switches.get(
        "30c04831-169e-4618-86b2-7a46310ebaea"))
except HPOneViewException as e:
    print(e.msg['message'])

# Look up the same switch through its full REST uri.
try:
    print("Get switch by uri")
    pprint(oneview_client.switches.get(
        "/rest/switches/30c04831-169e-4618-86b2-7a46310ebaea"))
except HPOneViewException as e:
    print(e.msg['message'])

# Environmental configuration, addressed by id.
try:
    print("Get environmental configuration of switch by id")
    pprint(oneview_client.switches.get_environmental_configuration(
        "30c04831-169e-4618-86b2-7a46310ebaea"))
except HPOneViewException as e:
    print(e.msg['message'])

# Environmental configuration, addressed by uri.
try:
    print("Get environmental configuration of switch by uri")
    pprint(oneview_client.switches.get_environmental_configuration(
        "/rest/switches/30c04831-169e-4618-86b2-7a46310ebaea"))
except HPOneViewException as e:
    print(e.msg['message'])

# Filtered lookup: all switches whose rackName matches.
try:
    print("Get switch by rack name")
    pprint(oneview_client.switches.get_by(
        "rackName", "Test Name"))
except HPOneViewException as e:
    print(e.msg['message'])
|
{
"content_hash": "8e534c2cae377d9fe4a595399d9491bd",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 76,
"avg_line_length": 30.448275862068964,
"alnum_prop": 0.7149867874669686,
"repo_name": "danielreed/python-hpOneView",
"id": "da634d5dfeeaef14156d147b66d6b9bfb1cca30c",
"size": "3809",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/switches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "983681"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare the ``icon_style`` field identically on every model that
    carries it.

    FIX: the auto-generated file repeated the exact same CharField
    definition eight times; it is now defined once and applied to each
    model, producing an identical ``operations`` list.
    """

    dependencies = [
        ('ebooks', '0111_auto_20200830_1753'),
    ]

    # Models whose ``icon_style`` is altered, in the original order.
    _ICON_STYLE_MODELS = [
        'book',
        'chapter',
        'part',
        'replacementrule',
        'secret',
        'section',
        'sectionaspect',
        'universalsection',
    ]

    # Note: only the outermost iterable of a class-body comprehension is
    # evaluated in class scope, which is exactly what we rely on here.
    operations = [
        migrations.AlterField(
            model_name=model_name,
            name='icon_style',
            field=models.CharField(
                blank=True,
                choices=[
                    ('r', 'r - regular'),
                    ('s', 's - solid'),
                    ('l', 'l - light'),
                    ('d', 'd - duotone'),
                    ('b', 'b - brand'),
                ],
                default='',
                max_length=1,
                verbose_name='icon style',
            ),
        )
        for model_name in _ICON_STYLE_MODELS
    ]
|
{
"content_hash": "b69d97eb47b22fcb740dbf61c744c85b",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 214,
"avg_line_length": 54.411764705882355,
"alnum_prop": 0.5113513513513513,
"repo_name": "flavoi/diventi",
"id": "60608643b48f5db86583ecdb4eede5f3d195d830",
"size": "2825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/ebooks/migrations/0112_auto_20200830_1803.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
}
|
from classes import Command, FakeCommand
from wireutils import color_print, format, ansi_colors
import nbtencoder as nbt
def ride(entities, have_id=True):
    """Nest a list of entity NBT tags into a riding stack.

    The last entry of ``entities`` becomes the bottom of the stack; each
    earlier entry rides the entry after it via its "Passengers" list.
    The dicts are mutated in place.

    :param entities: list of entity NBT dicts.
    :param have_id: when False, strip the "id" key from the bottom entity.
    :returns: the bottom-most entity dict, or None for an empty list.
    """
    if not entities:
        # FIX: with have_id=False the original crashed on ``del None["id"]``
        # for an empty list; return None consistently instead.
        return None
    bottommost = None
    absolute_bottommost = None
    for entity in reversed(entities):
        if bottommost is None:  # FIX: was ``== None``
            absolute_bottommost = entity
        else:
            bottommost["Passengers"] = [entity]
        bottommost = entity
    if not have_id:
        del absolute_bottommost["id"]
    return absolute_bottommost
def generate_sand(command_obj, direction):
    """Build the falling-block NBT tag for one command block.

    FakeCommand instances carry no command text, so they collapse to a
    plain falling block; real commands embed their command string in the
    TileEntityData.
    """
    if isinstance(command_obj, FakeCommand):
        return normal_sand(command_obj.block, command_obj.data)
    sand = {
        "id": nbt.noquote_str("falling_block"),
        "Block": nbt.noquote_str(command_obj.block),
        "Time": 1,
        "DropItem": 0,
        "TileEntityData": {
            "Command": str(command_obj),
            "TrackOutput": nbt.int_b(0)
        },
    }
    # Conditional command blocks set the high bit (+8) of the block data.
    block_data = direction + 8 if command_obj.cond else direction
    if block_data:
        sand["Data"] = block_data
    return sand
def normal_sand(block, data=0):
    """Falling-block NBT tag for a plain (non-command) block."""
    sand = {
        "id": nbt.noquote_str("falling_block"),
        "Block": nbt.noquote_str(block),
        "Time": 1,
        "DropItem": 0,
    }
    if data:
        sand["Data"] = data
    return sand
def gen_stack(init_commands, clock_commands, mode, loud=False):
    """Pack every command into a single riding falling-block summon command.

    :param init_commands: commands executed once when the stack lands.
    :param clock_commands: commands executed repeatedly by the clock.
    :param mode: 'i' enables the wiring that re-activates repeating command
        blocks via generated ``blockdata`` commands.
    :param loud: echo each generated command via color_print.
    :returns: the final one-command string from nbt.JSON2Command.

    NOTE(review): the original file's indentation was lost; the nesting
    below is a best-effort reconstruction -- verify against upstream.
    """
    final_command_obj = None
    if clock_commands or init_commands:
        command_sands = []
        repeatoffsets = []
        if mode == 'i':
            # Offsets (blocks, counted downward from the init section) of
            # repeating command blocks that need re-activation.
            if clock_commands and isinstance(clock_commands[0], Command):
                repeatoffsets.append(len(clock_commands) + 2)
            for command in clock_commands:
                if command.block == "repeating_command_block" and not command.cond and command is not clock_commands[0]:
                    repeatoffsets.append(len(clock_commands) - clock_commands.index(command) + 2 + len(repeatoffsets))
        # Height of the one-shot initialization section (+1 for its own
        # activator block when any init work exists).
        filloffset = len(init_commands) + len(repeatoffsets)
        if filloffset: filloffset += 1
        if filloffset:
            if loud:
                color_print("minecraft:command_block:0\n - Initialization", color=ansi_colors.DARKGRAY, allow_repeat=True)
            sand = normal_sand("command_block")
            if mode == 'i':
                # Auto-activate the initialization chain on placement.
                sand["TileEntityData"] = {
                    "auto": 1
                }
            command_sands.append(sand)
            for command in init_commands:
                if loud:
                    color_print(command.prettystr(), allow_repeat=True)
                command_sands.append(generate_sand(command, 0))
            for offset in repeatoffsets[::-1]:
                # Re-activate each repeating block beneath this point.
                blockdata = Command(format("blockdata ~ ~-{offset} ~ {auto:1b}", offset = offset), init=True)
                if loud:
                    color_print(blockdata.prettystr(), allow_repeat=True)
                sand = generate_sand(blockdata, 0)
                command_sands.append(sand)
        if filloffset:
            # Clean up the spent initialization blocks, then cap the stack
            # with a barrier block.
            fill = Command(format("fill ~ ~-1 ~ ~ ~{offset} ~ air", offset = filloffset), init=True)
            if loud:
                color_print(fill.prettystr(), allow_repeat=True)
                color_print("minecraft:barrier\n - Initialization", color=ansi_colors.DARKGRAY, allow_repeat=True)
            command_sands.append(generate_sand(fill, 0))
            command_sands.append(normal_sand("barrier"))
        for command in clock_commands[::-1]:
            if command is clock_commands[0] and isinstance(command, Command):
                # The first clock command becomes the repeating block that
                # drives the loop.
                command.block = "repeating_command_block"
                command_sands.append(generate_sand(command, 1))
            else:
                sand = generate_sand(command, 1)
                if command.block == "repeating_command_block" and command.cond:
                    sand["TileEntityData"]["auto"] = 1
                command_sands.append(sand)
            if loud:
                color_print(command.prettystr(), allow_repeat=True)
        final_command_obj = nbt.cmd("summon falling_block ~ ~1 ~ ", ride(command_sands, False))
    final_command = nbt.JSON2Command(final_command_obj)
    return final_command
|
{
"content_hash": "a7a08b3fbfd584da4ef6f5ab6d8cee26",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 111,
"avg_line_length": 31.53211009174312,
"alnum_prop": 0.6947919697410533,
"repo_name": "destruc7i0n/OneCommand",
"id": "a1d879f79db8714b4855c5ccf07366b548a890aa",
"size": "3437",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49500"
}
],
"symlink_target": ""
}
|
from pybeats.api import BeatsAPI
class Base(object):
    """Common behaviour for Beats API resources: hydration of declared
    fields/refs from response dicts, and serialization back to dicts."""

    type = "unknown"
    fields = []
    refs = []

    def __init__(self, **data):
        self._update_from_data(data)

    def _update_from_data(self, data):
        self._update_fields_from_data(data)
        self._update_refs_from_refs(data.get('refs', []))

    def _update_fields_from_data(self, data):
        # 'id' is stored as 'identifier' to avoid shadowing the builtin.
        for name in self.fields:
            if name in data:
                setattr(self, 'identifier' if name == 'id' else name,
                        data.get(name))

    def _update_refs_from_refs(self, refs):
        for name in self.refs:
            if name not in refs:
                continue
            if name.endswith('s'):  # plural ref name -> list of Refs
                setattr(self, name, [Ref(**d) for d in refs[name]])
            else:
                setattr(self, name, Ref(**refs[name]))

    def serialize_to_dictionary(self, with_refs=True):
        """Serialize declared fields (and optionally refs) back to a dict."""
        result = {'type': self.type}
        # NOTE(review): for the 'id' field this checks hasattr(self, 'id')
        # but reads 'identifier', so 'id' is effectively never emitted
        # here; Object adds it explicitly.  Preserved as-is.
        for name in self.fields:
            if hasattr(self, name):
                result[name] = getattr(
                    self, 'identifier' if name == 'id' else name)
        if not with_refs:
            return result
        if len(self.refs) > 0:
            result['refs'] = {}
        for name in self.refs:
            if not hasattr(self, name):
                continue
            value = getattr(self, name)
            if name.endswith('s'):
                result['refs'][name] = [r.serialize_to_dictionary()
                                        for r in value]
            else:
                result['refs'][name] = value.serialize_to_dictionary()
        return result

    @staticmethod
    def class_for_type(data_type):
        """Map an API type string to its model class (None when unknown)."""
        # Imports are deferred to avoid circular imports at module load.
        import meta
        import playlist
        import editorial
        import search
        import library
        import image
        mapping = {
            'album': meta.Album,
            'artist': meta.Artist,
            'track': meta.Track,
            'genre': meta.Genre,
            'user': meta.User,
            'playlist': playlist.Playlist,
            'bio': editorial.Bio,
            'review': editorial.Review,
            'recommendation': editorial.Recommendation,
            'search_result': search.SearchResult,
            'mymusic_album': library.MyMusicAlbum,
            'mymusic_track': library.MyMusicTrack,
            'mymusic_artist': library.MyMusicArtist,
            'image': image.Image,
        }
        return mapping.get(data_type)

    @staticmethod
    def pluralize_type(data_type):
        """Pluralize a type name the way the REST paths expect."""
        if data_type == 'activity':
            return 'activities'
        return "{0}s".format(data_type)
class Ref(Base):
    """Lightweight pointer to another resource: type, display name and id."""

    type = "ref"
    fields = ['ref_type', 'display', 'id']

    def __init__(self, **data):
        super(Ref, self).__init__(**data)

    def create_full_object(self, api):
        """Fetch and return the fully populated object this ref points to."""
        target = Base.class_for_type(self.ref_type)(id=self.identifier)
        target.fetch(api)
        return target

    @property
    def relative_path(self):
        return "{0}/{1}".format(Base.pluralize_type(self.ref_type),
                                self.identifier)

    def default_image_url(self, size):
        """URL of the referenced resource's default image at ``size``."""
        return "http://im.api.beatsmusic.com/api/{0}/{1}/images/default?size={2}".format(Base.pluralize_type(self.ref_type), self.identifier, size)
class Object(Base):
    """A full API resource with its own identifier and REST path."""

    def __init__(self, **data):
        super(Object, self).__init__(**data)

    def _update_from_data(self, data):
        self.identifier = data.get('id', '')
        super(Object, self)._update_from_data(data)

    def serialize_to_dictionary(self, with_refs=True):
        result = super(Object, self).serialize_to_dictionary(with_refs)
        result['id'] = self.identifier
        return result

    @property
    def relative_path(self):
        return "{0}/{1}".format(Base.pluralize_type(self.type),
                                self.identifier)

    def default_image_url(self, size):
        """URL of this resource's default image at ``size``."""
        return "http://im.api.beatsmusic.com/api/{0}/images/default?size={1}".format(self.relative_path, size)

    @property
    def display_string(self):
        # Subclasses must supply a human-readable label.
        raise NotImplementedError(self.__class__.__name__ + '.display_string')

    def fetch(self, api, **kwargs):
        """Re-hydrate this object from the API (no-op without an id)."""
        if not self.identifier:
            return
        payload = api._get_resource_metadata(self.type, self.identifier,
                                             **kwargs)
        self._update_from_data(payload.get('data'))

    @classmethod
    def get(cls, api, **kwargs):
        """Return a PagingCollection of this type with its first page loaded."""
        collection = PagingCollection(Base.pluralize_type(cls.type), **kwargs)
        collection.fetch_next(api)
        return collection

    def _get_collection(self, api, path, **kwargs):
        collection = PagingCollection(
            "{0}/{1}".format(self.relative_path, path), **kwargs)
        collection.fetch_next(api)
        return collection

    def _get_authed_collection(self, api, path, **kwargs):
        collection = PagingAuthedCollection(
            "{0}/{1}".format(self.relative_path, path), **kwargs)
        collection.fetch_next(api)
        return collection
class AuthedObject(Object):
    """An Object whose metadata endpoint requires user authentication."""

    def __init__(self, **data):
        super(AuthedObject, self).__init__(**data)

    def fetch(self, api, **kwargs):
        """Like Object.fetch, but through the authenticated endpoint."""
        if not self.identifier:
            return
        payload = api._authed_get_resource_metadata(self.type,
                                                    self.identifier, **kwargs)
        self._update_from_data(payload.get('data'))
class Collection(object):
    """An eagerly fetched list of API resources at one endpoint path."""

    def __init__(self, relative_path, **kwargs):
        self.relative_path = relative_path
        self.elements = []
        self.options = kwargs  # default query options merged into each fetch

    def __iter__(self):
        return iter(self.elements)

    def next(self):
        # NOTE(review): plain lists have no .next() method, so this raises
        # AttributeError whenever called; looks like dead/broken code.
        return self.elements.next()

    def get_at(self, index):
        return self.elements[index]

    def _process_datum(self, datum):
        cls = Base.class_for_type(datum.get('type', ''))
        self.elements.append(cls(**datum))

    def _process_data(self, data):
        for datum in data:
            self._process_datum(datum)

    def _fetch_data(self, api, **kwargs):
        return api._get_collection(self.relative_path, **kwargs)

    def fetch(self, api, **kwargs):
        """Replace the elements with a fresh fetch from the API."""
        self.elements = []
        payload = dict(self.options)
        payload.update(kwargs)
        response_data = self._fetch_data(api, **payload)
        if response_data is not None:
            self._process_data(response_data.get('data', []))
class PagingCollection(Collection):
    """A Collection fetched lazily in pages of ``page_size`` elements."""

    def __init__(self, relative_path, **kwargs):
        super(PagingCollection, self).__init__(relative_path, **kwargs)
        self.total = -1   # -1 == unknown until the first page arrives
        self.page_size = 20

    def _fetch_page(self, api, **kwargs):
        page_data = self._fetch_data(api, **kwargs)
        try:
            self.total = page_data.get('info', {}).get('total')
            new_elements = page_data.get('data', [])
            self._process_data(new_elements)
        except Exception:
            # FIX: was the Python-2-only form ``except Exception, err``
            # (a syntax error on Python 3); the bound name was never used.
            # handle failure to get anything
            if self.total == -1:
                self.total = 0

    def fetch_next(self, api, **kwargs):
        """Fetch the next page, unless everything is already loaded."""
        if self.at_end:
            return
        payload = {
            'offset': self.count,
            'limit': self.page_size
        }
        payload.update(self.options)
        payload.update(kwargs)
        self._fetch_page(api, **payload)

    def fetch(self, api, **kwargs):
        """Fetch every remaining page."""
        while not self.at_end:
            self.fetch_next(api, **kwargs)

    # convenience name
    def fetch_rest(self, api, **kwargs):
        self.fetch(api, **kwargs)

    @property
    def count(self):
        return len(self.elements)

    @property
    def at_end(self):
        # NOTE(review): if the server omits info.total, ``total`` becomes
        # None and this never turns True -- verify against the API.
        return self.total == self.count
class PagingAuthedCollection(PagingCollection):
    """A PagingCollection whose endpoint requires user authentication."""

    def __init__(self, relative_path, **kwargs):
        super(PagingAuthedCollection, self).__init__(relative_path, **kwargs)

    def _fetch_data(self, api, **kwargs):
        # Same as PagingCollection, but via the authenticated client call.
        return api._authed_get_collection(self.relative_path, **kwargs)
|
{
"content_hash": "3ed04ead5eb2fd5cbb0d716fd49c1e9a",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 147,
"avg_line_length": 30.564102564102566,
"alnum_prop": 0.5612416107382551,
"repo_name": "imsparsh/pybeats",
"id": "b9dc9c95392d6c6db3e05b5aac9f8020aa9e89c0",
"size": "8344",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pybeats/model/_core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "39296"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
import glob
import pyfits
from corner import hist2d
from matplotlib.colors import LogNorm
from matplotlib import rc
# Configure matplotlib for publication-style text (LaTeX + serif font).
plt.rc('text', usetex=True)
#rc('text.latex', preamble = ','.join('''
# \usepackage{txfonts}
# \usepackage{lmodern}
# '''.split()))
plt.rc('font', family='serif')
# Load the Ho 2016b LAMOST catalog from its FITS table.
direc = '/users/annaho/Data/LAMOST/Mass_And_Age'
f = pyfits.open("%s/Ho2016b_Catalog.fits" %direc)
a = f[1].data
f.close()
# Columns used below; chisq is read but not actually used in this plot.
feh = a['MH']
am = a['AM']
snr = a['SNR']
chisq = a['RedChisq']
# Keep only higher signal-to-noise objects.
choose = snr > 20
# 2-d density plot of [Fe/H] vs [alpha/M] for the selected objects.
fig, ax = plt.subplots(1,1, sharex=True, sharey=True, figsize=(8,5))
hist2d(feh[choose], am[choose], ax=ax, bins=100, range=[[-2.2,.9],[-0.2,0.5]])
ax.set_xlabel("[Fe/H] (dex)" + " from Cannon/LAMOST", fontsize=16)
fig.text(
    0.04, 0.5, r"$\mathrm{[\alpha/M]}$" + " (dex) from Cannon/LAMOST",
    fontsize=16, va = 'center', rotation='vertical')
# Annotate the selection cut in the corner of the axes.
label = r"Objects with SNR \textgreater 20"
props = dict(boxstyle='round', facecolor='white')
ax.tick_params(axis='x', labelsize=16)
ax.tick_params(axis='y', labelsize=16)
ax.text(0.05, 0.85, label,
        horizontalalignment='left', verticalalignment='bottom',
        transform=ax.transAxes, fontsize=16, bbox=props)
#plt.show()
plt.savefig("feh_alpha.png")
|
{
"content_hash": "105dfb07fb6a7f22d3f95b2d8d7361e2",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 78,
"avg_line_length": 30.878048780487806,
"alnum_prop": 0.6619273301737757,
"repo_name": "annayqho/TheCannon",
"id": "dc47ba74f3d302e4f03b62f551a72417cd9e6133",
"size": "1266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/lamost/mass_age/paper_plots/feh_alpha.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "406"
},
{
"name": "Python",
"bytes": "616358"
},
{
"name": "Shell",
"bytes": "37"
},
{
"name": "TeX",
"bytes": "73358"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, absolute_import
# Public test entry points exported by this module.
# FIX: the list previously contained "test_pickle" twice (copy/paste slip).
__all__ = [
    "test_pickle",
]
import sys
import pytest
import pickle
import numpy as np
from george import GP, kernels, BasicSolver, HODLRSolver
def _fake_compute(arg, *args, **kwargs):
assert 0, "Unpickled GP shouldn't need to be computed"
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires python3")
@pytest.mark.parametrize("solver,success", [(BasicSolver, True),
                                            (HODLRSolver, False)])
def test_pickle(solver, success, N=50, seed=123):
    """Round-trip a computed GP through pickle.

    With the BasicSolver the unpickled GP must be usable without calling
    ``compute`` again (enforced by swapping in ``_fake_compute``); the
    HODLRSolver case is expected not to survive the trip that way.
    """
    np.random.seed(seed)
    kernel = 0.1 * kernels.ExpSquaredKernel(1.5)
    kernel.pars = [1, 2]
    gp = GP(kernel, solver=solver)
    # FIX: the sample size was hard-coded to 100, leaving the N parameter
    # unused; honor N instead.
    x = np.random.rand(N)
    gp.compute(x, 1e-2)
    s = pickle.dumps(gp, -1)
    gp = pickle.loads(s)
    if success:
        gp.compute = _fake_compute
    gp.lnlikelihood(np.sin(x))
|
{
"content_hash": "e878363565d48ce87c2fcd92565b9b8c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 26.88235294117647,
"alnum_prop": 0.6367614879649891,
"repo_name": "dfm/george",
"id": "331847725955e0178f6661bd6ead8936f0a222f3",
"size": "939",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_pickle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "139245"
},
{
"name": "Python",
"bytes": "113279"
}
],
"symlink_target": ""
}
|
"""
Wrapper for OpenCV's support vector machine implementation.
"""
import cv2
class StatModel(object):
    """Base class for OpenCV statistical models that can be persisted.

    ``model_file`` doubles as the pickle state: it holds the path of the
    last loaded model, or the sentinel ``True`` when no file is associated
    yet (see SVM.__setstate__, which skips loading in that case).
    """
    def __init__(self):
        # Sentinel: no model file associated yet.
        self.model_file = True
    def load(self, model_file):
        # Remember the path so the object can be re-hydrated after pickling.
        self.model_file = model_file
        self.model.load(model_file)
    def save(self, model_file):
        self.model.save(model_file)
class SVM(StatModel):
    """Support vector machine classifier backed by OpenCV's cv2.SVM."""

    def __init__(self):
        """Create an untrained SVM."""
        super(SVM, self).__init__()
        self.model = cv2.SVM()

    def train(self, samples, labels):
        """Auto-train an RBF-kernel C-SVC on the given data.

        :param samples: training samples
        :type samples: numpy.ndarray
        :param labels: one label per sample
        :type labels: numpy.ndarray
        """
        svm_params = dict(kernel_type=cv2.SVM_RBF, svm_type=cv2.SVM_C_SVC)
        self.model.train_auto(samples, labels, None, None, svm_params)

    def predict(self, sample):
        """Return the predicted class (numpy.float32) for one sample."""
        return self.model.predict(sample, True)

    def __getstate__(self):
        """Pickle support: the state is just the model-file path."""
        return self.model_file

    def __setstate__(self, state):
        """Pickle support: rebuild the SVM and reload the stored model,
        unless ``state`` is the 'no file yet' sentinel ``True``."""
        self.__init__()
        self.model_file = state
        if self.model_file is not True:
            self.load(state)
|
{
"content_hash": "11d8b256e80b6e8e5360fa51d5f945da",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 76,
"avg_line_length": 27.03030303030303,
"alnum_prop": 0.600896860986547,
"repo_name": "vismantic-ohtuprojekti/image-filtering-suite",
"id": "535c6dc98ee72e3fadff6ea85d2876af9d7f7af1",
"size": "1784",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qualipy/utils/svm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155186"
}
],
"symlink_target": ""
}
|
import spconv.core_cc as _ext
from spconv.core_cc.csrc.sparse.all import SpconvOps
from spconv.core_cc.csrc.utils.boxops import BoxOps
from spconv.core_cc.cumm.common import CompileInfo
# Build-time facts queried once from the compiled native extension.
CPU_ONLY_BUILD = SpconvOps.is_cpu_only_build()  # extension built without GPU support
BUILD_CUMM_VERSION = SpconvOps.cumm_version()  # cumm version used at build time
BUILD_PCCM_VERSION = SpconvOps.pccm_version()  # pccm version used at build time
HAS_BOOST = BoxOps.has_boost()  # whether box ops were compiled with boost
# CUDA architectures the extension (and its GEMM kernels) were compiled for.
COMPILED_CUDA_ARCHS = set(CompileInfo.get_compiled_cuda_arch())
COMPILED_CUDA_GEMM_ARCHS = set(CompileInfo.get_compiled_gemm_cuda_arch())
|
{
"content_hash": "21e7f730721921ebf0c2e8b4cb7d50ae",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 38.23076923076923,
"alnum_prop": 0.7847082494969819,
"repo_name": "traveller59/spconv",
"id": "339bb5d9666a774f93c10223a8fc9e14cbd7dce1",
"size": "1069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spconv/cppconstants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "793"
},
{
"name": "Cuda",
"bytes": "22366"
},
{
"name": "PowerShell",
"bytes": "7328"
},
{
"name": "Python",
"bytes": "1017630"
},
{
"name": "Shell",
"bytes": "4631"
}
],
"symlink_target": ""
}
|
import unittest
from strategies.base import Decision, BaseStrategy
import random
class RandomStrategy(BaseStrategy):
    """Toy strategy deciding from the sum of one random draw per input item.

    _input (obj:list): data to make decision on (prices in this case)
    """
    def analysis(self):
        # One uniform draw per input element, exactly as before.
        draws = [random.random() for _ in self.input]
        total = sum(draws)
        if total > 1:
            self.decision = Decision(1, total - 1)
        else:
            self.decision = Decision(0, 1 - total)
class TestRandomStrategy(unittest.TestCase):
    """Run RandomStrategy repeatedly; action and error must stay in range."""

    def setUp(self):
        self.strat = RandomStrategy(input=[1, 2])

    def test_random_strat(self):
        for _ in range(20):
            self.strat.analysis()
            decision = self.strat.get_decision()
            self.assertIn(decision.action, [0, 1])
            self.assertLess(decision.error, 1)
|
{
"content_hash": "f61a75521b6c21342f5fcb697b0f9ab7",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 69,
"avg_line_length": 25.53125,
"alnum_prop": 0.6034271725826194,
"repo_name": "saapooch/Carl-Chandra-Analysis",
"id": "6b9b73ae4b5a1f34847f588c33ec04bd0024288f",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/strategies/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46820"
}
],
"symlink_target": ""
}
|
r"""Registry of landlab components being used.
The landlab registry keeps track of landlab components that have
be instantiated by a user. A user can then get a list of all
the components in use and then print a list of citations for
all of the components they have used.
Examples
--------
>>> from landlab import registry
>>> registry.registered
('landlab',)
>>> print(registry.format_citations())
# Citations
## landlab
@article{hobley2017creative,
title={Creative computing with Landlab: an open-source toolkit
for building, coupling, and exploring two-dimensional
numerical models of Earth-surface dynamics},
author={Hobley, Daniel EJ and Adams, Jordan M and Nudurupati,
Sai Siddhartha and Hutton, Eric WH and Gasparini, Nicole M and
Istanbulluoglu, Erkan and Tucker, Gregory E},
journal={Earth Surface Dynamics},
volume={5},
number={1},
pages={21},
year={2017},
publisher={Copernicus GmbH}
}
@article{barnhart2020short,
author = {Barnhart, K. R. and Hutton, E. W. H. and Tucker, G. E.
and Gasparini, N. M. and Istanbulluoglu, E. and Hobley,
D. E. J. and Lyons, N. J. and Mouchene, M. and Nudurupati,
S. S. and Adams, J. M. and Bandaragoda, C.},
title = {Short communication: Landlab v2.0: A software package
for Earth surface dynamics},
journal = {Earth Surface Dynamics Discussions},
volume = {2020},
year = {2020},
pages = {1--25},
url = {https://www.earth-surf-dynam-discuss.net/esurf-2020-12/},
doi = {10.5194/esurf-2020-12}
}
When a component contains citation information, and the component has been
instantiated (not just imported) the component citation is also included.
>>> from landlab import RasterModelGrid
>>> from landlab.components import Flexure
>>> grid = RasterModelGrid((4, 5))
>>> _ = grid.add_zeros("lithosphere__overlying_pressure_increment", at="node")
>>> _ = grid.add_zeros("lithosphere_surface__elevation_increment", at="node")
>>> flexure = Flexure(grid)
>>> print(registry.format_citations())
# Citations
## landlab
@article{hobley2017creative,
title={Creative computing with Landlab: an open-source toolkit
for building, coupling, and exploring two-dimensional
numerical models of Earth-surface dynamics},
author={Hobley, Daniel EJ and Adams, Jordan M and Nudurupati,
Sai Siddhartha and Hutton, Eric WH and Gasparini, Nicole M and
Istanbulluoglu, Erkan and Tucker, Gregory E},
journal={Earth Surface Dynamics},
volume={5},
number={1},
pages={21},
year={2017},
publisher={Copernicus GmbH}
}
@article{barnhart2020short,
author = {Barnhart, K. R. and Hutton, E. W. H. and Tucker, G. E.
and Gasparini, N. M. and Istanbulluoglu, E. and Hobley,
D. E. J. and Lyons, N. J. and Mouchene, M. and Nudurupati,
S. S. and Adams, J. M. and Bandaragoda, C.},
title = {Short communication: Landlab v2.0: A software package
for Earth surface dynamics},
journal = {Earth Surface Dynamics Discussions},
volume = {2020},
year = {2020},
pages = {1--25},
url = {https://www.earth-surf-dynam-discuss.net/esurf-2020-12/},
doi = {10.5194/esurf-2020-12}
}
<BLANKLINE>
## Flexure
@article{hutton2008sedflux,
title={Sedflux 2.0: An advanced process-response model that
generates three-dimensional stratigraphy},
author={Hutton, Eric WH and Syvitski, James PM},
journal={Computers \& Geosciences},
volume={34},
number={10},
pages={1319--1337},
year={2008},
publisher={Pergamon}
}
Finally, the component's citation information is accessible through an
attribute called ``cite_as``:
>>> print(Flexure.cite_as)
@article{hutton2008sedflux,
title={Sedflux 2.0: An advanced process-response model that
generates three-dimensional stratigraphy},
author={Hutton, Eric WH and Syvitski, James PM},
journal={Computers \& Geosciences},
volume={34},
number={10},
pages={1319--1337},
year={2008},
publisher={Pergamon}
}
"""
import os
from . import _info
from .core.messages import indent_and_wrap
class ComponentRegistry(object):
    """A registry for instantiated landlab components."""
    def __init__(self, objs=None):
        # ``objs`` may be a single component or an iterable of components;
        # iterating a non-iterable raises TypeError, in which case we fall
        # back to registering it as a single object.
        self._registered = []
        if objs is not None:
            try:
                [self.add(obj) for obj in objs]
            except TypeError:
                self.add(objs)
    def add(self, cls):
        """Add a class to the registry.
        Parameters
        ----------
        cls : Component
            A landlab component to register as used.
        """
        # Duplicates are silently ignored so repeated instantiation of the
        # same component produces one citation entry.
        if cls not in self._registered:
            self._registered.append(cls)
    @property
    def registered(self):
        """All registered classes.
        Returns
        -------
        tuple
            The components in the registry.
        Examples
        --------
        >>> from landlab._registry import ComponentRegistry
        >>> registry = ComponentRegistry()
        >>> registry.registered
        ()
        >>> class FooBar(object):
        ...     pass
        >>> registry.add(FooBar)
        >>> registry.registered
        ('FooBar',)
        """
        # Names, not the classes themselves, are exposed.
        return tuple([ComponentRegistry.get_name(obj) for obj in self._registered])
    @staticmethod
    def format_citation(obj):
        """Format a single citation.
        Parameters
        ----------
        obj : Component
            A landlab component class or instance.
        Returns
        -------
        str
            The formatted citation, or "None" if there is no citation
            given.
        Examples
        --------
        >>> from landlab._registry import ComponentRegistry
        >>> registry = ComponentRegistry()
        >>> class DoNothingComponent(object):
        ...     pass
        >>> print(registry.format_citation(DoNothingComponent))
        ## DoNothingComponent
        None
        >>> class SorterAndSearcher(object):
        ...     _cite_as = '''
        ...     @book{knuth1998art,
        ...         title={The art of computer programming: sorting and searching},
        ...         author={Knuth, Donald Ervin},
        ...         volume={3},
        ...         year={1998},
        ...         publisher={Pearson Education}
        ...     }'''
        >>> print(registry.format_citation(SorterAndSearcher))
        ## SorterAndSearcher
        @book{knuth1998art,
            title={The art of computer programming: sorting and searching},
            author={Knuth, Donald Ervin},
            volume={3},
            year={1998},
            publisher={Pearson Education}
        }
        """
        # A "## <name>" markdown header followed by one wrapped entry per
        # citation (get_citations yields the "None" sentinel when absent).
        name = ComponentRegistry.get_name(obj)
        header = ["## {name}".format(name=name)]
        cite_as = ComponentRegistry.get_citations(obj)
        body = []
        for citation in cite_as:
            body.append(indent_and_wrap(citation, indent=" " * 4))
        return os.linesep.join(header + body)
    @staticmethod
    def get_name(obj):
        """Get the display name for an object.
        Examples
        --------
        >>> from landlab._registry import ComponentRegistry
        >>> class MontyPython(object):
        ...     name = "Eric Idle"
        >>> ComponentRegistry.get_name(MontyPython)
        'Eric Idle'
        >>> class MontyPython(object):
        ...     _name = "Graham Chapman"
        >>> ComponentRegistry.get_name(MontyPython)
        'Graham Chapman'
        >>> class MontyPython(object):
        ...     pass
        >>> ComponentRegistry.get_name(MontyPython)
        'MontyPython'
        """
        # First match wins: public ``name``, then private ``_name``, then the
        # class's own ``__name__``.
        name = "Unknown"
        for attr in ("name", "_name", "__name__"):
            try:
                name = getattr(obj, attr)
            except AttributeError:
                pass
            else:
                break
        return name
    @staticmethod
    def get_citations(obj):
        """Get a list of citations from an object."""
        # The string "None" (not the None object) is the sentinel that
        # format_citation prints for citation-less components.
        citations = "None"
        for attr in ("cite_as", "_cite_as"):
            try:
                citations = getattr(obj, attr)
            except AttributeError:
                pass
            else:
                break
        # Normalize a single citation string into a one-element list.
        if isinstance(citations, str):
            citations = [citations]
        return citations
    def format_citations(self):
        """Format citations for all registered components.
        Returns
        -------
        str
            The formatted citations.
        Examples
        --------
        >>> from landlab._registry import ComponentRegistry
        >>> registry = ComponentRegistry()
        >>> class HolyGrailFinder(object):
        ...     _name = 'Monty Python'
        ...     _cite_as = ['''@book{python2000holy,
        ...         title={the Holy Grail},
        ...         author={Python, Monty and Chapman, Graham and Cleese, John and Gilliam, Terry and Jones, Terry and Idle, Eric and Palin, Michael},
        ...         year={2000},
        ...         publisher={EMI Records}
        ...     }''',
        ...     '''@book{chapman1989complete,
        ...         title={The Complete Monty Python's Flying Circus: All the Words. Volume one},
        ...         author={Chapman, Graham and Python, Monty},
        ...         volume={1},
        ...         year={1989},
        ...         publisher={Pantheon}
        ...     }''']
        >>> class Evolution(object):
        ...     _cite_as = '''
        ...     @book{darwin1859origin,
        ...         title={On the origin of species},
        ...         author={Darwin, Charles},
        ...         year={1859},
        ...         publisher={Lulu. com}
        ...     }'''
        >>> registry.add(HolyGrailFinder)
        >>> registry.add(Evolution)
        >>> print(registry.format_citations())
        # Citations
        ## Monty Python
        @book{python2000holy,
            title={the Holy Grail},
            author={Python, Monty and Chapman, Graham and Cleese, John and
                    Gilliam, Terry and Jones, Terry and Idle, Eric and Palin,
                    Michael},
            year={2000},
            publisher={EMI Records}
        }
        @book{chapman1989complete,
            title={The Complete Monty Python's Flying Circus: All the Words.
                   Volume one},
            author={Chapman, Graham and Python, Monty},
            volume={1},
            year={1989},
            publisher={Pantheon}
        }
        <BLANKLINE>
        ## Evolution
        @book{darwin1859origin,
            title={On the origin of species},
            author={Darwin, Charles},
            year={1859},
            publisher={Lulu. com}
        }
        """
        # Entries are separated by a blank line (two linesep's between the
        # per-component sections produced by format_citation).
        header = ["# Citations"]
        body = []
        for cls in self._registered:
            body.append(self.format_citation(cls))
        return os.linesep.join(header + [(2 * os.linesep).join(body)])
    def __repr__(self):
        return "ComponentRegistry({0})".format(repr(self.registered))
# Module-level singleton registry, seeded with the ``_info`` module so that
# the landlab package itself is always listed (see the module docstring:
# registry.registered starts as ('landlab',)).
registry = ComponentRegistry(_info)
|
{
"content_hash": "7f0a9355c30e08b4dd2bb629afdb749f",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 150,
"avg_line_length": 31.93103448275862,
"alnum_prop": 0.5661447084233261,
"repo_name": "cmshobe/landlab",
"id": "e3d8abc710a89287fec6e534049857bc3ab1b3a4",
"size": "11135",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "landlab/_registry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1359"
},
{
"name": "HTML",
"bytes": "99948"
},
{
"name": "Jupyter Notebook",
"bytes": "701992"
},
{
"name": "Makefile",
"bytes": "1924"
},
{
"name": "PowerShell",
"bytes": "7128"
},
{
"name": "Python",
"bytes": "4132304"
},
{
"name": "Shell",
"bytes": "2691"
},
{
"name": "TeX",
"bytes": "19453"
}
],
"symlink_target": ""
}
|
"""Proxy AMI-related calls from cloud controller to objectstore service."""
import binascii
import os
import shutil
import tarfile
import tempfile
from xml.etree import ElementTree
import boto.s3.connection
import eventlet
from nova import crypto
from nova import exception
from nova import flags
from nova import image
from nova import log as logging
from nova import utils
from nova.api.ec2 import ec2utils
LOG = logging.getLogger("nova.image.s3")
FLAGS = flags.FLAGS
# Parent directory under which a per-image temporary working directory is
# created while downloading and decrypting image parts (see _s3_create).
flags.DEFINE_string('image_decryption_dir', '/tmp',
                    'parent dir for tempdir used for image decryption')
# These credentials are not validated by nova-objectstore (see the note in
# S3ImageService._conn), hence the 'notchecked' defaults.
flags.DEFINE_string('s3_access_key', 'notchecked',
                    'access key to use for s3 server for images')
flags.DEFINE_string('s3_secret_key', 'notchecked',
                    'secret key to use for s3 server for images')
class S3ImageService(object):
    """Wraps an existing image service to support s3 based register."""
    def __init__(self, service=None, *args, **kwargs):
        # Delegate everything except registration to the wrapped service.
        self.service = service or image.get_default_image_service()
        self.service.__init__(*args, **kwargs)
    def create(self, context, metadata, data=None):
        """Create an image.
        metadata['properties'] should contain image_location.
        """
        image = self._s3_create(context, metadata)
        return image
    def delete(self, context, image_id):
        self.service.delete(context, image_id)
    def update(self, context, image_id, metadata, data=None):
        image = self.service.update(context, image_id, metadata, data)
        return image
    def index(self, context):
        return self.service.index(context)
    def detail(self, context):
        return self.service.detail(context)
    def show(self, context, image_id):
        return self.service.show(context, image_id)
    def show_by_name(self, context, name):
        return self.service.show_by_name(context, name)
    @staticmethod
    def _conn(context):
        """Build a boto S3 connection to the objectstore from FLAGS."""
        # NOTE(vish): access and secret keys for s3 server are not
        # checked in nova-objectstore
        access = FLAGS.s3_access_key
        secret = FLAGS.s3_secret_key
        calling = boto.s3.connection.OrdinaryCallingFormat()
        return boto.s3.connection.S3Connection(aws_access_key_id=access,
                                               aws_secret_access_key=secret,
                                               is_secure=False,
                                               calling_format=calling,
                                               port=FLAGS.s3_port,
                                               host=FLAGS.s3_host)
    @staticmethod
    def _download_file(bucket, filename, local_dir):
        """Fetch one key from the bucket into local_dir; return its path."""
        key = bucket.get_key(filename)
        local_filename = os.path.join(local_dir, filename)
        key.get_contents_to_filename(local_filename)
        return local_filename
    def _s3_parse_manifest(self, context, metadata, manifest):
        """Parse an EC2 bundle manifest XML string and create a queued image.

        Returns the parsed ElementTree manifest and the created image dict.
        """
        manifest = ElementTree.fromstring(manifest)
        image_format = 'ami'
        image_type = 'machine'
        # NOTE(review): image_type is computed below but never used — confirm
        # before removing.
        # A literal 'true' kernel_id marks the image itself as a kernel (aki)
        # image — presumably per the EC2 bundle format; TODO confirm.
        try:
            kernel_id = manifest.find('machine_configuration/kernel_id').text
            if kernel_id == 'true':
                image_format = 'aki'
                image_type = 'kernel'
                kernel_id = None
        except Exception:
            kernel_id = None
        # Same convention for ramdisk (ari) images.
        try:
            ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text
            if ramdisk_id == 'true':
                image_format = 'ari'
                image_type = 'ramdisk'
                ramdisk_id = None
        except Exception:
            ramdisk_id = None
        try:
            arch = manifest.find('machine_configuration/architecture').text
        except Exception:
            arch = 'x86_64'
        # NOTE(yamahata):
        # EC2 ec2-bundle-image --block-device-mapping accepts
        # <virtual name>=<device name> where
        # virtual name = {ami, root, swap, ephemeral<N>}
        # where N is no negative integer
        # device name = the device name seen by guest kernel.
        # They are converted into
        # block_device_mapping/mapping/{virtual, device}
        #
        # Do NOT confuse this with ec2-register's block device mapping
        # argument.
        mappings = []
        try:
            block_device_mapping = manifest.findall('machine_configuration/'
                                                    'block_device_mapping/'
                                                    'mapping')
            for bdm in block_device_mapping:
                mappings.append({'virtual': bdm.find('virtual').text,
                                 'device': bdm.find('device').text})
        except Exception:
            mappings = []
        properties = metadata['properties']
        properties['project_id'] = context.project_id
        properties['architecture'] = arch
        # Translate EC2-style ids into internal ids for the image service.
        if kernel_id:
            properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)
        if ramdisk_id:
            properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
        if mappings:
            properties['mappings'] = mappings
        metadata.update({'disk_format': image_format,
                         'container_format': image_format,
                         'status': 'queued',
                         'is_public': False,
                         'properties': properties})
        metadata['properties']['image_state'] = 'pending'
        image = self.service.create(context, metadata)
        return manifest, image
    def _s3_create(self, context, metadata):
        """Gets a manifest from s3 and makes an image."""
        image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
        image_location = metadata['properties']['image_location']
        bucket_name = image_location.split('/')[0]
        manifest_path = image_location[len(bucket_name) + 1:]
        bucket = self._conn(context).get_bucket(bucket_name)
        key = bucket.get_key(manifest_path)
        manifest = key.get_contents_as_string()
        manifest, image = self._s3_parse_manifest(context, metadata, manifest)
        image_id = image['id']
        def delayed_create():
            """This handles the fetching and decrypting of the part files.

            Runs asynchronously (spawned via eventlet below) and records
            progress in metadata['properties']['image_state']:
            downloading -> decrypting -> untarring -> uploading -> available,
            or a failed_* state on error.
            """
            # NOTE(review): on every failure path below the tempdir
            # image_path is left behind — only the success path removes it.
            # TODO: clean up in a finally block.
            log_vars = {'image_location': image_location,
                        'image_path': image_path}
            metadata['properties']['image_state'] = 'downloading'
            self.service.update(context, image_id, metadata)
            try:
                parts = []
                elements = manifest.find('image').getiterator('filename')
                # Download each listed part, then concatenate them into a
                # single encrypted blob.
                for fn_element in elements:
                    part = self._download_file(bucket,
                                               fn_element.text,
                                               image_path)
                    parts.append(part)
                # NOTE(vish): this may be suboptimal, should we use cat?
                enc_filename = os.path.join(image_path, 'image.encrypted')
                with open(enc_filename, 'w') as combined:
                    for filename in parts:
                        with open(filename) as part:
                            shutil.copyfileobj(part, combined)
            except Exception:
                LOG.exception(_("Failed to download %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_download'
                self.service.update(context, image_id, metadata)
                return
            metadata['properties']['image_state'] = 'decrypting'
            self.service.update(context, image_id, metadata)
            try:
                # The AES key and IV are themselves RSA-encrypted and
                # hex-encoded in the manifest.
                hex_key = manifest.find('image/ec2_encrypted_key').text
                encrypted_key = binascii.a2b_hex(hex_key)
                hex_iv = manifest.find('image/ec2_encrypted_iv').text
                encrypted_iv = binascii.a2b_hex(hex_iv)
                # FIXME(vish): grab key from common service so this can run on
                #              any host.
                cloud_pk = crypto.key_path(context.project_id)
                dec_filename = os.path.join(image_path, 'image.tar.gz')
                self._decrypt_image(enc_filename, encrypted_key,
                                    encrypted_iv, cloud_pk,
                                    dec_filename)
            except Exception:
                LOG.exception(_("Failed to decrypt %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_decrypt'
                self.service.update(context, image_id, metadata)
                return
            metadata['properties']['image_state'] = 'untarring'
            self.service.update(context, image_id, metadata)
            try:
                unz_filename = self._untarzip_image(image_path, dec_filename)
            except Exception:
                LOG.exception(_("Failed to untar %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_untar'
                self.service.update(context, image_id, metadata)
                return
            metadata['properties']['image_state'] = 'uploading'
            self.service.update(context, image_id, metadata)
            try:
                with open(unz_filename) as image_file:
                    self.service.update(context, image_id,
                                        metadata, image_file)
            except Exception:
                LOG.exception(_("Failed to upload %(image_location)s "
                                "to %(image_path)s"), log_vars)
                metadata['properties']['image_state'] = 'failed_upload'
                self.service.update(context, image_id, metadata)
                return
            metadata['properties']['image_state'] = 'available'
            metadata['status'] = 'active'
            self.service.update(context, image_id, metadata)
            shutil.rmtree(image_path)
        # Return immediately; the heavy lifting happens in a greenthread.
        eventlet.spawn_n(delayed_create)
        return image
    @staticmethod
    def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
                       cloud_private_key, decrypted_filename):
        """Decrypt an image blob with openssl (RSA-wrapped AES-128-CBC).

        Raises nova.exception.Error if any openssl step reports an error.
        """
        # Step 1: recover the AES key with the cloud's RSA private key.
        key, err = utils.execute('openssl',
                                 'rsautl',
                                 '-decrypt',
                                 '-inkey', '%s' % cloud_private_key,
                                 process_input=encrypted_key,
                                 check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt private key: %s')
                                  % err)
        # Step 2: recover the AES initialization vector the same way.
        iv, err = utils.execute('openssl',
                                'rsautl',
                                '-decrypt',
                                '-inkey', '%s' % cloud_private_key,
                                process_input=encrypted_iv,
                                check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt initialization '
                                    'vector: %s') % err)
        # Step 3: decrypt the image payload with the recovered key and IV.
        _out, err = utils.execute('openssl', 'enc',
                                  '-d', '-aes-128-cbc',
                                  '-in', '%s' % (encrypted_filename,),
                                  '-K', '%s' % (key,),
                                  '-iv', '%s' % (iv,),
                                  '-out', '%s' % (decrypted_filename,),
                                  check_exit_code=False)
        if err:
            raise exception.Error(_('Failed to decrypt image file '
                                    '%(image_file)s: %(err)s') %
                                  {'image_file': encrypted_filename,
                                   'err': err})
    @staticmethod
    def _untarzip_image(path, filename):
        """Extract a .tar.gz bundle into path; return the first member's path."""
        tar_file = tarfile.open(filename, 'r|gz')
        tar_file.extractall(path)
        # The bundle is expected to contain the image as its first member.
        image_file = tar_file.getnames()[0]
        tar_file.close()
        return os.path.join(path, image_file)
|
{
"content_hash": "d8ef961c3a0cd210de024ba88b8078a8",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 79,
"avg_line_length": 40.17377049180328,
"alnum_prop": 0.5200359095731657,
"repo_name": "30loops/nova",
"id": "3435558878649adc74b0ff65e3bdc530f29d2952",
"size": "13030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/image/s3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from combat.finishers.impale import Impale
from combat.finishers.chokepunch import ChokePunch
from combat.finishers.crushskull import CrushSkull
|
{
"content_hash": "6a30d038b98c008ab7858813be2c4322",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 50,
"avg_line_length": 48.333333333333336,
"alnum_prop": 0.8758620689655172,
"repo_name": "ChrisLR/Python-Roguelike-Template",
"id": "8ab19f7b99a7ca76cf958436b050f420cc17bea9",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "combat/finishers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "267082"
}
],
"symlink_target": ""
}
|
# Package metadata for the NodeBox Graph library.
__author__ = "Tom De Smedt"
__version__ = "1.9.5.6"
__copyright__ = "Copyright (c) 2008 Tom De Smedt"
__license__ = "GPL"
######################################################################################################
import cluster
import event
import layout
import proximity
import style
#### GRAPH NODE ######################################################################################
class node:
    """ A node with a unique id in the graph.
        Its position (vx, vy) is calculated by graph.layout.
        The node's radius and style define how it looks onscreen.
        Extra entries in properties become attributes on the node.
    """
    def __init__(self, graph, id="", radius=8, style=style.DEFAULT, category="", label=None,
                 properties=None):
        # ``properties=None`` instead of a mutable ``{}`` default, which
        # would be shared between all calls.
        self.graph = graph
        self.id = id
        self.category = category
        self.label = label or self.id
        self.links = links()
        self.vx = 0
        self.vy = 0
        self.force = layout.Point(0, 0)
        self.r = radius
        self.style = style
        self._visited = False
        # Centrality weights are computed lazily by the graph (see below).
        self._betweenness = None
        self._eigenvalue = None
        # Attach extra properties as attributes, never clobbering existing ones.
        for k, v in (properties or {}).items():
            if k not in self.__dict__:
                self.__dict__[k] = v
    def _edges(self):
        return self.links._edges.values()
    edges = property(_edges)
    def _is_leaf(self):
        return len(self.links) == 1
    is_leaf = property(_is_leaf)
    def can_reach(self, node, traversable=lambda node, edge: True):
        """ Returns True if given node can be reached over traversable edges.
            To enforce edge direction, use a node==edge.node1 traversable.
        """
        if isinstance(node, str):
            node = self.graph[node]
        # Clear visited flags left over from a previous traversal.
        for n in self.graph.nodes:
            n._visited = False
        return proximity.depth_first_search(self,
            visit=lambda n: node == n,
            traversable=traversable
        )
    def _get_betweenness(self):
        # Computed lazily, for the whole graph at once, on first access.
        if self._betweenness is None:
            self.graph.betweenness_centrality()
        return self._betweenness
    betweenness = property(_get_betweenness)
    traffic = betweenness
    def _get_eigenvalue(self):
        if self._eigenvalue is None:
            self.graph.eigenvector_centrality()
        return self._eigenvalue
    eigenvalue = property(_get_eigenvalue)
    weight = eigenvalue
    # Absolute position: the layout position scaled by the graph's d.
    def _x(self): return self.vx * self.graph.d
    def _y(self): return self.vy * self.graph.d
    x = property(_x)
    y = property(_y)
    def __contains__(self, pt):
        """ True if pt.x, pt.y is inside the node's absolute position.
        """
        return abs(self.graph.x + self.x - pt.x) < self.r * 2 and \
               abs(self.graph.y + self.y - pt.y) < self.r * 2
    def flatten(self, distance=1):
        return cluster.flatten(self, distance)
    def __and__(self, node, distance=1):
        return cluster.intersection(
            self.flatten(distance), node.flatten(distance))
    def __or__(self, node, distance=1):
        return cluster.union(
            self.flatten(distance), node.flatten(distance))
    def __sub__(self, node, distance=1):
        return cluster.difference(
            self.flatten(distance), node.flatten(distance))
    def __repr__(self):
        try:
            return "<" + str(self.id) + " node>"
        except Exception:
            # Fallback for unicode ids that str() cannot represent (Py2).
            return "<" + self.id.encode("utf-8") + " node>"
    def __str__(self):
        try:
            return str(self.id)
        except Exception:
            return self.id.encode("utf-8")
    def __eq__(self, node):
        # Nodes are equal when they share an id (and both are nodes).
        if not isinstance(node, self.__class__):
            return False
        return self.id == node.id
#### GRAPH NODE LINKS ################################################################################
class links(list):
    """ A list of nodes in which each node can have an associated edge.
        The edge() method returns the edge for a given node (or node id).
    """
    def __init__(self):
        self._edges = dict()
    def append(self, node, edge=None):
        # Only remember an edge when one is supplied.
        if edge: self._edges[node.id] = edge
        list.append(self, node)
    def remove(self, node):
        # dict.has_key() was removed in Python 3; the `in` operator is the
        # portable equivalent.
        if node.id in self._edges: del self._edges[node.id]
        list.remove(self, node)
    def edge(self, id):
        # Accept either a node-like object (anything with an .id) or a
        # plain id, instead of type-checking against the node class.
        id = getattr(id, "id", id)
        return self._edges[id]
##### GRAPH EDGE #####################################################################################
class edge(object):
    """ A connection between node1 and node2.
        weight (0.0-1.0) expresses the importance of the connection;
        length influences the layout and is clamped to a minimum of 0.1.
        Extra entries in properties become attributes on the edge.
    """
    def __init__(self, node1, node2, weight=0.0, length=1.0, label="", properties=None):
        # ``properties=None`` instead of a mutable ``{}`` default, which
        # would be shared between all calls.
        self.node1 = node1
        self.node2 = node2
        self.weight = weight
        self.length = length  # routed through the length property setter
        self.label = label
        # Attach extra properties as attributes, never clobbering existing ones.
        for k, v in (properties or {}).items():
            if k not in self.__dict__:
                self.__dict__[k] = v
    def _get_length(self):
        return self._length
    def _set_length(self, v):
        # Zero or negative lengths would break the layout; clamp to 0.1.
        self._length = max(0.1, v)
    length = property(_get_length, _set_length)
#### GRAPH ###########################################################################################
# Names of the available layout algorithms (resolved in graph.__init__ as
# layout_.<name>_layout).
LAYOUT_CIRCLE = "circle"
LAYOUT_SPRING = "spring"
layout_ = layout # there's also a "layout" parameter in graph.__init__()
class graph(dict):
def __init__(self, iterations=1000, distance=1.0, layout=LAYOUT_SPRING):
self.nodes = []
self.edges = []
self.root = None
# Calculates positions for nodes.
self.layout = layout_.__dict__[layout+"_layout"](self, iterations)
self.d = node(None).r * 2.5 * distance
# Hover, click and drag event handler.
self.events = event.events(self, _ctx)
# Enhanced dictionary of all styles.
self.styles = style.styles(self)
self.styles.append(style.style(style.DEFAULT, _ctx))
self.alpha = 0
# Try to specialize intensive math operations.
try:
import psyco
psyco.bind(self.layout._bounds)
psyco.bind(self.layout.iterate)
psyco.bind(self.__or__)
psyco.bind(cluster.flatten)
psyco.bind(cluster.subgraph)
psyco.bind(cluster.clique)
psyco.bind(cluster.partition)
psyco.bind(proximity.dijkstra_shortest_path)
psyco.bind(proximity.brandes_betweenness_centrality)
psyco.bind(proximity.eigenvector_centrality)
psyco.bind(style.edge_arrow)
psyco.bind(style.edge_label)
#print "using psyco"
except:
pass
def _get_distance(self):
return self.d / (node(None).r * 2.5)
def _set_distance(self, value):
self.d = node(None).r * 2.5 * value
distance = property(_get_distance, _set_distance)
def copy(self, empty=False):
""" Create a copy of the graph (by default with nodes and edges).
"""
g = graph(self.layout.n, self.distance, self.layout.type)
g.layout = self.layout.copy(g)
g.styles = self.styles.copy(g)
g.events = self.events.copy(g)
if not empty:
for n in self.nodes:
g.add_node(n.id, n.r, n.style, n.category, n.label, (n == self.root), n.__dict__)
for e in self.edges:
g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)
return g
def clear(self):
""" Remove nodes and edges and reset the layout.
"""
dict.clear(self)
self.nodes = []
self.edges = []
self.root = None
self.layout.i = 0
self.alpha = 0
def new_node(self, *args, **kwargs):
""" Returns a node object; can be overloaded when the node class is subclassed.
"""
return node(*args, **kwargs)
def new_edge(self, *args, **kwargs):
""" Returns an edge object; can be overloaded when the edge class is subclassed.
"""
return edge(*args, **kwargs)
def add_node(self, id, radius=8, style=style.DEFAULT, category="", label=None, root=False,
properties={}):
""" Add node from id and return the node object.
"""
if self.has_key(id):
return self[id]
if not isinstance(style, str) and style.__dict__.has_key["name"]:
style = style.name
n = self.new_node(self, id, radius, style, category, label, properties)
self[n.id] = n
self.nodes.append(n)
if root: self.root = n
return n
def add_nodes(self, nodes):
""" Add nodes from a list of id's.
"""
try: [self.add_node(n) for n in nodes]
except:
pass
def add_edge(self, id1, id2, weight=0.0, length=1.0, label="", properties={}):
""" Add weighted (0.0-1.0) edge between nodes, creating them if necessary.
The weight represents the importance of the connection (not the cost).
"""
if id1 == id2: return None
if not self.has_key(id1): self.add_node(id1)
if not self.has_key(id2): self.add_node(id2)
n1 = self[id1]
n2 = self[id2]
# If a->b already exists, don't re-create it.
# However, b->a may still pass.
if n1 in n2.links:
if n2.links.edge(n1).node1 == n1:
return self.edge(id1, id2)
weight = max(0.0, min(weight, 1.0))
e = self.new_edge(n1, n2, weight, length, label, properties)
self.edges.append(e)
n1.links.append(n2, e)
n2.links.append(n1, e)
return e
def remove_node(self, id):
""" Remove node with given id.
"""
if self.has_key(id):
n = self[id]
self.nodes.remove(n)
del self[id]
# Remove all edges involving id and all links to it.
for e in list(self.edges):
if n in (e.node1, e.node2):
if n in e.node1.links:
e.node1.links.remove(n)
if n in e.node2.links:
e.node2.links.remove(n)
self.edges.remove(e)
def remove_edge(self, id1, id2):
""" Remove edges between nodes with given id's.
"""
for e in list(self.edges):
if id1 in (e.node1.id, e.node2.id) and \
id2 in (e.node1.id, e.node2.id):
e.node1.links.remove(e.node2)
e.node2.links.remove(e.node1)
self.edges.remove(e)
def node(self, id):
""" Returns the node in the graph associated with the given id.
"""
if self.has_key(id):
return self[id]
return None
def edge(self, id1, id2):
""" Returns the edge between the nodes with given id1 and id2.
"""
if id1 in self and \
id2 in self and \
self[id2] in self[id1].links:
return self[id1].links.edge(id2)
return None
def __getattr__(self, a):
""" Returns the node in the graph associated with the given id.
"""
if self.has_key(a):
return self[a]
raise AttributeError, "graph object has no attribute '"+str(a)+"'"
def update(self, iterations=10):
""" Iterates the graph layout and updates node positions.
"""
# The graph fades in when initially constructed.
self.alpha += 0.05
self.alpha = min(self.alpha, 1.0)
# Iterates over the graph's layout.
# Each step the graph's bounds are recalculated
# and a number of iterations are processed,
# more and more as the layout progresses.
if self.layout.i == 0:
self.layout.prepare()
self.layout.i += 1
elif self.layout.i == 1:
self.layout.iterate()
elif self.layout.i < self.layout.n:
n = min(iterations, self.layout.i / 10 + 1)
for i in range(n):
self.layout.iterate()
# Calculate the absolute center of the graph.
min_, max = self.layout.bounds
self.x = _ctx.WIDTH - max.x*self.d - min_.x*self.d
self.y = _ctx.HEIGHT - max.y*self.d - min_.y*self.d
self.x /= 2
self.y /= 2
return not self.layout.done
def solve(self):
""" Iterates the graph layout until done.
"""
self.layout.solve()
self.alpha = 1.0
def _done(self):
return self.layout.done
done = property(_done)
def offset(self, node):
""" Returns the distance from the center to the given node.
"""
x = self.x + node.x - _ctx.WIDTH/2
y = self.y + node.y - _ctx.HEIGHT/2
return x, y
def draw(self, dx=0, dy=0, weighted=False, directed=False, highlight=[], traffic=None):
""" Layout the graph incrementally.
The graph is drawn at the center of the canvas.
The weighted and directed parameters visualize edge weight and direction.
The highlight specifies list of connected nodes.
The path will be colored according to the "highlight" style.
Clicking and dragging events are monitored.
"""
self.update()
# Draw the graph background.
s = self.styles.default
s.graph_background(s)
# Center the graph on the canvas.
_ctx.push()
_ctx.translate(self.x+dx, self.y+dy)
# Indicate betweenness centrality.
if traffic:
if isinstance(traffic, bool):
traffic = 5
for n in self.nodes_by_betweenness()[:traffic]:
try: s = self.styles[n.style]
except: s = self.styles.default
if s.graph_traffic:
s.graph_traffic(s, n, self.alpha)
# Draw the edges and their labels.
s = self.styles.default
if s.edges:
s.edges(s, self.edges, self.alpha, weighted, directed)
# Draw each node in the graph.
# Apply individual style to each node (or default).
for n in self.nodes:
try: s = self.styles[n.style]
except: s = self.styles.default
if s.node:
s.node(s, n, self.alpha)
# Highlight the given shortest path.
try: s = self.styles.highlight
except: s = self.styles.default
if s.path:
s.path(s, self, highlight)
# Draw node id's as labels on each node.
for n in self.nodes:
try: s = self.styles[n.style]
except: s = self.styles.default
if s.node_label:
s.node_label(s, n, self.alpha)
# Events for clicked and dragged nodes.
# Nodes will resist being dragged by attraction and repulsion,
# put the event listener on top to get more direct feedback.
self.events.update()
_ctx.pop()
def prune(self, depth=0):
""" Removes all nodes with less or equal links than depth.
"""
for n in list(self.nodes):
if len(n.links) <= depth:
self.remove_node(n.id)
trim = prune
def shortest_path(self, id1, id2, heuristic=None, directed=False):
""" Returns a list of node id's connecting the two nodes.
"""
try: return proximity.dijkstra_shortest_path(self, id1, id2, heuristic, directed)
except:
return None
def betweenness_centrality(self, normalized=True, directed=False):
""" Calculates betweenness centrality and returns an node id -> weight dictionary.
Node betweenness weights are updated in the process.
"""
bc = proximity.brandes_betweenness_centrality(self, normalized, directed)
for id, w in bc.iteritems(): self[id]._betweenness = w
return bc
def eigenvector_centrality(self, normalized=True, reversed=True, rating={},
start=None, iterations=100, tolerance=0.0001):
""" Calculates eigenvector centrality and returns an node id -> weight dictionary.
Node eigenvalue weights are updated in the process.
"""
ec = proximity.eigenvector_centrality(
self, normalized, reversed, rating, start, iterations, tolerance
)
for id, w in ec.iteritems(): self[id]._eigenvalue = w
return ec
def nodes_by_betweenness(self, treshold=0.0):
""" Returns nodes sorted by betweenness centrality.
Nodes with a lot of passing traffic will be at the front of the list.
"""
nodes = [(n.betweenness, n) for n in self.nodes if n.betweenness > treshold]
nodes.sort(); nodes.reverse()
return [n for w, n in nodes]
nodes_by_traffic = nodes_by_betweenness
def nodes_by_eigenvalue(self, treshold=0.0):
""" Returns nodes sorted by eigenvector centrality.
Nodes with a lot of incoming traffic will be at the front of the list
"""
nodes = [(n.eigenvalue, n) for n in self.nodes if n.eigenvalue > treshold]
nodes.sort(); nodes.reverse()
return [n for w, n in nodes]
nodes_by_weight = nodes_by_eigenvalue
def nodes_by_category(self, category):
""" Returns nodes with the given category attribute.
"""
return [n for n in self.nodes if n.category == category]
def _leaves(self):
""" Returns a list of nodes that have only one connection.
"""
return [node for node in self.nodes if node.is_leaf]
leaves = property(_leaves)
def crown(self, depth=2):
""" Returns a list of leaves, nodes connected to leaves, etc.
"""
nodes = []
for node in self.leaves: nodes += node.flatten(depth-1)
return cluster.unique(nodes)
fringe = crown
def _density(self):
""" The number of edges in relation to the total number of possible edges.
"""
return 2.0*len(self.edges) / (len(self.nodes) * (len(self.nodes)-1))
density = property(_density)
def _is_complete(self) : return self.density == 1.0
def _is_dense(self) : return self.density > 0.65
def _is_sparse(self) : return self.density < 0.35
is_complete = property(_is_complete)
is_dense = property(_is_dense)
is_sparse = property(_is_sparse)
    def sub(self, id, distance=1):
        """ Returns the subgraph around the given node id,
        delegating to cluster.subgraph().
        """
        return cluster.subgraph(self, id, distance)
    subgraph = sub
def __and__(self, graph):
nodes = cluster.intersection(cluster.flatten(self), cluster.flatten(graph))
all = self | graph
return cluster.subgraph(all, nodes, 0)
intersect = __and__
def __or__(self, graph):
g = self.copy()
for n in graph.nodes:
root = (g.root==None and graph.root==n)
g.add_node(n.id, n.r, n.style, n.category, n.label, root, n.__dict__)
for e in graph.edges:
g.add_edge(e.node1.id, e.node2.id, e.weight, e.length, e.label, e.__dict__)
return g
join = __or__
def __sub__(self, graph):
nodes = cluster.difference(cluster.flatten(self), cluster.flatten(graph))
all = self | graph
return cluster.subgraph(all, nodes, 0)
subtract = __sub__
    def _is_clique(self):
        # Delegates the clique test to cluster.is_clique().
        return cluster.is_clique(self)
    is_clique = property(_is_clique)
    def clique(self, id, distance=0):
        """ Returns the subgraph built from cluster.clique() around the given
        node id, expanded by the given distance.
        """
        return cluster.subgraph(self, cluster.clique(self, id), distance)
def cliques(self, threshold=3, distance=0):
g = []
c = cluster.cliques(self, threshold)
for nodes in c: g.append(cluster.subgraph(self, nodes, distance))
return g
    def split(self):
        """ Splits the graph into a list of subgraphs via cluster.partition().
        """
        return cluster.partition(self)
### DYNAMIC GRAPH ####################################################################################
class xgraph(graph):

    """ A dynamic graph where a clicked node loads new data.
    Nodes are clickable and will load a new graph based on
    the following methods (that need to be subclassed or monkey patched):
    1) has_node(id): returns True when the id is a node in the dataset.
    2) get_links(id): a list of (weight, id) tuples directly connected to the node
    3) get_cluster(id): a list of (weight, id, [links]) tuples of node id's that are
       connected to the given node via the node id's in the links list (distance 2).
    The idea is that you have a dataset stored in files or a database,
    and use the dynamic graph's method to describe how the data is read
    and interlinked. The graph is then automatically kept up to date
    as you browse through the connected nodes.
    """

    def __init__(self, iterations=500, distance=1.0, layout=LAYOUT_SPRING):
        graph.__init__(self, iterations, distance, layout)
        self.styles = create().styles
        self.events.click = self.click
        self.max = 20
        self._dx = 0
        self._dy = 0
        # Id of the previous root node; used as a backlink in load().
        # Initialized here so load() is safe even before any click happened.
        self.previous = None

    def has_node(self, id):
        # Override: return True when the id exists in the dataset.
        return True

    def get_links(self, id):
        # Override: return a list of (weight, id) tuples for direct links.
        return []

    def get_cluster(self, id):
        # Override: return (weight, id, [links]) tuples for distance-2 nodes.
        return []

    def load(self, id):
        """ Rebuilds the graph around the given node id.
        """
        self.clear()
        # Root node.
        self.add_node(id, root=True)
        # Directly connected nodes have priority.
        for w, id2 in self.get_links(id):
            self.add_edge(id, id2, weight=w)
            if len(self) > self.max:
                break
        # Now get all the other nodes in the cluster.
        for w, id2, links in self.get_cluster(id):
            for id3 in links:
                self.add_edge(id3, id2, weight=w)
                self.add_edge(id, id3, weight=w)
            if len(self) > self.max:
                break
        # Provide a backlink to the previous root.
        # Fix: the original line read "if self.event.clicked:
        # g.add_node(self.event.clicked)", which raised a NameError ("g" is
        # undefined) and referenced a nonexistent "self.event" attribute
        # (the class uses "self.events"). Use the previous root id instead,
        # which matches the intent stated in the comment above.
        if self.previous is not None:
            self.add_node(self.previous)

    def click(self, node):
        """ Callback from graph.events when a node is clicked.
        """
        if not self.has_node(node.id): return
        if node == self.root: return
        self._dx, self._dy = self.offset(node)
        self.previous = self.root.id
        self.load(node.id)

    def draw(self, weighted=False, directed=False, highlight=[], traffic=None):
        """ Draws the graph, easing in from the clicked node's position.
        """
        # A new graph unfolds from the position of the clicked node.
        graph.draw(self, self._dx, self._dy,
            weighted, directed, highlight, traffic
        )
        self._dx *= 0.9
        self._dy *= 0.9
#### COMMANDS ########################################################################################
def create(iterations=1000, distance=1.0, layout=LAYOUT_SPRING, depth=True):
    """ Returns a new graph with predefined styling.
    """
    global _ctx
    try:
        from nodebox.graphics import RGB
        #_ctx.colormode(RGB)
        g = graph(iterations, distance, layout)
    except:
        #_ctx = None
        g = graph(iterations, distance, layout)
    return g
    # NOTE(review): the "return g" above makes everything below unreachable,
    # so none of the predefined styles, style guides or node behaviors are
    # ever installed. Presumably a hack to run outside NodeBox, where _ctx
    # is unavailable - confirm intent before removing.
    # Styles for different types of nodes.
    s = style.style
    g.styles.append(s(style.LIGHT , _ctx, fill = _ctx.color(0.0, 0.0, 0.0, 0.20)))
    g.styles.append(s(style.DARK , _ctx, fill = _ctx.color(0.3, 0.5, 0.7, 0.75)))
    g.styles.append(s(style.BACK , _ctx, fill = _ctx.color(0.5, 0.8, 0.0, 0.50)))
    g.styles.append(s(style.IMPORTANT, _ctx, fill = _ctx.color(0.3, 0.6, 0.8, 0.75)))
    g.styles.append(s(style.HIGHLIGHT, _ctx, stroke = _ctx.color(1.0, 0.0, 0.5), strokewidth=1.5))
    g.styles.append(s(style.MARKED , _ctx))
    g.styles.append(s(style.ROOT , _ctx, text = _ctx.color(1.0, 0.0, 0.4, 1.00),
                                  stroke = _ctx.color(0.8, 0.8, 0.8, 0.60),
                                  strokewidth = 1.5,
                                  fontsize = 16,
                                  textwidth = 150))
    # Important nodes get a double stroke.
    def important_node(s, node, alpha=1.0):
        style.style(None, _ctx).node(s, node, alpha)
        r = node.r * 1.4
        _ctx.nofill()
        _ctx.oval(node.x-r, node.y-r, r*2, r*2)
    # Marked nodes have an inner dot.
    def marked_node(s, node, alpha=1.0):
        style.style(None, _ctx).node(s, node, alpha)
        r = node.r * 0.3
        _ctx.fill(s.stroke)
        _ctx.oval(node.x-r, node.y-r, r*2, r*2)
    g.styles.important.node = important_node
    g.styles.marked.node = marked_node
    g.styles.depth = depth
    # Styling guidelines. All nodes have the default style, except:
    # 1) a node directly connected to the root gets the LIGHT style.
    # 2) a node with more than 4 edges gets the DARK style.
    # 3) a node with a weight of 0.75-1.0 gets the IMPORTANT style.
    # 4) the graph.root node gets the ROOT style.
    # 5) the node last clicked gets the BACK style.
    g.styles.guide.append(style.LIGHT , lambda graph, node: graph.root in node.links)
    g.styles.guide.append(style.DARK , lambda graph, node: len(node.links) > 4)
    g.styles.guide.append(style.IMPORTANT , lambda graph, node: node.weight > 0.75)
    g.styles.guide.append(style.ROOT , lambda graph, node: node == graph.root)
    g.styles.guide.append(style.BACK , lambda graph, node: node == graph.events.clicked)
    # An additional rule applies every node's weight to its radius.
    def balance(graph, node):
        node.r = node.r*0.75 + node.r*node.weight*0.75
    g.styles.guide.append("balance", balance)
    # An additional rule that keeps leaf nodes closely clustered.
    def cluster(graph, node):
        if len(node.links) == 1:
            node.links.edge(node.links[0]).length *= 0.5
    g.styles.guide.append("cluster", cluster)
    # NOTE(review): the order list below references "nurse" but the guide
    # above is registered under the name "cluster" - confirm which name is
    # intended (the 1.9.5 changelog calls this the "nurse" behavior).
    g.styles.guide.order = [
        style.LIGHT, style.DARK, style.IMPORTANT, style.ROOT, style.BACK, "balance", "nurse"
    ]
    return g
# 1.9.5.6
# Fixed circle_layout copy (number of orbits and starting angle weren't copied).
# 1.9.5.5
# graph.add_node and graph.add_edge call graph.new_node and graph.new_edge respectively.
# This should make subclassing nodes and edges a little easier.
# 1.9.5.4
# Fixed bug in spring_layout.tweak().
# Added directed=False parameter to dijkstra_shortest_path() and brandes_betweenness_centrality().
# 1.9.5.3
# Copies of nodes and edges correctly copy arbitrary attributes,
# e.g. edge.context, edge.relation and edge.author in a Perception graph.
# 1.9.5.2
# Reverted to old cluster.unique() (less fast but retains sort order).
# 1.9.5.1
# graph.draw() in push/pop.
# graph.node_id works like graph.node(id).
# Added graph.leaves property.
# Added graph.fringe() method.
# Added node.is_leaf property.
# Added node.can_reach().
# Added proximity.depth_first_search().
# graph.style.align supports RIGHT and CENTER.
# graph.layout.refresh() False rekindles the animation.
# import graph works outside NodeBox.
# 1.9.5
# Changed property names in spring_layout class.
# Added orbit property to circle_layout.
# Added force and repulsion properties to spring_layout.
# Increased default repulsion radius from 7 to 15.
# Added nurse behavior to the styleguide (edge length for leaves is 0.5).
# 1.9.4
# Edges now have a length property that controls individual attraction.
# 1.9.2.1
# proximity.eigenvector_centrality() yields warning
# instead of exception if it does not converge.
# Added heuristic parameter to proximity.dijkstra_shortest_path().
# Added layout.spring_layout.tweak()
# Added cluster.partition()
|
{
"content_hash": "b372a1e67371190107d37259686db4a5",
"timestamp": "",
"source": "github",
"line_count": 845,
"max_line_length": 102,
"avg_line_length": 33.57869822485207,
"alnum_prop": 0.5491647282723621,
"repo_name": "est/nodebox-gl",
"id": "aebef07c99615611856d6725958dd2cea1c65077",
"size": "29061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libraries/graph/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "998878"
},
{
"name": "Perl",
"bytes": "48202"
},
{
"name": "Python",
"bytes": "858930"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import GafferSceneTest
class OSLTestCase( GafferSceneTest.SceneTestCase ) :

	def compileShader( self, sourceFileName ) :
		"""Compiles the given .osl source file into the temporary directory
		and returns the compiled shader's path without the .oso extension,
		suitable for passing to a shader loader.

		Raises subprocess.CalledProcessError if oslc fails.
		"""

		baseName = os.path.splitext( os.path.basename( sourceFileName ) )[0]
		outputFileName = os.path.join( self.temporaryDirectory(), baseName + ".oso" )

		# Skip empty entries so an unset or empty OSL_SHADER_PATHS does not
		# produce a bare "-I" argument, which oslc would misinterpret.
		includeFlags = [ "-I" + p for p in os.environ.get( "OSL_SHADER_PATHS", "" ).split( ":" ) if p ]

		subprocess.check_call(
			[ "oslc", "-q" ] + includeFlags + [ "-o", outputFileName, sourceFileName ]
		)

		return os.path.splitext( outputFileName )[0]
|
{
"content_hash": "b37c4a4249f95fd5c4337faa985acc01",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 119,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.6666666666666666,
"repo_name": "andrewkaufman/gaffer",
"id": "13b91fde10a2c8cce6dd24370115e336bbd8dbf0",
"size": "2289",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "python/GafferOSLTest/OSLTestCase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5790"
},
{
"name": "C",
"bytes": "61993"
},
{
"name": "C++",
"bytes": "9572701"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "10279312"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14580"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import errno
import logging
import os
import subprocess
import warnings
from future import standard_library
standard_library.install_aliases()
from builtins import str
from collections import OrderedDict
from configparser import ConfigParser
# show Airflow's deprecation warnings
warnings.filterwarnings(
action='default', category=DeprecationWarning, module='airflow')
warnings.filterwarnings(
action='default', category=PendingDeprecationWarning, module='airflow')
class AirflowConfigException(Exception):
    """Raised when the Airflow configuration is invalid or an option cannot
    be resolved (see ConfigParserWithDefaults.get and _validate)."""
    pass
# cryptography is an optional dependency: when it is missing, Fernet stays
# undefined and generate_fernet_key() falls back to a plain-text sentinel.
try:
    from cryptography.fernet import Fernet
except ImportError:
    pass
def generate_fernet_key():
    """Return a freshly generated Fernet key string, or a plain-text
    placeholder when the optional ``cryptography`` package is missing."""
    try:
        key = Fernet.generate_key().decode()
    except NameError:
        # Fernet was never imported (cryptography not installed).
        key = "cryptography_not_found_storing_passwords_in_plain_text"
    return key
def expand_env_var(env_var):
    """
    Expands (potentially nested) env vars by repeatedly applying
    `expandvars` and `expanduser` until interpolation stops having
    any effect.
    """
    # Falsy values (None, empty string) are returned untouched.
    if not env_var:
        return env_var
    current = env_var
    while True:
        expanded = os.path.expanduser(os.path.expandvars(str(current)))
        if expanded == current:
            # Fixed point reached: nothing left to interpolate.
            return expanded
        current = expanded
def run_command(command):
    """
    Runs command and returns stdout
    """
    # Naive whitespace split: the command must not contain quoted arguments.
    proc = subprocess.Popen(
        command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode == 0:
        return out
    raise AirflowConfigException(
        "Cannot execute {}. Error code is: {}. Output: {}, Stderr: {}"
        .format(command, proc.returncode, out, err)
    )
defaults = {
'core': {
'unit_test_mode': False,
'parallelism': 32,
'load_examples': True,
'plugins_folder': None,
'security': None,
'donot_pickle': False,
'remote_base_log_folder': '',
'remote_log_conn_id': '',
'encrypt_s3_logs': False,
's3_log_folder': '', # deprecated!
'dag_concurrency': 16,
'max_active_runs_per_dag': 16,
'executor': 'SequentialExecutor',
'dags_are_paused_at_creation': True,
'sql_alchemy_pool_size': 5,
'sql_alchemy_pool_recycle': 3600,
'dagbag_import_timeout': 30,
'non_pooled_task_slot_count': 128,
},
'operators': {
'default_owner': 'airflow'
},
'webserver': {
'base_url': 'http://localhost:8080',
'web_server_host': '0.0.0.0',
'web_server_port': '8080',
'web_server_worker_timeout': 120,
'authenticate': False,
'filter_by_owner': False,
'demo_mode': False,
'secret_key': 'airflowified',
'expose_config': False,
'workers': 4,
'worker_class': 'sync',
'access_logfile': '',
'error_logfile': '',
},
'scheduler': {
'statsd_on': False,
'statsd_host': 'localhost',
'statsd_port': 8125,
'statsd_prefix': 'airflow',
'job_heartbeat_sec': 5,
'scheduler_heartbeat_sec': 60,
'authenticate': False,
'max_threads': 2,
},
'celery': {
'broker_url': 'sqla+mysql://airflow:airflow@localhost:3306/airflow',
'celery_app_name': 'airflow.executors.celery_executor',
'celery_result_backend': 'db+mysql://airflow:airflow@localhost:3306/airflow',
'celeryd_concurrency': 16,
'default_queue': 'default',
'flower_port': '5555',
'worker_log_server_port': '8793',
},
'email': {
'email_backend': 'airflow.utils.email.send_email_smtp',
},
'smtp': {
'smtp_starttls': True,
'smtp_ssl': False,
'smtp_user': '',
'smtp_password': '',
},
'kerberos': {
'ccache': '/tmp/airflow_krb5_ccache',
'principal': 'airflow', # gets augmented with fqdn
'reinit_frequency': '3600',
'kinit_path': 'kinit',
'keytab': 'airflow.keytab',
},
'github_enterprise': {
'api_rev': 'v3'
},
'admin': {
'hide_sensitive_variable_fields': True,
},
}
DEFAULT_CONFIG = """\
[core]
# The home folder for airflow, default is ~/airflow
airflow_home = {AIRFLOW_HOME}
# The folder where your airflow pipelines live, most likely a
# subfolder in a code repository
# This path must be absolute
dags_folder = {AIRFLOW_HOME}/dags
# The folder where airflow should store its log files
# This path must be absolute
base_log_folder = {AIRFLOW_HOME}/logs
# Airflow can store logs remotely in AWS S3 or Google Cloud Storage. Users
# must supply a remote location URL (starting with either 's3://...' or
# 'gs://...') and an Airflow connection id that provides access to the storage
# location.
remote_base_log_folder =
remote_log_conn_id =
# Use server-side encryption for logs stored in S3
encrypt_s3_logs = False
# deprecated option for remote log storage, use remote_base_log_folder instead!
# s3_log_folder =
# The executor class that airflow should use. Choices include
# SequentialExecutor, LocalExecutor, CeleryExecutor
executor = SequentialExecutor
# The SqlAlchemy connection string to the metadata database.
# SqlAlchemy supports many different database engine, more information
# their website
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# The SqlAlchemy pool size is the maximum number of database connections
# in the pool.
sql_alchemy_pool_size = 5
# The SqlAlchemy pool recycle is the number of seconds a connection
# can be idle in the pool before it is invalidated. This config does
# not apply to sqlite.
sql_alchemy_pool_recycle = 3600
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
parallelism = 32
# The number of task instances allowed to run concurrently by the scheduler
dag_concurrency = 16
# Are DAGs paused by default at creation
dags_are_paused_at_creation = True
# When not using pools, tasks are run in the "default pool",
# whose size is guided by this config element
non_pooled_task_slot_count = 128
# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16
# Whether to load the examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True
# Where your Airflow plugins are stored
plugins_folder = {AIRFLOW_HOME}/plugins
# Secret key to save connection passwords in the db
fernet_key = {FERNET_KEY}
# Whether to disable pickling dags
donot_pickle = False
# How long before timing out a python file import while filling the DagBag
dagbag_import_timeout = 30
[operators]
# The default owner assigned to each new operator, unless
# provided explicitly or passed via `default_args`
default_owner = Airflow
[webserver]
# The base url of your website as airflow cannot guess what domain or
# cname you are using. This is used in automated emails that
# airflow sends to point links to the right web server
base_url = http://localhost:8080
# The ip specified when starting the web server
web_server_host = 0.0.0.0
# The port on which to run the web server
web_server_port = 8080
# The time the gunicorn webserver waits before timing out on a worker
web_server_worker_timeout = 120
# Secret key used to run your flask app
secret_key = temporary_key
# Number of workers to run the Gunicorn web server
workers = 4
# The worker class gunicorn should use. Choices include
# sync (default), eventlet, gevent
worker_class = sync
# Log files for the gunicorn webserver. '-' means log to stderr.
access_logfile = -
error_logfile = -
# Expose the configuration file in the web server
expose_config = true
# Set to true to turn on authentication:
# http://pythonhosted.org/airflow/installation.html#web-authentication
authenticate = False
# Filter the list of dags by owner name (requires authentication to be enabled)
filter_by_owner = False
[email]
email_backend = airflow.utils.email.send_email_smtp
[smtp]
# If you want airflow to send emails on retries, failure, and you want to use
# the airflow.utils.email.send_email_smtp function, you have to configure an smtp
# server here
smtp_host = localhost
smtp_starttls = True
smtp_ssl = False
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
# This section only applies if you are using the CeleryExecutor in
# [core] section above
# The app name that will be used by celery
celery_app_name = airflow.executors.celery_executor
# The concurrency that will be used when starting workers with the
# "airflow worker" command. This defines the number of task instances that
# a worker will take, so size up your workers based on the resources on
# your worker box and the nature of your tasks
celeryd_concurrency = 16
# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the workers local log files to the airflow main
# web server, who then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and open
# visible from the main web server to connect into the workers.
worker_log_server_port = 8793
# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
# a sqlalchemy database. Refer to the Celery documentation for more
# information.
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
# Another key Celery setting
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
# it `airflow flower`. This defines the port that Celery Flower runs on
flower_port = 5555
# Default queue that tasks get assigned to and that worker listen on.
default_queue = default
[scheduler]
# Task instances listen for external kill signal (when you clear tasks
# from the CLI or the UI), this defines the frequency at which they should
# listen (in seconds).
job_heartbeat_sec = 5
# The scheduler constantly tries to trigger new tasks (look at the
# scheduler section in the docs for more information). This defines
# how often the scheduler should run (in seconds).
scheduler_heartbeat_sec = 5
# Statsd (https://github.com/etsy/statsd) integration settings
# statsd_on = False
# statsd_host = localhost
# statsd_port = 8125
# statsd_prefix = airflow
# The scheduler can run multiple threads in parallel to schedule dags.
# This defines how many threads will run. However airflow will never
# use more threads than the amount of cpu cores available.
max_threads = 2
[mesos]
# Mesos master address which MesosExecutor will connect to.
master = localhost:5050
# The framework name which Airflow scheduler will register itself as on mesos
framework_name = Airflow
# Number of cpu cores required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_cpu = 1
# Memory in MB required for running one task instance using
# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
# command on a mesos slave
task_memory = 256
# Enable framework checkpointing for mesos
# See http://mesos.apache.org/documentation/latest/slave-recovery/
checkpoint = False
# Failover timeout in milliseconds.
# When checkpointing is enabled and this option is set, Mesos waits
# until the configured timeout for
# the MesosExecutor framework to re-register after a failover. Mesos
# shuts down running tasks if the
# MesosExecutor framework fails to re-register within this timeframe.
# failover_timeout = 604800
# Enable framework authentication for mesos
# See http://mesos.apache.org/documentation/latest/configuration/
authenticate = False
# Mesos credentials, if authentication is enabled
# default_principal = admin
# default_secret = admin
[admin]
# UI to hide sensitive variable fields when set to True
hide_sensitive_variable_fields = True
"""
TEST_CONFIG = """\
[core]
airflow_home = {AIRFLOW_HOME}
dags_folder = {TEST_DAGS_FOLDER}
base_log_folder = {AIRFLOW_HOME}/logs
executor = SequentialExecutor
sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/unittests.db
unit_test_mode = True
load_examples = True
donot_pickle = False
dag_concurrency = 16
dags_are_paused_at_creation = False
fernet_key = {FERNET_KEY}
non_pooled_task_slot_count = 128
[operators]
default_owner = airflow
[webserver]
base_url = http://localhost:8080
web_server_host = 0.0.0.0
web_server_port = 8080
[email]
email_backend = airflow.utils.email.send_email_smtp
[smtp]
smtp_host = localhost
smtp_user = airflow
smtp_port = 25
smtp_password = airflow
smtp_mail_from = airflow@airflow.com
[celery]
celery_app_name = airflow.executors.celery_executor
celeryd_concurrency = 16
worker_log_server_port = 8793
broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow
celery_result_backend = db+mysql://airflow:airflow@localhost:3306/airflow
flower_port = 5555
default_queue = default
[scheduler]
job_heartbeat_sec = 1
scheduler_heartbeat_sec = 5
authenticate = true
max_threads = 2
"""
class ConfigParserWithDefaults(ConfigParser):
    """ConfigParser that resolves each option from four layered sources, in
    priority order: AIRFLOW__{SECTION}__{KEY} environment variables, the
    config file itself, the stdout of a "{key}_cmd" command (whitelisted
    options only), and finally the ``defaults`` dict given to __init__."""

    # These configuration elements can be fetched as the stdout of commands
    # following the "{section}__{name}__cmd" pattern, the idea behind this is to not
    # store password on boxes in text files.
    as_command_stdout = {
        ('core', 'sql_alchemy_conn'),
        ('core', 'fernet_key'),
        ('celery', 'broker_url'),
        ('celery', 'celery_result_backend')
    }
    def __init__(self, defaults, *args, **kwargs):
        # Kept on the instance rather than handed to ConfigParser so that
        # file values take precedence over defaults in get() below.
        self.defaults = defaults
        ConfigParser.__init__(self, *args, **kwargs)
        self.is_validated = False
    def _validate(self):
        # sqlite only supports a single connection, so any executor other
        # than SequentialExecutor is rejected when sqlite is configured.
        if (
            self.get("core", "executor") != 'SequentialExecutor' and
            "sqlite" in self.get('core', 'sql_alchemy_conn')):
            raise AirflowConfigException("error: cannot use sqlite with the {}".
                format(self.get('core', 'executor')))
        self.is_validated = True
    def _get_env_var_option(self, section, key):
        # must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)
        # Returns None implicitly when the env var is not set.
        env_var = 'AIRFLOW__{S}__{K}'.format(S=section.upper(), K=key.upper())
        if env_var in os.environ:
            return expand_env_var(os.environ[env_var])
    def _get_cmd_option(self, section, key):
        # Only whitelisted options (as_command_stdout) may be supplied via a
        # "{key}_cmd" companion option; returns None implicitly otherwise.
        fallback_key = key + '_cmd'
        if (
                (section, key) in ConfigParserWithDefaults.as_command_stdout and
                self.has_option(section, fallback_key)):
            command = self.get(section, fallback_key)
            return run_command(command)
    def get(self, section, key, **kwargs):
        """Resolve an option: env var, then config file, then command
        output, then defaults. Raises AirflowConfigException when the
        option is found nowhere."""
        section = str(section).lower()
        key = str(key).lower()
        d = self.defaults
        # first check environment variables
        option = self._get_env_var_option(section, key)
        if option:
            return option
        # ...then the config file
        if self.has_option(section, key):
            return expand_env_var(
                ConfigParser.get(self, section, key, **kwargs))
        # ...then commands
        option = self._get_cmd_option(section, key)
        if option:
            return option
        # ...then the defaults
        if section in d and key in d[section]:
            return expand_env_var(d[section][key])
        else:
            # NOTE(review): logging.warn is a deprecated alias of
            # logging.warning; the warning is also redundant with the
            # exception raised immediately below.
            logging.warn("section/key [{section}/{key}] not found "
                         "in config".format(**locals()))
            raise AirflowConfigException(
                "section/key [{section}/{key}] not found "
                "in config".format(**locals()))
    def getboolean(self, section, key):
        # Strip trailing inline comments ("True # some note") before parsing.
        val = str(self.get(section, key)).lower().strip()
        if '#' in val:
            val = val.split('#')[0].strip()
        if val == "true":
            return True
        elif val == "false":
            return False
        else:
            raise AirflowConfigException("Not a boolean.")
    def getint(self, section, key):
        return int(self.get(section, key))
    def getfloat(self, section, key):
        return float(self.get(section, key))
    def read(self, filenames):
        # Validate the executor/sqlite combination after every read.
        ConfigParser.read(self, filenames)
        self._validate()
    def as_dict(self, display_source=False, display_sensitive=False):
        """
        Returns the current configuration as an OrderedDict of OrderedDicts.
        :param display_source: If False, the option value is returned. If True,
            a tuple of (option_value, source) is returned. Source is either
            'airflow.cfg' or 'default'.
        :type display_source: bool
        :param display_sensitive: If True, the values of options set by env
            vars and bash commands will be displayed. If False, those options
            are shown as '< hidden >'
        :type display_sensitive: bool
        """
        cfg = copy.deepcopy(self._sections)
        # remove __name__ (affects Python 2 only)
        for options in cfg.values():
            options.pop('__name__', None)
        # add source
        if display_source:
            for section in cfg:
                for k, v in cfg[section].items():
                    cfg[section][k] = (v, 'airflow.cfg')
        # add env vars and overwrite because they have priority
        for ev in [ev for ev in os.environ if ev.startswith('AIRFLOW__')]:
            try:
                _, section, key = ev.split('__')
                opt = self._get_env_var_option(section, key)
            except ValueError:
                # Env var does not follow the AIRFLOW__SECTION__KEY shape.
                opt = None
            if opt:
                if not display_sensitive:
                    opt = '< hidden >'
                if display_source:
                    opt = (opt, 'env var')
                cfg.setdefault(section.lower(), OrderedDict()).update(
                    {key.lower(): opt})
        # add bash commands
        for (section, key) in ConfigParserWithDefaults.as_command_stdout:
            opt = self._get_cmd_option(section, key)
            if opt:
                if not display_sensitive:
                    opt = '< hidden >'
                if display_source:
                    opt = (opt, 'bash cmd')
                cfg.setdefault(section, OrderedDict()).update({key: opt})
        # add defaults
        for section in sorted(self.defaults):
            for key in sorted(self.defaults[section].keys()):
                if key not in cfg.setdefault(section, OrderedDict()):
                    opt = str(self.defaults[section][key])
                    if display_source:
                        cfg[section][key] = (opt, 'default')
                    else:
                        cfg[section][key] = opt
        return cfg
def mkdir_p(path):
    """Create *path* and any missing parent directories, like ``mkdir -p``.

    Silently succeeds when the directory already exists; raises
    AirflowConfigException for any other OS error.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            # Include the path and original error so the failure is debuggable
            # (the old message gave no indication of which directory failed).
            raise AirflowConfigException(
                'Had trouble creating a directory {}: {}'.format(path, exc))
"""
Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
"~/airflow" and "~/airflow/airflow.cfg" respectively as defaults.
"""
# Resolve AIRFLOW_HOME (default ~/airflow) and make sure the directory exists.
if 'AIRFLOW_HOME' not in os.environ:
    AIRFLOW_HOME = expand_env_var('~/airflow')
else:
    AIRFLOW_HOME = expand_env_var(os.environ['AIRFLOW_HOME'])
mkdir_p(AIRFLOW_HOME)
# Resolve AIRFLOW_CONFIG: the env var wins; otherwise a legacy ~/airflow.cfg
# is honored if present, else the config lives inside AIRFLOW_HOME.
if 'AIRFLOW_CONFIG' not in os.environ:
    if os.path.isfile(expand_env_var('~/airflow.cfg')):
        AIRFLOW_CONFIG = expand_env_var('~/airflow.cfg')
    else:
        AIRFLOW_CONFIG = AIRFLOW_HOME + '/airflow.cfg'
else:
    AIRFLOW_CONFIG = expand_env_var(os.environ['AIRFLOW_CONFIG'])
# Set up dags folder for unit tests
# this directory won't exist if users install via pip
_TEST_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
    'tests',
    'dags')
if os.path.exists(_TEST_DAGS_FOLDER):
    TEST_DAGS_FOLDER = _TEST_DAGS_FOLDER
else:
    TEST_DAGS_FOLDER = os.path.join(AIRFLOW_HOME, 'dags')
def parameterized_config(template):
    """
    Generates a configuration from the provided template + variables defined in
    current scope
    :param template: a config content templated with {{variables}}
    """
    FERNET_KEY = generate_fernet_key()
    # Merge module globals and function locals (locals win) so the template
    # can reference module-level names such as AIRFLOW_HOME as well as the
    # FERNET_KEY computed above. The [globals(), locals()] list is evaluated
    # in this function's scope, so locals() sees FERNET_KEY even on Python 3.
    all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()}
    return template.format(**all_vars)
# On first run, materialize both the unit-test config and the default config
# so later reads always have a file to parse.
TEST_CONFIG_FILE = AIRFLOW_HOME + '/unittests.cfg'
if not os.path.isfile(TEST_CONFIG_FILE):
    logging.info("Creating new airflow config file for unit tests in: " +
                 TEST_CONFIG_FILE)
    with open(TEST_CONFIG_FILE, 'w') as f:
        f.write(parameterized_config(TEST_CONFIG))
if not os.path.isfile(AIRFLOW_CONFIG):
    # These configuration options are used to generate a default configuration
    # when it is missing. The right way to change your configuration is to alter
    # your configuration file, not this code.
    logging.info("Creating new airflow config file in: " + AIRFLOW_CONFIG)
    with open(AIRFLOW_CONFIG, 'w') as f:
        f.write(parameterized_config(DEFAULT_CONFIG))
logging.info("Reading the config from " + AIRFLOW_CONFIG)
def test_mode():
    """Replace the module-level config with the unit-test configuration.

    Fixes two defects in the original: it assigned a *local* ``conf`` (so the
    module-level object was never replaced), and it passed the TEST_CONFIG
    template *contents* to read(), which ConfigParser.read silently ignores
    as a nonexistent filename. Read the generated TEST_CONFIG_FILE instead.
    """
    global conf
    conf = ConfigParserWithDefaults(defaults)
    conf.read(TEST_CONFIG_FILE)
# The module-level configuration object that the convenience wrappers
# (get, getboolean, ...) delegate to.
conf = ConfigParserWithDefaults(defaults)
conf.read(AIRFLOW_CONFIG)
# Module-level convenience wrappers delegating to the global `conf` object,
# so callers can use e.g. configuration.get('core', 'executor') directly.
def get(section, key, **kwargs):
    return conf.get(section, key, **kwargs)
def getboolean(section, key):
    return conf.getboolean(section, key)
def getfloat(section, key):
    return conf.getfloat(section, key)
def getint(section, key):
    return conf.getint(section, key)
def has_option(section, key):
    return conf.has_option(section, key)
def remove_option(section, option):
    return conf.remove_option(section, option)
def as_dict(display_source=False, display_sensitive=False):
    return conf.as_dict(
        display_source=display_source, display_sensitive=display_sensitive)
as_dict.__doc__ = conf.as_dict.__doc__
def set(section, option, value): # noqa
    return conf.set(section, option, value)
########################
# convenience method to access config entries
def get_dags_folder():
    """Return the configured DAGs folder as a user-expanded path."""
    return os.path.expanduser(get('core', 'DAGS_FOLDER'))
|
{
"content_hash": "e466827ca4d92957c05950015e70e388",
"timestamp": "",
"source": "github",
"line_count": 718,
"max_line_length": 85,
"avg_line_length": 31.31615598885794,
"alnum_prop": 0.6626639982210363,
"repo_name": "jwi078/incubator-airflow",
"id": "3403656415dfb896a6f6751de97a28b8cba2fec6",
"size": "23053",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36361"
},
{
"name": "HTML",
"bytes": "102860"
},
{
"name": "JavaScript",
"bytes": "889411"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1032823"
},
{
"name": "Shell",
"bytes": "12235"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
def factory(solver_str):
    """Return the linprog function for the requested solver backend.

    Backends are imported lazily so only the selected solver's package
    needs to be installed. Raises NotImplementedError for unknown solvers.
    """
    if solver_str == 'glpk':
        import linprog.pyglpklp as pyglpklp
        return pyglpklp.linprog
    if solver_str == 'gurobi':
        import linprog.pygurobi as pygurobi
        return pygurobi.linprog
    if solver_str == 'scipy':
        import linprog.scipyopt as scipyopt
        return scipyopt.linprog
    raise NotImplementedError('solver selected: {}'.format(solver_str))
|
{
"content_hash": "cdc4341a02a5a19a41de890cf691b109",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 34.629629629629626,
"alnum_prop": 0.6374331550802139,
"repo_name": "zutshi/S3CAMR",
"id": "9f44a15b3d768d3e06d8746c1844445296d8432f",
"size": "935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/linprog/linprog.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2970"
},
{
"name": "MATLAB",
"bytes": "14618"
},
{
"name": "Makefile",
"bytes": "232"
},
{
"name": "Python",
"bytes": "528716"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
}
|
import sys
from PyQt4.QtGui import *

# Create a PyQt4 application object (owns the Qt event loop).
app = QApplication(sys.argv)

# The QWidget widget is the base class of all user interface objects in
# PyQt4; QMainWindow provides the top-level frame.
window = QMainWindow()

# Set window size.
window.resize(320, 240)

# Set window title.
window.setWindowTitle("Hello World!")

# ==========================================================
# ==========================================================
# Create a button in the window; parenting it to ``window`` makes it appear
# inside the frame without a layout manager.
button = QPushButton('change button size', window)
button.move(20, 20)
button.resize(button.sizeHint())


# Slot implementations (plain functions, connected to signals below).
def on_click():
    # Demonstrates resizing in response to a click: keep the current
    # width, stretch the height to 100 px.
    print("Button clicked.")
    button.resize(button.size().width(), 100)


def on_press():
    print('on_press()')


def on_release():
    print('on_release()')


# Connect the signals to the slots.
button.clicked.connect(on_click)
button.pressed.connect(on_press)
button.released.connect(on_release)

# ==========================================================
# Show window and hand control to the Qt event loop; exit with its code.
window.show()
sys.exit(app.exec_())
|
{
"content_hash": "d1dabec977a2fa95f54671fd836087bb",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 21.163265306122447,
"alnum_prop": 0.583413693346191,
"repo_name": "3WiseMen/python",
"id": "01cf7a08e601dd440596de1a519489006a855baf",
"size": "1037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "14. PyQT/Button2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5638580"
}
],
"symlink_target": ""
}
|
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('extrequests', '0012_auto_20190822_1906'),
]
operations = [
migrations.AddField(
model_name='selforganizedsubmission',
name='country',
field=django_countries.fields.CountryField(max_length=2, null=True, verbose_name='Country'),
),
]
|
{
"content_hash": "09b997834598b1db4edea1988e0d2706",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 104,
"avg_line_length": 25.529411764705884,
"alnum_prop": 0.6359447004608295,
"repo_name": "swcarpentry/amy",
"id": "9b3bdef229c17901c633dbad1f5193ad9f7cb563",
"size": "483",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "amy/extrequests/migrations/0013_selforganizedsubmission_country.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2855"
},
{
"name": "HTML",
"bytes": "252906"
},
{
"name": "JavaScript",
"bytes": "22092"
},
{
"name": "Makefile",
"bytes": "2915"
},
{
"name": "Python",
"bytes": "1431147"
}
],
"symlink_target": ""
}
|
import sys
from zoo.pipeline.api.utils import remove_batch
from .engine.topology import KerasNet
from bigdl.util.common import to_list
from zoo.common.utils import callZooFunc
# Python 2/3 compatibility: alias the Py2-only names so the rest of the
# module can use ``long``/``unicode`` on either interpreter. Compare
# version_info numerically — the old lexical check ``sys.version >= '3'``
# would misclassify a hypothetical Python 10 ("10..." < "3" as strings).
if sys.version_info[0] >= 3:
    long = int
    unicode = str
class Sequential(KerasNet):
    """
    Container for a sequential model.

    # Arguments
    name: String to specify the name of the sequential model. Default is None.

    >>> sequential = Sequential(name="seq1")
    creating: createZooKerasSequential
    """

    def __init__(self, jvalue=None, **kwargs):
        super(Sequential, self).__init__(jvalue, **kwargs)

    # TODO: expose is_built from scala side
    def is_built(self):
        """Return True once the container can report an output shape.

        A container with no layers added yet raises from
        ``get_output_shape()``, which we treat as "not built".
        """
        try:
            self.get_output_shape()
            return True
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit still propagate.
            return False

    def add(self, model):
        """Append *model* to the container and return ``self`` for chaining.

        A ``Lambda`` layer is materialized first: the very first layer must
        carry an explicit ``input_shape``; later ones reuse the container's
        current output shape (with the batch dimension stripped).
        """
        from zoo.pipeline.api.autograd import Lambda
        if isinstance(model, Lambda):
            if not self.is_built():
                if not model.input_shape:
                    raise Exception("You should specify inputShape for the first layer")
                input_shapes = model.input_shape
            else:
                input_shapes = self.get_output_shape()
            model = model.create(remove_batch(input_shapes))
        self.value.add(model.value)
        return self

    @staticmethod
    def from_jvalue(jvalue, bigdl_type="float"):
        """
        Create a Python Model base on the given java value
        :param jvalue: Java object create by Py4j
        :return: A Python Model
        """
        model = Sequential(jvalue=jvalue)
        model.value = jvalue
        return model
class Model(KerasNet):
    """
    Container for a graph model.

    # Arguments
    input: An input node or a list of input nodes.
    output: An output node or a list of output nodes.
    name: String to specify the name of the graph model. Default is None.
    """

    def __init__(self, input, output, jvalue=None, **kwargs):
        super(Model, self).__init__(jvalue,
                                    to_list(input),
                                    to_list(output),
                                    **kwargs)

    def save_graph_topology(self, log_path, backward=False):
        """
        Save the current model graph to a folder, which can be displayed in
        TensorBoard by running the command:
        tensorboard --logdir log_path

        # Arguments
        log_path: The path to save the model graph.
        backward: Whether to include the backward (gradient) graph as well.
            Default is False. (The previous text, "The name of the
            application.", was a copy-paste slip.)
        """
        callZooFunc(self.bigdl_type, "zooSaveGraphTopology",
                    self.value,
                    log_path,
                    backward)

    def new_graph(self, outputs):
        # Build a new graph ending at ``outputs`` (presumably layer names —
        # confirm against the Scala "newGraph" API) and wrap it.
        value = callZooFunc(self.bigdl_type, "newGraph", self.value, outputs)
        return self.from_jvalue(value)

    def freeze_up_to(self, names):
        # Delegate to the Scala side to freeze layers up to ``names``
        # (exact inclusivity is decided by the Scala implementation).
        callZooFunc(self.bigdl_type, "freezeUpTo", self.value, names)

    def unfreeze(self, names):
        # Make the named layers trainable again (Scala side "unFreeze").
        callZooFunc(self.bigdl_type, "unFreeze", self.value, names)

    @staticmethod
    def from_jvalue(jvalue, bigdl_type="float"):
        """
        Create a Python Model base on the given java value
        :param jvalue: Java object create by Py4j
        :return: A Python Model
        """
        model = Model([], [], jvalue=jvalue)
        model.value = jvalue
        return model
|
{
"content_hash": "93112f5dfcbd84c440a93ca74d38568d",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 88,
"avg_line_length": 30.645454545454545,
"alnum_prop": 0.5876594482349451,
"repo_name": "intel-analytics/analytics-zoo",
"id": "ebcffea296f1bdeabd33245f3a96da09c36e44a2",
"size": "3962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/zoo/pipeline/api/keras/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
import views
# Route table for the applications app. The ``name=`` values are referenced
# by ``reverse()`` / ``{% url %}`` elsewhere, so keep them stable.
# NOTE(review): ``patterns('', ...)`` is the pre-Django-1.8 style and was
# removed in 1.10; a plain list of url() entries is the modern equivalent.
urlpatterns = patterns(
    '',
    # Full application list.
    url(
        r'^$',
        views.ApplicationListView.as_view(),
        name='list_applications'
    ),
    # Same list view, filtered by opening primary key.
    url(
        r'^list/(?P<pk>\d+)/$',
        views.ApplicationListView.as_view(),
        name='list_applications_opening'
    ),
    url(
        r'^(?P<pk>\d+)/$',
        views.ApplicationDetailView.as_view(),
        name='application_detail'
    ),
    url(
        r'^(?P<application_id>\d+)/create_message/$',
        views.ApplicationMessageCreateView.as_view(),
        name='create_message'
    ),
    url(
        r'^add/(?P<opening_id>\d+)/$',
        views.ManualApplicationView.as_view(),
        name='manual_application'
    ),
    # The rating may be negative, hence the optional minus sign.
    url(
        r'^rate/(?P<application_id>\d+)/(?P<rating>-?\d+)$',
        views.RateView.as_view(),
        name='rate'
    )
)
|
{
"content_hash": "dcebb49f0977c618690365d00c7e4089",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 59,
"avg_line_length": 23.694444444444443,
"alnum_prop": 0.5463071512309496,
"repo_name": "hizardapp/Hizard",
"id": "3ad6fb0526763cda958fdedfe44f266142bd1612",
"size": "853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyrodactil/applications/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105805"
},
{
"name": "CoffeeScript",
"bytes": "4934"
},
{
"name": "JavaScript",
"bytes": "262408"
},
{
"name": "Python",
"bytes": "261061"
},
{
"name": "Ruby",
"bytes": "860"
}
],
"symlink_target": ""
}
|
import csv
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from pymongo import ASCENDING
from moocng.badges.models import Award
from moocng.courses.marks import calculate_course_mark
from moocng.courses.models import Course, KnowledgeQuantum
from moocng.mongodb import get_db
class Command(BaseCommand):
    """Management command that dumps one CSV row of stats per enrolled student."""

    help = ('Generate a csv file with one row per student with their stats on '
            'the given course')
    args = '<course_id_or_slug>'

    option_list = BaseCommand.option_list + (
        make_option(
            '--filename',
            dest='filename',
            default=None,
            help='Output filename.'
        ),
    )

    def handle(self, *args, **options):
        # The single positional argument is either a numeric course id or
        # a slug.
        if len(args) != 1:
            raise CommandError('Wrong number of arguments')
        try:
            if args[0].isdigit():
                course = Course.objects.only('id', 'slug').get(id=args[0])
            else:
                course = Course.objects.only('id', 'slug').get(slug=args[0])
        except Course.DoesNotExist:
            raise CommandError('The course defined by "%s" does not exist' % args[0])
        filename = options.get('filename') or '%s.csv' % course.slug
        # Global stats (course level), repeated in every student row.
        units = course.unit_set.all().count()
        kqs = KnowledgeQuantum.objects.filter(unit__course__id=course.id).count()
        # NOTE(review): accumulated below but never written to the CSV —
        # either dead code or a missing output column; confirm intent.
        completed_first_unit = 0
        db = get_db()
        activity = db.get_collection('activity')
        rows = []
        badge = course.completion_badge
        for student in course.students.all():
            # Fixed prefix of the row; must stay in sync with the header
            # written below. NOTE(review): the header labels the first
            # column 'email' but the value stored here is the username.
            row = [
                student.username,
                student.get_full_name().encode('utf-8', 'ignore'),
                student.date_joined.isoformat(),
                course.id,
                units,
                kqs,
            ]
            # All activity records of this student in this course, oldest
            # first (ObjectIds sort chronologically).
            course_act = activity.find(
                {
                    'course_id': course.id,
                    'user_id': student.id,
                },
                sort=[('_id', ASCENDING), ]
            )
            course_act_count = course_act.count()
            # Per-student stats: date of first activity, if any.
            if course_act_count > 0:
                row.append(course_act[0]['_id'].generation_time.isoformat())
            else:
                row.append('N/A')
            # NOTE(review): integer division on Py2 (this file uses py2-style
            # 'wb' csv below); would become a float on Py3. Also raises
            # ZeroDivisionError for a course with no nuggets — confirm.
            progress = (course_act_count * 100) / kqs
            row.append(progress)
            completed_units = 0
            first = True
            for unit in course.unit_set.only('id').all():
                kqs_in_unit = unit.knowledgequantum_set.count()
                act = activity.find({
                    'user_id': student.id,
                    'unit_id': unit.id,
                }).count()
                # A unit counts as completed when every nugget has activity.
                if kqs_in_unit == act:
                    completed_units += 1
                if first:
                    first = False
                    # NOTE(review): adds the running completed-units count
                    # on the first unit only; looks like it intended to count
                    # students who completed the first unit — verify.
                    completed_first_unit += completed_units
            row.append(completed_units)
            row.append(course_act_count)
            mark, _ = calculate_course_mark(course, student)
            row.append(mark)
            if badge:
                row.append(Award.objects.filter(badge=badge, user=student).exists())
            else:
                row.append('N/A')
            rows.append(row)
        # 'wb' is the Python 2 csv idiom (binary mode, no newline mangling).
        with open(filename, 'wb') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([
                'email',
                'full_name',
                'platform_date_joined',
                'course_id',
                'course_units',
                'course_nuggets',
                'first_activity_date',
                'progress_percentage',
                'completed_units',
                'completed_nuggets',
                'score',
                'got_completion_badge',
            ])
            writer.writerows(rows)
|
{
"content_hash": "8294144c6f5be98603ade59889e37a48",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 85,
"avg_line_length": 31.934959349593495,
"alnum_prop": 0.5030549898167006,
"repo_name": "GeographicaGS/moocng",
"id": "647b869a1b2ba86bbed3d9a46b525e927607f50d",
"size": "4496",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moocng/portal/management/commands/per_students_extra_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "162701"
},
{
"name": "HTML",
"bytes": "362912"
},
{
"name": "JavaScript",
"bytes": "1911286"
},
{
"name": "Python",
"bytes": "2723710"
},
{
"name": "Shell",
"bytes": "24842"
}
],
"symlink_target": ""
}
|
import ast
import errno
import os
import platform
import shutil
import stat
import subprocess
import sys
from distutils import log
from distutils.command.build_ext import build_ext
from distutils.command.sdist import sdist
from distutils.errors import DistutilsError
# True when running under Python 3; used below to decide whether subprocess
# output needs decoding from bytes.
PY3 = sys.version_info[0] == 3
def makedirs(path):
    """Create *path* (and parents), tolerating an already-usable directory.

    Re-raises the OSError unless the target already exists as a directory
    that is readable, writable and traversable.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        already_usable = (
            err.errno == errno.EEXIST
            and os.path.isdir(path)
            and os.access(path, os.R_OK | os.W_OK | os.X_OK)
        )
        if not already_usable:
            raise
def rmtree(path):
    """Recursively delete *path*; a missing path is silently ignored.

    Read-only entries (common on Windows checkouts) are made writable and
    retried instead of aborting the removal.
    """
    def _force_writable(func, target, excinfo):
        # Retry the failed operation after clearing the read-only bit.
        os.chmod(target, stat.S_IWRITE)
        func(target)

    try:
        shutil.rmtree(path, onerror=_force_writable)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
def exec_process(cmdline, silent=True, input=None, **kwargs):
    """Execute a subprocess and return its stdout output as text.

    Optionally prints stdout and stderr while running.

    :param cmdline: argv list, e.g. ``['git', 'clone', ...]``.
    :param silent: when False, echo the child's stdout/stderr to ours.
    :param input: data passed to the child's stdin.
    :raises DistutilsError: if the executable is missing or exits non-zero.
    :return: the child's stdout.
    """
    try:
        sub = subprocess.Popen(args=cmdline, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               **kwargs)
        stdout, stderr = sub.communicate(input=input)
        returncode = sub.returncode
        if not isinstance(stdout, str):
            # Python 3: communicate() returned bytes. Fall back to UTF-8
            # because the std streams' ``encoding`` can be None (e.g. when
            # the streams are redirected or replaced), which would make
            # ``decode(None)`` raise TypeError. (On Python 2 ``stdout``
            # already is a byte ``str``, so no decoding happens — same as
            # the old ``if PY3:`` guard, minus the module-global dependency.)
            stderr = stderr.decode(sys.stderr.encoding or 'utf-8')
            stdout = stdout.decode(sys.stdout.encoding or 'utf-8')
        if not silent:
            sys.stdout.write(stdout)
            sys.stderr.write(stderr)
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise DistutilsError('"%s" is not present on this system' % cmdline[0])
        else:
            raise
    if returncode != 0:
        output = 'stderr:\n%s\nstdout:\n%s' % (stderr.rstrip("\n"), stdout.rstrip("\n"))
        raise DistutilsError('Got return value %d while executing "%s", output was:\n%s' % (returncode, " ".join(cmdline), output))
    return stdout
def prepare_windows_env(env):
    """Prepare *env* for building libuv with vcbuild.bat on Windows.

    Drops newer Visual Studio tool variables so gyp picks the supported
    toolchain, then locates a Python 2.7 interpreter for gyp, preferring:
    (1) a user-provided PYTHON variable, (2) the running interpreter,
    (3) ``python`` on PATH, (4) the default ``C:\\Python27`` location.
    """
    for toolsvar in ('VS140COMNTOOLS', 'VS120COMNTOOLS', 'VS110COMNTOOLS'):
        env.pop(toolsvar, None)
    if sys.version_info < (3, 3):
        env.pop('VS100COMNTOOLS', None)
        env['GYP_MSVS_VERSION'] = '2008'
    else:
        env['GYP_MSVS_VERSION'] = '2010'

    if env.get('PYTHON'):
        # Already manually set by user.
        return

    if sys.version_info[:2] == (2, 7):
        # The current executable is fine.
        env['PYTHON'] = sys.executable
        return

    # Ask cmd.exe to run `python`; invoking it directly could resolve back
    # to the current (non-2.7) executable.
    probe = ['cmd.exe', '/C', 'python', '-c', 'import sys; '
             'v = str(sys.version_info[:2]); sys.stdout.write(v); '
             'sys.stdout.flush()']
    try:
        proc = subprocess.Popen(probe, stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        if ast.literal_eval(out.decode(sys.stdout.encoding).strip()) == (2, 7):
            # Python on PATH is fine.
            return
    except OSError:
        pass

    # Last resort: the default install location.
    candidate = os.path.join('%SYSTEMDRIVE%', 'Python27', 'python.exe')
    candidate = os.path.expandvars(candidate)
    if os.path.isfile(candidate):
        log.info('Using "%s" to build libuv...' % candidate)
        env['PYTHON'] = candidate
    else:
        raise DistutilsError('No appropriate Python version found. An '
                             'installation of 2.7 is required to '
                             'build libuv. You can set the environment '
                             'variable "PYTHON" to point to a custom '
                             'installation location.')
class libuv_build_ext(build_ext):
    """build_ext variant that fetches, patches and compiles the bundled
    libuv (or links against a system-provided one) before building the
    extension modules."""

    # Pinned libuv source location and revision.
    libuv_dir = os.path.join('deps', 'libuv')
    libuv_repo = 'https://github.com/libuv/libuv.git'
    libuv_branch = 'v1.x'
    libuv_revision = '30c8be0'
    libuv_patches = []

    # Build new lists instead of extend()ing the base-class attributes in
    # place: extending mutates distutils' shared ``build_ext.user_options``
    # list, leaking these options into every other build_ext-derived command
    # (and duplicating them if this module is imported twice).
    user_options = build_ext.user_options + [
        ("libuv-clean-compile", None, "Clean libuv tree before compilation"),
        ("libuv-force-fetch", None, "Remove libuv (if present) and fetch it again"),
        ("libuv-verbose-build", None, "Print output of libuv build process"),
        ("use-system-libuv", None, "Use the system provided libuv, instead of the bundled one")
    ]
    boolean_options = build_ext.boolean_options + [
        "libuv-clean-compile", "libuv-force-fetch", "libuv-verbose-build", "use-system-libuv"
    ]

    def initialize_options(self):
        build_ext.initialize_options(self)
        self.libuv_clean_compile = 0
        self.libuv_force_fetch = 0
        self.libuv_verbose_build = 0
        self.use_system_libuv = 0

    def build_extensions(self):
        """Ensure libuv is available, wire up libraries, then build."""
        # Any libuv-related option invalidates previous build artifacts.
        self.force = self.force or self.libuv_force_fetch or self.libuv_clean_compile
        if self.use_system_libuv:
            if sys.platform == 'win32':
                raise DistutilsError('using a system provided libuv is unsupported on Windows')
            self.compiler.add_library('uv')
        else:
            if sys.platform == 'win32':
                self.libuv_lib = os.path.join(self.libuv_dir, 'Release', 'lib', 'libuv.lib')
            else:
                self.libuv_lib = os.path.join(self.libuv_dir, '.libs', 'libuv.a')
            self.get_libuv()
            # Link the static library directly into the extension.
            self.extensions[0].extra_objects.extend([self.libuv_lib])
            self.compiler.add_include_dir(os.path.join(self.libuv_dir, 'include'))
        # Platform-specific system libraries required by libuv.
        if sys.platform.startswith('linux'):
            self.compiler.add_library('rt')
        elif sys.platform == 'win32':
            self.extensions[0].define_macros.append(('WIN32', 1))
            self.extensions[0].extra_link_args.extend(['/NODEFAULTLIB:libcmt', '/LTCG'])
            self.compiler.add_library('advapi32')
            self.compiler.add_library('iphlpapi')
            self.compiler.add_library('psapi')
            self.compiler.add_library('shell32')
            self.compiler.add_library('userenv')
            self.compiler.add_library('ws2_32')
        elif sys.platform.startswith('freebsd'):
            self.compiler.add_library('kvm')
        build_ext.build_extensions(self)

    def get_libuv(self):
        """Fetch/patch/compile the bundled libuv if not already built."""
        def download_libuv():
            log.info('Downloading libuv...')
            makedirs(self.libuv_dir)
            exec_process(['git', 'clone', '-b', self.libuv_branch, self.libuv_repo, self.libuv_dir])
            exec_process(['git', 'reset', '--hard', self.libuv_revision], cwd=self.libuv_dir)

        def patch_libuv():
            if self.libuv_patches:
                log.info('Patching libuv...')
                for patch_file in self.libuv_patches:
                    exec_process(['patch', '--forward', '-d', self.libuv_dir, '-p0', '-i', os.path.abspath(patch_file)])

        def build_libuv():
            cflags = '-fPIC'
            env = os.environ.copy()
            env['CFLAGS'] = ' '.join(x for x in (cflags, env.get('CFLAGS', None), env.get('ARCHFLAGS', None)) if x)
            log.info('Building libuv...')
            if sys.platform == 'win32':
                prepare_windows_env(env)
                libuv_arch = {'32bit': 'x86', '64bit': 'x64'}[platform.architecture()[0]]
                exec_process(['cmd.exe', '/C', 'vcbuild.bat', libuv_arch, 'release'], cwd=self.libuv_dir, env=env, shell=True, silent=not self.libuv_verbose_build)
            else:
                exec_process(['sh', 'autogen.sh'], cwd=self.libuv_dir, env=env, silent=not self.libuv_verbose_build)
                exec_process(['./configure'], cwd=self.libuv_dir, env=env, silent=not self.libuv_verbose_build)
                exec_process(['make'], cwd=self.libuv_dir, env=env, silent=not self.libuv_verbose_build)

        if self.libuv_force_fetch:
            rmtree('deps')
        if not os.path.exists(self.libuv_dir):
            try:
                download_libuv()
            except BaseException:
                # Leave no half-cloned tree behind; re-raise unconditionally.
                rmtree('deps')
                raise
            patch_libuv()
            build_libuv()
        else:
            if self.libuv_clean_compile:
                if sys.platform == 'win32':
                    env = os.environ.copy()
                    prepare_windows_env(env)
                    exec_process(['cmd.exe', '/C', 'vcbuild.bat', 'clean'], cwd=self.libuv_dir, env=env, shell=True)
                    rmtree(os.path.join(self.libuv_dir, 'Release'))
                else:
                    exec_process(['make', 'clean'], cwd=self.libuv_dir)
                    exec_process(['make', 'distclean'], cwd=self.libuv_dir)
            if not os.path.exists(self.libuv_lib):
                log.info('libuv needs to be compiled.')
                build_libuv()
            else:
                log.info('No need to build libuv.')
class libuv_sdist(sdist):
    """sdist command that vendors the libuv and gyp sources into ``deps/``
    so the source distribution is self-contained."""

    libuv_dir = os.path.join('deps', 'libuv')
    # Reuse the exact pin used for building, so sdist and build agree.
    libuv_repo = libuv_build_ext.libuv_repo
    libuv_branch = libuv_build_ext.libuv_branch
    libuv_revision = libuv_build_ext.libuv_revision
    libuv_patches = libuv_build_ext.libuv_patches
    gyp_dir = os.path.join(libuv_dir, 'build', 'gyp')
    gyp_repo = 'https://chromium.googlesource.com/external/gyp.git'

    def initialize_options(self):
        # NOTE: performs network I/O (git clones) as a side effect of option
        # initialization — runs on every sdist invocation.
        sdist.initialize_options(self)
        # Start from a pristine tree.
        rmtree('deps')
        makedirs(self.libuv_dir)
        log.info('Downloading libuv...')
        exec_process(['git', 'clone', '-b', self.libuv_branch, self.libuv_repo, self.libuv_dir])
        exec_process(['git', 'checkout', self.libuv_revision], cwd=self.libuv_dir)
        if self.libuv_patches:
            log.info('Patching libuv...')
            for patch_file in self.libuv_patches:
                exec_process(['patch', '--forward', '-d', self.libuv_dir, '-p0', '-i', os.path.abspath(patch_file)])
        # Strip VCS metadata and test payloads to keep the tarball small.
        rmtree(os.path.join(self.libuv_dir, '.git'))
        log.info('Downloading gyp...')
        exec_process(['git', 'clone', self.gyp_repo, self.gyp_dir])
        rmtree(os.path.join(self.gyp_dir, 'test'))
        rmtree(os.path.join(self.gyp_dir, '.git'))
|
{
"content_hash": "84493a006066449b20a19e64b62013df",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 163,
"avg_line_length": 41.534979423868315,
"alnum_prop": 0.5853561874566531,
"repo_name": "fivejjs/pyuv",
"id": "8b951686945fe4b68c7e6058c202f2dc8c399c42",
"size": "10094",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1.x",
"path": "setup_libuv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "347830"
},
{
"name": "Python",
"bytes": "137756"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
}
|
"""
txpostgres is a library for accessing a PostgreSQL_ database from the Twisted_
framework. It builds upon asynchronous features of the Psycopg_ database
library, which in turn exposes the asynchronous features of libpq_, the
PostgreSQL C library.
It requires a version of Psycopg that includes support for `asynchronous
connections`_ (versions 2.2.0 and later) and a reasonably recent Twisted (it
has been tested with Twisted 10.2 onward). Alternatively, psycopg2cffi_ or
psycopg2-ctypes_ can be used in lieu of Psycopg.
txpostgres tries to present an interface that will be familiar to users of both
Twisted and Psycopg. It features a :class:`~txpostgres.txpostgres.Cursor`
wrapper class that mimics the interface of a Psycopg :psycopg:`cursor
<cursor.html#cursor>` but returns :d:`Deferred` objects. It also provides a
:class:`~txpostgres.txpostgres.Connection` class that is meant to be a drop-in
replacement for Twisted's :tm:`adbapi.Connection
<enterprise.adbapi.Connection>` with some small differences regarding
connection establishing.
The main advantage of txpostgres over Twisted's built-in database support is
non-blocking connection building and complete lack of thread usage.
The library is distributed under the MIT License, see the LICENSE file for
details. You can contact the author, Jan Urbański, at wulczer@wulczer.org. Feel
free to download the source_, file bugs in the `issue tracker`_ and consult the
documentation_.
.. _PostgreSQL: http://www.postgresql.org/
.. _Twisted: http://twistedmatrix.com/
.. _Psycopg: http://initd.org/psycopg/
.. _Python: http://www.python.org/
.. _libpq: http://www.postgresql.org/docs/current/static/libpq-async.html
.. _`asynchronous connections`:
http://initd.org/psycopg/docs/advanced.html#async-support
.. _psycopg2cffi: https://github.com/chtd/psycopg2cffi
.. _psycopg2-ctypes: http://pypi.python.org/pypi/psycopg2ct
.. _source: https://github.com/wulczer/txpostgres
.. _issue tracker: https://github.com/wulczer/txpostgres/issues
.. _documentation: http://txpostgres.readthedocs.org/
"""
from __future__ import absolute_import
from zope.interface.declarations import implementer
from twisted.internet import interfaces, main, defer, task
from twisted.python import failure, log
from txpostgres.psycopg2_impl import psycopg2
# txpostgres relies on psycopg2's asynchronous API. POLL_OK only exists in
# psycopg2 >= 2.2.0, so warn immediately at import time instead of failing
# later with an obscure AttributeError.
try:
    psycopg2.extensions.POLL_OK
except AttributeError:
    import warnings
    warnings.warn(RuntimeWarning(
        "psycopg2 does not have async support. "
        "You need at least version 2.2.0 of psycopg2 "
        "to use txpostgres."))

# Public API of this module.
__all__ = ['Connection', 'Cursor', 'ConnectionPool', '_PollingMixin',
           'AlreadyConnected', 'RollbackFailed',
           'UnexpectedPollResult', 'AlreadyPolling']
class UnexpectedPollResult(Exception):
    """
    Polling returned an unexpected result.
    """


class AlreadyPolling(Exception):
    """
    The previous poll cycle has not been finished yet.

    This probably indicates an issue in txpostgres, rather than in user code.
    """


class _CancelInProgress(Exception):
    """
    A query cancellation is in progress.
    """
@implementer(interfaces.IReadWriteDescriptor)
class _PollingMixin(object):
    """
    An object that wraps something pollable. It can take care of waiting for
    the wrapped pollable to reach the OK state and adapts the pollable's
    interface to :tm:`interfaces.IReadWriteDescriptor
    <internet.interfaces.IReadWriteDescriptor>`. It will forward all attribute
    access that is has not been wrapped to the underlying pollable. Useful as a
    mixin for classes that wrap a psycopg2 pollable object.

    :var reactor: The reactor that the class will use to wait for the wrapped
        pollable to reach the OK state.
    :vartype reactor: an :tm:`IReactorFDSet
        <internet.interfaces.IReactorFDSet>` provider

    :var prefix: Prefix used during log formatting to indicate context.
    :vartype prefix: :class:`str`
    """

    reactor = None
    prefix = "pollable"
    # Deferred for the poll cycle currently in flight; None when idle.
    _pollingD = None

    def pollable(self):
        """
        Return the pollable object. Subclasses should override this.

        :return: A psycopg2 pollable.
        """
        raise NotImplementedError()

    def poll(self):
        """
        Start polling the wrapped pollable.

        :return: A :d:`Deferred` that will fire with an instance of this class
            when the pollable reaches the OK state.
        """
        # this should never be called while the previous Deferred is still
        # active, as it would clobber its reference
        if self._pollingD:
            return defer.fail(AlreadyPolling())
        ret = self._pollingD = defer.Deferred(self._cancel)
        # transform a psycopg2 QueryCanceledError into CancelledError
        self._pollingD.addErrback(self._handleCancellation)
        self.continuePolling()
        return ret

    def continuePolling(self, swallowErrors=False):
        """
        Move forward in the poll cycle. This will call psycopg2's
        :psycopg:`poll() <connection.html#connection.poll>` on the wrapped
        pollable and either wait for more I/O or callback or errback the
        :d:`Deferred` returned earlier if the polling cycle has been completed.

        :param swallowErrors: Should errors with no one to report them to be
            ignored.
        :type swallowErrors: bool

        :raise: :exc:`~txpostgres.txpostgres.UnexpectedPollResult` when
            :meth:`poll` returns a result from outside of the
            :psycopg:`expected list <extensions.html#poll-constants>`.
        """
        # This method often gets called from the reactor's doRead/doWrite
        # handlers. Don't callback or errback the polling Deferred here, as
        # arbitrary user code can be run by that and we don't want to deal with
        # reentrancy issues if this user code tries running queries. The
        # polling Deferred might also be simply not present, if we got called
        # from a doRead after receiving a NOTIFY event.
        try:
            state = self.pollable().poll()
        except:
            if self._pollingD:
                # clear _pollingD before firing, so the errback may poll again
                d, self._pollingD = self._pollingD, None
                self.reactor.callLater(0, d.errback, failure.Failure())
            elif not swallowErrors:
                # no one to report the error to
                raise
        else:
            if state == psycopg2.extensions.POLL_OK:
                if self._pollingD:
                    d, self._pollingD = self._pollingD, None
                    self.reactor.callLater(0, d.callback, self)
            elif state == psycopg2.extensions.POLL_WRITE:
                self.reactor.addWriter(self)
            elif state == psycopg2.extensions.POLL_READ:
                self.reactor.addReader(self)
            else:
                if self._pollingD:
                    d, self._pollingD = self._pollingD, None
                    self.reactor.callLater(
                        0, d.errback, UnexpectedPollResult())
                elif not swallowErrors:
                    # no one to report the error to
                    raise UnexpectedPollResult()

    def doRead(self):
        # deregister first: the pollable decides via poll() whether it
        # needs to be re-added for reading or writing
        self.reactor.removeReader(self)
        if not self.pollable().closed:
            self.continuePolling()

    def doWrite(self):
        self.reactor.removeWriter(self)
        if not self.pollable().closed:
            self.continuePolling()

    def logPrefix(self):
        return self.prefix

    def fileno(self):
        # this should never get called after the pollable has been
        # disconnected, but Twisted versions affected by bug #4539 might cause
        # it to happen, in which case we should return -1
        if self.pollable().closed:
            return -1
        return self.pollable().fileno()

    def connectionLost(self, reason):
        # Do not errback self._pollingD here if the connection is still open!
        # We need to keep on polling until it reports an error, which will
        # errback self._pollingD with the correct failure. If we errback here,
        # we won't finish the polling cycle, which would leave psycopg2 in a
        # state where it thinks there's still an async query underway.
        #
        # If the connection got lost right after the first poll(), the Deferred
        # returned from it will never fire, leaving the caller hanging forever,
        # unless we push the connection state forward here. OTOH, if the
        # connection is already closed, there's no pollable to poll, so if
        # self._pollingD is still present, the only option is to errback it to
        # prevent its waiters from hanging (you can't poll() a closed psycopg2
        # connection)
        if not self.pollable().closed:
            # we're pushing the polling cycle to report pending failures, so if
            # there's no one to report them to, swallow them
            self.continuePolling(swallowErrors=True)
        elif self._pollingD:
            d, self._pollingD = self._pollingD, None
            d.errback(reason)

    def _cancel(self, d):
        try:
            self.pollable().cancel()
        except AttributeError:
            # the pollable has no cancellation support, ignore
            pass
        # prevent Twisted from errbacking the deferred being cancelled, because
        # the PostgreSQL protocol requires finishing the entire polling process
        # before reusing the connection
        raise _CancelInProgress()

    def _handleCancellation(self, f):
        # only QueryCanceledError is translated; other failures pass through
        f.trap(psycopg2.extensions.QueryCanceledError)
        return failure.Failure(defer.CancelledError())

    # Hack required to work with the Gtk2 reactor in Twisted <=11.0, which
    # tries to access the "disconnected" property on the IReadWriteDescriptor
    # it polls. To avoid attribute errors, forward that access to the "closed"
    # property of the underlying connection.
    def disconnected(self):
        return self.pollable().closed
    disconnected = property(disconnected)

    # forward all other access to the underlying connection
    def __getattr__(self, name):
        return getattr(self.pollable(), name)
class Cursor(_PollingMixin):
    """
    A wrapper for a psycopg2 asynchronous cursor.

    The wrapper will forward almost everything to the wrapped cursor, so the
    usual DB-API interface can be used, but it will return :d:`Deferred`
    objects that will fire with the DB-API results.

    Remember that the PostgreSQL protocol does not support concurrent
    asynchronous queries execution, so you need to take care not to execute a
    query while another is still being processed.

    In most cases you should just use the
    :class:`~txpostgres.txpostgres.Connection` methods that will handle the
    locking necessary to prevent concurrent query execution.
    """

    def __init__(self, cursor, connection):
        self.reactor = connection.reactor
        self.prefix = "cursor"
        self._connection = connection
        self._cursor = cursor

    def pollable(self):
        # polling happens on the connection's socket, not on the cursor
        return self._connection.pollable()

    def execute(self, query, params=None):
        """
        A regular DB-API execute, but returns a :d:`Deferred`.

        The caller must be careful not to call this method twice on cursors
        from the same connection without waiting for the previous execution to
        complete.

        :return: A :d:`Deferred` that will fire with the results of the
            DB-API execute.
        """
        return self._doit('execute', query, params)

    def callproc(self, procname, params=None):
        """
        A regular DB-API callproc, but returns a :d:`Deferred`.

        The caller must be careful not to call this method twice on cursors
        from the same connection without waiting for the previous execution to
        complete.

        :return: A :d:`Deferred` that will fire with the results of the
            DB-API callproc.
        """
        return self._doit('callproc', procname, params)

    def _doit(self, name, *args, **kwargs):
        # Kick off the named DB-API call, then poll until it completes.
        # NOTE(review): the bare ``except`` also captures KeyboardInterrupt
        # into the returned Deferred; kept as-is.
        try:
            getattr(self._cursor, name)(*args, **kwargs)
        except:
            return defer.fail()

        # tell the connection that a cursor is starting its poll cycle
        self._connection.cursorRunning(self)

        def finishedAndPassthrough(ret):
            # tell the connection that the poll cycle has finished
            self._connection.cursorFinished(self)
            return ret

        d = self.poll()
        return d.addBoth(finishedAndPassthrough)

    def close(self):
        """
        Close the cursor.

        Once closed, the cursor cannot be used again.

        :returns: :class:`None`
        """
        return self._cursor.close()

    def __getattr__(self, name):
        # the pollable is the connection, but the wrapped object is the cursor
        return getattr(self._cursor, name)
class AlreadyConnected(Exception):
    """
    The database connection is already open.
    """
class RollbackFailed(Exception):
    """
    Raised when rolling back a transaction fails, which may leave the
    connection in an unusable state.

    :var connection: The :class:`~txpostgres.txpostgres.Connection` that
        failed to roll back its transaction.
    :var originalFailure: The Twisted :tm:`Failure <python.failure.Failure>`
        that triggered the rollback attempt in the first place.
    """
    def __init__(self, connection, originalFailure):
        self.connection = connection
        self.originalFailure = originalFailure
    def __str__(self):
        return "<RollbackFailed, original error: {0}>".format(
            self.originalFailure)
class Connection(_PollingMixin):
    """
    A wrapper for a psycopg2 asynchronous connection.
    The wrapper forwards almost everything to the wrapped connection, but
    provides additional methods for compatibility with :tm:`adbapi.Connection
    <enterprise.adbapi.Connection>`.
    :param reactor: A Twisted reactor or :class:`None`, which means the current
        reactor
    :param cooperator: A Twisted :tm:`Cooperator <internet.task.Cooperator>` to
        process :pg:`NOTIFY <notify>` events or :class:`None`, which means
        using :tm:`task.cooperate <internet.task.cooperate>`
    :var connectionFactory: The factory used to produce connections, defaults
        to :psycopg:`psycopg2.connect <module.html#psycopg2.connect>`
    :vartype connectionFactory: any callable
    :var cursorFactory: The factory used to produce cursors, defaults to
        :class:`~txpostgres.txpostgres.Cursor`
    :vartype cursorFactory: a callable accepting two positional arguments, a
        :psycopg:`psycopg2.cursor <cursor.html#cursor>` and a
        :class:`~txpostgres.txpostgres.Connection`
    """
    connectionFactory = staticmethod(psycopg2.connect)
    cursorFactory = Cursor
    def __init__(self, reactor=None, cooperator=None, detector=None):
        if not reactor:
            from twisted.internet import reactor
        if not cooperator:
            # the task module provides cooperate()
            cooperator = task
        self.reactor = reactor
        self.cooperator = cooperator
        # NOTE(review): detector is not described in the class docstring. It
        # is consulted in connect(), _doit() and connectionLost(), so it is
        # presumably a dead-connection detector enabling reconnection --
        # confirm against the txpostgres reconnection support.
        self.detector = detector
        self.prefix = "connection"
        # this lock will be used to prevent concurrent query execution
        self.lock = defer.DeferredLock()
        self._connection = None
        # a set of cursors that should be notified about a disconnection
        self._cursors = set()
        # observers for NOTIFY events
        self._notifyObservers = set()
    def pollable(self):
        # the pollable object for _PollingMixin is the raw psycopg2 connection
        return self._connection
    def connect(self, *args, **kwargs):
        """
        Connect to the database.
        Any arguments will be passed to :attr:`connectionFactory`. Use them to
        pass database names, usernames, passwords, etc.
        :return: A :d:`Deferred` that will fire when the connection is open.
        :raise: :exc:`~txpostgres.txpostgres.AlreadyConnected` when the
            connection has already been opened.
        """
        if self.detector:
            # remember the connection arguments so the detector can reconnect
            self.detector.setReconnectable(self, *args, **kwargs)
        if self._connection and not self._connection.closed:
            return defer.fail(AlreadyConnected())
        # psycopg2 needs the 'async' flag to open a nonblocking connection;
        # passing it as a string dict key sidesteps 'async' being a reserved
        # word in Python 3.7+
        kwargs['async'] = True
        try:
            self._connection = self.connectionFactory(*args, **kwargs)
        except:
            # deliberate bare except: defer.fail() with no argument wraps the
            # exception currently being handled in a Failure
            return defer.fail()
        def startReadingAndPassthrough(ret):
            self.reactor.addReader(self)
            return ret
        # The connection is always a reader in the reactor, to receive NOTIFY
        # events immediately when they're available.
        d = self.poll()
        return d.addCallback(startReadingAndPassthrough)
    def close(self):
        """
        Close the connection and disconnect from the database.
        :return: :class:`None`
        """
        # We'll be closing the underlying socket so stop watching it.
        self.reactor.removeReader(self)
        self.reactor.removeWriter(self)
        # make it safe to call Connection.close() multiple times, psycopg2
        # treats this as an error but we don't
        # NOTE(review): if close() is called before connect() ever succeeded,
        # self._connection is still None and this raises AttributeError --
        # confirm whether callers can hit that path.
        if not self._connection.closed:
            self._connection.close()
        # The above closed the connection socket from C code. Normally we would
        # get connectionLost called on all readers and writers of that socket,
        # but not if we're using the epoll reactor. According to the epoll(2)
        # man page, closing a file descriptor causes it to be removed from all
        # epoll sets automatically. In that case, the reactor might not have
        # the chance to notice that the connection has been closed. To cover
        # that, call connectionLost explicitly on the Connection and all
        # outstanding Cursors. It's OK if connectionLost ends up being called
        # twice, as the second call will not have any effects.
        for cursor in set(self._cursors):
            cursor.connectionLost(failure.Failure(main.CONNECTION_DONE))
        self.connectionLost(failure.Failure(main.CONNECTION_DONE))
    def cursor(self):
        """
        Create an asynchronous cursor using :attr:`cursorFactory`.
        """
        return self.cursorFactory(self._connection.cursor(), self)
    def runQuery(self, *args, **kwargs):
        """
        Execute an SQL query and return the result.
        An asynchronous cursor will be created and its
        :meth:`~txpostgres.txpostgres.Cursor.execute` method will be invoked
        with the provided arguments. After the query completes the results will
        be fetched and the returned :d:`Deferred` will fire with the result.
        The connection is always in autocommit mode, so the query will be run
        in a one-off transaction. In case of errors a :tm:`Failure
        <python.failure.Failure>` will be returned.
        It is safe to call this method multiple times without waiting for the
        first query to complete.
        :return: A :d:`Deferred` that will fire with the return value of the
            cursor's :meth:`fetchall` method.
        """
        return self._doit(self._runQuery, *args, **kwargs)
    def _runQuery(self, *args, **kwargs):
        c = self.cursor()
        d = c.execute(*args, **kwargs)
        # Cursor.execute fires its Deferred with the cursor itself
        return d.addCallback(lambda c: c.fetchall())
    def runOperation(self, *args, **kwargs):
        """
        Execute an SQL query and discard the result.
        Identical to :meth:`~txpostgres.txpostgres.Connection.runQuery`, but
        the result won't be fetched and instead :class:`None` will be
        returned. It is intended for statements that do not normally return
        values, like INSERT or DELETE.
        It is safe to call this method multiple times without waiting for the
        first query to complete.
        :return: A :d:`Deferred` that will fire :class:`None`.
        """
        return self._doit(self._runOperation, *args, **kwargs)
    def _runOperation(self, *args, **kwargs):
        c = self.cursor()
        d = c.execute(*args, **kwargs)
        return d.addCallback(lambda _: None)
    def runInteraction(self, interaction, *args, **kwargs):
        """
        Run commands in a transaction and return the result.
        :obj:`interaction` should be a callable that will be passed a
        :class:`~txpostgres.txpostgres.Cursor` object. Before calling
        :obj:`interaction` a new transaction will be started, so the callable
        can assume to be running all its commands in a transaction. If
        :obj:`interaction` returns a :d:`Deferred` processing will wait for it
        to fire before proceeding. You should not close the provided
        :class:`~txpostgres.txpostgres.Cursor`.
        After :obj:`interaction` finishes work the transaction will be
        automatically committed. If it raises an exception or returns a
        :tm:`Failure <python.failure.Failure>` the connection will be rolled
        back instead.
        If committing the transaction fails it will be rolled back instead and
        the failure obtained trying to commit will be returned.
        If rolling back the transaction fails the failure obtained from the
        rollback attempt will be logged and a
        :exc:`~txpostgres.txpostgres.RollbackFailed` failure will be
        returned. The returned failure will contain references to the original
        failure that caused the transaction to be rolled back and to the
        :class:`~txpostgres.txpostgres.Connection` in which that happened, so
        the user can take a decision whether she still wants to be using it or
        just close it, because an open transaction might have been left open in
        the database.
        It is safe to call this method multiple times without waiting for the
        first query to complete.
        :param interaction: A callable whose first argument is a
            :class:`~txpostgres.txpostgres.Cursor`.
        :type interaction: any callable
        :return: A :d:`Deferred` that will fire with the return value of
            :obj:`interaction`.
        """
        return self._doit(self._runInteraction, interaction, *args, **kwargs)
    def _runInteraction(self, interaction, *args, **kwargs):
        c = self.cursor()
        d = c.execute("begin")
        # execute's Deferred fires with the cursor, which becomes the first
        # argument to the interaction
        d.addCallback(interaction, *args, **kwargs)
        def commitAndPassthrough(ret, cursor):
            e = cursor.execute("commit")
            return e.addCallback(lambda _: ret)
        def rollbackAndPassthrough(f, cursor):
            # maybeDeferred in case cursor.execute raises a synchronous
            # exception
            e = defer.maybeDeferred(cursor.execute, "rollback")
            def justPanic(rf):
                log.err(rf)
                return defer.fail(RollbackFailed(self, f))
            # if rollback failed panic
            e.addErrback(justPanic)
            # otherwise reraise the original failure
            return e.addCallback(lambda _: f)
        d.addCallback(commitAndPassthrough, c)
        d.addErrback(rollbackAndPassthrough, c)
        return d
    def _doit(self, method, *args, **kwargs):
        # Serialise all queries on this connection through the DeferredLock;
        # when a detector is present, route the call through it so it can
        # observe failures.
        if self.detector:
            args = (method, ) + args
            method = self.detector.callChecking
        return self.lock.run(method, *args, **kwargs)
    def cancel(self, d):
        """
        Cancel the current operation. The cancellation does not happen
        immediately, because the PostgreSQL protocol requires that the
        application waits for confirmation after the query has been cancelled.
        Be careful when cancelling an interaction, because if the interaction
        includes sending multiple queries to the database server, you can't
        really be sure which one are you cancelling.
        :param d: a :d:`Deferred` returned by one of
            :class:`~txpostgres.txpostgres.Connection` methods.
        """
        try:
            d.cancel()
        except _CancelInProgress:
            pass
    def cursorRunning(self, cursor):
        """
        Called automatically when a :class:`~txpostgres.txpostgres.Cursor`
        created by this :class:`~txpostgres.txpostgres.Connection` starts
        polling after executing a query. User code should never have to call
        this method.
        """
        # The cursor will now proceed to poll the psycopg2 connection, so stop
        # polling it ourselves until it's done. Failure to do so would result
        # in the connection "stealing" the POLL_OK result that appears after
        # the query is completed and the Deferred returned from the cursor's
        # poll() will never fire.
        self.reactor.removeReader(self)
        self._cursors.add(cursor)
    def cursorFinished(self, cursor):
        """
        Called automatically when a :class:`~txpostgres.txpostgres.Cursor`
        created by this :class:`~txpostgres.txpostgres.Connection` is done with
        polling after executing a query. User code should never have to call
        this method.
        """
        self._cursors.remove(cursor)
        # The cursor is done polling, resume watching the connection for NOTIFY
        # events. Be careful to check the connection state, because it might
        # have been closed while the cursor was polling and adding ourselves as
        # a reader to a closed connection would be an error.
        if not self._connection.closed:
            self.reactor.addReader(self)
            # While cursor was running, some notifies could have been
            # delivered, so check for them.
            self.checkForNotifies()
    def doRead(self):
        # call superclass to handle the pending read event on the socket
        _PollingMixin.doRead(self)
        # check for NOTIFY events
        self.checkForNotifies()
        # continue watching for NOTIFY events, but be careful to check the
        # connection state in case one of the notify handler function caused a
        # disconnection
        if not self._connection.closed:
            self.reactor.addReader(self)
    def connectionLost(self, reason):
        # give the detector a chance to react (e.g. schedule a reconnect)
        _PollingMixin.connectionLost(self, reason)
        if self.detector:
            self.detector.checkForDeadConnection(reason)
    def checkForNotifies(self):
        """
        Check if :pg:`NOTIFY <notify>` events have been received and if so,
        dispatch them to the registered observers, using the :tm:`Cooperator
        <internet.task.Cooperator>` provided in the constructor. This is done
        automatically, user code should never need to call this method.
        """
        # avoid creating a CooperativeTask in the common case of no notifies
        if self._connection.notifies:
            self.cooperator.cooperate(self._checkForNotifies())
    def _checkForNotifies(self):
        # generator consumed by the Cooperator: take a snapshot of the pending
        # notifies and clear the psycopg2-level list before dispatching
        notifies = self._connection.notifies[:]
        del self._connection.notifies[:]
        for notify in notifies:
            # don't iterate over self._notifyObservers directly because the
            # observer function might call removeNotifyObserver, thus modifying
            # the set while it's being iterated
            for observer in self.getNotifyObservers():
                # this method is run from inside the global Cooperator, so
                # there's no one to report errors to -- just log them; use
                # maybeDeferred in case the observer returns a failing Deferred
                # that would stop the cooperator from processing remaining
                # observers
                yield defer.maybeDeferred(observer, notify).addErrback(log.err)
    def addNotifyObserver(self, observer):
        """
        Add an observer function that will get called whenever a :pg:`NOTIFY
        <notify>` event is delivered to this connection. Any number of
        observers can be added to a connection. Adding an observer that's
        already been added is ignored.
        Observer functions are processed using the :tm:`Cooperator
        <internet.task.Cooperator>` provided in the constructor to avoid
        blocking the reactor thread when processing large numbers of events. If
        an observer returns a :d:`Deferred`, processing waits until it fires or
        errbacks.
        There are no guarantees as to the order in which observer functions are
        called when :pg:`NOTIFY <notify>` events are delivered. Exceptions in
        observers are logged and discarded.
        :param observer: A callable whose first argument is a
            :psycopg:`psycopg2.extensions.Notify
            <extensions.html#psycopg2.extensions.Notify>`.
        :type observer: any callable
        """
        self._notifyObservers.add(observer)
    def removeNotifyObserver(self, observer):
        """
        Remove a previously added observer function. Removing an observer
        that's never been added will be ignored.
        :param observer: A callable that should no longer be called on
            :pg:`NOTIFY <notify>` events.
        :type observer: any callable
        """
        self._notifyObservers.discard(observer)
    def getNotifyObservers(self):
        """
        Get the currently registered notify observers.
        :return: A set of callables that will get called on :pg:`NOTIFY
            <notify>` events.
        :rtype: :class:`set`
        """
        # return a copy so callers can mutate observers while iterating
        return set(self._notifyObservers)
class ConnectionPool(object):
    """
    A poor man's pool of :class:`~txpostgres.txpostgres.Connection` instances.
    :var min: The amount of connections that will be open when
        :meth:`~ConnectionPool.start` is called. The pool never opens or closes
        connections on its own after starting. Defaults to 3.
    :vartype min: int
    :var connectionFactory: The factory used to produce connections, defaults
        to :class:`~txpostgres.txpostgres.Connection`.
    :vartype connectionFactory: any callable
    :var reactor: The reactor passed to :attr:`.connectionFactory`.
    :var cooperator: The cooperator passed to :attr:`.connectionFactory`.
    """
    min = 3
    connectionFactory = Connection
    reactor = None
    cooperator = None
    def __init__(self, _ignored, *connargs, **connkw):
        """
        Create a new connection pool.
        Any positional or keyword arguments other than the first one and a
        :obj:`min` keyword argument are passed to :attr:`connectionFactory`
        when connecting. Use these arguments to pass database names, usernames,
        passwords, etc.
        :param _ignored: Ignored, for :tm:`adbapi.ConnectionPool
            <enterprise.adbapi.ConnectionPool>` compatibility.
        :type _ignored: any object
        """
        if not self.reactor:
            from twisted.internet import reactor
            self.reactor = reactor
        # for adbapi compatibility, min can be passed in kwargs
        if 'min' in connkw:
            self.min = connkw.pop('min')
        self.connargs = connargs
        self.connkw = connkw
        # self.connections holds the currently *idle* connections; running
        # queries temporarily pop a connection out of this set
        self.connections = set(
            [self.connectionFactory(self.reactor, self.cooperator)
             for _ in range(self.min)])
        # to avoid checking out more connections than there are pooled in total
        self._semaphore = defer.DeferredSemaphore(self.min)
    def start(self):
        """
        Start the connection pool.
        This will create as many connections as the pool's :attr:`min` variable
        says.
        :return: A :d:`Deferred` that fires when all connection have succeeded.
        """
        # use DeferredList here, as gatherResults only got a consumeErrors
        # keyword argument in Twisted 11.1.0
        d = defer.DeferredList([c.connect(*self.connargs, **self.connkw)
                                for c in self.connections],
                               fireOnOneErrback=True, consumeErrors=True)
        return d.addCallback(lambda _: self)
    def close(self):
        """
        Stop the pool.
        Disconnects all connections.
        :returns: :class:`None`
        """
        for c in self.connections:
            c.close()
    def remove(self, connection):
        """
        Remove a connection from the pool.
        Provided to be able to remove broken connections from the pool. The
        caller should make sure the removed connection does not have queries
        pending.
        :param connection: The connection to be removed.
        :type connection: an object produced by the pool's
            :attr:`connectionFactory`
        """
        # an empty idle set means every pooled connection is currently
        # checked out, so the one being removed must still be in use
        if not self.connections:
            raise ValueError("Connection still in use")
        self.connections.remove(connection)
        # shrink the semaphore to the new pool size; the immediate acquire()
        # burns one token so the counter stays consistent with the new limit
        self._semaphore.limit -= 1
        self._semaphore.acquire()  # bleargh...
    def add(self, connection):
        """
        Add a connection to the pool.
        Provided to be able to extend the pool with new connections.
        :param connection: The connection to be added.
        :type connection: an object compatible with those produced
            by the pool's :attr:`connectionFactory`
        """
        self.connections.add(connection)
        # grow the semaphore to match and release the extra token
        self._semaphore.limit += 1
        self._semaphore.release()  # uuuugh...
    def _putBackAndPassthrough(self, result, connection):
        # return a checked-out connection to the idle set, passing the
        # query result (or failure) through unchanged
        self.connections.add(connection)
        return result
    def runQuery(self, *args, **kwargs):
        """
        Execute an SQL query using a pooled connection and return the result.
        One of the pooled connections will be chosen, its
        :meth:`~txpostgres.txpostgres.Connection.runQuery` method will be
        called and the resulting :d:`Deferred` will be returned.
        :return: A :d:`Deferred` obtained by a pooled connection's
            :meth:`~txpostgres.txpostgres.Connection.runQuery`
        """
        return self._semaphore.run(self._runQuery, *args, **kwargs)
    def _runQuery(self, *args, **kwargs):
        # pop() checks out an arbitrary idle connection; addBoth guarantees
        # it is put back even if the query fails
        c = self.connections.pop()
        d = c.runQuery(*args, **kwargs)
        return d.addBoth(self._putBackAndPassthrough, c)
    def runOperation(self, *args, **kwargs):
        """
        Execute an SQL query using a pooled connection and discard the result.
        One of the pooled connections will be chosen, its
        :meth:`~txpostgres.txpostgres.Connection.runOperation` method will be
        called and the resulting :d:`Deferred` will be returned.
        :return: A :d:`Deferred` obtained by a pooled connection's
            :meth:`~txpostgres.txpostgres.Connection.runOperation`
        """
        return self._semaphore.run(self._runOperation, *args, **kwargs)
    def _runOperation(self, *args, **kwargs):
        c = self.connections.pop()
        d = c.runOperation(*args, **kwargs)
        return d.addBoth(self._putBackAndPassthrough, c)
    def runInteraction(self, interaction, *args, **kwargs):
        """
        Run commands in a transaction using a pooled connection and return the
        result.
        One of the pooled connections will be chosen, its
        :meth:`~txpostgres.txpostgres.Connection.runInteraction` method will be
        called and the resulting :d:`Deferred` will be returned.
        :param interaction: A callable that will be passed to
            :meth:`Connection.runInteraction
            <txpostgres.Connection.runInteraction>`
        :type interaction: any callable
        :return: A :d:`Deferred` obtained by a pooled connection's
            :meth:`Connection.runInteraction
            <txpostgres.Connection.runInteraction>`
        """
        return self._semaphore.run(
            self._runInteraction, interaction, *args, **kwargs)
    def _runInteraction(self, interaction, *args, **kwargs):
        c = self.connections.pop()
        d = c.runInteraction(interaction, *args, **kwargs)
        return d.addBoth(self._putBackAndPassthrough, c)
|
{
"content_hash": "a8d572c24927373936b7351660b9b758",
"timestamp": "",
"source": "github",
"line_count": 928,
"max_line_length": 79,
"avg_line_length": 38.73706896551724,
"alnum_prop": 0.653388227439635,
"repo_name": "wulczer/txpostgres",
"id": "af161a85041d65c605c41fdd0a830c1cb435acaa",
"size": "36041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txpostgres/txpostgres.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124081"
}
],
"symlink_target": ""
}
|
def append_controls(oc, response, page, callback, **params):
    """Append Back/Next pagination controls to a Plex ObjectContainer.

    Callback, DirectoryObject, R, unicode and the *_ICON constants come from
    the Plex plugin framework's injected globals (Python 2 runtime).

    oc -- the ObjectContainer to extend
    response -- service response dict; pagination info is read from
        response['data']['pagination']
    page -- current page number (string or int)
    callback -- handler used to build the Back/Next callbacks
    params -- extra keyword arguments forwarded to the callbacks
    """
    page = int(page)
    if 'pagination' in response['data']:
        next_callback = Callback(callback, page=page+1, **params)
        previous_callback = Callback(callback, page=page-1, **params)
        pagination = response['data']['pagination']
        if pagination['page'] and pagination['pages']:
            # NOTE(review): "Back" displays page-1 while "Next" displays the
            # unmodified page value -- presumably pagination['page'] already
            # refers to the upcoming page; confirm against the service API.
            previous_pagination_message = '%d / %d' % (int(pagination['page']-1), int(pagination['pages']))
            next_pagination_message = '%d / %d' % (int(pagination['page']), int(pagination['pages']))
        else:
            previous_pagination_message = ''
            next_pagination_message = ''
        if not pagination['has_previous']:
            previous_message = ''
        else:
            previous_message = 'Back (' + previous_pagination_message + ')'
        if not pagination['has_next']:
            next_message = ''
        else:
            next_message = 'Next (' + next_pagination_message + ')'
        if pagination['has_previous']:
            oc.add(DirectoryObject(
                key=previous_callback,
                title=unicode(previous_message),
                thumb=R(BACK_ICON)
            ))
        if pagination['has_next']:
            oc.add(DirectoryObject(
                key=next_callback,
                title=unicode(next_message),
                thumb=R(NEXT_ICON)
            ))
|
{
"content_hash": "2dda7a489776aaa5a5ea9d554cfafe3c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 107,
"avg_line_length": 36.282051282051285,
"alnum_prop": 0.5406360424028268,
"repo_name": "shvets/music-plex-plugin",
"id": "fefef6905462816162eb94639557fe8341065812",
"size": "1415",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/lib/plex_plugin/Contents/Code/pagination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45601"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform
# [START aiplatform_sdk_resume_experiment_run_sample]
def resume_experiment_run_sample(
    experiment_name: str,
    run_name: str,
    project: str,
    location: str,
):
    """Resume a previously started Vertex AI experiment run.

    Initializes the aiplatform SDK against the given project, location and
    experiment, then reopens the named run with resume=True.
    """
    aiplatform.init(
        project=project,
        location=location,
        experiment_name=experiment_name,
    )
    aiplatform.start_run(run=run_name, resume=True)
# [END aiplatform_sdk_resume_experiment_run_sample]
|
{
"content_hash": "5e7a46a72576624640f32ee66df58929",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 88,
"avg_line_length": 25.5625,
"alnum_prop": 0.7359413202933985,
"repo_name": "googleapis/python-aiplatform",
"id": "6ba254f07f00b6247281ca0020bd658af4e41cf9",
"size": "986",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/model-builder/experiment_tracking/resume_experiment_run_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
"""
Module debug: various debugging utilities. Set DEBUG=False to disable them.
trap(message, breakOn) -- used to set code coverage breakpoints in the code.
bugprint(message) -- same as print, but only works in DEBUG mode.
printrap(message, breakOn) -- print a message (always), then call trap.
bugprintrap(message, breakOn) -- print a message and call trap (in DEBUG mode).
Created on Dec 4, 2011
@author: malcolmm
No copyright claimed - do anything you want with this code.
"""
from __future__ import print_function
import time
# Master switch for every utility in this module (trap, bugprint, printrap,
# bugprintrap): set to False to disable them.
DEBUG = True
def startTime():
    '''Return a high-resolution timestamp for later use with timeElapsed().

    Uses time.perf_counter(), because time.clock() was deprecated in
    Python 3.3 and removed in Python 3.8; falls back to time.clock() on
    Python 2, where perf_counter() does not exist.
    '''
    try:
        return time.perf_counter()
    except AttributeError:
        # Python 2 fallback
        return time.clock()
def timeElapsed(since):
    '''Return the seconds elapsed since *since* (a value from startTime()).

    Uses time.perf_counter() (time.clock() was removed in Python 3.8),
    falling back to time.clock() on Python 2.
    '''
    try:
        now = time.perf_counter()
    except AttributeError:
        # Python 2 fallback
        now = time.clock()
    return now - since
class Trap(UserWarning):
    '''Exception raised (and immediately caught) by trap() so that an IDE
    configured to break on raised exceptions stops there.'''
def trap(message="IT'S A TRAP!", breakOn=True):
    '''Break into the debugger if breakOn evaluates to True.

    Raises (and immediately catches) a Trap exception carrying *message*,
    but only when both DEBUG and breakOn are truthy.
    **Be sure that your IDE is set to break on caught (Trap or UserWarning)
    exceptions.**
    '''
    # Guard clause: do nothing unless debugging is on and the caller asked
    # for a break.
    if not (DEBUG and breakOn):
        return
    try:
        raise Trap(message)
    except Trap:
        pass
def bugprint(*args, **kwargs):
    '''Same as the built-in print, but silent unless DEBUG is True.'''
    if not DEBUG:
        return
    print(*args, **kwargs)
def printrap(message, breakOn=True):
    '''Print a message to the console (always), then call trap in DEBUG mode.

    The message is printed unconditionally; trap(message, breakOn) is only
    invoked when DEBUG is True.
    '''
    print(message)
    if not DEBUG:
        return
    trap(message, breakOn)
def bugprintrap(message, breakOn=True):
    '''Print a message and call trap, both only in DEBUG mode.

    When DEBUG is True, prints *message* and then calls trap with that
    message and the optional breakOn argument; otherwise does nothing.
    '''
    if not DEBUG:
        return
    print(message)
    trap(message, breakOn)
|
{
"content_hash": "87ea13cb8b14348a895b3c02af25e449",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 81,
"avg_line_length": 29.305555555555557,
"alnum_prop": 0.6469194312796208,
"repo_name": "wyattjoh/Dea1337",
"id": "867c88134f3b6db23dd071ed212dcd6cc17987e4",
"size": "2110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "debug.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47173"
}
],
"symlink_target": ""
}
|
"""
Transport
"""
module = request.controller
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
    "Module's Home Page"
    # Delegate to the generic CMS-backed module index page (s3db is supplied
    # by the framework execution environment).
    return s3db.cms_index(module)
# -----------------------------------------------------------------------------
def _transport_facility_prep(r):
    """
        Shared REST pre-processor for the airport/heliport/seaport
        controllers, which previously carried three identical copies of
        this logic.
    """
    # Location Filter
    s3db.gis_location_filter(r)
    if r.interactive:
        if r.component:
            if r.component.name == "human_resource":
                s3db.org_site_staff_config(r)
            elif r.component.name == "inv_item":
                # remove CRUD generated buttons in the tabs
                s3db.configure("inv_inv_item",
                               create = False,
                               deletable = False,
                               editable = False,
                               listadd = False,
                               )
        elif r.method == "update":
            # allow flagging the facility as obsolete when editing it
            field = r.table.obsolete
            field.readable = field.writable = True
    return True
# -----------------------------------------------------------------------------
def airport():
    """ RESTful CRUD controller """
    s3.prep = _transport_facility_prep
    return s3_rest_controller(rheader=transport_rheader)
# -----------------------------------------------------------------------------
def heliport():
    """ RESTful CRUD controller """
    s3.prep = _transport_facility_prep
    return s3_rest_controller(rheader=transport_rheader)
# -----------------------------------------------------------------------------
def seaport():
    """ RESTful CRUD controller """
    s3.prep = _transport_facility_prep
    return s3_rest_controller(rheader=transport_rheader)
# -----------------------------------------------------------------------------
def transport_rheader(r, tabs=None):
    """
        Resource header for transport facilities.

        :param r: the S3Request
        :param tabs: ignored -- the tab list is always rebuilt here; kept in
                     the signature for backwards compatibility (the old
                     default was a shared mutable list)
        :returns: the rendered resource header
    """
    # Need to use this format as otherwise /inv/incoming?viewing=org_office.x
    # doesn't have an rheader
    tablename, record = s3base.s3_rheader_resource(r)
    r.record = record
    r.table = s3db[tablename]
    tabs = [(T("Details"), None)]
    # Both tab helpers are best-effort: the req/inv modules may be disabled,
    # in which case the rheader renders without their tabs. Catch Exception
    # (not bare except) so KeyboardInterrupt/SystemExit still propagate.
    try:
        tabs = tabs + s3db.req_tabs(r)
    except Exception:
        pass
    try:
        tabs = tabs + s3db.inv_tabs(r)
    except Exception:
        pass
    rheader_fields = [["name"], ["location_id"]]
    rheader = S3ResourceHeader(rheader_fields, tabs)(r)
    return rheader
# END =========================================================================
|
{
"content_hash": "4ed3ada0607fe2d5b32d64dceb22605d",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 33.12,
"alnum_prop": 0.427536231884058,
"repo_name": "code-for-india/sahana_shelter_worldbank",
"id": "4e4aa2e563d6112eddbfe038e004f6ce6f06dadc",
"size": "4165",
"binary": false,
"copies": "2",
"ref": "refs/heads/hackathon",
"path": "controllers/transport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1214342"
},
{
"name": "JavaScript",
"bytes": "16755282"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "27298931"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2245739"
}
],
"symlink_target": ""
}
|
"""Domain object for statistics models."""
__author__ = 'Sean Lip'
import copy
import operator
import re
from core.platform import models
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
class StateCounter(object):
    """Domain object tracking entry and answer counts for a single state.

    All methods and properties here are independent of the specific storage
    model used.
    """
    def __init__(self, first_entry_count, subsequent_entries_count,
                 resolved_answer_count, active_answer_count):
        # Raw counters, normally sourced from a StateCounterModel.
        self.first_entry_count = first_entry_count
        self.subsequent_entries_count = subsequent_entries_count
        self.resolved_answer_count = resolved_answer_count
        self.active_answer_count = active_answer_count
    @property
    def total_entry_count(self):
        """Total number of entries to the state."""
        entries = (self.first_entry_count, self.subsequent_entries_count)
        return sum(entries)
    @property
    def no_answer_count(self):
        """Number of times a reader left without entering an answer."""
        # Entries that ended in neither a resolved nor an active answer.
        return (self.total_entry_count
                - self.resolved_answer_count - self.active_answer_count)
    @classmethod
    def get(cls, exploration_id, state_name):
        """Build a StateCounter from the stored counter model."""
        model = stats_models.StateCounterModel.get_or_create(
            exploration_id, state_name)
        return cls(
            model.first_entry_count,
            model.subsequent_entries_count,
            model.resolved_answer_count,
            model.active_answer_count)
class StateRuleAnswerLog(object):
    """Domain object that stores answers which match different state rules.

    All methods and properties in this file should be independent of the
    specific storage model used.
    """
    def __init__(self, answers):
        # This dict represents a log of answers that hit this rule and that
        # have not been resolved. The keys of this dict are the answers encoded
        # as HTML strings, and the values are integer counts representing how
        # many times the answer has been entered. Deep-copied so later
        # mutations of the caller's dict do not leak into this object.
        self.answers = copy.deepcopy(answers)
    @property
    def total_answer_count(self):
        """Total count of answers for this rule that have not been resolved."""
        # TODO(sll): Cache this computed property.
        # dict.values() (rather than the Python-2-only itervalues/iteritems)
        # keeps this working on both Python 2 and Python 3.
        return sum(self.answers.values())
    @classmethod
    def get_multi(cls, exploration_id, rule_data):
        """Gets domain objects corresponding to the given rule data.

        Args:
            exploration_id: the exploration id
            rule_data: a list of dicts, each with the following keys:
                (state_name, handler_name, rule_str).
        """
        # TODO(sll): Should each rule_str be unicode instead?
        answer_log_models = (
            stats_models.StateRuleAnswerLogModel.get_or_create_multi(
                exploration_id, rule_data))
        return [cls(answer_log_model.answers)
                for answer_log_model in answer_log_models]
    @classmethod
    def get(cls, exploration_id, state_name, handler_name, rule_str):
        """Convenience wrapper around get_multi for a single rule."""
        # TODO(sll): Deprecate this method.
        return cls.get_multi(exploration_id, [{
            'state_name': state_name,
            'handler_name': handler_name,
            'rule_str': rule_str
        }])[0]
    def get_top_answers(self, N):
        """Returns the top N answers.

        Args:
            N: the maximum number of answers to return.

        Returns:
            A list of (answer, count) tuples for the N answers with the highest
            counts.
        """
        # items() works on both Python 2 and 3 (iteritems() was Python 2 only).
        return sorted(
            self.answers.items(), key=operator.itemgetter(1),
            reverse=True)[:N]
class FeedbackItem(object):
    """Domain object for a generic feedback item.
    All methods and properties in this file should be independent of the
    specific storage model used.
    """

    def __init__(self, feedback_item_model):
        # Copy the storage-model fields onto this domain object.
        self.id = feedback_item_model.id
        self.target_id = feedback_item_model.target_id
        self.content = feedback_item_model.content
        self.additional_data = feedback_item_model.additional_data
        self.submitter_id = feedback_item_model.submitter_id
        self.status = feedback_item_model.status

    @classmethod
    def _get_feedback_items_for_target(cls, target_id):
        """Returns domain objects for all new feedback items on a target."""
        return [
            FeedbackItem(f) for f in
            stats_models.FeedbackItemModel.get_new_feedback_items_for_target(
                target_id)]

    @classmethod
    def _get_target_id_for_state(cls, exploration_id, state_name):
        """Builds a target id of the form 'state:<exp_id>.<state_name>'."""
        return 'state:%s.%s' % (exploration_id, state_name)

    @classmethod
    def _get_target_id_for_exploration(cls, exploration_id):
        """Builds a target id of the form 'exploration:<exp_id>'."""
        return 'exploration:%s' % (exploration_id)

    @classmethod
    def get_exploration_id_from_target_id(cls, target_id):
        # This should match strings generated by both _get_target_id_for_state
        # and _get_target_id_for_exploration.
        # Raw string: the old non-raw pattern used '\:' and '\.', which are
        # invalid string escape sequences (a DeprecationWarning on modern
        # Pythons). ':' needs no escaping, and '.' inside a character class
        # is literal.
        m = re.search(r':([^.]*)', target_id)
        return m.group(1) if m else None

    @classmethod
    def get_state_name_from_target_id(cls, target_id):
        # This should match strings generated by _get_target_id_for_state.
        # Raw string so the backslash reaches the regex engine unambiguously.
        m = re.search(r'\.(.*)', target_id)
        return m.group(1) if m else None

    @classmethod
    def get_feedback_items_for_state(cls, exploration_id, state_name):
        """Returns feedback items targeting a specific exploration state."""
        target_id = cls._get_target_id_for_state(exploration_id, state_name)
        return cls._get_feedback_items_for_target(target_id)

    @classmethod
    def get_feedback_items_for_exploration(cls, exploration_id):
        """Returns feedback items targeting an exploration as a whole."""
        target_id = cls._get_target_id_for_exploration(exploration_id)
        return cls._get_feedback_items_for_target(target_id)

    @classmethod
    def get_feedback_items_for_user(cls, user_id):
        """Returns a dict, keyed by feedback item id, of a user's feedback."""
        result = {}
        for f in stats_models.FeedbackItemModel.get_feedback_items_for_user(user_id):
            feedback_item = FeedbackItem(f)
            result[feedback_item.id] = {
                'target_id': feedback_item.target_id,
                'content': feedback_item.content,
                'status': feedback_item.status
            }
        return result

    @classmethod
    def _create_feedback_for_target(
            cls, target_id, content, additional_data, submitter_id):
        """Creates (or fetches) the storage model and wraps it."""
        feedback_item_model = stats_models.FeedbackItemModel.get_or_create(
            target_id, content, additional_data, submitter_id)
        return cls(feedback_item_model)

    @classmethod
    def create_feedback_for_state(
            cls, exploration_id, state_name, content, additional_data=None,
            submitter_id=None):
        """Creates a feedback item targeting an exploration state."""
        target_id = cls._get_target_id_for_state(exploration_id, state_name)
        return cls._create_feedback_for_target(
            target_id, content, additional_data, submitter_id)

    @classmethod
    def create_feedback_for_exploration(
            cls, exploration_id, content, additional_data=None,
            submitter_id=None):
        """Creates a feedback item targeting a whole exploration."""
        target_id = cls._get_target_id_for_exploration(exploration_id)
        return cls._create_feedback_for_target(
            target_id, content, additional_data, submitter_id)

    def change_status(self, new_status):
        # NOTE(review): only mutates the domain object; presumably a separate
        # save path persists it -- confirm against the callers.
        self.status = new_status
|
{
"content_hash": "06f8e6599d5d2a28ea9489fee6ac05cf",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 85,
"avg_line_length": 37.24,
"alnum_prop": 0.6397690655209453,
"repo_name": "openhatch/oh-missions-oppia-beta",
"id": "aaf2197f0c45f065ac00bd993e2f49db80b259d5",
"size": "8071",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/domain/stats_domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "882"
},
{
"name": "CSS",
"bytes": "18641"
},
{
"name": "JavaScript",
"bytes": "388650"
},
{
"name": "Python",
"bytes": "815127"
},
{
"name": "Shell",
"bytes": "24026"
}
],
"symlink_target": ""
}
|
import wmi
import time
import math
import psutil
import subprocess
from tkinter import *
from tkinter import font
from comtypes import CLSCTX_ALL
from ctypes import cast, POINTER
from pycaw import AudioUtilities, IAudioEndpointVolume
from threading import Timer
import os
import sys
import socket
def giveMeLoadAverage():
    """Return (cpu_times, (percent, "%")) for the current process.

    NOTE(review): cpu_percent(1) blocks for a full second while sampling.
    """
    proc = psutil.Process()
    stat = proc.cpu_times()
    percent = (proc.cpu_percent(1), "%")
    return stat, percent
def giveMeIpV4():
    """Return this machine's outgoing IPv4 address.

    Connecting a UDP socket to a public address sends no packets; it only
    makes the OS choose the outgoing interface, whose address we then read.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        # Close the socket even if connect()/getsockname() raises;
        # the original leaked the descriptor on any exception.
        s.close()
def giveMeIpPing():
    """Ping localhost once (Windows `ping -n 1`) and return the reply line."""
    raw = subprocess.check_output(["ping", "-n", "1", "localhost"])
    text = raw.decode("ascii", "ignore").replace("\r", "")
    # The third line of ping's output holds the echo-reply summary.
    return text.split("\n")[2]
def giveMeRemainingSpace():
    """Return a ('Free space on disk:', <whole GB>, 'Go') tuple for '/'."""
    free_gb = psutil.disk_usage('/').free / 1000000000
    return 'Free space on disk:', math.floor(free_gb), 'Go'
def giveMeTemperature():
    """Return the ACPI thermal-zone temperature as (string, 'celsius').

    The WMI value is in tenths of a kelvin, hence the (x - 2732) / 10
    conversion to degrees Celsius.
    """
    # Raw string: the old "root\wmi" only worked because "\w" is not a valid
    # escape sequence (a DeprecationWarning since Python 3.6).
    w = wmi.WMI(namespace=r"root\wmi")
    temperature_info = w.MSAcpi_ThermalZoneTemperature()[0]
    return str((temperature_info.CurrentTemperature-2732)/10.0), "celsius"
# Shared WMI client used by the Win32_Service queries below.
c = wmi.WMI ()
def giveMeWamp():
    """Return 'online'/'offline' for wampapache64 (None if not installed)."""
    for service in c.Win32_Service():
        if service.Name == "wampapache64":
            return "offline" if service.State == 'Stopped' else "online"
def giveMeDhcp():
    """Return 'online'/'offline' for the Dhcp service (None if absent)."""
    for service in c.Win32_Service():
        if service.Name == "Dhcp":
            return "offline" if service.State == 'Stopped' else "online"
def giveMeWifi():
    """List visible wifi SSIDs via `netsh`, log them to conf.txt, and return
    the first one.

    NOTE(review): raises IndexError when netsh reports no networks -- confirm
    that is acceptable for the caller.
    """
    raw = subprocess.check_output(["netsh", "wlan", "show", "network"])
    lines = raw.decode("ascii", "ignore").replace("\r", "").split("\n")[4:]
    # After dropping the 4-line header, every 5th line is an SSID entry.
    ssids = lines[::5]
    # `with` guarantees conf.txt is closed even if a write fails; the
    # original left the handle open on any exception.
    with open('conf.txt', 'w') as f:
        for ssid in ssids:
            f.write(ssid + '\n')
    return ssids[0]
def secs2hours(secs):
    """Render a duration in seconds as an "H:MM:SS" string."""
    hours, remainder = divmod(secs, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
def giveMeBattery():
    """Return a human-readable battery status string."""
    battery = psutil.sensors_battery()
    if battery is None:
        # Desktops and VMs report no battery; the original crashed with
        # AttributeError here.
        return "no battery detected"
    return "charge = %s%%, time left = %s" % (
        battery.percent, secs2hours(battery.secsleft))
def giveMePercentBattery():
    """Return the battery charge percentage (0-100)."""
    return psutil.sensors_battery().percent
def giveMeTime():
    """Return the current local time, e.g. 'Monday 01 January 2024 12:00:00'."""
    formatted_now = time.strftime("%A %d %B %Y %H:%M:%S")
    return formatted_now
def giveMeVolume():
    """Return the master speaker volume as an integer percentage (0-100)."""
    devices = AudioUtilities.GetSpeakers()
    interface = devices.Activate(
        IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
    volume = cast(interface, POINTER(IAudioEndpointVolume))
    # Query calls kept from the original (results were discarded there too).
    volume.GetMute()
    volume.GetMasterVolumeLevel()
    volume.GetVolumeRange()
    scalar = volume.GetMasterVolumeLevelScalar()
    return math.floor(scalar * 100)
def maj():
    """Refresh the dynamic canvas widgets; reschedules itself every 1000 ms."""
    global Text2, Text3, Rect2, Percent1, Rect4, Percent2, Text5_2, Text4_3
    # Drop the previous widgets before redrawing them with fresh values.
    for stale in (Text3, Text2, Rect2, Rect4, Percent1, Percent2,
                  Text5_2, Text4_3):
        canvas.delete(stale)
    Text2 = canvas.create_text(1055, 165, text=giveMeTime(), font=orbiclean, fill=font_white)
    Text3 = canvas.create_text(1055, 315, text=giveMeBattery(), font=orbiclean, fill=font_white)
    # Battery gauge: scale 0-100% onto the 160-pixel-wide bar.
    battery_right_edge = 1035 + giveMePercentBattery() * 1.6
    Rect2 = canvas.create_rectangle(1035, 10, battery_right_edge, 35, fill="#00FF00")
    Percent1 = canvas.create_text(1110, 20, text=str(giveMePercentBattery()) + '%', font=orbiclean, fill='white')
    # Volume gauge fills upward from the bottom edge at y=590.
    volume_top_edge = 590 - giveMeVolume() * 1.6
    Rect4 = canvas.create_rectangle(1155, volume_top_edge, 1190, 590, fill="red")
    Percent2 = canvas.create_text(1173, 510, text=str(giveMeVolume()), font=orbiclean, fill='white')
    Text5_2 = canvas.create_text(170, 315, text=giveMeIpPing(), font=orbiclean, fill=font_white)
    Text4_3 = canvas.create_text(1040, 455, text=giveMeLoadAverage(), font=orbiclean, fill=font_white)
    fenetre.after(1000, maj)
def wampMaj():
    """Refresh the DHCP and WAMP service status labels."""
    global Text7_2, Text8_2
    for stale in (Text7_2, Text8_2):
        canvas.delete(stale)
    Text7_2 = canvas.create_text(105, 15, text=giveMeDhcp(), fill='red', font=FatBigHyppo)
    Text8_2 = canvas.create_text(105, 35, text=giveMeWamp(), fill='red', font=FatBigHyppo)
# --- Main window setup -------------------------------------------------------
fenetre = Tk()
fenetre.title("l'Oeil de Xana")
# helv36 = tkFont.Font(family = "Helvetica",size = 36,weight = "bold") import tkFont
FatBigHyppo = font.Font(family='Orbitron', size=12, weight='bold')
orbiclean = font.Font(family='Orbitron', size=8)
font.families()
fenetre.resizable(width=False, height=False)
canvas = Canvas(fenetre, width=1200, height=600, background="#DBDBDB", cursor="arrow")
canvas.pack()
# Background image; assumes xana.png sits in the working directory.
photo = PhotoImage(file="xana.png") # need resize
canvas.create_image(0, 0, anchor=NW, image=photo)
colorLines = "white"
colorLetters = "black"
font_white = "white"
# --- Temperature panel (top-left) --------------------------------------------
white_line1 = canvas.create_line(0, 150, 200, 150, fill=colorLines)
white_line1_2 = canvas.create_line(200, 150, 200, 170, fill=colorLines)
Text1 = canvas.create_text(140, 140, text="Temperature", font=FatBigHyppo, fill = font_white )
Text1_2 = canvas.create_text(155, 165, text=giveMeTemperature(), font=orbiclean,fill = font_white)
# --- Date and time panel (top-right) -----------------------------------------
white_line2 = canvas.create_line(950, 150, 1200, 150, fill=colorLines)
white_line2_2 = canvas.create_line(950, 150, 950, 170, fill=colorLines)
Text2_title = canvas.create_text(1020, 140, text='Date and Time', font=FatBigHyppo, fill = font_white)
Text2 = canvas.create_text(985, 165, text=giveMeTime(), font=orbiclean, fill = font_white)
# --- Battery panel and gauge --------------------------------------------------
white_line3 = canvas.create_line(950, 300, 1200, 300, fill=colorLines)
white_line3_2 = canvas.create_line(950, 300, 950, 320, fill=colorLines)
Text3_title = canvas.create_text(985, 290, text='Battery', font=FatBigHyppo, fill = font_white)
Text3 = canvas.create_text(985, 315, text=giveMeBattery(), font=orbiclean, fill = font_white)
Rect1 = canvas.create_rectangle(1035, 10, 1195, 35, fill=colorLetters)
# Gauge width: scale 0-100% onto the 160-pixel-wide bar.
random1 = 1035 + giveMePercentBattery()*1.6
Rect2 = canvas.create_rectangle(1035, 10, random1, 35, fill="#FD3F92")
Percent1 = canvas.create_text(1100, 20, text=str(giveMePercentBattery())+'%', font=orbiclean, fill='white')
# --- Disk / CPU panel ---------------------------------------------------------
white_line4 = canvas.create_line(950, 400, 1200, 400, fill=colorLines)
white_line4_2 = canvas.create_line(950, 400, 950, 420, fill=colorLines)
Text4_Title = canvas.create_text(975, 390, text="Disk", font=FatBigHyppo, fill = font_white)
Text4 = canvas.create_text(1040, 415, text=giveMeRemainingSpace(), font=orbiclean,fill = font_white)
Text4_2 = canvas.create_text(1000, 435, text="{use of the cpu => }", font=orbiclean, fill = font_white)
Text4_3 = canvas.create_text(1040, 455, text=giveMeLoadAverage(), font=orbiclean, fill = font_white)
# --- Volume gauge (right edge) ------------------------------------------------
Rect3 = canvas.create_rectangle(1155, 430, 1190, 590, fill=colorLetters)
random2 = 590 - (giveMeVolume()) * 1.6
Rect4 = canvas.create_rectangle(1155, random2, 1190, 590, fill="red")
Percent2 = canvas.create_text(1173, 510, text=str(giveMeVolume()), font=orbiclean, fill='white')
# --- Ping / IP panel (left) ---------------------------------------------------
white_line5 = canvas.create_line(0, 300, 250, 300, fill=colorLines)
white_line5_2 = canvas.create_line(250, 300, 250, 320, fill=colorLines)
Text5 = canvas.create_text(215, 290, text="PING", font=FatBigHyppo)
Text5_2 = canvas.create_text(170, 315, text=giveMeIpPing(), font=orbiclean,fill = font_white)
Text5_3 = canvas.create_text(170, 335, text=giveMeIpV4(), font=orbiclean, fill = font_white)
# --- Wifi panel ---------------------------------------------------------------
white_line6 = canvas.create_line(0, 450, 300, 450, fill=colorLines)
white_line6_2 = canvas.create_line(300, 450, 300, 470, fill=colorLines)
Text6_Title = canvas.create_text(265, 440, text='Wifi', font=FatBigHyppo, fill = font_white)
Text6 = canvas.create_text(200, 460, text=giveMeWifi(), font=orbiclean, fill = font_white)
# --- Service status labels (top-left corner) ----------------------------------
Text7 = canvas.create_text(40, 15, text="DHCP : ", font=FatBigHyppo, fill = font_white)
Text7_2 = canvas.create_text(100, 15, text="Loading...", fill='red', font=orbiclean)
Text8 = canvas.create_text(40, 35, text="WAMP : ", font=FatBigHyppo, fill = font_white)
Text8_2 = canvas.create_text(100, 35, text="Loading...", fill='red', font=orbiclean)
# Start the 1-second refresh loop, then hand control to Tk.
maj()
# NOTE(review): wampMaj() is never called, so the DHCP/WAMP labels stay at
# "Loading..." forever -- confirm whether this is intentional.
#wampMaj()
fenetre.mainloop()
# change the size = font=("Purisa", 12)
# hover effect = activefill="red"
# change the cursor = cursor="pirate"
|
{
"content_hash": "9089d403a0ff87dbabfae9e6d2842883",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 111,
"avg_line_length": 38.5,
"alnum_prop": 0.6816426816426816,
"repo_name": "grotadmorv/i3status",
"id": "d353dbb21cd7bbfed0464656f62ef0a228a0195c",
"size": "8547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interface_graphique/interface_graphique.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "45134"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
def read(*paths):
    """Build a file path from *paths* and return the contents."""
    full_path = os.path.join(*paths)
    with open(full_path, 'r') as handle:
        return handle.read()
# Package metadata for pyMPM (the `read` helper above can supply a
# long_description if one is added later).
setup(
    name='pyMPM',
    version='0.1.0',
    description='Python version of the MPM millimeter wave propagation model',
    url='http://github.com/cchwala/pyMPM',
    license='BSD',
    author='Christian Chwala',
    author_email='christian.chwala@kit.edu',
    packages=['pyMPM'],
    include_package_data=True,
    # NOTE(review): pip normalizes case, but the canonical name is "numpy".
    install_requires=['Numpy'],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Scientific/Engineering :: Atmospheric Science",
        "License :: OSI Approved :: BSD License",
        'Programming Language :: Python :: 2.7',
    ],
    keywords='microwave propagation atmosphere'
)
|
{
"content_hash": "d11ce2fdd13d403033a1fafd952b2adb",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 28.79310344827586,
"alnum_prop": 0.6335329341317365,
"repo_name": "cchwala/pyMPM",
"id": "3a6fbde30acd7aae7d9fbf0dbd9d4ffff3e89d91",
"size": "835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "478565"
},
{
"name": "Python",
"bytes": "7736"
}
],
"symlink_target": ""
}
|
"""
Rest Test MS
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Series of tools designed to help test the rest endpoints of mircroservices
:Copyright: (c) 2017 DELL Inc. or its subsidiaries. All Rights Reserved.
:License: Apache 2.0, see LICENSE for more details.
:Author: Akash Kwatra
Created on June 23, 2017
"""
__title__ = 'Rest Test MS'
__author__ = 'Akash Kwatra'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2017 DELL Inc.'
|
{
"content_hash": "c5f0fe4ede05d1b8a19d3653cdfd106f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 74,
"avg_line_length": 26.5625,
"alnum_prop": 0.6564705882352941,
"repo_name": "MichaelRegert/smi-test-automation",
"id": "98f511b471fee26669efc50ce2dcb56038ac6601",
"size": "579",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smi_tests/resttestms/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "98703"
}
],
"symlink_target": ""
}
|
from elasticsearchadmin import *
|
{
"content_hash": "d15de512fcf6d5072c785a36eb216bac",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 33,
"alnum_prop": 0.8484848484848485,
"repo_name": "anchor/elasticsearchadmin",
"id": "5ea61c581446bcb8e550246761fa772a0eba7581",
"size": "33",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "esadmin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10025"
}
],
"symlink_target": ""
}
|
from config.circos_setting import CircosSetting
from abbrator import Abbrator
from file_provider import file_provider, tmp_output_dir
from letterman import Letterman
#:__all__= ["file_provider", "abbrator"]
|
{
"content_hash": "7dab7057b4755ac281ded82f61db9589",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 55,
"avg_line_length": 41.4,
"alnum_prop": 0.7971014492753623,
"repo_name": "ecotox/pacfm",
"id": "01bacb5ebf44eee0914a3588071a06f25da1edcc",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pacfm/model/helper/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "96986"
},
{
"name": "C",
"bytes": "2253401"
},
{
"name": "Java",
"bytes": "28647"
},
{
"name": "Makefile",
"bytes": "88653"
},
{
"name": "Matlab",
"bytes": "14547"
},
{
"name": "Objective-C",
"bytes": "1061"
},
{
"name": "OpenEdge ABL",
"bytes": "99470"
},
{
"name": "Pascal",
"bytes": "34142"
},
{
"name": "Perl",
"bytes": "705775"
},
{
"name": "Python",
"bytes": "224920"
},
{
"name": "Shell",
"bytes": "17458"
}
],
"symlink_target": ""
}
|
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import time
import os
import eHive
class WatchDirectory(PatternMatchingEventHandler):
    """Watchdog handler that stops its observer on the first matching event."""

    def __init__(self, file_name, observer, event_triggered):
        self.observer = observer
        self.event_triggered = event_triggered
        super(WatchDirectory, self).__init__(
            case_sensitive=False,
            patterns=[file_name],
        )

    def on_any_event(self, event):
        # Any filesystem event on a matching path ends the watch.
        self.observer.stop()
        self.event_triggered = True
class Monitor:
    """Runs a watchdog Observer until a matching event fires or a timeout passes."""

    def __init__(self):
        self.observer = Observer()
        # Kept for backward compatibility; WatchDirectory tracks its own flag.
        self.event_triggered = False

    def watch(self, directory_name, file_name, watch_until_hours=48):
        """Watch `directory_name` (recursively) for events matching `file_name`.

        Args:
            directory_name: directory to watch.
            file_name: glob pattern passed to WatchDirectory.
            watch_until_hours: give up after this many hours.

        Returns:
            True if a matching event stopped the observer, False on timeout
            or error.
        """
        watch_event = WatchDirectory(file_name, self.observer, self.event_triggered)
        self.observer.schedule(watch_event, directory_name, recursive=True)
        self.observer.start()
        watch_until = time.time() + (watch_until_hours * 60 * 60)
        try:
            # The observer dies once WatchDirectory.on_any_event stops it.
            while self.observer.is_alive() and time.time() <= watch_until:
                time.sleep(1)
            if not self.observer.is_alive() and watch_event.event_triggered:
                self.observer.join()
                return True
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer silently swallowed. The common cleanup below also
            # replaces the original's redundant double observer.stop().
            pass
        self.observer.stop()
        self.observer.join()
        return False
class FileWatcher(eHive.BaseRunnable):
    """ Trigger the event based on file creation in the watched directory """

    def run(self):
        directory = self.param('directory')
        watch_until = self.param_required('watch_until')
        wait = self.param_required('wait')
        pattern = self.param('file_name')
        species = self.param('species')
        # Resolve the pattern: substitute the first species name when one is
        # configured, otherwise a wildcard; default to '*' with no pattern.
        if pattern and species:
            file_name = pattern.format(species[0])
        elif pattern:
            file_name = pattern.format('*')
        else:
            file_name = '*'
        if wait:
            if not self.wait_for_file(directory, file_name, watch_until):
                raise Exception(f"No event observed in directory : {directory} before timeout")

    def wait_for_file(self, directory, file_name, watch_until):
        # Fall back to watching everything when no pattern was supplied.
        if not file_name:
            file_name = '*'
        return Monitor().watch(directory, file_name, watch_until)
|
{
"content_hash": "452d2ca51bb562568c0767b1dd56e4d0",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 95,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.6086065573770492,
"repo_name": "Ensembl/ensembl-production",
"id": "ae7ba9483f123ce286cff6c4a2420680afc74e88",
"size": "3089",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/108",
"path": "src/python/ensembl/production/hive/FileWatcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AngelScript",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "2213925"
},
{
"name": "Python",
"bytes": "85746"
},
{
"name": "Shell",
"bytes": "47944"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.shortcuts import render, render_to_response
from django.views.generic import View
from django.http import HttpResponse
from customers import models as m
import sys, json
from itertools import chain
from dashboard import Dashboard
#@login_required(login_url='/auth/login/')
class Venue(Dashboard, View):
    """Class-based view that lists venues of a given type, applying wishlist,
    availability, location, confidence and price/discount filters.

    NOTE(review): the whole class body is wrapped in a class-level try/except,
    so any error while *defining* these methods is swallowed and only printed;
    the class would then be created without its methods. Confirm this is
    intentional.
    """
    try:
        def __init__(self):
            # View configuration and default price-filter bounds (in rupees,
            # presumably -- confirm against the templates).
            self.template_name = 'customers/venue.html'
            self.wishlist_list = []
            self.filter_values = (10000, 60000)
            self.popular_price_filter_values = (18000, 40000)
            self.venues = None
        def get_context_data(self, **kwargs):
            # Delegates entirely to Dashboard.get_context_data.
            context = super(Venue, self).get_context_data(**kwargs)
            return context
        def get_queryset(self, **kwargs):
            # Raw SQL here is constant (no interpolation), so it is safe.
            query = "select id, name from venue_types"
            self.venue_subcategories = m.Venue_Types.objects.raw(query)
            return self.venue_subcategories
        def prepare_wishlist_data(self, *args, **kwargs):
            """Returns the ref_ids wishlisted by the logged-in user (if any)."""
            request = args[0]
            if request.user.username:
                user_id = m.Users.objects.get(username= request.user.username).id
                # NOTE(review): SQL built by string concatenation; user_id is an
                # int from the ORM so this is not directly injectable, but a
                # parameterized query would be safer.
                query = "select id, ref_id from wishlist where user_id=" + str(user_id)
                wishlist = m.Wishlist.objects.raw(query)
                for wish in wishlist:
                    self.wishlist_list.append(str(wish.ref_id))
            return self.wishlist_list
        def get_venues(self, request, **kwargs):
            """Returns venues of the requested type after applying the
            availability, sublocation, confidence and discount filters.
            """
            type = self.kwargs['type']
            confidence_check_filter = request.GET.get('confidence_check_filter')
            self.venues = m.Venues.objects.filter(type_id=type)
            #filter product according to availability
            if request.user.username:
                user_id = m.Users.objects.get(username= request.user.username).id
                try:
                    preference_date=m.Main_Preferences.objects.get(user_id=user_id).date
                except m.Main_Preferences.DoesNotExist:
                    preference_date=None
                if preference_date:
                    try:
                        # Venue ref_ids start with 'VN'; exclude those already
                        # booked on the user's preferred date.
                        venue_booked=m.Product_Availability.objects.filter(start_date=preference_date , ref_id__startswith='VN').values('ref_id')
                        venue_booked_ref_ids= [d['ref_id'] for d in venue_booked]
                        self.venues=self.venues.exclude(ref_id__in=venue_booked_ref_ids)
                    except m.Product_Availability.DoesNotExist:
                        venue_booked=None
                #filter venues according to preference sublocation
                try:
                    preference_location=m.Main_Preferences.objects.get(user_id=user_id).sublocation
                # NOTE(review): BUG -- missing `m.` prefix; if this exception
                # ever fires it raises NameError instead of being caught.
                except Main_Preferences.DoesNotExist:
                    preference_location=None
                if preference_location:
                    try:
                        location_venue=m.Address.objects.filter(locality__icontains=preference_location).values('id')
                        location_venue_ids = [d['id'] for d in location_venue]
                        self.venues=self.venues.filter(address_id__in = location_venue_ids)
                    except m.Address.DoesNotExist:
                        location_venue=None
            #Filter by barati confidence. Select only if confidence > 20%
            if confidence_check_filter == 'add_confidence':
                self.venues = self.venues.exclude(barati_confidence_perc__isnull=True)
                self.venues = self.venues.exclude(barati_confidence_perc__lte=20.0)
            # If the user is logged in, fetch main_pref data and exclude venues
            # that are not available.
            #Filter by discount
            discounts_dict = {}
            # Each key is present with value 'filter' when that discount band
            # checkbox was selected in the UI.
            discounts_dict = {
                '0-10' : request.GET.get('0-10'),
                '10-20' : request.GET.get('10-20'),
                '20-30' : request.GET.get('20-30'),
                '30-40' : request.GET.get('30-40'),
                '40-100' : request.GET.get('40-100'),
            }
            venues, venues_0_10, venues_10_20, venues_20_30, venues_30_40, venues_40_100 = [], [], [], [], [], []
            for key, value in discounts_dict.iteritems():
                if value == 'filter':
                    if key == '0-10':
                        if self.venues:
                            #self.venues = self.venues.exclude(discount_perc__isnull=True)
                            venues_0_10 = self.venues.filter(discount_perc__range=(0,10))
                    if key == '10-20':
                        if self.venues:
                            #self.venues = self.venues.exclude(discount_perc__isnull=True)
                            venues_10_20 = self.venues.filter(discount_perc__range=(10,20))
                    if key == '20-30':
                        if self.venues:
                            #self.venues = self.venues.exclude(discount_perc__isnull=True)
                            venues_20_30 = self.venues.filter(discount_perc__range=(20,30))
                    if key == '30-40':
                        if self.venues:
                            #self.venues = self.venues.exclude(discount_perc__isnull=True)
                            venues_30_40 = self.venues.filter(discount_perc__range=(30,40))
                    if key == '40-100':
                        if self.venues:
                            #self.venues = self.venues.exclude(discount_perc__isnull=True)
                            venues_40_100 = self.venues.filter(discount_perc__range=(40,100))
            # When any discount band was selected, the unfiltered queryset is
            # discarded and replaced with the union of the band results.
            for key, value in discounts_dict.iteritems():
                if value == 'filter':
                    self.venues = None
            venues = list(chain(venues_0_10, venues_10_20, venues_20_30, venues_30_40, venues_40_100))
            if venues:
                self.venues = venues
            if self.venues is None:
                self.venues = []
            return self.venues
        def get_price_filtered_venues(self, request, selected_filter_values, **kwargs):
            """Returns venues of the requested type, optionally restricted to
            an (min, max) actual_price range."""
            type = self.kwargs['type']
            # NOTE(review): `query` is built but never used.
            query = "select id, name from venues where type_id = " + type
            if selected_filter_values is not None:
                self.venues = m.Venues.objects.filter(type_id=type, actual_price__range=selected_filter_values)#raw(query)
            else:
                self.venues = m.Venues.objects.filter(type_id=type)#raw(query)
            return self.venues
        #@login_required(login_url='/auth/login/')
        def get(self, request, **kwargs):
            """Renders the venue listing with all default filters applied."""
            subcategories = self.get_context_data()['venue_types']
            venues = self.get_venues(request)
            wishlist_list = self.prepare_wishlist_data(request)
            filter_values = self.filter_values
            #Get tax
            tax = super(Venue, self).get_tax('venue')
            context_dict = {
                'subcategories' : subcategories, 'venues' : venues, 'category' : 'venues', 'type' : self.kwargs['type'], \
                'wishlist_list' : wishlist_list, 'filter_values' : filter_values, \
                'popular_price_filter_values' : self.popular_price_filter_values, 'tax' : tax
            }
            context_dict.update(self.get_context_data(request=request))
            return render(request, self.template_name, context_dict)
        # NOTE(review): `page_template` is not imported in this module's
        # visible imports (presumably from django-endless-pagination) --
        # confirm it is available at import time.
        @page_template('customers/venue.html')
        def post(self, request, **kwargs):
            """Renders the listing filtered by the price-slider range posted
            by the client (comma-separated 'min,max')."""
            slider_values = request.POST.get('slider');
            selected_filter_values = None
            if slider_values is not None:
                selected_filter_values = tuple(slider_values.split(','))
            subcategories = self.get_context_data()['venue_types']
            venues = self.get_price_filtered_venues(request, selected_filter_values)
            wishlist_list = self.prepare_wishlist_data(request)
            #Get tax
            tax = super(Venue, self).get_tax('venue')
            context_dict = {
                'subcategories' : subcategories, 'venues' : venues, 'category' : 'venues', 'type' : self.kwargs['type'],\
                'wishlist_list' : wishlist_list, 'filter_values' : self.filter_values,\
                'selected_filter_values' : selected_filter_values, 'tax' : tax
            }
            context_dict.update(self.get_context_data(request=request))
            return render(request, self.template_name, context_dict)
    except Exception as e:
        print e
        print sys.exc_traceback.tb_lineno
|
{
"content_hash": "0fa8146a34ed7834543fc04c603b34c2",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 139,
"avg_line_length": 47.9766081871345,
"alnum_prop": 0.5847147732813262,
"repo_name": "abhaystoic/barati",
"id": "21248f20b8d7e80dd6d9853b368fa6a3f3870114",
"size": "8204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barati/customers/views_cluster/venue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "363180"
},
{
"name": "CoffeeScript",
"bytes": "18651"
},
{
"name": "HTML",
"bytes": "314595"
},
{
"name": "JavaScript",
"bytes": "424509"
},
{
"name": "PHP",
"bytes": "742"
},
{
"name": "PLpgSQL",
"bytes": "156474"
},
{
"name": "Python",
"bytes": "192592"
},
{
"name": "Ruby",
"bytes": "198"
}
],
"symlink_target": ""
}
|
@annot('int -> int noenv')
def triple(n):
    # Simple no-environment function used as a first-class value in main().
    return n * 3
@annot('(int -> int noenv) -> int noenv')
def apply_two(f):
    # Deliberately rebinds f to a local before calling, to exercise the
    # compiler's handling of function-value assignment.
    g = f
    return g(2)
# FV is a dynamically-scoped environment variable (bound via in_env below).
FV = new_env('FV', int)
@annot('void -> int')
def getctx():
    # Reads the value currently bound to FV by an enclosing in_env().
    return env(FV)
@annot('void -> void')
def make_multiple_func_vals():
    # Sanity check multiple FuncVal ctx bindings
    f1 = getctx
    f2 = getctx
    # Both function values should capture the same FV context (4 + 4 == 8).
    assert f1() + f2() == 8, "Ctx-bound func vals"
def main():
    assert apply_two(triple) == 6, "Function object"
    # Runs make_multiple_func_vals with FV bound to 4.
    in_env(FV, 4, make_multiple_func_vals)
    return 0
|
{
"content_hash": "de51ea9bf48cee36666b2e9e3a735ef1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 52,
"avg_line_length": 20.615384615384617,
"alnum_prop": 0.5932835820895522,
"repo_name": "pshc/archipelago",
"id": "631138f1defdb2e0d61baaa3f0daf1c80d11d3ed",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/funcval.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15213"
},
{
"name": "C++",
"bytes": "9448"
},
{
"name": "Objective-C",
"bytes": "18281"
},
{
"name": "Python",
"bytes": "313034"
}
],
"symlink_target": ""
}
|
import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from slugify import slugify
class NotificationQuerySet(models.query.QuerySet):
    """Personalized queryset created to improve model usability"""

    def unread(self):
        """Return only unread items in the current queryset"""
        return self.filter(unread=True)

    def read(self):
        """Return only read items in the current queryset"""
        return self.filter(unread=False)

    def mark_all_as_read(self, recipient=None):
        """Mark as read any unread elements in the current queryset with
        optional filter by recipient first.
        """
        unread_qs = self.unread()
        if recipient:
            unread_qs = unread_qs.filter(recipient=recipient)
        return unread_qs.update(unread=False)

    def mark_all_as_unread(self, recipient=None):
        """Mark as unread any read elements in the current queryset with
        optional filter by recipient first.
        """
        read_qs = self.read()
        if recipient:
            read_qs = read_qs.filter(recipient=recipient)
        return read_qs.update(unread=True)

    def get_most_recent(self):
        """Returns the most recent unread elements in the queryset"""
        return self.unread()[:5]
class Notification(models.Model):
    """
    Action model describing the actor acting out a verb (on an optional target).
    Nomenclature based on http://activitystrea.ms/specs/atom/1.0/
    This model is an adaptation from the django package django-notifications at
    https://github.com/django-notifications/django-notifications
    Generalized Format::
        <actor> <verb> <time>
        <actor> <verb> <action_object> <time>
    Examples::
        <Sebastian> <Logged In> <1 minute ago>
        <Sebastian> <commented> <Article> <2 hours ago>
    """
    # One-letter codes stored in the `verb` column.
    LIKED = "L"
    COMMENTED = "C"
    FAVORITED = "F"
    ANSWERED = "A"
    ACCEPTED_ANSWER = "W"
    EDITED_ARTICLE = "E"
    ALSO_COMMENTED = "K"
    LOGGED_IN = "I"
    LOGGED_OUT = "O"
    VOTED = "V"
    SHARED = "S"
    SIGNUP = "U"
    REPLY = "R"
    NOTIFICATION_TYPES = (
        (LIKED, _("liked")),
        (COMMENTED, _("commented")),
        # Fixed typo: the display string previously read "cavorited".
        (FAVORITED, _("favorited")),
        (ANSWERED, _("answered")),
        (ACCEPTED_ANSWER, _("accepted")),
        (EDITED_ARTICLE, _("edited")),
        (ALSO_COMMENTED, _("also commented")),
        (LOGGED_IN, _("logged in")),
        (LOGGED_OUT, _("logged out")),
        (VOTED, _("voted on")),
        (SHARED, _("shared")),
        (SIGNUP, _("created an account")),
        (REPLY, _("replied to")),
    )
    actor = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name="notify_actor", on_delete=models.CASCADE
    )
    recipient = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=False,
        related_name="notifications",
        on_delete=models.CASCADE,
    )
    unread = models.BooleanField(default=True, db_index=True)
    timestamp = models.DateTimeField(auto_now_add=True)
    uuid_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    slug = models.SlugField(max_length=210, null=True, blank=True)
    verb = models.CharField(max_length=1, choices=NOTIFICATION_TYPES)
    # Generic relation to the (optional) object the verb acted on.
    action_object_content_type = models.ForeignKey(
        ContentType,
        blank=True,
        null=True,
        related_name="notify_action_object",
        on_delete=models.CASCADE,
    )
    action_object_object_id = models.CharField(max_length=50, blank=True, null=True)
    action_object = GenericForeignKey(
        "action_object_content_type", "action_object_object_id"
    )
    objects = NotificationQuerySet.as_manager()

    class Meta:
        verbose_name = _("Notification")
        verbose_name_plural = _("Notifications")
        ordering = ("-timestamp",)

    def __str__(self):
        if self.action_object:
            return f"{self.actor} {self.get_verb_display()} {self.action_object} {self.time_since()} ago"
        return f"{self.actor} {self.get_verb_display()} {self.time_since()} ago"

    def save(self, *args, **kwargs):
        # Generate a slug on first save only, so it stays stable afterwards.
        if not self.slug:
            self.slug = slugify(
                f"{self.recipient} {self.uuid_id} {self.verb}",
                lowercase=True,
                max_length=200,
            )
        super().save(*args, **kwargs)

    def time_since(self, now=None):
        """
        Shortcut for the ``django.utils.timesince.timesince`` function of the
        current timestamp.
        """
        from django.utils.timesince import timesince

        return timesince(self.timestamp, now)

    def get_icon(self):
        """Model method to validate notification type and return the closest
        icon to the verb.
        """
        # Dispatch table replaces the original long if/elif chain; returns
        # None for unknown verbs, exactly like the chain did.
        icon_by_verb = {
            "C": "fa-comment",
            "A": "fa-comment",
            "K": "fa-comment",
            "I": "fa-users",
            "U": "fa-users",
            "O": "fa-users",
            "L": "fa-heart",
            "F": "fa-star",
            "W": "fa-check-circle",
            "E": "fa-pencil",
            "V": "fa-plus",
            "S": "fa-share-alt",
            "R": "fa-reply",
        }
        return icon_by_verb.get(self.verb)

    def mark_as_read(self):
        # Save only when the flag actually flips, to avoid useless writes.
        if self.unread:
            self.unread = False
            self.save()

    def mark_as_unread(self):
        if not self.unread:
            self.unread = True
            self.save()
def notification_handler(actor, recipient, verb, **kwargs):
    """
    Handler function to create Notification instance(s).

    :requires:
    :param actor: User instance of that user who makes the action.
    :param recipient: User instance, a list of usernames or the string
                      'global' defining who should be notified.
    :param verb: Notification attribute with the right choice from the list.

    :optional:
    :param action_object: Model instance on which the verb was executed.
    :param key: String defining what kind of notification is going to be created.
    :param id_value: UUID value assigned to a specific element in the DOM.
    """
    key = kwargs.pop("key", "notification")
    id_value = kwargs.pop("id_value", None)
    # BUGFIX: pop the action_object ONCE, before any loop. The original
    # called kwargs.pop("action_object", None) inside the per-user loops,
    # so only the first recipient's notification carried the action object
    # and every subsequent one silently got None.
    action_object = kwargs.pop("action_object", None)
    if recipient == "global":
        # Notify every user except the actor, then broadcast one event.
        users = get_user_model().objects.all().exclude(username=actor.username)
        for user in users:
            Notification.objects.create(
                actor=actor,
                recipient=user,
                verb=verb,
                action_object=action_object,
            )
        notification_broadcast(actor, key)
    elif isinstance(recipient, list):
        # A list of usernames; resolve each to a User instance.
        for username in recipient:
            Notification.objects.create(
                actor=actor,
                recipient=get_user_model().objects.get(username=username),
                verb=verb,
                action_object=action_object,
            )
    elif isinstance(recipient, get_user_model()):
        # Single User instance: create one notification and push a
        # targeted WebSocket event.
        Notification.objects.create(
            actor=actor,
            recipient=recipient,
            verb=verb,
            action_object=action_object,
        )
        notification_broadcast(
            actor, key, id_value=id_value, recipient=recipient.username
        )
    else:
        # Deliberate no-op for unsupported recipient types (keeps the
        # original best-effort contract instead of raising).
        pass
def notification_broadcast(actor, key, **kwargs):
    """Broadcast an event to the receive layer of this app's WebSocket
    consumer via the "notifications" channel group.

    :requires:
    :param actor: User instance of that user who makes the action.
    :param key: String parameter to indicate the client which action to
                perform.

    :optional:
    :param id_value: UUID value assigned to a specific element in the DOM.
    :param recipient: String indicating the name of that who needs to be
                      notified.
    """
    payload = {
        "type": "receive",
        "key": key,
        "actor_name": actor.username,
        "id_value": kwargs.pop("id_value", None),
        "recipient": kwargs.pop("recipient", None),
    }
    # group_send is async; bridge it into this synchronous call site.
    async_to_sync(get_channel_layer().group_send)("notifications", payload)
|
{
"content_hash": "6b8dcaff47573cb41ea0eecc07d08747",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 105,
"avg_line_length": 31.73992673992674,
"alnum_prop": 0.6021927293710329,
"repo_name": "vitorfs/bootcamp",
"id": "76229cc3491c2ace8be35f90b1778227a360e350",
"size": "8665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootcamp/notifications/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "271283"
},
{
"name": "Dockerfile",
"bytes": "2257"
},
{
"name": "HTML",
"bytes": "77296"
},
{
"name": "JavaScript",
"bytes": "31300"
},
{
"name": "Python",
"bytes": "165979"
},
{
"name": "Shell",
"bytes": "7829"
}
],
"symlink_target": ""
}
|
"""This module contains Google BigQuery to Google Cloud Storage operator."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from google.api_core.exceptions import Conflict
from google.api_core.retry import Retry
from google.cloud.bigquery import DEFAULT_RETRY, ExtractJob
from airflow import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.providers.google.cloud.links.bigquery import BigQueryTableLink
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigQueryToGCSOperator(BaseOperator):
    """
    Transfers a BigQuery table to a Google Cloud Storage bucket.
    .. seealso::
        For more details about these parameters:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs
    :param source_project_dataset_table: The dotted
        ``(<project>.|<project>:)<dataset>.<table>`` BigQuery table to use as the
        source data. If ``<project>`` is not included, project will be the project
        defined in the connection json. (templated)
    :param destination_cloud_storage_uris: The destination Google Cloud
        Storage URI (e.g. gs://some-bucket/some-file.txt). (templated) Follows
        convention defined here:
        https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
    :param project_id: Google Cloud Project where the job is running
    :param compression: Type of compression to use.
    :param export_format: File format to export.
    :param field_delimiter: The delimiter to use when extracting to a CSV.
    :param print_header: Whether to print a header for a CSV file extract.
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :param delegate_to: The account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :param labels: a dictionary containing labels for the job/query,
        passed to BigQuery
    :param location: The location used for the operation.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param result_retry: How to retry the `result` call that retrieves rows
    :param result_timeout: The number of seconds to wait for `result` method before using `result_retry`
    :param job_id: The ID of the job. It will be suffixed with hash of job configuration
        unless ``force_rerun`` is True.
        The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
        dashes (-). The maximum length is 1,024 characters. If not provided then uuid will
        be generated.
    :param force_rerun: If True then operator will use hash of uuid as job id suffix
    :param reattach_states: Set of BigQuery job's states in case of which we should reattach
        to the job. Should be other than final states.
    """
    # Attributes rendered through Jinja templating before execute() runs.
    template_fields: Sequence[str] = (
        "source_project_dataset_table",
        "destination_cloud_storage_uris",
        "labels",
        "impersonation_chain",
    )
    template_ext: Sequence[str] = ()
    ui_color = "#e4e6f0"
    operator_extra_links = (BigQueryTableLink(),)
    def __init__(
        self,
        *,
        source_project_dataset_table: str,
        destination_cloud_storage_uris: list[str],
        project_id: str | None = None,
        compression: str = "NONE",
        export_format: str = "CSV",
        field_delimiter: str = ",",
        print_header: bool = True,
        gcp_conn_id: str = "google_cloud_default",
        delegate_to: str | None = None,
        labels: dict | None = None,
        location: str | None = None,
        impersonation_chain: str | Sequence[str] | None = None,
        result_retry: Retry = DEFAULT_RETRY,
        result_timeout: float | None = None,
        job_id: str | None = None,
        force_rerun: bool = False,
        reattach_states: set[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Plain attribute copies; all real work happens in execute().
        self.project_id = project_id
        self.source_project_dataset_table = source_project_dataset_table
        self.destination_cloud_storage_uris = destination_cloud_storage_uris
        self.compression = compression
        self.export_format = export_format
        self.field_delimiter = field_delimiter
        self.print_header = print_header
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.labels = labels
        self.location = location
        self.impersonation_chain = impersonation_chain
        self.result_retry = result_retry
        self.result_timeout = result_timeout
        self.job_id = job_id
        self.force_rerun = force_rerun
        self.reattach_states: set[str] = reattach_states or set()
        # Hook is created lazily in execute(); None until then.
        self.hook: BigQueryHook | None = None
    @staticmethod
    def _handle_job_error(job: ExtractJob) -> None:
        # Surface a finished-with-error extract job as a task failure.
        if job.error_result:
            raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
    def _prepare_configuration(self):
        """Build the BigQuery ``configuration`` dict for the extract job."""
        source_project, source_dataset, source_table = self.hook.split_tablename(
            table_input=self.source_project_dataset_table,
            default_project_id=self.project_id or self.hook.project_id,
            var_name="source_project_dataset_table",
        )
        configuration: dict[str, Any] = {
            "extract": {
                "sourceTable": {
                    "projectId": source_project,
                    "datasetId": source_dataset,
                    "tableId": source_table,
                },
                "compression": self.compression,
                "destinationUris": self.destination_cloud_storage_uris,
                "destinationFormat": self.export_format,
            }
        }
        if self.labels:
            configuration["labels"] = self.labels
        if self.export_format == "CSV":
            # Only set fieldDelimiter and printHeader fields if using CSV.
            # Google does not like it if you set these fields for other export
            # formats.
            configuration["extract"]["fieldDelimiter"] = self.field_delimiter
            configuration["extract"]["printHeader"] = self.print_header
        return configuration
    def execute(self, context: Context):
        """Submit (or reattach to) the extract job, then persist a link to
        the source table for the Airflow UI.
        """
        self.log.info(
            "Executing extract of %s into: %s",
            self.source_project_dataset_table,
            self.destination_cloud_storage_uris,
        )
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        self.hook = hook
        configuration = self._prepare_configuration()
        # Deterministic job id (unless force_rerun) so that retries of this
        # task can detect an already-submitted job via the Conflict below.
        job_id = hook.generate_job_id(
            job_id=self.job_id,
            dag_id=self.dag_id,
            task_id=self.task_id,
            logical_date=context["logical_date"],
            configuration=configuration,
            force_rerun=self.force_rerun,
        )
        try:
            self.log.info("Executing: %s", configuration)
            job: ExtractJob = hook.insert_job(
                job_id=job_id,
                configuration=configuration,
                project_id=self.project_id,
                location=self.location,
                timeout=self.result_timeout,
                retry=self.result_retry,
            )
            self._handle_job_error(job)
        except Conflict:
            # If the job already exists retrieve it
            job = hook.get_job(
                project_id=self.project_id,
                location=self.location,
                job_id=job_id,
            )
            if job.state in self.reattach_states:
                # We are reattaching to a job
                job.result(timeout=self.result_timeout, retry=self.result_retry)
                self._handle_job_error(job)
            else:
                # Same job configuration so we need force_rerun
                raise AirflowException(
                    f"Job with id: {job_id} already exists and is in {job.state} state. If you "
                    f"want to force rerun it consider setting `force_rerun=True`."
                    f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
                )
        # Read the effective source table back from the job so the link
        # reflects what actually ran (including resolved project id).
        conf = job.to_api_repr()["configuration"]["extract"]["sourceTable"]
        dataset_id, project_id, table_id = conf["datasetId"], conf["projectId"], conf["tableId"]
        BigQueryTableLink.persist(
            context=context,
            task_instance=self,
            dataset_id=dataset_id,
            project_id=project_id,
            table_id=table_id,
        )
|
{
"content_hash": "cdcee8a574404a95d94c733e99cc6dc6",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 104,
"avg_line_length": 43.37327188940092,
"alnum_prop": 0.6261155971100723,
"repo_name": "nathanielvarona/airflow",
"id": "b229257d3d711dc6b22b659394881304e671ca9c",
"size": "10199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/providers/google/cloud/transfers/bigquery_to_gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the ``core.Person`` table."""
    def forwards(self, orm):
        # Adding model 'Person'
        # NOTE(review): 'university' is a self-referential FK to
        # core.Person (consistent with the frozen `models` dict below) —
        # presumably universities are stored as Person rows of a special
        # `kind`; confirm against the app's models.
        db.create_table(u'core_person', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('kind', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('university', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Person'], null=True, blank=True)),
            ('course', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
            ('semester', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('cpf', self.gf('django.db.models.fields.CharField')(unique=True, max_length=20, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=100)),
            ('city', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('facebook', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('twitter', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
            ('image', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['Person'])
    def backwards(self, orm):
        # Deleting model 'Person'
        db.delete_table(u'core_person')
    # Frozen model state South uses to build the `orm` objects above.
    models = {
        u'core.person': {
            'Meta': {'ordering': "['name']", 'object_name': 'Person'},
            'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'course': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'cpf': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '100'}),
            'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'semester': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'twitter': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'university': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Person']", 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['core']
|
{
"content_hash": "0acd57d1ded35ddf7b71075868d1584c",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 141,
"avg_line_length": 64.05263157894737,
"alnum_prop": 0.5858668857847165,
"repo_name": "lucascastejon/cleandevcertificates",
"id": "6700195061ad09f1ca0e5c7db7d753f691b86074",
"size": "3675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cleandevcertificates/core/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "462"
},
{
"name": "JavaScript",
"bytes": "15712"
},
{
"name": "Python",
"bytes": "44914"
}
],
"symlink_target": ""
}
|
from autobahn.asyncio.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
import trollius
class MyClientProtocol(WebSocketClientProtocol):
    # Demo WebSocket client: once the connection opens it sends a text and
    # a binary message every second, and logs whatever it receives.
    def onConnect(self, response):
        # Called when the WebSocket handshake has reached the server.
        print("Server connected: {0}".format(response.peer))
    @trollius.coroutine
    def onOpen(self):
        print("WebSocket connection open.")
        # start sending messages every second ..
        # (trollius is the Python 2 backport of asyncio; `yield` here is
        # the pre-`await` coroutine suspension point)
        while True:
            self.sendMessage(u"Hello, world!".encode('utf8'))
            self.sendMessage(b"\x00\x01\x03\x04", isBinary=True)
            yield trollius.sleep(1)
    def onMessage(self, payload, isBinary):
        # Echoed frames come back through here; decode text frames only.
        if isBinary:
            print("Binary message received: {0} bytes".format(len(payload)))
        else:
            print("Text message received: {0}".format(payload.decode('utf8')))
    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
    # Connect the demo protocol to a local echo server on port 9000 and
    # run the event loop until interrupted.
    factory = WebSocketClientFactory("ws://localhost:9000", debug=False)
    factory.protocol = MyClientProtocol
    loop = trollius.get_event_loop()
    coro = loop.create_connection(factory, '127.0.0.1', 9000)
    loop.run_until_complete(coro)
    loop.run_forever()
    loop.close()
|
{
"content_hash": "1ff02c294871458b7e20554f7185333a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 78,
"avg_line_length": 30.4390243902439,
"alnum_prop": 0.6538461538461539,
"repo_name": "iffy/AutobahnPython",
"id": "ad572cb803c66ee7ed1b2440eb033a47bd9f621b",
"size": "2525",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/asyncio/websocket/echo/client_coroutines_py2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2711"
},
{
"name": "HTML",
"bytes": "86275"
},
{
"name": "JavaScript",
"bytes": "104724"
},
{
"name": "Makefile",
"bytes": "4809"
},
{
"name": "Python",
"bytes": "1304463"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
}
|
from setuptools import setup
def readme():
    """Return the contents of README.rst, used as the long description."""
    with open('README.rst') as fh:
        return fh.read()
# Distribution metadata; the long description is read from README.rst.
setup(
    name = 'sinchsms',
    version = '1.0.4',
    description = 'A module to send sms using the Sinch REST apis, www.sinch.com',
    long_description = readme(),
    author = 'Slava Savitskiy',
    author_email = 'slava@sinch.com',
    url = 'https://github.com/sinch/python-sinch-sms.git',
    keywords = ['sms', 'sinch'],
    py_modules = ['sinchsms'],
    classifiers = [
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Communications",
        "Development Status :: 4 - Beta"
    ],
)
|
{
"content_hash": "f0af466d19eba6d3026167252896e4f9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 31.85185185185185,
"alnum_prop": 0.6162790697674418,
"repo_name": "sinch/python-sinch-sms",
"id": "6c4d634f96434473ae3e488e153093debec157ab",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5602"
}
],
"symlink_target": ""
}
|
import os
import re
import warnings
from six.moves.urllib_parse import urlparse
from . import env
# Matches attachment-style Heroku Postgres variables,
# e.g. HEROKU_POSTGRESQL_RED_URL.
HEROKU_POSTGRES_ENV_NAME_RE = re.compile('HEROKU_POSTGRESQL_[A-Z_]*URL')
def from_heroku_envvars(config):
    """Load Heroku add-on environment variables into *config*.

    Known add-on variables are copied in under their conventional Flask
    config names, then a few derived settings (SMTP/Flask-Mail, Redis,
    MongoDB) are filled in from what was found.
    """
    # Heroku variable name -> config key it should be stored under.
    rename_map = {
        # SQL-Alchemy
        'DATABASE_URL': 'SQLALCHEMY_DATABASE_URI',
        # Celery w/ RabbitMQ
        'BROKER_URL': 'RABBITMQ_URL',
        'REDISTOGO_URL': 'REDIS_URL',
        'MONGOLAB_URI': 'MONGO_URI',
        'MONGOHQ_URL': 'MONGO_URI',
        'CLOUDANT_URL': 'COUCHDB_URL',
        'MEMCACHIER_SERVERS': 'CACHE_MEMCACHED_SERVERS',
        'MEMCACHIER_USERNAME': 'CACHE_MEMCACHED_USERNAME',
        'MEMCACHIER_PASSWORD': 'CACHE_MEMCACHED_PASSWORD',
    }
    # No promoted DATABASE_URL: fall back to attachment-style Postgres
    # variables found via regex, warning about each one used.
    if 'DATABASE_URL' not in os.environ:
        for candidate in os.environ.keys():
            if not HEROKU_POSTGRES_ENV_NAME_RE.match(candidate):
                continue
            rename_map[candidate] = 'SQLALCHEMY_DATABASE_URI'
            warnings.warn('Using {0} as the database URL. However, '
                          'really should promote this or another URL '
                          'to DATABASE_URL by running \'heroku pg:'
                          'promote {0}\''.format(candidate), RuntimeWarning)
    # Variables imported verbatim (same name on both sides).
    passthrough = [
        # Sentry
        'SENTRY_DSN',
        # Exceptional
        'EXCEPTIONAL_API_KEY',
        # Flask-GoogleFed
        'GOOGLE_DOMAIN',
        # Mailgun
        'MAILGUN_API_KEY',
        'MAILGUN_SMTP_LOGIN',
        'MAILGUN_SMTP_PASSWORD',
        'MAILGUN_SMTP_PORT',
        'MAILGUN_SMTP_SERVER',
        # SendGrid
        'SENDGRID_USERNAME',
        'SENDGRID_PASSWORD'
    ]
    # import the relevant envvars
    env.from_envvars(config, envvars=passthrough, as_json=False)
    env.from_envvars(config, envvars=rename_map, as_json=False)
    # Normalize whichever mail add-on is present onto generic SMTP_* keys.
    if 'MAILGUN_SMTP_SERVER' in config:
        for part in ('SERVER', 'PORT', 'LOGIN', 'PASSWORD'):
            config['SMTP_' + part] = config['MAILGUN_SMTP_' + part]
        config['SMTP_USE_TLS'] = True
    elif 'SENDGRID_USERNAME' in config:
        config['SMTP_SERVER'] = 'smtp.sendgrid.net'
        config['SMTP_PORT'] = 25
        config['SMTP_LOGIN'] = config['SENDGRID_USERNAME']
        config['SMTP_PASSWORD'] = config['SENDGRID_PASSWORD']
        config['SMTP_USE_TLS'] = True
    # convert to Flask-Mail specific configuration
    if 'MAILGUN_SMTP_SERVER' in config or 'SENDGRID_PASSWORD' in config:
        config['MAIL_SERVER'] = config['SMTP_SERVER']
        config['MAIL_PORT'] = config['SMTP_PORT']
        config['MAIL_USE_TLS'] = config['SMTP_USE_TLS']
        config['MAIL_USERNAME'] = config['SMTP_LOGIN']
        config['MAIL_PASSWORD'] = config['SMTP_PASSWORD']
    # for backwards compatiblity, redis:
    if 'REDIS_URL' in config:
        parsed = urlparse(config['REDIS_URL'])
        config['REDIS_HOST'] = parsed.hostname
        config['REDIS_PORT'] = parsed.port
        config['REDIS_PASSWORD'] = parsed.password
        # FIXME: missing db#?
    if 'MONGO_URI' in config:
        parsed = urlparse(config['MONGO_URI'])
        config['MONGODB_USER'] = parsed.username
        config['MONGODB_PASSWORD'] = parsed.password
        config['MONGODB_HOST'] = parsed.hostname
        config['MONGODB_PORT'] = parsed.port
        config['MONGODB_DB'] = parsed.path[1:]
|
{
"content_hash": "52eb9e3ba59724ff3a249de03136a98a",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 74,
"avg_line_length": 33.81188118811881,
"alnum_prop": 0.5891654465592973,
"repo_name": "mbr/flask-appconfig",
"id": "92f55dfd7b5f6378f9c6bdae7d7285f72d75e032",
"size": "3438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_appconfig/heroku.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26736"
}
],
"symlink_target": ""
}
|
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import os
# SQLite file lives next to this module.
conn_string = os.path.join(os.path.dirname(__file__), 'db.sqlite')
# NOTE(review): convert_unicode is deprecated/removed in newer SQLAlchemy;
# confirm the pinned version still accepts it.
engine = create_engine('sqlite:///' + conn_string,
                       convert_unicode=True)
# Thread-local session registry; explicit commit/flush required.
db_session = scoped_session(sessionmaker(
    autocommit=False,
    autoflush=False,
    bind=engine))
Base = declarative_base()
# Lets models run queries as Model.query.filter(...).
Base.query = db_session.query_property()
def init_db():
    """Create all database tables for the application's models."""
    # Importing the model modules registers their tables on Base.metadata
    # so create_all() below can see them.
    import snailmail.models.mail
    import snailmail.models.user
    Base.metadata.create_all(bind=engine)
|
{
"content_hash": "d270a8a23688fa766b1c407637bf9f32",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 27.5,
"alnum_prop": 0.7388429752066116,
"repo_name": "davenportw15/SnailMail",
"id": "9ea713af85cae976d61fbc79e48323932dde639c",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snailmail/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11831"
},
{
"name": "HTML",
"bytes": "3834"
},
{
"name": "JavaScript",
"bytes": "1560"
},
{
"name": "Python",
"bytes": "11626"
}
],
"symlink_target": ""
}
|
# Compatibility shim: kafka.common re-exports the contents of
# kafka.structs and kafka.errors — presumably so older imports of
# `kafka.common` keep working after the module split; confirm before
# removing.
from kafka.structs import *
from kafka.errors import *
|
{
"content_hash": "9558ecb96d3e59dd46e2d91ef42130fa",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 27,
"avg_line_length": 27.5,
"alnum_prop": 0.7818181818181819,
"repo_name": "zackdever/kafka-python",
"id": "5761f722e78b7781d13d44c168682a8d82f8cc60",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kafka/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "675698"
},
{
"name": "Shell",
"bytes": "2646"
}
],
"symlink_target": ""
}
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Test GrayscaleErodeImageFilter
#
import sys
import itk
itk.auto_progress(2)

# Command line: <input image> <output image> <radius>
input_path = sys.argv[1]
output_path = sys.argv[2]
radius = int(sys.argv[3])

# Pipeline works on 2-D unsigned-char images.
pixel_type = itk.UC
dimension = 2
image_type = itk.Image[pixel_type, dimension]

reader = itk.ImageFileReader[image_type].New()
reader.SetFileName(input_path)

# Ball-shaped structuring element of the requested radius.
kernel_type = itk.FlatStructuringElement[dimension]
kernel = kernel_type.Ball(radius)

erode_filter = itk.GrayscaleErodeImageFilter[
    image_type, image_type, kernel_type].New()
erode_filter.SetInput(reader.GetOutput())
erode_filter.SetKernel(kernel)

writer = itk.ImageFileWriter[image_type].New()
writer.SetFileName(output_path)
writer.SetInput(erode_filter.GetOutput())
# Update() triggers the whole read -> erode -> write pipeline.
writer.Update()
|
{
"content_hash": "64d936e6c063d5c03b735712074d13f5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 30.98181818181818,
"alnum_prop": 0.6842723004694836,
"repo_name": "RayRuizhiLiao/ITK_4D",
"id": "a8655ed41de0d21adcb11b5c932d11e77f285010",
"size": "1727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Wrapping/Generators/Python/Tests/GrayscaleErodeImageFilter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "572693"
},
{
"name": "C++",
"bytes": "36720665"
},
{
"name": "CMake",
"bytes": "1448020"
},
{
"name": "CSS",
"bytes": "18346"
},
{
"name": "Java",
"bytes": "29480"
},
{
"name": "Objective-C++",
"bytes": "6753"
},
{
"name": "Perl",
"bytes": "6113"
},
{
"name": "Python",
"bytes": "385395"
},
{
"name": "Ruby",
"bytes": "309"
},
{
"name": "Shell",
"bytes": "92050"
},
{
"name": "Tcl",
"bytes": "75202"
},
{
"name": "XSLT",
"bytes": "8874"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import logging
from airflow.models import BaseOperator, DagRun
from airflow.utils.decorators import apply_defaults
from airflow import settings
class DagRunOrder(object):
    """Simple container describing a DAG run to be triggered.

    Holds the ``run_id`` for the new run and a picklable ``payload`` made
    available to the triggered DAG's tasks; both default to ``None``.
    """
    def __init__(self, run_id=None, payload=None):
        self.run_id, self.payload = run_id, payload
class TriggerDagRunOperator(BaseOperator):
    """
    Triggers a DAG run for a specified ``dag_id`` if a criteria is met

    :param trigger_dag_id: the dag_id to trigger
    :type trigger_dag_id: str
    :param python_callable: a reference to a python function that will be
        called while passing it the ``context`` object and a placeholder
        object ``obj`` for your callable to fill and return if you want
        a DagRun created. This ``obj`` object contains a ``run_id`` and
        ``payload`` attribute that you can modify in your function.
        The ``run_id`` should be a unique identifier for that DAG run, and
        the payload has to be a picklable object that will be made available
        to your tasks while executing that DAG run. Your function header
        should look like ``def foo(context, dag_run_obj):``
    :type python_callable: python callable
    """
    template_fields = tuple()
    template_ext = tuple()
    ui_color = '#ffefeb'

    @apply_defaults
    def __init__(
            self,
            trigger_dag_id,
            python_callable,
            *args, **kwargs):
        super(TriggerDagRunOperator, self).__init__(*args, **kwargs)
        self.python_callable = python_callable
        self.trigger_dag_id = trigger_dag_id

    def execute(self, context):
        # Give the callable a pre-filled order it may modify or replace;
        # a falsy return means "do not trigger".
        dro = DagRunOrder(run_id='trig__' + datetime.now().isoformat())
        dro = self.python_callable(context, dro)
        if not dro:
            logging.info("Criteria not met, moving on")
            return
        session = settings.Session()
        # BUGFIX: close the session even when add()/commit() raises;
        # the original only closed it on the success path, leaking the
        # DB session on failure.
        try:
            dr = DagRun(
                dag_id=self.trigger_dag_id,
                run_id=dro.run_id,
                conf=dro.payload,
                external_trigger=True)
            logging.info("Creating DagRun {}".format(dr))
            session.add(dr)
            session.commit()
        finally:
            session.close()
|
{
"content_hash": "2cd674c73c6f2287c1b188d167c62535",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 76,
"avg_line_length": 36.68333333333333,
"alnum_prop": 0.6188096319854611,
"repo_name": "jwi078/incubator-airflow",
"id": "7f8bb53400ad70d7f8c66f3f99037d01b125c684",
"size": "2201",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/operators/dagrun_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36361"
},
{
"name": "HTML",
"bytes": "102860"
},
{
"name": "JavaScript",
"bytes": "889411"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1032823"
},
{
"name": "Shell",
"bytes": "12235"
}
],
"symlink_target": ""
}
|
import copy
import json
import re
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import HLS
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.service import Service
class Ruv(Service):
    """Stream resolver for ruv.is (Icelandic public broadcaster)."""
    supported_domains = ["ruv.is"]

    def get(self):
        """Yield fetchers (HLS/HTTP) for the page's video, or a
        ServiceError when no stream info can be located.
        """
        data = self.get_urldata()
        match = re.search(r'"([^"]+geo.php)"', data)
        if match:
            data = self.http.request("get", match.group(1)).content
            match = re.search(r"punktur=\(([^ ]+)\)", data)
            if match:
                janson = json.loads(match.group(1))
                # BUGFIX: was `self.config.get("live", ...)`, which only
                # read the setting and discarded the live check; the
                # <source>-tag branch below uses `config.set`, which is
                # clearly the intended call here too.
                self.config.set("live", checklive(janson["result"][1]))
                streams = hlsparse(self.config, self.http.request("get", janson["result"][1]), janson["result"][1], output=self.output)
                for n in list(streams.keys()):
                    yield streams[n]
            else:
                yield ServiceError("Can't find json info")
        else:
            match = re.search(r'<source [^ ]*[ ]*src="([^"]+)" ', self.get_urldata())
            if not match:
                yield ServiceError("Can't find video info for: %s" % self.url)
                return
            if match.group(1).endswith("mp4"):
                # Plain progressive MP4 download.
                yield HTTP(copy.copy(self.config), match.group(1), 800, output=self.output)
            else:
                m3u8_url = match.group(1)
                self.config.set("live", checklive(m3u8_url))
                yield HLS(copy.copy(self.config), m3u8_url, 800, output=self.output)
def checklive(url):
    """Return True when *url* looks like a live stream (contains "live")."""
    return bool(re.search("live", url))
|
{
"content_hash": "a16e369d8e365d67a9957ca760328c98",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 135,
"avg_line_length": 37.31818181818182,
"alnum_prop": 0.5657734470158343,
"repo_name": "olof/debian-svtplay-dl",
"id": "b8d91a1c5d3112779a9812a56528f9a137d39225",
"size": "1731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/svtplay_dl/service/ruv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "380"
},
{
"name": "Makefile",
"bytes": "2930"
},
{
"name": "Python",
"bytes": "393738"
},
{
"name": "Shell",
"bytes": "2423"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
class Migration(migrations.Migration):
    """Initial schema for the rewards app: the FunContent table."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='FunContent',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                # BUG FIX: max_length must be an int, not the string '32';
                # a str max_length breaks length validation on some backends.
                ('message_type', models.CharField(db_index=True, max_length=32, choices=[('quote', 'Quote'), ('fortune', 'Fortune'), ('fact', 'Fun Fact'), ('joke', 'Joke')])),
                # help_text typo fixed: "forture" -> "fortune".
                ('message', models.TextField(blank=True, help_text='The main content. This could be the quote, joke, fortune, etc')),
                ('author', models.CharField(max_length=256, blank=True, help_text='Author or attribution for a quote', null=True)),
                ('keywords', django.contrib.postgres.fields.ArrayField(size=None, blank=True, default=list, base_field=models.CharField(max_length=32, blank=True))),
            ],
            options={
                'verbose_name_plural': 'Fun Content',
                'verbose_name': 'Fun Content',
                'ordering': ['id'],
            },
        ),
    ]
|
{
"content_hash": "f3685599b78732aebc586a399a5e3408",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 177,
"avg_line_length": 43.714285714285715,
"alnum_prop": 0.5874183006535948,
"repo_name": "tndatacommons/tndata_backend",
"id": "37848bc0c78dc88db4023202b24ebcbd8f8e0cca",
"size": "1248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/rewards/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from .opengl import (OpenGLBuffer,
OpenGLTexture,
OpenGLShader,
OpenGLRender)
from .devices import PygameDevice
from .display import PygameDisplay
|
{
"content_hash": "368bfdfaca228e172a14fdfb0498fabf",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 25.636363636363637,
"alnum_prop": 0.6205673758865248,
"repo_name": "jsa4000/OpenGL-Python",
"id": "cd6f713f562926b3c1b3f1f0472e264fe132cc13",
"size": "282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zero/drivers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "790"
},
{
"name": "GLSL",
"bytes": "126"
},
{
"name": "Python",
"bytes": "267717"
}
],
"symlink_target": ""
}
|
import unittest
import os
from lxml import etree
from redi import redi
# Absolute path of the directory containing this test module.
file_dir = os.path.dirname(os.path.realpath(__file__))
# Project root is one directory up from the test directory.
goal_dir = os.path.join(file_dir, "../")
# Normalized project root with a trailing slash.
proj_root = os.path.abspath(goal_dir)+'/'
class TestUpdateRedcapFieldNameValueAndUnits(unittest.TestCase):
    """Tests redi.update_redcap_field_name_value_and_units().

    That function copies the REDCap value/units field names from the lookup
    table into each <subject> element, matching on <loinc_code>; when a code
    has no lookup entry, the supplied default ('undefined' here) is used.
    """
    def setUp(self):
        # Input: three subjects. HEMATOCRIT's loinc_code (1534436) has no
        # entry in the lookup table below; HEMOGLOBIN and BILIRUBIN DIRECT do.
        self.raw_xml = """<study>
    <subject>
        <Study_Id>22</Study_Id>
        <Collection_Date>10/01/17</Collection_Date>
        <Collection_Time>13:50</Collection_Time>
        <Component_Name>HEMATOCRIT</Component_Name>
        <loinc_code>1534436</loinc_code>
        <Reference_Unit>%</Reference_Unit>
        <Result_Value>34.5</Result_Value>
    <timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/><redcapFieldNameValue/><redcapFieldNameUnits/></subject>
    <subject>
        <Study_Id>22</Study_Id>
        <Collection_Date>10/01/17</Collection_Date>
        <Collection_Time>13:50</Collection_Time>
        <Component_Name>HEMOGLOBIN</Component_Name>
        <loinc_code>1534435</loinc_code>
        <Reference_Unit>g/dL</Reference_Unit>
        <Result_Value>11.3</Result_Value>
    <timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/><redcapFieldNameValue/><redcapFieldNameUnits/></subject>
    <subject>
        <Study_Id>22</Study_Id>
        <Collection_Date>05/05/22</Collection_Date>
        <Collection_Time>12:38</Collection_Time>
        <Component_Name>BILIRUBIN DIRECT</Component_Name>
        <loinc_code>1558221</loinc_code>
        <Reference_Unit>mg/dL</Reference_Unit>
        <Result_Value>0.8</Result_Value>
    <timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/><redcapFieldNameValue/><redcapFieldNameUnits/></subject>
</study>
    """
        self.data_tree = etree.ElementTree(etree.fromstring(self.raw_xml))
        # Lookup table mapping loinc codes to REDCap form/field names.
        self.lookup_table = """<rediFieldMap>
    <clinicalComponent>
        <loinc_code>1534435</loinc_code>
        <clinicalComponentName>HEMOGLOBIN</clinicalComponentName>
        <redcapFormName>cbc</redcapFormName>
        <redcapFieldNameValue>hemo_lborres</redcapFieldNameValue>
        <redcapFieldNameValueDescriptiveText>Hemoglobin</redcapFieldNameValueDescriptiveText>
        <redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits>
        <redcapFieldNameUnitsDescriptiveText>Hemoglobin units</redcapFieldNameUnitsDescriptiveText>
        <lbtest>hemo_lbtest</lbtest>
        <lbtestcd>hemo_lbtestcd</lbtestcd>
    </clinicalComponent>
    <clinicalComponent>
        <loinc_code>1558221</loinc_code>
        <clinicalComponentName>BILIRUBIN DIRECT</clinicalComponentName>
        <redcapFormName>chemistry</redcapFormName>
        <redcapFieldNameValue>dbil_lborres</redcapFieldNameValue>
        <redcapFieldNameValueDescriptiveText>Direct Bilirubin</redcapFieldNameValueDescriptiveText>
        <redcapFieldNameUnits>dbil_lborresu</redcapFieldNameUnits>
        <redcapFieldNameUnitsDescriptiveText>Direct Bilirubin units</redcapFieldNameUnitsDescriptiveText>
        <lbtest>Total Bilirubin</lbtest>
        <lbtestcd>BILI</lbtestcd>
    </clinicalComponent>
</rediFieldMap>
    """
        self.lookup_table_tree = etree.ElementTree(etree.fromstring(self.lookup_table))
        # Expected result: mapped field names filled in for known loinc
        # codes; the default fallback used for the unmapped HEMATOCRIT row.
        self.output = """<study>
    <subject>
        <Study_Id>22</Study_Id>
        <Collection_Date>10/01/17</Collection_Date>
        <Collection_Time>13:50</Collection_Time>
        <Component_Name>HEMATOCRIT</Component_Name>
        <loinc_code>1534436</loinc_code>
        <Reference_Unit>%</Reference_Unit>
        <Result_Value>34.5</Result_Value>
    <timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/><redcapFieldNameValue>undefined</redcapFieldNameValue><redcapFieldNameUnits>redcapFieldNameUnitsUndefined</redcapFieldNameUnits></subject>
    <subject>
        <Study_Id>22</Study_Id>
        <Collection_Date>10/01/17</Collection_Date>
        <Collection_Time>13:50</Collection_Time>
        <Component_Name>HEMOGLOBIN</Component_Name>
        <loinc_code>1534435</loinc_code>
        <Reference_Unit>g/dL</Reference_Unit>
        <Result_Value>11.3</Result_Value>
    <timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/><redcapFieldNameValue>hemo_lborres</redcapFieldNameValue><redcapFieldNameUnits>hemo_lborresu</redcapFieldNameUnits></subject>
    <subject>
        <Study_Id>22</Study_Id>
        <Collection_Date>05/05/22</Collection_Date>
        <Collection_Time>12:38</Collection_Time>
        <Component_Name>BILIRUBIN DIRECT</Component_Name>
        <loinc_code>1558221</loinc_code>
        <Reference_Unit>mg/dL</Reference_Unit>
        <Result_Value>0.8</Result_Value>
    <timestamp/><redcapFormName/><eventName/><formDateField/><formCompletedFieldName/><redcapFieldNameValue>dbil_lborres</redcapFieldNameValue><redcapFieldNameUnits>dbil_lborresu</redcapFieldNameUnits></subject>
</study>
    """
        # Round-trip through lxml so serialization is normalized before compare.
        self.expect = etree.tostring(etree.fromstring(self.output))
        return()
    def test_update_redcap_field_name_value_and_units(self):
        # Mutates self.data_tree in place; 'undefined' is the fallback value.
        redi.update_redcap_field_name_value_and_units(self.data_tree, self.lookup_table_tree, 'undefined')
        self.result = etree.tostring(self.data_tree)
        self.assertEqual(self.expect, self.result)
    def tearDown(self):
        return()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "8f0541e6974a4378e1fb1403811a3eee",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 220,
"avg_line_length": 45.228813559322035,
"alnum_prop": 0.704515645493723,
"repo_name": "indera/redi",
"id": "a71330d69b10962a0338f5550e8ef58201acafa3",
"size": "5337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/TestUpdateRedcapFieldNameValueAndUnits.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "10233"
},
{
"name": "PHP",
"bytes": "24097"
},
{
"name": "Perl",
"bytes": "848"
},
{
"name": "Python",
"bytes": "427169"
},
{
"name": "R",
"bytes": "7847"
},
{
"name": "Ruby",
"bytes": "363227"
},
{
"name": "Shell",
"bytes": "14860"
},
{
"name": "XSLT",
"bytes": "15145"
}
],
"symlink_target": ""
}
|
"""Code generation utilities"""
import imp
import json
import os
import pkgutil
import pprint
import re
import sys
import textwrap
import jsonschema
from .utils import (SchemaInfo, is_valid_identifier, indent_docstring, indent_arglist,
load_metaschema)
class CodeSnippet(object):
    """Wrapper whose repr() is the wrapped source-code string itself.

    Useful for splicing raw code into templates that format values with
    ``{...!r}``: a CodeSnippet renders unquoted where a str would be quoted.
    """

    def __init__(self, code):
        # Store the code verbatim; repr() returns it unmodified.
        self.code = code

    def __repr__(self):
        return self.code
def _get_args(info):
    """Return the list of args & kwds for building the __init__ function.

    Returns a 5-tuple ``(nonkeyword, required, kwds, invalid_kwds,
    additional)`` derived from the SchemaInfo *info*.
    """
    # TODO: - set additional properties correctly
    #       - handle patternProperties etc.
    required = set()
    kwds = set()
    invalid_kwds = set()

    # TODO: specialize for anyOf/oneOf?

    if info.is_allOf():
        # Recurse into every child schema and merge the results.
        child_args = [_get_args(child) for child in info.allOf]
        nonkeyword = all(child[0] for child in child_args)
        required = set.union(set(), *(child[1] for child in child_args))
        kwds = set.union(set(), *(child[2] for child in child_args))
        invalid_kwds = set.union(set(), *(child[3] for child in child_args))
        additional = all(child[4] for child in child_args)
    elif info.is_empty() or info.is_compound():
        nonkeyword, additional = True, True
    elif info.is_value():
        nonkeyword, additional = True, False
    elif info.is_object():
        # Split property names into valid Python identifiers and the rest;
        # required names never appear in the keyword set as well.
        for prop in info.required:
            (required if is_valid_identifier(prop) else invalid_kwds).add(prop)
        for prop in info.properties:
            if not is_valid_identifier(prop):
                invalid_kwds.add(prop)
            elif prop not in required:
                kwds.add(prop)
        nonkeyword = False
        additional = True
        # additional = info.additionalProperties or info.patternProperties
    else:
        raise ValueError("Schema object not understood")

    return (nonkeyword, required, kwds, invalid_kwds, additional)
class SchemaClassGenerator(object):
    """Class that defines methods for generating code from schemas

    Parameters
    ----------
    classname : string
        The name of the class to generate
    schema : dict
        The dictionary defining the schema class
    rootschema : dict (optional)
        The root schema for the class
    basename : string (default: "SchemaBase")
        The name of the base class to use in the class definition
    schemarepr : CodeSnippet or object, optional
        An object whose repr will be used in the place of the explicit schema.
        This can be useful, for example, when the generated code should reference
        a predefined schema object. The user must ensure that the schema within
        the evaluated code is identical to the schema used to generate the code.
    rootschemarepr : CodeSnippet or object, optional
        An object whose repr will be used in the place of the explicit root
        schema.
    nodefault : iterable of strings, optional
        Property names that should be emitted as positional parameters with
        no ``Undefined`` default in the generated ``__init__``.
    """
    # Template for the generated class; {schema!r}/{rootschema!r} is why
    # CodeSnippet (repr == raw code) can be substituted for a literal dict.
    schema_class_template = textwrap.dedent('''
    class {classname}({basename}):
        """{docstring}"""
        _schema = {schema!r}
        _rootschema = {rootschema!r}
        {init_code}
    ''')
    init_template = textwrap.dedent("""
    def __init__({arglist}):
        super({classname}, self).__init__({super_arglist})
    """).lstrip()
    def _process_description(self, description):
        # Hook for subclasses to transform schema descriptions before they
        # are embedded in docstrings; identity by default.
        return description
    def __init__(self, classname, schema, rootschema=None,
                 basename='SchemaBase', schemarepr=None, rootschemarepr=None,
                 nodefault=()):
        self.classname = classname
        self.schema = schema
        self.rootschema = rootschema
        self.basename = basename
        self.schemarepr = schemarepr
        self.rootschemarepr = rootschemarepr
        self.nodefault = nodefault
    def schema_class(self):
        """Generate code for a schema class"""
        # Fall back to the schema itself when no separate root/repr is given.
        rootschema = self.rootschema if self.rootschema is not None else self.schema
        schemarepr = self.schemarepr if self.schemarepr is not None else self.schema
        rootschemarepr = self.rootschemarepr
        if rootschemarepr is None:
            if rootschema is self.schema:
                # Reference the class's own _schema attribute rather than
                # duplicating the (possibly large) schema literal.
                rootschemarepr = CodeSnippet('_schema')
            else:
                rootschemarepr = rootschema
        return self.schema_class_template.format(
            classname=self.classname,
            basename=self.basename,
            schema=schemarepr,
            rootschema=rootschemarepr,
            docstring=self.docstring(indent=4),
            init_code=self.init_code(indent=4)
        )
    def docstring(self, indent=0):
        """Build the generated class's docstring (numpydoc-style attributes)."""
        # TODO: add a general description at the top, derived from the schema.
        # for example, a non-object definition should list valid type, enum
        # values, etc.
        # TODO: use _get_args here for more information on allOf objects
        info = SchemaInfo(self.schema, self.rootschema)
        doc = ["{} schema wrapper".format(self.classname),
               '',
               info.medium_description]
        if info.description:
            doc += self._process_description(  # strip trailing {...} condition block
                re.sub(r"\n\{\n(\n|.)*\n\}",'',info.description)).splitlines()
        if info.properties:
            nonkeyword, required, kwds, invalid_kwds, additional = _get_args(info)
            doc += ['',
                    'Attributes',
                    '----------',
                    '']
            for prop in sorted(required) + sorted(kwds) + sorted(invalid_kwds):
                propinfo = info.properties[prop]
                doc += ["{} : {}".format(prop, propinfo.short_description),
                        "    {}".format(self._process_description(propinfo.description))]
        if len(doc) > 1:
            doc += ['']
        return indent_docstring(doc, indent_level=indent, width=100, lstrip=True)
    def init_code(self, indent=0):
        """Return code suitable for the __init__ function of a Schema class"""
        info = SchemaInfo(self.schema, rootschema=self.rootschema)
        nonkeyword, required, kwds, invalid_kwds, additional =_get_args(info)
        # nodefault properties become plain positional args (no Undefined).
        nodefault=set(self.nodefault)
        required -= nodefault
        kwds -= nodefault
        args = ['self']
        super_args = []
        if nodefault:
            args.extend(sorted(nodefault))
        elif nonkeyword:
            args.append('*args')
            super_args.append('*args')
        args.extend('{}=Undefined'.format(p)
                    for p in sorted(required) + sorted(kwds))
        super_args.extend('{0}={0}'.format(p)
                          for p in sorted(nodefault) + sorted(required) + sorted(kwds))
        if additional:
            args.append('**kwds')
            super_args.append('**kwds')
        # Wrap columns: 9 = len('def __init__(') - len('def '); the super
        # call is offset by the classname inside the template body.
        arg_indent_level = 9 + indent
        super_arg_indent_level = 23 + len(self.classname) + indent
        initfunc = self.init_template.format(classname=self.classname,
                                             arglist=indent_arglist(args, indent_level=arg_indent_level),
                                             super_arglist=indent_arglist(super_args, indent_level=super_arg_indent_level))
        if indent:
            # Re-indent every line so the method nests inside the class body.
            initfunc = ('\n' + indent * ' ').join(initfunc.splitlines())
        return initfunc
class SchemaModuleGenerator(object):
    """Generate a Python module implementing the schema

    Parameters
    ----------
    schema : dict
        The root schema description
    root_name : string
        The name of the root class (default: 'Root')
    schemapi_import : string
        The import path for schemapi (default: 'schemapi')
    """
    schema_module_header = textwrap.dedent("""
    # Module generated by SchemaModuleGenerator
    from {schemapi} import SchemaBase, Undefined
    """)

    def __init__(self, schema, root_name='Root', schemapi_import='schemapi'):
        self.schema = schema
        self.root_name = root_name
        self.schemapi_import = schemapi_import
        self._validate()

    def _validate(self):
        # Fail fast at construction if the schema is not valid against the
        # JSON metaschema.
        metaschema = load_metaschema()
        jsonschema.validate(self.schema, metaschema)

    def module_code(self):
        """Generate a Python module implementing the schema"""
        definitions = self.schema.get('definitions', {})
        # Each definition becomes a class named after itself, so the root
        # class name must not collide with any definition name.
        if self.root_name in definitions:
            raise ValueError(f"root_name='{self.root_name}' exists in definitions; "
                             "please choose a different name")
        code = ['"""Module generated by SchemaModuleGenerator"""',
                f"from {self.schemapi_import} import SchemaBase, Undefined"]

        # The root class carries the full schema literal; definition classes
        # reference it via $ref + Root._schema instead of duplicating it.
        schemarepr = textwrap.indent(pprint.pformat(self.schema), 4 * ' ').lstrip()
        root = SchemaClassGenerator(self.root_name, self.schema,
                                    schemarepr=CodeSnippet(schemarepr))
        code.append(root.schema_class())

        for name, subschema in definitions.items():
            schemarepr = f"{{'$ref': '#/definitions/{name}'}}"
            rootschemarepr = f'{self.root_name}._schema'
            gen = SchemaClassGenerator(classname=name,
                                       schema=subschema,
                                       rootschema=self.schema,
                                       schemarepr=CodeSnippet(schemarepr),
                                       rootschemarepr=CodeSnippet(rootschemarepr))
            code.append(gen.schema_class())
        return '\n\n'.join(code)

    def write_module(self, modulename):
        """Write the schema module to the given filename

        Parameters
        ----------
        modulename : string or Path
            the path to the module (should end with a .py extension)

        Returns
        -------
        modulepath : string
            the full absolute path to the written module
        """
        modulename = os.fspath(modulename)  # support pathlib.Path & others
        code = self.module_code()
        with open(modulename, 'w') as f:
            f.write(code)
        return os.path.abspath(modulename)

    def import_as(self, modulename, add_to_sys_modules=True):
        """Import wrapper as a dynamically-generated module.

        Parameters
        ----------
        modulename : string
            a valid Python module name.
        add_to_sys_modules : boolean
            if True (default) then add the modulename to sys.modules to allow
            accessing the module contents via standard import statements.

        Returns
        -------
        module :
            the dynamically-created module.
        """
        # BUG FIX: imp.new_module is deprecated and removed in Python 3.12;
        # types.ModuleType is the supported equivalent.
        module = types.ModuleType(modulename)
        if add_to_sys_modules:
            sys.modules[modulename] = module
        exec(self.module_code(), module.__dict__)
        return module
|
{
"content_hash": "75280d2d2a1b68e2c63e371f0b9a59a0",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 123,
"avg_line_length": 36.91836734693877,
"alnum_prop": 0.5910263497328174,
"repo_name": "altair-viz/schemapi",
"id": "5cee08304115d5dc24bfb30e7c9e29e203e289ce",
"size": "10854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "schemapi/codegen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "13583"
},
{
"name": "Makefile",
"bytes": "140"
},
{
"name": "Python",
"bytes": "64409"
}
],
"symlink_target": ""
}
|
import base64
import datetime
import hashlib
import hmac
import json
import os
import random
import re
import string
import zipfile
import six
from dateutil.tz import tzlocal
from requests import Session
from requests.adapters import HTTPAdapter
from restle.exceptions import HTTPException
from six import text_type
import databasin
from databasin.datasets import DatasetResource, DatasetListResource, DatasetImportListResource, DatasetImportResource
from databasin.exceptions import LoginError, DatasetImportError
from databasin.jobs import JobResource
from databasin.uploads import TemporaryFileResource, TEMPORARY_FILE_DETAIL_PATH, TemporaryFileListResource
from databasin.utils import ResourcePaginator, raise_for_authorization
# IDE inspection trips over these as imports
# (py2/py3-compatible urllib aliases via six).
urljoin = six.moves.urllib_parse.urljoin
urlencode = six.moves.urllib_parse.urlencode
# REST endpoint templates; {id}/{uuid} placeholders are filled via str.format.
DATASET_DETAIL_PATH = '/api/v1/datasets/{id}/'
DATASET_IMPORT_DETAIL_PATH = '/api/v1/dataset_imports/{id}/'
DATASET_IMPORT_LIST_PATH = '/api/v1/dataset_imports/'
DATASET_LIST_PATH = '/api/v1/datasets/'
DEFAULT_HOST = 'databasin.org'
JOB_CREATE_PATH = '/api/v1/jobs/'
JOB_DETAIL_PATH = '/api/v1/jobs/{id}/'
LOGIN_PATH = '/auth/api/login/'
TEMPORARY_FILE_LIST_PATH = '/api/v1/uploads/temporary-files/'
TEMPORARY_FILE_UPLOAD_PATH = '/uploads/upload-temporary-file/'
METADATA_FILE_UPLOAD_PATH = '/datasets/{id}/import/metadata/'
# Extracts the import id out of a '/import/<id>/' next_uri segment.
DATASET_IMPORT_ID_RE = re.compile(r'\/import\/([^\/]*)\/')
class RefererHTTPAdapter(HTTPAdapter):
    """Transport adapter that mirrors each request's own URL into its
    ``Referer`` header and, on mutating requests, forwards Django's CSRF
    cookie as the ``X-CSRFToken`` header."""

    def add_headers(self, request, **kwargs):
        request.headers['Referer'] = request.url
        is_safe_method = request.method.lower() in {'get', 'head'}
        if not is_safe_method and 'csrftoken' in request._cookies:
            request.headers['X-CSRFToken'] = request._cookies['csrftoken']
class Client(object):
    """HTTP client for the Data Basin (databasin.org) REST API.

    Supports anonymous access, session login (``login``) and HMAC API-key
    request signing (``set_api_key``), and wraps the dataset, import, job
    and temporary-file upload endpoints.
    """

    def __init__(self, host=DEFAULT_HOST, user=None, api_key=None):
        self._session = Session()
        self._session.client = self
        self._session.headers = {'user-agent': 'python-databasin/{}'.format(databasin.__version__)}
        # The server requires Referer/CSRF headers on mutating requests.
        self._session.mount('https://', RefererHTTPAdapter())
        self._session.mount('http://', RefererHTTPAdapter())
        self.base_url = 'https://{}'.format(host)
        self.username = None
        self.api_key = None
        self.set_api_key(user, api_key)

    def get(self, *args, **kwargs):
        self.update_headers()
        return self._session.get(*args, **kwargs)

    def post(self, *args, **kwargs):
        self.update_headers()
        return self._session.post(*args, **kwargs)

    def update_headers(self):
        """Attach (or remove) the API-key signature headers on the session."""
        if self.api_key is None:
            try:
                del self._session.headers['x-api-user']
                del self._session.headers['x-api-time']
                del self._session.headers['x-api-signature']
            except KeyError:
                pass
            return

        # Signature format: salt:b64(HMAC-SHA1(key=SHA1(salt+api_key), msg=date)).
        date = datetime.datetime.now(tzlocal()).isoformat().encode()
        salt = ''.join(random.SystemRandom().choice(string.ascii_letters) for _ in range(10)).encode()
        self._session.headers.update({
            'x-api-user': self.username,
            'x-api-time': date,
            'x-api-signature': b':'.join((salt, base64.urlsafe_b64encode(
                hmac.new(hashlib.sha1(salt + self.api_key).digest(), msg=date, digestmod=hashlib.sha1).digest()
            )))
        })

    def build_url(self, path):
        """Return an absolute URL for *path* on the configured host."""
        return urljoin(self.base_url, path)

    def login(self, username, password):
        """Log in with username/password; raises LoginError on failure."""
        url = self.build_url(LOGIN_PATH)

        # Make a get request first to get the CSRF token cookie
        r = self.get(self.build_url('/'))
        r.raise_for_status()

        r = self.post(url, data={
            'username': username,
            'password': password,
            'csrfmiddlewaretoken': r.cookies['csrftoken']
        }, allow_redirects=False)
        r.raise_for_status()

        if 'sessionid' not in r.cookies:
            raise LoginError

        self.username = username

    def set_api_key(self, username, api_key):
        """Configure API-key signing; pass api_key=None to disable it."""
        if username is None and api_key is not None:
            raise ValueError('A user is required with API keys')
        if isinstance(api_key, text_type):
            api_key = api_key.encode()
        self.username = username
        self.api_key = api_key

    def list_datasets(self, filters=None, items_per_page=100):
        """Return a ResourcePaginator over datasets matching *filters*.

        BUG FIX: *filters* was a shared mutable default dict ({}) that this
        method mutated with the 'limit' key, leaking state across calls and
        into caller-supplied dicts; it is now copied defensively.
        """
        self.update_headers()
        filters = dict(filters) if filters else {}
        filters['limit'] = items_per_page
        url = '{0}?{1}'.format(self.build_url(DATASET_LIST_PATH), urlencode(filters))
        return ResourcePaginator(DatasetListResource.get(url, session=self._session, lazy=False))

    def list_my_datasets(self, **kwargs):
        """List datasets owned by the logged-in user ([] when anonymous)."""
        if not self.username:
            return []
        # Copy so a caller-supplied dict is not mutated with 'owner_id'.
        filters = dict(kwargs.get('filters') or {})
        filters['owner_id'] = self.username
        kwargs['filters'] = filters
        return self.list_datasets(**kwargs)

    def get_dataset(self, dataset_id):
        """Fetch a single dataset; raises on HTTP/authorization errors."""
        self.update_headers()
        try:
            return DatasetResource.get(
                self.build_url(DATASET_DETAIL_PATH.format(id=dataset_id)), session=self._session, lazy=False
            )
        except HTTPException as e:
            # Translate 401/403 into a friendlier error when appropriate.
            raise_for_authorization(e.response, self.username is not None)
            raise

    def list_imports(self, filters=None):
        """Return a ResourcePaginator over dataset imports."""
        self.update_headers()
        url = self.build_url(DATASET_IMPORT_LIST_PATH)
        if filters:
            url += '?{0}'.format(urlencode(filters))
        return ResourcePaginator(DatasetImportListResource.get(url, session=self._session, lazy=False))

    def get_import(self, import_id):
        """Fetch a single dataset import record."""
        self.update_headers()
        try:
            return DatasetImportResource.get(
                self.build_url(DATASET_IMPORT_DETAIL_PATH.format(id=import_id)), session=self._session, lazy=False
            )
        except HTTPException as e:
            raise_for_authorization(e.response, self.username is not None)
            raise

    def create_job(self, name, job_args=None, block=False):
        """Create a server-side job; when *block* is True, wait for it."""
        self.update_headers()
        job = JobResource.create(
            self.build_url(JOB_CREATE_PATH), name=name, job_args=job_args or {}, session=self._session
        )
        if block:
            job.join()
        return job

    def get_job(self, job_id):
        """Fetch a single job record."""
        self.update_headers()
        try:
            return JobResource.get(
                self.build_url(JOB_DETAIL_PATH.format(id=job_id)), session=self._session, lazy=False
            )
        except HTTPException as e:
            raise_for_authorization(e.response, self.username is not None)
            raise

    def upload_temporary_file(self, f, filename=None):
        """Upload an open file object as a temporary server-side file."""
        self.update_headers()
        return TemporaryFileResource.upload(
            self.build_url(TEMPORARY_FILE_UPLOAD_PATH), f, filename=filename, session=self._session
        )

    def list_temporary_files(self):
        self.update_headers()
        return ResourcePaginator(
            TemporaryFileListResource.get(self.build_url(TEMPORARY_FILE_LIST_PATH), session=self._session, lazy=False)
        )

    def get_temporary_file(self, uuid):
        self.update_headers()
        try:
            return TemporaryFileResource.get(
                self.build_url(TEMPORARY_FILE_DETAIL_PATH.format(uuid=uuid)), session=self._session, lazy=False
            )
        except HTTPException as e:
            raise_for_authorization(e.response, self.username is not None)
            raise

    def import_lpk(self, lpk_file, xml=None):
        """Import an ArcGIS Layer Package (.lpk) and return the new dataset.

        *xml* may name a metadata XML file to attach before finalizing.
        Raises DatasetImportError when the import cannot finish in one step.
        """
        if not lpk_file.endswith('.lpk'):
            raise ValueError('File must be an ArcGIS Layer Package with a .lpk extension')

        filename = os.path.basename(lpk_file)
        # Context manager guarantees the handle is closed even if upload fails.
        with open(lpk_file, 'rb') as f:
            tmp_file = self.upload_temporary_file(f, filename=filename)

        job_args = {
            'file': tmp_file.uuid,
            'url': None,
            'dataset_type': 'ArcGIS_Native'
        }
        job = self.create_job('create_import_job', job_args=job_args, block=True)
        # next_uri looks like .../import/<id>/; the id is the penultimate segment.
        import_id = json.loads(job.message)['next_uri'].strip('/').split('/')[-2]
        final_job_args = {
            'import_id': import_id
        }

        if xml is not None:
            xml_filename = os.path.basename(xml)
            with open(xml) as f:
                files = {'data': (xml_filename, f)}
                data = {'layerOrderArray': 0, 'source': ''}
                url = self.build_url(METADATA_FILE_UPLOAD_PATH.format(id=import_id))
                r = self.post(url, files=files, data=data)
                r.raise_for_status()

        final_job = self.create_job('finalize_import_job', job_args=final_job_args, block=True)
        if final_job.status != 'succeeded':
            raise DatasetImportError('Import failed: {0}'.format(final_job.message))

        data = json.loads(final_job.message)
        next_uri = data['next_uri']
        if '/import/' in next_uri:
            # The import paused for more info: cancel it and surface an error.
            dataset_import_id = DATASET_IMPORT_ID_RE.search(next_uri).group(1)
            dataset_import = self.get_import(dataset_import_id)
            dataset_import.cancel()
            raise DatasetImportError(
                'Layer Package imports must have all necessary metadata information necessary for one-step import.'
            )

        dataset_id = next_uri.strip('/').split('/')[-1]
        return self.get_dataset(dataset_id)

    def import_netcdf_dataset(self, nc_or_zip_file, style=None):
        """Import a NetCDF dataset from a .nc file or a .zip archive.

        *style* may be a dict or JSON string; it is written into the archive
        as style.json when the archive does not already contain one.
        """
        if nc_or_zip_file.endswith('.zip'):
            # Append mode: the style may be added to the existing archive.
            f = open(nc_or_zip_file, 'a+b')
            zf = zipfile.ZipFile(f, 'a')
        elif nc_or_zip_file.endswith('.nc'):
            # Wrap a bare .nc file into an in-memory zip archive.
            f = six.BytesIO()
            zf = zipfile.ZipFile(f, 'w', zipfile.ZIP_DEFLATED)
            zf.write(nc_or_zip_file, os.path.basename(nc_or_zip_file))
        else:
            raise ValueError('File must be .nc or .zip')

        try:
            if style is not None and isinstance(style, six.string_types):
                style = json.loads(style)
            if style:
                zf.writestr('style.json', json.dumps(style))
            elif not any(name.endswith('style.json') for name in zf.namelist()):
                raise ValueError(
                    'Import must include style information (either in the zip archive or passed in as an argument)'
                )
            # Flush the archive before uploading; the close in `finally` is a
            # harmless no-op on an already-closed ZipFile.
            zf.close()
            f.seek(0)
            filename = '{0}.zip'.format(os.path.splitext(os.path.basename(nc_or_zip_file))[0])
            tmp_file = self.upload_temporary_file(f, filename=filename)
        finally:
            zf.close()
            f.close()

        job_args = {
            'file': tmp_file.uuid,
            'url': None,
            'dataset_type': 'NetCDF_Native'
        }
        job = self.create_job('create_import_job', job_args=job_args, block=True)
        if job.status != 'succeeded':
            raise DatasetImportError('Import failed: {0}'.format(job.message))

        # For now, we require imports to have all metadata and style info necessary for one-step import. Later we may
        # add support for multi-stage import, wherein the user could incrementally provide necessary information.
        data = json.loads(job.message)
        next_uri = data['next_uri']
        if '/import/' in next_uri:
            dataset_import_id = DATASET_IMPORT_ID_RE.search(next_uri).group(1)
            dataset_import = self.get_import(dataset_import_id)
            dataset_import.cancel()
            raise DatasetImportError(
                'NetCDF imports must have all necessary style and metadata information necessary for one-step import.'
            )

        dataset_id = next_uri.strip('/').split('/')[-1]
        return self.get_dataset(dataset_id)
|
{
"content_hash": "6a99214ef17810e48e3fce4950735071",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 118,
"avg_line_length": 35.29819277108434,
"alnum_prop": 0.6066217254031914,
"repo_name": "consbio/python-databasin",
"id": "dd18ce0ab5debd4d39cc9c6a48c41d5dff0d029f",
"size": "11719",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "databasin/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "331008"
}
],
"symlink_target": ""
}
|
"""Test that thread-local storage can be read correctly."""
from __future__ import print_function
import unittest2
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TlsGlobalTestCase(TestBase):
    """Checks that thread-local globals evaluate correctly from both a
    secondary thread and the main thread."""

    mydir = TestBase.compute_mydir(__file__)

    def setUp(self):
        TestBase.setUp(self)

        if self.getPlatform() == "freebsd" or self.getPlatform() == "linux":
            # LD_LIBRARY_PATH must be set so the shared libraries are found on
            # startup
            if "LD_LIBRARY_PATH" in os.environ:
                self.runCmd(
                    "settings set target.env-vars " +
                    self.dylibPath +
                    "=" +
                    os.environ["LD_LIBRARY_PATH"] +
                    ":" +
                    self.getBuildDir())
            else:
                self.runCmd("settings set target.env-vars " +
                            self.dylibPath + "=" + self.getBuildDir())
            self.addTearDownHook(
                lambda: self.runCmd(
                    "settings remove target.env-vars " +
                    self.dylibPath))

    # TLS works differently on Windows, this would need to be implemented
    # separately.
    @skipIfWindows
    @expectedFailureAll(
        bugnumber="llvm.org/pr28392",
        oslist=no_match(
            lldbplatformutil.getDarwinOSTriples()))
    def test(self):
        """Test thread-local storage."""
        self.build()
        exe = self.getBuildArtifact("a.out")
        target = self.dbg.CreateTarget(exe)

        if self.platformIsDarwin():
            self.registerSharedLibrariesWithTarget(target, ['liba.dylib'])

        line1 = line_number('main.c', '// thread breakpoint')
        lldbutil.run_break_set_by_file_and_line(
            self, "main.c", line1, num_expected_locations=1, loc_exact=True)
        self.runCmd("run", RUN_SUCCEEDED)

        # The stop reason of the thread should be breakpoint.
        self.runCmd("process status", "Get process status")
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        # BUG: sometimes lldb doesn't change threads to the stopped thread.
        # (unrelated to this test).
        self.runCmd("thread select 2", "Change thread")

        # Check that TLS evaluates correctly within the thread.
        # BUG FIX: patterns are now raw strings; "\(" and "\$" are invalid
        # escape sequences in plain str literals (DeprecationWarning, and a
        # SyntaxWarning/error in newer Python versions).
        self.expect("expr var_static", VARIABLES_DISPLAYED_CORRECTLY,
                    patterns=[r"\(int\) \$.* = 88"])
        self.expect("expr var_shared", VARIABLES_DISPLAYED_CORRECTLY,
                    patterns=[r"\(int\) \$.* = 66"])

        # Continue on the main thread
        line2 = line_number('main.c', '// main breakpoint')
        lldbutil.run_break_set_by_file_and_line(
            self, "main.c", line2, num_expected_locations=1, loc_exact=True)
        self.runCmd("continue", RUN_SUCCEEDED)

        # The stop reason of the thread should be breakpoint.
        self.runCmd("process status", "Get process status")
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        # BUG: sometimes lldb doesn't change threads to the stopped thread.
        # (unrelated to this test).
        self.runCmd("thread select 1", "Change thread")

        # Check that TLS evaluates correctly within the main thread.
        self.expect("expr var_static", VARIABLES_DISPLAYED_CORRECTLY,
                    patterns=[r"\(int\) \$.* = 44"])
        self.expect("expr var_shared", VARIABLES_DISPLAYED_CORRECTLY,
                    patterns=[r"\(int\) \$.* = 33"])
|
{
"content_hash": "badd518e0f03bcae3b4a1ebf0dc0f7c0",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 39.15625,
"alnum_prop": 0.5754189944134078,
"repo_name": "llvm-mirror/lldb",
"id": "52e63ecac995084ecfbf27bf1996767f21dd7a2d",
"size": "3759",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "packages/Python/lldbsuite/test/lang/c/tls_globals/TestTlsGlobals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "131618"
},
{
"name": "C",
"bytes": "195293"
},
{
"name": "C++",
"bytes": "23346708"
},
{
"name": "CMake",
"bytes": "167302"
},
{
"name": "DTrace",
"bytes": "334"
},
{
"name": "LLVM",
"bytes": "6106"
},
{
"name": "Makefile",
"bytes": "50396"
},
{
"name": "Objective-C",
"bytes": "106956"
},
{
"name": "Objective-C++",
"bytes": "24806"
},
{
"name": "Perl",
"bytes": "72175"
},
{
"name": "Python",
"bytes": "3669886"
},
{
"name": "Shell",
"bytes": "6573"
},
{
"name": "Vim script",
"bytes": "8434"
}
],
"symlink_target": ""
}
|
from django.db import models
from authentication.models import Account
from django.db import models
from posts.models import Post
from datetime import timedelta
class PrefSignUpManager(models.Manager):
    """Manager that builds a PrefSignUp sheet plus its time blocks."""

    def create_signup(self, post, name, location, duration,
                      begin_time_list, end_time_list):
        """Create and save a sheet and one PrefTimeBlock per time pair.

        Raises ValueError when the begin/end lists differ in length.
        """
        if len(begin_time_list) != len(end_time_list):
            raise ValueError('Begin time list and end time list do not match')
        sheet = self.model(post=post, name=name, location=location,
                           duration=duration)
        sheet.save()
        min_delta = timedelta(minutes=duration)
        # Pair begin/end times directly instead of indexing with range(len()).
        for begin_time, end_time in zip(begin_time_list, end_time_list):
            PrefTimeBlock.objects.create_timeblock(sheet, begin_time, end_time, min_delta)
        return sheet
'''
Best Field to represent time? The operations that we will do with minimum duration:
1. Add minimum duration to starting time
Options:
1. TimeField: represented in Python by a datetime.time instance
2. DateTimeField: represented in Python by a datetime.datetime instance
Doc on datetime:
https://docs.python.org/2/library/datetime.html
Seems that the best class to represent the maximum and minimum duration is datetime.time.
What's the corresponding Django model field? TimeField.
Could we diff two datetime.datetime and get a datetime.time?
Actually, let's use integer field. Because for any time comparison and arithmetic,
datetime.timedelta is the easiest. But Django does not have a good field to store
datetime.timedelta, so it would be created on the fly. Also, there is no easy way
to convert a datetime.time to datetime.timedelta.
Assumption: the unit for max/min_duration is minute.
'''
class PrefSignUp(models.Model):
    """A preference-based sign-up sheet attached one-to-one to a Post."""
    # The sheet is keyed by its post. A Post does not have to have a
    # sign-up sheet attached (the relation is defined here, not on Post).
    post = models.OneToOneField(Post, primary_key=True)
    name = models.CharField(max_length=50, null=True, blank=True)
    location = models.CharField(max_length=100, null=True, blank=True)
    # Slot length in minutes (see module docstring for why an IntegerField).
    duration = models.IntegerField(default=120)
    # Flipped once the originator resolves the schedule.
    resolved = models.BooleanField(default=False, blank=False)
    objects = PrefSignUpManager()
# Design note: a sheet's list of blocks is modeled with a reverse
# ForeignKey (PrefTimeBlock.sheet), just like Posts and Users.
class PrefTimeBlockManage(models.Manager):
    """Manager that creates a PrefTimeBlock and slices it into slots."""

    def create_timeblock(self, sheet, begin_time, end_time, delta):
        """Create a time block on `sheet` and fill it with consecutive
        PrefSignUpSlot rows of length `delta`.

        Args:
            sheet: the owning PrefSignUp.
            begin_time: datetime where the block starts.
            end_time: datetime where the block ends.
            delta: timedelta length of each slot.

        Raises:
            ValueError: if `end_time` precedes `begin_time`.
        """
        # NOTE: an exception raised here propagates to the view layer;
        # callers are expected to translate it into an error response.
        if end_time < begin_time:
            # Fixed typo in the error message ('earier' -> 'earlier').
            raise ValueError('Block end time is earlier than begin time')
        time_block = self.model(sheet=sheet, start_time=begin_time,
                                end_time=end_time)
        time_block.save()
        # Walk the block in `delta` steps; a trailing remainder shorter
        # than `delta` is dropped.
        time = begin_time
        while time + delta <= end_time:
            slot_end = time + delta
            slot = PrefSignUpSlot(block=time_block, start_time=time,
                                  end_time=slot_end)
            slot.save()
            time = slot_end
class PrefTimeBlock(models.Model):
    """A contiguous time window on a sign-up sheet, subdivided into slots."""
    # Reverse accessor: sheet.myblocks gives all blocks of a PrefSignUp.
    sheet = models.ForeignKey(PrefSignUp, related_name="myblocks", blank=True)
    start_time = models.DateTimeField(auto_now=False, auto_now_add=False, blank=True, null = True)
    end_time = models.DateTimeField(auto_now=False, auto_now_add=False, blank=True, null = True)
    objects = PrefTimeBlockManage()
class PrefSignUpSlot(models.Model):
    """One bookable slot inside a PrefTimeBlock."""
    # Reverse accessor: block.myslots gives all slots of a block.
    block = models.ForeignKey(PrefTimeBlock, related_name="myslots", blank=True)
    # The list of users who chose this slot as preferred, recorded through
    # SignUpPreference. Precedent elsewhere in the project:
    # shared_with = models.ManyToManyField(KGroup, blank=True, through='access.AccessRule')
    # NOTE(review): null=True has no effect on ManyToManyField -- confirm and drop.
    requester_list = models.ManyToManyField(Account, related_name="requester_list", through='SignUpPreference',
                                            blank=True, null=True)
    # owner is None until originator tries to resolve the schedule.
    owner = models.ForeignKey(Account, related_name='resolved_owner', blank=True, null=True)
    start_time = models.DateTimeField(auto_now=False, auto_now_add=False, blank=True, null = True)
    end_time = models.DateTimeField(auto_now=False, auto_now_add=False, blank=True, null = True)
'''
Whenever a requester chooses a slot, an instance of SignUpPreference is created.
This is the through model used by the requester_list field in PrefSignUpSlot.
'''
class SignUpPreference(models.Model):
    """Through model recording how strongly a requester prefers a slot."""
    slot = models.ForeignKey(PrefSignUpSlot, blank=True)
    requester = models.ForeignKey(Account, blank=True)
    # 3 = Strongly prefer; 2 = Slightly prefer; 1 = If I have to; 0 = not selected
    pref = models.PositiveSmallIntegerField()
|
{
"content_hash": "62f581f1acb35800e3f806eef7f34041",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 111,
"avg_line_length": 41.567796610169495,
"alnum_prop": 0.6880733944954128,
"repo_name": "bewallyt/Kalendr",
"id": "e2f4a7d56228813db5eb022144daea6850941d36",
"size": "4905",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pre_signup/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33291"
},
{
"name": "HTML",
"bytes": "111343"
},
{
"name": "JavaScript",
"bytes": "182367"
},
{
"name": "Python",
"bytes": "156830"
},
{
"name": "Shell",
"bytes": "84"
},
{
"name": "TeX",
"bytes": "71707"
}
],
"symlink_target": ""
}
|
import sys
import py
from rpython.jit.tl.tla.test_tla import assemble
py.path.local(__file__)  # NOTE(review): result unused -- looks like a leftover; confirm before removing
def usage():
    """Write the command-line usage message to stderr and exit with status 1."""
    sys.stderr.write('Usage: tla_assembler.py filename.tla.py\n')
    sys.exit(1)
def main():
    """Assemble a ``.tla.py`` source file into TLA bytecode.

    Expects exactly one command-line argument, e.g. ``foo.tla.py``; the
    ``code`` global produced by executing that file is assembled and the
    result is written to ``foo.tla`` (only the ``.py`` suffix stripped).
    """
    if len(sys.argv) != 2:
        usage()
    filename = sys.argv[1]
    if not filename.endswith('.tla.py'):
        usage()
    # Drop only the trailing '.py'; the '.tla' part stays in the output name.
    outname = filename[:-len('.py')]
    mydict = {}
    # Python 2 only: run the input file, collecting its globals into mydict.
    execfile(filename, mydict)
    bytecode = assemble(mydict['code'])
    # NOTE(review): plain open/close; a `with` block would guarantee the
    # file is closed if write() fails -- confirm py2.5 support is not needed.
    f = open(outname, 'w')
    f.write(bytecode)
    f.close()
    print '%s successfully assembled' % outname
# Script entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "b65562967e87f4a888791d5fe0228ef3",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 66,
"avg_line_length": 21.678571428571427,
"alnum_prop": 0.5947281713344317,
"repo_name": "jptomo/rpython-lang-scheme",
"id": "8f485718a8ad1f26bf138e0210df5a940134fed5",
"size": "630",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rpython/jit/tl/tla/tla_assembler.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "335765"
},
{
"name": "C++",
"bytes": "12638"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "6988"
},
{
"name": "Objective-C",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "16129160"
},
{
"name": "Scheme",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "721"
},
{
"name": "VimL",
"bytes": "1107"
}
],
"symlink_target": ""
}
|
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
from contextlib import closing
import errno
import functools
import gc
import hashlib
import multiprocessing
import multiprocessing.dummy
import os
import random
import shutil
import signal
import sys
import tarfile
import threading
import time
import weakref
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
# Python 2/3 compatibility: the stdlib `queue` module was named `Queue`
# under Python 2.
try:
  import queue
except ImportError:
  import Queue as queue

# Detect iterators without requiring `typing` (absent on old Python 2).
try:
  import typing
  is_iterator = lambda x: isinstance(x, typing.Iterator)
except ImportError:
  # Python2 uses next, and Python3 should have typing so __next__ is not needed.
  is_iterator = lambda x: hasattr(x, '__iter__') and hasattr(x, 'next')
if sys.version_info[0] == 2:

  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.

    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.

    Arguments:
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """

    def chunk_read(response, chunk_size=8192, reporthook=None):
      # Content-Length may be absent; -1 signals "unknown size" downstream.
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while True:
        chunk = response.read(chunk_size)
        count += 1
        if reporthook is not None:
          reporthook(count, chunk_size, total_size)
        if chunk:
          yield chunk
        else:
          break

    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  # Python 3: the stdlib implementation is fine; reuse it via six.
  from six.moves.urllib.request import urlretrieve
def is_generator_or_sequence(x):
  """Check if `x` is a Keras generator type."""
  # Tensors, ndarrays and the plain builtin containers are explicitly
  # excluded even though they are iterable.
  excluded = (ops.Tensor, np.ndarray, str, list, tuple, dict, set, frozenset)
  if isinstance(x, excluded):
    return False
  return (tf_inspect.isgenerator(x)
          or isinstance(x, Sequence)
          or is_iterator(x))
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Arguments:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
@keras_export('keras.utils.get_file')
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it is not already in the cache.

  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.

  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.

  Arguments:
      fname: Name of the file. If an absolute path `/path/to/file.txt` is
          specified the file will be saved at that location.
      origin: Original URL of the file.
      untar: Deprecated in favor of 'extract'.
          boolean, whether the file should be decompressed
      md5_hash: Deprecated in favor of 'file_hash'.
          md5 hash of the file for verification
      file_hash: The expected hash string of the file after download.
          The sha256 and md5 hash algorithms are both supported.
      cache_subdir: Subdirectory under the Keras cache dir where the file is
          saved. If an absolute path `/path/to/folder` is
          specified the file will be saved at that location.
      hash_algorithm: Select the hash algorithm to verify the file.
          options are 'md5', 'sha256', and 'auto'.
          The default 'auto' detects the hash algorithm in use.
      extract: True tries extracting the file as an Archive, like tar or zip.
      archive_format: Archive format to try for extracting the file.
          Options are 'auto', 'tar', 'zip', and None.
          'tar' includes tar, tar.gz, and tar.bz files.
          The default 'auto' is ['tar', 'zip'].
          None or an empty list will return no matches found.
      cache_dir: Location to store cached files, when None it
          defaults to the [Keras
          Directory](/faq/#where-is-the-keras-configuration-filed-stored).

  Returns:
      Path to the downloaded file
  """
  if cache_dir is None:
    cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
  # Back-compat: translate the deprecated md5_hash argument.
  if md5_hash is not None and file_hash is None:
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  # Fall back to /tmp when the chosen cache dir is not writable.
  if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  _makedirs_exist_ok(datadir)

  if untar:
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)

  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        print('A local file was found, but it seems to be '
              'incomplete or outdated because the ' + hash_algorithm +
              ' file hash does not match the original value of ' + file_hash +
              ' so we will re-download the data.')
        download = True
  else:
    download = True

  # NOTE(review): a freshly downloaded file is not re-validated against
  # file_hash -- confirm whether that is intentional.
  if download:
    print('Downloading data from', origin)

    class ProgressTracker(object):
      # Maintain progbar for the lifetime of download.
      # This design was chosen for Python 2.7 compatibility.
      progbar = None

    def dl_progress(count, block_size, total_size):
      if ProgressTracker.progbar is None:
        if total_size == -1:
          total_size = None
        ProgressTracker.progbar = Progbar(total_size)
      else:
        ProgressTracker.progbar.update(count * block_size)

    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      except HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
      except URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
    except (Exception, KeyboardInterrupt) as e:
      # Remove the partial download so a retry starts clean.
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
    ProgressTracker.progbar = None

  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath

  if extract:
    _extract_archive(fpath, datadir, archive_format)

  return fpath
def _makedirs_exist_ok(datadir):
  """Create `datadir` and any missing parents; a pre-existing dir is fine."""
  if sys.version_info[0] == 3:
    # Python 3 has exist_ok built in.
    os.makedirs(datadir, exist_ok=True)  # pylint: disable=unexpected-keyword-arg
    return
  # Python 2: emulate exist_ok by swallowing only the EEXIST error.
  try:
    os.makedirs(datadir)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Arguments:
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if (algorithm == 'sha256') or (algorithm == 'auto' and len(hash) == 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.

  Arguments:
      fpath: path to the file being validated
      file_hash: The expected hash string of the file.
          The sha256 and md5 hash algorithms are both supported.
      algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
          The default 'auto' detects the hash algorithm in use.
      chunk_size: Bytes to read at a time, important for large files.

  Returns:
      Whether the file is valid
  """
  # A 64-hex-digit reference hash implies sha256; anything else means md5.
  if algorithm == 'sha256' or (algorithm == 'auto' and len(file_hash) == 64):
    hasher = 'sha256'
  else:
    hasher = 'md5'
  return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
class ThreadsafeIter(object):
  """Wrap an iterator with a lock and propagate exceptions to all threads."""

  def __init__(self, it):
    self.it = it
    self.lock = threading.Lock()
    # Once the wrapped iterator has raised, every later next() call must
    # surface that same exception in whichever thread asks. Otherwise,
    # because order of retrieval across threads is arbitrary, a consumer
    # could see a plain StopIteration and believe the generator finished
    # normally while the real error surfaced in a different thread.
    self._exception = None

  def __iter__(self):
    return self

  def __next__(self):
    # Python 3 protocol entry; delegates to the shared implementation.
    return self.next()

  def next(self):
    with self.lock:
      if self._exception:
        raise self._exception  # pylint: disable=raising-bad-type
      try:
        value = next(self.it)
      except Exception as err:
        # Remember the failure so it is replayed on every later call.
        self._exception = err
        raise
      return value
def threadsafe_generator(f):
  """Decorator wrapping the iterator returned by `f` in a ThreadsafeIter."""

  @functools.wraps(f)
  def wrapper(*args, **kwargs):
    return ThreadsafeIter(f(*args, **kwargs))

  return wrapper
@keras_export('keras.utils.Sequence')
class Sequence(object):
  """Base object for fitting to a sequence of data, such as a dataset.

  Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
  If you want to modify your dataset between epochs you may implement
  `on_epoch_end`. The method `__getitem__` should return a complete batch.

  Notes:

  `Sequence` is a safer way to do multiprocessing: it guarantees that the
  network will only train once on each sample per epoch, which is not the
  case with generators.

  Examples:

  ```python
  from skimage.io import imread
  from skimage.transform import resize
  import numpy as np
  import math

  # Here, `x_set` is list of path to the images
  # and `y_set` are the associated classes.

  class CIFAR10Sequence(Sequence):

      def __init__(self, x_set, y_set, batch_size):
          self.x, self.y = x_set, y_set
          self.batch_size = batch_size

      def __len__(self):
          return math.ceil(len(self.x) / self.batch_size)

      def __getitem__(self, idx):
          batch_x = self.x[idx * self.batch_size:(idx + 1) *
          self.batch_size]
          batch_y = self.y[idx * self.batch_size:(idx + 1) *
          self.batch_size]

          return np.array([
              resize(imread(file_name), (200, 200))
                 for file_name in batch_x]), np.array(batch_y)
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Gets batch at position `index`.

    Arguments:
        index: position of the batch in the Sequence.

    Returns:
        A batch
    """
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Number of batches in the Sequence.

    Returns:
        The number of batches in the Sequence.
    """
    raise NotImplementedError

  def on_epoch_end(self):
    """Hook called at the end of every epoch; override to mutate the data."""
    pass

  def __iter__(self):
    """Create a generator that iterates over the Sequence."""
    for idx in range(len(self)):
      yield self[idx]
def iter_sequence_infinite(seq):
  """Iterates indefinitely over a Sequence.

  Arguments:
      seq: Sequence instance.

  Yields:
      Batches of data from the Sequence.
  """
  while True:
    # Restart from the beginning each time the sequence is exhausted.
    for batch in seq:
      yield batch
# Global variables to be shared across processes; workers look sequences up
# here by uid (see `get_index` / `next_sample`).
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None


# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None  # Only created if needed.
_WORKER_IDS = set()

# When True, `get_pool_class` always hands out a thread pool regardless of
# the caller's `use_multiprocessing` argument (see
# `dont_use_multiprocessing_pool`).
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
  """Decorator that forces thread pools while `f` runs.

  Sets the module-level `_FORCE_THREADPOOL` flag for the duration of the
  call so `get_pool_class` hands out thread pools, then restores the
  previous value.
  """

  @functools.wraps(f)
  def wrapped(*args, **kwargs):
    with _FORCE_THREADPOOL_LOCK:
      global _FORCE_THREADPOOL
      old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
      try:
        return f(*args, **kwargs)
      finally:
        # Bug fix: restore the flag even when `f` raises; previously an
        # exception left _FORCE_THREADPOOL stuck at True.
        _FORCE_THREADPOOL = old_force_threadpool

  return wrapped
def get_pool_class(use_multiprocessing):
  """Return the pool class to use: process Pool, or ThreadPool when
  multiprocessing is disabled or globally forced off."""
  global _FORCE_THREADPOOL
  if use_multiprocessing and not _FORCE_THREADPOOL:
    logging.warning(
        'multiprocessing can interact badly with TensorFlow, causing '
        'nondeterministic deadlocks. For high performance data pipelines tf.data '
        'is recommended.')
    return multiprocessing.Pool
  return multiprocessing.dummy.Pool  # ThreadPool
def get_worker_id_queue():
  """Lazily create the queue to track worker ids."""
  global _WORKER_ID_QUEUE
  # NOTE(review): this check-then-create is not guarded by a lock; two
  # threads racing here could each build a Queue. Confirm first use is
  # single-threaded.
  if _WORKER_ID_QUEUE is None:
    _WORKER_ID_QUEUE = multiprocessing.Queue()
  return _WORKER_ID_QUEUE
def init_pool(seqs):
  """Pool initializer: expose `seqs` to workers via _SHARED_SEQUENCES."""
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = seqs
@keras_export('keras.experimental.terminate_keras_multiprocessing_pools')
def terminate_keras_multiprocessing_pools(grace_period=0.1, use_sigkill=False):
  """Destroy Keras' multiprocessing pools to prevent deadlocks.

  In general multiprocessing.Pool can interact quite badly with other, seemingly
  unrelated, parts of a codebase due to Pool's reliance on fork. This method
  cleans up all pools which are known to belong to Keras (and thus can be safely
  terminated).

  Args:
    grace_period: Time (in seconds) to wait for process cleanup to propagate.
    use_sigkill: Boolean of whether or not to perform a cleanup pass using
      SIGKILL.

  Returns:
    A list of human readable strings describing all issues encountered. It is up
    to the caller to decide whether to treat this as an error condition.
  """
  errors = []

  # First cleanup the pools spawned by Keras. If we start killing workers and
  # a parent pool is still alive it will just spawn replacements which we don't
  # want.
  gc.collect()
  for pool in _DATA_POOLS:
    pool.close()
    pool.terminate()
    # We do not join the pool, because that would wait forever if a worker
    # refused to exit.

    # Finally, delete our reference to the pool so that we do not block garbage
    # collection.
    del pool

  # If there were any pools, sleep for a small grace period to allow everything
  # to finalize.
  if _DATA_POOLS:
    time.sleep(grace_period)

  # Now we kill any workers which are still alive. However we must compare
  # the worker identifier to the set of identifiers which are known to have been
  # spawned by pools belonging to Keras to avoid deleting unrelated workers.
  # First we call the .terminate() method of a worker, and then if it still
  # persists we directly send a signal to the process. Certain worker tasks may
  # be able to gracefully handle shutdown, so we send a SIGTERM and then
  # optionally follow up with a SIGKILL.
  visited_workers = set()
  # Each pass escalates: ask the worker to stop, then signal it, then log.
  cleanup_passes = ['.terminate', 'SIGTERM']
  if use_sigkill:
    cleanup_passes.append('SIGKILL')
  cleanup_passes.append('log')

  for cleanup_pass in cleanup_passes:
    while True:
      # In rare cases, queue.qsize() overestimates the number of elements. This
      # loop is designed to be more robust.
      try:
        _WORKER_IDS.add(get_worker_id_queue().get_nowait())
      except queue.Empty:
        break

    gc.collect()
    workers_terminated_this_pass = False
    for worker in multiprocessing.active_children():
      ident = worker.ident
      if ident in _WORKER_IDS and worker.is_alive():
        try:
          if cleanup_pass == '.terminate':
            # First we ask nicely.
            worker.terminate()
            worker.join(timeout=grace_period)
            visited_workers.add(ident)
            workers_terminated_this_pass = True
          elif cleanup_pass in ('SIGTERM', 'SIGKILL'):
            # Then we ask increasingly tersely.
            os.kill(worker.pid, signal.SIGKILL if cleanup_pass == 'SIGKILL'
                    else signal.SIGTERM)
            workers_terminated_this_pass = True

          elif cleanup_pass == 'log':
            # And finally we give up and log the failure.
            errors.append('worker still alive: {}, pid={}, hash={}'
                          .format(worker.name, worker.pid, hash(worker)))

        except OSError:
          # Worker exited since the start of this loop.
          pass

    if workers_terminated_this_pass:
      # There can be a small propagation delay between worker destruction and
      # workers reporting False for is_alive and no longer appearing in the
      # list of active children. Once again, we sleep for a small grace period.
      # This prevents false positives from workers which are simply still in the
      # process of spinning down.
      time.sleep(grace_period)

  # Finally we remove the visited worker ids to handle the edge case that a
  # pid is reused.
  _WORKER_IDS.difference_update(visited_workers)

  gc.collect()
  for pool in _DATA_POOLS:
    errors.append('pool still exists: {}, hash={}'.format(pool, hash(pool)))

  return errors
def get_index(uid, i):
  """Get the value from the Sequence `uid` at index `i`.

  To allow multiple Sequences to be used at the same time, we use `uid` to
  get a specific one. A single Sequence would cause the validation to
  overwrite the training Sequence.

  Arguments:
      uid: int, Sequence identifier
      i: index

  Returns:
      The value at index `i`.
  """
  # Runs inside pool workers; _SHARED_SEQUENCES is populated by the pool
  # initializer / _send_sequence.
  return _SHARED_SEQUENCES[uid][i]
@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer(object):
  """Base class to enqueue inputs.

  The task of an Enqueuer is to use parallelism to speed up preprocessing.
  This is done with processes or threads.

  Example:

  ```python
      enqueuer = SequenceEnqueuer(...)
      enqueuer.start()
      datas = enqueuer.get()
      for data in datas:
          # Use the inputs; training, evaluating, predicting.
          # ... stop sometime.
      enqueuer.close()
  ```

  The `enqueuer.get()` should be an infinite stream of datas.
  """

  def __init__(self, sequence,
               use_multiprocessing=False):
    self.sequence = sequence
    self.use_multiprocessing = use_multiprocessing

    # Hand out a process-wide unique id so several enqueuers can share
    # _SHARED_SEQUENCES without clobbering each other's entries.
    global _SEQUENCE_COUNTER
    if _SEQUENCE_COUNTER is None:
      try:
        _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
      except OSError:
        # In this case the OS does not allow us to use
        # multiprocessing. We resort to an int
        # for enqueuer indexing.
        _SEQUENCE_COUNTER = 0

    if isinstance(_SEQUENCE_COUNTER, int):
      self.uid = _SEQUENCE_COUNTER
      _SEQUENCE_COUNTER += 1
    else:
      # Doing Multiprocessing.Value += x is not process-safe.
      with _SEQUENCE_COUNTER.get_lock():
        self.uid = _SEQUENCE_COUNTER.value
        _SEQUENCE_COUNTER.value += 1

    self.workers = 0
    self.executor_fn = None
    self.queue = None
    self.run_thread = None
    self.stop_signal = None

  def is_running(self):
    # Running means start() has been called and stop() has not.
    return self.stop_signal is not None and not self.stop_signal.is_set()

  def start(self, workers=1, max_queue_size=10):
    """Starts the handler's workers.

    Arguments:
        workers: Number of workers.
        max_queue_size: queue size
            (when full, workers could block on `put()`)
    """
    if self.use_multiprocessing:
      self.executor_fn = self._get_executor_init(workers)
    else:
      # We do not need the init since it's threads.
      self.executor_fn = lambda _: get_pool_class(False)(workers)
    self.workers = workers
    self.queue = queue.Queue(max_queue_size)
    self.stop_signal = threading.Event()
    self.run_thread = threading.Thread(target=self._run)
    self.run_thread.daemon = True
    self.run_thread.start()

  def _send_sequence(self):
    """Sends current Iterable to all workers."""
    # For new processes that may spawn
    _SHARED_SEQUENCES[self.uid] = self.sequence

  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.

    Should be called by the same thread which called `start()`.

    Arguments:
        timeout: maximum time to wait on `thread.join()`
    """
    self.stop_signal.set()
    with self.queue.mutex:
      # Drop everything still queued so _run unblocks and exits quickly.
      self.queue.queue.clear()
      self.queue.unfinished_tasks = 0
      self.queue.not_full.notify()
    self.run_thread.join(timeout)
    _SHARED_SEQUENCES[self.uid] = None

  def __del__(self):
    if self.is_running():
      self.stop()

  @abstractmethod
  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    raise NotImplementedError

  @abstractmethod
  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Arguments:
        workers: Number of workers.

    Returns:
        Function, a Function to initialize the pool
    """
    raise NotImplementedError

  @abstractmethod
  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    # Returns
        Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
    """
    raise NotImplementedError
@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
  """Builds a Enqueuer from a Sequence.

  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

  Arguments:
      sequence: A `tf.keras.utils.data_utils.Sequence` object.
      use_multiprocessing: use multiprocessing if True, otherwise threading
      shuffle: whether to shuffle the data at the beginning of each epoch
  """

  def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
    super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
    self.shuffle = shuffle

  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Arguments:
        workers: Number of workers.

    Returns:
        Function, a Function to initialize the pool
    """
    def pool_fn(seqs):
      pool = get_pool_class(True)(
          workers, initializer=init_pool_generator,
          initargs=(seqs, None, get_worker_id_queue()))
      # Register so terminate_keras_multiprocessing_pools can find it.
      _DATA_POOLS.add(pool)
      return pool

    return pool_fn

  def _wait_queue(self):
    """Wait for the queue to be empty."""
    while True:
      time.sleep(0.1)
      if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
        return

  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    sequence = list(range(len(self.sequence)))
    self._send_sequence()  # Share the initial sequence
    while True:
      if self.shuffle:
        random.shuffle(sequence)

      with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
        for i in sequence:
          if self.stop_signal.is_set():
            return

          self.queue.put(
              executor.apply_async(get_index, (self.uid, i)), block=True)

        # Done with the current epoch, waiting for the final batches
        self._wait_queue()

        if self.stop_signal.is_set():
          # We're done
          return

      # Call the internal on epoch end.
      self.sequence.on_epoch_end()
      self._send_sequence()  # Update the pool

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
        The next element in the queue, i.e. a tuple
        `(inputs, targets)` or
        `(inputs, targets, sample_weights)`.
    """
    try:
      while self.is_running():
        inputs = self.queue.get(block=True).get()
        self.queue.task_done()
        if inputs is not None:
          yield inputs
    except Exception:  # pylint: disable=broad-except
      self.stop()
      six.reraise(*sys.exc_info())
def init_pool_generator(gens, random_seed=None, id_queue=None):
  """Initializer function for pool workers.

  Args:
    gens: State which should be made available to worker processes.
    random_seed: An optional value with which to seed child processes.
    id_queue: A multiprocessing Queue of worker ids. This is used to indicate
      that a worker process was created by Keras and can be terminated using
      the cleanup_all_keras_forkpools utility.
  """
  global _SHARED_SEQUENCES
  _SHARED_SEQUENCES = gens

  worker_proc = multiprocessing.current_process()

  # name isn't used for anything, but setting a more descriptive name is helpful
  # when diagnosing orphaned processes.
  worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)

  if random_seed is not None:
    # Offset by the worker's pid so each worker gets a distinct stream.
    np.random.seed(random_seed + worker_proc.ident)

  if id_queue is not None:
    # If a worker dies during init, the pool will just create a replacement.
    id_queue.put(worker_proc.ident, block=True, timeout=0.1)
def next_sample(uid):
  """Return the next value produced by the generator registered as `uid`.

  Using a `uid` lets several generators coexist in _SHARED_SEQUENCES;
  with a single slot the validation generator would clobber the
  training one.

  Arguments:
      uid: int, generator identifier

  Returns:
      The next value of generator `uid`.
  """
  return next(_SHARED_SEQUENCES[uid])
@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
  """Builds a queue out of a data generator.

  The provided generator can be finite in which case the class will throw
  a `StopIteration` exception.

  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

  Arguments:
      generator: a generator function which yields data
      use_multiprocessing: use multiprocessing if True, otherwise threading
      random_seed: Initial seed for workers,
          will be incremented by one for each worker.
  """

  def __init__(self, sequence,
               use_multiprocessing=False,
               random_seed=None):
    super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
    self.random_seed = random_seed

  def _get_executor_init(self, workers):
    """Gets the Pool initializer for multiprocessing.

    Arguments:
        workers: Number of workers.

    Returns:
        A Function to initialize the pool
    """
    def pool_fn(seqs):
      pool = get_pool_class(True)(
          workers, initializer=init_pool_generator,
          initargs=(seqs, self.random_seed, get_worker_id_queue()))
      # Register so terminate_keras_multiprocessing_pools can find it.
      _DATA_POOLS.add(pool)
      return pool

    return pool_fn

  def _run(self):
    """Submits request to the executor and queue the `Future` objects."""
    self._send_sequence()  # Share the initial generator
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
      while True:
        if self.stop_signal.is_set():
          return

        self.queue.put(
            executor.apply_async(next_sample, (self.uid,)), block=True)

  def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    Yields:
        The next element in the queue, i.e. a tuple
        `(inputs, targets)` or
        `(inputs, targets, sample_weights)`.
    """
    try:
      while self.is_running():
        inputs = self.queue.get(block=True).get()
        self.queue.task_done()
        if inputs is not None:
          yield inputs
    except StopIteration:
      # Special case for finite generators
      last_ones = []
      while self.queue.qsize() > 0:
        last_ones.append(self.queue.get(block=True))
      # Wait for them to complete
      for f in last_ones:
        f.wait()
      # Keep the good ones
      last_ones = [future.get() for future in last_ones if future.successful()]
      for inputs in last_ones:
        if inputs is not None:
          yield inputs
    except Exception as e:  # pylint: disable=broad-except
      self.stop()
      if 'generator already executing' in str(e):
        raise RuntimeError(
            'Your generator is NOT thread-safe. '
            'Keras requires a thread-safe generator when '
            '`use_multiprocessing=False, workers > 1`. ')
      six.reraise(*sys.exc_info())
|
{
"content_hash": "ac01285cb91f18e3307261311a1ec2b1",
"timestamp": "",
"source": "github",
"line_count": 999,
"max_line_length": 81,
"avg_line_length": 31.465465465465467,
"alnum_prop": 0.6581090538906916,
"repo_name": "ppwwyyxx/tensorflow",
"id": "38be0568b7564930d82892dc46abd43e9e1738f8",
"size": "32161",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/utils/data_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45318"
},
{
"name": "C",
"bytes": "796611"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76521274"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952883"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1254789"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297774"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38709528"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7469"
},
{
"name": "Shell",
"bytes": "643731"
},
{
"name": "Smarty",
"bytes": "34743"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
"""Abstract Protocol class."""
__all__ = ['Protocol', 'DatagramProtocol']
class BaseProtocol:
    """Root ABC shared by all protocol interfaces.

    Concrete user protocols normally derive from one of the richer
    subclasses such as Protocol or ProcessProtocol.  Subclassing
    BaseProtocol directly only makes sense for a write-only transport,
    e.g. the write end of a pipe.
    """

    def connection_made(self, transport):
        """Notification that a connection/pipe has been established.

        `transport` represents the new connection.  Incoming data will
        arrive through later data_received() calls, and
        connection_lost() is invoked once the connection goes away.
        """

    def connection_lost(self, exc):
        """Notification that the connection was closed or lost.

        `exc` is the exception responsible for the loss, or None when a
        regular EOF was received or the connection was aborted/closed.
        """

    def pause_writing(self):
        """The transport's outgoing buffer exceeded the high-water mark.

        pause_writing() and resume_writing() always come in pairs:
        pausing fires once when the buffer goes strictly above the
        high-water mark (even if further writes grow it more), and
        resuming fires once when it drains back to the low-water mark.

        A buffer sitting exactly at the high-water mark does not trigger
        a pause -- it must go strictly over.  Resuming, by contrast,
        happens when the buffer is equal to *or* below the low mark.
        These edge rules keep behavior sensible when either mark is zero.

        NOTE: unlike every other protocol callback, this one is not
        routed through EventLoop.call_soon(); if it were, it would
        arrive too late to help exactly when it matters most (an app
        writing continuously without yielding).
        """

    def resume_writing(self):
        """The transport's outgoing buffer drained below the low-water mark.

        See pause_writing() for the pairing rules.
        """
class Protocol(BaseProtocol):
    """ABC for streaming (connection-oriented) protocols.

    Users may subclass this but are not required to; every method here
    is a no-op that never raises.

    A protocol factory is handed to a utility function such as
    EventLoop.create_connection(); once the transport is ready,
    connection_made() receives it.  data_received() then fires zero or
    more times with incoming bytes, and finally connection_lost() fires
    exactly once with either an exception object or None.

    State machine of calls:

        start -> CM [-> DR*] [-> ER?] -> CL -> end
    """

    def data_received(self, data):
        """Some data arrived on the transport (`data` is a bytes object)."""

    def eof_received(self):
        """The peer signalled EOF (e.g. called write_eof() or equivalent).

        Returning a false value (including None) tells the transport to
        close itself; a true value leaves closing up to the protocol.
        """
class DatagramProtocol(BaseProtocol):
    """ABC for datagram (connectionless) protocols."""

    def datagram_received(self, data, addr):
        """A datagram `data` arrived from address `addr`."""

    def error_received(self, exc):
        """A previous send or receive operation failed with OSError `exc`.

        BlockingIOError and InterruptedError are not reported here.
        """
class SubprocessProtocol(BaseProtocol):
    """ABC for protocols driving a subprocess."""

    def pipe_data_received(self, fd, data):
        """The child wrote to its stdout/stderr pipe.

        fd is the int file descriptor of the pipe.
        data is a bytes object with the written payload.
        """

    def pipe_connection_lost(self, fd, exc):
        """A pipe attached to the child process was closed.

        fd is the int file descriptor that was closed.
        """

    def process_exited(self):
        """The child process has terminated."""
|
{
"content_hash": "cc5e2036a4f5b22f23873227a11b1c95",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 78,
"avg_line_length": 33.9765625,
"alnum_prop": 0.6654403311106002,
"repo_name": "mikhtonyuk/rxpython",
"id": "eb94fb6f2962e6b93c749cf94f33e9e58f2de2ed",
"size": "4349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asyncio/protocols.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "303672"
}
],
"symlink_target": ""
}
|
from test.support import verbose, run_unittest, import_module
#Skip these tests if either fcntl or termios is not available
fcntl = import_module('fcntl')
import_module('termios')
import errno
import pty
import os
import sys
import signal
import unittest
TEST_STRING_1 = b"I wish to buy a fish license.\n"  # written whole in test_basic
TEST_STRING_2 = b"For my pet fish, Eric.\n"  # written in two chunks in test_basic
# Bind debug() once at import time based on the regrtest verbosity flag,
# so the per-call cost in quiet mode is a plain no-op function.
if not verbose:
    def debug(msg):
        """Quiet mode: swallow progress messages."""
        pass
else:
    def debug(msg):
        """Verbose mode: echo progress messages to stdout."""
        print(msg)
def normalize_output(data):
    """Canonicalize platform-specific tty line endings back to b'\\n'.

    Some operating systems do conversions on newline; fixing that via
    termios.tcsetattr() is not portable, so instead we normalize the
    known odd endings produced by documented platforms while leaving any
    other difference (extra whitespace, trailing garbage, ...) intact.

    OSF/1 (Tru64) turns \\n into \\r\\r\\n; IRIX turns \\n into \\r\\n.
    """
    for weird_eol in (b'\r\r\n', b'\r\n'):
        if data.endswith(weird_eol):
            return data.replace(weird_eol, b'\n')
    return data
# Marginal testing of pty suite. Cannot do extensive 'do or fail' testing
# because pty code is not too portable.
# XXX(nnorwitz): these tests leak fds when there is an error.
class PtyTest(unittest.TestCase):
    """Smoke tests for the pty module: open a master/slave pair, then fork.

    A SIGALRM watchdog (see setUp) guards against isatty()/close() hanging
    on some platforms.
    """
    def setUp(self):
        # isatty() and close() can hang on some platforms. Set an alarm
        # before running the test to make sure we don't hang forever.
        self.old_alarm = signal.signal(signal.SIGALRM, self.handle_sig)
        signal.alarm(10)
    def tearDown(self):
        # remove alarm, restore old alarm handler
        signal.alarm(0)
        signal.signal(signal.SIGALRM, self.old_alarm)
    def handle_sig(self, sig, frame):
        # SIGALRM handler installed by setUp(); firing means a call hung.
        self.fail("isatty hung")
    def test_basic(self):
        """Writes to the slave side must be readable (after newline
        normalization) on the master side.
        """
        # NOTE(review): assertEquals below is a deprecated alias of
        # assertEqual, and fds opened here leak if an assertion fails
        # mid-test (see the XXX above the class) -- candidates for cleanup.
        try:
            debug("Calling master_open()")
            master_fd, slave_name = pty.master_open()
            debug("Got master_fd '%d', slave_name '%s'" %
                  (master_fd, slave_name))
            debug("Calling slave_open(%r)" % (slave_name,))
            slave_fd = pty.slave_open(slave_name)
            debug("Got slave_fd '%d'" % slave_fd)
        except OSError:
            # " An optional feature could not be imported " ... ?
            raise unittest.SkipTest("Pseudo-terminals (seemingly) not functional.")
        self.assertTrue(os.isatty(slave_fd), 'slave_fd is not a tty')
        # Solaris requires reading the fd before anything is returned.
        # My guess is that since we open and close the slave fd
        # in master_open(), we need to read the EOF.
        # Ensure the fd is non-blocking in case there's nothing to read.
        orig_flags = fcntl.fcntl(master_fd, fcntl.F_GETFL)
        fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags | os.O_NONBLOCK)
        try:
            s1 = os.read(master_fd, 1024)
            self.assertEquals(b'', s1)
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
        # Restore the original flags.
        fcntl.fcntl(master_fd, fcntl.F_SETFL, orig_flags)
        debug("Writing to slave_fd")
        os.write(slave_fd, TEST_STRING_1)
        s1 = os.read(master_fd, 1024)
        self.assertEquals(b'I wish to buy a fish license.\n',
                          normalize_output(s1))
        debug("Writing chunked output")
        os.write(slave_fd, TEST_STRING_2[:5])
        os.write(slave_fd, TEST_STRING_2[5:])
        s2 = os.read(master_fd, 1024)
        self.assertEquals(b'For my pet fish, Eric.\n', normalize_output(s2))
        os.close(slave_fd)
        os.close(master_fd)
    def test_fork(self):
        """pty.fork() must make the child a session leader with a tty on
        stdout; the child encodes its outcome in exit codes 1-4 which the
        parent decodes after waitpid().
        """
        debug("calling pty.fork()")
        pid, master_fd = pty.fork()
        if pid == pty.CHILD:
            # stdout should be connected to a tty.
            if not os.isatty(1):
                debug("Child's fd 1 is not a tty?!")
                os._exit(3)
            # After pty.fork(), the child should already be a session leader.
            # (on those systems that have that concept.)
            debug("In child, calling os.setsid()")
            try:
                os.setsid()
            except OSError:
                # Good, we already were session leader
                debug("Good: OSError was raised.")
                pass
            except AttributeError:
                # Have pty, but not setsid()?
                debug("No setsid() available?")
                pass
            except:
                # We don't want this error to propagate, escaping the call to
                # os._exit() and causing very peculiar behavior in the calling
                # regrtest.py !
                # Note: could add traceback printing here.
                debug("An unexpected error was raised.")
                os._exit(1)
            else:
                debug("os.setsid() succeeded! (bad!)")
                os._exit(2)
            # Exit code 4 = success: setsid() failed as expected.
            os._exit(4)
        else:
            debug("Waiting for child (%d) to finish." % pid)
            # In verbose mode, we have to consume the debug output from the
            # child or the child will block, causing this test to hang in the
            # parent's waitpid() call. The child blocks after a
            # platform-dependent amount of data is written to its fd. On
            # Linux 2.6, it's 4000 bytes and the child won't block, but on OS
            # X even the small writes in the child above will block it. Also
            # on Linux, the read() will throw an OSError (input/output error)
            # when it tries to read past the end of the buffer but the child's
            # already exited, so catch and discard those exceptions. It's not
            # worth checking for EIO.
            while True:
                try:
                    data = os.read(master_fd, 80)
                except OSError:
                    break
                if not data:
                    break
                sys.stdout.write(str(data.replace(b'\r\n', b'\n'),
                                     encoding='ascii'))
            ##line = os.read(master_fd, 80)
            ##lines = line.replace('\r\n', '\n').split('\n')
            ##if False and lines != ['In child, calling os.setsid()',
            ##        'Good: OSError was raised.', '']:
            ##    raise TestFailed("Unexpected output from child: %r" % line)
            (pid, status) = os.waitpid(pid, 0)
            # Exit status is in the high byte of waitpid()'s status word.
            res = status >> 8
            debug("Child (%d) exited with status %d (%d)." % (pid, res, status))
            if res == 1:
                self.fail("Child raised an unexpected exception in os.setsid()")
            elif res == 2:
                self.fail("pty.fork() failed to make child a session leader.")
            elif res == 3:
                self.fail("Child spawned by pty.fork() did not have a tty as stdout")
            elif res != 4:
                self.fail("pty.fork() failed for unknown reasons.")
            ##debug("Reading from master_fd now that the child has exited")
            ##try:
            ##    s1 = os.read(master_fd, 1024)
            ##except os.error:
            ##    pass
            ##else:
            ##    raise TestFailed("Read from master_fd did not raise exception")
            os.close(master_fd)
        # pty.fork() passed.
def test_main(verbose=None):
    """regrtest entry point: run the PtyTest suite."""
    run_unittest(PtyTest)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_main()
|
{
"content_hash": "132d4125d9708fabfc3896760b8bbef2",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 85,
"avg_line_length": 37.92537313432836,
"alnum_prop": 0.5610651974288338,
"repo_name": "MalloyPower/parsing-python",
"id": "e74ecd57469a11e128971e604389fff62bd1d314",
"size": "7623",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.1/Lib/test/test_pty.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
from ops import *
import random
import math
from tensorflow.contrib.tensorboard.plugins import projector
# constants
# NOTE(review): presumably ATOMS is the vocabulary list of atom names and
# ATOM_TO_NUM maps name -> id; confirm against ops.getAllAtoms().
ATOMS, ATOM_TO_NUM = getAllAtoms()
VOCAB_SIZE = len(ATOMS)  # number of distinct atoms in the vocabulary
EMBEDDING_SIZE = 32  # dimensionality of each learned atom embedding
def saveWordsAsMetadata():
    """Write the atom vocabulary to a TSV metadata file for TensorBoard.

    Writes ``LOG_DIR + "atoms_metadata.tsv"`` with a header row followed by
    one ``word<TAB>index`` row per atom in ``ATOMS``.
    """
    # 'f' instead of 'file' avoids shadowing the builtin name.
    with open(LOG_DIR+"atoms_metadata.tsv", "w") as f:
        f.write("Words\tIndex\n")
        # enumerate() yields each atom's position directly, replacing the
        # O(n) ATOMS.index(word) lookup per row (O(n^2) overall); it is
        # also correct even if the vocabulary ever contained duplicates.
        for index, word in enumerate(ATOMS):
            f.write("%s\t%s\n"%(word, index))
# Random validation word ids: roughly a third of the vocabulary, at least one.
valid_set = random.sample(range(VOCAB_SIZE), VOCAB_SIZE//3 if VOCAB_SIZE//3 >= 1 else 1)
# Variables
with tf.variable_scope("vars"):
    # Embedding matrix, initialized near zero.
    embeddings = tf.Variable(
        tf.random_uniform([VOCAB_SIZE, EMBEDDING_SIZE], minval=-1e-4, maxval=1e-4),
        name='embeddings'
    )
    # Output weights/biases for the sampled-softmax head.
    softmax_weights = tf.Variable(
        tf.truncated_normal([VOCAB_SIZE, EMBEDDING_SIZE], stddev=1.0/math.sqrt(EMBEDDING_SIZE)),
        name="weights"
    )
    softmax_biases = tf.Variable(
        tf.zeros([VOCAB_SIZE]),
        name = "biases"
    )
# Placeholders
# NOTE(review): batch_size is not defined in this file -- presumably it comes
# from `ops` via the star import; confirm.
train_labels = tf.placeholder(tf.int32, [batch_size])
train_targets = tf.placeholder(tf.int32, [batch_size, 1])
valid_dataset = tf.constant(valid_set, dtype = tf.int32)
# Model
embed = tf.nn.embedding_lookup(embeddings, train_labels)
# NOTE(review): tf.nn.sampled_softmax_loss documents integer class ids for
# `labels`; the float32 cast here looks suspicious -- confirm against the TF
# version in use.
loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
    weights = softmax_weights,
    biases = softmax_biases,
    inputs = embed,
    labels = tf.cast(train_targets, tf.float32),
    num_sampled = VOCAB_SIZE//2,
    num_classes = VOCAB_SIZE,
))
tf.summary.scalar("atom_embed_loss", loss)
optim = tf.train.AdagradOptimizer(1.0).minimize(loss)
# optim = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# L2-normalize embeddings so `similarity` below is cosine similarity.
# NOTE(review): keep_dims is the deprecated spelling of keepdims in newer TF.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
# tensorboard stuff
# embeddings {
#   tensor_name: 'word_embedding'
#   metadata_path: '$LOG_DIR/metadata.tsv'
# }
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = embeddings.name
merged_summary = tf.summary.merge_all()
# Sample nearest-neighbour output from a previous training run, kept as
# documentation of expected qualitative behavior.
"""
Loss: 18.4618
Wood: Fabric, Plumbing Fixtures, Glass, Project Base Point, Brick
Brick: Survey Point, Casework, Concrete, Soil, Floors
Furniture: Ceramic, Plumbing Fixtures, Survey Point, Windows, Floors
Soil: Survey Point, Brick, Windows, Concrete, Doors
Fabric: Wood, Plumbing Fixtures, Structural Foundations, Windows, Furniture
Casework: Project Base Point, Concrete, Brick, Doors, Ceramic
"""
|
{
"content_hash": "f046ad7177e0bf19bdcc43736c3cb449",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 96,
"avg_line_length": 31.493827160493826,
"alnum_prop": 0.7103096824774598,
"repo_name": "ranjeethmahankali/BIMToVec",
"id": "49f57ba48e3f31de620f19fc53771a9b9930f4af",
"size": "2551",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "atom_embeddings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "279"
},
{
"name": "C#",
"bytes": "38868"
},
{
"name": "Python",
"bytes": "45849"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from rest_framework import status
from rest_framework.decorators import detail_route
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from django.conf import settings
from django.shortcuts import get_object_or_404
from ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF
from ipam import filters
from extras.api.views import CustomFieldModelViewSet
from utilities.api import WritableSerializerMixin
from . import serializers
#
# VRFs
#
class VRFViewSet(WritableSerializerMixin, CustomFieldModelViewSet):
    """CRUD API endpoint for VRFs; eagerly loads the related tenant."""
    queryset = VRF.objects.select_related('tenant')
    serializer_class = serializers.VRFSerializer
    write_serializer_class = serializers.WritableVRFSerializer
    filter_class = filters.VRFFilter
#
# RIRs
#
class RIRViewSet(ModelViewSet):
    """CRUD API endpoint for Regional Internet Registries."""
    queryset = RIR.objects.all()
    serializer_class = serializers.RIRSerializer
    filter_class = filters.RIRFilter
#
# Aggregates
#
class AggregateViewSet(WritableSerializerMixin, CustomFieldModelViewSet):
    """CRUD API endpoint for aggregates; eagerly loads the related RIR."""
    queryset = Aggregate.objects.select_related('rir')
    serializer_class = serializers.AggregateSerializer
    write_serializer_class = serializers.WritableAggregateSerializer
    filter_class = filters.AggregateFilter
#
# Roles
#
class RoleViewSet(ModelViewSet):
    """CRUD API endpoint for prefix/VLAN roles."""
    queryset = Role.objects.all()
    serializer_class = serializers.RoleSerializer
    filter_class = filters.RoleFilter
#
# Prefixes
#
class PrefixViewSet(WritableSerializerMixin, CustomFieldModelViewSet):
    """CRUD API endpoint for prefixes, plus an `available-ips` sub-route."""
    queryset = Prefix.objects.select_related('site', 'vrf__tenant', 'tenant', 'vlan', 'role')
    serializer_class = serializers.PrefixSerializer
    write_serializer_class = serializers.WritablePrefixSerializer
    filter_class = filters.PrefixFilter
    @detail_route(url_path='available-ips', methods=['get', 'post'])
    def available_ips(self, request, pk=None):
        """
        A convenience method for returning available IP addresses within a prefix. By default, the number of IPs
        returned will be equivalent to PAGINATE_COUNT. An arbitrary limit (up to MAX_PAGE_SIZE, if set) may be passed,
        however results will not be paginated.

        GET lists available IPs; POST allocates the first available IP
        (requires the `ipam.add_ipaddress` permission).
        """
        prefix = get_object_or_404(Prefix, pk=pk)
        # Create the next available IP within the prefix
        if request.method == 'POST':
            # Permissions check
            if not request.user.has_perm('ipam.add_ipaddress'):
                raise PermissionDenied()
            # Find the first available IP address in the prefix
            try:
                ipaddress = list(prefix.get_available_ips())[0]
            except IndexError:
                # Prefix is fully allocated.
                return Response(
                    {
                        "detail": "There are no available IPs within this prefix ({})".format(prefix)
                    },
                    status=status.HTTP_400_BAD_REQUEST
                )
            # Create the new IP address, inheriting the prefix's mask
            # length and VRF; remaining fields come from the request body.
            data = request.data.copy()
            data['address'] = '{}/{}'.format(ipaddress, prefix.prefix.prefixlen)
            data['vrf'] = prefix.vrf
            serializer = serializers.WritableIPAddressSerializer(data=data)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Determine the maximum amount of IPs to return
        else:
            try:
                limit = int(request.query_params.get('limit', settings.PAGINATE_COUNT))
            except ValueError:
                # Non-numeric limit falls back to the default page size.
                limit = settings.PAGINATE_COUNT
            if settings.MAX_PAGE_SIZE:
                limit = min(limit, settings.MAX_PAGE_SIZE)
            # Calculate available IPs within the prefix (stop at `limit`
            # rather than materializing the whole range).
            ip_list = []
            for index, ip in enumerate(prefix.get_available_ips(), start=1):
                ip_list.append(ip)
                if index == limit:
                    break
            serializer = serializers.AvailableIPSerializer(ip_list, many=True, context={
                'request': request,
                'prefix': prefix.prefix,
                'vrf': prefix.vrf,
            })
            return Response(serializer.data)
#
# IP addresses
#
class IPAddressViewSet(WritableSerializerMixin, CustomFieldModelViewSet):
    """CRUD API endpoint for IP addresses with common relations preloaded."""
    queryset = IPAddress.objects.select_related('vrf__tenant', 'tenant', 'interface__device', 'nat_inside')
    serializer_class = serializers.IPAddressSerializer
    write_serializer_class = serializers.WritableIPAddressSerializer
    filter_class = filters.IPAddressFilter
#
# VLAN groups
#
class VLANGroupViewSet(WritableSerializerMixin, ModelViewSet):
    """CRUD API endpoint for VLAN groups; eagerly loads the related site."""
    queryset = VLANGroup.objects.select_related('site')
    serializer_class = serializers.VLANGroupSerializer
    write_serializer_class = serializers.WritableVLANGroupSerializer
    filter_class = filters.VLANGroupFilter
#
# VLANs
#
class VLANViewSet(WritableSerializerMixin, CustomFieldModelViewSet):
    """CRUD API endpoint for VLANs with common relations preloaded."""
    queryset = VLAN.objects.select_related('site', 'group', 'tenant', 'role')
    serializer_class = serializers.VLANSerializer
    write_serializer_class = serializers.WritableVLANSerializer
    filter_class = filters.VLANFilter
#
# Services
#
class ServiceViewSet(WritableSerializerMixin, ModelViewSet):
    """CRUD API endpoint for services; eagerly loads the related device."""
    queryset = Service.objects.select_related('device')
    serializer_class = serializers.ServiceSerializer
    write_serializer_class = serializers.WritableServiceSerializer
    filter_class = filters.ServiceFilter
|
{
"content_hash": "23986bf809d07be16f3a09012e232467",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 118,
"avg_line_length": 32.84393063583815,
"alnum_prop": 0.6833861316437874,
"repo_name": "snazy2000/netbox",
"id": "9cf93cb4b94d8565c434497ee559a5aa2968cad5",
"size": "5682",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/ipam/api/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167999"
},
{
"name": "HTML",
"bytes": "370021"
},
{
"name": "JavaScript",
"bytes": "13570"
},
{
"name": "Python",
"bytes": "1018536"
},
{
"name": "Shell",
"bytes": "3389"
}
],
"symlink_target": ""
}
|
"""
Unit tests for the `corpora.Dictionary` class.
"""
# `Mapping` lives in `collections.abc` since Python 3.3 and was removed
# from the `collections` top level in Python 3.10; keep a fallback for
# Python 2 (this module still imports `six`).
try:
    from collections.abc import Mapping
except ImportError:  # Python 2
    from collections import Mapping
import logging
import os
import os.path
import tempfile
import unittest

import scipy

import gensim
from gensim.corpora import Dictionary
from six import PY3
from six.moves import zip
# sample data files are located in the same folder
module_path = os.path.dirname(__file__)  # directory containing this test module
def get_tmpfile(suffix):
    """Return a path for the file name `suffix` in the system temp directory."""
    tmp_dir = tempfile.gettempdir()
    return os.path.join(tmp_dir, suffix)
class TestDictionary(unittest.TestCase):
    """Tests for `gensim.corpora.Dictionary`: construction, document
    frequencies, filtering, merging, persistence, and the Python-2-style
    dict interface.
    """
    def setUp(self):
        # Small corpus of tokenized documents used by most tests below.
        self.texts = [
            ['human', 'interface', 'computer'],
            ['survey', 'user', 'computer', 'system', 'response', 'time'],
            ['eps', 'user', 'interface', 'system'],
            ['system', 'human', 'system', 'eps'],
            ['user', 'response', 'time'],
            ['trees'],
            ['graph', 'trees'],
            ['graph', 'minors', 'trees'],
            ['graph', 'minors', 'survey']]
    def testDocFreqOneDoc(self):
        texts = [['human', 'interface', 'computer']]
        d = Dictionary(texts)
        expected = {0: 1, 1: 1, 2: 1}
        self.assertEqual(d.dfs, expected)
    def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
        # two docs
        texts = [['human'], ['human']]
        d = Dictionary(texts)
        expected = {0: 2}
        self.assertEqual(d.dfs, expected)
        # only one token (human) should exist
        expected = {'human': 0}
        self.assertEqual(d.token2id, expected)
        # three docs
        texts = [['human'], ['human'], ['human']]
        d = Dictionary(texts)
        expected = {0: 3}
        self.assertEqual(d.dfs, expected)
        # only one token (human) should exist
        expected = {'human': 0}
        self.assertEqual(d.token2id, expected)
        # four docs
        texts = [['human'], ['human'], ['human'], ['human']]
        d = Dictionary(texts)
        expected = {0: 4}
        self.assertEqual(d.dfs, expected)
        # only one token (human) should exist
        expected = {'human': 0}
        self.assertEqual(d.token2id, expected)
    def testDocFreqForOneDocWithSeveralWord(self):
        # two words
        texts = [['human', 'cat']]
        d = Dictionary(texts)
        expected = {0: 1, 1: 1}
        self.assertEqual(d.dfs, expected)
        # three words
        texts = [['human', 'cat', 'minors']]
        d = Dictionary(texts)
        expected = {0: 1, 1: 1, 2: 1}
        self.assertEqual(d.dfs, expected)
    def testBuild(self):
        d = Dictionary(self.texts)
        # Since we don't specify the order in which dictionaries are built,
        # we cannot reliably test for the mapping; only the keys and values.
        expected_keys = list(range(12))
        expected_values = [2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
        self.assertEqual(sorted(d.dfs.keys()), expected_keys)
        self.assertEqual(sorted(d.dfs.values()), expected_values)
        expected_keys = sorted(['computer', 'eps', 'graph', 'human',
                                'interface', 'minors', 'response', 'survey',
                                'system', 'time', 'trees', 'user'])
        expected_values = list(range(12))
        self.assertEqual(sorted(d.token2id.keys()), expected_keys)
        self.assertEqual(sorted(d.token2id.values()), expected_values)
    def testMerge(self):
        # Merging dictionaries over the two corpus halves must yield the
        # same vocabulary as building over the whole corpus.
        d = Dictionary(self.texts)
        f = Dictionary(self.texts[:3])
        g = Dictionary(self.texts[3:])
        f.merge_with(g)
        self.assertEqual(sorted(d.token2id.keys()), sorted(f.token2id.keys()))
    def testFilter(self):
        d = Dictionary(self.texts)
        d.filter_extremes(no_below=2, no_above=1.0, keep_n=4)
        expected = {0: 3, 1: 3, 2: 3, 3: 3}
        self.assertEqual(d.dfs, expected)
    def testFilterKeepTokens_keepTokens(self):
        # provide keep_tokens argument, keep the tokens given
        d = Dictionary(self.texts)
        d.filter_extremes(no_below=3, no_above=1.0, keep_tokens=['human', 'survey'])
        expected = set(['graph', 'trees', 'human', 'system', 'user', 'survey'])
        self.assertEqual(set(d.token2id.keys()), expected)
    def testFilterKeepTokens_unchangedFunctionality(self):
        # do not provide keep_tokens argument, filter_extremes functionality is unchanged
        d = Dictionary(self.texts)
        d.filter_extremes(no_below=3, no_above=1.0)
        expected = set(['graph', 'trees', 'system', 'user'])
        self.assertEqual(set(d.token2id.keys()), expected)
    def testFilterKeepTokens_unseenToken(self):
        # do provide keep_tokens argument with unseen tokens, filter_extremes functionality is unchanged
        d = Dictionary(self.texts)
        d.filter_extremes(no_below=3, no_above=1.0, keep_tokens=['unknown_token'])
        expected = set(['graph', 'trees', 'system', 'user'])
        self.assertEqual(set(d.token2id.keys()), expected)
    def testFilterMostFrequent(self):
        d = Dictionary(self.texts)
        d.filter_n_most_frequent(4)
        expected = {0: 2, 1: 2, 2: 2, 3: 2, 4: 2, 5: 2, 6: 2, 7: 2}
        self.assertEqual(d.dfs, expected)
    def testFilterTokens(self):
        self.maxDiff = 10000
        d = Dictionary(self.texts)
        # Drop token id 0, then re-add the same word and check it gets a
        # fresh id at the end of the mapping.
        removed_word = d[0]
        d.filter_tokens([0])
        expected = {'computer': 0, 'eps': 8, 'graph': 10, 'human': 1,
                    'interface': 2, 'minors': 11, 'response': 3, 'survey': 4,
                    'system': 5, 'time': 6, 'trees': 9, 'user': 7}
        del expected[removed_word]
        self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))
        expected[removed_word] = len(expected)
        d.add_documents([[removed_word]])
        self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))
    def test_doc2bow(self):
        d = Dictionary([["žluťoučký"], ["žluťoučký"]])
        # pass a utf8 string
        self.assertEqual(d.doc2bow(["žluťoučký"]), [(0, 1)])
        # doc2bow must raise a TypeError if passed a string instead of array of strings by accident
        self.assertRaises(TypeError, d.doc2bow, "žluťoučký")
        # unicode must be converted to utf8
        self.assertEqual(d.doc2bow([u'\u017elu\u0165ou\u010dk\xfd']), [(0, 1)])
    def test_saveAsText_and_loadFromText(self):
        """`Dictionary` can be saved as textfile and loaded again from textfile. """
        tmpf = get_tmpfile('dict_test.txt')
        for sort_by_word in [True, False]:
            d = Dictionary(self.texts)
            d.save_as_text(tmpf, sort_by_word=sort_by_word)
            self.assertTrue(os.path.exists(tmpf))
            d_loaded = Dictionary.load_from_text(tmpf)
            self.assertNotEqual(d_loaded, None)
            self.assertEqual(d_loaded.token2id, d.token2id)
    def test_from_corpus(self):
        """build `Dictionary` from an existing corpus"""
        documents = ["Human machine interface for lab abc computer applications",
                     "A survey of user opinion of computer system response time",
                     "The EPS user interface management system",
                     "System and human system engineering testing of EPS",
                     "Relation of user perceived response time to error measurement",
                     "The generation of random binary unordered trees",
                     "The intersection graph of paths in trees",
                     "Graph minors IV Widths of trees and well quasi ordering",
                     "Graph minors A survey"]
        stoplist = set('for a of the and to in'.split())
        texts = [[word for word in document.lower().split() if word not in stoplist]
                 for document in documents]
        # remove words that appear only once
        all_tokens = sum(texts, [])
        tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
        texts = [[word for word in text if word not in tokens_once]
                 for text in texts]
        dictionary = Dictionary(texts)
        corpus = [dictionary.doc2bow(text) for text in texts]
        # Create dictionary from corpus without a token map
        dictionary_from_corpus = Dictionary.from_corpus(corpus)
        dict_token2id_vals = sorted(dictionary.token2id.values())
        dict_from_corpus_vals = sorted(dictionary_from_corpus.token2id.values())
        self.assertEqual(dict_token2id_vals, dict_from_corpus_vals)
        self.assertEqual(dictionary.dfs, dictionary_from_corpus.dfs)
        self.assertEqual(dictionary.num_docs, dictionary_from_corpus.num_docs)
        self.assertEqual(dictionary.num_pos, dictionary_from_corpus.num_pos)
        self.assertEqual(dictionary.num_nnz, dictionary_from_corpus.num_nnz)
        # Create dictionary from corpus with an id=>token map
        dictionary_from_corpus_2 = Dictionary.from_corpus(corpus, id2word=dictionary)
        self.assertEqual(dictionary.token2id, dictionary_from_corpus_2.token2id)
        self.assertEqual(dictionary.dfs, dictionary_from_corpus_2.dfs)
        self.assertEqual(dictionary.num_docs, dictionary_from_corpus_2.num_docs)
        self.assertEqual(dictionary.num_pos, dictionary_from_corpus_2.num_pos)
        self.assertEqual(dictionary.num_nnz, dictionary_from_corpus_2.num_nnz)
        # Ensure Sparse2Corpus is compatible with from_corpus
        bow = gensim.matutils.Sparse2Corpus(scipy.sparse.rand(10, 100))
        dictionary = Dictionary.from_corpus(bow)
        self.assertEqual(dictionary.num_docs, 100)
    def test_dict_interface(self):
        """Test Python 2 dict-like interface in both Python 2 and 3."""
        d = Dictionary(self.texts)
        self.assertTrue(isinstance(d, Mapping))
        self.assertEqual(list(zip(d.keys(), d.values())), list(d.items()))
        # Even in Py3, we want the iter* members.
        self.assertEqual(list(d.items()), list(d.iteritems()))
        self.assertEqual(list(d.keys()), list(d.iterkeys()))
        self.assertEqual(list(d.values()), list(d.itervalues()))
        # XXX Do we want list results from the dict members in Py3 too?
        if not PY3:
            self.assertTrue(isinstance(d.items(), list))
            self.assertTrue(isinstance(d.keys(), list))
            self.assertTrue(isinstance(d.values(), list))
#endclass TestDictionary
if __name__ == '__main__':
    # Show only warnings and above when run directly as a script.
    logging.basicConfig(level=logging.WARNING)
    unittest.main()
|
{
"content_hash": "d48817fdfaff7d2454560233214bb41d",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 104,
"avg_line_length": 39.50570342205323,
"alnum_prop": 0.6077959576515881,
"repo_name": "boomsbloom/dtm-fmri",
"id": "16c499b2451e05eabb1249dd680e49aa64c8d77d",
"size": "10530",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "DTM/for_gensim/lib/python2.7/site-packages/gensim/test/test_corpora_dictionary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "413670"
},
{
"name": "C++",
"bytes": "262666"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "14725"
},
{
"name": "HTML",
"bytes": "555708"
},
{
"name": "JavaScript",
"bytes": "23921"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "1302"
},
{
"name": "Matlab",
"bytes": "36260"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "46698963"
},
{
"name": "R",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "11728"
},
{
"name": "TeX",
"bytes": "18567"
}
],
"symlink_target": ""
}
|
import pytest
def _make_tree_cfg(config_factory, tree_dir, tree_file):
    """Build a config whose starting tree is read from `<tree_file>.nex`."""
    config = config_factory('admin', 'mk', tree_file)
    nexus_path = tree_dir.joinpath("{0}.nex".format(tree_file))
    config.languages.starting_tree = nexus_path.read_text(encoding='utf8')
    return config
def test_basic_starting_tree(config_factory, tree_dir):
    """A plain, well-formed starting tree is processed without error."""
    _make_tree_cfg(config_factory, tree_dir, "basic").process()
def test_superset_tree(config_factory, tree_dir):
    """A starting tree with extra taxa (superset) is still accepted."""
    _make_tree_cfg(config_factory, tree_dir, "superset").process()
def test_polytomy_tree(config_factory, tree_dir):
    """A starting tree containing polytomies is accepted."""
    _make_tree_cfg(config_factory, tree_dir, "polytomies").process()
def test_subset_tree(config_factory, tree_dir):
    """A starting tree covering only a subset of the taxa raises ValueError."""
    # Build the config first so only process() is under the raises check.
    config = _make_tree_cfg(config_factory, tree_dir, "subset")
    with pytest.raises(ValueError):
        config.process()
def test_duplicate_taxa_tree(config_factory, tree_dir):
    """A starting tree with duplicated taxon labels raises ValueError."""
    # Build the config first so only process() is under the raises check.
    config = _make_tree_cfg(config_factory, tree_dir, "duplicates")
    with pytest.raises(ValueError):
        config.process()
|
{
"content_hash": "0b23ef4f7509e3f81261e5ef93e84eac",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 64,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.6782178217821783,
"repo_name": "lmaurits/BEASTling",
"id": "6ef42045f2facf8c19f6cf7d05cc19f41a6e36ee",
"size": "1010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/starting_tree_tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "293847"
}
],
"symlink_target": ""
}
|
import io
from os.path import join
from json import load as json_load, dumps
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import patch, MagicMock
from urllib.parse import urlparse
import requests
from ona_service.kubernetes_watcher import (
ENV_POD_NAME,
DEFAULT_KUBERNETES_LABEL,
KubernetesWatcher,
)
# Format template for patch targets inside the kubernetes_watcher module.
PATCH_PATH = 'ona_service.kubernetes_watcher.{}'

# Canned Kubernetes "list pods" API payload used by the tests below.
# The first two items model a leader election; the last two model
# malformed entries the watcher must tolerate.
GET_PODS_RESPONSE = {
    'kind': 'PodList',
    'apiVersion': 'v1',
    'metadata': {'selfLink': '/api/v1/pods/'},
    'items': [
        # The winner of the election
        {
            'metadata': {'name': 'obsrvbl-ona-01', 'namespace': 'default'},
            'spec': {'nodeName': 'node-01', 'hostNetwork': True},
        },
        # The loser of the election
        {
            'metadata': {'name': 'obsrvbl-ona-02', 'namespace': 'default'},
            'spec': {'nodeName': 'node-02', 'hostNetwork': True},
        },
        # Missing pod name, somehow
        {
            'metadata': {'namespace': 'default'},
        },
        # Missing metadata, somehow
        {
            'spec': {'hostNetwork': True},
        },
    ],
}
class KubernetesWatchers(TestCase):
    """Tests for KubernetesWatcher's pod polling, filtering and upload."""

    def setUp(self):
        # Fake the in-cluster service-account files (CA certificate and
        # bearer token) in a throwaway directory.
        self.temp_dir = TemporaryDirectory()

        self.k8s_ca_cert_path = join(self.temp_dir.name, 'ca.crt')
        with open(self.k8s_ca_cert_path, 'wt') as outfile:
            print('CA Certificate!', file=outfile, end='')

        self.k8s_token_path = join(self.temp_dir.name, 'token')
        with open(self.k8s_token_path, 'wt') as outfile:
            print('Token!', file=outfile, end='')

        # Environment emulating a pod running inside a Kubernetes cluster.
        # ENV_POD_NAME names the first pod of GET_PODS_RESPONSE.
        self.test_env = {
            'KUBERNETES_SERVICE_HOST': '127.0.0.1',
            'KUBERNETES_SERVICE_PORT': '8080',
            'K8S_CA_CERT_PATH': self.k8s_ca_cert_path,
            'KUBERNETES_TOKEN_PATH': self.k8s_token_path,
            ENV_POD_NAME: 'obsrvbl-ona-01',
        }

    def tearDown(self):
        self.temp_dir.cleanup()

    @patch(PATCH_PATH.format('requests.get'), autospec=True)
    def test_execute(self, mock_get):
        # Mock the API response, returning the test data
        def _get(url, params=None, **kwargs):
            from ona_service.kubernetes_watcher import requests as r
            # NOTE(review): this match_hostname call presumably exercises a
            # hostname-checking patch the watcher installs — confirm against
            # KubernetesWatcher's implementation.
            host = urlparse(url).hostname
            r.packages.urllib3.connection.match_hostname({None: None}, host)

            # The token written in setUp must be sent as a Bearer credential.
            headers = kwargs['headers']
            self.assertEqual(headers['Authorization'], 'Bearer Token!')
            self.assertEqual(headers['Accept'], 'application/json')

            resp = requests.Response()
            resp.status_code = 200
            resp.raw = io.BytesIO(dumps(GET_PODS_RESPONSE).encode('utf-8'))
            return resp

        mock_get.side_effect = _get

        # Intercept the upload, and check for expected output
        def _send_file(data_type, path, now, suffix=None):
            self.assertEqual(data_type, 'logs')
            with open(path) as infile:
                actual = json_load(infile)
                self.assertEqual(actual, GET_PODS_RESPONSE)
            return 'file://{}/mock-ona_k8s-pods'.format(self.temp_dir.name)

        # Emulate the k8s environment and run the service
        with patch.dict(PATCH_PATH.format('os.environ'), self.test_env):
            inst = KubernetesWatcher()
            inst.api = MagicMock(inst.api)
            inst.api.send_file.side_effect = _send_file
            inst.execute()

        # The first call should be filtered; the second shouldn't be filtered.
        self.assertEqual(mock_get.call_count, 2)
        self.assertEqual(
            mock_get.call_args_list[0][1].get('params'),
            {'labelSelector': 'name={}'.format(DEFAULT_KUBERNETES_LABEL)},
        )
        self.assertEqual(
            mock_get.call_args_list[1][1].get('params'),
            None
        )

        # The site signal should indicate the type and path
        inst.api.send_signal.assert_called_once_with(
            'logs',
            {
                'log_type': 'k8s-pods',
                'path': (
                    'file://{}/mock-ona_k8s-pods'.format(self.temp_dir.name)
                )
            }
        )

    @patch(PATCH_PATH.format('requests.get'), autospec=True)
    def test_should_not_execute(self, mock_get):
        # When the hostname isn't the first, we shouldn't do any further
        # querying.
        # Mock the API response, returning the test data
        def _get(url, params=None, **kwargs):
            from ona_service.kubernetes_watcher import requests as r
            host = urlparse(url).hostname
            r.packages.urllib3.connection.match_hostname({None: None}, host)
            # Only the filtered (labelSelector) call is expected here.
            self.assertTrue(params)
            resp = requests.Response()
            resp.status_code = 200
            resp.raw = io.BytesIO(dumps(GET_PODS_RESPONSE).encode('utf-8'))
            return resp

        mock_get.side_effect = _get

        # Emulate the k8s environment and run the service
        with patch.dict(PATCH_PATH.format('os.environ'), self.test_env):
            inst = KubernetesWatcher()
            inst.api = MagicMock(inst.api)
            inst.execute()

        # Neither an upload nor a signal should have happened.
        inst.api.send_file.assert_not_called()
        inst.api.send_signal.assert_not_called()

    @patch(PATCH_PATH.format('requests.get'), autospec=True)
    def test_execute_bad_check(self, mock_get):
        # Mock the API response, returning an error
        def _get(url, params=None, **kwargs):
            resp = requests.Response()
            resp.status_code = 403
            resp.raw = io.BytesIO(dumps({}).encode('utf-8'))
            return resp

        mock_get.side_effect = _get

        # Emulate the k8s environment and run the service
        with patch.dict(PATCH_PATH.format('os.environ'), self.test_env):
            inst = KubernetesWatcher()
            inst.api = MagicMock(inst.api)
            inst.execute()

        # Since the initial check failed, no second GET should be made.
        # No Observable API calls should be made
        self.assertEqual(mock_get.call_count, 1)
        self.assertEqual(inst.api.call_count, 0)

    @patch(PATCH_PATH.format('requests.get'), autospec=True)
    def test_execute_bad_update(self, mock_get):
        # Mock the API response, returning an error
        # (the filtered call succeeds, the unfiltered one gets a 403).
        def _get(url, params=None, **kwargs):
            resp = requests.Response()
            if params:
                resp = requests.Response()
                resp.status_code = 200
                resp.raw = io.BytesIO(dumps(GET_PODS_RESPONSE).encode('utf-8'))
            else:
                resp.status_code = 403
                resp.raw = io.BytesIO(dumps({}).encode('utf-8'))
            return resp

        mock_get.side_effect = _get

        # Emulate the k8s environment and run the service
        with patch.dict(PATCH_PATH.format('os.environ'), self.test_env):
            inst = KubernetesWatcher()
            inst.api = MagicMock(inst.api)
            inst.execute()

        # Since the initial check succeeded, a second GET should be made.
        # No Observable API calls should be made
        self.assertEqual(mock_get.call_count, 2)
        self.assertEqual(inst.api.call_count, 0)

    @patch(PATCH_PATH.format('requests.get'), autospec=True)
    def test_execute_missing_env(self, mock_get):
        # No API calls to Kubernetes or Observable should be made if there's
        # any missing environment variables
        for key in self.test_env.keys():
            env = self.test_env.copy()
            env.pop(key)
            with patch.dict(PATCH_PATH.format('os.environ'), env):
                inst = KubernetesWatcher()
                inst.api = MagicMock(inst.api)
                inst.execute()
            self.assertEqual(mock_get.call_count, 0)
            self.assertEqual(inst.api.call_count, 0)
|
{
"content_hash": "015ac96a65aae3951ccddb2041f24d61",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 79,
"avg_line_length": 35.66818181818182,
"alnum_prop": 0.5786924939467313,
"repo_name": "obsrvbl/ona",
"id": "ce2f65d7a45531a9f490832c202601428294255c",
"size": "8434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/scripts/tests/test_kubernetes_watcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "643"
},
{
"name": "Makefile",
"bytes": "4589"
},
{
"name": "Python",
"bytes": "304233"
},
{
"name": "Shell",
"bytes": "22753"
}
],
"symlink_target": ""
}
|
from iss import util
import sys
import random
import logging
import numpy as np
class ErrorModel(object):
    """Main ErrorModel Class

    This class is used to create inheriting classes and contains all
    the functions that are shared by all ErrorModel classes.

    Subclasses are expected to provide the attributes used below
    (e.g. ``read_length``, ``quality_forward``/``quality_reverse``,
    substitution and indel tables, and ``gen_phred_scores``) —
    they are not defined here.
    """

    @property
    def logger(self):
        # Logger named after the concrete subclass, e.g.
        # "iss.error_models.kde.KDErrorModel".
        component = "{}.{}".format(type(self).__module__, type(self).__name__)
        return logging.getLogger(component)

    def load_npz(self, npz_path, model):
        """load the error profile .npz file

        Args:
            npz_path (string): path to the npz file
            model (string): type of model. Could be 'cdf' or 'kde'. 'cdf' has
                been deprecated and is no longer available

        Returns:
            ndarray: numpy object containing variables necessary
                for error model construction

        Exits the process (status 1) if the file cannot be read or was
        built for a different model type.
        """
        try:
            error_profile = np.load(npz_path, allow_pickle=True)
            # The profile records which model produced it; refuse a mismatch.
            assert error_profile['model'] == model
        except (OSError, IOError) as e:
            self.logger.error('Failed to read ErrorModel file: %s' % e)
            sys.exit(1)
        except AssertionError as e:
            self.logger.error(
                'Trying to load a %s ErrorModel in %s mode' % (
                    error_profile['model'], model))
            sys.exit(1)
        else:
            self.logger.debug('Loaded ErrorProfile: %s' % npz_path)
            return error_profile

    def introduce_error_scores(self, record, orientation):
        """Add phred scores to a SeqRecord according to the error_model

        Args:
            record (SeqRecord): a read record
            orientation (string): orientation of the read. Can be 'forward' or
                'reverse'

        Returns:
            SeqRecord: a read record with error scores
        """
        # gen_phred_scores and the quality_* attributes come from subclasses.
        if orientation == 'forward':
            record.letter_annotations["phred_quality"] = self.gen_phred_scores(
                self.quality_forward, 'forward')
        elif orientation == 'reverse':
            record.letter_annotations["phred_quality"] = self.gen_phred_scores(
                self.quality_reverse, 'reverse')
        return record

    def mut_sequence(self, record, orientation):
        """Introduce substitution errors to a sequence

        If a random probability is higher than the probability of the basecall
        being correct, introduce a substitution error

        Args:
            record (SeqRecord): a read record with error scores
            orientation (string): orientation of the read. Can be 'forward' or
                'reverse'

        Returns:
            Seq: a sequence
        """
        # get the right subst_matrix
        if orientation == 'forward':
            nucl_choices = self.subst_choices_for
        elif orientation == 'reverse':
            nucl_choices = self.subst_choices_rev

        mutable_seq = record.seq.tomutable()
        quality_list = record.letter_annotations["phred_quality"]
        position = 0
        for nucl, qual in zip(mutable_seq, quality_list):
            # Ambiguity codes (IUPAC) are never substituted.
            if random.random() > util.phred_to_prob(qual) \
                    and nucl.upper() not in 'RYWSMKHBVDN':
                mutable_seq[position] = str(np.random.choice(
                    nucl_choices[position][nucl.upper()][0],
                    p=nucl_choices[position][nucl.upper()][1]))
            position += 1
        return mutable_seq.toseq()

    def adjust_seq_length(self, mut_seq, orientation, full_sequence, bounds):
        """Truncate or Extend reads to make them fit the read length

        When insertions or deletions are introduced to the reads, their length
        will change. This function takes a (mutable) read and a reference
        sequence, and extend or truncate the read if it has had an insertion
        or a deletion

        Args:
            mut_seq (MutableSeq): a mutable sequence
            orientation (string): orientation of the read. Can be 'forward' or
                'reverse'
            full_sequence (Seq): the reference sequence from which mut_seq
                comes from
            bounds (tuple): the position of the read in the full_sequence

        Returns:
            Seq: a sequence fitting the ErrorModel
        """
        read_start, read_end = bounds
        if len(mut_seq) == self.read_length:
            return mut_seq.toseq()
        elif len(mut_seq) > self.read_length:
            # Too long after insertions: drop bases from the end.
            while len(mut_seq) > self.read_length:
                mut_seq.pop()
            return mut_seq.toseq()
        else:  # len smaller
            # Too short after deletions: extend with reference bases past the
            # read end ('A' when the reference itself runs out).
            to_add = self.read_length - len(mut_seq)
            if orientation == 'forward':
                for i in range(to_add):
                    if read_end + i >= len(full_sequence):
                        nucl_to_add = 'A'
                    else:
                        nucl_to_add = str(full_sequence[read_end + i])
                    mut_seq.append(nucl_to_add)
            elif orientation == 'reverse':
                for i in range(to_add):
                    if read_end + i >= len(full_sequence):
                        nucl_to_add = 'A'
                    else:
                        # Reverse reads extend with the reverse complement.
                        nucl_to_add = util.rev_comp(
                            full_sequence[read_end + i])
                    mut_seq.append(nucl_to_add)
            return mut_seq.toseq()

    def introduce_indels(self, record, orientation, full_seq, bounds):
        """Introduce insertions or deletions in a sequence

        Introduce insertion and deletion errors according to the probabilities
        present in the indel choices list

        Args:
            record (SeqRecord): a sequence record
            orientation (string): orientation of the read. Can be 'forward' or
                'reverse'
            full_seq (Seq): the reference sequence from which mut_seq
                comes from
            bounds (tuple): the position of the read in the full_sequence

        Returns:
            Seq: a sequence with (eventually) indels
        """
        # get the right indel arrays
        if orientation == 'forward':
            insertions = self.ins_for
            deletions = self.del_for
        elif orientation == 'reverse':
            insertions = self.ins_rev
            deletions = self.del_rev

        mutable_seq = record.seq.tomutable()
        position = 0
        for nucl in range(self.read_length - 1):
            try:
                # skip ambiguous nucleotides
                if mutable_seq[nucl].upper() in 'RYWSMKHBVDN':
                    position += 1
                    continue
                for nucl_to_insert, prob in insertions[position].items():
                    if random.random() < prob:
                        # we want to insert after the base read
                        mutable_seq.insert(position + 1, str(nucl_to_insert))
                if random.random() < deletions[position][mutable_seq[nucl].upper()]:
                    mutable_seq.pop(position)
                position += 1
            except IndexError as e:
                # Indels can shrink the sequence below the current index;
                # just move on to the next base.
                continue

        # Restore the configured read length after any indels.
        seq = self.adjust_seq_length(
            mutable_seq, orientation, full_seq, bounds)
        return seq
|
{
"content_hash": "18401acbab583a158fbeb40f294b1b5f",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 84,
"avg_line_length": 37.72774869109948,
"alnum_prop": 0.5550929780738274,
"repo_name": "HadrienG/InSilicoSeq",
"id": "473942885a68cb08e69e4ad032cee5f7684eaadf",
"size": "7253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iss/error_models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "156"
},
{
"name": "Python",
"bytes": "100196"
}
],
"symlink_target": ""
}
|
"""Unit tests for reviewboard.admin.siteconfig."""
import os
from django.conf import settings
from django.urls import reverse
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.admin.siteconfig import load_site_config
from reviewboard.testing.testcase import TestCase
class LoadSiteConfigTests(TestCase):
    """Unit tests for reviewboard.admin.siteconfig.load_site_config."""

    def setUp(self):
        super(LoadSiteConfigTests, self).setUp()

        self.siteconfig = SiteConfiguration.objects.get_current()

    def test_with_site_domain_method_http(self):
        """Testing load_site_config with site_domain_method=http"""
        self.siteconfig.set('site_domain_method', 'http')
        self.siteconfig.save()

        load_site_config()

        # The HTTPS environment flag must be off and CSRF cookies insecure
        # when the site is served over plain HTTP.
        self.assertEqual(str(os.environ.get(str('HTTPS'))), str('off'))
        self.assertFalse(getattr(settings, 'CSRF_COOKIE_SECURE', None))

        # Ensure that CSRF cookie flags are set correctly.
        self.create_user(username='test-user',
                         password='test-user')
        login_url = reverse('login')
        response = self.client.get(login_url)
        csrf_cookie = response.cookies.get(settings.CSRF_COOKIE_NAME)
        self.assertIsNotNone(csrf_cookie)
        self.assertFalse(csrf_cookie['secure'])

        # Ensure that session cookie flags are set correctly.
        response = self.client.post(
            reverse('login'),
            {
                'username': 'test-user',
                'password': 'test-user',
            })
        session_cookie = response.cookies.get(settings.SESSION_COOKIE_NAME)
        self.assertIsNotNone(session_cookie)
        self.assertFalse(session_cookie['secure'])

    def test_with_site_domain_method_https(self):
        """Testing load_site_config with site_domain_method=https"""
        self.siteconfig.set('site_domain_method', 'https')
        self.siteconfig.save()

        load_site_config()

        # The HTTPS environment flag must be on and CSRF cookies secure
        # when the site is served over HTTPS.
        self.assertEqual(str(os.environ.get(str('HTTPS'))), str('on'))
        self.assertTrue(getattr(settings, 'CSRF_COOKIE_SECURE', None))

        # Ensure that CSRF cookie flags are set correctly.
        login_url = reverse('login')
        self.create_user(username='test-user',
                         password='test-user')
        response = self.client.get(login_url, secure=True)
        csrf_cookie = response.cookies.get(settings.CSRF_COOKIE_NAME)
        self.assertIsNotNone(csrf_cookie)
        self.assertTrue(csrf_cookie['secure'])

        # Ensure that session cookie flags are set correctly.
        response = self.client.post(
            login_url,
            {
                'username': 'test-user',
                'password': 'test-user',
            },
            secure=True)
        session_cookie = response.cookies.get(settings.SESSION_COOKIE_NAME)
        self.assertIsNotNone(session_cookie)
        self.assertTrue(session_cookie['secure'])
|
{
"content_hash": "afdc2f2683a05f91cc003f09919ef8d8",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 75,
"avg_line_length": 35.602409638554214,
"alnum_prop": 0.6345177664974619,
"repo_name": "reviewboard/reviewboard",
"id": "220ed0528e147195a5a24ece4ba1c662bb75a18a",
"size": "2955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/admin/tests/test_siteconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
from MpfTestCase import MpfTestCase
from mock import MagicMock
class TestTooLongExitCountDelay(MpfTestCase):
    """Ball-device tests around the trough's exit_count_delay timing."""

    def getConfigFile(self):
        return 'test_too_long_exit_count_delay.yaml'

    def getMachinePath(self):
        return '../tests/machine_files/ball_device/'

    def _ball_drained(self, *kwargs):
        # Event handler: count every ball_drained event.
        self._num_balls_drained += 1

    def put_four_balls_in_trough(self):
        """Seed the trough with four balls and mock both eject coils."""
        self.machine.ball_controller.num_balls_known = 4
        self.machine.switch_controller.process_switch('s_trough_1', 1)
        self.machine.switch_controller.process_switch('s_trough_2', 1)
        self.machine.switch_controller.process_switch('s_trough_3', 1)
        self.machine.switch_controller.process_switch('s_trough_4', 1)
        self.advance_time_and_run(1)
        self.assertEqual(4, self.machine.ball_devices.trough.balls)

        self.trough_coil = self.machine.coils.trough_eject
        self.plunger_coil = self.machine.coils.plunger_eject
        self.trough_coil.pulse = MagicMock()
        self.plunger_coil.pulse = MagicMock()

        self._num_balls_drained = 0
        self.machine.events.add_handler('ball_drained', self._ball_drained)

    def test_eject_to_plunger(self):
        # tests eject from trough to plunger where the ball enters the plunger
        # before the exit_count_delay of the trough
        self.put_four_balls_in_trough()
        self.machine.playfield.add_ball(player_controlled=True)
        self.advance_time_and_run(1)

        # default pulse
        self.trough_coil.pulse.assert_called_once_with()
        self.trough_coil.pulse = MagicMock()

        # All trough switches open briefly, then three re-close plus a jam
        # blip: one ball has left the trough.
        self.machine.switch_controller.process_switch('s_trough_1', 0)
        self.machine.switch_controller.process_switch('s_trough_2', 0)
        self.machine.switch_controller.process_switch('s_trough_3', 0)
        self.machine.switch_controller.process_switch('s_trough_4', 0)
        self.advance_time_and_run(.1)
        self.machine.switch_controller.process_switch('s_trough_2', 1)
        self.machine.switch_controller.process_switch('s_trough_3', 1)
        self.machine.switch_controller.process_switch('s_trough_4', 1)
        self.machine.switch_controller.process_switch('s_trough_jam', 1)
        self.machine.switch_controller.process_switch('s_trough_jam', 0)
        self.advance_time_and_run(.1)

        # ball goes into plunger
        self.machine.switch_controller.process_switch('s_plunger', 1)
        self.advance_time_and_run(1)
        self.assertEqual(self.machine.ball_devices.trough.balls, 3)
        self.assertEqual(self.machine.ball_devices.plunger.balls, 1)
        assert not self.plunger_coil.pulse.called
        self.assertEqual("idle", self.machine.ball_devices.trough._state)
        self.assertEqual("ejecting", self.machine.ball_devices.plunger._state)

        # player hits the launch button
        self.machine.switch_controller.process_switch('s_launch', 1)
        self.machine.switch_controller.process_switch('s_launch', 0)
        self.advance_time_and_run(.1)
        self.machine.switch_controller.process_switch('s_plunger', 0)
        self.advance_time_and_run(1)

        # ball moves from plunger lane to playfield
        self.machine.switch_controller.process_switch('s_playfield', 1)
        self.machine.switch_controller.process_switch('s_playfield', 0)
        self.advance_time_and_run(.1)
        self.plunger_coil.pulse.assert_called_once_with()
        self.assertEqual(self.machine.ball_devices.trough.balls, 3)
        self.assertEqual(self.machine.ball_devices.plunger.balls, 0)
        self.assertEqual(self.machine.ball_devices.playfield.balls, 1)
        self.assertEqual("idle", self.machine.ball_devices.trough._state)
        self.assertEqual("idle", self.machine.ball_devices.plunger._state)
        self.assertEqual(4, self.machine.ball_controller.num_balls_known)

    def test_ball_fell_back_in_trough_before_exit_count_delay(self):
        self.put_four_balls_in_trough()
        self.machine.playfield.add_ball(player_controlled=True)
        self.advance_time_and_run(1)

        # default pulse
        self.trough_coil.pulse.assert_called_once_with()
        self.trough_coil.pulse = MagicMock()

        self.machine.switch_controller.process_switch('s_trough_1', 0)
        self.machine.switch_controller.process_switch('s_trough_2', 0)
        self.machine.switch_controller.process_switch('s_trough_3', 0)
        self.machine.switch_controller.process_switch('s_trough_4', 0)
        self.advance_time_and_run(.1)
        self.machine.switch_controller.process_switch('s_trough_2', 1)
        self.machine.switch_controller.process_switch('s_trough_3', 1)
        self.machine.switch_controller.process_switch('s_trough_4', 1)
        self.machine.switch_controller.process_switch('s_trough_jam', 1)
        self.machine.switch_controller.process_switch('s_trough_jam', 0)
        self.advance_time_and_run(.2)

        # ball falls back in trough
        self.machine.switch_controller.process_switch('s_trough_jam', 1)
        self.advance_time_and_run(2)
        self.assertEqual(self.machine.ball_devices.trough.balls, 3)
        self.assertEqual(self.machine.ball_devices.plunger.balls, 0)
        self.assertEqual(self.machine.ball_devices.playfield.balls, 0)

        # after the eject timeout, the trough will realize it has a ball
        self.advance_time_and_run(10)
        self.assertEqual(self.machine.ball_devices.trough.balls, 4)
|
{
"content_hash": "0805485e53b60d64f1a12783d180c57e",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 78,
"avg_line_length": 45.425,
"alnum_prop": 0.6802421574023115,
"repo_name": "spierepf/mpf",
"id": "020af9532dba0035ffae3e03a631e6c0c6dff9c1",
"size": "5452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_TooLongExitCountDelay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1839"
},
{
"name": "Makefile",
"bytes": "170"
},
{
"name": "Python",
"bytes": "1685472"
},
{
"name": "Shell",
"bytes": "729"
}
],
"symlink_target": ""
}
|
from verta._swagger.base_type import BaseType
class VersioningGetCommitRequestResponse(BaseType):
    """Swagger model for the GetCommit response; wraps an optional commit."""

    def __init__(self, commit=None):
        # Per the swagger spec, no attribute of this type is mandatory.
        required = {
            "commit": False,
        }
        self.commit = commit

        # Fail fast when a required attribute was left unset.
        for attr_name, is_required in required.items():
            if is_required and self[attr_name] is None:
                raise ValueError('attribute {} is required'.format(attr_name))

    @staticmethod
    def from_json(d):
        """Build an instance from a decoded JSON dict (mutates ``d``)."""
        from .VersioningCommit import VersioningCommit

        commit_json = d.get('commit', None)
        if commit_json is not None:
            d['commit'] = VersioningCommit.from_json(commit_json)

        return VersioningGetCommitRequestResponse(**d)
|
{
"content_hash": "317dcec1264525fb20245caa28250318",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 62,
"avg_line_length": 25.434782608695652,
"alnum_prop": 0.6683760683760683,
"repo_name": "mitdbg/modeldb",
"id": "2f17dd31979356998249d65da3944a94456bbb8b",
"size": "628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/verta/verta/_swagger/_public/modeldb/versioning/model/VersioningGetCommitRequestResponse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43352"
},
{
"name": "Dockerfile",
"bytes": "235"
},
{
"name": "HTML",
"bytes": "30924"
},
{
"name": "Java",
"bytes": "393927"
},
{
"name": "JavaScript",
"bytes": "1017682"
},
{
"name": "Python",
"bytes": "178774"
},
{
"name": "Scala",
"bytes": "251259"
},
{
"name": "Shell",
"bytes": "16870"
},
{
"name": "Thrift",
"bytes": "55683"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Parse requirements.txt: one dependency per line, blank lines dropped.
# Use a context manager so the file handle is closed deterministically
# (the original open() call was never closed).
with open("requirements.txt") as req_file:
    req_lines = [line.strip() for line in req_file.readlines()]
install_reqs = list(filter(None, req_lines))

setup(
    name = "auth-userpass",
    version = "0.5.0",
    author = "Francis Luong (Franco)",
    author_email = "networkascode@definefunk.com",
    description = ("Simple but not so secure username and password storage and retrieval"),
    license = "LICENSE.txt",
    url = "https://github.com/francisluong/py-auth-userpass",
    install_requires=install_reqs,
    packages=['userpass']
)
|
{
"content_hash": "63fadf8fac31b97ec119b9bf284adf8e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 91,
"avg_line_length": 33.294117647058826,
"alnum_prop": 0.6925795053003534,
"repo_name": "francisluong/py-auth-userpass",
"id": "f81e38d1d887a9efa6f3794cae721a576322f644",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6792"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates Product and ShoppingWebsite models.

    ShoppingWebsite links to django-dynamic-scraper's Scraper and
    SchedulerRuntime; Product links to its website and a checker runtime.
    """

    initial = True

    dependencies = [
        ('dynamic_scraper', '0018_auto_20170506_2102'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Title', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='ShoppingWebsite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('url', models.URLField()),
                ('scraper', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='dynamic_scraper.Scraper')),
                ('scraper_runtime', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='dynamic_scraper.SchedulerRuntime')),
            ],
        ),
        # FK fields added after both models exist to avoid ordering issues.
        migrations.AddField(
            model_name='product',
            name='Website',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Products.ShoppingWebsite'),
        ),
        migrations.AddField(
            model_name='product',
            name='checker_runtime',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='dynamic_scraper.SchedulerRuntime'),
        ),
    ]
|
{
"content_hash": "dedc4d31aff5231ecc2b78e2e749a5bc",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 163,
"avg_line_length": 39.16279069767442,
"alnum_prop": 0.5973871733966746,
"repo_name": "Habibullah-UET/FYP",
"id": "582fd9ef60799b0ff2a81a48cca8f721833bdb47",
"size": "1757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Products/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16300"
},
{
"name": "HTML",
"bytes": "9828"
},
{
"name": "JavaScript",
"bytes": "19920"
},
{
"name": "Python",
"bytes": "20193"
}
],
"symlink_target": ""
}
|
__author__ = "Sylvain Dangin"
__licence__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Sylvain Dangin"
__email__ = "sylvain.dangin@gmail.com"
__status__ = "Development"
class first_word():
    """Transform that reduces a multi-word string to its first word."""

    # The original method took no `self`, so it could only be called on the
    # class, never on an instance. @staticmethod keeps class-level calls
    # working and additionally makes instance-level calls valid.
    @staticmethod
    def transform(input_data):
        """Get the first word of a string with more than one word.

        :param input_data: Text with more than one word.
        :return: The first word of the input string.
        :rtype: str or type of the input data if not a string
        """
        # Non-strings, and strings without a space, pass through unchanged.
        if isinstance(input_data, str):
            if ' ' in input_data:
                return input_data.split(' ')[0]
        return input_data
|
{
"content_hash": "bec11fe830019ed944390884aadf53d4",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 32.3,
"alnum_prop": 0.5743034055727554,
"repo_name": "Sylvaner/PyConverter",
"id": "f84e7a4bf7df38be662b3adb844e8edd76052dce",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transforms/first_word.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81495"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drops the unused many-to-many 'layers' field from Question."""

    dependencies = [
        ('survey', '0009_question_impact'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='question',
            name='layers',
        ),
    ]
|
{
"content_hash": "78f0f1df0e2c7c239166fce79e8daffa",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 43,
"avg_line_length": 19.176470588235293,
"alnum_prop": 0.588957055214724,
"repo_name": "Ecotrust/floodplain-restoration",
"id": "389afcfe746409a7e895bdb0f17d2379ae7f691f",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dst/survey/migrations/0010_remove_question_layers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18811"
},
{
"name": "HTML",
"bytes": "91644"
},
{
"name": "JavaScript",
"bytes": "128271"
},
{
"name": "Python",
"bytes": "182327"
},
{
"name": "Shell",
"bytes": "4981"
}
],
"symlink_target": ""
}
|
"""
Configuration support functionality.
"""
# Import dependencies up front; on any missing module, report it and exit
# immediately instead of failing later at call time. (Python 2 module —
# note the statement-form print.)
try:
    import os
    import logging
    import inject
    from ConfigParser import SafeConfigParser
    from pkg_resources import resource_stream
except ImportError as e:
    print "Import Error: %s" % e
    exit()

# Module-level logger named after this module.
logger = logging.getLogger(__name__)
@inject.param('config')
def init_config_file(config_file, config):
    """
    Initializes config from a specified cfg file.

    A missing or nonexistent path is silently ignored; a path that exists
    but cannot be parsed raises ValueError.

    @param config_file: Path to the config file.
    @param config: Config object (injected).
    """
    # Guard clause: nothing to do without an existing file.
    if not (config_file and os.path.exists(config_file)):
        return
    if not config.read([config_file]):
        raise ValueError("Could not read configuration from file: %s" % config_file)
def init_config():
    """
    Initializes config from the default cfg file.

    @return: The config object.
    """
    # Load defaults from the 'default.cfg' resource bundled in this package.
    config = SafeConfigParser()
    config.readfp(resource_stream(__name__, 'default.cfg'))
    return config
def debug_config(config):
    """
    Debugs config object by printing out all the options, values pairs.

    @param config: The config object to be debugged.
    """
    for section in config.sections():
        for option in config.options(section):
            # Lazy %-style arguments: the message is only formatted when
            # DEBUG logging is actually enabled.
            logger.debug("[%s] %s=%s",
                         section, option, config.get(section, option))
|
{
"content_hash": "96a74bc6c825425a00ea402f881880bf",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 88,
"avg_line_length": 28.065217391304348,
"alnum_prop": 0.6576297443841983,
"repo_name": "phuonghoangtaisinh/vmcontroller.unstable",
"id": "5dc6b0a911198001f4a432d2ad17b6fda8f5a520",
"size": "1291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/vmcontroller.guest/vmcontroller/guest/config/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "123213"
},
{
"name": "Shell",
"bytes": "1657"
}
],
"symlink_target": ""
}
|
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'DifficultyLevel.pictogram'
        # (stored in the legacy column 'picto' of table 'o_b_difficulte').
        db.add_column('o_b_difficulte', 'pictogram',
                      self.gf('django.db.models.fields.files.FileField')(max_length=512, null=True, db_column='picto', blank=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'DifficultyLevel.pictogram'
db.delete_column('o_b_difficulte', 'picto')
models = {
u'authent.structure': {
'Meta': {'ordering': "['name']", 'object_name': 'Structure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'core.comfort': {
'Meta': {'ordering': "['comfort']", 'object_name': 'Comfort', 'db_table': "'l_b_confort'"},
'comfort': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'confort'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.datasource': {
'Meta': {'ordering': "['source']", 'object_name': 'Datasource', 'db_table': "'l_b_source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.network': {
'Meta': {'ordering': "['network']", 'object_name': 'Network', 'db_table': "'l_b_reseau'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'reseau'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.path': {
'Meta': {'object_name': 'Path', 'db_table': "'l_t_troncon'"},
'arrival': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'arrivee'", 'blank': 'True'}),
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'comfort': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'confort'", 'to': u"orm['core.Comfort']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'remarques'", 'blank': 'True'}),
'datasource': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'source'", 'to': u"orm['core.Datasource']"}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'departure': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'depart'", 'blank': 'True'}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
'geom_cadastre': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'db_column': "'nom'", 'blank': 'True'}),
'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Network']", 'db_table': "'l_r_troncon_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'}),
'stake': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'enjeu'", 'to': u"orm['core.Stake']"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Usage']", 'db_table': "'l_r_troncon_usage'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_column': "'valide'"})
},
u'core.pathaggregation': {
'Meta': {'ordering': "['id']", 'object_name': 'PathAggregation', 'db_table': "'e_r_evenement_troncon'"},
'end_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_fin'", 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'ordre'", 'blank': 'True'}),
'path': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'troncon'", 'to': u"orm['core.Path']"}),
'start_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_debut'", 'db_index': 'True'}),
'topo_object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'db_column': "'evenement'", 'to': u"orm['core.Topology']"})
},
u'core.stake': {
'Meta': {'ordering': "['id']", 'object_name': 'Stake', 'db_table': "'l_b_enjeu'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stake': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'enjeu'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.topology': {
'Meta': {'object_name': 'Topology', 'db_table': "'e_t_evenement'"},
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'offset': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'decallage'"}),
'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Path']", 'through': u"orm['core.PathAggregation']", 'db_column': "'troncons'", 'symmetrical': 'False'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'})
},
u'core.usage': {
'Meta': {'ordering': "['usage']", 'object_name': 'Usage', 'db_table': "'l_b_usage'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usage': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'usage'"})
},
u'trekking.difficultylevel': {
'Meta': {'ordering': "['id']", 'object_name': 'DifficultyLevel', 'db_table': "'o_b_difficulte'"},
'difficulty': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'difficulte'"}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'", 'blank': 'True'})
},
u'trekking.informationdesk': {
'Meta': {'ordering': "['name']", 'object_name': 'InformationDesk', 'db_table': "'o_b_renseignement'"},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'nom'"})
},
u'trekking.poi': {
'Meta': {'object_name': 'POI', 'db_table': "'o_t_poi'", '_ormbases': [u'core.Topology']},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pois'", 'db_column': "'type'", 'to': u"orm['trekking.POIType']"})
},
u'trekking.poitype': {
'Meta': {'ordering': "['label']", 'object_name': 'POIType', 'db_table': "'o_b_poi'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"})
},
u'trekking.route': {
'Meta': {'ordering': "['route']", 'object_name': 'Route', 'db_table': "'o_b_parcours'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'route': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'parcours'"})
},
u'trekking.theme': {
'Meta': {'ordering': "['label']", 'object_name': 'Theme', 'db_table': "'o_b_theme'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'theme'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"})
},
u'trekking.trek': {
'Meta': {'ordering': "['name']", 'object_name': 'Trek', 'db_table': "'o_t_itineraire'", '_ormbases': [u'core.Topology']},
'access': ('django.db.models.fields.TextField', [], {'db_column': "'acces'", 'blank': 'True'}),
'advice': ('django.db.models.fields.TextField', [], {'db_column': "'recommandation'", 'blank': 'True'}),
'advised_parking': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'parking'", 'blank': 'True'}),
'ambiance': ('django.db.models.fields.TextField', [], {'db_column': "'ambiance'", 'blank': 'True'}),
'arrival': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'arrivee'", 'blank': 'True'}),
'departure': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'depart'", 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'description_teaser': ('django.db.models.fields.TextField', [], {'db_column': "'chapeau'", 'blank': 'True'}),
'difficulty': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'difficulte'", 'to': u"orm['trekking.DifficultyLevel']"}),
'disabled_infrastructure': ('django.db.models.fields.TextField', [], {'db_column': "'handicap'", 'blank': 'True'}),
'duration': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'db_column': "'duree'", 'blank': 'True'}),
'information_desk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'renseignement'", 'to': u"orm['trekking.InformationDesk']"}),
'is_park_centered': ('django.db.models.fields.BooleanField', [], {'db_column': "'coeur'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.TrekNetwork']", 'db_table': "'o_r_itineraire_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'parking_location': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom_parking'", 'blank': 'True'}),
'public_transport': ('django.db.models.fields.TextField', [], {'db_column': "'transport'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'db_column': "'public'"}),
'related_treks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_treks+'", 'symmetrical': 'False', 'through': u"orm['trekking.TrekRelationship']", 'to': u"orm['trekking.Trek']"}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'parcours'", 'to': u"orm['trekking.Route']"}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.Theme']", 'db_table': "'o_r_itineraire_theme'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"}),
'usages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.Usage']", 'db_table': "'o_r_itineraire_usage'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'web_links': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.WebLink']", 'db_table': "'o_r_itineraire_web'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'})
},
u'trekking.treknetwork': {
'Meta': {'ordering': "['network']", 'object_name': 'TrekNetwork', 'db_table': "'o_b_reseau'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'reseau'"})
},
u'trekking.trekrelationship': {
'Meta': {'unique_together': "(('trek_a', 'trek_b'),)", 'object_name': 'TrekRelationship', 'db_table': "'o_r_itineraire_itineraire'"},
'has_common_departure': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'depart_commun'"}),
'has_common_edge': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'troncons_communs'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_circuit_step': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'etape_circuit'"}),
'trek_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trek_relationship_a'", 'db_column': "'itineraire_a'", 'to': u"orm['trekking.Trek']"}),
'trek_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trek_relationship_b'", 'db_column': "'itineraire_b'", 'to': u"orm['trekking.Trek']"})
},
u'trekking.usage': {
'Meta': {'ordering': "['usage']", 'object_name': 'Usage', 'db_table': "'o_b_usage'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"}),
'usage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'usage'"})
},
u'trekking.weblink': {
'Meta': {'ordering': "['name']", 'object_name': 'WebLink', 'db_table': "'o_t_web'"},
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'db_column': "'categorie'", 'to': u"orm['trekking.WebLinkCategory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '128', 'db_column': "'url'"})
},
u'trekking.weblinkcategory': {
'Meta': {'ordering': "['label']", 'object_name': 'WebLinkCategory', 'db_table': "'o_b_web_category'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"})
}
}
complete_apps = ['trekking']
|
{
"content_hash": "7e7ae9f9551f8d06137d510a5b31637b",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 243,
"avg_line_length": 98.82038834951456,
"alnum_prop": 0.5534214275187896,
"repo_name": "johan--/Geotrek",
"id": "54bd2109823bfaf63e03991e1fee1c99e8d19988",
"size": "20382",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "geotrek/trekking/migrations/0002_auto__add_field_difficultylevel_pictogram.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "12725"
},
{
"name": "HTML",
"bytes": "94850"
},
{
"name": "JavaScript",
"bytes": "184551"
},
{
"name": "Makefile",
"bytes": "4038"
},
{
"name": "PLpgSQL",
"bytes": "73761"
},
{
"name": "Python",
"bytes": "2309827"
},
{
"name": "Shell",
"bytes": "16247"
}
],
"symlink_target": ""
}
|
"""
edacc.web
---------
In this module the flask application instance is defined and configured
according to the settings in config.py.
:copyright: (c) 2010 by Daniel Diepold.
:license: MIT, see LICENSE for details.
"""
import uuid, datetime, os
from jinja2 import FileSystemBytecodeCache
from werkzeug import ImmutableDict
from flask import Flask, Request, g, Blueprint
from flask.ext.cache import Cache
from flask.ext.mail import Mail
from simplekv.fs import FilesystemStore
from flask.ext.kvsession import KVSessionExtension
from edacc import config, models, utils
# Make sure the temp directory (used for the jinja2 bytecode cache, the
# filesystem response cache and the session store) exists; an OSError here
# most likely means it is already present, which is fine.
try:
    os.makedirs(config.TEMP_DIR)
except OSError:
    pass

Flask.jinja_options = ImmutableDict({
    'extensions': ['jinja2.ext.autoescape', 'jinja2.ext.with_'],
    'bytecode_cache': FileSystemBytecodeCache(config.TEMP_DIR),
    'trim_blocks': True
})

app = Flask(__name__)
# BUG FIX: Flask reads the lowercase ``debug`` attribute. The previous
# ``app.Debug = config.DEBUG`` only created an unused attribute, silently
# ignoring the configured debug flag.
app.debug = config.DEBUG
cache = Cache()
mail = Mail()
#session_store = FilesystemStore(config.TEMP_DIR, perm=0600)

if config.LOGGING:
    # set up logging if configured
    import logging
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler(config.LOG_FILE)
    file_handler.setLevel(logging.WARNING)
    formatter = logging.Formatter("---------------------------\n" + \
                                  "%(asctime)s - %(name)s - " + \
                                  "%(levelname)s\n%(message)s")
    file_handler.setFormatter(formatter)
    app.logger.addHandler(file_handler)

# initialize configured database connections
for username, password, database, label, hidden in config.DEFAULT_DATABASES:
    models.add_database(username, password, database, label, hidden)
class LimitedRequest(Request):
    """Request subclass capping in-memory form data at 500 MB."""

    # 500 MB upper bound for form payloads kept in memory.
    max_form_memory_size = 500 * 2 ** 20
# Cap form uploads via the request subclass defined above.
app.request_class = LimitedRequest

# Flask / extension configuration, sourced from edacc.config.
app.config.update(
    SECRET_KEY=config.SECRET_KEY,
    PERMANENT_SESSION_LIFETIME=datetime.timedelta(days=14),
    CACHE_TYPE='filesystem',
    CACHE_DIR=config.TEMP_DIR,
    MAIL_SERVER=config.MAIL_SERVER,
    MAIL_PORT=config.MAIL_PORT,
    MAIL_USE_TLS=config.MAIL_USE_TLS,
    MAIL_USE_SSL=config.MAIL_USE_SSL,
    MAIL_USERNAME=config.MAIL_USERNAME,
    MAIL_PASSWORD=config.MAIL_PASSWORD,
    DEFAULT_MAIL_SENDER=config.DEFAULT_MAIL_SENDER
)
cache.init_app(app)
mail.init_app(app)
#KVSessionExtension(session_store, app)

# register view modules
# NOTE(review): imported mid-module, presumably so the blueprints are created
# only after ``app`` and its configuration exist (circular-import avoidance);
# confirm before moving these to the top of the file.
from edacc.views.admin import admin
from edacc.views.accounts import accounts
from edacc.views.frontend import frontend
from edacc.views.analysis import analysis
from edacc.views.plot import plot
from edacc.views.api import api

app.register_blueprint(admin)
app.register_blueprint(accounts)
app.register_blueprint(frontend)
app.register_blueprint(analysis)
app.register_blueprint(plot)
app.register_blueprint(api)

from edacc.plugins.borgexplorer import borgexplorer
app.register_blueprint(borgexplorer)

# Jinja2 template filters provided by edacc.utils.
app.jinja_env.filters['download_size'] = utils.download_size
app.jinja_env.filters['job_status_color'] = utils.job_status_color
app.jinja_env.filters['job_result_code_color'] = utils.job_result_code_color
app.jinja_env.filters['launch_command'] = utils.launch_command
app.jinja_env.filters['datetimeformat'] = utils.datetimeformat
app.jinja_env.filters['competition_phase'] = utils.competition_phase
app.jinja_env.filters['result_time'] = utils.result_time
app.jinja_env.filters['render_formula'] = utils.render_formula
app.jinja_env.filters['truncate_name'] = utils.truncate_name
app.jinja_env.filters['parameter_template'] = utils.parameter_template
# Only install the Piwik hook when analytics tracking is configured.
if config.PIWIK:
    @app.before_request
    def register_piwik():
        """ Attach piwik URL to g """
        g.PIWIK_URL = config.PIWIK_URL
@app.before_request
def make_unique_id():
    """ Attach an unique ID to the request """
    # Random hex id on flask.g, regenerated for every request.
    g.unique_id = uuid.uuid4().hex
@app.after_request
def shutdown_session(response):
    """ remove SQLAlchemy session from thread after requests - might not even be needed for
        non-declarative SQLAlchemy usage according to the SQLAlchemy documentation.

        Returns the response unchanged.
    """
    # ``values()`` instead of the Python 2-only ``itervalues()``: iteration
    # behavior is identical on Python 2 and keeps the hook working on Python 3.
    for db in models.get_databases().values():
        db.session.remove()
    return response
|
{
"content_hash": "e865f070d5a0f6482a2d6c3e49c8f8d2",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 91,
"avg_line_length": 32.30769230769231,
"alnum_prop": 0.7271428571428571,
"repo_name": "ceari/edacc_web",
"id": "7d1e02c22a40cd832bc69679abfa6b069a46568c",
"size": "4224",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "edacc/web.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1208"
},
{
"name": "CSS",
"bytes": "35306"
},
{
"name": "HTML",
"bytes": "284482"
},
{
"name": "JavaScript",
"bytes": "312273"
},
{
"name": "Makefile",
"bytes": "141"
},
{
"name": "Python",
"bytes": "571699"
}
],
"symlink_target": ""
}
|
import factory
import random
from datetime import timedelta
from datetime import datetime
from django.contrib.auth.models import User
from competition.models import (Competition, Game, GameScore, Team,
Organizer, OrganizerRole,
Registration, Invitation)
from competition.models import RegistrationQuestion as Question
from competition.models import RegistrationQuestionChoice as Choice
from competition.models import RegistrationQuestionResponse as Response
def now(_=None):
    """Return the current local time.

    The single optional argument is accepted and discarded so this function
    can be plugged straight into a factory_boy LazyAttribute.
    """
    return datetime.now()
def later(_=None):
    """Return the current local time plus 12 hours.

    The single optional argument is accepted and discarded so this function
    can be plugged straight into a factory_boy LazyAttribute.
    """
    return datetime.now() + timedelta(hours=12)
class UserFactory(factory.django.DjangoModelFactory):
    """Factory for django.contrib.auth User objects ('user0', 'user1', ...)."""
    FACTORY_FOR = User
    username = factory.Sequence(lambda n: 'user{}'.format(n))
    password = "123"
    @classmethod
    def _prepare(cls, create, **kwargs):
        # 'password' must not be assigned as a plain attribute: pop it and
        # hash it through set_password() after the instance is built.
        password = kwargs.pop('password', None)
        user = super(UserFactory, cls)._prepare(create, **kwargs)
        if password:
            user.set_password(password)
            # Persist the hashed password only when the factory is saving.
            if create:
                user.save()
        return user
class CompetitionFactory(factory.django.DjangoModelFactory):
    """Factory for Competition objects running from now() until later()."""
    FACTORY_FOR = Competition
    name = factory.Sequence(lambda n: "MegaMinerAI: %s" % n)
    start_time = factory.LazyAttribute(now)
    end_time = factory.LazyAttribute(later)
    cost = 8.00
    min_num_team_members = 1
    max_num_team_members = 3
    description = "This is the best MegaMinerAI ever yay!"
class TeamFactory(factory.django.DjangoModelFactory):
    FACTORY_FOR = Team

    competition = factory.SubFactory(CompetitionFactory)
    name = factory.Sequence(lambda n: "Team #%s" % n)

    @classmethod
    def _prepare(cls, create, **kwargs):
        """Build the team, then populate it with freshly registered members."""
        member_count = int(kwargs.pop('num_members', 3))
        team = super(TeamFactory, cls)._prepare(create, **kwargs)
        # Only seed members for brand-new, empty teams.
        if team.members.count() == 0:
            for _ in range(member_count):
                member = UserFactory.create()
                RegistrationFactory.create(user=member,
                                           competition=team.competition)
                team.add_team_member(member)
        return team
class GameFactory(factory.django.DjangoModelFactory):
    """Factory for Game objects tied to a fresh Competition."""
    FACTORY_FOR = Game
    competition = factory.SubFactory(CompetitionFactory)
    start_time = factory.LazyAttribute(now)
    # NOTE(review): end_time also uses now() (not later()), giving a
    # zero-length game - confirm this is intentional.
    end_time = factory.LazyAttribute(now)
class GameScoreFactory(factory.django.DjangoModelFactory):
    """Factory for GameScore objects with a random score in [0, 100]."""
    FACTORY_FOR = GameScore
    game = factory.SubFactory(GameFactory)
    team = factory.SubFactory(TeamFactory)
    score = factory.LazyAttribute(lambda _: random.randint(0, 100))
class OrganizerRoleFactory(factory.django.DjangoModelFactory):
    """Factory for OrganizerRole objects ('Role #0', 'Role #1', ...)."""
    FACTORY_FOR = OrganizerRole
    name = factory.Sequence(lambda n: "Role #%s" % n)
    description = "Role description."
class OrganizerFactory(factory.django.DjangoModelFactory):
    """Factory for Organizer objects; attaches ``num_roles`` roles (default 1)."""
    FACTORY_FOR = Organizer
    user = factory.SubFactory(UserFactory)
    competition = factory.SubFactory(CompetitionFactory)
    @classmethod
    def _prepare(cls, create, **kwargs):
        """Add a role to the Organizer"""
        # 'num_roles' is a factory-only knob, not a model field.
        num_roles = int(kwargs.pop('num_roles', 1))
        organizer = super(OrganizerFactory, cls)._prepare(create, **kwargs)
        if num_roles > 0:
            for _i in range(num_roles):
                organizer.role.add(OrganizerRoleFactory.create())
        return organizer
class RegistrationFactory(factory.django.DjangoModelFactory):
    """Factory registering a fresh user for a fresh competition."""
    FACTORY_FOR = Registration
    user = factory.SubFactory(UserFactory)
    competition = factory.SubFactory(CompetitionFactory)
class RegistrationQuestionFactory(factory.django.DjangoModelFactory):
    """Factory for registration questions with a randomly chosen type."""
    FACTORY_FOR = Question
    question = factory.Sequence(lambda n: "Question #%s" % n)
    # Pick the code (first tuple element) of a random QUESTION_TYPES entry.
    question_type = factory.LazyAttribute(
        lambda _: random.choice(Question.QUESTION_TYPES)[0]
    )
    @classmethod
    def _prepare(cls, create, **kwargs):
        # 'num_choices' is a factory-only knob, not a model field.
        num_choices = int(kwargs.pop('num_choices', 4))
        q = super(RegistrationQuestionFactory, cls)._prepare(create, **kwargs)
        # Only single-choice ('SC') and multiple-choice ('MC') questions
        # need answer choices attached.
        if q.question_type in ('SC', 'MC'):
            for _i in range(num_choices):
                RegistrationQuestionChoiceFactory.create(question=q)
        return q
class RegistrationQuestionChoiceFactory(factory.django.DjangoModelFactory):
    """Factory for answer choices belonging to a registration question."""
    FACTORY_FOR = Choice
    question = factory.SubFactory(RegistrationQuestionFactory)
    choice = factory.Sequence(lambda n: "Choice #%s" % n)
class RegistrationQuestionResponseFactory(factory.django.DjangoModelFactory):
    """Factory linking a registration to a question it answered."""
    FACTORY_FOR = Response
    question = factory.SubFactory(RegistrationQuestionFactory)
    registration = factory.SubFactory(RegistrationFactory)
class InvitationFactory(factory.django.DjangoModelFactory):
    """Factory for team invitations between two fresh users."""
    FACTORY_FOR = Invitation
    team = factory.SubFactory(TeamFactory)
    sender = factory.SubFactory(UserFactory)
    receiver = factory.SubFactory(UserFactory)
    message = factory.Sequence(lambda n: "Message #%s" % n)
|
{
"content_hash": "b212208aa4e378c95b5986d187809936",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 80,
"avg_line_length": 32.5398773006135,
"alnum_prop": 0.6919306184012066,
"repo_name": "michaelwisely/django-competition",
"id": "dbe15c32a7aaa0fae411066799675808d8d560c7",
"size": "5304",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/competition/tests/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3975"
},
{
"name": "Cucumber",
"bytes": "1592"
},
{
"name": "HTML",
"bytes": "41099"
},
{
"name": "JavaScript",
"bytes": "2111"
},
{
"name": "Makefile",
"bytes": "782"
},
{
"name": "Python",
"bytes": "290534"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict

# Expected parsed representation for an alpha-upper list whose two items each
# contain a single paragraph.
expected = OrderedDict()
expected["type"] = "list"
expected["prefix"] = "alpha-upper"
expected["items"] = [
    [OrderedDict([("type", "paragraph"), ("text", u"List paragraph one")])],
    [OrderedDict([("type", "paragraph"), ("text", u"List paragraph two")])],
]
|
{
"content_hash": "d3b76972b3e32e3e5acebf8683a02d80",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 88,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.44155844155844154,
"repo_name": "elifesciences/elife-tools",
"id": "3a787cdf74b0c854d4fb369665a178e0d2ffcd2a",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/fixtures/test_body_block_content/content_23_expected.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "541611"
},
{
"name": "Shell",
"bytes": "606"
}
],
"symlink_target": ""
}
|
from openstack.tests.unit import base
from openstack.identity.v2 import role
IDENTIFIER = 'IDENTIFIER'

# Canonical attribute payload exercised by TestRole.test_make_it below.
EXAMPLE = dict(
    enabled='True',
    description='1',
    id=IDENTIFIER,
    name='3',
)
class TestRole(base.TestCase):

    def test_basic(self):
        """Resource metadata and capability flags of the v2 Role resource."""
        sot = role.Role()
        self.assertEqual('role', sot.resource_key)
        self.assertEqual('roles', sot.resources_key)
        self.assertEqual('/OS-KSADM/roles', sot.base_path)
        # Role supports the full set of CRUD + list operations.
        for capability in (sot.allow_create, sot.allow_fetch,
                           sot.allow_commit, sot.allow_delete,
                           sot.allow_list):
            self.assertTrue(capability)

    def test_make_it(self):
        """Attributes built from EXAMPLE land on the resource instance."""
        sot = role.Role(**EXAMPLE)
        self.assertEqual(EXAMPLE['description'], sot.description)
        self.assertEqual(EXAMPLE['id'], sot.id)
        self.assertEqual(EXAMPLE['name'], sot.name)
        self.assertTrue(sot.is_enabled)
|
{
"content_hash": "4827b45ff5d9f82db84c9ac693364105",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 65,
"avg_line_length": 29.09375,
"alnum_prop": 0.6412459720730398,
"repo_name": "stackforge/python-openstacksdk",
"id": "bb7b9f79a7dcceb4848157f9d435d08ad6001790",
"size": "1477",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "openstack/tests/unit/identity/v2/test_role.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1138292"
},
{
"name": "Shell",
"bytes": "1792"
}
],
"symlink_target": ""
}
|
"""Implement the Google Smart Home traits."""
from __future__ import annotations
import logging
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
cover,
fan,
group,
input_boolean,
input_select,
light,
lock,
media_player,
scene,
script,
select,
sensor,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.components.humidifier import const as humidifier
from homeassistant.components.lock import STATE_JAMMED, STATE_UNLOCKING
from homeassistant.components.media_player.const import MEDIA_TYPE_CHANNEL
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_BATTERY_LEVEL,
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
CAST_APP_ID_HOMEASSISTANT,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_IDLE,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.helpers.network import get_url
from homeassistant.util import color as color_util, dt, temperature as temp_util
from .const import (
CHALLENGE_ACK_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
CHALLENGE_PIN_NEEDED,
ERR_ALREADY_ARMED,
ERR_ALREADY_DISARMED,
ERR_ALREADY_STOPPED,
ERR_CHALLENGE_NOT_SETUP,
ERR_FUNCTION_NOT_SUPPORTED,
ERR_NO_AVAILABLE_CHANNEL,
ERR_NOT_SUPPORTED,
ERR_UNSUPPORTED_INPUT,
ERR_VALUE_OUT_OF_RANGE,
)
from .error import ChallengeNeeded, SmartHomeError
_LOGGER = logging.getLogger(__name__)
PREFIX_TRAITS = "action.devices.traits."
TRAIT_CAMERA_STREAM = f"{PREFIX_TRAITS}CameraStream"
TRAIT_ONOFF = f"{PREFIX_TRAITS}OnOff"
TRAIT_DOCK = f"{PREFIX_TRAITS}Dock"
TRAIT_STARTSTOP = f"{PREFIX_TRAITS}StartStop"
TRAIT_BRIGHTNESS = f"{PREFIX_TRAITS}Brightness"
TRAIT_COLOR_SETTING = f"{PREFIX_TRAITS}ColorSetting"
TRAIT_SCENE = f"{PREFIX_TRAITS}Scene"
TRAIT_TEMPERATURE_SETTING = f"{PREFIX_TRAITS}TemperatureSetting"
TRAIT_TEMPERATURE_CONTROL = f"{PREFIX_TRAITS}TemperatureControl"
TRAIT_LOCKUNLOCK = f"{PREFIX_TRAITS}LockUnlock"
TRAIT_FANSPEED = f"{PREFIX_TRAITS}FanSpeed"
TRAIT_MODES = f"{PREFIX_TRAITS}Modes"
TRAIT_INPUTSELECTOR = f"{PREFIX_TRAITS}InputSelector"
TRAIT_OPENCLOSE = f"{PREFIX_TRAITS}OpenClose"
TRAIT_VOLUME = f"{PREFIX_TRAITS}Volume"
TRAIT_ARMDISARM = f"{PREFIX_TRAITS}ArmDisarm"
TRAIT_HUMIDITY_SETTING = f"{PREFIX_TRAITS}HumiditySetting"
TRAIT_TRANSPORT_CONTROL = f"{PREFIX_TRAITS}TransportControl"
TRAIT_MEDIA_STATE = f"{PREFIX_TRAITS}MediaState"
TRAIT_CHANNEL = f"{PREFIX_TRAITS}Channel"
TRAIT_LOCATOR = f"{PREFIX_TRAITS}Locator"
TRAIT_ENERGYSTORAGE = f"{PREFIX_TRAITS}EnergyStorage"
TRAIT_SENSOR_STATE = f"{PREFIX_TRAITS}SensorState"
PREFIX_COMMANDS = "action.devices.commands."
COMMAND_ONOFF = f"{PREFIX_COMMANDS}OnOff"
COMMAND_GET_CAMERA_STREAM = f"{PREFIX_COMMANDS}GetCameraStream"
COMMAND_DOCK = f"{PREFIX_COMMANDS}Dock"
COMMAND_STARTSTOP = f"{PREFIX_COMMANDS}StartStop"
COMMAND_PAUSEUNPAUSE = f"{PREFIX_COMMANDS}PauseUnpause"
COMMAND_BRIGHTNESS_ABSOLUTE = f"{PREFIX_COMMANDS}BrightnessAbsolute"
COMMAND_COLOR_ABSOLUTE = f"{PREFIX_COMMANDS}ColorAbsolute"
COMMAND_ACTIVATE_SCENE = f"{PREFIX_COMMANDS}ActivateScene"
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
f"{PREFIX_COMMANDS}ThermostatTemperatureSetpoint"
)
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
f"{PREFIX_COMMANDS}ThermostatTemperatureSetRange"
)
COMMAND_THERMOSTAT_SET_MODE = f"{PREFIX_COMMANDS}ThermostatSetMode"
COMMAND_LOCKUNLOCK = f"{PREFIX_COMMANDS}LockUnlock"
COMMAND_FANSPEED = f"{PREFIX_COMMANDS}SetFanSpeed"
COMMAND_FANSPEEDRELATIVE = f"{PREFIX_COMMANDS}SetFanSpeedRelative"
COMMAND_MODES = f"{PREFIX_COMMANDS}SetModes"
COMMAND_INPUT = f"{PREFIX_COMMANDS}SetInput"
COMMAND_NEXT_INPUT = f"{PREFIX_COMMANDS}NextInput"
COMMAND_PREVIOUS_INPUT = f"{PREFIX_COMMANDS}PreviousInput"
COMMAND_OPENCLOSE = f"{PREFIX_COMMANDS}OpenClose"
COMMAND_OPENCLOSE_RELATIVE = f"{PREFIX_COMMANDS}OpenCloseRelative"
COMMAND_SET_VOLUME = f"{PREFIX_COMMANDS}setVolume"
COMMAND_VOLUME_RELATIVE = f"{PREFIX_COMMANDS}volumeRelative"
COMMAND_MUTE = f"{PREFIX_COMMANDS}mute"
COMMAND_ARMDISARM = f"{PREFIX_COMMANDS}ArmDisarm"
COMMAND_MEDIA_NEXT = f"{PREFIX_COMMANDS}mediaNext"
COMMAND_MEDIA_PAUSE = f"{PREFIX_COMMANDS}mediaPause"
COMMAND_MEDIA_PREVIOUS = f"{PREFIX_COMMANDS}mediaPrevious"
COMMAND_MEDIA_RESUME = f"{PREFIX_COMMANDS}mediaResume"
COMMAND_MEDIA_SEEK_RELATIVE = f"{PREFIX_COMMANDS}mediaSeekRelative"
COMMAND_MEDIA_SEEK_TO_POSITION = f"{PREFIX_COMMANDS}mediaSeekToPosition"
COMMAND_MEDIA_SHUFFLE = f"{PREFIX_COMMANDS}mediaShuffle"
COMMAND_MEDIA_STOP = f"{PREFIX_COMMANDS}mediaStop"
COMMAND_REVERSE = f"{PREFIX_COMMANDS}Reverse"
COMMAND_SET_HUMIDITY = f"{PREFIX_COMMANDS}SetHumidity"
COMMAND_SELECT_CHANNEL = f"{PREFIX_COMMANDS}selectChannel"
COMMAND_LOCATE = f"{PREFIX_COMMANDS}Locate"
COMMAND_CHARGE = f"{PREFIX_COMMANDS}Charge"
TRAITS = []
def register_trait(trait):
    """Class decorator: append *trait* to the module-level TRAITS registry and return it unchanged."""
    TRAITS.append(trait)
    return trait
def _google_temp_unit(units):
    """Map a Home Assistant temperature unit to Google's "F"/"C" notation."""
    return "F" if units == TEMP_FAHRENHEIT else "C"
def _next_selected(items: list[str], selected: str | None) -> str | None:
"""Return the next item in a item list starting at given value.
If selected is missing in items, None is returned
"""
try:
index = items.index(selected)
except ValueError:
return None
next_item = 0 if index == len(items) - 1 else index + 1
return items[next_item]
class _Trait:
"""Represents a Trait inside Google Assistant skill."""
commands = []
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return False
def __init__(self, hass, state, config):
"""Initialize a trait for a state."""
self.hass = hass
self.state = state
self.config = config
def sync_attributes(self):
"""Return attributes for a sync request."""
raise NotImplementedError
def query_attributes(self):
"""Return the attributes of this trait for this entity."""
raise NotImplementedError
def can_execute(self, command, params):
"""Test if command can be executed."""
return command in self.commands
async def execute(self, command, data, params, challenge):
"""Execute a trait command."""
raise NotImplementedError
@register_trait
class BrightnessTrait(_Trait):
    """Trait to control brightness of a device.

    https://developers.google.com/actions/smarthome/traits/brightness
    """

    name = TRAIT_BRIGHTNESS
    commands = [COMMAND_BRIGHTNESS_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class, attributes):
        """Test if state is supported."""
        if domain != light.DOMAIN:
            return False
        return light.brightness_supported(
            attributes.get(light.ATTR_SUPPORTED_COLOR_MODES)
        )

    def sync_attributes(self):
        """Return brightness attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return brightness query attributes."""
        if self.state.domain != light.DOMAIN:
            return {}
        brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS)
        if brightness is None:
            return {"brightness": 0}
        # Scale HA's 0-255 brightness to Google's 0-100 percentage.
        return {"brightness": int(100 * (brightness / 255))}

    async def execute(self, command, data, params, challenge):
        """Execute a brightness command."""
        if self.state.domain != light.DOMAIN:
            return
        await self.hass.services.async_call(
            light.DOMAIN,
            light.SERVICE_TURN_ON,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                light.ATTR_BRIGHTNESS_PCT: params["brightness"],
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class CameraStreamTrait(_Trait):
    """Trait to stream from cameras.

    https://developers.google.com/actions/smarthome/traits/camerastream
    """

    name = TRAIT_CAMERA_STREAM
    commands = [COMMAND_GET_CAMERA_STREAM]

    # Populated by execute() once a stream URL has been requested.
    stream_info = None

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == camera.DOMAIN and features & camera.SUPPORT_STREAM

    def sync_attributes(self):
        """Return stream attributes for a sync request."""
        return {
            "cameraStreamSupportedProtocols": ["hls"],
            "cameraStreamNeedAuthToken": False,
            "cameraStreamNeedDrmEncryption": False,
        }

    def query_attributes(self):
        """Return camera stream attributes."""
        if not self.stream_info:
            return {}
        return self.stream_info

    async def execute(self, command, data, params, challenge):
        """Execute a get camera stream command."""
        url = await self.hass.components.camera.async_request_stream(
            self.state.entity_id, "hls"
        )
        self.stream_info = {
            "cameraStreamAccessUrl": f"{get_url(self.hass)}{url}",
            "cameraStreamReceiverAppId": CAST_APP_ID_HOMEASSISTANT,
        }
@register_trait
class OnOffTrait(_Trait):
    """Trait to offer basic on and off functionality.

    https://developers.google.com/actions/smarthome/traits/onoff
    """

    name = TRAIT_ONOFF
    commands = [COMMAND_ONOFF]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain in (
            group.DOMAIN,
            input_boolean.DOMAIN,
            switch.DOMAIN,
            fan.DOMAIN,
            light.DOMAIN,
            media_player.DOMAIN,
            humidifier.DOMAIN,
        )

    def sync_attributes(self):
        """Return OnOff attributes for a sync request."""
        # Assumed-state entities can only receive commands; their state
        # cannot be queried reliably.
        if self.state.attributes.get(ATTR_ASSUMED_STATE, False):
            return {"commandOnlyOnOff": True}
        return {}

    def query_attributes(self):
        """Return OnOff query attributes."""
        return {"on": self.state.state not in (STATE_OFF, STATE_UNKNOWN)}

    async def execute(self, command, data, params, challenge):
        """Execute an OnOff command.

        The service choice was duplicated in both branches of the original
        if/else; only the service domain actually depends on the entity type.
        Groups are switched through homeassistant.turn_on/turn_off.
        """
        domain = self.state.domain
        service_domain = HA_DOMAIN if domain == group.DOMAIN else domain
        service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF
        await self.hass.services.async_call(
            service_domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ColorSettingTrait(_Trait):
    """Trait to offer color setting (hue/saturation and color temperature).
    https://developers.google.com/actions/smarthome/traits/colortemperature
    """
    name = TRAIT_COLOR_SETTING
    commands = [COMMAND_COLOR_ABSOLUTE]
    @staticmethod
    def supported(domain, features, device_class, attributes):
        """Test if state is supported: a light with color and/or color temp."""
        if domain != light.DOMAIN:
            return False
        color_modes = attributes.get(light.ATTR_SUPPORTED_COLOR_MODES)
        return light.color_temp_supported(color_modes) or light.color_supported(
            color_modes
        )
    def sync_attributes(self):
        """Return color setting attributes for a sync request."""
        attrs = self.state.attributes
        color_modes = attrs.get(light.ATTR_SUPPORTED_COLOR_MODES)
        response = {}
        if light.color_supported(color_modes):
            response["colorModel"] = "hsv"
        if light.color_temp_supported(color_modes):
            # Kelvin and mireds are inverses (K = 1000000 / mireds), so the
            # light's MIN mireds maps to Google's MAX Kelvin and vice versa.
            response["colorTemperatureRange"] = {
                "temperatureMaxK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MIN_MIREDS)
                ),
                "temperatureMinK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MAX_MIREDS)
                ),
            }
        return response
    def query_attributes(self):
        """Return color setting query attributes."""
        color_mode = self.state.attributes.get(light.ATTR_COLOR_MODE)
        color = {}
        if light.color_supported([color_mode]):
            color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
            # Brightness defaults to 1 so "value" is non-zero when unknown.
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
            if color_hs is not None:
                # Scale HA saturation (/100) and brightness (/255) down to
                # Google's 0-1 ranges; hue is passed through unchanged.
                color["spectrumHsv"] = {
                    "hue": color_hs[0],
                    "saturation": color_hs[1] / 100,
                    "value": brightness / 255,
                }
        if light.color_temp_supported([color_mode]):
            temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
            # Some faulty integrations might put 0 in here, raising exception.
            if temp == 0:
                _LOGGER.warning(
                    "Entity %s has incorrect color temperature %s",
                    self.state.entity_id,
                    temp,
                )
            elif temp is not None:
                color["temperatureK"] = color_util.color_temperature_mired_to_kelvin(
                    temp
                )
        response = {}
        if color:
            response["color"] = color
        return response
    async def execute(self, command, data, params, challenge):
        """Execute a color command (temperature, spectrumRGB or spectrumHSV)."""
        if "temperature" in params["color"]:
            # Google sends Kelvin; the light service expects mireds.
            temp = color_util.color_temperature_kelvin_to_mired(
                params["color"]["temperature"]
            )
            min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
            max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp},
                blocking=True,
                context=data.context,
            )
        elif "spectrumRGB" in params["color"]:
            # Convert integer to hex format and left pad with 0's till length 6
            hex_value = f"{params['color']['spectrumRGB']:06x}"
            color = color_util.color_RGB_to_hs(
                *color_util.rgb_hex_to_rgb_list(hex_value)
            )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color},
                blocking=True,
                context=data.context,
            )
        elif "spectrumHSV" in params["color"]:
            color = params["color"]["spectrumHSV"]
            # Google sends saturation and value in 0-1; convert to HA's
            # 0-100 saturation and 0-255 brightness scales.
            saturation = color["saturation"] * 100
            brightness = color["value"] * 255
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_HS_COLOR: [color["hue"], saturation],
                    light.ATTR_BRIGHTNESS: brightness,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class SceneTrait(_Trait):
    """Trait to offer scene functionality.

    https://developers.google.com/actions/smarthome/traits/scene
    """

    name = TRAIT_SCENE
    commands = [COMMAND_ACTIVATE_SCENE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain in (scene.DOMAIN, script.DOMAIN)

    def sync_attributes(self):
        """Return scene attributes for a sync request."""
        # Neither supported domain can support sceneReversible.
        return {}

    def query_attributes(self):
        """Return scene query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a scene command."""
        # Don't block for scripts as they can be slow.
        should_block = self.state.domain != script.DOMAIN
        await self.hass.services.async_call(
            self.state.domain,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=should_block,
            context=data.context,
        )
@register_trait
class DockTrait(_Trait):
    """Trait to offer dock functionality.

    https://developers.google.com/actions/smarthome/traits/dock
    """

    name = TRAIT_DOCK
    commands = [COMMAND_DOCK]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return dock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return dock query attributes."""
        docked = self.state.state == vacuum.STATE_DOCKED
        return {"isDocked": docked}

    async def execute(self, command, data, params, challenge):
        """Execute a dock command by sending the vacuum back to its base."""
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_RETURN_TO_BASE,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class LocatorTrait(_Trait):
    """Trait to offer locate functionality.

    https://developers.google.com/actions/smarthome/traits/locator
    """

    name = TRAIT_LOCATOR
    commands = [COMMAND_LOCATE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN and features & vacuum.SUPPORT_LOCATE

    def sync_attributes(self):
        """Return locator attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return locator query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a locate command."""
        # Silencing the locate signal has no corresponding HA service.
        if params.get("silence", False):
            raise SmartHomeError(
                ERR_FUNCTION_NOT_SUPPORTED,
                "Silencing a Locate request is not yet supported",
            )
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_LOCATE,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class EnergyStorageTrait(_Trait):
    """Trait to offer EnergyStorage functionality.

    https://developers.google.com/actions/smarthome/traits/energystorage
    """

    name = TRAIT_ENERGYSTORAGE
    commands = [COMMAND_CHARGE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN and features & vacuum.SUPPORT_BATTERY

    def sync_attributes(self):
        """Return EnergyStorage attributes for a sync request."""
        return {
            "isRechargeable": True,
            "queryOnlyEnergyStorage": True,
        }

    def query_attributes(self):
        """Return EnergyStorage query attributes."""
        battery_level = self.state.attributes.get(ATTR_BATTERY_LEVEL)
        if battery_level is None:
            # No battery level reported: return nothing instead of crashing
            # in the numeric comparisons below (75 <= None raises TypeError).
            return {}
        # Map the numeric level onto Google's descriptive buckets. The final
        # else also covers out-of-range values, so the variable is always
        # bound (the previous elif-chain left it unbound outside 0-100).
        if battery_level >= 100:
            descriptive_capacity_remaining = "FULL"
        elif battery_level >= 75:
            descriptive_capacity_remaining = "HIGH"
        elif battery_level >= 50:
            descriptive_capacity_remaining = "MEDIUM"
        elif battery_level >= 25:
            descriptive_capacity_remaining = "LOW"
        else:
            descriptive_capacity_remaining = "CRITICALLY_LOW"
        return {
            "descriptiveCapacityRemaining": descriptive_capacity_remaining,
            "capacityRemaining": [{"rawValue": battery_level, "unit": "PERCENTAGE"}],
            "capacityUntilFull": [
                {"rawValue": 100 - battery_level, "unit": "PERCENTAGE"}
            ],
            # A docked vacuum is treated as charging/plugged in.
            "isCharging": self.state.state == vacuum.STATE_DOCKED,
            "isPluggedIn": self.state.state == vacuum.STATE_DOCKED,
        }

    async def execute(self, command, data, params, challenge):
        """Execute a charge command (unsupported; this trait is query-only)."""
        raise SmartHomeError(
            ERR_FUNCTION_NOT_SUPPORTED,
            "Controlling charging of a vacuum is not yet supported",
        )
@register_trait
class StartStopTrait(_Trait):
    """Trait to offer StartStop functionality.
    Vacuums support start/stop/pause; covers only support stopping motion.
    https://developers.google.com/actions/smarthome/traits/startstop
    """
    name = TRAIT_STARTSTOP
    commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE]
    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == vacuum.DOMAIN:
            return True
        if domain == cover.DOMAIN and features & cover.SUPPORT_STOP:
            return True
        return False
    def sync_attributes(self):
        """Return StartStop attributes for a sync request."""
        # supported() restricts this trait to vacuum and cover, so the
        # implicit None fall-through below is unreachable in practice.
        domain = self.state.domain
        if domain == vacuum.DOMAIN:
            return {
                "pausable": self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & vacuum.SUPPORT_PAUSE
                != 0
            }
        if domain == cover.DOMAIN:
            return {}
    def query_attributes(self):
        """Return StartStop query attributes."""
        domain = self.state.domain
        state = self.state.state
        if domain == vacuum.DOMAIN:
            return {
                "isRunning": state == vacuum.STATE_CLEANING,
                "isPaused": state == vacuum.STATE_PAUSED,
            }
        if domain == cover.DOMAIN:
            # A cover counts as "running" while moving in either direction.
            return {"isRunning": state in (cover.STATE_CLOSING, cover.STATE_OPENING)}
    async def execute(self, command, data, params, challenge):
        """Execute a StartStop command by dispatching on the entity's domain."""
        domain = self.state.domain
        if domain == vacuum.DOMAIN:
            return await self._execute_vacuum(command, data, params, challenge)
        if domain == cover.DOMAIN:
            return await self._execute_cover(command, data, params, challenge)
    async def _execute_vacuum(self, command, data, params, challenge):
        """Execute a StartStop command for a vacuum."""
        if command == COMMAND_STARTSTOP:
            if params["start"]:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_START,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
            else:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_STOP,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
        elif command == COMMAND_PAUSEUNPAUSE:
            if params["pause"]:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_PAUSE,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
            else:
                # Unpausing maps to the start service.
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_START,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
    async def _execute_cover(self, command, data, params, challenge):
        """Execute a StartStop command for a cover (stopping only)."""
        if command == COMMAND_STARTSTOP:
            if params["start"] is False:
                # Stop only if the cover is actually moving, or if its state
                # is assumed and we therefore cannot tell.
                if (
                    self.state.state
                    in (
                        cover.STATE_CLOSING,
                        cover.STATE_OPENING,
                    )
                    or self.state.attributes.get(ATTR_ASSUMED_STATE)
                ):
                    await self.hass.services.async_call(
                        self.state.domain,
                        cover.SERVICE_STOP_COVER,
                        {ATTR_ENTITY_ID: self.state.entity_id},
                        blocking=True,
                        context=data.context,
                    )
                else:
                    raise SmartHomeError(
                        ERR_ALREADY_STOPPED, "Cover is already stopped"
                    )
            else:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Starting a cover is not supported"
                )
        else:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, f"Command {command} is not supported"
            )
@register_trait
class TemperatureControlTrait(_Trait):
    """Trait for devices (other than thermostats) that support controlling temperature.

    Workaround for temperature sensors.
    https://developers.google.com/assistant/smarthome/traits/temperaturecontrol
    """

    name = TRAIT_TEMPERATURE_CONTROL

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain != sensor.DOMAIN:
            return False
        return device_class == sensor.DEVICE_CLASS_TEMPERATURE

    def sync_attributes(self):
        """Return temperature attributes for a sync request."""
        return {
            "temperatureUnitForUX": _google_temp_unit(
                self.hass.config.units.temperature_unit
            ),
            "queryOnlyTemperatureSetting": True,
            "temperatureRange": {
                "minThresholdCelsius": -100,
                "maxThresholdCelsius": 100,
            },
        }

    def query_attributes(self):
        """Return temperature states."""
        raw_state = self.state.state
        if raw_state in (STATE_UNKNOWN, STATE_UNAVAILABLE):
            return {}
        unit = self.hass.config.units.temperature_unit
        celsius = round(temp_util.convert(float(raw_state), unit, TEMP_CELSIUS), 1)
        # Google requires both fields even for a read-only sensor.
        return {
            "temperatureSetpointCelsius": celsius,
            "temperatureAmbientCelsius": celsius,
        }

    async def execute(self, command, data, params, challenge):
        """Unsupported."""
        raise SmartHomeError(ERR_NOT_SUPPORTED, "Execute is not supported by sensor")
@register_trait
class TemperatureSettingTrait(_Trait):
    """Trait to offer handling both temperature point and modes functionality.

    https://developers.google.com/actions/smarthome/traits/temperaturesetting
    """

    name = TRAIT_TEMPERATURE_SETTING
    commands = [
        COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
        COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
        COMMAND_THERMOSTAT_SET_MODE,
    ]
    # We do not support "on" as we are unable to know how to restore
    # the last mode.
    hvac_to_google = {
        climate.HVAC_MODE_HEAT: "heat",
        climate.HVAC_MODE_COOL: "cool",
        climate.HVAC_MODE_OFF: "off",
        climate.HVAC_MODE_AUTO: "auto",
        climate.HVAC_MODE_HEAT_COOL: "heatcool",
        climate.HVAC_MODE_FAN_ONLY: "fan-only",
        climate.HVAC_MODE_DRY: "dry",
    }
    google_to_hvac = {value: key for key, value in hvac_to_google.items()}
    preset_to_google = {climate.PRESET_ECO: "eco"}
    google_to_preset = {value: key for key, value in preset_to_google.items()}

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == climate.DOMAIN

    @property
    def climate_google_modes(self):
        """Return supported Google modes (HVAC modes plus mapped presets)."""
        modes = []
        attrs = self.state.attributes
        for mode in attrs.get(climate.ATTR_HVAC_MODES, []):
            google_mode = self.hvac_to_google.get(mode)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)
        for preset in attrs.get(climate.ATTR_PRESET_MODES, []):
            google_mode = self.preset_to_google.get(preset)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)
        return modes

    def sync_attributes(self):
        """Return temperature point and modes attributes for a sync request."""
        response = {}
        response["thermostatTemperatureUnit"] = _google_temp_unit(
            self.hass.config.units.temperature_unit
        )
        modes = self.climate_google_modes
        # Some integrations don't support modes (e.g. opentherm), but Google doesn't
        # support changing the temperature if we don't have any modes. If there's
        # only one Google doesn't support changing it, so the default mode here is
        # only cosmetic.
        if len(modes) == 0:
            modes.append("heat")
        if "off" in modes and any(
            mode in modes for mode in ("heatcool", "heat", "cool")
        ):
            modes.append("on")
        response["availableThermostatModes"] = modes
        return response

    def query_attributes(self):
        """Return temperature point and modes query attributes."""
        response = {}
        attrs = self.state.attributes
        unit = self.hass.config.units.temperature_unit
        operation = self.state.state
        preset = attrs.get(climate.ATTR_PRESET_MODE)
        supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
        # An active, mapped preset (e.g. eco) takes precedence over the
        # plain HVAC mode.
        if preset in self.preset_to_google:
            response["thermostatMode"] = self.preset_to_google[preset]
        else:
            response["thermostatMode"] = self.hvac_to_google.get(operation, "none")
        current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
        if current_temp is not None:
            response["thermostatTemperatureAmbient"] = round(
                temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1
            )
        current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
        if current_humidity is not None:
            response["thermostatHumidityAmbient"] = current_humidity
        if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL):
            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                response["thermostatTemperatureSetpointHigh"] = round(
                    temp_util.convert(
                        attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS
                    ),
                    1,
                )
                response["thermostatTemperatureSetpointLow"] = round(
                    temp_util.convert(
                        attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS
                    ),
                    1,
                )
            else:
                # No range support: report the single target as both bounds.
                if (target_temp := attrs.get(ATTR_TEMPERATURE)) is not None:
                    target_temp = round(
                        temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                    )
                    response["thermostatTemperatureSetpointHigh"] = target_temp
                    response["thermostatTemperatureSetpointLow"] = target_temp
        else:
            if (target_temp := attrs.get(ATTR_TEMPERATURE)) is not None:
                response["thermostatTemperatureSetpoint"] = round(
                    temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                )
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a temperature point or mode command."""
        # All sent in temperatures are always in Celsius
        unit = self.hass.config.units.temperature_unit
        min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
        max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]
        if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
            temp = temp_util.convert(
                params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp = round(temp)
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                {ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp},
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
            temp_high = temp_util.convert(
                params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_high = round(temp_high)
            if temp_high < min_temp or temp_high > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Upper bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            temp_low = temp_util.convert(
                params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_low = round(temp_low)
            if temp_low < min_temp or temp_low > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Lower bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            # Default to 0 (no features) if the attribute is absent, matching
            # query_attributes; a bare .get() could return None and crash on &.
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            svc_data = {ATTR_ENTITY_ID: self.state.entity_id}
            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
                svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
            else:
                # No range support: target the midpoint of the requested range.
                svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                svc_data,
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_SET_MODE:
            # (The unused `supported` lookup that used to live here was dead
            # code and has been removed.)
            target_mode = params["thermostatMode"]
            if target_mode == "on":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_ON,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            if target_mode == "off":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_OFF,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            if target_mode in self.google_to_preset:
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    climate.SERVICE_SET_PRESET_MODE,
                    {
                        climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode],
                        ATTR_ENTITY_ID: self.state.entity_id,
                    },
                    blocking=True,
                    context=data.context,
                )
                return
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_HVAC_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class HumiditySettingTrait(_Trait):
    """Trait to offer humidity setting functionality.

    https://developers.google.com/actions/smarthome/traits/humiditysetting
    """

    name = TRAIT_HUMIDITY_SETTING
    commands = [COMMAND_SET_HUMIDITY]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == humidifier.DOMAIN:
            return True
        return domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_HUMIDITY

    def sync_attributes(self):
        """Return humidity attributes for a sync request."""
        domain = self.state.domain
        attrs = self.state.attributes
        if domain == sensor.DOMAIN:
            # Humidity sensors are read-only.
            if attrs.get(ATTR_DEVICE_CLASS) == sensor.DEVICE_CLASS_HUMIDITY:
                return {"queryOnlyHumiditySetting": True}
            return {}
        if domain == humidifier.DOMAIN:
            return {
                "humiditySetpointRange": {
                    "minPercent": round(float(attrs[humidifier.ATTR_MIN_HUMIDITY])),
                    "maxPercent": round(float(attrs[humidifier.ATTR_MAX_HUMIDITY])),
                }
            }
        return {}

    def query_attributes(self):
        """Return humidity query attributes."""
        domain = self.state.domain
        attrs = self.state.attributes
        if domain == sensor.DOMAIN:
            if attrs.get(ATTR_DEVICE_CLASS) == sensor.DEVICE_CLASS_HUMIDITY:
                raw = self.state.state
                if raw not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                    return {"humidityAmbientPercent": round(float(raw))}
            return {}
        if domain == humidifier.DOMAIN:
            target = attrs.get(humidifier.ATTR_HUMIDITY)
            if target is not None:
                return {"humiditySetpointPercent": round(float(target))}
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a humidity command."""
        if self.state.domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )
        if command == COMMAND_SET_HUMIDITY:
            await self.hass.services.async_call(
                humidifier.DOMAIN,
                humidifier.SERVICE_SET_HUMIDITY,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    humidifier.ATTR_HUMIDITY: params["humidity"],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class LockUnlockTrait(_Trait):
    """Trait to lock or unlock a lock.

    https://developers.google.com/actions/smarthome/traits/lockunlock
    """

    name = TRAIT_LOCKUNLOCK
    commands = [COMMAND_LOCKUNLOCK]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == lock.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True

    def sync_attributes(self):
        """Return LockUnlock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return LockUnlock query attributes."""
        current = self.state.state
        if current == STATE_JAMMED:
            return {"isJammed": True}
        # An unlocking lock is not yet unlocked, so report it as locked.
        return {"isLocked": current in (STATE_UNLOCKING, STATE_LOCKED)}

    async def execute(self, command, data, params, challenge):
        """Execute an LockUnlock command."""
        if params["lock"]:
            service = lock.SERVICE_LOCK
        else:
            # Unlocking runs the configured PIN challenge first.
            _verify_pin_challenge(data, self.state, challenge)
            service = lock.SERVICE_UNLOCK
        await self.hass.services.async_call(
            lock.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ArmDisArmTrait(_Trait):
    """Trait to Arm or Disarm a Security System.

    https://developers.google.com/actions/smarthome/traits/armdisarm
    """

    name = TRAIT_ARMDISARM
    commands = [COMMAND_ARMDISARM]

    # Maps an alarm-panel state to the service that produces that state.
    state_to_service = {
        STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER,
    }

    # Maps an alarm-panel state to the feature flag the panel must support
    # for that state to be offered as an arm level.
    state_to_support = {
        STATE_ALARM_ARMED_HOME: alarm_control_panel.const.SUPPORT_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: alarm_control_panel.const.SUPPORT_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: alarm_control_panel.const.SUPPORT_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: alarm_control_panel.const.SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: alarm_control_panel.const.SUPPORT_ALARM_TRIGGER,
    }

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == alarm_control_panel.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True

    def _supported_states(self):
        """Return supported states.

        Filters state_to_support down to the states whose feature flag is
        set in the entity's supported_features bitmask.
        """
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        return [
            state
            for state, required_feature in self.state_to_support.items()
            if features & required_feature != 0
        ]

    def sync_attributes(self):
        """Return ArmDisarm attributes for a sync request."""
        response = {}
        levels = []
        for state in self._supported_states():
            # level synonyms are generated from state names
            # 'armed_away' becomes 'armed away' or 'away'
            level_synonym = [state.replace("_", " ")]
            if state != STATE_ALARM_TRIGGERED:
                level_synonym.append(state.split("_")[1])
            level = {
                "level_name": state,
                "level_values": [{"level_synonym": level_synonym, "lang": "en"}],
            }
            levels.append(level)

        response["availableArmLevels"] = {"levels": levels, "ordered": False}
        return response

    def query_attributes(self):
        """Return ArmDisarm query attributes."""
        # Prefer a pending target state ("next_state") over the current one
        # so Google reflects an in-progress arming transition.
        if "next_state" in self.state.attributes:
            armed_state = self.state.attributes["next_state"]
        else:
            armed_state = self.state.state
        response = {"isArmed": armed_state in self.state_to_service}
        if response["isArmed"]:
            response.update({"currentArmLevel": armed_state})
        return response

    async def execute(self, command, data, params, challenge):
        """Execute an ArmDisArm command.

        Raises SmartHomeError when the requested transition is redundant
        or the arm level cannot be determined.
        """
        if params["arm"] and not params.get("cancel"):
            # If no arm level given, we can only arm it if there is
            # only one supported arm type. We never default to triggered.
            if not (arm_level := params.get("armLevel")):
                states = self._supported_states()

                if STATE_ALARM_TRIGGERED in states:
                    states.remove(STATE_ALARM_TRIGGERED)

                if len(states) != 1:
                    raise SmartHomeError(ERR_NOT_SUPPORTED, "ArmLevel missing")

                arm_level = states[0]

            if self.state.state == arm_level:
                raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed")
            # NOTE(review): direct [] access will KeyError if the panel does
            # not expose code_arm_required — confirm all panels set it.
            if self.state.attributes["code_arm_required"]:
                _verify_pin_challenge(data, self.state, challenge)
            service = self.state_to_service[arm_level]
        # disarm the system without asking for code when
        # 'cancel' arming action is received while current status is pending
        elif (
            params["arm"]
            and params.get("cancel")
            and self.state.state == STATE_ALARM_PENDING
        ):
            service = SERVICE_ALARM_DISARM
        else:
            if self.state.state == STATE_ALARM_DISARMED:
                raise SmartHomeError(ERR_ALREADY_DISARMED, "System is already disarmed")
            # Disarming always requires the PIN challenge.
            _verify_pin_challenge(data, self.state, challenge)
            service = SERVICE_ALARM_DISARM
        await self.hass.services.async_call(
            alarm_control_panel.DOMAIN,
            service,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                ATTR_CODE: data.config.secure_devices_pin,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class FanSpeedTrait(_Trait):
    """Trait to control speed of Fan.

    https://developers.google.com/actions/smarthome/traits/fanspeed
    """

    name = TRAIT_FANSPEED
    commands = [COMMAND_FANSPEED, COMMAND_REVERSE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == fan.DOMAIN:
            return features & fan.SUPPORT_SET_SPEED
        if domain == climate.DOMAIN:
            return features & climate.SUPPORT_FAN_MODE
        return False

    def sync_attributes(self):
        """Return speed point and modes attributes for a sync request."""
        domain = self.state.domain
        speeds = []
        result = {}

        if domain == fan.DOMAIN:
            # Fans are exposed percentage-based; reversibility depends on
            # the SUPPORT_DIRECTION feature flag.
            reversible = bool(
                self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & fan.SUPPORT_DIRECTION
            )
            result.update(
                {
                    "reversible": reversible,
                    "supportsFanSpeedPercent": True,
                }
            )
        elif domain == climate.DOMAIN:
            # Climate devices expose named fan modes instead of percentages.
            modes = self.state.attributes.get(climate.ATTR_FAN_MODES) or []
            for mode in modes:
                speed = {
                    "speed_name": mode,
                    "speed_values": [{"speed_synonym": [mode], "lang": "en"}],
                }
                speeds.append(speed)

            result.update(
                {
                    "reversible": False,
                    "availableFanSpeeds": {"speeds": speeds, "ordered": True},
                }
            )

        return result

    def query_attributes(self):
        """Return speed point and modes query attributes."""
        attrs = self.state.attributes
        domain = self.state.domain
        response = {}
        if domain == climate.DOMAIN:
            speed = attrs.get(climate.ATTR_FAN_MODE) or "off"
            response["currentFanSpeedSetting"] = speed
        if domain == fan.DOMAIN:
            percent = attrs.get(fan.ATTR_PERCENTAGE) or 0
            response["currentFanSpeedPercent"] = percent
        return response

    async def execute_fanspeed(self, data, params):
        """Execute an SetFanSpeed command.

        Dispatches to the appropriate service for the entity's domain:
        fan mode for climate, percentage for fan.
        """
        domain = self.state.domain
        if domain == climate.DOMAIN:
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_FAN_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_FAN_MODE: params["fanSpeed"],
                },
                blocking=True,
                context=data.context,
            )
        if domain == fan.DOMAIN:
            await self.hass.services.async_call(
                fan.DOMAIN,
                fan.SERVICE_SET_PERCENTAGE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    fan.ATTR_PERCENTAGE: params["fanSpeedPercent"],
                },
                blocking=True,
                context=data.context,
            )

    async def execute_reverse(self, data, params):
        """Execute a Reverse command.

        Toggles the fan direction between forward and reverse.
        """
        if self.state.domain == fan.DOMAIN:
            if self.state.attributes.get(fan.ATTR_DIRECTION) == fan.DIRECTION_FORWARD:
                direction = fan.DIRECTION_REVERSE
            else:
                direction = fan.DIRECTION_FORWARD

            await self.hass.services.async_call(
                fan.DOMAIN,
                fan.SERVICE_SET_DIRECTION,
                {ATTR_ENTITY_ID: self.state.entity_id, fan.ATTR_DIRECTION: direction},
                blocking=True,
                context=data.context,
            )

    async def execute(self, command, data, params, challenge):
        """Execute a smart home command."""
        if command == COMMAND_FANSPEED:
            await self.execute_fanspeed(data, params)
        elif command == COMMAND_REVERSE:
            await self.execute_reverse(data, params)
@register_trait
class ModesTrait(_Trait):
    """Trait to set modes.

    https://developers.google.com/actions/smarthome/traits/modes
    """

    name = TRAIT_MODES
    commands = [COMMAND_MODES]

    # Extra spoken synonyms for mode/setting names, keyed by canonical name.
    SYNONYMS = {
        "preset mode": ["preset mode", "mode", "preset"],
        "sound mode": ["sound mode", "effects"],
        "option": ["option", "setting", "mode", "value"],
    }

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == fan.DOMAIN and features & fan.SUPPORT_PRESET_MODE:
            return True

        if domain == input_select.DOMAIN:
            return True

        if domain == select.DOMAIN:
            return True

        if domain == humidifier.DOMAIN and features & humidifier.SUPPORT_MODES:
            return True

        if domain == light.DOMAIN and features & light.SUPPORT_EFFECT:
            return True

        if domain != media_player.DOMAIN:
            return False

        return features & media_player.SUPPORT_SELECT_SOUND_MODE

    def _generate(self, name, settings):
        """Generate a list of modes.

        Builds one Google "mode" descriptor for *name* with one setting
        entry per value in *settings*, attaching English synonyms.
        """
        mode = {
            "name": name,
            "name_values": [
                {"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"}
            ],
            "settings": [],
            "ordered": False,
        }
        for setting in settings:
            mode["settings"].append(
                {
                    "setting_name": setting,
                    "setting_values": [
                        {
                            "setting_synonym": self.SYNONYMS.get(setting, [setting]),
                            "lang": "en",
                        }
                    ],
                }
            )
        return mode

    def sync_attributes(self):
        """Return mode attributes for a sync request."""
        modes = []
        # (domain, attribute holding available values, Google mode name)
        for domain, attr, name in (
            (fan.DOMAIN, fan.ATTR_PRESET_MODES, "preset mode"),
            (media_player.DOMAIN, media_player.ATTR_SOUND_MODE_LIST, "sound mode"),
            (input_select.DOMAIN, input_select.ATTR_OPTIONS, "option"),
            (select.DOMAIN, select.ATTR_OPTIONS, "option"),
            (humidifier.DOMAIN, humidifier.ATTR_AVAILABLE_MODES, "mode"),
            (light.DOMAIN, light.ATTR_EFFECT_LIST, "effect"),
        ):
            if self.state.domain != domain:
                continue

            if (items := self.state.attributes.get(attr)) is not None:
                modes.append(self._generate(name, items))

            # Shortcut since all domains are currently unique
            break

        payload = {"availableModes": modes}

        return payload

    def query_attributes(self):
        """Return current modes."""
        attrs = self.state.attributes
        response = {}
        mode_settings = {}

        if self.state.domain == fan.DOMAIN:
            if fan.ATTR_PRESET_MODES in attrs:
                mode_settings["preset mode"] = attrs.get(fan.ATTR_PRESET_MODE)
        elif self.state.domain == media_player.DOMAIN:
            if media_player.ATTR_SOUND_MODE_LIST in attrs:
                mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE)
        elif self.state.domain == input_select.DOMAIN:
            mode_settings["option"] = self.state.state
        elif self.state.domain == select.DOMAIN:
            mode_settings["option"] = self.state.state
        elif self.state.domain == humidifier.DOMAIN:
            if ATTR_MODE in attrs:
                mode_settings["mode"] = attrs.get(ATTR_MODE)
        elif self.state.domain == light.DOMAIN and light.ATTR_EFFECT in attrs:
            mode_settings["effect"] = attrs.get(light.ATTR_EFFECT)

        if mode_settings:
            response["on"] = self.state.state not in (STATE_OFF, STATE_UNKNOWN)
            response["currentModeSettings"] = mode_settings

        return response

    async def execute(self, command, data, params, challenge):
        """Execute a SetModes command.

        Dispatches on the entity's domain; each branch calls the matching
        service and returns immediately.
        """
        settings = params.get("updateModeSettings")

        if self.state.domain == fan.DOMAIN:
            # NOTE(review): direct [] access raises KeyError if Google sends
            # a settings payload without "preset mode" — confirm upstream
            # validation guarantees the key.
            preset_mode = settings["preset mode"]
            await self.hass.services.async_call(
                fan.DOMAIN,
                fan.SERVICE_SET_PRESET_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    fan.ATTR_PRESET_MODE: preset_mode,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == input_select.DOMAIN:
            option = settings["option"]
            await self.hass.services.async_call(
                input_select.DOMAIN,
                input_select.SERVICE_SELECT_OPTION,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    input_select.ATTR_OPTION: option,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == select.DOMAIN:
            option = settings["option"]
            await self.hass.services.async_call(
                select.DOMAIN,
                select.SERVICE_SELECT_OPTION,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    select.ATTR_OPTION: option,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == humidifier.DOMAIN:
            requested_mode = settings["mode"]
            await self.hass.services.async_call(
                humidifier.DOMAIN,
                humidifier.SERVICE_SET_MODE,
                {
                    ATTR_MODE: requested_mode,
                    ATTR_ENTITY_ID: self.state.entity_id,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == light.DOMAIN:
            requested_effect = settings["effect"]
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_EFFECT: requested_effect,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == media_player.DOMAIN and (
            sound_mode := settings.get("sound mode")
        ):
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOUND_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_SOUND_MODE: sound_mode,
                },
                blocking=True,
                context=data.context,
            )

        # NOTE(review): this log line also fires after a successful
        # media_player sound-mode call above (no early return) — confirm
        # that is intended before tightening the control flow.
        _LOGGER.info(
            "Received an Options command for unrecognised domain %s",
            self.state.domain,
        )
        return
@register_trait
class InputSelectorTrait(_Trait):
    """Trait to set modes.

    https://developers.google.com/assistant/smarthome/traits/inputselector
    """

    name = TRAIT_INPUTSELECTOR
    commands = [COMMAND_INPUT, COMMAND_NEXT_INPUT, COMMAND_PREVIOUS_INPUT]

    SYNONYMS = {}

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == media_player.DOMAIN and bool(
            features & media_player.SUPPORT_SELECT_SOURCE
        )

    def sync_attributes(self):
        """Return mode attributes for a sync request."""
        source_list = self.state.attributes.get(
            media_player.ATTR_INPUT_SOURCE_LIST, []
        )
        available = []
        for source in source_list:
            available.append(
                {"key": source, "names": [{"name_synonym": [source], "lang": "en"}]}
            )
        return {"availableInputs": available, "orderedInputs": True}

    def query_attributes(self):
        """Return current modes."""
        current = self.state.attributes.get(media_player.ATTR_INPUT_SOURCE, "")
        return {"currentInput": current}

    async def execute(self, command, data, params, challenge):
        """Execute an SetInputSource command."""
        attrs = self.state.attributes
        sources = attrs.get(media_player.ATTR_INPUT_SOURCE_LIST) or []
        current = attrs.get(media_player.ATTR_INPUT_SOURCE)

        if command == COMMAND_INPUT:
            target = params.get("newInput")
        elif command == COMMAND_NEXT_INPUT:
            target = _next_selected(sources, current)
        elif command == COMMAND_PREVIOUS_INPUT:
            # Walking the reversed list yields the previous source.
            target = _next_selected(list(reversed(sources)), current)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command")

        if target not in sources:
            raise SmartHomeError(ERR_UNSUPPORTED_INPUT, "Unsupported input")

        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_SELECT_SOURCE,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_INPUT_SOURCE: target,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class OpenCloseTrait(_Trait):
    """Trait to open and close a cover.

    https://developers.google.com/actions/smarthome/traits/openclose
    """

    # Cover device classes that require 2FA
    COVER_2FA = (
        cover.DEVICE_CLASS_DOOR,
        cover.DEVICE_CLASS_GARAGE,
        cover.DEVICE_CLASS_GATE,
    )

    name = TRAIT_OPENCLOSE
    commands = [COMMAND_OPENCLOSE, COMMAND_OPENCLOSE_RELATIVE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == cover.DOMAIN:
            return True

        # Door/window style binary sensors are supported read-only.
        return domain == binary_sensor.DOMAIN and device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_LOCK,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        )

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA

    def sync_attributes(self):
        """Return opening direction."""
        response = {}
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        if self.state.domain == binary_sensor.DOMAIN:
            # Binary sensors can only be read, and only fully open/closed.
            response["queryOnlyOpenClose"] = True
            response["discreteOnlyOpenClose"] = True
        elif (
            self.state.domain == cover.DOMAIN
            and features & cover.SUPPORT_SET_POSITION == 0
        ):
            # Without SET_POSITION the cover only does fully open/closed.
            response["discreteOnlyOpenClose"] = True

            if (
                features & cover.SUPPORT_OPEN == 0
                and features & cover.SUPPORT_CLOSE == 0
            ):
                response["queryOnlyOpenClose"] = True

        if self.state.attributes.get(ATTR_ASSUMED_STATE):
            response["commandOnlyOpenClose"] = True

        return response

    def query_attributes(self):
        """Return state query attributes."""
        domain = self.state.domain
        response = {}

        # When it's an assumed state, we will return empty state
        # This shouldn't happen because we set `commandOnlyOpenClose`
        # but Google still queries. Erroring here will cause device
        # to show up offline.
        if self.state.attributes.get(ATTR_ASSUMED_STATE):
            return response

        if domain == cover.DOMAIN:
            if self.state.state == STATE_UNKNOWN:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )

            position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)

            if position is not None:
                response["openPercent"] = position
            elif self.state.state != cover.STATE_CLOSED:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0

        elif domain == binary_sensor.DOMAIN:
            if self.state.state == STATE_ON:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0

        return response

    async def execute(self, command, data, params, challenge):
        """Execute an Open, close, Set position command.

        Raises SmartHomeError when a partial position is requested but the
        cover does not support it, or when a relative move is requested
        without a known current position.
        """
        domain = self.state.domain
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        if domain == cover.DOMAIN:
            svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
            should_verify = False
            if command == COMMAND_OPENCLOSE_RELATIVE:
                position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)
                if position is None:
                    # Fix: "not know" -> "not known" in the user-facing error.
                    raise SmartHomeError(
                        ERR_NOT_SUPPORTED,
                        "Current position not known for relative command",
                    )
                # Clamp the relative target into the valid 0-100 range.
                position = max(0, min(100, position + params["openRelativePercent"]))
            else:
                position = params["openPercent"]

            if position == 0:
                service = cover.SERVICE_CLOSE_COVER
                should_verify = False
            elif position == 100:
                service = cover.SERVICE_OPEN_COVER
                should_verify = True
            elif features & cover.SUPPORT_SET_POSITION:
                service = cover.SERVICE_SET_COVER_POSITION
                if position > 0:
                    should_verify = True
                svc_params[cover.ATTR_POSITION] = position
            else:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "No support for partial open close"
                )

            # Opening a secure cover class (door/garage/gate) requires 2FA.
            if (
                should_verify
                and self.state.attributes.get(ATTR_DEVICE_CLASS)
                in OpenCloseTrait.COVER_2FA
            ):
                _verify_pin_challenge(data, self.state, challenge)

            await self.hass.services.async_call(
                cover.DOMAIN, service, svc_params, blocking=True, context=data.context
            )
@register_trait
class VolumeTrait(_Trait):
    """Trait to control volume of a device.

    https://developers.google.com/actions/smarthome/traits/volume
    """

    name = TRAIT_VOLUME
    commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE, COMMAND_MUTE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if trait is supported."""
        if domain == media_player.DOMAIN:
            return features & (
                media_player.SUPPORT_VOLUME_SET | media_player.SUPPORT_VOLUME_STEP
            )

        return False

    def sync_attributes(self):
        """Return volume attributes for a sync request."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        return {
            "volumeCanMuteAndUnmute": bool(features & media_player.SUPPORT_VOLUME_MUTE),
            "commandOnlyVolume": self.state.attributes.get(ATTR_ASSUMED_STATE, False),
            # Volume amounts in SET_VOLUME and VOLUME_RELATIVE are on a scale
            # from 0 to this value.
            "volumeMaxLevel": 100,
            # Default change for queries like "Hey Google, volume up".
            # 10% corresponds to the default behavior for the
            # media_player.volume{up,down} services.
            "levelStepSize": 10,
        }

    def query_attributes(self):
        """Return volume query attributes."""
        response = {}

        level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        if level is not None:
            # Convert 0.0-1.0 to 0-100
            response["currentVolume"] = int(level * 100)

        muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
        if muted is not None:
            response["isMuted"] = bool(muted)

        return response

    async def _set_volume_absolute(self, data, level):
        """Call volume_set with *level* on HA's native 0.0-1.0 scale."""
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_SET,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_LEVEL: level,
            },
            blocking=True,
            context=data.context,
        )

    async def _execute_set_volume(self, data, params):
        """Handle an absolute SET_VOLUME command (Google 0-100 scale)."""
        # Clamp the requested level before converting to 0.0-1.0.
        level = max(0, min(100, params["volumeLevel"]))

        if not (
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & media_player.SUPPORT_VOLUME_SET
        ):
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")

        await self._set_volume_absolute(data, level / 100)

    async def _execute_volume_relative(self, data, params):
        """Handle a VOLUME_RELATIVE command (signed step count)."""
        relative = params["relativeSteps"]
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        if features & media_player.SUPPORT_VOLUME_SET:
            # NOTE(review): current is None when the player reports no
            # volume level, which would raise TypeError here — confirm
            # SUPPORT_VOLUME_SET players always report a level.
            current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
            target = max(0.0, min(1.0, current + relative / 100))

            await self._set_volume_absolute(data, target)
        elif features & media_player.SUPPORT_VOLUME_STEP:
            # Fall back to stepping the volume one unit at a time.
            svc = media_player.SERVICE_VOLUME_UP
            if relative < 0:
                svc = media_player.SERVICE_VOLUME_DOWN
                relative = -relative

            for _ in range(relative):
                await self.hass.services.async_call(
                    media_player.DOMAIN,
                    svc,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")

    async def _execute_mute(self, data, params):
        """Handle a MUTE command."""
        mute = params["mute"]

        if not (
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & media_player.SUPPORT_VOLUME_MUTE
        ):
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")

        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_MUTE,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_MUTED: mute,
            },
            blocking=True,
            context=data.context,
        )

    async def execute(self, command, data, params, challenge):
        """Execute a volume command."""
        if command == COMMAND_SET_VOLUME:
            await self._execute_set_volume(data, params)
        elif command == COMMAND_VOLUME_RELATIVE:
            await self._execute_volume_relative(data, params)
        elif command == COMMAND_MUTE:
            await self._execute_mute(data, params)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
    """Verify a pin challenge.

    No-op when 2FA is disabled for this entity; otherwise raises when the
    PIN is unconfigured, missing, or wrong.
    """
    if not data.config.should_2fa(state):
        return

    pin = data.config.secure_devices_pin
    if not pin:
        raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")

    if not challenge:
        raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)

    if challenge.get("pin") != pin:
        raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
    """Verify an ack challenge.

    No-op when 2FA is disabled; otherwise the user must have acknowledged.
    """
    if not data.config.should_2fa(state):
        return

    acknowledged = bool(challenge) and challenge.get("ack")
    if not acknowledged:
        raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
# Maps each Google transport-control command to the media_player feature
# flag that must be set for the command to be supported.
MEDIA_COMMAND_SUPPORT_MAPPING = {
    COMMAND_MEDIA_NEXT: media_player.SUPPORT_NEXT_TRACK,
    COMMAND_MEDIA_PAUSE: media_player.SUPPORT_PAUSE,
    COMMAND_MEDIA_PREVIOUS: media_player.SUPPORT_PREVIOUS_TRACK,
    COMMAND_MEDIA_RESUME: media_player.SUPPORT_PLAY,
    COMMAND_MEDIA_SEEK_RELATIVE: media_player.SUPPORT_SEEK,
    COMMAND_MEDIA_SEEK_TO_POSITION: media_player.SUPPORT_SEEK,
    COMMAND_MEDIA_SHUFFLE: media_player.SUPPORT_SHUFFLE_SET,
    COMMAND_MEDIA_STOP: media_player.SUPPORT_STOP,
}
# Maps each Google transport-control command to the string Google expects
# in the transportControlSupportedCommands sync attribute.
MEDIA_COMMAND_ATTRIBUTES = {
    COMMAND_MEDIA_NEXT: "NEXT",
    COMMAND_MEDIA_PAUSE: "PAUSE",
    COMMAND_MEDIA_PREVIOUS: "PREVIOUS",
    COMMAND_MEDIA_RESUME: "RESUME",
    COMMAND_MEDIA_SEEK_RELATIVE: "SEEK_RELATIVE",
    COMMAND_MEDIA_SEEK_TO_POSITION: "SEEK_TO_POSITION",
    COMMAND_MEDIA_SHUFFLE: "SHUFFLE",
    COMMAND_MEDIA_STOP: "STOP",
}
@register_trait
class TransportControlTrait(_Trait):
    """Trait to control media playback.

    https://developers.google.com/actions/smarthome/traits/transportcontrol
    """

    name = TRAIT_TRANSPORT_CONTROL
    commands = [
        COMMAND_MEDIA_NEXT,
        COMMAND_MEDIA_PAUSE,
        COMMAND_MEDIA_PREVIOUS,
        COMMAND_MEDIA_RESUME,
        COMMAND_MEDIA_SEEK_RELATIVE,
        COMMAND_MEDIA_SEEK_TO_POSITION,
        COMMAND_MEDIA_SHUFFLE,
        COMMAND_MEDIA_STOP,
    ]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == media_player.DOMAIN:
            # Supported if the player implements at least one mapped command.
            for feature in MEDIA_COMMAND_SUPPORT_MAPPING.values():
                if features & feature:
                    return True

        return False

    def sync_attributes(self):
        """Return opening direction."""
        response = {}

        if self.state.domain == media_player.DOMAIN:
            features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

            support = []
            # Advertise only the commands the player's feature flags allow.
            for command, feature in MEDIA_COMMAND_SUPPORT_MAPPING.items():
                if features & feature:
                    support.append(MEDIA_COMMAND_ATTRIBUTES[command])
            response["transportControlSupportedCommands"] = support

        return response

    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a media command."""
        service_attrs = {ATTR_ENTITY_ID: self.state.entity_id}

        if command == COMMAND_MEDIA_SEEK_RELATIVE:
            service = media_player.SERVICE_MEDIA_SEEK

            rel_position = params["relativePositionMs"] / 1000
            seconds_since = 0  # Default to 0 seconds
            if self.state.state == STATE_PLAYING:
                # While playing, the reported position is stale; add the
                # time elapsed since the position attribute was updated.
                now = dt.utcnow()
                upd_at = self.state.attributes.get(
                    media_player.ATTR_MEDIA_POSITION_UPDATED_AT, now
                )
                seconds_since = (now - upd_at).total_seconds()
            position = self.state.attributes.get(media_player.ATTR_MEDIA_POSITION, 0)
            max_position = self.state.attributes.get(
                media_player.ATTR_MEDIA_DURATION, 0
            )
            # Clamp the seek target into [0, duration].
            service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
                max(position + seconds_since + rel_position, 0), max_position
            )
        elif command == COMMAND_MEDIA_SEEK_TO_POSITION:
            service = media_player.SERVICE_MEDIA_SEEK

            max_position = self.state.attributes.get(
                media_player.ATTR_MEDIA_DURATION, 0
            )
            # Convert milliseconds to seconds and clamp into [0, duration].
            service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
                max(params["absPositionMs"] / 1000, 0), max_position
            )
        elif command == COMMAND_MEDIA_NEXT:
            service = media_player.SERVICE_MEDIA_NEXT_TRACK
        elif command == COMMAND_MEDIA_PAUSE:
            service = media_player.SERVICE_MEDIA_PAUSE
        elif command == COMMAND_MEDIA_PREVIOUS:
            service = media_player.SERVICE_MEDIA_PREVIOUS_TRACK
        elif command == COMMAND_MEDIA_RESUME:
            service = media_player.SERVICE_MEDIA_PLAY
        elif command == COMMAND_MEDIA_SHUFFLE:
            service = media_player.SERVICE_SHUFFLE_SET

            # Google Assistant only supports enabling shuffle
            service_attrs[media_player.ATTR_MEDIA_SHUFFLE] = True
        elif command == COMMAND_MEDIA_STOP:
            service = media_player.SERVICE_MEDIA_STOP
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")

        await self.hass.services.async_call(
            media_player.DOMAIN,
            service,
            service_attrs,
            blocking=True,
            context=data.context,
        )
@register_trait
class MediaStateTrait(_Trait):
    """Trait to get media playback state.

    https://developers.google.com/actions/smarthome/traits/mediastate
    """

    name = TRAIT_MEDIA_STATE
    commands = []

    # HA media_player state -> Google activityState value.
    activity_lookup = {
        STATE_OFF: "INACTIVE",
        STATE_IDLE: "STANDBY",
        STATE_PLAYING: "ACTIVE",
        STATE_ON: "STANDBY",
        STATE_PAUSED: "STANDBY",
        STATE_STANDBY: "STANDBY",
        STATE_UNAVAILABLE: "INACTIVE",
        STATE_UNKNOWN: "INACTIVE",
    }

    # HA media_player state -> Google playbackState value.
    playback_lookup = {
        STATE_OFF: "STOPPED",
        STATE_IDLE: "STOPPED",
        STATE_PLAYING: "PLAYING",
        STATE_ON: "STOPPED",
        STATE_PAUSED: "PAUSED",
        STATE_STANDBY: "STOPPED",
        STATE_UNAVAILABLE: "STOPPED",
        STATE_UNKNOWN: "STOPPED",
    }

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return media_player.DOMAIN == domain

    def sync_attributes(self):
        """Return attributes for a sync request."""
        return {"supportActivityState": True, "supportPlaybackState": True}

    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        current = self.state.state
        activity = self.activity_lookup.get(current, "INACTIVE")
        playback = self.playback_lookup.get(current, "STOPPED")
        return {"activityState": activity, "playbackState": playback}
@register_trait
class ChannelTrait(_Trait):
    """Trait to get media playback state.

    https://developers.google.com/actions/smarthome/traits/channel
    """

    name = TRAIT_CHANNEL
    commands = [COMMAND_SELECT_CHANNEL]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        # Only TV-class media players that can play media get this trait.
        return (
            domain == media_player.DOMAIN
            and bool(features & media_player.SUPPORT_PLAY_MEDIA)
            and device_class == media_player.DEVICE_CLASS_TV
        )

    def sync_attributes(self):
        """Return attributes for a sync request."""
        return {"availableChannels": [], "commandOnlyChannels": True}

    def query_attributes(self):
        """Return channel query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute an setChannel command."""
        if command != COMMAND_SELECT_CHANNEL:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command")

        channel_number = params.get("channelNumber")
        if not channel_number:
            raise SmartHomeError(
                ERR_NO_AVAILABLE_CHANNEL,
                "Channel is not available",
            )

        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_CONTENT_ID: channel_number,
                media_player.ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class SensorStateTrait(_Trait):
    """Trait to get sensor state.

    https://developers.google.com/actions/smarthome/traits/sensorstate
    """

    # Maps a HA sensor device class to Google's (sensor name, raw unit).
    # Fix: DEVICE_CLASS_CO is carbon *monoxide* and DEVICE_CLASS_CO2 is
    # carbon *dioxide*; the two Google sensor names were previously swapped.
    sensor_types = {
        sensor.DEVICE_CLASS_AQI: ("AirQuality", "AQI"),
        sensor.DEVICE_CLASS_CO: ("CarbonMonoxideLevel", "PARTS_PER_MILLION"),
        sensor.DEVICE_CLASS_CO2: ("CarbonDioxideLevel", "PARTS_PER_MILLION"),
        sensor.DEVICE_CLASS_PM25: ("PM2.5", "MICROGRAMS_PER_CUBIC_METER"),
        sensor.DEVICE_CLASS_PM10: ("PM10", "MICROGRAMS_PER_CUBIC_METER"),
        sensor.DEVICE_CLASS_VOLATILE_ORGANIC_COMPOUNDS: (
            "VolatileOrganicCompounds",
            "PARTS_PER_MILLION",
        ),
    }

    name = TRAIT_SENSOR_STATE
    commands = []

    @classmethod
    def supported(cls, domain, features, device_class, _):
        """Test if state is supported."""
        return domain == sensor.DOMAIN and device_class in cls.sensor_types

    def sync_attributes(self):
        """Return attributes for a sync request.

        Returns None when the entity's device class is not a supported
        sensor type.
        """
        device_class = self.state.attributes.get(ATTR_DEVICE_CLASS)
        if (data := self.sensor_types.get(device_class)) is not None:
            return {
                "sensorStatesSupported": {
                    "name": data[0],
                    "numericCapabilities": {"rawValueUnit": data[1]},
                }
            }
        return None

    def query_attributes(self):
        """Return the attributes of this trait for this entity.

        Returns None when the entity's device class is not a supported
        sensor type.
        """
        device_class = self.state.attributes.get(ATTR_DEVICE_CLASS)
        if (data := self.sensor_types.get(device_class)) is not None:
            return {
                "currentSensorStateData": [
                    {"name": data[0], "rawValue": self.state.state}
                ]
            }
        return None
|
{
"content_hash": "c03b8695449be17fcfd783eb37bb3163",
"timestamp": "",
"source": "github",
"line_count": 2324,
"max_line_length": 123,
"avg_line_length": 34.586919104991395,
"alnum_prop": 0.5808534461308783,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "9f79f0f7d9b49f57f35c88c569139871c3aab1ef",
"size": "80380",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/google_assistant/trait.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import os.path
import archivebot.wpull
def add_args(args, names, item):
    """Render each %-template in *names* against *item* and append the
    non-empty results to *args* in order."""
    for template in names:
        rendered = template % item
        # Templates that expand to an empty string are dropped entirely.
        if rendered:
            args.append(rendered)
def make_args(item, default_user_agent, wpull_exe, youtube_dl_exe, phantomjs_exe, finished_warcs_dir, warc_max_size):
    """Build the complete wpull argv for one ArchiveBot job *item*.

    *item* is a mapping-like job record; '%(key)s' templates in argument
    strings are expanded against it by add_args.
    """
    # -----------------------------------------------------------------------
    # BASE ARGUMENTS
    # -----------------------------------------------------------------------
    user_agent = item.get('user_agent') or default_user_agent
    # plugin.py ships alongside the archivebot.wpull package.
    plugin_path = os.path.join(
        os.path.dirname(archivebot.wpull.__file__),
        'plugin.py'
    )

    args = [wpull_exe,
        '-U', user_agent,
        '--header', 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        '--quiet',
        '-o', '%(item_dir)s/wpull.log' % item,
        '--database', '%(item_dir)s/wpull.db' % item,
        '--html-parser', 'libxml2-lxml',
        '--plugin-script', plugin_path,
        '--plugin-args', ' --dupes-db %(item_dir)s/dupes_db' % item,
        '--save-cookies', '%(cookie_jar)s' % item,
        '--no-check-certificate',
        '--delete-after',
        '--no-robots',
        '--page-requisites',
        '--no-parent',
        '--sitemaps',
        '--inet4-only',
        '--timeout', '20',
        '--tries', '3',
        '--waitretry', '5',
        '--warc-file', '%(item_dir)s/%(warc_file_base)s' % item,
        '--warc-max-size', warc_max_size,
        '--warc-header', 'operator: Archive Team',
        '--warc-header', 'downloaded-by: ArchiveBot',
        '--warc-header', 'archivebot-job-ident: %(ident)s' % item,
        '--warc-move', finished_warcs_dir,
        '--python-script', 'wpull_hooks.py',
        '--phantomjs-exe', phantomjs_exe,
        '--debug-manhole',
        '--strip-session-id',
        '--escaped-fragment',
        '--session-timeout', '21600',
        '--monitor-disk', '500m',
        '--monitor-memory', '50m',
        '--max-redirect', '8',
        '--youtube-dl-exe', youtube_dl_exe
    ]

    # The over18 cookie bypasses Reddit's age interstitial.
    if item['url'].startswith("http://www.reddit.com/") or \
            item['url'].startswith("https://www.reddit.com/"):
        add_args(args, ['--header', 'Cookie: over18=1'], item)

    # NCR=1 stops Blogspot from redirecting to country-specific domains.
    if 'blogspot.' in item['url']:
        add_args(args, ['--header', 'Cookie: NCR=1'], item)

    # -----------------------------------------------------------------------
    # !ao < FILE
    # -----------------------------------------------------------------------
    # Archive-only jobs fed from a URL list use -i; otherwise fetch the
    # single job URL.
    if 'source_url_file' in item:
        add_args(args, ['-i', '%(source_url_file)s'], item)
    else:
        add_args(args, ['%(url)s'], item)

    # -----------------------------------------------------------------------
    # RECURSIVE FETCH / HOST-SPANNING
    # -----------------------------------------------------------------------
    if item.get('recursive'):
        add_args(args, ['--recursive', '--level', '%(depth)s'], item)

    # NOTE(review): '--span-hosts-allow' takes the value appended just
    # below; it is added unconditionally — confirm against upstream.
    args.append('--span-hosts-allow')
    if item.get('recursive') and not item.get('no_offsite_links'):
        args.append('page-requisites,linked-pages')
    else:
        args.append('page-requisites')

    # -----------------------------------------------------------------------
    # PHANTOMJS CONFIGURATION
    # -----------------------------------------------------------------------
    if item.get('grabber') == 'phantomjs':
        item.log_output('Telling wpull to use PhantomJS.')

        phantomjs_args = [
            '--phantomjs',
            '--phantomjs-scroll', item['phantomjs_scroll'],
            '--phantomjs-wait', item['phantomjs_wait']
        ]

        if item.get('no_phantomjs_smart_scroll'):
            phantomjs_args.append('--no-phantomjs-smart-scroll')

        item.log_output('Setting PhantomJS args: %s' % phantomjs_args)
        args.extend(phantomjs_args)

    # -----------------------------------------------------------------------
    # YOUTUBE-DL
    # -----------------------------------------------------------------------
    if item.get('youtube_dl'):
        args.append('--youtube-dl')

    return args
# ---------------------------------------------------------------------------
class WpullArgs(object):
    """Factory for wpull command lines.

    Captures grab-wide settings once at construction time and, per item,
    expands them into the full wpull argument vector via :meth:`realize`.
    """

    def __init__(self, *, default_user_agent, wpull_exe, youtube_dl_exe,
                 phantomjs_exe, finished_warcs_dir, warc_max_size):
        # All settings are keyword-only; bind each one as an attribute of
        # the same name so realize() can forward them positionally.
        self.__dict__.update(
            default_user_agent=default_user_agent,
            wpull_exe=wpull_exe,
            youtube_dl_exe=youtube_dl_exe,
            phantomjs_exe=phantomjs_exe,
            finished_warcs_dir=finished_warcs_dir,
            warc_max_size=warc_max_size,
        )

    def realize(self, item):
        """Return the wpull argument list for *item*."""
        return make_args(
            item,
            self.default_user_agent,
            self.wpull_exe,
            self.youtube_dl_exe,
            self.phantomjs_exe,
            self.finished_warcs_dir,
            self.warc_max_size,
        )
# vim:ts=4:sw=4:et:tw=78
|
{
"content_hash": "18c918931941e46c99e61764e3e135e7",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 123,
"avg_line_length": 37.261538461538464,
"alnum_prop": 0.4642857142857143,
"repo_name": "Frogging101/ArchiveBot",
"id": "888a7a7cd544a10cb8b742a8b5b83b92a4d02313",
"size": "4844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeline/archivebot/seesaw/wpull.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "927"
},
{
"name": "HTML",
"bytes": "74835"
},
{
"name": "Haxe",
"bytes": "16694"
},
{
"name": "JavaScript",
"bytes": "24102"
},
{
"name": "Makefile",
"bytes": "79"
},
{
"name": "Python",
"bytes": "113219"
},
{
"name": "Ruby",
"bytes": "104126"
},
{
"name": "Shell",
"bytes": "1350"
}
],
"symlink_target": ""
}
|
import os
import sys
from setuptools import find_packages
from setuptools import setup
# Plugin version; the acme/certbot minimum pins below track it exactly.
version = '1.32.0.dev0'

install_requires = [
    'dns-lexicon>=3.2.1',
    'setuptools>=41.6.0',
]

snap_build = os.environ.get('SNAP_BUILD')
if snap_build:
    # Snap builds ship certbot itself, so its dependencies are omitted
    # here — which is exactly why wheels must never be built in this mode.
    if 'bdist_wheel' in sys.argv[1:]:
        raise RuntimeError('Unset SNAP_BUILD when building wheels '
                           'to include certbot dependencies.')
    install_requires.append('packaging')
else:
    # We specify the minimum acme and certbot version as the current plugin
    # version for simplicity. See
    # https://github.com/certbot/certbot/issues/8761 for more info.
    install_requires.extend([
        f'acme>={version}',
        f'certbot>={version}',
    ])
# Optional extras: installing with the "docs" extra pulls in what is
# needed to build the Sphinx documentation for this plugin.
docs_extras = [
    'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
    'sphinx_rtd_theme',
]
# Package metadata and entry-point registration for the certbot-dns-nsone
# plugin. The entry point under "certbot.plugins" is how Certbot discovers
# the NS1 authenticator at runtime.
setup(
    name='certbot-dns-nsone',
    version=version,
    description="NS1 DNS Authenticator plugin for Certbot",
    url='https://github.com/certbot/certbot',
    author="Certbot Project",
    author_email='certbot-dev@eff.org',
    license='Apache License 2.0',
    python_requires='>=3.7',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Plugins',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    packages=find_packages(),
    include_package_data=True,
    # install_requires is built above and varies with SNAP_BUILD.
    install_requires=install_requires,
    extras_require={
        'docs': docs_extras,
    },
    entry_points={
        'certbot.plugins': [
            'dns-nsone = certbot_dns_nsone._internal.dns_nsone:Authenticator',
        ],
    },
)
|
{
"content_hash": "84b1095793aac4852b4933e6ff9d867b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 31.17808219178082,
"alnum_prop": 0.6120386643233744,
"repo_name": "lmcro/letsencrypt",
"id": "9a400bb90e42eeb1ecc8eed53b26e060d2a0b86a",
"size": "2276",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "certbot-dns-nsone/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "14147"
},
{
"name": "Augeas",
"bytes": "4731"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1225979"
},
{
"name": "Shell",
"bytes": "26934"
}
],
"symlink_target": ""
}
|
import mock
from nova import test
from nova.tests.unit import fake_instance
from nova.virt.virtualbox import constants
from nova.virt.virtualbox import exception
from nova.virt.virtualbox import pathutils
class PathUtilsTestCase(test.NoDBTestCase):
    """Unit tests for nova.virt.virtualbox.pathutils.

    All filesystem access is mocked, so no real paths are created,
    deleted or inspected. Remember that stacked ``mock.patch``
    decorators apply bottom-up: the innermost (lowest) decorator
    supplies the first mock argument after ``self``.
    """
    def setUp(self):
        super(PathUtilsTestCase, self).setUp()
        # Minimal fake instance; tests only rely on its name/uuid fields.
        instance_values = {
            'name': 'fake_name',
            'uuid': 'fake_uuid',
        }
        self._context = 'fake-context'
        self._instance = fake_instance.fake_instance_obj(self._context,
                                                         **instance_values)
    @mock.patch('os.makedirs')
    @mock.patch('os.path.exists')
    def test_create_path(self, mock_exists, mock_makedirs):
        """create_path only calls makedirs when the path is missing."""
        # 1st call: path absent -> makedirs; 2nd call: path present -> no-op.
        mock_exists.side_effect = [False, True]
        for _ in range(2):
            pathutils.create_path(mock.sentinel.path)
        self.assertEqual(2, mock_exists.call_count)
        mock_makedirs.assert_called_once_with(mock.sentinel.path)
    @mock.patch('shutil.rmtree')
    @mock.patch('os.remove')
    @mock.patch('os.path.isdir')
    @mock.patch('os.path.exists')
    def test_delete(self, mock_exists, mock_is_dir, mock_remove, mock_rmtree):
        """delete_path dispatches to rmtree for dirs, remove for files."""
        # Call 1: path absent -> nothing happens.
        # Call 2: present + dir -> rmtree. Call 3: present + file -> remove.
        mock_exists.side_effect = [False, True, True]
        mock_is_dir.side_effect = [True, False]
        for _ in range(3):
            pathutils.delete_path(mock.sentinel.path)
        self.assertEqual(3, mock_exists.call_count)
        self.assertEqual(2, mock_is_dir.call_count)
        mock_remove.assert_called_once_with(mock.sentinel.path)
        mock_rmtree.assert_called_once_with(mock.sentinel.path)
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.virtualbox.pathutils.delete_path')
    @mock.patch('nova.virt.virtualbox.pathutils.create_path')
    def test_action(self, mock_create, mock_delete, mock_exists):
        """Each PATH_* action triggers exactly its corresponding helper."""
        path = pathutils.instance_dir()
        # An unknown action must not touch the filesystem helpers at all.
        pathutils.instance_dir(action=mock.sentinel.invalid_action)
        self.assertEqual(0, mock_create.call_count)
        self.assertEqual(0, mock_delete.call_count)
        self.assertEqual(0, mock_exists.call_count)
        pathutils.instance_dir(action=constants.PATH_CREATE)
        mock_create.assert_called_once_with(path)
        mock_create.reset_mock()
        pathutils.instance_dir(action=constants.PATH_DELETE)
        mock_delete.assert_called_once_with(path)
        mock_delete.reset_mock()
        pathutils.instance_dir(action=constants.PATH_EXISTS)
        mock_exists.assert_called_once_with(path)
        mock_exists.reset_mock()
        # OVERWRITE is expected to delete then recreate the directory.
        pathutils.instance_dir(action=constants.PATH_OVERWRITE)
        mock_delete.assert_called_once_with(path)
        mock_create.assert_called_once_with(path)
    @mock.patch('os.path.normpath')
    def test_instance_dir(self, mock_join):
        # NOTE(review): the patched target is os.path.normpath; the
        # parameter name "mock_join" is misleading — consider renaming.
        pathutils.instance_dir()
        self.assertEqual(1, mock_join.call_count)
    @mock.patch('os.path.join')
    @mock.patch('nova.virt.virtualbox.pathutils.instance_dir')
    def test_instance_basepath(self, mock_instance_dir, mock_join):
        """Base path is instance_dir() joined with the instance name."""
        pathutils.instance_basepath(self._instance)
        mock_join.assert_called_once_with(mock_instance_dir(),
                                          self._instance.name)
    @mock.patch('os.path.join')
    @mock.patch('nova.virt.virtualbox.pathutils.instance_basepath')
    def test_ephemeral_vhd_path(self, mock_instance_basepath, mock_join):
        pathutils.ephemeral_vhd_path(self._instance, 'fake-disk-format')
        mock_instance_basepath.assert_called_once_with(self._instance)
        self.assertEqual(1, mock_join.call_count)
    @mock.patch('os.path.join')
    def test_base_disk_dir(self, mock_join):
        pathutils.base_disk_dir()
        self.assertEqual(1, mock_join.call_count)
    @mock.patch('os.path.join')
    @mock.patch('nova.virt.virtualbox.pathutils.base_disk_dir')
    def test_base_disk_path(self, mock_base_disk, mock_join):
        pathutils.base_disk_path(self._instance)
        self.assertEqual(1, mock_base_disk.call_count)
        self.assertEqual(1, mock_join.call_count)
    @mock.patch('os.path.join')
    @mock.patch('nova.virt.virtualbox.pathutils.instance_basepath')
    def test_root_disk_path(self, mock_instance_basepath, mock_join):
        pathutils.root_disk_path(self._instance, 'fake-disk-format')
        mock_instance_basepath.assert_called_once_with(self._instance)
        self.assertEqual(1, mock_join.call_count)
    @mock.patch('os.path.join')
    @mock.patch('nova.virt.virtualbox.pathutils.instance_basepath')
    def test_export_dir(self, mock_instance_basepath, mock_join):
        pathutils.export_dir(self._instance)
        mock_instance_basepath.assert_called_once_with(self._instance)
        self.assertEqual(1, mock_join.call_count)
    @mock.patch('nova.virt.virtualbox.manage.VBoxManage.show_vm_info')
    def test_get_root_disk_path(self, mock_vm_info):
        """Root disk path is read from the VM info attach-point entry."""
        mock_vm_info.return_value = {
            constants.DEFAULT_ROOT_ATTACH_POINT: mock.sentinel.path}
        self.assertEqual(mock.sentinel.path,
                         pathutils.get_root_disk_path(self._instance))
    @mock.patch('nova.virt.virtualbox.manage.VBoxManage.show_vm_info')
    def test_get_root_disk_path_fail(self, mock_vm_info):
        """Both a VBoxManage error and a missing entry yield None."""
        mock_vm_info.side_effect = [
            exception.VBoxManageError("err"),
            {}
        ]
        for _ in range(2):
            self.assertIsNone(pathutils.get_root_disk_path(self._instance))
|
{
"content_hash": "e9c48819a7f06ae2419f8f32d2ab5b37",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 38.864285714285714,
"alnum_prop": 0.6583348649145377,
"repo_name": "cloudbase/nova-virtualbox",
"id": "ae6a516ac565d4585d02be58d40b5c3a9278b067",
"size": "6080",
"binary": false,
"copies": "2",
"ref": "refs/heads/virtualbox_driver",
"path": "nova/tests/unit/virt/virtualbox/test_pathutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16016453"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "497954"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.