# (dataset metadata table header removed — extraction artifact, not source code)
import os
import math
import numpy as np
from PIL import Image
import skimage.transform as trans
import cv2
import torch
from data import dataset_info
from data.base_dataset import BaseDataset
import util.util as util
dataset_info = dataset_info()
class AllFaceDataset(BaseDataset):
    """Multi-source face dataset.

    Merges several face datasets (configured via ``data.dataset_info``); for
    every item it loads an image, its 5-point facial landmarks and the path to
    its parameter (.txt) file, aligns the face with a similarity transform and
    returns the warped image together with the transform matrix.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        # Add dataset-specific command-line flags; ``is_train`` is unused here.
        parser.add_argument('--no_pairing_check', action='store_true',
                            help='If specified, skip sanity check of correct label-image file pairing')
        return parser

    def cv2_loader(self, img_str):
        # Decode an in-memory encoded image buffer (bytes) into a BGR array.
        img_array = np.frombuffer(img_str, dtype=np.uint8)
        return cv2.imdecode(img_array, cv2.IMREAD_COLOR)

    def fill_list(self, tmp_list):
        """Pad ``tmp_list`` by repeating its tail so its length becomes a
        multiple of ``opt.batchSize`` (keeps every batch full)."""
        length = len(tmp_list)
        if length % self.opt.batchSize != 0:
            end = math.ceil(length / self.opt.batchSize) * self.opt.batchSize
            tmp_list = tmp_list + tmp_list[-1 * (end - length) :]
        return tmp_list

    def initialize(self, opt):
        """Read the image / landmark list files of every configured dataset."""
        self.opt = opt
        dataset_num = dataset_info.get_dataset(opt)
        self.prefix = [dataset_info.prefix[num] for num in dataset_num]
        file_list = [dataset_info.file_list[num] for num in dataset_num]
        land_mark_list = [dataset_info.land_mark_list[num] for num in dataset_num]
        self.params_dir = [dataset_info.params_dir[num] for num in dataset_num]
        self.folder_level = [dataset_info.folder_level[num] for num in dataset_num]
        self.num_datasets = len(file_list)
        assert len(land_mark_list) == self.num_datasets, \
            'num of landmk dir should be the num of datasets'
        assert len(self.params_dir) == self.num_datasets, \
            'num of params_dir should be the num of datasets'
        self.dataset_lists = []
        self.landmark_paths = []
        self.sizes = []
        for n in range(self.num_datasets):
            with open(file_list[n]) as f:
                img_lists = f.readlines()
            img_lists = self.fill_list(img_lists)
            self.sizes.append(len(img_lists))
            # Both lists are sorted by file name so image and landmark entries
            # stay index-aligned.
            self.dataset_lists.append(sorted(img_lists))
            with open(land_mark_list[n]) as f:
                landmarks = f.readlines()
            landmarks = self.fill_list(landmarks)
            self.landmark_paths.append(sorted(landmarks))
        # Iterate only up to the smallest dataset so any index is valid for
        # every source (see __getitem__, which picks a source at random).
        self.dataset_size = min(self.sizes)
        self.initialized = False

    def get_landmarks(self, landmark, img_list):
        """Parse one landmark line: ``<path> <label> x1 y1 ... x5 y5``.

        Returns the 5 landmarks as a (5, 2) float array plus the label.
        Asserts that the landmark line refers to the same file as ``img_list``.
        """
        landmark_split = landmark.strip().split(' ')
        filename1_without_ext = os.path.basename(img_list.strip())
        filename2_without_ext = os.path.basename(landmark_split[0])
        assert (filename1_without_ext == filename2_without_ext), \
            "The image_path %s and params_path %s don't match." % \
            (img_list, landmark_split[0])
        label = landmark_split[1]
        landmarks = landmark_split[2:]
        landmarks = list(map(float, landmarks))
        landmarks_array = np.array(landmarks).reshape(5, 2)
        return landmarks_array, label

    def get_param_file(self, img_list, dataset_num):
        """Derive the parameter (.txt) path for an image by keeping the last
        ``folder_level`` components of the image path under ``params_dir``."""
        img_name = os.path.splitext(img_list)[0]
        name_split = img_name.split("/")
        folder_level = self.folder_level[dataset_num]
        param_folder = os.path.join(self.params_dir[dataset_num],
                                    "/".join([name_split[i] for i in range(len(name_split) - folder_level, len(name_split))]) + ".txt")
        # params = np.loadtxt(param_folder)
        return param_folder

    def paths_match(self, path1, path2):
        # Cheap pairing check: compare only the last 10 characters of each
        # basename (extension stripped).
        filename1_without_ext = os.path.splitext(os.path.basename(path1)[-10:])[0]
        filename2_without_ext = os.path.splitext(os.path.basename(path2)[-10:])[0]
        return filename1_without_ext == filename2_without_ext

    def affine_align(self, img, landmark=None, **kwargs):
        """Warp ``img`` so its 5 landmarks match a canonical frontal template
        scaled to ``opt.crop_size``; returns (warped image, 2x3 affine M)."""
        M = None
        h, w, c = img.shape
        # Canonical 5-point template defined on a 112x112 face crop...
        src = np.array([
            [38.2946, 51.6963],
            [73.5318, 51.5014],
            [56.0252, 71.7366],
            [41.5493, 92.3655],
            [70.7299, 92.2041]], dtype=np.float32)
        # ...rescaled and offset into a 400x400 frame, then to the crop size.
        src = src * 290 / 112
        src[:, 0] += 50
        src[:, 1] += 60
        src = src / 400 * self.opt.crop_size
        dst = landmark
        # dst = landmark.astype(np.float32)
        tform = trans.SimilarityTransform()
        tform.estimate(dst, src)
        M = tform.params[0:2, :]
        warped = cv2.warpAffine(img, M, (self.opt.crop_size, self.opt.crop_size), borderValue=0.0)
        return warped, M

    def __getitem__(self, index):
        """Return a dict: aligned RGB image (C,H,W float in [0,1]), parameter
        file path, alignment matrix M, and the source image path."""
        # Label Image
        # NOTE(review): ``randnum`` is computed but never used; the source
        # dataset is chosen at random per item, which is why dataset_size is
        # min(sizes) in initialize().
        randnum = np.random.randint(sum(self.sizes))
        dataset_num = np.random.randint(self.num_datasets)
        image_path = self.dataset_lists[dataset_num][index].strip()
        image_path = os.path.join(self.prefix[dataset_num], image_path)
        img = cv2.imread(image_path)
        if img is None:
            raise Exception('None Image')
        param_path = self.get_param_file(image_path, dataset_num)
        # img = cv2.imread(image_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        M = None
        landmark_path = self.landmark_paths[dataset_num][index].strip()
        landmarks, label = self.get_landmarks(landmark_path, image_path)
        wrapped_img, M = self.affine_align(img, landmarks)
        M = torch.from_numpy(M).float()
        wrapped_img = wrapped_img.transpose(2, 0, 1) / 255.0
        wrapped_img = torch.from_numpy(wrapped_img).float()
        input_dict = {
            'image': wrapped_img,
            'param_path': param_path,
            'M': M,
            'path': image_path
        }
        # Give subclasses a chance to modify the final output
        self.postprocess(input_dict)
        return input_dict

    def postprocess(self, input_dict):
        # Hook for subclasses; default is a no-op passthrough.
        return input_dict

    def __len__(self):
        return self.dataset_size
| 2.15625 | 2 |
"""Learn Python the Hard Way, exercise 15: print the file named on the command
line, then prompt for a filename and print that file as well."""
from sys import argv

script, filename = argv

# Context managers guarantee both handles are closed; the original opened the
# files with open() and never called close(), leaking the handles.
with open(filename) as txt:
    print("Here is your file %r:" % filename)
    print(txt.read())

print("Type the filename again:")
file_again = input("> ")

with open(file_again) as txt_again:
    print(txt_again.read())
| 3.8125 | 4 |
# scripts/egl/gtf_wrapper.py
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
from itertools import imap, chain
from common import *
from enums import enumValue
from library import getExtOnlyIface
import khr_util.format
from khr_util.format import indentLines
def getMangledName (funcName):
	# Wrapped entry points must come from the EGL API ("egl" prefix);
	# the wrapper name just swaps the prefix for "eglw".
	prefix, rest = funcName[:3], funcName[3:]
	assert prefix == "egl"
	return "eglw" + rest
def commandAliasDefinition (command):
	# #define mapping the public EGL name onto its eglw wrapper.
	mangled = getMangledName(command.name)
	return "#define\t%s\t%s" % (command.name, mangled)
def commandWrapperDeclaration (command):
	# C prototype for the wrapper: <type> TAB <name> TAB (<params>);
	paramList = ", ".join(param.declaration for param in command.params)
	return "%s\t%s\t(%s);" % (command.type, getMangledName(command.name), paramList)
NATIVE_TYPES = [
"EGLNativeWindowType",
"EGLNativeDisplayType",
"EGLNativePixmapType",
]
def commandWrapperDefinition (command):
	"""Generate the C++ body of one eglw wrapper function.

	The wrapper fetches the thread-local library and forwards the call;
	doubled braces in the template are literal braces for str.format().
	NOTE(review): the template's internal indentation was reconstructed
	from context (source was whitespace-mangled) — verify against upstream.
	"""
	template = """
{returnType} {mangledName} ({paramDecls})
{{
	const eglw::Library* egl = eglw::getCurrentThreadLibrary();
	if (!egl)
		return{defaultReturn};
	{maybeReturn}egl->{memberName}({arguments});
}}"""
	# Native handle types are passed through the library interface as void*.
	arguments = []
	for param in command.params:
		if param.type in NATIVE_TYPES:
			arguments.append("(void*)" + param.name)
		else:
			arguments.append(param.name)
	return template.format(
		returnType = command.type,
		mangledName = "eglw" + command.name[3:],
		paramDecls = commandParams(command),
		defaultReturn = " " + getDefaultReturn(command) if command.type != 'void' else "",
		maybeReturn = "return " if command.type != 'void' else "",
		memberName = getFunctionMemberName(command.name),
		arguments = ", ".join(arguments))
def getDefaultReturn (command):
	# Value returned by a wrapper when no library is bound.
	# glGetError is special-cased (kept from the GL variant of this script);
	# every other non-void command returns a zero cast to its return type.
	if command.name == "glGetError":
		return "GL_INVALID_OPERATION"
	assert command.type != 'void'
	return "(%s)0" % command.type
# Re-exported helper used by commandWrapperDefinition above.
# BUG FIX: this requires "import khr_util.format" at the top of the file; the
# original module only did "from khr_util.format import indentLines", which
# does not bind the package name "khr_util", so this line raised NameError at
# import time.
commandParams = khr_util.format.commandParams
def enumDefinitionC (enum):
	# C #define line for one EGL enum: "#define NAME<TAB>VALUE".
	parts = (enum.name, enumValue(enum))
	return "#define %s\t%s" % parts
def gen (registry):
	"""Generate the eglw wrapper .inl sources from the EGL XML registry.

	VERSION, EXTENSIONS, EGL_WRAPPER_DIR, getInterface, getDefaultInterface,
	genCommandList and writeInlFile come from the star-import of ``common``.
	"""
	noExtIface = getInterface(registry, 'egl', VERSION)
	extOnlyIface = getExtOnlyIface(registry, 'egl', EXTENSIONS)
	defaultIface = getDefaultInterface()
	# Aliases + prototypes go into a single aligned block in eglwApi.inl.
	defines = imap(commandAliasDefinition, defaultIface.commands)
	prototypes = imap(commandWrapperDeclaration, defaultIface.commands)
	src = indentLines(chain(defines, prototypes))
	writeInlFile(os.path.join(EGL_WRAPPER_DIR, "eglwApi.inl"), src)
	writeInlFile(os.path.join(EGL_WRAPPER_DIR, "eglwEnumsC.inl"), indentLines(map(enumDefinitionC, defaultIface.enums)))
	genCommandList(noExtIface, commandWrapperDefinition, EGL_WRAPPER_DIR, "eglwImpl.inl")
	genCommandList(extOnlyIface, commandWrapperDefinition, EGL_WRAPPER_DIR, "eglwImplExt.inl", True)
| 1.90625 | 2 |
"""
Django settings for YtManager project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
import logging
from os.path import dirname as up
#
# Basic Django stuff
#
ALLOWED_HOSTS = ['*']
SESSION_COOKIE_AGE = 3600 * 30 # one month
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'dynamic_preferences',
'dynamic_preferences.users.apps.UserPreferencesConfig',
'YtManagerApp.apps.YtManagerAppConfig',
'crispy_forms',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'YtManager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'dynamic_preferences.processors.global_preferences',
],
},
},
]
WSGI_APPLICATION = 'YtManager.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Thumbnails
THUMBNAIL_SIZE_VIDEO = (410, 230)
THUMBNAIL_SIZE_SUBSCRIPTION = (250, 250)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Misc Django stuff
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOG_FORMAT = '%(asctime)s|%(process)d|%(thread)d|%(name)s|%(filename)s|%(lineno)d|%(levelname)s|%(message)s'
CONSOLE_LOG_FORMAT = '%(asctime)s | %(name)s | %(filename)s:%(lineno)d | %(levelname)s | %(message)s'
#
# Directories
#
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
PROJECT_ROOT = up(up(os.path.dirname(__file__))) # Project root
BASE_DIR = up(os.path.dirname(__file__)) # Base dir of the application
CONFIG_DIR = os.getenv("YTSM_CONFIG_DIR", os.path.join(PROJECT_ROOT, "config"))
DATA_DIR = os.getenv("YTSM_DATA_DIR", os.path.join(PROJECT_ROOT, "data"))
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static")
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
#
# Defaults
#
_DEFAULT_DEBUG = False

# SECURITY WARNING: this fallback is only used when no SecretKey is set in
# config.ini or YTSM_SECRET_KEY; never rely on it in production.
# BUG FIX: the original line read ``_DEFAULT_SECRET_KEY = <KEY>'`` — a syntax
# error left by secret redaction; restored as a plain placeholder string.
_DEFAULT_SECRET_KEY = '<KEY>'

# Fallback database: a SQLite file inside the data directory.
_DEFAULT_DATABASE = {
    'ENGINE': 'django.db.backends.sqlite3',
    'NAME': os.path.join(DATA_DIR, 'ytmanager.db'),
    'HOST': None,
    'USER': None,
    'PASSWORD': None,
    'PORT': None,
}
CONFIG_ERRORS = []
CONFIG_WARNINGS = []
# These are just to make inspector happy, they will be set in the load_config_ini() method
DEBUG = None
SECRET_KEY = None
DATABASES = None
LOG_LEVEL = None
#
# Config parser options
#
CFG_PARSER_OPTS = {
'PROJECT_ROOT': PROJECT_ROOT,
'BASE_DIR': BASE_DIR,
'CONFIG_DIR': CONFIG_DIR,
'DATA_DIR': DATA_DIR,
}
#
# Load globals from config.ini
#
def get_global_opt(name, cfgparser, env_variable=None, fallback=None, boolean=False, integer=False):
    """
    Reads a configuration option, in the following order:
    1. environment variable
    2. config parser ([global] section of config.ini)
    3. fallback

    :param name: option name inside the [global] section
    :param cfgparser: ConfigParser instance for config.ini
    :param env_variable: environment variable checked before the config file
    :param fallback: value returned when the option is set nowhere
    :param boolean: interpret the value as a boolean
    :param integer: interpret the value as an integer
    :return: the resolved option value
    """
    # Get from environment variable
    if env_variable is not None:
        value = os.getenv(env_variable)
        if value is not None and boolean:
            return value.lower() in ['true', 't', 'on', 'yes', 'y', '1']
        elif value is not None and integer:
            try:
                return int(value)
            except ValueError:
                # Bad integer in the environment: record a warning and fall
                # through to the config file / fallback below.
                CONFIG_WARNINGS.append(f'Environment variable {env_variable}: value must be an integer value!')
        elif value is not None:
            return value

    # Get from config parser
    if boolean:
        try:
            return cfgparser.getboolean('global', name, fallback=fallback, vars=CFG_PARSER_OPTS)
        except ValueError:
            CONFIG_WARNINGS.append(f'config.ini file: Value set for option global.{name} is not valid! '
                                   f'Valid options: true, false, on, off.')
            return fallback

    if integer:
        try:
            return cfgparser.getint('global', name, fallback=fallback, vars=CFG_PARSER_OPTS)
        except ValueError:
            CONFIG_WARNINGS.append(f'config.ini file: Value set for option global.{name} must be an integer number! ')
            return fallback

    return cfgparser.get('global', name, fallback=fallback, vars=CFG_PARSER_OPTS)
def load_config_ini():
    """Load config.ini (plus environment overrides) into the module-level
    settings globals: DEBUG, SECRET_KEY, DATABASES and LOG_LEVEL.

    Problems are accumulated in CONFIG_ERRORS / CONFIG_WARNINGS instead of
    raising, so the app can surface them in the UI.
    """
    from configparser import ConfigParser
    from YtManagerApp.utils.extended_interpolation_with_env import ExtendedInterpolatorWithEnv
    import dj_database_url

    try:
        os.makedirs(DATA_DIR, exist_ok=True)
        logging.info(f"Using data directory {DATA_DIR}")
    except OSError as e:
        print(f'CRITICAL ERROR! Cannot create data directory {DATA_DIR}! {e}', file=sys.stderr)
        return

    cfg = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolatorWithEnv())
    cfg_file = os.path.join(CONFIG_DIR, "config.ini")
    read_ok = cfg.read([cfg_file])
    if cfg_file not in read_ok:
        CONFIG_ERRORS.append(f'Configuration file {cfg_file} could not be read! Please make sure the file is in the '
                             'right place, and it has read permissions.')

    # Debug
    global DEBUG
    DEBUG = get_global_opt('Debug', cfg, env_variable='YTSM_DEBUG', fallback=_DEFAULT_DEBUG, boolean=True)

    # Secret key
    # SECURITY WARNING: keep the secret key used in production secret!
    global SECRET_KEY
    SECRET_KEY = get_global_opt('SecretKey', cfg, env_variable='YTSM_SECRET_KEY', fallback=_DEFAULT_SECRET_KEY)

    # Database: a single DatabaseURL wins over the individual settings.
    global DATABASES
    DATABASES = {
        'default': _DEFAULT_DATABASE
    }
    if cfg.has_option('global', 'DatabaseURL'):
        DATABASES['default'] = dj_database_url.parse(cfg.get('global', 'DatabaseURL', vars=CFG_PARSER_OPTS),
                                                     conn_max_age=600)
    else:
        DATABASES['default'] = {
            'ENGINE': get_global_opt('DatabaseEngine', cfg,
                                     env_variable='YTSM_DB_ENGINE', fallback=_DEFAULT_DATABASE['ENGINE']),
            'NAME': get_global_opt('DatabaseName', cfg,
                                   env_variable='YTSM_DB_NAME', fallback=_DEFAULT_DATABASE['NAME']),
            'HOST': get_global_opt('DatabaseHost', cfg,
                                   env_variable='YTSM_DB_HOST', fallback=_DEFAULT_DATABASE['HOST']),
            'USER': get_global_opt('DatabaseUser', cfg,
                                   env_variable='YTSM_DB_USER', fallback=_DEFAULT_DATABASE['USER']),
            'PASSWORD': get_global_opt('DatabasePassword', cfg,
                                       env_variable='YTSM_DB_PASSWORD', fallback=_DEFAULT_DATABASE['PASSWORD']),
            'PORT': get_global_opt('DatabasePort', cfg,
                                   env_variable='YTSM_DB_PORT', fallback=_DEFAULT_DATABASE['PORT']),
        }

    # Log settings
    global LOG_LEVEL
    log_level_str = get_global_opt('LogLevel', cfg, env_variable='YTSM_LOG_LEVEL', fallback='INFO')

    try:
        LOG_LEVEL = getattr(logging, log_level_str)
    except AttributeError:
        CONFIG_WARNINGS.append(f'Invalid log level {log_level_str}. '
                               f'Valid options are: DEBUG, INFO, WARN, ERROR, CRITICAL.')
        # BUG FIX: the original printed "Invalid log level " + LOG_LEVEL, but
        # LOG_LEVEL is still None (or a logging constant) here, so the string
        # concatenation itself raised TypeError; report the offending string.
        print("Invalid log level " + log_level_str)
        LOG_LEVEL = logging.INFO
load_config_ini()
| 1.820313 | 2 |
from nltk.tokenize import RegexpTokenizer
import re
from bs4 import BeautifulSoup
from urllib2 import urlopen
import num2words
from decimal import *
import matplotlib.pyplot as plt
from math import *
import numpy as np
#pdb.set_trace()
# The commented code is incase you want to download the text from the web
#url = "http://see.stanford.edu/materials/icsppcs107/transcripts/ProgrammingParadigms-Lecture01.html"
#request = urllib.request.Request(url)
#html = urlopen(url).read().decode('cp1252').encode('utf-8')
#raw = BeautifulSoup(html).get_text()
#print("===========================================================================================================")
#print(raw)
#print(html)
def ConvertNum2Words(text):
    """Replace every integer in *text* with its English spelling (num2words).

    Numbers are substituted largest-first so e.g. "123" is rewritten before a
    later pass for "12" or "3" could clobber part of it.
    """
    p = re.compile('\d+')
    List = p.findall(text)
    #convert integers
    for i in range(0,len(List)):
        List[i] = int(float(List[i]))
    #sort in descending order
    List.sort(reverse=True)
    #convert it back to String :p
    for i in range(0,len(List)):
        List[i] = str(List[i])
    for item in List:
        print item
        replace = num2words.num2words(int(float(item)))
        print replace
        # NOTE(review): ``item`` is used as a regex pattern (digits only, so
        # safe) and replaces every occurrence at once; duplicate numbers in
        # List therefore trigger redundant passes.
        text = re.sub(item,replace,text)
    # text = re.sub(item,num2words.num2words(int(float(item))),text)
    # print num2words.num2words(int(float(item)))
    return text
# Ordered rewrite rules: contraction expansion first, then symbol spelling.
replacement_patterns = [
    (r'won\'t', 'will not'),
    (r'can\'t', 'cannot'),
    (r'I\'m', 'I am'),
    (r'ain\'t', 'is not'),
    (r'(\w+)\'ll', '\g<1> will'),
    (r'(\w+)n\'t', '\g<1> not'),
    (r'(\w+)\'ve', '\g<1> have'),
    (r'(\w+)\'s', '\g<1> is'),
    (r'(\w+)\'re', '\g<1> are'),
    (r'(\w+)\'d', '\g<1> would'),
    (r'\+',' plus '),
    (r'=',' equals '),
    (r'\*',' star '),
    (r'&',' ampersand '),
    (r'\$',' dollars ')
    # (r'[0-9]+', num2words.num2words(int(float('\g<1>')))),
]


class RegexpReplacer(object):
    """Applies an ordered list of (regex, replacement) rewrites to text."""

    def __init__(self, patterns=replacement_patterns):
        # Pre-compile every pattern once so replace() can be called many times.
        compiled = []
        for regex, repl in patterns:
            compiled.append((re.compile(regex), repl))
        self.patterns = compiled

    def replace(self, text):
        """Return *text* with every pattern substituted, in list order."""
        result = text
        for pattern, repl in self.patterns:
            result = re.sub(pattern, repl, result)
        return result
# --- Load and normalise the lecture transcript ---------------------------
replacer = RegexpReplacer()
raw = open('lecture1').read()
# Replace UTF-8 punctuation (curly quote, dashes) with ASCII equivalents.
raw = re.sub('\xe2\x80\x99','\'',raw)
raw = re.sub('\xe2\x80\x9c','\"',raw)
raw = re.sub('\xe2\x80\x9d','\"',raw)
raw = re.sub('\xe2\x80\x90','-',raw)
raw = re.sub('\xe2\x80\x91','-',raw)
raw = re.sub('\xe2\x80\x92','-',raw)
raw = re.sub('\xe2\x80\x93','-',raw)
raw = re.sub('\xe2\x80\x94','-',raw)
raw = re.sub('\xe2\x80\x95','-',raw)
raw = re.sub('ProgrammingParadigms','Programming Paradigm',raw)
#print(raw)
# Expand contractions / symbols, then spell numbers out as words.
raw = replacer.replace(raw)
raw = ConvertNum2Words(raw)
#print(raw)
#f = open('lecture1-clean','w')
#print >>f,raw
#f.close()
#tokenizer = RegexpTokenizer(r'\b[w+]+\b')
# Tokens are digit runs, letter runs, or a dollar sign; everything uppercased.
tokenizer = RegexpTokenizer(r'\d+|[a-zA-Z]+|\$')
lecture = (tokenizer.tokenize(raw))
for i in range(0,len(lecture)):
    lecture[i] = lecture[i].upper()
WordSet = (set(lecture))
vocab = open('switchboard_extracted').readlines()
# NOTE(review): len(vocab)-1 leaves the newline on the LAST vocab entry
# unstripped — likely an off-by-one.
for i in range(0,len(vocab)-1):
    vocab[i] = vocab[i].rstrip('\n')
#Start with the standard vocab, when OOV encountered, increase OOV count and add in the vocab. Give OOV rate after every word=OOV count/total words seen till now or will be seen (the total number of words in array).
#i/p=lecture array, vocab, timetaken to add OOV in lecture
def CalculateOOVRateJeff1(array,vocab,TimeTakenToAddOOV,FileToWriteOOVRates):
print "entered function"
NumberOfTokensSeen = 0
NumberOfOOVSeen = 0
OOVRateArray = {}
OOVArray = {}
OOVRateArray[0] = FindOOVRateOffset(array,vocab,0,len(array))
OOVArray[0] = 'NA'
for i in range(1,len(array)):
NumberOfTokensSeen = NumberOfTokensSeen + 1
flag1 = 0
flag2 = 0
print "Processing token", array[i]
if array[i] not in vocab:
OOVArray[i] = array[i]
print "OOV encountered", array[i]
NumberOfOOVSeen = NumberOfOOVSeen + 1
flag1 = 1
else:
OOVArray[i] = 'NA'
print "We are good, OOV not encountered"
if (((i-TimeTakenToAddOOV) > 0) and (OOVArray[i-TimeTakenToAddOOV] is not 'NA')):
vocab.append(OOVArray[i-TimeTakenToAddOOV])
print "OOV added:", OOVArray[i-TimeTakenToAddOOV]
flag2 = 1
# OOVRateArray[i] = Decimal(NumberOfOOVSeen)/Decimal(NumberOfTokensSeen)
if (flag1 == 1 or flag2 == 1):
OOVRateArray[i] = FindOOVRateOffset(array[i+1:len(array)],vocab,NumberOfOOVSeen,len(array))
else:
OOVRateArray[i] = OOVRateArray[i-1]
f = open(FileToWriteOOVRates,'w')
for item in OOVRateArray:
print>>f,OOVRateArray[item]*100
f.close()
for item in OOVArray:
print(OOVArray[item])
def FindOOVRateOffset(array, vocab, Offset, LengthOfTheWholeThing):
    """Print and return (OOV count in *array* + *Offset*) divided by
    *LengthOfTheWholeThing*, as a Decimal."""
    NumberOfOOV = 0
    for token in array:
        if token not in vocab:
            NumberOfOOV += 1
    rate = Decimal(NumberOfOOV + Offset) / LengthOfTheWholeThing
    print('OOV rate:')
    print(rate)
    return rate
#Start with the standard vocab, when OOV encountered, increase OOV count and add in the vocab. Give OOV after every word=OOV count/total words seen till now.
#i/p=lecture array, vocab, timetaken to add OOV in lecture
def CalculateOOVRateJeff(array,vocab,TimeTakenToAddOOV,FileToWriteOOVRates):
print "entered function"
NumberOfTokensSeen = 0
NumberOfOOVSeen = 0
OOVRateArray = {}
OOVArray = {}
for i in range(0,len(array)):
NumberOfTokensSeen = NumberOfTokensSeen + 1
print "Processing token", array[i]
if array[i] not in vocab:
OOVArray[i] = array[i]
print "OOV encountered", array[i]
NumberOfOOVSeen = NumberOfOOVSeen + 1
else:
OOVArray[i] = 'NA'
print "We are good, OOV not encountered"
if (((i-TimeTakenToAddOOV) >= 0) and (OOVArray[i-TimeTakenToAddOOV] is not 'NA')):
vocab.append(OOVArray[i-TimeTakenToAddOOV])
print "OOV added:", array[i-TimeTakenToAddOOV]
OOVRateArray[i] = Decimal(NumberOfOOVSeen)/Decimal(NumberOfTokensSeen)
f = open(FileToWriteOOVRates,'w')
for item in OOVRateArray:
print>>f,OOVRateArray[item]*100
f.close()
for item in OOVArray:
print(OOVArray[item])
def FindOOVRate(array, vocab):
    """Print and return the fraction of tokens in *array* missing from
    *vocab*, as a Decimal."""
    oov_count = sum(1 for token in array if token not in vocab)
    rate = Decimal(oov_count) / Decimal(len(array))
    print('OOV rate:')
    print(rate)
    return rate
#Function: Processes tokens linearly, adds OOV to the vocab and calculates the new OOV rate as and when the OOV token is encountered.
#i/p = token array, File to write instantaneous OOV rate array to.
#o/p = No return
def CrunchInstantaneous(FileToWriteOOVRates,array):
    """Instantaneous OOV curve: as soon as an OOV token is seen it is added
    to the module-level ``vocab`` list and the overall OOV rate of *array*
    is recomputed; one percentage per token is written to the file.
    Mutates the global ``vocab``.
    """
    OOVRateArray = {}
    OOVRateArray[0] = (FindOOVRate(array,vocab))
    for i in range(1,len(array)):
        print("processing token")
        print(array[i])
        if array[i] in vocab:
            OOVRateArray[i] = OOVRateArray[i-1]
        else:
            #add OOV to vocab then calculate OOV rate for the remaining words in the array
            vocab.append(array[i])
            print("New Vocab added:")
            print(vocab[-1])
            OOVRateArray[i] = (FindOOVRate(array,vocab))
    f = open(FileToWriteOOVRates,'w')
    # NOTE(review): plain dict iteration is arbitrary-order on Python 2, so
    # the rates may be written out of sequence.
    for item in OOVRateArray:
        print>>f,(OOVRateArray[item]*100)
    f.close()
# CrucnhInterval will take an array (which usually a part of the token array) collect OOVs, add them to vocab
def CrunchInterval(array):
    """Append every token of *array* missing from the module-level ``vocab``
    to ``vocab`` (no rate is computed here).
    NOTE(review): the loop starts at index 1, so element 0 is never checked —
    probably unintentional.
    """
    for i in range(1,len(array)):
        print("processing token")
        print(array[i])
        if array[i] not in vocab:
            #add OOV to vocab
            vocab.append(array[i])
            print("New Vocab added:")
            print(vocab[-1])
#Take the token array, divide it into parts, do CrunchInterval for each part, return the OOVRate array which will look like a staircase wil be written to a file of your choice
def CrunchStepWise(array,StepWidth,FileToWriteOOVRates):
    """Step-wise OOV curve: split *array* into chunks of StepWidth tokens,
    absorb each chunk's OOVs into ``vocab`` via CrunchInterval, and write the
    resulting staircase of rates (one percentage per token) to the file.
    NOTE(review): ``len(array)/StepWidth`` relies on Python 2 integer
    division; under Python 3 it yields a float and range() would fail.
    """
    OOVRateArray = {}
    ChunkOOV = FindOOVRate(array,vocab)
    NoOfSteps = len(array)/StepWidth
    LastStepWidth = len(array) % StepWidth
    for i in range(0,(NoOfSteps)):
        print("===============================")
        print(StepWidth*i)
        print(StepWidth*(i+1))
        print("===============================")
        # Every position in this chunk gets the rate computed BEFORE the
        # chunk's OOVs were absorbed.
        for j in range(StepWidth*i,StepWidth*(i+1)):
            OOVRateArray[j] = ChunkOOV
        CrunchInterval(array[StepWidth*i:StepWidth*(i+1)])
        ChunkOOV = FindOOVRate(array,vocab)
    print("======================================")
    print(StepWidth*(NoOfSteps))
    print(len(array))
    print(ChunkOOV)
    print("======================================")
    # Trailing partial chunk keeps the final rate.
    for k in range(StepWidth*(NoOfSteps),len(array)):
        print "I am adding ",ChunkOOV," to ",k,"index"
        OOVRateArray[k] = ChunkOOV
    print "length of OOV array is",len(OOVRateArray)
    f = open(FileToWriteOOVRates,'w')
    print("length of OOVRate array is ", len(OOVRateArray))
    # NOTE(review): arbitrary dict order on Python 2 — values may be written
    # out of index order.
    for item in OOVRateArray:
        print "the followong item indexs are being written to file ",item
        print>>f,OOVRateArray[item]*100
    f.close()
#CrunchInstantaneous('RateDenoisedData',lecture)
#CrunchStepWise(lecture,500,'rate_500_only_words')
#print(lecture)
#print(vocab)
#CalculateOOVRateJeff(lecture,vocab,600,'rate_jeff_600')
#CalculateOOVRateJeff1(lecture,vocab,6000,'rate_jeff1_6000')
#get a list of OOV with in the order that they appear
# Collect each distinct OOV (first-seen order) with its first position.
f = open('OOVList','w')
OOVArray = []
for i in range(0,len(lecture)):
    if lecture[i] not in vocab:
        if lecture[i] not in OOVArray:
            OOVArray.append(lecture[i])
            print >> f, lecture[i]
            print >> f, i
f.close()
UniqueOOV = OOVArray
OOVRepititionIndex = []
#once that is done, get the array indices of when an OOV occurs in lecture/raw
for i in range(0,len(UniqueOOV)):
    LocalIndexes = []
    for j in range(0,len(lecture)):
        if UniqueOOV[i] == lecture[j]:
            LocalIndexes.append(j)
    OOVRepititionIndex.append(LocalIndexes)
f = open('OOVMinimumDistance','w')
#show bursty nature of each OOV by printing the minimum and maximum spacing between these OOVs
for i in range(0,len(OOVRepititionIndex)):
    x = np.array(OOVRepititionIndex[i])
    if len(x) > 1:
        print len(x)
        x_diff = np.diff(x)
        print >> f,lecture[x[0]]
        print >> f,np.amin(x_diff)
f.close()
print "printing OOV array :D "
print OOVArray
print "to cross validate, I am printing the lecture index value of the first index stored in every nested list"
for i in range(0,len(OOVRepititionIndex)):
    print lecture[OOVRepititionIndex[i][0]]
print "another cross validation test, all the words that follow this should be the same"
for i in range(0,len(OOVRepititionIndex[0])):
    print lecture[OOVRepititionIndex[0][i]]
print "another cross validation test, all the words that follow this should be the same"
for i in range(0,len(OOVRepititionIndex[40])):
    print lecture[OOVRepititionIndex[40][i]]
# Scatter plot: one horizontal line of dots per OOV, x = occurrence index.
for i in range(0,len(OOVRepititionIndex)):
    x = OOVRepititionIndex[i]
    ValueFory = x[0]
    y = [ValueFory]*len(x)
    plt.plot(x,y,linestyle="",marker="o")
plt.show()
| 3.5625 | 4 |
'''
Created on 25 Jan 2018
@author: Slaporter
'''
import platform
def get_platform_info():
    """Return a human-readable description of the current platform."""
    return platform.platform()


if __name__ == '__main__':
    # BUG FIX: print the result — the original called the function and
    # discarded the return value, so the script produced no output.
    print(get_platform_info())
# act/qc/radiometer_tests.py
"""
act.qc.radiometer_tests
------------------------------
Tests specific to radiometers
"""
from scipy.fftpack import rfft, rfftfreq
import numpy as np
import xarray as xr
import pandas as pd
import datetime
import dask
import warnings
from act.utils.datetime_utils import determine_time_delta
from act.utils.geo_utils import get_sunrise_sunset_noon, is_sun_visible
def fft_shading_test(obj, variable='diffuse_hemisp_narrowband_filter4',
                     fft_window=30,
                     shad_freq_lower=[0.008, 0.017],
                     shad_freq_upper=[0.0105, 0.0195],
                     ratio_thresh=[3.15, 1.2],
                     time_interval=None, smooth_window=5, shading_thresh=0.4):
    """
    Function to test shadowband radiometer (MFRSR, RSS, etc) instruments
    for shading related problems. Adapted from the method defined in
    Alexandrov et al 2007 to process on a point by point basis using a
    window of data around that point for the FFT analysis.

    For ARM data, testing has found that this works the best on narrowband
    filter4 for MFRSR data.

    Need to run obj.clean.cleanup() ahead of time to ensure proper addition
    to the QC variable.

    Parameters
    ----------
    obj : xarray Dataset
        Data object
    variable : string
        Name of variable to process
    fft_window : int
        Number of samples to use in the FFT window. Default is +- 30 samples
        Note: this is +- so the full window will be double
    shad_freq_lower : list
        Lower frequency over which to look for peaks in FFT
    shad_freq_upper : list
        Upper frequency over which to look for peaks in FFT
    ratio_thresh : list
        Threshold for each freq window to flag data. I.e. if the peak is 3.15
        times greater than the surrounding area
    time_interval : float
        Sampling rate of the instrument
    smooth_window : int
        Number of samples to use in smoothing FFTs before analysis
    shading_thresh : float
        After smoothing, the value over which is considered a shading signal

    Returns
    -------
    obj : xarray Dataset
        Data object with the test added to the variable's QC companion and
        'fft' / 'fft_freq' diagnostic variables attached.

    References
    ----------
    Alexandrov, Mikhail & Kiedron, Peter & Michalsky, Joseph & Hodges, Gary
    & Flynn, Connor & Lacis, Andrew. (2007). Optical depth measurements by
    shadow-band radiometers and their uncertainties. Applied optics. 46.
    8027-38. 10.1364/AO.46.008027.
    """
    # Get time and data from variable
    time = obj['time'].values
    data = obj[variable].values
    if 'missing_value' in obj[variable].attrs:
        missing = obj[variable].attrs['missing_value']
    else:
        missing = -9999.

    # Get time interval between measurements
    if time_interval is None:
        dt = determine_time_delta(time)
    else:
        dt = time_interval

    # Compute the FFT for each point +- window samples
    task = []
    sun_up = is_sun_visible(latitude=obj['lat'].values, longitude=obj['lon'].values, date_time=time)
    for t in range(len(time)):
        sind = t - fft_window
        eind = t + fft_window
        if sind < 0:
            sind = 0
        if eind > len(time):
            eind = len(time)

        # Get data and remove all nan/missing values
        d = data[sind:eind]
        # BUG FIX: the original used ``np.isnan(d) is not True``, which is
        # always True for an ndarray (identity test against the True
        # singleton), so NaN samples were never filtered out of the FFT
        # window.  Use the element-wise mask instead.
        idx = ((d != missing) & ~np.isnan(d))
        index = np.where(idx)
        d = d[index]

        # Add to task for dask processing
        task.append(dask.delayed(fft_shading_test_process)(
            time[t], d,
            shad_freq_lower=shad_freq_lower,
            shad_freq_upper=shad_freq_upper,
            ratio_thresh=ratio_thresh,
            time_interval=dt,
            is_sunny=sun_up[t]))

    # Process using dask
    result = dask.compute(*task)

    # Run data through a rolling median to filter out singular
    # false positives
    shading = [r['shading'] for r in result]
    shading = pd.Series(shading).rolling(window=smooth_window, min_periods=1).median()

    # Find indices where shading is indicated
    idx = (np.asarray(shading) > shading_thresh)
    index = np.where(idx)

    # Add test to QC Variable
    desc = 'FFT Shading Test'
    obj.qcfilter.add_test(variable, index=index, test_meaning=desc)

    # Prepare frequency and fft variables for adding to object.
    # Rows shorter than the full window stay NaN-padded.
    fft = np.empty([len(time), fft_window * 2])
    fft[:] = np.nan
    freq = np.empty([len(time), fft_window * 2])
    freq[:] = np.nan
    for i, r in enumerate(result):
        dummy = r['fft']
        fft[i, 0:len(dummy)] = dummy
        dummy = r['freq']
        freq[i, 0:len(dummy)] = dummy

    attrs = {'units': '', 'long_name': 'FFT Results for Shading Test', 'upper_freq': shad_freq_upper,
             'lower_freq': shad_freq_lower}
    # NOTE: reuses (shadows) the ``fft_window`` parameter name as the new
    # coordinate DataArray.
    fft_window = xr.DataArray(range(fft_window * 2), dims=['fft_window'],
                              attrs={'long_name': 'FFT Window', 'units': '1'})
    da = xr.DataArray(fft, dims=['time', 'fft_window'], attrs=attrs, coords=[obj['time'], fft_window])
    obj['fft'] = da
    attrs = {'units': '', 'long_name': 'FFT Frequency Values for Shading Test'}
    da = xr.DataArray(freq, dims=['time', 'fft_window'], attrs=attrs, coords=[obj['time'], fft_window])
    obj['fft_freq'] = da

    return obj
def fft_shading_test_process(time, data, shad_freq_lower=None,
                             shad_freq_upper=None, ratio_thresh=None,
                             time_interval=None, is_sunny=None):
    """
    Processing function to do the FFT calculations/thresholding

    Parameters
    ----------
    time : datetime
        Center time of calculation used for calculating sunrise/sunset
        (currently unused in the body; kept for interface compatibility)
    data : list
        Data for run through fft processing
    shad_freq_lower : list
        Lower limits of freqencies to look for shading issues
    shad_freq_upper : list
        Upper limits of freqencies to look for shading issues
    ratio_thresh : list
        Thresholds to apply, corresponding to frequencies chosen
    time_interval : float
        Time interval of data
    is_sunny : bool
        Whether the sun is visible at this time; when falsy the test is
        skipped and a no-shading result is returned immediately.

    Returns
    -------
    dict
        ``{'shading': 0 or 1, 'fft': masked FFT magnitudes,
        'freq': corresponding frequencies}``.  (Note: despite the older
        description, the return value is this dict, not a bare int.)
    """
    # Night-time samples cannot exhibit sun shading; return NaN spectra.
    if not is_sunny:
        return {'shading': 0, 'fft': [np.nan] * len(data), 'freq': [np.nan] * len(data)}
    # FFT Algorithm
    fftv = abs(rfft(data))
    freq = rfftfreq(fftv.size, d=time_interval)
    # Mask out FFT magnitudes above 1 (and their frequencies) with NaN;
    # RuntimeWarnings from NaN comparisons are suppressed.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        idx = (fftv > 1.)
    index = np.where(idx)
    fftv[index] = np.nan
    freq[index] = np.nan
    # Return if FFT is empty
    if len(fftv) == 0:
        return {'shading': 0, 'fft': [np.nan] * len(data), 'freq': [np.nan] * len(data)}
    # Commented out as it seems to work better without smoothing
    # fftv=pd.DataFrame(data=fftv).rolling(min_periods=3,window=3,center=True).mean().values.flatten()
    ratio = []
    # Calculates the ratio (size) of the peaks in the FFT to the surrounding
    # data
    wind = 3
    # Run through each frequency band to look for peaks and compare the
    # peak value against the surrounding values.
    for i in range(len(shad_freq_lower)):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            idx = np.logical_and(freq > shad_freq_lower[i],
                                 freq < shad_freq_upper[i])
        index = np.where(idx)
        if len(index[0]) == 0:
            continue
        peak = max(fftv[index])
        index = index[0]
        # Clamp the comparison windows to the array bounds.
        sind = index[0] - wind
        if sind < 0:
            sind = 0
        eind = index[-1] + wind
        if eind > len(fftv):
            eind = len(fftv)
        if len(range(sind, index[0])) == 0 or len(range(index[-1], eind)) == 0:
            # Band touches the array edge: no surrounding data to compare.
            ratio.append(0.0)
        else:
            # Calculates to the left/right of each peak
            peak_l = max(fftv[range(sind, index[0])])
            peak_r = max(fftv[range(index[-1], eind)])
            ratio.append(peak / np.mean([peak_l, peak_r]))
    # Checks ratios against thresholds for each freq range; shading is
    # flagged only when the first band passes and, if present, the second
    # band passes too.
    shading = 0
    if len(ratio) > 0:
        pass1 = False
        pass2 = False
        if ratio[0] > ratio_thresh[0]:
            pass1 = True
        if len(ratio) > 1:
            if ratio[1] > ratio_thresh[1]:
                pass2 = True
        else:
            pass2 = True
        if pass1 and pass2:
            shading = 1
    return {'shading': shading, 'fft': fftv, 'freq': freq}
| 2.3125 | 2 |
scripts/update_static_files.py | krausf/microservice-repository-template | 0 | 12757258 | <gh_stars>0
#!/usr/bin/env python3
# Copyright 2021 Universität Tübingen, DKFZ and EMBL
# for the German Human Genome-Phenome Archive (GHGA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script moves all files considered static (as defined in `../.static_files`)
from the microservice template repository over to this repository
"""
import urllib.parse
from pathlib import Path
import requests
# Repository layout and remote-template location used by run().
REPO_ROOT_DIR = Path(__file__).parent.parent.resolve()
STATIC_FILE_LIST = REPO_ROOT_DIR / ".static_files"
RAW_TEMPLATE_URL = (
    "https://raw.githubusercontent.com/ghga-de/microservice-repository-template/main/"
)


def run():
    """Overwrite local static files with their template-repo versions.

    Reads ``.static_files`` (one relative path per line; blank lines and
    ``#`` comments are skipped), downloads each file from the template
    repository and writes it over the local copy.  Files whose download
    does not return HTTP 200 are reported and skipped.
    """
    print("Updating static file from template repo:")
    with open(STATIC_FILE_LIST, "r", encoding="utf8") as list_file:
        for line in list_file:
            relative_file_path = line.rstrip("\n")
            # Skip blank lines and comment lines in the list file.
            if relative_file_path == "" or relative_file_path.startswith("#"):
                continue
            print(f" - {relative_file_path}")
            remote_file_url = urllib.parse.urljoin(RAW_TEMPLATE_URL, relative_file_path)
            # ROBUSTNESS FIX: without a timeout a stalled connection would
            # hang the script forever.
            remote_file_request = requests.get(remote_file_url, timeout=60)
            if remote_file_request.status_code != 200:
                print(
                    f"WARNING: request to remote file {remote_file_url} returned "
                    f"non-200 status code: {remote_file_request.status_code}"
                    f"\nWARNING: ignoring file: {relative_file_path}"
                )
                continue
            remote_file_content = remote_file_request.text
            local_file_path = REPO_ROOT_DIR / Path(relative_file_path)
            with open(local_file_path, "w", encoding="utf8") as local_file:
                local_file.write(remote_file_content)


if __name__ == "__main__":
    run()
| 2.234375 | 2 |
src/mipi-code2vec/mipi_websocket/client_sample.py | ngocpq/mipi | 0 | 12757259 | <reponame>ngocpq/mipi
#!/usr/bin/env python
# WS client example
import asyncio
import json
import websockets
async def hello():
    """Connect to the local demo server, send one sample patch message
    and print the server's reply."""
    uri = "ws://localhost:8765"
    async with websockets.connect(uri) as websocket:
        # Sample payload: one patched method with its original/patched
        # source and the developer-intention keywords.
        msg = json.dumps({'PatchId': 'patch_sample01',
                          'BugId': 'sample',
                          'PatchedMethods': [{'DevIntention': 'get|max',
                                              'OrgCode': 'public void getMax(int a, int b){ if (a<=b) return a; else return b;}',
                                              'PatCode': 'public void getMax(int a, int b){ if (a>b) return a; else return b;}'}]})
        await websocket.send(msg)
        print(f"> {msg}")
        greeting = await websocket.recv()
        print(f"< {greeting}")


# FIX: use asyncio.run() instead of the deprecated
# get_event_loop().run_until_complete() pattern; this also removes the
# stray tokens that had corrupted the original line.
asyncio.run(hello())
directRel/htmlExtractor.py | cqkenuo/Vulnerability-Knowledge-Graph | 1 | 12757260 | from bs4 import BeautifulSoup as bs
import os
import pandas as pd
import re
import csv
import io
# Accumulated scrape results, keyed by html file name.
result = {}
# NOTE(review): this single dict is assigned to every result entry in the
# loop below, so all entries share it -- verify this is intended.
new = {}
# NOTE(review): shadows the built-in ``id``.
id = 0
'''
result = {id:{'title':' ', 'abstract':' ', 'key_wordsZ':{'a','b','c'}, 'key_wordsE':{'a','b','c'},'authors': {'author1'} }}
'''
p = os.walk('知网html')  # path of the folder containing the CNKI html files
output_route = '../output/'
# Write the CSV header rows (Chinese keywords, English keywords,
# authors, title+abstract).
with open(output_route + "中文关键词.csv", "w", newline='') as csvfile:  # write the CSV header row
    writer = csv.writer(csvfile)
    writer.writerow(['title', 'key_words'])
with open(output_route + "英文关键词.csv", "w", newline='') as csvfile:  # write the CSV header row
    writer = csv.writer(csvfile)
    writer.writerow(['title', 'key_words'])
with open(output_route + "作者.csv", "w", newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['title', 'authors'])
with open(output_route + "标题摘要.csv", "w", newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['title', 'abstract'])
# Walk every html file, parse title/abstract/keywords/authors with
# BeautifulSoup and append the fields to the four output CSVs.
for path, dir_list, file_list in p:
    for file_name in file_list:
        print(file_name)
        if file_name[-5:] == '.html':
            try:
                htmlfile = io.open('./知网html/' + file_name, 'r', encoding='utf-8')
                htmlhandle = htmlfile.read()
                soup = bs(htmlhandle, 'lxml')
                result[file_name] = new  # record for this document (shares the module-level ``new`` dict)
                # result = pd.DataFrame({}, index=[id])  # (disabled) store this document's info
                # author = {}
                result[file_name]['author'] = []  # authors (originally {author: institute}); multiple entries
                result[file_name]['title'] = ''  # article title
                result[file_name]['abstract'] = ''  # abstract text
                result[file_name]['key_wordsZ'] = []  # Chinese keywords (multiple)
                result[file_name]['key_wordsE'] = []  # English keywords (multiple)
                # author['Institute'] = ''
                content = soup.find('div', class_='top-title')  # parse the title
                # print(content)
                print(content.h1.text)
                result[file_name]['title'] = content.h1.text.replace('"','').replace('\n', '').replace('\r', '')
                data = soup.find('div', class_='data')  # parse the abstract
                # print(data)
                print(data.p.text)
                result[file_name]['abstract'] = data.p.text.replace('\n', '').replace('\r', '')
                data = soup.find('div', id='a_keywords')  # parse the Chinese keywords
                da = soup.find('div', id="a_keywordsEN")  # ...and the English keywords
                # print(data)
                a = data.p.text
                b = da.p.text
                print(a)
                print(b)
                #print(a.splitlines()[1])
                for i in a.splitlines():
                    result[file_name]['key_wordsZ'].append(i)
                # NOTE(review): on Python 3 filter() returns a one-shot
                # iterator; it is consumed by the CSV loop below and by
                # print(result) -- verify this runs under the intended
                # interpreter.
                result[file_name]['key_wordsZ'] = filter(None, result[file_name]['key_wordsZ'])
                for i in b.splitlines():
                    result[file_name]['key_wordsE'].append(i)
                result[file_name]['key_wordsE'] = filter(None, result[file_name]['key_wordsE'])
                content = soup.find('div', class_='content')  # parse the authors
                print(content.h2.text)
                for i in content.h2.text.splitlines():
                    result[file_name]['author'].append(i)
                    print(i)
                result[file_name]['author'] = filter(None, result[file_name]['author'])
                #new['author'] = content.h2.text
                # Append this document's keywords/authors/abstract to the CSVs.
                with open(output_route + "中文关键词.csv", "a", newline='') as csvfile:  # append the keywords
                    writer = csv.writer(csvfile)
                    for i in result[file_name]['key_wordsZ']:
                        # NOTE(review): ``list`` shadows the builtin.
                        list = [str(result[file_name]['title']).replace('\n', '').replace('\r', ''), str(i)]
                        writer.writerow(list)
                with open(output_route + "英文关键词.csv", "a", newline='') as csvfile:  # append the keywords
                    writer = csv.writer(csvfile)
                    for i in result[file_name]['key_wordsE']:
                        list = [str(result[file_name]['title']).replace('\n', '').replace('\r', ''),str(i)]
                        writer.writerow(list)
                with open(output_route + "作者.csv", "a", newline='') as csvfile:
                    writer = csv.writer(csvfile)
                    for i in result[file_name]['author']:
                        list = [str(result[file_name]['title']).replace('\n', '').replace('\r', ''), str(i)]
                        writer.writerow(list)
                with open(output_route + "标题摘要.csv", "a", newline='') as csvfile:
                    writer = csv.writer(csvfile)
                    list = [result[file_name]['title'].replace('\n', '').replace('\r', ''), result[file_name]['abstract']]
                    writer.writerow(list)
                print(result)
            # NOTE(review): bare except hides *all* errors (including
            # typos); consider narrowing to the expected parse errors.
            except:
                print('异常')
                print(result)
        else:
            pass
# Post-process every output CSV: strip stray double quotes and semicolons
# left over from the scraped text (rewrites each file in place).
# FIX: the last stanza of the original was corrupted by fused metadata
# tokens (a syntax error); the four duplicated read/rewrite stanzas are
# also folded into one loop.
for _csv_name in ["中文关键词.csv", "英文关键词.csv", "作者.csv", "标题摘要.csv"]:
    with open(output_route + _csv_name, "r") as csvfile:
        content = csvfile.read()
    with open(output_route + _csv_name, "w", newline='') as csvfile:
        csvfile.write(content.replace('"', '').replace(';', ''))
djexperience/core/urls.py | rg3915/django-experience-2016 | 1 | 12757261 | from django.conf.urls import url
from djexperience.core.views import home, about
# Core site routes: home page and about page.
urlpatterns = [
    url(r'^$', home, name='home'),
    url(r'^about/$', about, name='about'),
]
| 1.6875 | 2 |
src/py/vo.py | snwjas/RandomDesktopBackground-WEBUI | 17 | 12757262 | <filename>src/py/vo.py<gh_stars>10-100
# -*-coding:utf-8-*-
"""
视图对象
@author <NAME>
"""
import traceback
import typing
from enum import Enum
from starlette.responses import JSONResponse
class ConfigVO(object):
    """View object describing a single configuration entry."""
    key: str                # configuration key
    value: typing.Any       # current value
    pytype: str             # name of the Python type of ``value`` (e.g. 'int')
    defaults: typing.Any    # default value (pickle-serialized byte string)
    comment: str            # human-readable description
    enable: bool            # whether the entry is active
    utime: str              # last-update timestamp
    ctime: str              # creation timestamp

    def __init__(self,
                 key: str = None,
                 value: typing.Any = None,
                 enable: bool = None
                 ):
        self.key = key
        self.value = value
        # BUG FIX: ``enable`` used to be unconditionally overwritten with
        # True at the end of __init__, silently discarding the caller's
        # argument.  Treat None as "use the default (True)" and honour an
        # explicit value otherwise.
        self.enable = True if enable is None else enable
        # Store the type *name* (a str) -- the ``pytype`` annotation above
        # is corrected accordingly.
        self.pytype = type(value).__name__
        self.defaults = None
        self.comment = ''
class StatusVO(object):
    """View object reporting whether the background task is running."""
    running: bool  # True while the task is active
class RS(Enum):
    """Custom response statuses; each member is a (status code, message) pair."""
    NOT_CURRENT_CLIENT = (401, '非最近请求客户端')
    NOT_FOUND = (404, '请求目标不存在')
class E(Exception):
    """
    Custom application exception with a fluent builder API.

    Each builder method fills in status/message/data and returns ``self``
    so the exception can be built and raised in one expression, e.g.
    ``raise E().rs(RS.NOT_FOUND)``.
    """
    # Class-level defaults; instances overwrite them via the builders.
    status = 400
    message = 'error'
    cause = ''
    data = None

    def rs(self, rs: RS, data: typing.Any = None):
        # Populate from a predefined (code, message) RS enum member.
        self.status = rs.value[0]
        self.message = rs.value[1]
        self.data = data
        return self

    def ret(self, status: int = 400, message: str = None, data: typing.Any = None):
        # Populate from explicit status/message values.
        self.status = status
        self.message = message
        self.data = data
        return self

    def e(self, e: Exception, message: str = None, data: typing.Any = None):
        # Wrap another exception, keeping its first traceback frame as the
        # cause and defaulting the message to the wrapped exception's text.
        tb = traceback.format_tb(e.__traceback__)
        if tb:
            self.cause = tb[0]
        self.message = message if message else e.__str__()
        self.data = data
        return self
class R(object):
    """
    Custom JSON response builder.

    BUG FIX: ``content`` used to be a single *class-level* dict mutated in
    place by every method, so interleaved/concurrent calls could clobber
    each other's status/message/data.  Each call now builds a fresh payload
    stored on the instance; the class-level default is kept (read-only) for
    backward compatibility with code that inspects ``R.content``.
    """
    content = {
        'status': 200,
        'message': 'success',
        'data': None
    }

    def _respond(self, status: int, message, data: typing.Any = None):
        # Build a per-call payload on the instance and wrap it.
        self.content = {
            'status': status,
            'message': message,
            'data': data
        }
        return JSONResponse(content=self.content)

    def ok(self, message: str = 'success', data: typing.Any = None):
        return self._respond(200, message, data)

    def err(self, message: str = 'fail', data: typing.Any = None):
        return self._respond(400, message, data)

    def rs(self, rs: RS, data: typing.Any = None):
        # Populate from a predefined (code, message) RS enum member.
        return self._respond(rs.value[0], rs.value[1], data)

    def ret(self, status: int, message, data: typing.Any = None):
        return self._respond(status, message, data)

    def e(self, e: E):
        # Serialize a custom E exception into a response.
        return self._respond(e.status, e.message, e.data)
| 2.640625 | 3 |
bll/plugins/cinder_service.py | papodaca/opsconsole-server | 0 | 12757263 | <reponame>papodaca/opsconsole-server<gh_stars>0
# (c) Copyright 2015-2017 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
from cinderclient.v2 import client as cinderclient
from bll import api
from bll.common.util import get_conf
from bll.plugins import service
class CinderSvc(service.SvcBase):
    """
    This class deals with all the interaction with cinder client including
    cinder volume type creation and mapping the volume backends to volume
    types.

    The ``target`` value for this plugin is ``cinder``. See :ref:`rest-api`
    for a full description of the request and response formats.
    """

    def __init__(self, *args, **kwargs):
        """
        Initializer for the Cinder Client Service
        """
        super(CinderSvc, self).__init__(*args, **kwargs)
        # Authenticate through the keystone session held by the token
        # helper; the endpoint type is configurable and defaults to the
        # internal endpoint.
        self.cinder_client = cinderclient.Client(
            session=self.token_helper.get_session(),
            endpoint_type=get_conf("services.endpoint_type",
                                   default="internalURL"),
            user_agent=api.USER_AGENT)

    @service.expose(action="DELETE")
    def volume_type_delete(self):
        """
        Delete a volume type.

        Request format::

            "target": "cinder",
            "operation": "volume_type_delete",
            "action": "DELETE",
            "volume_type_id": "MYID"
        """
        vol_type = self.request[api.DATA]["volume_type_id"]
        response = self.cinder_client.volume_types.delete(vol_type)
        return response

    @service.expose(action="PUT")
    def volume_type_add(self):
        """
        Add a volume type with the given name.

        Request format::

            "target": "cinder",
            "operation": "volume_type_add",
            "action": "PUT",
            "volume_type": "MYTYPENAME"
        """
        vol_type = self.request[api.DATA]["volume_type"]
        volume_type = self.cinder_client.volume_types.create(vol_type)
        # Return a slim dict rather than the client object.
        response = {'id': volume_type.id, 'name': volume_type.name}
        return response

    @service.expose()
    def volume_type_list(self):
        """
        Return a list of volume types as an {id: name} mapping.

        Request format::

            "target": "cinder",
            "operation": "volume_type_list"
        """
        volume_types = self.cinder_client.volume_types.list()
        return {v.id: v.name for v in volume_types}

    @service.expose(action="PUT")
    def map_volume_backend(self):
        """
        Updates the ``volume_backend_name`` extra specs of a volume type to
        refer to the given backend name.

        Request format::

            "target": "cinder",
            "operation": "volume_type_list",
            "volume_type_id": "MYID",
            "backend_name": "MYBACKEND"
        """
        vol_type_id = self.request[api.DATA]["volume_type_id"]
        backend_name = self.request[api.DATA]["backend_name"]
        response = {}
        volume_type = self.cinder_client.volume_types.get(vol_type_id)
        resp = volume_type.set_keys({"volume_backend_name": backend_name})
        response[vol_type_id] = resp
        return response

    @classmethod
    def needs_services(cls):
        # This plugin requires the block-storage ("volume") service.
        return ['volume']
| 2.015625 | 2 |
analyze/index.py | Apareshka/ds_quietplanet | 0 | 12757264 | <filename>analyze/index.py
#!/usr/bin/env python3
from helpers.download import download
from analyze import compare_graphs
from pathlib import Path
import json
import csv
import os
import io
def transform_dataset(dataset: str, calculation_method: str, empty: float):
    """Reduce a CSV dataset string to a single number.

    Parameters
    ----------
    dataset : str
        Raw CSV text; NUL bytes are stripped before parsing.
    calculation_method : str
        Either ``"sum"`` or ``"average"``.
    empty : float
        Sentinel value marking missing cells; such cells are skipped.

    Returns
    -------
    float
        The sum or average of all non-missing values (0.0 when there are
        no values).

    Raises
    ------
    Exception
        If ``calculation_method`` is not a supported method.
    """
    # ROBUSTNESS FIX: validate the method up front -- the old code only
    # raised when the first value was encountered, so an invalid method
    # was silently accepted for an empty dataset.
    if calculation_method not in ("average", "sum"):
        raise Exception("Unexpected calculation method \"{}\"".format(calculation_method))
    reader = csv.reader(io.StringIO(dataset.replace("\0", "")), delimiter=",")
    total = 0.0
    count = 0
    for row in reader:
        for cell in row:
            if not cell:
                continue
            value = float(cell)
            if value == empty:
                # Skip the missing-value sentinel.
                continue
            total += value
            count += 1
    if calculation_method == "average":
        return total / count if count else 0.0
    return total
def bake_chart_data(dataset_folder: str, chart: dict, output_file: str = "chart.json", write: bool = True):
    """Aggregate every CSV in *dataset_folder* into a chart dictionary.

    Each CSV is reduced to one number via the chart's ``calculate`` method
    ("sum" or "average"); the resulting list becomes the chart's ``data``.
    When the chart has a ``before`` entry, the previous-period datasets in
    the ``before`` sub-folder are baked recursively and a similarity score
    between the two curves is attached.

    When *write* is True the result is dumped to *output_file* and None is
    returned; otherwise the dictionary is returned (used by the recursive
    'before' call).  NOTE(review): the function mutates ``chart["before"]``
    in place -- verify callers do not reuse the chart dict.
    """
    out_dictionary = {
        "description": chart["description"],
        "legend": chart["legend"],
        "metadata": chart["metadata"],
        "data": [],
        "before": None,
        "section": "No section" if not "section" in chart.keys() else chart["section"]
    }
    calculation_method = chart["calculate"]
    files = [file for file in os.listdir(dataset_folder) if file.lower().endswith(".csv")]
    for file in files:
        with open(os.path.join(dataset_folder, file), "r") as csv:
            dataset = csv.read()
            # 99999.0 is the datasets' missing-value sentinel.
            out_dictionary["data"].append(transform_dataset(dataset, calculation_method, 99999.0))
    if "before" in chart.keys():
        # Bake the previous period with the same legend/description,
        # compare the two curves, then strip the duplicated fields before
        # attaching the previous result.
        prev_chart = chart["before"]
        prev_chart["legend"] = chart["legend"]
        prev_chart["description"] = chart["description"]
        prev_chart["calculate"] = calculation_method
        prev_chart = bake_chart_data(os.path.join(dataset_folder, "before"), prev_chart, write = False)
        similarity = compare_graphs(out_dictionary, prev_chart)
        prev_chart.pop("legend", None)
        prev_chart.pop("description", None)
        prev_chart.pop("before", None)
        out_dictionary["before"] = prev_chart
        out_dictionary["similarity"] = similarity
    if write:
        with open(output_file, "w") as output:
            json.dump(out_dictionary, output)
    else:
        return out_dictionary
def load_charts_list(charts_json_path: str):
    """Parse and return the chart definitions stored at *charts_json_path*."""
    with open(charts_json_path) as handle:
        return json.load(handle)
def download_chart_datasets(datasets: list, output_folder: str):
    """Download every dataset URL into *output_folder* as 1.csv, 2.csv, ..."""
    url_to_name = {url: "{}.csv".format(index)
                   for index, url in enumerate(datasets, start=1)}
    download(url_to_name, output_folder)
def generate_chart(chart: dict, output_folder: str, print_info: bool = False):
    """Download a chart's datasets (and its 'before' datasets, if any) and
    bake the combined chart.json into *output_folder*.

    Parameters
    ----------
    chart : dict
        Chart definition; must contain 'name' and 'datasets', optionally
        a 'before' sub-chart.
    output_folder : str
        Destination directory for the datasets and chart.json.
    print_info : bool
        When True, print progress messages.
    """
    download_chart_datasets(chart["datasets"], output_folder)
    # BUG FIX: the progress guards used to test ``if print:`` -- the
    # built-in function, which is always truthy -- so messages were printed
    # regardless of the flag.  They now honour ``print_info``.
    if print_info:
        print("Main datasets for chart \"{}\" downloaded successfully".format(chart["name"]))
    if "before" in chart.keys():
        download_chart_datasets(chart["before"]["datasets"], Path(output_folder) / "before")
        if print_info:
            print("Datasets for previous results for chart \"{}\" downloaded successfully".format(chart["name"]))
    bake_chart_data(output_folder, chart, os.path.join(output_folder, "chart.json"))
    if print_info:
        print("Baking \"{}\" chart data finished successfully".format(chart["name"]))
def generate_all_graphs(print_info: bool = False):
    """Regenerate every chart listed in ``graphs/graphs.json``.

    For each chart a sub-folder of ``graphs/`` named after the chart is
    created (if needed) and filled with its datasets and baked chart.json.
    When *print_info* is True, progress separators and a final summary are
    printed.
    """
    charts = load_charts_list(Path(__file__).absolute().parent.parent / "graphs" / "graphs.json")
    for chart in charts:
        # One output folder per chart, named after the chart.
        output_folder = Path(__file__).absolute().parent.parent / "graphs" / (chart["name"])
        output_folder.mkdir(exist_ok=True)
        generate_chart(chart, str(output_folder), print_info)
        if print_info:
            print("---")
    if print_info:
        print("Graphs generated successfully")
    return


if __name__ == "__main__":
    # Verbose output when run as a script.
    generate_all_graphs(True)
| 2.765625 | 3 |
hydragnn/utils/distributed.py | allaffa/GCNN | 2 | 12757265 | <reponame>allaffa/GCNN
##############################################################################
# Copyright (c) 2021, Oak Ridge National Laboratory #
# All rights reserved. #
# #
# This file is part of HydraGNN and is distributed under a BSD 3-clause #
# license. For the licensing terms see the LICENSE file in the top-level #
# directory. #
# #
# SPDX-License-Identifier: BSD-3-Clause #
##############################################################################
import os
import re
import torch
import torch.distributed as dist
def parse_slurm_nodelist(nodelist):
    """
    Parse SLURM_NODELIST env string to get list of nodes.
    Usage example:
        parse_slurm_nodelist(os.environ["SLURM_NODELIST"])
    Input examples:
        "or-condo-g04"
        "or-condo-g[05,07-08,13]"
        "or-condo-g[05,07-08,13],or-condo-h[01,12]"
    """
    nodes = list()
    for block, _ in re.findall(r"([\w-]+(\[[\d\-,]+\])*)", nodelist):
        match = re.match(r"^(?P<prefix>[\w\-]+)\[(?P<group>.*)\]", block)
        if match is None:
            # Plain single-node entry (no bracket group).
            nodes.append(block)
            continue
        prefix = match.group("prefix")
        for part in match.group("group").split(","):
            if "-" not in part:
                # Single numeric suffix.
                nodes.append(prefix + part)
                continue
            # Numeric range; pad each number to the width of the start value.
            lo, hi = re.match(r"(\d+)-(\d+)", part).groups()
            width_fmt = "%%0%dd" % (len(lo))
            nodes.extend(prefix + width_fmt % n
                         for n in range(int(lo), int(hi) + 1))
    return nodes
def init_comm_size_and_rank():
    """Determine (world_size, world_rank) from the launcher's environment.

    Checks OpenMPI variables first (Summit), then SLURM (CADES); when
    neither launcher is detected, falls back to a single-process world
    (size 1, rank 0).
    """
    env = os.environ
    if env.get("OMPI_COMM_WORLD_SIZE") and env.get("OMPI_COMM_WORLD_RANK"):
        ## Summit
        return int(env["OMPI_COMM_WORLD_SIZE"]), int(env["OMPI_COMM_WORLD_RANK"])
    if env.get("SLURM_NPROCS") and env.get("SLURM_PROCID"):
        ## CADES
        return int(env["SLURM_NPROCS"]), int(env["SLURM_PROCID"])
    ## Fall back to default (sequential run)
    return 1, 0
def get_comm_size_and_rank():
    """Return (world_size, world_rank) from torch.distributed, or (1, 0)
    when the default process group has not been initialized."""
    if not dist.is_initialized():
        return 1, 0
    return int(dist.get_world_size()), int(dist.get_rank())
def setup_ddp():
    """Initialize DDP.

    Chooses NCCL when CUDA is available (falling back to Gloo), derives
    world size/rank from the launcher environment, resolves the master
    address from the scheduler's node list, exports the rendezvous
    environment variables, and creates the default process group.

    Returns (world_size, world_rank).
    """
    if dist.is_nccl_available() and torch.cuda.is_available():
        backend = "nccl"
    elif torch.distributed.is_gloo_available():
        backend = "gloo"
    else:
        raise RuntimeError("No parallel backends available")

    world_size, world_rank = init_comm_size_and_rank()

    ## Default setting (single-node rendezvous)
    master_addr = "127.0.0.1"
    master_port = "8889"

    if os.getenv("LSB_HOSTS") is not None:
        ## source: https://www.olcf.ornl.gov/wp-content/uploads/2019/12/Scaling-DL-on-Summit.pdf
        ## The following is Summit specific
        master_addr = os.environ["LSB_HOSTS"].split()[1]
    elif os.getenv("SLURM_NODELIST") is not None:
        ## The following is CADES specific: first node hosts the master.
        master_addr = parse_slurm_nodelist(os.environ["SLURM_NODELIST"])[0]

    try:
        # Export the rendezvous variables expected by torch.distributed.
        os.environ["MASTER_ADDR"] = master_addr
        os.environ["MASTER_PORT"] = master_port
        os.environ["WORLD_SIZE"] = str(world_size)
        os.environ["RANK"] = str(world_rank)

        if not dist.is_initialized():
            dist.init_process_group(
                backend=backend, rank=int(world_rank), world_size=int(world_size)
            )
    except KeyError:
        print("DDP has to be initialized within a job - Running in sequential mode")

    return world_size, world_rank
| 2.46875 | 2 |
tests/test_cms_auth.py | Allen7D/mini-shop-server | 533 | 12757266 | # _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/4/13.
"""
from app import create_app
from tests.utils import get_authorization
__author__ = 'Allen7D'

# Application instance shared by the test-client calls below.
app = create_app()


def test_create_auth_list():
    """POST /cms/auth/append: grant auth ids 1-3 to group 5 and print the reply."""
    with app.test_client() as client:
        rv = client.post('/cms/auth/append', headers={
            'Authorization': get_authorization()
        }, json={
            'group_id': 5,
            'auth_ids': [1, 2, 3]
        })
        json_data = rv.get_json()
        print(json_data)
def test_delete_auth_list():
    """POST /cms/auth/remove: revoke auth ids 1-3 from group 5 and print the reply."""
    with app.test_client() as client:
        rv = client.post('/cms/auth/remove', headers={
            'Authorization': get_authorization()
        }, json={
            'group_id': 5,
            'auth_ids': [1, 2, 3]
        })
        json_data = rv.get_json()
        print(json_data)


# Run both scenarios when the module is executed directly.
test_create_auth_list()
test_delete_auth_list()
| 2.328125 | 2 |
dependencies/panda/Panda3D-1.10.0-x64/direct/leveleditor/AnimMgrBase.py | CrankySupertoon01/Toontown-2 | 1 | 12757267 | <filename>dependencies/panda/Panda3D-1.10.0-x64/direct/leveleditor/AnimMgrBase.py
"""
Defines AnimMgrBase
"""
import os, wx, math
from direct.interval.IntervalGlobal import *
from panda3d.core import VBase3,VBase4
import ObjectGlobals as OG
import AnimGlobals as AG
class AnimMgrBase:
""" AnimMgr will create, manage, update animations in the scene """
    def __init__(self, editor):
        # Owning level editor; used to reach the object manager and UI.
        self.editor = editor
        self.graphEditorCounter = 0

        # {property key: [key-frame info, ...]} for every animated property.
        self.keyFramesInfo = {}
        # {key: curve animation info (node uid, curve uid, time)}.
        self.curveAnimation = {}

        # Properties tweened with a plain lerp function (no animation curve).
        self.lerpFuncs={
            'H' : self.lerpFuncH,
            'P' : self.lerpFuncP,
            'R' : self.lerpFuncR,
            'SX' : self.lerpFuncSX,
            'SY' : self.lerpFuncSY,
            'SZ' : self.lerpFuncSZ,
            'CR' : self.lerpFuncCR,
            'CG' : self.lerpFuncCG,
            'CB' : self.lerpFuncCB,
            'CA' : self.lerpFuncCA
        }

        # Position properties which have animation curves:
        # name -> [plain lerp func, curve lerp func].
        self.curveLerpFuncs={
            'X' : [ self.lerpFuncX, self.lerpCurveFuncX ],
            'Y' : [ self.lerpFuncY, self.lerpCurveFuncY ],
            'Z' : [ self.lerpFuncZ, self.lerpCurveFuncZ ]
        }
def reset(self):
self.keyFramesInfo = {}
self.curveAnimation = {}
def generateKeyFrames(self):
#generate keyFrame list
self.keyFrames = []
for property in self.keyFramesInfo.keys():
for frameInfo in self.keyFramesInfo[property]:
frame = frameInfo[AG.FRAME]
exist = False
for keyFrame in self.keyFrames:
if frame == keyFrame:
exist = True
break
if exist == False:
self.keyFrames.append(frame)
    def generateSlope(self, list):
        """Fill the IN/OUT handler slope of every key frame in *list* for
        the animation curve; each slope is a [d_frame, d_value] pair.

        With exactly two key frames both ends share the single segment's
        slope; with three or more, interior frames use the slope between
        their two neighbours and the end frames use their adjacent segment.
        NOTE(review): the parameter shadows the built-in ``list``.
        """
        listLen = len(list)
        if listLen == 2:
            # Single segment: one shared slope for both end points.
            slope =[float(list[1][AG.FRAME]-list[0][AG.FRAME]),(float(list[1][AG.VALUE])-float(list[0][AG.VALUE]))]
            list[0][AG.INSLOPE] = slope
            list[1][AG.INSLOPE] = slope
            list[0][AG.OUTSLOPE] = list[0][AG.INSLOPE]
            list[1][AG.OUTSLOPE] = list[1][AG.INSLOPE]
            return
        if listLen >= 3:
            # End points: slope towards/from the adjacent key frame.
            list[0][AG.INSLOPE] = [float(list[1][AG.FRAME] - list[0][AG.FRAME]),(float(list[1][AG.VALUE]) - float(list[0][AG.VALUE]))]
            list[0][AG.OUTSLOPE] = list[0][AG.INSLOPE]
            # Interior points: central difference between the neighbours.
            for i in range(1, listLen-1):
                list[i][AG.INSLOPE] = [float(list[i+1][AG.FRAME] - list[i-1][AG.FRAME]),(float(list[i+1][AG.VALUE]) - float(list[i-1][AG.VALUE]))]
                list[i][AG.OUTSLOPE] = list[i][AG.INSLOPE]
            list[listLen-1][AG.INSLOPE] = [float(list[listLen-1][AG.FRAME] - list[listLen-2][AG.FRAME]),(float(list[listLen-1][AG.VALUE]) - float(list[listLen-2][AG.VALUE]))]
            list[listLen-1][AG.OUTSLOPE] = list[listLen-1][AG.INSLOPE]
            return
def removeAnimInfo(self, uid):
for property in self.keyFramesInfo.keys():
if property[AG.UID] == uid:
del self.keyFramesInfo[property]
self.generateKeyFrames()
if self.editor.mode == self.editor.ANIM_MODE:
self.editor.ui.animUI.OnPropKey()
    def singleCurveAnimation(self, nodePath, curve, time):
        """Build and return a Sequence that moves *nodePath* along the
        motion-path *curve*, sampling the rope into *time* points and
        orienting the node towards the next sample at each step
        (one interval per segment, 1/24 s each)."""
        rope = curve[OG.OBJ_NP]
        self.points = rope.getPoints(time)
        self.hprs = []
        # Scratch node used only to compute the heading towards the next point.
        temp = render.attachNewNode("temp")
        temp.setHpr(0,0,0)
        for i in range(len(self.points)-1):
            temp.setPos(self.points[i])
            temp.lookAt(self.points[i+1])
            hpr = temp.getHpr()
##            self.hprs.append(hpr)
            # Flip the heading 180 degrees so the node faces along the path.
            self.hprs.append(VBase3(hpr[0]+180,hpr[1],hpr[2]))
        # Reuse the last computed orientation for the final point.
        self.hprs.append(self.hprs[len(self.points)-2])

        curveSequenceName = str(nodePath[OG.OBJ_UID])+' '+str(curve[OG.OBJ_UID])+' '+str(time)
        self.curveSequence = Sequence(name = curveSequenceName)
        for i in range(len(self.points)-1):
            myLerp = LerpPosHprInterval(nodePath[OG.OBJ_NP], float(1)/float(24), self.points[i+1], self.hprs[i+1], self.points[i], self.hprs[i])
            self.curveSequence.append(myLerp)
        return self.curveSequence
    def createParallel(self, startFrame, endFrame):
        """Assemble and return the master Parallel containing every curve,
        actor, normal-property, and curve-driven key-frame animation for
        the frame range [startFrame, endFrame]."""
        # NOTE(review): this list assignment is immediately replaced by the
        # Parallel below; it looks redundant.
        self.parallel = []
        self.parallel = Parallel(name="Current Parallel")

        self.createCurveAnimation(self.parallel)
        self.createActorAnimation(self.parallel, startFrame, endFrame)
        self.createKeyFrameAnimation(self.parallel, startFrame, endFrame)
        self.createCurveKeyFrameAnimation(self.parallel, startFrame, endFrame)
        return self.parallel
    def createCurveAnimation(self, parallel):
        """Append one motion-path Sequence to *parallel* for every
        registered curve animation (node uid, curve uid, time)."""
        for key in self.curveAnimation:
            curveInfo = self.curveAnimation[key]
            nodePath = self.editor.objectMgr.findObjectById(curveInfo[AG.NODE])
            curve = self.editor.objectMgr.findObjectById(curveInfo[AG.CURVE])
            time = curveInfo[AG.TIME]
            sequence = self.singleCurveAnimation(nodePath, curve, time)
            parallel.append(sequence)
    def createActorAnimation(self, parallel, startFrame, endFrame):
        """Append a looping ActorInterval for every actor in the scene,
        stretched over the whole frame range at 24 fps."""
        self.editor.objectMgr.findActors(render)
        for actor in self.editor.objectMgr.Actor:
            actorAnim = os.path.basename(actor[OG.OBJ_ANIM])
            myInterval = ActorInterval(actor[OG.OBJ_NP], actorAnim, loop=1, duration = float(endFrame-startFrame+1)/float(24))
            parallel.append(myInterval)
    def createKeyFrameAnimation(self, parallel, startFrame, endFrame):
        """Append key-frame animation for every *normal* property (all
        properties except the curve-driven X/Y/Z positions).

        Each matching property becomes one Sequence that holds the first
        key value before its frame, lerps between consecutive key frames,
        then holds the last key value to the end (frames converted to
        seconds at 24 fps)."""
        self.editor.objectMgr.findNodes(render)
        for node in self.editor.objectMgr.Nodes:
            for property in self.keyFramesInfo.keys():
                if property[AG.UID] == node[OG.OBJ_UID] and property[AG.PROP_NAME] != 'X' and property[AG.PROP_NAME] != 'Y' and property[AG.PROP_NAME] != 'Z':
                    mysequence = Sequence(name = node[OG.OBJ_UID])
                    keyFramesInfo = self.keyFramesInfo[property]
                    # Single key frame: hold its value over the full range.
                    if len(keyFramesInfo) == 1:
                        myLerp = LerpFunc(self.lerpFuncs[property[AG.PROP_NAME]],fromData=float(keyFramesInfo[0][AG.VALUE]),toData=float(keyFramesInfo[0][AG.VALUE]),duration = float(endFrame-startFrame)/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                        mysequence.append(myLerp)
                        parallel.append(mysequence)
                    # Multiple key frames: hold, lerp each segment, hold.
                    if len(keyFramesInfo) != 1:
                        myLerp = LerpFunc(self.lerpFuncs[property[AG.PROP_NAME]],fromData=float(keyFramesInfo[0][AG.VALUE]),toData=float(keyFramesInfo[0][AG.VALUE]),duration = float(keyFramesInfo[0][AG.FRAME]-startFrame)/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                        mysequence.append(myLerp)
                        for key in range(0,len(keyFramesInfo)-1):
                            myLerp = LerpFunc(self.lerpFuncs[property[AG.PROP_NAME]],fromData=float(keyFramesInfo[key][AG.VALUE]),toData=float(keyFramesInfo[key+1][AG.VALUE]),duration = float(keyFramesInfo[key+1][AG.FRAME]-keyFramesInfo[key][AG.FRAME])/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                            mysequence.append(myLerp)
                        myLerp = LerpFunc(self.lerpFuncs[property[AG.PROP_NAME]],fromData=float(keyFramesInfo[len(keyFramesInfo)-1][AG.VALUE]),toData=float(keyFramesInfo[len(keyFramesInfo)-1][AG.VALUE]),duration = float(endFrame-keyFramesInfo[len(keyFramesInfo)-1][AG.FRAME])/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                        mysequence.append(myLerp)
                        parallel.append(mysequence)
    def createCurveKeyFrameAnimation(self, parallel, startFrame, endFrame):
        """Append key-frame animation for the X/Y/Z properties that are
        driven by animation curves.

        With one or two key frames a plain lerp function is used; with
        three or more, each segment between key frames is interpolated
        along the baked curve (the property's curve lerp function)."""
        self.editor.objectMgr.findNodes(render)
        for node in self.editor.objectMgr.Nodes:
            for property in self.keyFramesInfo.keys():
                if property[AG.UID] == node[OG.OBJ_UID]:
                    if property[AG.PROP_NAME] == 'X' or property[AG.PROP_NAME] == 'Y' or property[AG.PROP_NAME] == 'Z':
                        mysequence = Sequence(name = node[OG.OBJ_UID])
                        keyFramesInfo = self.keyFramesInfo[property]
                        # Single key frame: hold its value over the range.
                        if len(keyFramesInfo) == 1:
                            myLerp = LerpFunc(self.curveLerpFuncs[property[AG.PROP_NAME]][0],fromData=float(keyFramesInfo[0][AG.VALUE]),toData=float(keyFramesInfo[0][AG.VALUE]),duration = float(endFrame-startFrame)/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                            mysequence.append(myLerp)
                            parallel.append(mysequence)
                        # Two key frames: plain lerp (no curve needed).
                        if len(keyFramesInfo) == 2:
                            myLerp = LerpFunc(self.curveLerpFuncs[property[AG.PROP_NAME]][0],fromData=float(keyFramesInfo[0][AG.VALUE]),toData=float(keyFramesInfo[0][AG.VALUE]),duration = float(keyFramesInfo[0][AG.FRAME]-startFrame)/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                            mysequence.append(myLerp)
                            for key in range(0,len(keyFramesInfo)-1):
                                self.keyFrameInfoForSingleLerp = keyFramesInfo
                                self.keyInfoForSingleLerp = key
                                myLerp = LerpFunc(self.curveLerpFuncs[property[AG.PROP_NAME]][0],fromData=float(keyFramesInfo[key][AG.VALUE]),toData=float(keyFramesInfo[key+1][AG.VALUE]),duration = float(keyFramesInfo[key+1][AG.FRAME]-keyFramesInfo[key][AG.FRAME])/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                                mysequence.append(myLerp)
                            myLerp = LerpFunc(self.curveLerpFuncs[property[AG.PROP_NAME]][0],fromData=float(keyFramesInfo[len(keyFramesInfo)-1][AG.VALUE]),toData=float(keyFramesInfo[len(keyFramesInfo)-1][AG.VALUE]),duration = float(endFrame-keyFramesInfo[len(keyFramesInfo)-1][AG.FRAME])/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                            mysequence.append(myLerp)
                            parallel.append(mysequence)
                        # Three or more: interpolate along the baked curve.
                        if len(keyFramesInfo) > 2:
                            # NOTE(review): ``keyFramesInfo[0][1]`` below differs
                            # from the ``[AG.VALUE]`` indexing used everywhere
                            # else -- verify whether AG.VALUE == 1 or this is a typo.
                            myLerp = LerpFunc(self.curveLerpFuncs[property[AG.PROP_NAME]][0],fromData=float(keyFramesInfo[0][AG.VALUE]),toData=float(keyFramesInfo[0][1]),duration = float(keyFramesInfo[0][AG.FRAME]-startFrame)/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                            mysequence.append(myLerp)
                            for key in range(0,len(keyFramesInfo)-1):
                                myLerp = LerpFunc(self.curveLerpFuncs[property[AG.PROP_NAME]][1],fromData=float(keyFramesInfo[key][AG.FRAME]),toData=float(keyFramesInfo[key+1][AG.FRAME]),duration = float(keyFramesInfo[key+1][AG.FRAME]-keyFramesInfo[key][AG.FRAME])/float(24),blendType = 'noBlend',extraArgs = [[node[OG.OBJ_NP], keyFramesInfo, key]])
                                mysequence.append(myLerp)
                            myLerp = LerpFunc(self.curveLerpFuncs[property[AG.PROP_NAME]][0],fromData=float(keyFramesInfo[len(keyFramesInfo)-1][AG.VALUE]),toData=float(keyFramesInfo[len(keyFramesInfo)-1][AG.VALUE]),duration = float(endFrame-keyFramesInfo[len(keyFramesInfo)-1][AG.FRAME])/float(24),blendType = 'noBlend',extraArgs = [node[OG.OBJ_NP]])
                            mysequence.append(myLerp)
                            parallel.append(mysequence)
def getPos(self, x, list, i):
#get the value from animation curve
x1 = float(list[i][AG.FRAME])
y1 = float(list[i][AG.VALUE])
x4 = float(list[i+1][AG.FRAME])
y4 = float(list[i+1][AG.VALUE])
t1x = list[i][AG.OUTSLOPE][0]
t1y = list[i][AG.OUTSLOPE][1]
t2x = list[i+1][AG.INSLOPE][0]
t2y = list[i+1][AG.INSLOPE][1]
x2 = x1 + (x4 - x1) / float(3)
scale1 = (x2 - x1) / t1x
y2 = y1 + t1y * scale1
x3 = x4 - (x4 - x1) / float(3)
scale2 = (x4 - x3) / t2x
y3 = y4 - t2y * scale2
ax = - float(1) * x1 + float(3) * x2 - float(3) * x3 + float(1) * x4
bx = float(3) * x1 - float(6) * x2 + float(3) * x3 + float(0) * x4
cx = - float(3) * x1 + float(3) * x2 + float(0) * x3 + float(0) * x4
dx = float(1) * x1 + float(0) * x2 - float(0) * x3 + float(0) * x4
ay = - float(1) * y1 + float(3) * y2 - float(3) * y3 + float(1) * y4
by = float(3) * y1 - float(6) * y2 + float(3) * y3 + float(0) * y4
cy = - float(3) * y1 + float(3) * y2 + float(0) * y3 + float(0) * y4
dy = float(1) * y1 + float(0) * y2 - float(0) * y3 + float(0) * y4
if ax == 0 and bx == 0 and cx == 0:
return 0
if ax == 0 and bx == 0 and cx != 0:
a = cx
b = dx-x
t = -b/a
y = ay * t*t*t + by * t*t + cy * t + dy
return y
if ax == 0 and bx!= 0:
a=bx
b=cx
c=dx-x
t=(-b+math.sqrt(b**2-4.0*a*c))/2*a
if t>=0 and t<=1:
y = ay * t*t*t + by * t*t + cy * t + dy
return y
else:
t=(-b-math.sqrt(b**2-4.0*a*c))/2*a
y = ay * t*t*t + by * t*t + cy * t + dy
return y
if ax != 0:
a = ax
b = bx
c = cx
d = dx - float(x)
t = self.calculateT(a, b, c, d, x)
y = ay * t*t*t + by * t*t + cy * t + dy
return y
def calculateT(self, a, b, c, d, x):
#Newton EQUATION
t = float(1)
t2 = t
t -= (a*t*t*t+b*t*t+c*t+d)/(float(3)*a*t*t+float(2)*b*t+c)
if abs(t-t2) <= 0.000001:
return t
else:
while abs(t - t2) > 0.000001:
t2 = t
t -= (a*t*t*t+b*t*t+c*t+d)/(float(3)*a*t*t+float(2)*b*t+c)
return t
def lerpFuncX(self,pos,np):
np.setX(pos)
def lerpFuncY(self,pos,np):
np.setY(pos)
def lerpFuncZ(self,pos,np):
np.setZ(pos)
def lerpCurveFuncX(self,t,extraArgs):
np = extraArgs[0]
pos = self.getPos(t, extraArgs[1], extraArgs[2])
np.setX(pos)
def lerpCurveFuncY(self,t,extraArgs):
np = extraArgs[0]
pos = self.getPos(t, extraArgs[1], extraArgs[2])
np.setY(pos)
def lerpCurveFuncZ(self,t,extraArgs):
np = extraArgs[0]
pos = self.getPos(t, extraArgs[1], extraArgs[2])
np.setZ(pos)
def lerpFuncH(self,angle,np):
np.setH(angle)
def lerpFuncP(self,angle,np):
np.setP(angle)
def lerpFuncR(self,angle,np):
np.setR(angle)
def lerpFuncSX(self,scale,np):
np.setSx(scale)
def lerpFuncSY(self,scale,np):
np.setSy(scale)
def lerpFuncSZ(self,scale,np):
np.setSz(scale)
def lerpFuncCR(self,R,np):
obj = self.editor.objectMgr.findObjectByNodePath(np)
r = obj[OG.OBJ_RGBA][0]
g = obj[OG.OBJ_RGBA][1]
b = obj[OG.OBJ_RGBA][2]
a = obj[OG.OBJ_RGBA][3]
self.colorUpdate(R,g,b,a,np)
def lerpFuncCG(self,G,np):
obj = self.editor.objectMgr.findObjectByNodePath(np)
r = obj[OG.OBJ_RGBA][0]
g = obj[OG.OBJ_RGBA][1]
b = obj[OG.OBJ_RGBA][2]
a = obj[OG.OBJ_RGBA][3]
self.colorUpdate(r,G,b,a,np)
def lerpFuncCB(self,B,np):
obj = self.editor.objectMgr.findObjectByNodePath(np)
r = obj[OG.OBJ_RGBA][0]
g = obj[OG.OBJ_RGBA][1]
b = obj[OG.OBJ_RGBA][2]
a = obj[OG.OBJ_RGBA][3]
self.colorUpdate(r,g,B,a,np)
def lerpFuncCA(self,A,np):
obj = self.editor.objectMgr.findObjectByNodePath(np)
r = obj[OG.OBJ_RGBA][0]
g = obj[OG.OBJ_RGBA][1]
b = obj[OG.OBJ_RGBA][2]
a = obj[OG.OBJ_RGBA][3]
self.colorUpdate(r,g,b,A,np)
def colorUpdate(self, r, g, b, a, np):
if base.direct.selected.last == None:
self.editor.objectMgr.updateObjectColor(r, g, b, a, np)
elif self.editor.objectMgr.findObjectByNodePath(np) == self.editor.objectMgr.findObjectByNodePath(base.direct.selected.last):
self.editor.ui.objectPropertyUI.propCR.setValue(r)
self.editor.ui.objectPropertyUI.propCG.setValue(g)
self.editor.ui.objectPropertyUI.propCB.setValue(b)
self.editor.ui.objectPropertyUI.propCA.setValue(a)
self.editor.objectMgr.updateObjectColor(r, g, b, a, np)
else:
self.editor.objectMgr.updateObjectColor(r, g, b, a, np)
| 2.0625 | 2 |
predavanje7/zadatak2.py | Miillky/uvod_u_programiranje | 0 | 12757268 | <reponame>Miillky/uvod_u_programiranje
# Definiran je rjecnik s parovima ime_osobe:pin_kartice rj = {"Branka":3241, "Stipe":5623, "Doris":3577, "Ana":7544}
# Napišite program koji od korisnika prima ime osobe, a zatim traži pin. Ukoliko je unesen ispravan pin, korisnik unosi novi pin koji se pohranjuje u rječniku
rj = {'Branka':3241,'Stipe':5623,'Doris':3577,'Ana':7544}
ime = str(input("Unesi ime: "))
pin = rj[ime.capitalize()]
pin_in = int(input("Unesite pin: "))
if pin == pin_in:
pin_in = int(input("Unesite novi PIN: "))
rj.update({ime.capitalize():pin_in})
else:
print("PIN neispravan!") | 3.71875 | 4 |
components/contrib/CatBoost/Train_classifier/from_CSV/component.py | Iuiu1234/pipelines | 2,860 | 12757269 | <reponame>Iuiu1234/pipelines
from kfp.components import InputPath, OutputPath, create_component_from_func
def catboost_train_classifier(
training_data_path: InputPath('CSV'),
model_path: OutputPath('CatBoostModel'),
starting_model_path: InputPath('CatBoostModel') = None,
label_column: int = 0,
loss_function: str = 'Logloss',
num_iterations: int = 500,
learning_rate: float = None,
depth: int = 6,
random_seed: int = 0,
cat_features: list = None,
text_features: list = None,
additional_training_options: dict = {},
):
'''Train a CatBoost classifier model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary CatBoostModel format.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
loss_function: The metric to use in training and also selector of the machine learning
problem to solve. Default = 'Logloss'
num_iterations: Number of trees to add to the ensemble.
learning_rate: Step size shrinkage used in update to prevents overfitting.
Default value is selected automatically for binary classification with other parameters set to default.
In all other cases default is 0.03.
depth: Depth of a tree. All trees are the same depth. Default = 6
random_seed: Random number seed. Default = 0
cat_features: A list of Categorical features (indices or names).
text_features: A list of Text features (indices or names).
additional_training_options: A dictionary with additional options to pass to CatBoostClassifier
Outputs:
model: Trained model in binary CatBoostModel format.
Annotations:
author: <NAME> <<EMAIL>>
'''
import tempfile
from pathlib import Path
from catboost import CatBoostClassifier, Pool
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
train_data = Pool(
training_data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoostClassifier(
iterations=num_iterations,
depth=depth,
learning_rate=learning_rate,
loss_function=loss_function,
random_seed=random_seed,
verbose=True,
**additional_training_options,
)
model.fit(
train_data,
cat_features=cat_features,
text_features=text_features,
init_model=starting_model_path,
#verbose=False,
#plot=True,
)
Path(model_path).parent.mkdir(parents=True, exist_ok=True)
model.save_model(model_path)
if __name__ == '__main__':
catboost_train_classifier_op = create_component_from_func(
catboost_train_classifier,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.23'],
annotations={
"author": "<NAME> <<EMAIL>>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Train_classifier/from_CSV/component.yaml",
},
)
| 3.125 | 3 |
Python/minimum-time-to-remove-all-cars-containing-illegal-goods.py | Priyansh2/LeetCode-Solutions | 4 | 12757270 | <reponame>Priyansh2/LeetCode-Solutions
# Time: O(n)
# Space: O(1)
# dp
class Solution(object):
def minimumTime(self, s):
"""
:type s: str
:rtype: int
"""
left = 0
result = left+(len(s)-0)
for i in xrange(1, len(s)+1):
left = min(left+2*(s[i-1] == '1'), i)
result = min(result, left+(len(s)-i))
return result
# Time: O(n)
# Space: O(n)
# dp
class Solution2(object):
def minimumTime(self, s):
"""
:type s: str
:rtype: int
"""
result, right = len(s), [0]*(len(s)+1)
for i in reversed(xrange(len(s))):
right[i] = min(right[i+1]+2*(s[i] == '1'), len(s)-i)
left = 0
result = left+right[0]
for i in xrange(1, len(s)+1):
left = min(left+2*(s[i-1] == '1'), i)
result = min(result, left+right[i])
return result
| 3.203125 | 3 |
examples/lib.title.py | frankier/python-mediawiki-utilities | 23 | 12757271 | """
Demonstrates title normalization and parsing.
"""
import sys
import os
sys.path.insert(0, os.path.abspath(os.getcwd()))
from mw.api import Session
from mw.lib import title
# Normalize titles
title.normalize("foo bar")
# > "Foo_bar"
# Construct a title parser from the API
api_session = Session("https://en.wikipedia.org/w/api.php")
parser = title.Parser.from_api(api_session)
# Handles normalization
parser.parse("user:epochFail")
# > 2, "EpochFail"
# Handles namespace aliases
parser.parse("WT:foobar")
# > 5, "Foobar"
| 2.546875 | 3 |
boboleetcode/Play-Leetcode-master/0896-Monotonic-Array/py-0896/Solution3.py | yaominzh/CodeLrn2019 | 2 | 12757272 | # Source : https://leetcode.com/problems/monotonic-array/
# Author : penpenps
# Time : 2019-07-29
from typing import List
# One-line solution
# Time Complexity: O(n)
# Space Complexity: O(1)
class Solution:
def isMonotonic(self, A: List[int]) -> bool:
return not {(x>y) - (y>x) for x, y in zip(A, A[1:])} >= {1, -1} | 3.65625 | 4 |
user/views.py | hp5441/mcq-test-software | 0 | 12757273 | <reponame>hp5441/mcq-test-software
from django.http.response import JsonResponse
from django.shortcuts import redirect, render
from django.contrib.auth import authenticate, login, logout
from user.forms import SignUpForm, SignInForm
def logout_view(request):
"""logout view"""
try:
logout(request)
except:
raise Exception("unable to logout")
else:
return redirect("/")
def redirect_view(request):
"""redirects user depending on user's privileges"""
if(request.user.is_authenticated):
if(request.user.is_teacher):
return redirect("/quiz/create-quiz/")
else:
return redirect(f"/quiz/{request.user.pk}/")
else:
return redirect("/signin/")
def sign_up(request):
"""both student's and teacher's sign up view using builtin model form"""
if request.user.is_authenticated:
return redirect("/")
context = {'form': SignUpForm}
return render(request, 'signin/sign_up.html', context)
def sign_in(request):
"""both student's and teacher's sign in view using builtin model form"""
if request.user.is_authenticated:
return redirect("/")
context = {'form': SignInForm}
return render(request, 'signin/sign_in.html', context)
def teacher_sign_up(request):
"""sign up post request validation view"""
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
print(request.POST)
teacher = form.save(commit=False)
teacher.is_teacher = True
teacher.save()
else:
return JsonResponse({"error": "details are invalid"}, safe=False)
return redirect("/")
def teacher_sign_in(request):
"""sign in post request validation view"""
if request.method == 'POST':
form = SignInForm(data=request.POST)
user = None
print(request.POST)
if form.is_valid():
try:
user = authenticate(request, username=request.POST.get(
'username'), password=request.POST.get('password'))
except:
return JsonResponse({"error": "user not found"}, safe=False)
if user is not None and user.is_teacher:
try:
login(request, user)
return redirect("/quiz/create-quiz/")
except:
return JsonResponse({"error": "unable to login"}, safe=False)
else:
return JsonResponse({"error": "user not found"}, safe=False)
else:
return JsonResponse({"error": "details are invalid"}, safe=False)
return redirect("/")
def student_sign_up(request):
"""student sign up post request validation view"""
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
print(request.POST)
form.save()
else:
return JsonResponse({"error": "details are invalid"}, safe=False)
return redirect("/")
def student_sign_in(request):
"""student sign in post request validation view"""
if request.method == 'POST':
form = SignInForm(data=request.POST)
user = None
print(request.POST)
if form.is_valid():
try:
user = authenticate(request, username=request.POST.get(
'username'), password=request.POST.get('password'))
except:
raise Exception("unablr to authenticate")
if user is not None and not user.is_teacher:
try:
login(request, user)
return redirect("/quiz/"+str(user.pk)+"/")
except:
raise Exception("unable to login")
else:
return JsonResponse({"error": "user not found"}, safe=False)
else:
return JsonResponse({"error": "details are invalid"}, safe=False)
return redirect("/")
| 2.84375 | 3 |
tests/unit/test_cant_enter_unless_started.py | ZeroEkkusu/smartcontract-lottery | 0 | 12757274 | <reponame>ZeroEkkusu/smartcontract-lottery
from brownie import network, exceptions
from scripts.deploy_lottery import deploy_lottery
from scripts.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account
import pytest
def test_cant_enter_unless_started():
# (Arrange)
# Only on a local, non-forked network
if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
pytest.skip()
# Deploy a Lottery
lottery = deploy_lottery()
# (Act/Assert)
# Assert that one can't enter the lottery if it has not started
# (There's a problem with the GUI Ganache, this is a workaround)
ex = ValueError if network.show_active(
) == "ganache-local" else exceptions.VirtualMachineError
with pytest.raises(ex):
lottery.enter(
{"from": get_account(), "value": lottery.getEntranceFee()})
| 2.234375 | 2 |
Code/tests/test_expand_repo.py | macmule/autopkg | 855 | 12757275 | <reponame>macmule/autopkg
#!/usr/local/autopkg/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import os
import unittest
autopkg = imp.load_source(
"autopkg", os.path.join(os.path.dirname(__file__), "..", "autopkg")
)
class TestExpandRepo(unittest.TestCase):
"""Test cases for expansion of recipe repos for add/delete/update."""
def test_expand_single_autopkg_org_urls(self):
"""Expand single part short repo URLs in the AutoPkg org on GitHub"""
url = autopkg.expand_repo_url("recipes")
self.assertEqual(url, "https://github.com/autopkg/recipes")
url = autopkg.expand_repo_url("bogus")
self.assertEqual(url, "https://github.com/autopkg/bogus")
def test_expand_multi_autopkg_org_urls(self):
"""Expand multi part short repo URLs in the AutoPkg org on GitHub"""
url = autopkg.expand_repo_url("autopkg/recipes")
self.assertEqual(url, "https://github.com/autopkg/recipes")
url = autopkg.expand_repo_url("autopkg/bogus")
self.assertEqual(url, "https://github.com/autopkg/bogus")
def test_expand_multi_other_org_urls(self):
"""Expand multi part short repo URLs in another org on GitHub"""
url = autopkg.expand_repo_url("eth-its/autopkg-mac-recipes")
self.assertEqual(url, "https://github.com/eth-its/autopkg-mac-recipes")
url = autopkg.expand_repo_url("facebook/Recipes-For-AutoPkg")
self.assertEqual(url, "https://github.com/facebook/Recipes-For-AutoPkg")
url = autopkg.expand_repo_url("bogusorg/bogusrepo")
self.assertEqual(url, "https://github.com/bogusorg/bogusrepo")
def test_expand_full_urls(self):
"""Expand full URLs"""
url = autopkg.expand_repo_url("http://github.com/eth-its/autopkg-mac-recipes")
self.assertEqual(url, "http://github.com/eth-its/autopkg-mac-recipes")
url = autopkg.expand_repo_url("https://github.com/eth-its/autopkg-mac-recipes")
self.assertEqual(url, "https://github.com/eth-its/autopkg-mac-recipes")
url = autopkg.expand_repo_url("http://github.com/facebook/Recipes-For-AutoPkg")
self.assertEqual(url, "http://github.com/facebook/Recipes-For-AutoPkg")
url = autopkg.expand_repo_url("https://github.com/facebook/Recipes-For-AutoPkg")
self.assertEqual(url, "https://github.com/facebook/Recipes-For-AutoPkg")
url = autopkg.expand_repo_url("http://github.com/bogusorg/bogusrepo")
self.assertEqual(url, "http://github.com/bogusorg/bogusrepo")
url = autopkg.expand_repo_url("https://github.com/bogusorg/bogusrepo")
self.assertEqual(url, "https://github.com/bogusorg/bogusrepo")
# TODO: Not yet implemented.
# def test_expand_file_urls(self):
# """Expand file URLs"""
# url = autopkg.expand_repo_url("file:///private/tmp/")
# self.assertEqual(url, "/private/tmp/")
# url = autopkg.expand_repo_url("file:///foo/bar/")
# self.assertEqual(url, "/foo/bar/")
def test_expand_file_paths(self):
"""Expand file paths"""
url = autopkg.expand_repo_url("/private/tmp/")
self.assertEqual(url, "/private/tmp")
url = autopkg.expand_repo_url("/foo/bar/")
self.assertEqual(url, "/foo/bar")
url = autopkg.expand_repo_url("/foo/bar")
self.assertEqual(url, "/foo/bar")
url = autopkg.expand_repo_url(
"~/Library/AutoPkg/RecipeRepos/com.github.autopkg.recipes"
)
self.assertEqual(
url, "~/Library/AutoPkg/RecipeRepos/com.github.autopkg.recipes"
)
url = autopkg.expand_repo_url("/Users/Shared/foo")
self.assertEqual(url, "/Users/Shared/foo")
if __name__ == "__main__":
unittest.main()
| 2.234375 | 2 |
allennlp/allennlp/data/tokenizers/pretrained_transformer_pre_tokenizer.py | rahular/joint-coref-srl | 0 | 12757276 | <filename>allennlp/allennlp/data/tokenizers/pretrained_transformer_pre_tokenizer.py
from typing import List, Optional
from overrides import overrides
import spacy
from transformers.tokenization_bert import (
BasicTokenizer as BertTokenizer,
_is_punctuation,
)
from allennlp.data.tokenizers.token import Token
from allennlp.data.tokenizers.tokenizer import Tokenizer
@Tokenizer.register("bert-basic")
class BertPreTokenizer(Tokenizer):
"""
The ``BasicTokenizer`` from the BERT implementation.
This is used to split a sentence into words.
Then the ``BertTokenIndexer`` converts each word into wordpieces.
"""
default_never_split = ["[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]"]
def __init__(
self, do_lower_case: bool = True, never_split: Optional[List[str]] = None
) -> None:
if never_split is None:
never_split = self.default_never_split
else:
never_split = never_split + self.default_never_split
self.basic_tokenizer = BertTokenizer(do_lower_case, never_split)
self.basic_tokenizer._run_split_on_punc = self._run_split_on_punc
self.never_split = never_split
@overrides
def tokenize(self, text: str) -> List[Token]:
return [Token(text) for text in self.basic_tokenizer.tokenize(text)]
# HACK: Monkeypatch for huggingface's broken BasicTokenizer.
# TODO(Mark): Remove this once https://github.com/huggingface/transformers/pull/2557
# is merged.
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is None:
never_split = self.never_split
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _remove_spaces(tokens: List[spacy.tokens.Token]) -> List[spacy.tokens.Token]:
return [token for token in tokens if not token.is_space]
| 2.796875 | 3 |
composer/workflows/bashoperator_python2.py | HoleCat/echarlosperros | 0 | 12757277 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START composer_bashoperator_python2]
import datetime
from airflow import models
from airflow.operators import bash_operator
yesterday = datetime.datetime.combine(
datetime.datetime.today() - datetime.timedelta(1),
datetime.datetime.min.time())
default_dag_args = {
# Setting start date as yesterday starts the DAG immediately when it is
# detected in the Cloud Storage bucket.
'start_date': yesterday,
}
with models.DAG(
'composer_sample_bashoperator_python2',
schedule_interval=datetime.timedelta(days=1),
default_args=default_dag_args) as dag:
run_python2 = bash_operator.BashOperator(
task_id='run_python2',
# This example runs a Python script from the data folder to prevent
# Airflow from attempting to parse the script as a DAG.
bash_command='python2 /home/airflow/gcs/data/python2_script.py',
)
# [END composer_bashoperator_python2]
| 2.28125 | 2 |
Python3/0898-Bitwise-ORs-of-Subarrays/soln.py | wyaadarsh/LeetCode-Solutions | 5 | 12757278 | class Solution:
def subarrayBitwiseORs(self, A):
"""
:type A: List[int]
:rtype: int
"""
res, cur = set(), set()
for x in A:
cur = {x | y for y in cur} | {x}
res |= cur
return len(res)
| 3.375 | 3 |
merge_sort.py | kabilasudhannc/DataStructures_in_Python | 1 | 12757279 | def merge_sort(sorting_list):
"""
Sorts a list in ascending order
Returns a new sorted list
Divide: Find the midpoint of the list and divide into sublist
Conquer: Recursively sort the sublist created in previous step
Combine: Merge the sorted sublist created in previous step
Takes O(n log n) time
"""
if len(sorting_list) <= 1:
return sorting_list
left_half, right_half = split(sorting_list)
left = merge_sort(left_half)
right = merge_sort(right_half)
return merge(left, right)
def split(sorting_list):
"""
Divide the unsorted list at midpoint into sublist
Returns two sublist - left and right
Takes overall O(log n) time
"""
mid = len(sorting_list) // 2
left = sorting_list[: mid]
right = sorting_list[mid:]
return left, right
def merge(left, right):
"""
Merges two lists (arrays), sorting them in the process
Returns a new merged list
Takes overall O(n) time
"""
l = []
i = 0
j = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
l.append(left[i])
i += 1
else:
l.append(right[j])
j += 1
while i < len(left):
l.append(left[i])
i += 1
while j < len(right):
l.append(right[j])
j += 1
return l
def verify(sorted_list):
n = len(sorted_list)
if n == 0 or n == 1:
return True
return sorted_list[0] < sorted_list[1] and verify(sorted_list[1:])
a_list = [23, 61, 1, 2, 63, 7, 3, 9, 54, 66]
result = merge_sort(a_list)
print(verify(result))
| 4.53125 | 5 |
Interview_Questions/palindrome_permutation.py | Mosuswalks/Python-Data-Structures | 0 | 12757280 | <reponame>Mosuswalks/Python-Data-Structures
# Question 1.4:
# Given a string, write a function to check if it is a permutation of a palindrome.
#
#
def palin_permu(string: str) -> bool:
| 4.09375 | 4 |
tmdb-search/ingest_tmdb_to_appserarch.py | xinlc/elasticsearch-learning | 2 | 12757281 | <gh_stars>1-10
import requests
import json
def extract():
f = open('./tmdb.json')
if f:
return json.loads(f.read());
return {}
def index_all(movieDict={}):
for id, movie in movieDict.iteritems():
index_doc(movie)
print(id)
def index_doc(doc):
content=[]
content.append(doc)
resp = requests.post("http://localhost:3002/api/as/v1/engines/tmdb/documents",
headers={"content-type":"application/json","Authorization": "Bearer <KEY>"},
data=json.dumps(content))
print resp
def main():
movieDict = extract()
index_all(movieDict=movieDict)
if __name__== "__main__":
main()
| 2.984375 | 3 |
timeflux_eego/nodes/driver.py | timeflux/timeflux_eego | 0 | 12757282 | <reponame>timeflux/timeflux_eego
"""
"""
import time
from timeflux.core.node import Node
import numpy as np
import scipy.stats
import eego
class EegoDriver(Node):
def __init__(self,
dll_dir=None,
sampling_rate=512,
reference_channels=None,
reference_range=1,
bipolar_channels=None,
bipolar_range=4,
amplifier_index=0,
impedance_window=1,
start_impedance_trigger=None,
start_eeg_trigger=None,
trigger_column='label'):
super().__init__()
self._factory = eego.glue.factory(dll_dir or eego.sdk.default_dll(), None)
retries = 3
self._amplifier = None
while retries > 0:
retries -= 1
try:
self._amplifier = self._factory.amplifiers[amplifier_index]
except IndexError:
self.logger.warning('Amplifier %d not found, retrying...', amplifier_index)
time.sleep(1)
if self._amplifier:
self.logger.info('Connected to amplifier')
break
if not self._amplifier:
self.logger.error('Could not find EEG amplifier, is it connected and on?')
raise ValueError('Could not initialize EEG amplifier')
self._ref_config = self._amplifier.get_default_config('reference',
names=reference_channels,
signal_range=reference_range)
self._bip_config = self._amplifier.get_default_config('bipolar',
names=bipolar_channels,
signal_range=bipolar_range)
if sampling_rate not in self._amplifier.sampling_rates:
raise ValueError(f'Unsupported sampling rate {sampling_rate} by '
f'{self._amplifier}') # TODO: amplifier repr or str
self._rate = sampling_rate
self._mode = 'eeg'
self.logger.info('Masks are %x %x', self._bip_config.mask, self._ref_config.mask)
self._stream = self._amplifier.open_eeg_stream(self._rate,
self._ref_config.range,
self._bip_config.range,
self._ref_config.mask,
self._bip_config.mask)
self._start_impedance_trigger = start_impedance_trigger
self._start_eeg_trigger = start_eeg_trigger
self._trigger_column = trigger_column
self._start_timestamp = None
self._reference_ts = None
self._sample_count = None
self._impedance_window = impedance_window
self._impedance_history = None
self.logger.info('Eeego amplifier connected %s', self._amplifier)
def update(self):
if self._mode == 'eeg':
self.update_signals()
elif self._mode == 'impedance':
self.update_impedances()
# Handle events
if self.i_events.ready():
start_impedance = (
self._start_impedance_trigger is not None and
np.any(self._start_impedance_trigger == self.i_events.data[self._trigger_column])
)
start_eeg = (
self._start_eeg_trigger is not None and
np.any(self._start_eeg_trigger == self.i_events.data[self._trigger_column])
)
if start_eeg and self._mode == 'impedance':
self.logger.info('Switching to signal mode...')
self._sample_count = None
del self._stream # Important: this frees the device so we can make another stream
self._stream = self._amplifier.open_eeg_stream(self._rate,
self._ref_config.range,
self._bip_config.range,
self._ref_config.mask,
self._bip_config.mask)
self._mode = 'eeg'
elif start_impedance and self._mode == 'eeg':
self.logger.info('Switching to impedance mode...')
self._impedance_history = None
self._sample_count = None
del self._stream # Important: this frees the device so we can make another stream
self._stream = self._amplifier.open_impedance_stream(self._ref_config.mask)
self._mode = 'impedance'
def update_signals(self):
# The first time, drop all samples that might have been captured
# between the initialization and the first time this is called
if self._sample_count is None:
buffer = self._stream.get_data()
n_samples, n_channels = buffer.shape
self.logger.info('Dropped a total of %d samples of data between '
'driver initialization and first node update',
n_samples)
self._sample_count = 0
try:
buffer = self._stream.get_data()
except RuntimeError as ex:
self.logger.error('Eego SDK gave runtime error (%s), '
'resuming the driver acquisition...', ex)
return
n_samples, n_channels = buffer.shape
if n_samples <= 0:
self.logger.info('No data yet...')
return
data = np.fromiter(buffer, dtype=np.float).reshape(-1, n_channels)
del buffer
# account for read data for starting timestamp
if self._sample_count == 0 and n_samples > 0:
self._start_timestamp = (
np.datetime64(int(time.time() * 1e6), 'us') -
# Adjust for the read samples
int(1e6 * n_samples / self._rate)
)
self._reference_ts = self._start_timestamp
# sample counting to calculate drift
self._sample_count += n_samples
elapsed_seconds = (
(np.datetime64(int(time.time() * 1e6), 'us') - self._reference_ts) /
np.timedelta64(1, 's')
)
n_expected = int(np.round(elapsed_seconds * self._rate))
self.logger.debug('Read samples=%d, elapsed_seconds=%f. '
'Expected=%d Real=%d Diff=%d (%.3f sec)',
n_samples, elapsed_seconds,
n_expected, self._sample_count, n_expected - self._sample_count,
(n_expected - self._sample_count) / self._rate)
# Manage timestamps
# For this node, we are trusting the device clock and setting the
# timestamps from the sample number and sampling rate
timestamps = (
self._start_timestamp +
(np.arange(n_samples + 1) * 1e6 / self._rate).astype('timedelta64[us]')
)
self._start_timestamp = timestamps[-1]
eeg_channels = self._ref_config.channels + ('trigger', 'counter')
eeg_col_idx = np.r_[np.arange(len(self._ref_config.channels)), [-2, -1]]
self.o_eeg_signal.set(data[:, eeg_col_idx],
timestamps=timestamps[:-1],
names=eeg_channels)
bip_channels = self._bip_config.channels + ('trigger', 'counter')
bip_col_idx = np.r_[np.arange(len(self._bip_config.channels)) + len(self._ref_config.channels),
[-2, -1]]
self.o_bipolar_signal.set(data[:, bip_col_idx],
timestamps=timestamps[:-1],
names=bip_channels)
def update_impedances(self):
buffer = self._stream.get_data()
n_samples, n_channels = buffer.shape
if n_samples <= 0:
return
data = np.fromiter(buffer, dtype=np.float).reshape(-1, n_channels)
del buffer
self._sample_count = self._sample_count or 0
self._sample_count += n_samples
impedance_channel_names = self._ref_config.channels + ('REF', 'GND')
# Manage window
if self._impedance_history is None:
self._impedance_history = data
else:
self._impedance_history = np.r_[self._impedance_history, data][-self._impedance_window:]
avg_data = scipy.stats.gmean(self._impedance_history + 1e-3, axis=0)
self.o_eeg_impedance.set(avg_data[np.newaxis, :],
names=impedance_channel_names) | 1.960938 | 2 |
marmot/features/phrase/token_count_feature_extractor.py | qe-team/marmot | 19 | 12757283 | <reponame>qe-team/marmot
from __future__ import division
from marmot.features.feature_extractor import FeatureExtractor
import sys
import logging
import numpy as np
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
class TokenCountFeatureExtractor(FeatureExtractor):
def get_features(self, context_obj):
#sys.stderr.write("Start TokenCountFeatureExtractor\n")
target_len = len(context_obj['token'])
target_tok_len = np.average([len(word) for word in context_obj['token']])
source_len, source_tok_len = 0, 0
if 'source_token' in context_obj and len(context_obj['source_token']) > 0:
source_len = len(context_obj['source_token'])
source_tok_len = np.average([len(word) for word in context_obj['source_token']])
target_occur = []
for word in context_obj['token']:
target_occur.append(context_obj['target'].count(word))
avg_occur = np.average(target_occur)
tg_src_ratio = target_len/source_len if source_len > 0 else 0
#sys.stderr.write("Finish TokenCountFeatureExtractor\n")
return [str(target_len),
str(source_len),
str(source_len/target_len),
str(tg_src_ratio),
str(target_tok_len),
str(source_tok_len),
str(avg_occur)]
def get_feature_names(self):
return ['target_phrase_len',
'source_phrase_len',
'source_target_len_ratio',
'target_source_len_ratio',
'avg_target_token_len',
'avg_source_token_len',
'avg_occurrence_of_target_word_within_sentence']
| 2.25 | 2 |
compare_wem.py | ADawesomeguy/witcher-3-sound-editing-tools | 2 | 12757284 | import sys
import os
import struct
from cStringIO import StringIO
class WEMError(Exception):
    """Raised when a WEM file cannot be opened or is otherwise unusable."""
    pass
class WEMTypes(object):
    """File-seek origin constants (same values as os.SEEK_SET,
    os.SEEK_CUR, os.SEEK_END)."""
    SEEK_BEGIN = 0
    SEEK_CURRENT = 1
    SEEK_END = 2
def yes_or_no(message):
    """Prompt the user until a yes/no answer is given.

    Returns:
        bool: True for a yes-like answer, False for a no-like answer.
    """
    yes_answers = ("yes", "ye", "y")
    no_answers = ("no", "n")
    while True:
        reply = raw_input("%s? [Y]es/[N]o: " % (message)).strip().lower()
        if reply in yes_answers:
            return True
        if reply in no_answers:
            return False
class Table(object):
    """Renders parallel string rows as an aligned, dash-framed text
    table (used to diff INPUT vs OUTPUT WEM header fields)."""

    def __init__(self, headers, content):
        # headers: sequence of column titles.
        # content: iterable of rows, each a sequence with one string per column.
        self.headers = headers
        self.content = content

    def show(self):
        """Print the table to stdout."""
        # Get table sizes.
        # Column width = max of the header width and the widest cell in it.
        tabsizes = tuple(max(len(_header), max(len(elements[i]) for elements in iter(self.content)))
                         for (i, _header) in enumerate(self.headers))
        # Make table header.
        header = " ".join(_header.ljust(tabsizes[i]) for (i, _header) in enumerate(self.headers))
        head = tail = ("-" * len(header))
        # Make table elements.
        table = "\n".join(" ".join(element.ljust(tabsizes[i]) for (i, element) in enumerate(elements))
                          for elements in iter(self.content))
        # Print crafted table.
        print "\n".join((head, header, table, tail))
class Packet(object):
    """Header of one packet inside the WEM data chunk.

    A packet starts with a uint16 payload size; unless `no_granule` is
    set, a uint32 absolute granule position follows.
    """

    def __init__(self, wem, offset, no_granule):
        # `wem` only needs to expose _read_uint16/_read_uint32 positioned
        # at the packet header.
        self.offset = offset
        self.no_granule = no_granule
        self.absolute_granule = 0
        self.size = wem._read_uint16()
        if not no_granule:
            self.absolute_granule = wem._read_uint32()

    def __len__(self):
        return self.size

    def get_header_size(self):
        """Header is 2 bytes (size only) or 6 bytes (size + granule)."""
        if self.no_granule:
            return 2
        return 6

    def get_offset(self):
        """Absolute offset of the packet payload."""
        return self.get_header_size() + self.offset

    def get_next_offset(self):
        """Absolute offset of the next packet header."""
        return self.size + self.get_offset()
class WEM(object):
    def __init__(self, file):
        """Open `file` and initialize all parse-state attributes.

        Raises:
            WEMError: if the file cannot be opened or stat'ed.
        """
        try:
            self.fsize = os.path.getsize(file)
            self.file = open(file, "rb")
        except (OSError, IOError):
            raise WEMError("Cannot open file")
        self._file = file
        # Output buffer for the merged file image (see merge_headers/create).
        self.buffer = StringIO()
        # RIFF/WAVE container fields.
        self.riff_head = None
        self.riff_size = None
        self.wave_head = None
        # Chunk offsets/sizes discovered while scanning the RIFF body.
        self.fmt_offset = None
        self.fmt_size = None
        self.cue_offset = None
        self.cue_size = None
        self.LIST_offset = None
        self.LIST_size = None
        self.smpl_offset = None
        self.smpl_size = None
        self.vorb_offset = None
        self.vorb_size = None
        self.data_offset = None
        self.data_size = None
        # fmt chunk fields.
        self.codecid = None
        self.channels = 0
        self.sample_rate = 0
        self.avg_bytes_per_second = 0
        self.block_alignment = None
        self.bps = None
        self.extra_fmt_length = None
        self.ext_unk = 0
        self.subtype = 0
        # vorb chunk fields (possibly embedded in a 0x42-byte fmt chunk).
        self.sample_count = 0
        self.no_granule = False
        self.mod_signal = None
        self.mod_packets = False
        self.fmt_unk_field32_1 = None
        self.fmt_unk_field32_2 = None
        self.setup_packet_offset = 0
        self.first_audio_packet_offset = 0
        self.fmt_unk_field32_3 = None
        self.fmt_unk_field32_4 = None
        self.fmt_unk_field32_5 = None
        self.header_triad_present = False
        self.old_packet_headers = False
        self.uid = 0
        self.blocksize_0_pow = 0
        self.blocksize_1_pow = 0
        # cue chunk fields.
        self.cue_count = 0
        self.cue_id = None
        self.cue_position = None
        self.cue_datachunkid = None
        self.cue_chunkstart = None
        self.cue_blockstart = None
        self.cue_sampleoffset = None
        # LIST (adtl) chunk fields.
        self.adtlbuf = None
        self.LIST_remain = None
        # smpl (loop) chunk fields.
        self.loop_count = 0
        self.loop_start = 0
        self.loop_end = 0
        # True when the vorb data lives inside a 0x42-byte fmt chunk.
        self.fake_vorb = False
        # Raw data chunk split: bytes before the setup packet, the setup
        # region, and the audio packets.
        self.pre_data = None
        self.data_setup = None
        self.data = None
def __del__(self):
try:
self.file.close()
except Exception:
pass
try:
self.buffer.close()
except Exception:
pass
def _read_uchar(self, size=None):
if size is None:
return struct.unpack("<B", self.file.read(1))[0]
else:
return self.file.read(size)
def _read_uint16(self):
return struct.unpack("<H", self.file.read(2))[0]
def _read_uint32(self):
return struct.unpack("<I", self.file.read(4))[0]
    def _write_uchar(self, data):
        """Append one unsigned byte (when given an int/long) or a raw
        string verbatim to the output buffer."""
        # NOTE: Python 2 only -- `long` is referenced unqualified; the
        # short-circuit keeps it unevaluated for int inputs.
        if type(data) == int or type(data) == long:
            self.buffer.write(struct.pack("<B", data))
        else:
            self.buffer.write(data)
def _write_uint16(self, data):
self.buffer.write(struct.pack("<H", data))
def _write_uint32(self, data):
self.buffer.write(struct.pack("<I", data))
def read(self):
try:
self.riff_head = self._read_uchar(4)
if self.riff_head != "RIFF":
raise WEMError("No RIFF head found")
self.riff_size = self._read_uint32() + 8
if self.riff_size > self.fsize:
raise WEMError("Truncated RIFF")
self.wave_head = self._read_uchar(4)
if self.wave_head != "WAVE":
raise WEMError("No WAVE head found")
chunk_offset = 12
while chunk_offset < self.riff_size:
self.file.seek(chunk_offset, WEMTypes.SEEK_BEGIN)
if chunk_offset + 8 > self.riff_size:
raise WEMError("Truncated chunk header")
chunk_type = self._read_uchar(4)
chunk_size = self._read_uint32()
if chunk_type == "fmt ":
self.fmt_offset = chunk_offset + 8
self.fmt_size = chunk_size
elif chunk_type == "cue ":
self.cue_offset = chunk_offset + 8
self.cue_size = chunk_size
elif chunk_type == "LIST":
self.LIST_offset = chunk_offset + 8
self.LIST_size = chunk_size
elif chunk_type == "smpl":
self.smpl_offset = chunk_offset + 8
self.smpl_size = chunk_size
elif chunk_type == "vorb":
self.vorb_offset = chunk_offset + 8
self.vorb_size = chunk_size
elif chunk_type == "data":
self.data_offset = chunk_offset + 8
self.data_size = chunk_size
chunk_offset += (8 + chunk_size)
if chunk_offset > self.riff_size:
raise WEMError("Truncated chunk")
if self.fmt_offset is None and self.data_offset is None:
raise WEMError("No fmt and data chunks found")
if self.vorb_size not in (None, 0x28, 0x2A, 0x2C, 0x32, 0x34):
raise WEMError("Bad vorb size")
if self.vorb_offset is None:
if self.fmt_size != 0x42:
raise WEMError("fmt size must be 0x42 if no vorb")
else:
self.vorb_offset = self.fmt_offset + 0x18 # Fake
self.fake_vorb = True
else:
raise WEMError("Not Supported")
if self.fmt_size not in (0x28, 0x18, 0x12):
raise WEMError("Bad fmt size")
self.file.seek(self.fmt_offset, WEMTypes.SEEK_BEGIN)
self.codecid = self._read_uint16()
if self.codecid != 0xFFFF:
raise WEMError("Bad codec id")
self.channels = self._read_uint16()
self.sample_rate = self._read_uint32()
self.avg_bytes_per_second = self._read_uint32()
self.block_alignment = self._read_uint16()
if self.block_alignment != 0:
raise WEMError("Bad block alignment")
self.bps = self._read_uint16()
if self.bps != 0:
raise WEMError("BPS is not 0")
self.extra_fmt_length = self._read_uint16()
if self.extra_fmt_length != (self.fmt_size - 0x12):
raise WEMError("Bad extra fmt length")
if (self.fmt_size - 0x12) >= 2:
self.ext_unk = self._read_uint16()
if (self.fmt_size - 0x12) >= 6:
self.subtype = self._read_uint32()
if self.cue_offset is not None:
#if self.cue_size != 0x1c:
#raise WEMError("Bad cue size")
self.file.seek(self.cue_offset, WEMTypes.SEEK_BEGIN)
self.cue_count = self._read_uint32()
self.cue_id = self._read_uint32()
self.cue_position = self._read_uint32()
self.cue_datachunkid = self._read_uint32()
self.cue_chunkstart = self._read_uint32()
self.cue_blockstart = self._read_uint32()
self.cue_sampleoffset = self._read_uint32()
if self.LIST_offset is not None:
self.file.seek(self.LIST_offset, WEMTypes.SEEK_BEGIN)
self.adtlbuf = self._read_uchar(4)
if self.adtlbuf != "adtl":
raise WEMError("LIST is not adtl")
self.LIST_remain = self._read_uchar(self.LIST_size - 4)
if self.smpl_offset is not None:
self.file.seek(self.smpl_offset + 0x1C, WEMTypes.SEEK_BEGIN)
self.loop_count = self._read_uint32()
if self.loop_count != 1:
raise WEMError("Not an one loop")
self.file.seek(self.smpl_offset + 0x2c, WEMTypes.SEEK_BEGIN)
self.loop_start = self._read_uint32()
self.loop_end = self._read_uint32()
self.file.seek(self.vorb_offset, WEMTypes.SEEK_BEGIN)
self.sample_count = self._read_uint32()
if self.vorb_size in (None, 0x2A):
self.no_granule = True
self.file.seek(self.vorb_offset + 0x4, WEMTypes.SEEK_BEGIN)
self.mod_signal = self._read_uint32()
if self.mod_signal not in (0x4A, 0x4B, 0x69, 0x70):
self.mod_packets = True
self.fmt_unk_field32_1 = self._read_uint32()
self.fmt_unk_field32_2 = self._read_uint32()
self.file.seek(self.vorb_offset + 0x10, WEMTypes.SEEK_BEGIN)
else:
self.file.seek(self.vorb_offset + 0x18, WEMTypes.SEEK_BEGIN)
self.setup_packet_offset = self._read_uint32()
self.first_audio_packet_offset = self._read_uint32()
self.fmt_unk_field32_3 = self._read_uint32()
self.fmt_unk_field32_4 = self._read_uint32()
self.fmt_unk_field32_5 = self._read_uint32()
if self.vorb_size in (None, 0x2A):
self.file.seek(self.vorb_offset + 0x24, WEMTypes.SEEK_BEGIN)
elif self.vorb_size in (0x32, 0x34):
self.file.seek(self.vorb_offset + 0x2C, WEMTypes.SEEK_BEGIN)
if self.vorb_size in (0x28, 0x2C):
self.header_triad_present = True
self.old_packet_headers = True
elif self.vorb_size in (None, 0x2A, 0x32, 0x34):
self.uid = self._read_uint32()
self.blocksize_0_pow = self._read_uchar()
self.blocksize_1_pow = self._read_uchar()
if self.loop_count != 0:
if self.loop_end == 0:
self.loop_end = self.sample_count
else:
self.loop_end += 1
if self.loop_start >= self.sample_count or self.loop_end > self.sample_count or self.loop_start > self.loop_end:
raise WMError("Loops out of range")
if self.subtype in (4, 3, 0x33, 0x37, 0x3b, 0x3f):
pass
self.setup_packet()
self.file.seek(self.data_offset, WEMTypes.SEEK_BEGIN)
self.pre_data = self._read_uchar(self.setup_packet_offset)
self.data_setup = self._read_uchar(self.first_audio_packet_offset)
self.data = self.file.read()
if len(self.pre_data) + len(self.data_setup) + len(self.data) != self.data_size:
raise WEMError("Bad data")
except (IOError, struct.error):
raise WMError("Bad WEM file")
def setup_packet(self):
self.packet = Packet(self, self.data_offset + self.setup_packet_offset, self.no_granule)
if self.packet.absolute_granule != 0:
raise WEMError("Setup packet granule is not 0")
    def merge_headers(self, ww):
        """Serialize this file's header chunks into self.buffer, taking
        the subtype field from `ww` (the other WEM of the merge).

        The riff_size written here is a placeholder; it is backpatched by
        calculate_riff_size(). The commented-out lines document fields
        that were deliberately excluded from the merge.

        Raises:
            WEMError: if this file did not use the fake-vorb (0x42 fmt)
                layout.
        """
        if not self.fake_vorb:
            raise WEMError("Not supported")
        self.riff_size = 0
        #self.LIST_size = ww.LIST_size
        #self.adtlbuf = ww.adtlbuf
        #self.LIST_remain = ww.LIST_remain
        #self.unk_field32_1 = ww.unk_field32_1
        #self.unk_field32_2 = ww.unk_field32_2
        #self.unk_field32_3 = ww.unk_field32_3
        #self.unk_field32_4 = ww.unk_field32_4
        #self.unk_field32_5 = ww.unk_field32_5
        #self.uid = ww.uid
        self.subtype = ww.subtype
        #self.mod_signal = ww.mod_signal
        #self.setup_packet_offset = ww.setup_packet_offset
        #self.first_audio_packet_offset = ww.first_audio_packet_offset
        #self.pre_data = ww.pre_data
        #self.data_setup = ww.data_setup
        # Done reading the input; everything below writes to self.buffer.
        self.file.close()
        self._write_uchar(self.riff_head)
        self._write_uint32(self.riff_size)
        self._write_uchar(self.wave_head)
        # fmt chunk (with embedded vorb fields, fake-vorb layout).
        self._write_uchar("fmt ")
        self._write_uint32(self.fmt_size)
        self._write_uint16(self.codecid)
        self._write_uint16(self.channels)
        self._write_uint32(self.sample_rate)
        self._write_uint32(self.avg_bytes_per_second)
        self._write_uint16(self.block_alignment)
        self._write_uint16(self.bps)
        self._write_uint16(self.extra_fmt_length)
        self._write_uint16(self.ext_unk)
        self._write_uint32(self.subtype)
        self._write_uint32(self.sample_count)
        self._write_uint32(self.mod_signal)
        self._write_uint32(self.fmt_unk_field32_1)
        self._write_uint32(self.fmt_unk_field32_2)
        # Recomputed packet offsets based on the data split from read().
        self._write_uint32(len(self.pre_data))
        self._write_uint32(len(self.pre_data) + (self.first_audio_packet_offset - self.setup_packet_offset))
        self._write_uint32(self.fmt_unk_field32_3)
        self._write_uint32(self.fmt_unk_field32_4)
        self._write_uint32(self.fmt_unk_field32_5)
        self._write_uint32(self.uid)
        self._write_uchar(self.blocksize_0_pow)
        self._write_uchar(self.blocksize_1_pow)
        # cue chunk, copied through when present in this file.
        if self.cue_offset is not None:
            self._write_uchar("cue ")
            self._write_uint32(self.cue_size)
            self._write_uint32(self.cue_count)
            self._write_uint32(self.cue_id)
            self._write_uint32(self.cue_position)
            self._write_uint32(self.cue_datachunkid)
            self._write_uint32(self.cue_chunkstart)
            self._write_uint32(self.cue_blockstart)
            self._write_uint32(self.cue_sampleoffset)
        #self._write_uchar("LIST")
        #self._write_uint32(self.LIST_size)
        #self._write_uchar(self.adtlbuf)
        #self._write_uchar(self.LIST_remain)
def merge_datas(self, ww):
databuf = StringIO()
databuf.write(self.pre_data)
databuf.write(self.data_setup)
databuf.write(self.data)
self.data_size = databuf.tell()
self._write_uchar("data")
self._write_uint32(self.data_size)
self._write_uchar(databuf.getvalue())
databuf.close()
def calculate_riff_size(self):
fsize = self.buffer.tell()
self.buffer.seek(4, WEMTypes.SEEK_BEGIN)
self._write_uint32(fsize - 8)
self.buffer.seek(0, WEMTypes.SEEK_END)
    def get_elements_for_table(self):
        """Return one formatted string per parsed header field, in a
        fixed order, for side-by-side display by Table.

        Fields of absent optional chunks render as empty strings so the
        two columns stay aligned."""
        return (
            "RIFF SIZE: %i" % (self.riff_size),
            "CUE: " + ("No" if self.cue_offset is None else "Yes"),
            "LIST: " + ("No" if self.LIST_offset is None else "Yes"),
            "SMP1: " + ("No" if self.smpl_offset is None else "Yes"),
            "VORB: " + ("No" if self.vorb_offset is None or self.fake_vorb else "Yes"),
            "LIST SIZE: %i" % (self.LIST_size) if self.LIST_offset is not None else "",
            "FMT SIZE: %i" % (self.fmt_size),
            "DATA SIZE: %i" % (self.data_size),
            "CODEC ID: %i" % (self.codecid),
            "CHANNELS: %i" % (self.channels),
            "SAMPLE RATE: %i" % (self.sample_rate),
            "AVG BYTES PER SECOND: %i" % (self.avg_bytes_per_second),
            "BPS: %i" % (self.bps),
            "EXTRA FMT LENGTH: %i" % (self.extra_fmt_length),
            "EXT UNKNOWN: %i" % (self.ext_unk),
            "SUBTYPE: %i" % (self.subtype),
            "SAMPLE COUNT: %i" % (self.sample_count),
            "NO GRANULE: " + ("Yes" if self.no_granule else "No"),
            "MOD SIGNAL: %i" % (self.mod_signal),
            "MOD PACKETS: " + ("Yes" if self.mod_packets else "No"),
            "SETUP PACKET OFFSET: %i" % (self.setup_packet_offset),
            "FIRST AUDIO PACKET OFFSET: %i" % (self.first_audio_packet_offset),
            "HEADER TRIAD PRESENT: " + ("Yes" if self.header_triad_present else "No"),
            "OLD PACKET HEADERS: " + ("Yes" if self.old_packet_headers else "No"),
            "UID: %i" % (self.uid),
            "BLOCKSIZE 0: %i" % (self.blocksize_0_pow),
            "BLOCKSIZE 1: %i" % (self.blocksize_1_pow),
            "UNK FMT FIELDS 32: %i, %i, %i, %i, %i" % (self.fmt_unk_field32_1, self.fmt_unk_field32_2, self.fmt_unk_field32_3, self.fmt_unk_field32_4, self.fmt_unk_field32_5),
            "CUE COUNT: %i" % (self.cue_count),
            "CUE SIZE: %i" % (self.cue_size) if self.cue_offset is not None else "",
            "CUE ID: %i" % (self.cue_id) if self.cue_offset is not None else "",
            "CUE POSITION: %i" % (self.cue_position) if self.cue_offset is not None else "",
            "CUE DATACHUNKID: %i" % (self.cue_datachunkid) if self.cue_offset is not None else "",
            "CUE CHUNKSTART: %i" % (self.cue_chunkstart) if self.cue_offset is not None else "",
            "CUE BLOCKSTART: %i" % (self.cue_blockstart) if self.cue_offset is not None else "",
            "CUE SAMPLEOFFSET: %i" % (self.cue_sampleoffset) if self.cue_offset is not None else "",
            "LOOP COUNT: %i" % (self.loop_count)
        )
def create(self):
try:
self.file = open(self._file + ".merged", "wb")
self.file.write(self.buffer.getvalue())
except IOError:
raise WEMError("Couldn't flush merged file")
def main(argc, argv):
    """CLI entry point: parse INPUT and OUTPUT WEM files, show a header
    diff table, and optionally write OUTPUT's data merged with INPUT's
    headers to "<OUTPUT>.merged"."""
    if argc != 3:
        print "Usage: %s <INPUT> <OUTPUT>" % (os.path.basename(argv[0]))
        sys.exit(1)
    input = argv[1].strip()
    if not input:
        raise SyntaxError("Invalid input")
    output = argv[2].strip()
    if not output:
        raise SyntaxError("Invalid output")
    sys.stdout.write("Analyzing...")
    wwinput = WEM(input)
    wwinput.read()
    wwoutput = WEM(output)
    wwoutput.read()
    sys.stdout.write("Done!\n")
    # Build a two-column field-by-field comparison.
    tabheaders = ("INPUT", "OUTPUT")
    tabcontent1 = wwinput.get_elements_for_table()
    tabcontent2 = wwoutput.get_elements_for_table()
    tabcontent = [(tabcontent1[i], tabcontent2[i]) for (i, element) in enumerate(tabcontent1)]
    table = Table(tabheaders, tabcontent)
    print
    table.show()
    print
    answer = yes_or_no("Merge headers")
    if answer:
        # Headers come from the input file, packet data from the output.
        wwoutput.merge_headers(wwinput)
        wwoutput.merge_datas(wwinput)
        wwoutput.calculate_riff_size()
        wwoutput.create()
    del wwinput
    del wwoutput
    sys.exit(0)
if __name__ == "__main__":
main(len(sys.argv), sys.argv) | 2.640625 | 3 |
register_printer/parser/register_parser.py | zhangyiant/RegisterPrinter | 0 | 12757285 | import logging
from .field_parser import parse_field_row
from .parse_exception import ExcelParseException
LOGGER = logging.getLogger(__name__)
def is_empty_row(row):
    """Return True when the row's first cell holds an empty value.

    row: xlrd row object (sequence of cells).
    """
    return row[0].value == ""
def is_field_row(row):
    """Return True when the row describes a bit field (column 2, the MSB
    column, is non-empty).

    row: xlrd row object.
    """
    return row[2].value != ""
def validate_register_row_empty_field(row, previous_context):
    """Ensure the field-specific columns of a register header row are empty.

    A register row may only populate offset/name/description; the
    msb/lsb/field/access/default columns belong to field rows.

    Args:
        row: xlrd row object (sequence of cells), obtained via
            sheet.row().
        previous_context: parse context; a copy is annotated with the
            offending column and attached to any raised error.

    Raises:
        ExcelParseException: if any field column is non-empty.
    """
    context = previous_context.copy()
    field_map = [
        (2, "msb"),
        (3, "lsb"),
        (4, "field"),
        (5, "access"),
        (6, "default")
    ]
    for (col, field_name) in field_map:
        context.column = col
        if row[col].value != "":
            # BUGFIX: error message previously read "emtpy".
            msg = "Field '%s' must be empty." % field_name
            raise ExcelParseException(msg, context)
    return
def parse_register_row(row, previous_context):
    """Parse a register header row into its offset/name/description.

    row: xlrd row object (sequence of cells), obtained via sheet.row().

    Returns:
        dict: with keys "offset" (int, parsed as hex), "name" and
        "description".

    Raises:
        ExcelParseException: on unexpected field-column content or an
            unparseable offset.
    """
    context = previous_context.copy()
    validate_register_row_empty_field(row, context)
    context.column = 0
    try:
        offset = int(row[0].value, 16)
    except Exception as exc:
        msg = "Parse offset error: {}.".format(exc)
        raise ExcelParseException(msg, context)
    return {
        "offset": offset,
        "name": row[1].value,
        "description": "%s" % row[7].value,
    }
def validate_field(new_field, parsed_fields, previous_context):
    """Ensure new_field's bit range does not overlap any accepted field.

    BUGFIX: the previous implementation compared the new field's msb
    against the existing field's msb (missing partial overlaps from
    below), stopped checking once an insertion point was found, and
    built a sorted list that was never used. It is replaced by the
    canonical closed-interval overlap test against every parsed field.

    Args:
        new_field (dict): candidate field with integer "msb"/"lsb" keys.
        parsed_fields (list): field dicts already accepted for this
            register.
        previous_context: parse context attached to any raised error.

    Raises:
        ExcelParseException: if the bit ranges overlap.
    """
    context = previous_context
    for field in parsed_fields:
        # Closed ranges [lsb, msb] overlap iff each starts at or below
        # the other's end.
        if new_field["lsb"] <= field["msb"] and field["lsb"] <= new_field["msb"]:
            error_msg = "Fields overlap: \n{0}\n{1}".format(
                field, new_field)
            raise ExcelParseException(error_msg, context)
    return
def parse_register(sheet, start_row, previous_context):
    """Parse one register (header row plus following field rows) from
    the sheet, starting at start_row.

    Returns:
        tuple: (register dict including a "fields" list, index of the
        row after the register's trailing blank separator row).

    Raises:
        ExcelParseException: on malformed rows or a missing blank row
            between registers.
    """
    context = previous_context.copy()
    rowx = start_row
    row = sheet.row(rowx)
    context.row = rowx
    register_dict = parse_register_row(row, context)
    # Field rows follow directly after the register header row.
    rowx = rowx + 1
    row = sheet.row(rowx)
    context.row = rowx
    field_dict_list = []
    while is_field_row(row):
        field_dict = parse_field_row(row, context)
        validate_field(field_dict, field_dict_list, context)
        field_dict_list.append(field_dict)
        if rowx < sheet.nrows - 1:
            rowx = rowx + 1
            row = sheet.row(rowx)
            context.row = rowx
        else:
            # NOTE(review): if the sheet ends on a field row, `row` still
            # refers to that field row here, so the blank-row check below
            # raises even at end-of-sheet -- confirm this is intended.
            break
    if is_empty_row(row):
        rowx += 1
    else:
        err_msg = \
            "sheet {0} row {1} error: no blank row between registers".format(
                sheet.name,
                rowx + 1)
        LOGGER.debug(err_msg)
        msg = "No blank row between registers."
        raise ExcelParseException(msg, context)
    register_dict["fields"] = field_dict_list
    return register_dict, rowx
| 3.125 | 3 |
google/cloud/forseti/services/server.py | mcunha/forseti-security | 0 | 12757286 | <reponame>mcunha/forseti-security<filename>google/cloud/forseti/services/server.py
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forseti Server program."""
# pylint: disable=line-too-long
from abc import ABCMeta, abstractmethod
from concurrent import futures
from multiprocessing.pool import ThreadPool
import argparse
import os
import sys
import threading
import time
import grpc
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.services.client import ClientComposition
from google.cloud.forseti.services.dao import create_engine
from google.cloud.forseti.services.dao import ModelManager
from google.cloud.forseti.services.explain.service import GrpcExplainerFactory
from google.cloud.forseti.services import db
from google.cloud.forseti.services.inventory.service import GrpcInventoryFactory
from google.cloud.forseti.services.inventory.storage import Storage
from google.cloud.forseti.services.model.service import GrpcModellerFactory
from google.cloud.forseti.services.notifier.service import GrpcNotifierFactory
from google.cloud.forseti.services.scanner.service import GrpcScannerFactory
from google.cloud.forseti.services.server_config.service import GrpcServerConfigFactory
LOGGER = logger.get_logger(__name__)
# Maps CLI service names to the gRPC factory that registers that service
# on the server. The 'server' (config) service is always added by serve().
SERVICE_MAP = {
    'explain': GrpcExplainerFactory,
    'inventory': GrpcInventoryFactory,
    'scanner': GrpcScannerFactory,
    'notifier': GrpcNotifierFactory,
    'model': GrpcModellerFactory,
    'server': GrpcServerConfigFactory
}
class AbstractServiceConfig(object):
    """Abstract base class for service configuration. This class
    is used to implement dependency injection for the gRPC services."""

    # NOTE: Python 2 metaclass spelling (no effect under Python 3).
    __metaclass__ = ABCMeta

    @abstractmethod
    def get_engine(self):
        """Get the database engine.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()

    @abstractmethod
    def scoped_session(self):
        """Get a scoped session.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()

    @abstractmethod
    def client(self):
        """Get an API client.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()

    @abstractmethod
    def run_in_background(self, func):
        """Runs a function in a thread pool in the background.

        Args:
            func (Function): Function to be executed.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()

    # BUGFIX: marked abstract for consistency with every other interface
    # method on this class; previously a subclass could omit it silently.
    @abstractmethod
    def get_storage_class(self):
        """Returns the class used for the inventory storage.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()
class AbstractInventoryConfig(dict):
    """Abstract base class for service configuration. This class
    is used to implement dependency injection for the gRPC services."""

    # NOTE(review): `__metaclass__` is the Python 2 spelling and has no
    # effect under Python 3; also, unlike AbstractServiceConfig, these
    # methods are not marked @abstractmethod, so enforcement relies only
    # on the NotImplementedError bodies -- confirm this is intended.
    __metaclass__ = ABCMeta

    def get_root_resource_id(self):
        """Returns the root resource id.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()

    def get_gsuite_admin_email(self):
        """Returns gsuite admin email.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()

    def get_api_quota_configs(self):
        """Returns the per API quota configs.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()

    def get_retention_days_configs(self):
        """Returns the days of inventory data to retain.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()

    def get_service_config(self):
        """Returns the service config.

        Raises:
            NotImplementedError: Abstract.
        """
        raise NotImplementedError()
class InventoryConfig(AbstractInventoryConfig):
    """Concrete inventory configuration (composed dependency injection)."""

    def __init__(self,
                 root_resource_id,
                 gsuite_admin_email,
                 api_quota_configs,
                 retention_days,
                 *args,
                 **kwargs):
        """Initialize.

        Args:
            root_resource_id (str): Root resource to start crawling from
            gsuite_admin_email (str): G Suite admin email
            api_quota_configs (dict): API quota configs
            retention_days (int): Days of inventory tables to retain.
            args: args when creating InventoryConfig
            kwargs: kwargs when creating InventoryConfig
        """
        super(InventoryConfig, self).__init__(*args, **kwargs)
        self.root_resource_id = root_resource_id
        self.gsuite_admin_email = gsuite_admin_email
        self.api_quota_configs = api_quota_configs
        self.retention_days = retention_days
        # Attached later via set_service_config().
        self.service_config = None

    def get_root_resource_id(self):
        """The configured root resource id.

        Returns:
            str: Root resource id.
        """
        return self.root_resource_id

    def get_gsuite_admin_email(self):
        """The G Suite admin email to impersonate.

        Returns:
            str: Gsuite admin email.
        """
        return self.gsuite_admin_email

    def get_api_quota_configs(self):
        """The per-API quota configuration.

        Returns:
            dict: The API quota configurations.
        """
        return self.api_quota_configs

    def get_retention_days_configs(self):
        """How many days of inventory data to retain.

        Returns:
            int: The days of inventory data to retain.
        """
        return self.retention_days

    def get_service_config(self):
        """The attached service configuration.

        Returns:
            object: Service configuration.
        """
        return self.service_config

    def set_service_config(self, service_config):
        """Attach a service configuration.

        Args:
            service_config (object): Service configuration.
        """
        self.service_config = service_config
# pylint: disable=too-many-instance-attributes
class ServiceConfig(AbstractServiceConfig):
    """Implements composed dependency injection to Forseti Server services."""

    def __init__(self,
                 forseti_config_file_path,
                 forseti_db_connect_string,
                 endpoint):
        """Initialize

        Args:
            forseti_config_file_path (str): Path to Forseti configuration file.
            forseti_db_connect_string (str): Forseti database string
            endpoint (str): server endpoint
        """

        super(ServiceConfig, self).__init__()
        self.thread_pool = ThreadPool()
        # Recycle pooled DB connections hourly so idle connections are not
        # dropped server-side between uses.
        self.engine = create_engine(forseti_db_connect_string,
                                    pool_recycle=3600)
        self.model_manager = ModelManager(self.engine)
        self.sessionmaker = db.create_scoped_sessionmaker(self.engine)

        self.endpoint = endpoint

        self.forseti_config_file_path = forseti_config_file_path

        # Populated by update_configuration().
        self.inventory_config = None
        self.scanner_config = None
        self.notifier_config = None
        self.global_config = None
        self.forseti_config = None

        # Reentrant lock: a thread already holding it can re-acquire it.
        self.update_lock = threading.RLock()

    def _read_from_config(self, config_file_path=None):
        """Read from the forseti configuration file.

        Args:
            config_file_path (str): Forseti server config file path.

        Returns:
            dict: Forseti server configuration.
            str: Error message.
        """

        # if config_file_path is not passed in, we will use the default
        # configuration path that was passed in during the initialization
        # of the server.
        forseti_config_path = config_file_path or self.forseti_config_file_path

        forseti_config = {}
        err_msg = ''
        try:
            forseti_config = file_loader.read_and_parse_file(
                forseti_config_path)
        except (AttributeError, IOError) as err:
            err_msg = ('Unable to open Forseti Security config file. '
                       'Please check your path and filename and try '
                       'again. Error: {}').format(err)
            LOGGER.error(err_msg)

        return forseti_config, err_msg

    def update_configuration(self, config_file_path=None):
        """Update the inventory, scanner, global and notifier configurations.

        Args:
            config_file_path (str): Forseti server config file path.

        Returns:
            bool: Whether or not configuration has been updated.
            str: Error message.
        """

        forseti_config, err_msg = self._read_from_config(config_file_path)

        if not forseti_config:
            # if forseti_config is empty, there is nothing to update.
            return False, err_msg

        with self.update_lock:
            # Lock before performing the update to avoid multiple updates
            # at the same time.
            self.forseti_config = forseti_config

            # Setting up individual configurations
            forseti_inventory_config = forseti_config.get('inventory', {})
            inventory_config = InventoryConfig(
                forseti_inventory_config.get('root_resource_id', ''),
                forseti_inventory_config.get('domain_super_admin_email', ''),
                forseti_inventory_config.get('api_quota', {}),
                forseti_inventory_config.get('retention_days', -1))

            # TODO: Create Config classes to store scanner and notifier configs.
            forseti_scanner_config = forseti_config.get('scanner', {})

            forseti_notifier_config = forseti_config.get('notifier', {})

            forseti_global_config = forseti_config.get('global', {})

            self.inventory_config = inventory_config
            self.inventory_config.set_service_config(self)

            self.scanner_config = forseti_scanner_config
            self.notifier_config = forseti_notifier_config
            self.global_config = forseti_global_config
        return True, err_msg

    def get_forseti_config(self):
        """Get the Forseti config.

        Returns:
            dict: Forseti config.
        """

        return self.forseti_config

    def get_inventory_config(self):
        """Get the inventory config.

        Returns:
            object: Inventory config.
        """

        return self.inventory_config

    def get_scanner_config(self):
        """Get the scanner config.

        Returns:
            dict: Scanner config.
        """

        return self.scanner_config

    def get_notifier_config(self):
        """Get the notifier config.

        Returns:
            dict: Notifier config.
        """

        return self.notifier_config

    def get_global_config(self):
        """Get the global config.

        Returns:
            dict: Global config.
        """

        return self.global_config

    def get_engine(self):
        """Get the database engine.

        Returns:
            object: Database engine object.
        """

        return self.engine

    def scoped_session(self):
        """Get a scoped session.

        Returns:
            object: A scoped session.
        """

        return self.sessionmaker()

    def client(self):
        """Get an API client.

        Returns:
            object: API client to use against services.
        """

        return ClientComposition(self.endpoint)

    def run_in_background(self, func):
        """Runs a function in a thread pool in the background.

        Args:
            func (Function): Function to be executed.
        """

        self.thread_pool.apply_async(func)

    def get_storage_class(self):
        """Returns the storage class used to access the inventory.

        Returns:
            class: Type of a storage implementation.
        """

        return Storage
# pylint: enable=too-many-instance-attributes
def serve(endpoint,
          services,
          forseti_db_connect_string,
          config_file_path,
          log_level,
          enable_console_log,
          max_workers=32,
          wait_shutdown_secs=3):
    """Instantiate the requested services and serve them over gRPC,
    blocking until interrupted.

    Args:
        endpoint (str): the server channel endpoint
        services (list): services to register on the server
        forseti_db_connect_string (str): Forseti database string
        config_file_path (str): Path to Forseti configuration file.
        log_level (str): Sets the threshold for Forseti's logger.
        enable_console_log (bool): Enable console logging.
        max_workers (int): maximum number of workers for the crawler
        wait_shutdown_secs (int): seconds to wait before shutdown

    Raises:
        Exception: No services to start
    """

    # Configure logging before anything else emits messages.
    logger.set_logger_level_from_config(log_level)
    if enable_console_log:
        logger.enable_console_log()

    factories = [SERVICE_MAP[name] for name in services]
    if not factories:
        raise Exception('No services to start.')
    # The server config service is always started.
    factories.append(SERVICE_MAP['server'])

    config = ServiceConfig(
        forseti_config_file_path=config_file_path,
        forseti_db_connect_string=forseti_db_connect_string,
        endpoint=endpoint)
    config.update_configuration()

    server = grpc.server(futures.ThreadPoolExecutor(max_workers))
    for factory in factories:
        factory(config).create_and_register_service(server)

    server.add_insecure_port(endpoint)
    server.start()
    # Park the main thread; Ctrl-C triggers a graceful stop.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        server.stop(wait_shutdown_secs).wait()
        return
def check_args(args):
    """Make sure the required args are present and valid.

    The exit codes are arbitrary; they only distinguish the various
    error cases.

    Args:
        args (dict): the command line args

    Returns:
        tuple: 2-tuple (exit code, message); code 0 means all args are
        valid.
    """
    if not args['services']:
        return (1, 'ERROR: please specify at least one service.')

    config_path = args['config_file_path']
    if not config_path:
        return (2, 'ERROR: please specify the Forseti config file.')
    if not os.path.isfile(config_path):
        return (3, 'ERROR: "%s" is not a file.' % config_path)
    if not os.access(config_path, os.R_OK):
        return (4, 'ERROR: "%s" is not readable.' % config_path)

    if not args['forseti_db']:
        return (5, 'ERROR: please specify the Forseti database string.')

    return (0, 'All good!')
# pylint: enable=too-many-locals
def main():
    """Run.

    Parses command-line arguments, validates them via check_args(), and
    starts the gRPC server; exits with a non-zero code on invalid args.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--endpoint',
        default='[::]:50051',
        help='Server endpoint')
    parser.add_argument(
        '--forseti_db',
        help=('Forseti database string, formatted as '
              '"mysql://<db_user>@<db_host>:<db_port>/<db_name>"'))
    parser.add_argument(
        '--config_file_path',
        help='Path to Forseti configuration file.')
    services = sorted(SERVICE_MAP.keys())
    parser.add_argument(
        '--services',
        nargs='+',
        choices=services,
        help=('Forseti services i.e. at least one of: %s.' %
              ', '.join(services)))
    parser.add_argument(
        '--log_level',
        default='info',
        choices=['debug', 'info', 'warning', 'error'],
        help='Sets the threshold for Forseti\'s logger.'
             ' Logging messages which are less severe'
             ' than the level you set will be ignored.')
    parser.add_argument(
        '--enable_console_log',
        action='store_true',
        help='Print log to console.')
    args = vars(parser.parse_args())

    exit_code, error_msg = check_args(args)
    if exit_code:
        sys.stderr.write('%s\n\n' % error_msg)
        parser.print_usage()
        sys.exit(exit_code)

    # serve() blocks until the process is interrupted.
    serve(args['endpoint'],
          args['services'],
          args['forseti_db'],
          args['config_file_path'],
          args['log_level'],
          args['enable_console_log'])
| 1.585938 | 2 |
authlete/django/handler/authorization_request_error_handler.py | authlete/authlete-python-django | 6 | 12757287 | #
# Copyright (C) 2019 Authlete, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
from authlete.django.handler.authorization_request_base_handler import AuthorizationRequestBaseHandler
from authlete.django.web.response_utility import ResponseUtility
from authlete.dto.authorization_action import AuthorizationAction
class AuthorizationRequestErrorHandler(AuthorizationRequestBaseHandler):
    """Handler for error cases of authorization requests.

    A response from Authlete's /api/auth/authorization API contains an
    "action" response parameter. When the value of the response parameter is
    neither "NO_INTERACTION" nor "INTERACTION", the authorization request
    should be handled as an error case. This class is a handler for such
    error cases.
    """

    def __init__(self):
        """Constructor"""
        super().__init__(None)

    def handle(self, response):
        """Handle an error case of an authorization request.

        Returns None when response.action is AuthorizationAction.INTERACTION
        or AuthorizationAction.NO_INTERACTION; in every other case an instance
        of django.http.HttpResponse is returned.

        Args:
            response (authlete.dto.AuthorizationResponse)

        Returns:
            django.http.HttpResponse : An error response, or None for the
            two non-error actions.

        Raises:
            authlete.api.AuthleteApiException
        """
        # The next action the authorization endpoint should take.
        action = response.action

        # Content to return to the user agent; its format depends on 'action'.
        content = response.responseContent

        # Both INTERACTION and NO_INTERACTION are non-error cases: the
        # authorization endpoint handles them itself (with or without
        # showing an authorization page to the user).
        if action in (AuthorizationAction.INTERACTION,
                      AuthorizationAction.NO_INTERACTION):
            return None

        if action == AuthorizationAction.INTERNAL_SERVER_ERROR:
            # 500 Internal Server Error
            return ResponseUtility.internalServerError(content)

        if action == AuthorizationAction.BAD_REQUEST:
            # 400 Bad Request
            return ResponseUtility.badRequest(content)

        if action == AuthorizationAction.LOCATION:
            # 302 Found
            return ResponseUtility.location(content)

        if action == AuthorizationAction.FORM:
            # 200 OK
            return ResponseUtility.okHtml(content)

        # 500 Internal Server Error:
        # Authlete's /api/auth/authorization API returned an unknown action.
        return self.unknownAction('/api/auth/authorization')
| 2.328125 | 2 |
src/main/kotlin/course/mit/asset-v1_MITx+6.00.1x+2T2017+type@asset+block@Lecture2/mysum.py | grigorykulik/kotlin_start | 1 | 12757288 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 8 11:21:27 2016
@author: ericgrimson
"""
# Accumulate the odd numbers 5, 7, 9 but stop as soon as the running total
# reaches exactly 5 -- i.e. right after the first iteration -- demonstrating
# how `break` leaves a loop early.
mysum = 0
for value in range(5, 11, 2):
    mysum = mysum + value
    if mysum == 5:
        break
print(mysum)
| 3.703125 | 4 |
sake_scrapy/sake_scrapy/spiders/ishikawa.py | popotyoro/Crawler | 0 | 12757289 | <filename>sake_scrapy/sake_scrapy/spiders/ishikawa.py
# -*- coding: utf-8 -*-
import scrapy
from ..items import SakeScrapyItem
class IshikawaSpider(scrapy.Spider):
    """Spider for www.japansake.or.jp collecting sake brand names of makers
    located in Ishikawa prefecture (prefcode=17)."""
    name = 'ishikawa'
    allowed_domains = ['www.japansake.or.jp']
    start_urls = ['http://www.japansake.or.jp/']
    def start_requests(self):
        # Start directly on the maker-search page filtered to Ishikawa
        # (prefcode=17) instead of the site root in start_urls.
        url = 'http://www.japansake.or.jp/sake/app.php/makers/search/?prefcode=17'
        yield scrapy.Request(url, callback=self.parse)
    def parse(self, response):
        """Follow each pagination link of the search-result list."""
        for pos in response.css('#yw1 li.page a'):
            older_post_link = pos.css('::attr(href)').extract_first()
            if older_post_link is None:
                # No link could be extracted, meaning this is the last page,
                # so stop processing.
                return
            older_post_link = response.urljoin(older_post_link)
            # Issue a request for the next result page.
            # NOTE(review): the callback is parse_sake_detail, so detail rows
            # are scraped from the paginated pages but never from this first
            # page -- confirm that skipping page 1 is intended.
            yield scrapy.Request(older_post_link, callback=self.parse_sake_detail)
    def parse_sake_detail(self, response):
        """Yield one SakeScrapyItem per brand name in the result table."""
        for pos in response.css('#yw0 table tbody tr'):
            kuramoto = pos.css('td.column-name a::text').extract_first().strip()
            # The last cell holds the brand names wrapped in Japanese corner
            # quotes and separated by the ideographic comma; strip the markup
            # and quotes, then split on the comma.
            for sake in pos.css('td').extract()[-1].replace('<td>', '').replace('</td>', '').replace('「', '').replace('」', '').split('、'):
                if sake:
                    yield SakeScrapyItem(
                        prefecture='石川',
                        kuramoto=kuramoto,
                        sake_name=sake
                    )
work/NCS/look_for_smallet_file.py | youdar/work | 0 | 12757290 | from __future__ import division
from iotbx.pdb.multimer_reconstruction import multimer
import os
def run():
    """Reconstruct the crystallographic assembly (CAU) of each PDB entry and
    print the five structures with the fewest atom sites.

    Requires the PDB_MIRROR_PDB environment variable to point at a local PDB
    mirror directory containing an INDEX file of entry paths.
    """
    pdb_dir = os.environ["PDB_MIRROR_PDB"]
    pdb_files = open(os.path.join(pdb_dir, "INDEX"), "r").readlines()
    # NOTE: an earlier, differently ordered pdb_code_list assignment was
    # immediately shadowed by this one; the dead first list was removed.
    pdb_code_list = [
        '3dar', '1vcr', '1r2j', '1a37', '1llc', '1tnv', '1tdi', '1w39', '1ny7',
        '1ddl', '1c8n', '2bfu', '4gmp', '3vbr', '3vbu', '3vbo', '4jgy', '3es5',
        '3nop', '3not', '3nou', '3bcc', '1bcc', '1z7s', '6msf', '2iz8', '7msf',
        '2izn', '2c50', '2c51', '2iz9', '2c4y', '2c4z', '5msf', '2c4q', '2bu1',
        '3raa', '3oah', '3ra2', '3ra9', '3ra8', '3ra4', '3qpr', '1ei7', '1a34',
        '3chx', '2wbh', '2fz1', '2fz2', '2gh8', '1wcd', '3fbm', '4gb3', '1laj',
        '3vbh', '1dzl', '3hag', '4iv3', '1js9', '3n7x', '4gh4', '4jgz', '3tn9',
        '4iv1', '1vb2', '1vb4', '1vak', '3s4g', '2buk', '1x36', '4bcu', '1b35',
        '2wzr', '1k5m', '2bq5', '1zba', '1pgw', '3vbs', '1x35', '3vbf', '1pgl',
        '4fsj', '4fte', '4fts', '2e0z', '4ftb', '2w4y', '2w4z', '2qzv', '3vdd',
        '3p0s', '1qjx', '1qjy', '1qju', '3r0r', '2bs1', '2ztn', '1x9t', '2zzq',
        '1x9p', '4aqq', '1za7', '4ar2', '2wws', '2xpj', '4hl8', '3ntt', '2vf1',
        '3ux1', '2xgk', '2izw', '3cji', '4gbt', '2vq0', '4g93', '2g34', '2qij',
        '2g33', '1f2n', '4g0r', '1ng0', '2ws9', '2xbo', '2wff', '1wce', '1dwn',
        '2vf9', '3zfe', '3zff', '3zfg', '2x5i', '1h8t', '3lob', '4ang', '2gtl',
        '2qqp', '1f8v', '1m1c', '1lp3', '4aed', '3e8k', '1uf2', '1ohg', '1ohf',
        '3s6p', '3kz4', '4f5x', '1vsz']
    pdb_length = []
    for fn in pdb_code_list:
        # Assumes INDEX lines end with '<code>.ent.gz' plus a newline, so the
        # 4-character PDB code sits at characters [-12:-8] -- TODO confirm.
        file_path = [x.strip() for x in pdb_files if x[-12:-8] == fn][0]
        # Parenthesized prints keep this module usable from Python 2 and 3.
        print(file_path)
        full_path = os.path.join(pdb_dir, file_path)
        m = multimer(
            file_name=full_path,
            reconstruction_type='cau',
            error_handle=True, eps=1e-2)
        pdb_length.append([len(m.sites_cart()), fn])
    # Sort by atom-site count and show the five smallest structures.
    pdb_length.sort()
    print('=' * 60)
    print(pdb_length[:5])
    print('=' * 60)
if __name__ == '__main__':
    # Parenthesized print calls keep this entry point valid under both
    # Python 2 (where it was written) and Python 3.
    print('start')
    run()
    print('done')
| 1.835938 | 2 |
aiobungie/undefined.py | nxtlo/aiobungie | 36 | 12757291 | # MIT License
#
# Copyright (c) 2020 - Present nxtlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""An undefined type object."""
from __future__ import annotations
import typing
_T = typing.TypeVar("_T", covariant=True)
class UndefinedType:
    """Singleton marker type for an `UNDEFINED` value.

    Instances are always falsy, and every instantiation returns the same
    cached object, so identity checks against it are reliable.
    """

    # Cached singleton instance; created lazily on first instantiation.
    __instance: typing.ClassVar[UndefinedType]

    def __new__(cls) -> UndefinedType:
        # Create and cache the single shared instance on first use.
        try:
            instance = cls.__instance
        except AttributeError:
            instance = super().__new__(cls)
            cls.__instance = instance
        return instance

    def __bool__(self) -> typing.Literal[False]:
        # UNDEFINED always behaves as falsy.
        return False

    def __repr__(self) -> str:
        return "UNDEFINED"

    def __str__(self) -> str:
        return "UNDEFINED"
# Shared singleton exported for the rest of the library; use identity checks
# (`x is Undefined`) to distinguish "not set" from an explicit None.
Undefined: typing.Final[UndefinedType] = UndefinedType()
"""An undefined type for attribs that may be undefined and not None."""
UndefinedOr = typing.Union[UndefinedType, _T]
"""A union version of the Undefined type which can be undefined or any other type."""
| 2.171875 | 2 |
gpytorch/lazy/matmul_lazy_variable.py | julieli/gpytorch | 1 | 12757292 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from torch.autograd import Variable
from .lazy_variable import LazyVariable
from .non_lazy_variable import NonLazyVariable
def _inner_repeat(tensor, amt):
return tensor.unsqueeze(-1).repeat(amt, 1).squeeze(-1)
def _outer_repeat(tensor, amt):
return tensor.unsqueeze(-1).repeat(1, amt).view(-1)
class MatmulLazyVariable(LazyVariable):
    """Lazily represents the matrix product ``lhs @ rhs`` of two lazy variables.
    The product matrix is never formed explicitly; matmuls, quadratic-form
    derivatives, sizes, element lookups and the diagonal are all routed
    through the two factors.
    """
    def __init__(self, lhs, rhs):
        # Promote raw tensors so both factors expose the LazyVariable API.
        if not isinstance(lhs, LazyVariable):
            lhs = NonLazyVariable(lhs)
        if not isinstance(rhs, LazyVariable):
            rhs = NonLazyVariable(rhs)
        super(MatmulLazyVariable, self).__init__(lhs, rhs)
        self.lhs = lhs
        self.rhs = rhs
    def _matmul(self, rhs):
        # (L R) v evaluated right-to-left so the product L R is never formed.
        return self.lhs._matmul(self.rhs._matmul(rhs))
    def _t_matmul(self, rhs):
        # (L R)^T v = R^T (L^T v)
        return self.rhs._t_matmul(self.lhs._t_matmul(rhs))
    def _quad_form_derivative(self, left_vecs, right_vecs):
        # Derivative of l^T (L R) r, chained through each factor's own
        # _quad_form_derivative with the other factor applied to the vectors.
        if left_vecs.ndimension() == 1:
            left_vecs = left_vecs.unsqueeze(1)
            right_vecs = right_vecs.unsqueeze(1)
        right_vecs_times_rhs = self.rhs._matmul(right_vecs)
        left_vecs_times_lhs_t = self.lhs._t_matmul(left_vecs)
        left_grad, = self.lhs._quad_form_derivative(left_vecs, right_vecs_times_rhs)
        right_grad, = self.rhs._quad_form_derivative(left_vecs_times_lhs_t, right_vecs)
        return left_grad, right_grad
    def _size(self):
        # Three dimensions on lhs indicates batch mode (batch, n, k).
        if self.lhs.ndimension() > 2:
            return torch.Size((self.lhs.size(0), self.lhs.size(1), self.rhs.size(2)))
        else:
            return torch.Size((self.lhs.size(0), self.rhs.size(1)))
    def _transpose_nonbatch(self, *args):
        # (L R)^T = R^T L^T
        return self.__class__(
            self.rhs._transpose_nonbatch(), self.lhs._transpose_nonbatch()
        )
    def _batch_get_indices(self, batch_indices, left_indices, right_indices):
        # Entry (b, i, j) = sum_k L[b, i, k] * R[b, k, j]: gather the needed
        # rows of L and columns of R for every k, multiply elementwise, and
        # reduce over the inner dimension.
        outer_size = batch_indices.size(0)
        inner_size = self.lhs.size(-1)
        inner_indices = Variable(right_indices.data.new(inner_size))
        torch.arange(0, inner_size, out=inner_indices.data)
        left_vals = self.lhs._batch_get_indices(
            _outer_repeat(batch_indices, inner_size),
            _outer_repeat(left_indices, inner_size),
            _inner_repeat(inner_indices, outer_size),
        )
        right_vals = self.rhs._batch_get_indices(
            _outer_repeat(batch_indices, inner_size),
            _inner_repeat(inner_indices, outer_size),
            _outer_repeat(right_indices, inner_size),
        )
        return (left_vals.view(-1, inner_size) * right_vals.view(-1, inner_size)).sum(
            -1
        )
    def _get_indices(self, left_indices, right_indices):
        # Entry (i, j) = sum_k L[i, k] * R[k, j], computed without forming L R.
        outer_size = left_indices.size(0)
        inner_size = self.lhs.size(-1)
        inner_indices = Variable(right_indices.data.new(inner_size))
        torch.arange(0, inner_size, out=inner_indices.data)
        left_vals = self.lhs._get_indices(
            _outer_repeat(left_indices, inner_size),
            _inner_repeat(inner_indices, outer_size),
        )
        right_vals = self.rhs._get_indices(
            _inner_repeat(inner_indices, outer_size),
            _outer_repeat(right_indices, inner_size),
        )
        return (left_vals.view(-1, inner_size) * right_vals.view(-1, inner_size)).sum(
            -1
        )
    def diag(self):
        # Fast path when both factors are plain tensors:
        # diag(L R)[i] = sum_k L[i, k] * R[k, i].
        if (
            isinstance(self.lhs, NonLazyVariable)
            and isinstance(self.rhs, NonLazyVariable)
        ):
            return (self.lhs.tensor * self.rhs.tensor.transpose(-1, -2)).sum(-1)
        else:
            return super(MatmulLazyVariable, self).diag()
    def evaluate(self):
        # Materialize the full product (last-resort dense evaluation).
        return torch.matmul(self.lhs.evaluate(), self.rhs.evaluate())
| 2.140625 | 2 |
Python/easy/0884_uncommon_words_from_two_sentences.py | CalmScout/LeetCode | 0 | 12757293 | """
We are given two sentences A and B. (A sentence is a string of space separated words.
Each word consists only of lowercase letters.) A word is uncommon if it appears exactly
once in one of the sentences, and does not appear in the other sentence. Return a list
of all uncommon words. You may return the list in any order.
"""
from typing import List
from collections import Counter
class Solution:
    def uncommonFromSentences(self, A: str, B: str) -> List[str]:
        """Return the words appearing exactly once across both sentences.

        A word is "uncommon" iff its total count over A and B is 1: it then
        occurs once in one sentence and not at all in the other.
        """
        counts = Counter(A.split())
        counts.update(B.split())
        return [word for word, freq in counts.items() if freq == 1]
if __name__ == "__main__":
A = "this apple is sweet"
B = "this apple is sour"
out = ["sweet","sour"]
res = Solution().uncommonFromSentences(A, B)
assert set(res) == set(out) and len(res) == len(out)
A = "apple apple"
B = "banana"
out = ["banana"]
res = Solution().uncommonFromSentences(A, B)
assert set(res) == set(out) and len(res) == len(out)
| 3.953125 | 4 |
Python/GraphTheory/RoadsAndLibraries/RoadsAndLibrariesBFS.py | zseen/hackerrank-challenges | 0 | 12757294 | #!/bin/python3
import os
import sys
from collections import deque
LOCAL_INPUT = "ON"
class CitiesAndRoads:
    """Graph of cities connected by roads; supports connected-component
    counting and the libraries-vs-roads minimum cost computation."""

    def __init__(self):
        # Adjacency map: node id -> set of directly connected node ids.
        self.nodesToEdges = {}

    def addNode(self, nodeId):
        """Register a city with no roads yet."""
        self.nodesToEdges[nodeId] = set()

    def addEdge(self, startNodeId, endNodeId):
        """Record a directed road; call twice (both directions) for an
        undirected road."""
        self.nodesToEdges[startNodeId].add(endNodeId)

    def visitConnectableNodes(self, startNodeId):
        """Breadth-first search: the set of all nodes reachable from
        startNodeId (including itself)."""
        frontier = deque([startNodeId])
        seen = {startNodeId}
        while frontier:
            node = frontier.pop()
            for neighbor in self.nodesToEdges[node]:
                if neighbor not in seen:
                    seen.add(neighbor)
                    frontier.appendleft(neighbor)
        return seen

    def countGroupsOfConnectableNodes(self, startNodeId):
        """Count connected components. (startNodeId is accepted for interface
        compatibility but not used; traversal starts from arbitrary
        unvisited nodes.)"""
        remaining = set(self.nodesToEdges)
        components = 0
        while remaining:
            component = self.visitConnectableNodes(next(iter(remaining)))
            remaining -= component
            components += 1
        return components

    def getMinimumCostBuildingRoads(self, citiesNum, libraryCost, roadCost, startCity):
        """Cost of one library per component plus the spanning roads that
        connect each component internally."""
        components = self.countGroupsOfConnectableNodes(startCity)
        # A spanning tree of each component uses (size - 1) roads, so all
        # components together need (citiesNum - components) roads.
        return (citiesNum - components) * roadCost + components * libraryCost
def createPossibleRoadsMap(roadsNum, citiesNum):
    """Read `roadsNum` road lines from stdin and build the city graph.

    Cities are numbered 1..citiesNum; every road is stored in both
    directions because roads are two-way.
    """
    citiesWithRoads = CitiesAndRoads()
    for city in range(1, citiesNum + 1):
        citiesWithRoads.addNode(city)
    for _ in range(roadsNum):
        road = list(map(int, input().rstrip().split()))
        citiesWithRoads.addEdge(road[0], road[1])
        citiesWithRoads.addEdge(road[1], road[0])
    return citiesWithRoads
def parseInputAndCompareCost():
    """Read one query from stdin and return the minimal cost of providing
    library access to every city."""
    header = input().split()
    citiesNum, roadsNum, libraryCost, roadCost = (int(token) for token in header[:4])
    # Always consume the road lines, even when they end up unused below,
    # so stdin stays aligned for the next query.
    citiesWithRoads = createPossibleRoadsMap(roadsNum, citiesNum)
    if libraryCost <= roadCost:
        # Cheaper (or equal) to put a library in every city.
        return citiesNum * libraryCost
    return citiesWithRoads.getMinimumCostBuildingRoads(citiesNum, libraryCost, roadCost, 1)
def main():
    """Entry point: run all queries, reading from a local fixture file or
    writing to the judge's OUTPUT_PATH depending on LOCAL_INPUT."""
    if LOCAL_INPUT == "ON":
        # Local debugging: redirect stdin to the fixture file and print
        # results to the console.
        sys.stdin = open('RoadsAndLibraries_input.txt')
        for _ in range(int(input())):
            print(parseInputAndCompareCost())
    elif LOCAL_INPUT == "OFF":
        # Judge mode: results are written to the file named by OUTPUT_PATH.
        with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
            for _ in range(int(input())):
                fptr.write(str(parseInputAndCompareCost()) + '\n')
    else:
        print("Please set LOCAL_INPUT to 'ON' or 'OFF'.")


if __name__ == '__main__':
    main()
| 3.40625 | 3 |
src/lib/models/networks/mixnet.py | insightcs/CenterNet | 0 | 12757295 | # coding: utf-8
# @Author: oliver
# @Date: 2019-07-29 19:14:22
import re
import math
import torch
import logging
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from copy import deepcopy
from adaptive_avgmax_pool import SelectAdaptivePool2d
from mixed_conv2d import select_conv2d
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
#**kwargs
}
# Pretrained configs: weights ported from the official TF MixNet releases.
default_cfgs = {
    'mixnet_s': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth'),
    'mixnet_m': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth'),
    'mixnet_l': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'),
}
# Enables verbose builder logging when True.
_DEBUG = False
# Default args for PyTorch BN impl
_BN_MOMENTUM_PT_DEFAULT = 0.1
_BN_EPS_PT_DEFAULT = 1e-5
_BN_ARGS_PT = dict(momentum=_BN_MOMENTUM_PT_DEFAULT, eps=_BN_EPS_PT_DEFAULT)
# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per
# papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay)
# NOTE: momentum varies btw .99 and .9997 depending on source
# .99 in official TF TPU impl
# .9997 (/w .999 in search space) for paper
_BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
_BN_EPS_TF_DEFAULT = 1e-3
_BN_ARGS_TF = dict(momentum=_BN_MOMENTUM_TF_DEFAULT, eps=_BN_EPS_TF_DEFAULT)
def _resolve_bn_args(kwargs):
    """Pop BN-related entries out of `kwargs` and merge them over the defaults.

    Uses the TF-style defaults when 'bn_tf' is truthy, otherwise PyTorch's;
    explicit 'bn_momentum' / 'bn_eps' values override either baseline.
    """
    defaults = _BN_ARGS_TF if kwargs.pop('bn_tf', False) else _BN_ARGS_PT
    bn_args = defaults.copy()
    momentum = kwargs.pop('bn_momentum', None)
    if momentum is not None:
        bn_args['momentum'] = momentum
    eps = kwargs.pop('bn_eps', None)
    if eps is not None:
        bn_args['eps'] = eps
    return bn_args
def _round_channels(channels, multiplier=1.0, divisor=8, channel_min=None):
"""Round number of filters based on depth multiplier."""
if not multiplier:
return channels
channels *= multiplier
channel_min = channel_min or divisor
new_channels = max(
int(channels + divisor / 2) // divisor * divisor,
channel_min)
# Make sure that round down does not go down by more than 10%.
if new_channels < 0.9 * channels:
new_channels += divisor
return new_channels
def _parse_ksize(ss):
if ss.isdigit():
return int(ss)
else:
return [int(k) for k in ss.split('.')]
def _decode_block_str(block_str, depth_multiplier=1.0):
    """ Decode block definition string
    Gets a list of block arg (dicts) through a string notation of arguments.
    E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
    All args can exist in any order with the exception of the leading string which
    is assumed to indicate the block type.
    leading string - block type (
      ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
    r - number of repeat blocks,
    k - kernel size (possibly a '.'-separated list for mixed kernels),
    s - strides (1-9),
    e - expansion ratio,
    c - output channels,
    se - squeeze/excitation ratio,
    n - activation fn ('re', 'r6', 'hs', or 'sw'),
    a - expansion-conv kernel size, p - pointwise-conv kernel size,
    noskip - disable the residual connection
    Args:
        block_str: a string representation of block arguments.
    Returns:
        A list of block args (dicts), one per repeat (scaled by depth_multiplier).
    Raises:
        ValueError: if the string def not properly specified (TODO)
    """
    assert isinstance(block_str, str)
    ops = block_str.split('_')
    block_type = ops[0]  # take the block type off the front
    ops = ops[1:]
    options = {}
    noskip = False
    for op in ops:
        # string options being checked on individual basis, combine if they grow
        if op == 'noskip':
            noskip = True
        elif op.startswith('n'):
            # activation fn: map the short code to the actual callable
            key = op[0]
            v = op[1:]
            if v == 're':
                value = F.relu
            elif v == 'r6':
                value = F.relu6
            elif v == 'hs':
                value = hard_swish
            elif v == 'sw':
                value = swish
            else:
                # unknown activation code: silently skipped
                continue
            options[key] = value
        else:
            # all numeric options: split into leading key letters and value
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value
    # if act_fn is None, the model default (passed to model init) will be used
    act_fn = options['n'] if 'n' in options else None
    exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
    pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
    num_repeat = int(options['r'])
    # each type of block has different valid arguments, fill accordingly
    if block_type == 'ir':
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            exp_kernel_size=exp_kernel_size,
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            exp_ratio=float(options['e']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_fn=act_fn,
            noskip=noskip,
        )
    elif block_type == 'ds' or block_type == 'dsa':
        # 'dsa' variant activates after the pointwise conv and never skips
        block_args = dict(
            block_type=block_type,
            dw_kernel_size=_parse_ksize(options['k']),
            pw_kernel_size=pw_kernel_size,
            out_chs=int(options['c']),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=int(options['s']),
            act_fn=act_fn,
            pw_act=block_type == 'dsa',
            noskip=block_type == 'dsa' or noskip,
        )
    elif block_type == 'cn':
        block_args = dict(
            block_type=block_type,
            kernel_size=int(options['k']),
            out_chs=int(options['c']),
            stride=int(options['s']),
            act_fn=act_fn,
        )
    else:
        assert False, 'Unknown block type (%s)' % block_type
    # return a list of block args expanded by num_repeat and
    # scaled by depth_multiplier
    num_repeat = int(math.ceil(num_repeat * depth_multiplier))
    return [deepcopy(block_args) for _ in range(num_repeat)]
def _decode_arch_args(string_list):
    """Decode a flat list of block-definition strings into block-arg lists."""
    return [_decode_block_str(block_str) for block_str in string_list]
def _decode_arch_def(arch_def, depth_multiplier=1.0):
    """Decode a nested architecture definition (a list of stages, each stage a
    list of block strings) into per-stage lists of block-arg dicts."""
    arch_args = []
    for block_strings in arch_def:
        assert isinstance(block_strings, list)
        stage_args = []
        for block_str in block_strings:
            assert isinstance(block_str, str)
            stage_args.extend(_decode_block_str(block_str, depth_multiplier))
        arch_args.append(stage_args)
    return arch_args
def swish(x, inplace=False):
    """Swish activation: x * sigmoid(x); mutates `x` when inplace=True."""
    return x.mul_(x.sigmoid()) if inplace else x * x.sigmoid()
def sigmoid(x, inplace=False):
    """Sigmoid activation; mutates `x` when inplace=True."""
    if inplace:
        return x.sigmoid_()
    return x.sigmoid()
def hard_swish(x, inplace=False):
    """Hard-swish: x * relu6(x + 3) / 6; mutates `x` when inplace=True."""
    gate = F.relu6(x + 3.) / 6.
    return x.mul_(gate) if inplace else x * gate
def hard_sigmoid(x, inplace=False):
    """Hard sigmoid: relu6(x + 3) / 6 (piecewise-linear sigmoid approximation)."""
    if not inplace:
        return F.relu6(x + 3.) / 6.
    return x.add_(3.).clamp_(0., 6.).div_(6.)
class _BlockBuilder(object):
    """ Build Trunk Blocks
    Consumes the decoded architecture definition (see _decode_arch_def) and
    instantiates the corresponding nn.Module stacks, threading channel
    rounding, padding, activation, SE and BN settings through every block.
    This ended up being somewhat of a cross between
    https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py
    and
    https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py
    """
    def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None,
                 pad_type='', act_fn=None, se_gate_fn=sigmoid, se_reduce_mid=False,
                 bn_args=_BN_ARGS_PT, drop_connect_rate=0., verbose=False):
        self.channel_multiplier = channel_multiplier
        self.channel_divisor = channel_divisor
        self.channel_min = channel_min
        self.pad_type = pad_type
        self.act_fn = act_fn
        self.se_gate_fn = se_gate_fn
        self.se_reduce_mid = se_reduce_mid
        self.bn_args = bn_args
        self.drop_connect_rate = drop_connect_rate
        self.verbose = verbose
        # updated during build
        self.in_chs = None
        self.block_idx = 0
        self.block_count = 0
    def _round_channels(self, chs):
        # Apply the global width multiplier with divisor/minimum rounding.
        return _round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min)
    def _make_block(self, ba):
        """Instantiate one block from its args dict `ba` (consumes 'block_type')."""
        bt = ba.pop('block_type')
        ba['in_chs'] = self.in_chs
        ba['out_chs'] = self._round_channels(ba['out_chs'])
        ba['bn_args'] = self.bn_args
        ba['pad_type'] = self.pad_type
        # block act fn overrides the model default
        ba['act_fn'] = ba['act_fn'] if ba['act_fn'] is not None else self.act_fn
        assert ba['act_fn'] is not None
        if bt == 'ir':
            # drop-connect rate ramps linearly with block depth
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            ba['se_gate_fn'] = self.se_gate_fn
            ba['se_reduce_mid'] = self.se_reduce_mid
            if self.verbose:
                logging.info('  InvertedResidual {}, Args: {}'.format(self.block_idx, str(ba)))
            block = InvertedResidual(**ba)
        elif bt == 'ds' or bt == 'dsa':
            ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count
            if self.verbose:
                logging.info('  DepthwiseSeparable {}, Args: {}'.format(self.block_idx, str(ba)))
            block = DepthwiseSeparableConv(**ba)
        elif bt == 'cn':
            if self.verbose:
                logging.info('  ConvBnAct {}, Args: {}'.format(self.block_idx, str(ba)))
            block = ConvBnAct(**ba)
        else:
            assert False, 'Uknkown block type (%s) while building model.' % bt
        self.in_chs = ba['out_chs']  # update in_chs for arg of next block
        return block
    def _make_stack(self, stack_args):
        """Build one stage (stack) of sequential blocks."""
        blocks = []
        # each stack (stage) contains a list of block arguments
        for i, ba in enumerate(stack_args):
            if self.verbose:
                logging.info(' Block: {}'.format(i))
            if i >= 1:
                # only the first block in any stack can have a stride > 1
                ba['stride'] = 1
            block = self._make_block(ba)
            blocks.append(block)
            self.block_idx += 1  # incr global idx (across all stacks)
        return nn.Sequential(*blocks)
    def __call__(self, in_chs, block_args):
        """ Build the blocks
        Args:
            in_chs: Number of input-channels passed to first block
            block_args: A list of lists, outer list defines stages, inner
                list contains strings defining block configuration(s)
        Return:
            List of block stacks (each stack wrapped in nn.Sequential)
        """
        if self.verbose:
            logging.info('Building model trunk with %d stages...' % len(block_args))
        self.in_chs = in_chs
        self.block_count = sum([len(x) for x in block_args])
        self.block_idx = 0
        blocks = []
        # outer list of block_args defines the stacks ('stages' by some conventions)
        for stack_idx, stack in enumerate(block_args):
            if self.verbose:
                logging.info('Stack: {}'.format(stack_idx))
            assert isinstance(stack, list)
            stack = self._make_stack(stack)
            blocks.append(stack)
        return blocks
def _initialize_weight_goog(m):
# weight init as per Tensorflow Official impl
# https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # fan-out
m.weight.data.normal_(0, math.sqrt(2.0 / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(0) # fan-out
init_range = 1.0 / math.sqrt(n)
m.weight.data.uniform_(-init_range, init_range)
m.bias.data.zero_()
def _initialize_weight_default(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='linear')
def drop_connect(inputs, training=False, drop_connect_rate=0.):
    """Apply drop-connect (stochastic depth): randomly zero whole examples.

    Identity at eval time; at train time each example survives with
    probability (1 - drop_connect_rate) and survivors are rescaled so the
    expected activation is unchanged.
    """
    if not training:
        return inputs
    keep_prob = 1 - drop_connect_rate
    # One Bernoulli(keep_prob) draw per example, broadcast over C/H/W.
    noise = torch.rand(
        (inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device)
    binary_mask = (keep_prob + noise).floor_()
    return inputs.div(keep_prob) * binary_mask
class ChannelShuffle(nn.Module):
    """Interleave channels across `groups` (as in ShuffleNet):
    [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,W] -> [N,C,H,W]
    """
    # FIXME haven't used yet

    def __init__(self, groups):
        super(ChannelShuffle, self).__init__()
        self.groups = groups

    def forward(self, x):
        batch, channels, height, width = x.size()
        groups = self.groups
        assert channels % groups == 0, "Incompatible group size {} for input channel {}".format(
            groups, channels
        )
        grouped = x.view(batch, groups, channels // groups, height, width)
        shuffled = grouped.permute(0, 2, 1, 3, 4).contiguous()
        return shuffled.view(batch, channels, height, width)
class SqueezeExcite(nn.Module):
    """Squeeze-and-excitation channel attention: global average pool, a
    reduce/expand pair of 1x1 convs, then a gated multiply on the input."""

    def __init__(self, in_chs, reduce_chs=None, act_fn=F.relu, gate_fn=sigmoid):
        super(SqueezeExcite, self).__init__()
        self.act_fn = act_fn
        self.gate_fn = gate_fn
        bottleneck_chs = reduce_chs or in_chs
        self.conv_reduce = nn.Conv2d(in_chs, bottleneck_chs, 1, bias=True)
        self.conv_expand = nn.Conv2d(bottleneck_chs, in_chs, 1, bias=True)

    def forward(self, x):
        # NOTE adaptiveavgpool can be used here, but seems to cause issues with NVIDIA AMP performance
        squeezed = x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1)
        attn = self.conv_reduce(squeezed)
        attn = self.act_fn(attn, inplace=True)
        attn = self.conv_expand(attn)
        return x * self.gate_fn(attn)
class ConvBnAct(nn.Module):
    """Convolution followed by batch normalization and an activation."""

    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, pad_type='', act_fn=F.relu, bn_args=_BN_ARGS_PT):
        super(ConvBnAct, self).__init__()
        assert stride in [1, 2]
        self.act_fn = act_fn
        self.conv = select_conv2d(in_chs, out_chs, kernel_size, stride=stride, padding=pad_type)
        self.bn1 = nn.BatchNorm2d(out_chs, **bn_args)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn1(out)
        return self.act_fn(out, inplace=True)
class DepthwiseSeparableConv(nn.Module):
    """ DepthwiseSeparable block
    Used for DS convs in MobileNet-V1 and in the place of IR blocks with an expansion
    factor of 1.0. This is an alternative to having a IR with optional first pw conv.
    Structure: depthwise conv -> BN -> act -> [SE] -> pointwise conv -> BN
    [-> act], with an optional residual plus drop-connect.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, pad_type='', act_fn=F.relu, noskip=False,
                 pw_kernel_size=1, pw_act=False,
                 se_ratio=0., se_gate_fn=sigmoid,
                 bn_args=_BN_ARGS_PT, drop_connect_rate=0.):
        super(DepthwiseSeparableConv, self).__init__()
        assert stride in [1, 2]
        self.has_se = se_ratio is not None and se_ratio > 0.
        # residual only possible when the shape is preserved and not disabled
        self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip
        self.has_pw_act = pw_act  # activation after point-wise conv
        self.act_fn = act_fn
        self.drop_connect_rate = drop_connect_rate
        self.conv_dw = select_conv2d(
            in_chs, in_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True)
        self.bn1 = nn.BatchNorm2d(in_chs, **bn_args)
        # Squeeze-and-excitation
        if self.has_se:
            self.se = SqueezeExcite(
                in_chs, reduce_chs=max(1, int(in_chs * se_ratio)), act_fn=act_fn, gate_fn=se_gate_fn)
        self.conv_pw = select_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn2 = nn.BatchNorm2d(out_chs, **bn_args)
    def forward(self, x):
        residual = x
        x = self.conv_dw(x)
        x = self.bn1(x)
        x = self.act_fn(x, inplace=True)
        if self.has_se:
            x = self.se(x)
        x = self.conv_pw(x)
        x = self.bn2(x)
        if self.has_pw_act:
            x = self.act_fn(x, inplace=True)
        if self.has_residual:
            # drop-connect is applied to the transformed path before the add
            if self.drop_connect_rate > 0.:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        return x
class InvertedResidual(nn.Module):
    """ Inverted residual block w/ optional SE
    Structure: pointwise expansion -> BN -> act -> [channel shuffle] ->
    depthwise conv -> BN -> act -> [SE] -> pointwise linear projection -> BN,
    with an optional residual plus drop-connect.
    """
    def __init__(self, in_chs, out_chs, dw_kernel_size=3,
                 stride=1, pad_type='', act_fn=F.relu, noskip=False,
                 exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
                 se_ratio=0., se_reduce_mid=False, se_gate_fn=sigmoid,
                 shuffle_type=None, bn_args=_BN_ARGS_PT, drop_connect_rate=0.):
        super(InvertedResidual, self).__init__()
        # width of the expanded (middle) representation
        mid_chs = int(in_chs * exp_ratio)
        self.has_se = se_ratio is not None and se_ratio > 0.
        # residual only possible when the shape is preserved and not disabled
        self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
        self.act_fn = act_fn
        self.drop_connect_rate = drop_connect_rate
        # Point-wise expansion
        self.conv_pw = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type)
        self.bn1 = nn.BatchNorm2d(mid_chs, **bn_args)
        self.shuffle_type = shuffle_type
        if shuffle_type is not None and isinstance(exp_kernel_size, list):
            self.shuffle = ChannelShuffle(len(exp_kernel_size))
        # Depth-wise convolution
        self.conv_dw = select_conv2d(
            mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True)
        self.bn2 = nn.BatchNorm2d(mid_chs, **bn_args)
        # Squeeze-and-excitation
        if self.has_se:
            # se_reduce_mid bases the SE bottleneck width on the expanded
            # channels rather than the input channels
            se_base_chs = mid_chs if se_reduce_mid else in_chs
            self.se = SqueezeExcite(
                mid_chs, reduce_chs=max(1, int(se_base_chs * se_ratio)), act_fn=act_fn, gate_fn=se_gate_fn)
        # Point-wise linear projection
        self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
        self.bn3 = nn.BatchNorm2d(out_chs, **bn_args)
    def forward(self, x):
        residual = x
        # Point-wise expansion
        x = self.conv_pw(x)
        x = self.bn1(x)
        x = self.act_fn(x, inplace=True)
        # FIXME haven't tried this yet
        # for channel shuffle when using groups with pointwise convs as per FBNet variants
        if self.shuffle_type == "mid":
            x = self.shuffle(x)
        # Depth-wise convolution
        x = self.conv_dw(x)
        x = self.bn2(x)
        x = self.act_fn(x, inplace=True)
        # Squeeze-and-excitation
        if self.has_se:
            x = self.se(x)
        # Point-wise linear projection
        x = self.conv_pwl(x)
        x = self.bn3(x)
        if self.has_residual:
            # drop-connect is applied to the transformed path before the add
            if self.drop_connect_rate > 0.:
                x = drop_connect(x, self.training, self.drop_connect_rate)
            x += residual
        # NOTE maskrcnn_benchmark building blocks have an SE module defined here for some variants
        return x
def fill_up_weights(up):
    """Initialize a transposed-conv weight as a bilinear upsampling kernel.

    Fills channel (0, 0) with bilinear interpolation coefficients and
    copies that kernel to every other output channel (input channel 0),
    the standard deconv init used for upsampling heads.

    Args:
        up: an ``nn.ConvTranspose2d`` module whose ``weight`` is filled
            in place.
    """
    weight = up.weight.data
    half = math.ceil(weight.size(2) / 2)
    center = (2 * half - 1 - half % 2) / (2. * half)
    for row in range(weight.size(2)):
        for col in range(weight.size(3)):
            weight[0, 0, row, col] = (
                (1 - math.fabs(row / half - center))
                * (1 - math.fabs(col / half - center)))
    for ch in range(1, weight.size(0)):
        weight[ch, 0, :, :] = weight[0, 0, :, :]
def fill_fc_weights(layers):
    """Initialize every Conv2d inside *layers* in place.

    Weights are drawn from N(0, 0.001); biases (when present) are zeroed.
    Non-conv modules are left untouched.
    """
    for module in layers.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.normal_(module.weight, std=0.001)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
class MixNet(nn.Module):
    """MixNet (S/M/L) backbone with CenterNet-style deconv upsampling and
    per-task output heads.

    The network is: stem conv -> BN -> act -> efficient-net style block
    stack (built by ``_BlockBuilder``) -> three deconv upsampling stages ->
    one small conv head per entry in ``heads``.

    Args:
        block_args: decoded architecture definition for ``_BlockBuilder``.
        heads: mapping of head name -> number of output channels; heads
            whose name contains 'hm' get a heatmap bias init of -2.19.
        head_conv: hidden width of each head (0 = single 1x1 conv head).
        num_classes: kept for API compatibility with classifier variants.
        in_chans: number of input image channels.
        scale: unused; kept for backward compatibility with callers.
        stem_size: stem conv output channels (before channel rounding).
        num_features: feature width attribute (classifier-variant metadata).
        channel_multiplier / channel_divisor / channel_min: channel rounding.
        pad_type: padding scheme for select_conv2d.
        act_fn: activation function.
        drop_rate / drop_connect_rate: dropout / stochastic-depth rates.
        se_gate_fn / se_reduce_mid: SE module configuration.
        bn_args: kwargs for nn.BatchNorm2d.
        weight_init: 'goog' for the TF-style init, anything else for default.
    """
    def __init__(self, block_args, heads=None, head_conv=256, num_classes=1000, in_chans=3, scale=1, stem_size=32, num_features=1280,
                 channel_multiplier=1.0, channel_divisor=8, channel_min=None,
                 pad_type='', act_fn=F.relu, drop_rate=0., drop_connect_rate=0.,
                 se_gate_fn=sigmoid, se_reduce_mid=False, bn_args=_BN_ARGS_PT, weight_init='goog'):
        super(MixNet, self).__init__()
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        self.act_fn = act_fn
        self.num_features = num_features
        # BUG FIX: 'heads' previously used a mutable default argument ({});
        # use a None sentinel instead (behaviorally identical for callers).
        self.heads = {} if heads is None else heads
        stem_size = _round_channels(stem_size, channel_multiplier, channel_divisor, channel_min)
        self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
        self.bn1 = nn.BatchNorm2d(stem_size, **bn_args)
        in_chs = stem_size
        builder = _BlockBuilder(
            channel_multiplier, channel_divisor, channel_min,
            pad_type, act_fn, se_gate_fn, se_reduce_mid,
            bn_args, drop_connect_rate, verbose=_DEBUG)
        self.blocks = nn.Sequential(*builder(in_chs, block_args))
        # (removed redundant no-op: self.blocks = self.blocks)
        in_chs = builder.in_chs
        self.inplanes = builder.in_chs
        for m in self.modules():
            if weight_init == 'goog':
                _initialize_weight_goog(m)
            else:
                _initialize_weight_default(m)
        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            3,
            [256, 128, 64],
            [4, 4, 4],
        )
        for head in self.heads:
            classes = self.heads[head]
            if head_conv > 0:
                # 3x3 conv -> ReLU -> 1x1 conv head
                fc = nn.Sequential(
                    nn.Conv2d(64, head_conv,
                              kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, classes,
                              kernel_size=1, stride=1,
                              padding=0, bias=True))
                if 'hm' in head:
                    # Focal-loss heatmap bias init (prior prob ~= 0.1).
                    fc[-1].bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            else:
                fc = nn.Conv2d(64, classes,
                               kernel_size=1, stride=1,
                               padding=0, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)
    def _get_deconv_cfg(self, deconv_kernel, index):
        """Return (kernel, padding, output_padding) for a deconv stage."""
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            # BUG FIX: previously fell through and raised UnboundLocalError;
            # fail with an explicit, descriptive error instead.
            raise ValueError(
                'Unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding
    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build the upsampling trunk: per stage, a 3x3 conv (+BN+ReLU)
        followed by a stride-2 ConvTranspose2d (+BN+ReLU)."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            # A deformable conv (DCN) was used here originally; replaced by
            # a plain 3x3 conv to avoid the external dependency.
            fc = nn.Conv2d(self.inplanes, planes,
                           kernel_size=3, stride=1,
                           padding=1, dilation=1, bias=False)
            fill_fc_weights(fc)
            up = nn.ConvTranspose2d(
                in_channels=planes,
                out_channels=planes,
                kernel_size=kernel,
                stride=2,
                padding=padding,
                output_padding=output_padding,
                bias=False)
            fill_up_weights(up)
            layers.append(fc)
            layers.append(nn.BatchNorm2d(planes, momentum=_BN_MOMENTUM_PT_DEFAULT))
            layers.append(nn.ReLU(inplace=True))
            layers.append(up)
            layers.append(nn.BatchNorm2d(planes, momentum=_BN_MOMENTUM_PT_DEFAULT))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)
    def forward(self, inputs):
        """Return a one-element list holding {head_name: head_output}."""
        x = self.conv_stem(inputs)
        x = self.bn1(x)
        x = self.act_fn(x, inplace=True)
        x = self.blocks(x)
        x = self.deconv_layers(x)
        ret = {}
        for head in self.heads:
            ret[head] = self.__getattr__(head)(x)
        return [ret]
def _gen_mixnet_s(channel_multiplier=1.0, num_classes=1000, **kwargs):
    """Construct a MixNet-Small model.

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595

    Args:
        channel_multiplier: global channel width multiplier.
        num_classes: classifier size (forwarded to MixNet).
        **kwargs: extra MixNet keyword arguments; BN overrides are
            extracted by ``_resolve_bn_args``.
    """
    # Stage definitions (input resolutions noted assume a 224x224 image).
    arch = [
        # stage 0, 112x112 in (relu)
        ['ds_r1_k3_s1_e1_c16'],
        # stage 1, 112x112 in (relu)
        ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'],
        # stage 2, 56x56 in (swish)
        ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],
        # stage 3, 28x28 in (swish)
        ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'],
        # stage 4, 14x14 in (swish)
        ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],
        # stage 5, 14x14 in -> 7x7 (swish)
        ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],
    ]
    return MixNet(
        _decode_arch_def(arch),
        num_classes=num_classes,
        stem_size=16,
        num_features=1536,
        channel_multiplier=channel_multiplier,
        channel_divisor=8,
        channel_min=None,
        bn_args=_resolve_bn_args(kwargs),
        act_fn=F.relu,
        **kwargs
    )
def _gen_mixnet_m(channel_multiplier=1.0, num_classes=1000, **kwargs):
    """Construct a MixNet Medium/Large model (Large = this arch scaled 1.3x).

    Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet
    Paper: https://arxiv.org/abs/1907.09595

    Args:
        channel_multiplier: global channel width multiplier.
        num_classes: classifier size (forwarded to MixNet).
        **kwargs: extra MixNet keyword arguments; BN overrides are
            extracted by ``_resolve_bn_args``.
    """
    # Stage definitions (input resolutions noted assume a 224x224 image).
    arch = [
        # stage 0, 112x112 in (relu)
        ['ds_r1_k3_s1_e1_c24'],
        # stage 1, 112x112 in (relu)
        ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'],
        # stage 2, 56x56 in (swish)
        ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],
        # stage 3, 28x28 in (swish)
        ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'],
        # stage 4, 14x14 in (swish)
        ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],
        # stage 5, 14x14 in -> 7x7 (swish)
        ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],
    ]
    return MixNet(
        _decode_arch_def(arch),
        num_classes=num_classes,
        stem_size=24,
        num_features=1536,
        channel_multiplier=channel_multiplier,
        channel_divisor=8,
        channel_min=None,
        bn_args=_resolve_bn_args(kwargs),
        act_fn=F.relu,
        **kwargs
    )
def mixnet_s(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build a MixNet-Small model (TensorFlow-compatible variant:
    'same' padding and the TF BatchNorm epsilon)."""
    kwargs['bn_eps'] = _BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    cfg = default_cfgs['mixnet_s']
    model = _gen_mixnet_s(
        channel_multiplier=1.0, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, in_chans)
    return model
def mixnet_m(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build a MixNet-Medium model (TensorFlow-compatible variant:
    'same' padding and the TF BatchNorm epsilon)."""
    kwargs['bn_eps'] = _BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    cfg = default_cfgs['mixnet_m']
    model = _gen_mixnet_m(
        channel_multiplier=1.0, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, in_chans)
    return model
def mixnet_l(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build a MixNet-Large model (TensorFlow-compatible variant).

    MixNet-L is the Medium architecture with a 1.3x channel multiplier,
    hence the call to ``_gen_mixnet_m`` below.
    """
    kwargs['bn_eps'] = _BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    cfg = default_cfgs['mixnet_l']
    model = _gen_mixnet_m(
        channel_multiplier=1.3, num_classes=num_classes, in_chans=in_chans, **kwargs)
    model.default_cfg = cfg
    if pretrained:
        load_pretrained(model, cfg, in_chans)
    return model
def load_pretrained(model, default_cfg, in_chans=3):
    """Load pretrained weights described by *default_cfg* into *model*.

    Missing parameters fall back to the model's own (random) values and
    shape-mismatched parameters are replaced by the model's values, so
    loading never fails on architecture differences.

    Args:
        model: target nn.Module.
        default_cfg: dict with at least 'url' (and 'first_conv' when
            converting to single-channel input).
        in_chans: input channel count; 1 averages the first conv's
            weights over the RGB axis, 3 loads as-is.

    Returns:
        The model with weights loaded, or None when no URL is configured
        (model keeps its random initialization).

    Raises:
        ValueError: if ``in_chans`` is neither 1 nor 3.
    """
    if 'url' not in default_cfg or not default_cfg['url']:
        logging.warning("Pretrained model URL is invalid, using random initialization.")
        return
    pretrained_state_dict = model_zoo.load_url(default_cfg['url'])
    model_state_dict = model.state_dict()
    if in_chans == 1:
        conv1_name = default_cfg['first_conv']
        logging.info('Converting first conv (%s) from 3 to 1 channel' % conv1_name)
        # Collapse RGB filters to one channel by averaging.
        conv1_weight = pretrained_state_dict[conv1_name + '.weight']
        pretrained_state_dict[conv1_name + '.weight'] = conv1_weight.mean(dim=1, keepdim=True)
    elif in_chans != 3:
        # BUG FIX: was `assert False, ...` -- asserts are stripped under
        # `python -O`; raise an explicit error for input validation.
        raise ValueError("Invalid in_chans for pretrained weights")
    # Keep only compatible pretrained tensors; mismatched shapes are
    # replaced by the model's current values.
    for k in pretrained_state_dict.keys():
        if k in model_state_dict.keys():
            if pretrained_state_dict[k].shape != model_state_dict[k].shape:
                pretrained_state_dict[k] = model_state_dict[k]
        else:
            print('Drop parameter {}.'.format(k))
    # Backfill parameters the checkpoint does not provide.
    for k in model_state_dict.keys():
        if not (k in pretrained_state_dict.keys()):
            print('No param {}.'.format(k))
            pretrained_state_dict[k] = model_state_dict[k]
    model.load_state_dict(pretrained_state_dict, strict=False)
    return model
if __name__ == '__main__':
    # Smoke test: build a randomly initialized MixNet-S.
    model = mixnet_s()
| 1.96875 | 2 |
pyss/queue_object.py | vpv11110000/pyss | 0 | 12757296 | # #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Модуль объекта очереди
"""
# pylint: disable=line-too-long
from pyss import pyssobject
from pyss.pyss_const import *
from pyss.block import Block
from pyss import statisticalseries
from pyss.pyssobject import PyssObject
from pyss.pyssstateobject import PyssStateObject
class QueueObject(PyssStateObject):
    """Queue object: holds the data/statistics of a simulation queue.

    State-change handlers may be attached (see PyssStateObject); the "old
    state" passed to a handler is the previous value of the current queue
    length.  The owning model owns the QueueObject and is notified of it
    via ``addQueueObject``.

    Args:
        ownerModel: owning model object (default None)
        queueName: queue name (required, non-blank)
        initLength: initial queue length (default 0)

    Keys stored on the object (``bl = Queue(...)``; in addition to the
    pyssownerobject.PyssOwnerObject attributes):
        bl[QUEUE_OBJECT][QUEUE_NAME] -- queue (queue.Queue) object name
        bl[QUEUE_OBJECT][QUEUE_LENGTH] -- current queue length
        bl[QUEUE_OBJECT][QUEUE_LENGTH_MAX] -- maximum queue length observed
        bl[QUEUE_OBJECT][ENTRY_ZERO] -- NOT_IMPLEMENT: number of zero
            entries (entries whose time spent in the queue is zero)
        bl[QUEUE_OBJECT][TIME_MEAN] -- mean transact residence time
            (zero entries included)
        bl[QUEUE_OBJECT][TIME_MEAN_WITHOUT_ZERO] -- mean residence time
            excluding zero entries
        bl[QUEUE_OBJECT][QUEUE_MEAN_LENGTH_BY_TIME] -- NOT_IMPLEMENT
        bl[QUEUE_OBJECT][LISTTRANSACT] -- dict: <transact number> ->
            <time the transact entered the queue>
        bl[QUEUE_OBJECT][LIFE_TIME_LIST] -- list of time/length marks:
            [{START: <time>, STATE: <length>}]
        bl[QUEUE_OBJECT][STATISTICAL_SERIES] -- statistical series of
            transact residence times
    """
    def __init__(self, ownerModel=None, queueName=None, initLength=0):
        super(QueueObject, self).__init__(QUEUE_OBJECT, owner=ownerModel)
        # Validate queueName is non-blank.
        # NOTE(review): this module targets Python 2 (see the py2 `print`
        # statement below); under Python 3 map() is lazy and this check
        # would silently never run.
        map(pyssobject.raiseIsTrue, [queueName is None or queueName.strip() == ""])
        self[QUEUE_NAME] = queueName
        # current queue length
        self[QUEUE_LENGTH] = initLength
        self[QUEUE_LENGTH_MAX] = initLength
        self[ENTRY_ZERO] = 0
        self[TIME_MEAN] = None
        self[TIME_MEAN_WITHOUT_ZERO] = None
        self[QUEUE_MEAN_LENGTH_BY_TIME] = None
        self[ENTRY_COUNT] = 0
        self[CURRENT_COUNT] = 0
        self[RETRY] = 0
        # transact numbers mapped to their enqueue times
        self[LISTTRANSACT] = {}
        self[LIFE_TIME_LIST] = [{START:ownerModel.getCurTime(), STATE:initLength}]
        self[STATISTICAL_SERIES] = statisticalseries.StatisticalSeries()
        ownerModel.addQueueObject(self)
    def increase(self, currentTime, transact, delta):
        """Register *transact* entering the queue, growing the length by
        *delta*; updates max length, counters and the life-time list, then
        fires state-change handlers with the previous length."""
        oldState = self[QUEUE_LENGTH]
        self[QUEUE_LENGTH] += delta
        if self[QUEUE_LENGTH_MAX] < self[QUEUE_LENGTH]:
            self[QUEUE_LENGTH_MAX] = self[QUEUE_LENGTH]
        self[LISTTRANSACT][transact[NUM]] = currentTime
        self[ENTRY_COUNT] += 1
        self[CURRENT_COUNT] += 1
        self[LIFE_TIME_LIST].append({START:currentTime, STATE:self[QUEUE_LENGTH]})
        # the "old state" is the previous value of the current queue length
        self.fireHandlerOnStateChange(oldState=oldState)
    def decrease(self, currentTime, transact, delta):
        """Shrink the queue length by *delta* and fire handlers.

        Note: incomplete -- this does not yet do everything required
        (e.g. LISTTRANSACT and counters are not updated here).
        """
        oldState = self[QUEUE_LENGTH]
        self[QUEUE_LENGTH] -= delta
        self[LIFE_TIME_LIST].append({START:currentTime, STATE:self[QUEUE_LENGTH]})
        self.fireHandlerOnStateChange(oldState=oldState)
if __name__ == '__main__':
    def main():
        # Placeholder smoke entry point.
        # NOTE: Python 2 print statement -- this module targets Python 2.
        print "?"
    main()
| 2.203125 | 2 |
pygeostatistics/gslib_reader.py | wassemalward/pyGeoStatistics | 66 | 12757297 | # -*- coding: utf-8 -*-
"""
Read gslib file format
Created on Wen Sep 5th 2018
"""
from __future__ import absolute_import, division, print_function
__author__ = "yuhao"
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from mpl_toolkits.mplot3d import Axes3D
class SpatialData(object):
    """Spatial dataset loaded from a GSLIB-format text file.

    The GSLIB layout is: a title line, a column-count line, one line per
    column name, then tab-separated data rows.  Columns named 'x', 'y',
    'z' are coordinates; the remaining columns are properties.  Files
    without a 'z' column are treated as 2-D (z filled with 0).

    Attributes:
        datafl: path to the input file.
        df: pandas DataFrame of the parsed data.
        vr: numpy record array view of the same data (float64).
        property_name: list of non-coordinate column names.
    """
    def __init__(self, file_path):
        self.datafl = file_path
        self.vr = None
        self.property_name = None
        self._2d = False
        self._read_data()

    def _read_data(self):
        """Parse the GSLIB file into ``self.df`` and record array ``self.vr``."""
        column_name = []
        with open(self.datafl, 'r') as fin:
            _ = fin.readline().strip()  # title line, ignored
            ncols = int(fin.readline().strip())
            for _ in range(ncols):
                column_name.append(fin.readline().strip())
        self.property_name = [item for item in column_name
                              if item not in ['x', 'y', 'z']]
        # Data rows start after title + count + ncols name lines.
        df = pd.read_csv(self.datafl, sep='\t', header=None, names=column_name,
                         skiprows=ncols+2)
        if 'z' not in column_name:
            self._2d = True
            column_name.append('z')
            df['z'] = 0
        self.df = df
        data_dtype = np.dtype({
            'names': column_name,
            'formats': ['f8'] * len(column_name)})
        self.vr = np.core.records.fromarrays(
            df.values.transpose(), dtype=data_dtype)

    def preview(self):
        """Return the first 20 rows as a DataFrame."""
        # BUG FIX: previously called .head() on self.vr, but self.vr is a
        # numpy record array which has no head(); use the DataFrame.
        return self.df.head(20)

    def pdf(self, ax, bins=15):
        """Draw a histogram (empirical pdf) of the first property on *ax*."""
        hist, bin_edges = np.histogram(self.vr[self.property_name[0]],
                                       bins=bins)
        ax.set_title("pdf")
        ax.bar(bin_edges[:-1], hist, width=bin_edges[1]-bin_edges[0],
               color='red', alpha=0.5)

    def cdf(self, ax):
        """Draw the empirical cdf of the first property on *ax*."""
        data = self.vr[self.property_name[0]]
        data = np.sort(data)
        cdf = np.arange(1, len(data) + 1) / len(data)
        ax.set_title("cdf")
        ax.plot(data, cdf)

    @property
    def maximum(self):
        """Maximum of the first property."""
        return self.df[self.property_name[0]].max()

    @property
    def minimum(self):
        """Minimum of the first property."""
        return self.df[self.property_name[0]].min()

    @property
    def mean(self):
        """Mean of the first property."""
        return self.df[self.property_name[0]].mean()

    @property
    def variance(self):
        """Sample variance of the first property."""
        return self.df[self.property_name[0]].var()

    @property
    def median(self):
        """Median of the first property."""
        return np.median(self.vr[self.property_name[0]])

    @property
    def meadian(self):
        # Misspelled alias kept for backward compatibility; prefer .median.
        return np.median(self.vr[self.property_name[0]])

    @property
    def upper_quartile(self):
        """75th percentile of the first property."""
        return self.df[self.property_name[0]].quantile(0.75)

    @property
    def lower_quartile(self):
        """25th percentile of the first property."""
        return self.df[self.property_name[0]].quantile(0.25)

    @property
    def num(self):
        """Number of data points."""
        return self.vr.shape[0]

    def distance(self):
        """Pairwise 2-D (x, y) distances between all points (condensed form)."""
        num = self.vr.shape[0]
        return pdist(np.concatenate((self.vr['x'].reshape((num, 1)),
                                     self.vr['y'].reshape((num, 1))), axis=1))

    @property
    def summary(self):
        """Formatted multi-line text summary of the first property."""
        return (
            "Summary\n"
            "-------\n"
            "Number of Points: {}\n"
            "Mean: {}\n"
            "Variance: {}\n"
            "Minimum: {}\n"
            "Lower Quartile: {}\n"
            "Median: {}\n"
            "Upper Quartile: {}\n"
            "Maximum: {}\n").format(
                self.num,
                self.mean,
                self.variance,
                self.minimum,
                self.lower_quartile,
                self.median,
                self.upper_quartile,
                self.maximum)

    def scatter(self, ax, prop=None):
        """
        Plot scatter of data points on given axis

        Parameters
        ----------
        ax : AxesSubplot or Axes3DSubplot
            axis on which the scatter plot is drawn
        prop : str
            property to display with colormap (defaults to the first
            property)
        """
        sc = None
        prop = self.property_name[0] if prop is None else prop
        # BUG FIX: previously passed the property NAME string as c=, which
        # matplotlib would try to parse as a color; pass the values instead.
        colors = self.vr[prop]
        if not self._2d and isinstance(ax, Axes3D):
            sc = ax.scatter(
                self.vr['x'], self.vr['y'], self.vr['z'],
                c=colors)
        else:
            sc = ax.scatter(
                self.vr['x'], self.vr['y'], c=colors)
        return sc
| 2.453125 | 2 |
ETRI.py | chawonseok/Etri_cdqa | 0 | 12757298 | import urllib3
import json
def ETRI_POS_Tagging(text) :
    """Send *text* to the ETRI WiseNLU open API for morphological analysis
    and return the extracted noun lemmas (via ``Pos_extract``).

    Args:
        text: Korean text to analyze.

    Returns:
        List of NNG/NNP noun lemmas found in the response.
    """
    openApiURL = "http://aiopen.etri.re.kr:8000/WiseNLU"
    # NOTE(review): API credential hardcoded in source -- should be moved
    # to an environment variable or config file and rotated.
    accessKey = "14af2341-2fde-40f3-a0b9-b724fa029380"
    analysisCode = "morp"  # request morphological analysis only
    requestJson = {
        "access_key": accessKey,
        "argument": {
            "text": text,
            "analysis_code": analysisCode
        }
    }
    http = urllib3.PoolManager()
    response = http.request(
        "POST",
        openApiURL,
        headers={"Content-Type": "application/json; charset=UTF-8"},
        body=json.dumps(requestJson)
    )
    return Pos_extract(response)
def Pos_extract(Data):
    """Extract noun lemmas from an ETRI WiseNLU morphological-analysis response.

    Args:
        Data: HTTP response object whose ``data`` attribute holds the
            UTF-8 encoded JSON payload returned by the API.

    Returns:
        List of lemmas whose POS tag is NNG (common noun) or NNP
        (proper noun), in order of appearance.
    """
    # BUG FIX: the codec name was misspelled "utf=8"; it only worked by
    # accident because Python normalizes codec names. Use "utf-8".
    sentences = json.loads(Data.data.decode("utf-8"))['return_object']['sentence']
    nouns = []
    for sentence in sentences:
        for morp in sentence['morp']:
            if morp['type'] in ('NNG', 'NNP'):
                nouns.append(morp['lemma'])
    return nouns
AER_experimentalist/experiment_environment/tinkerforge_variable.py | musslick/DARTS-Cognitive-Modeling | 0 | 12757299 | <reponame>musslick/DARTS-Cognitive-Modeling
from abc import ABC, abstractmethod
import numpy as np
from AER_experimentalist.experiment_environment.variable import Variable
class Tinkerforge_Variable(Variable):
    """Variable bound to a Tinkerforge hardware brick/bricklet.

    Extends ``Variable`` with the device UID and a measurement/
    manipulation priority.
    """
    # class-level defaults
    _variable_label = ""
    _UID = ""
    _priority = 0

    def __init__(self, variable_label="", UID="", name="", units="", priority="", value_range=(0,1), type=float):
        """Create the variable.

        Args:
            variable_label: display label of the variable.
            UID: Tinkerforge device UID.
            name: variable name.
            units: measurement units.
            priority: ordering priority.
                NOTE(review): default is "" (a string) while the class
                default is 0 (an int) -- confirm expected type.
            value_range: (min, max) tuple of valid values.
            type: value type (default float).
        """
        super().__init__(name=name, value_range=value_range, units=units, type=type, variable_label=variable_label)
        self._UID = UID
        self._priority = priority

    # Get priority of variable.
    # The priority is used to determine the sequence of variables to be measured or manipulated.
    def __get_priority__(self):
        return self._priority

    # Set priority of variable.
    # The priority is used to determine the sequence of variables to be measured or manipulated.
    def __set_priority__(self, priority):
        self._priority = priority

    def clean_up(self):
        """Release hardware resources; no-op in this base implementation."""
        pass
assets/ctfFiles/2021/idek2021/crypto/seedOfLife/solver.py | Angmar2722/Angmar2722.github.io | 0 | 12757300 | import random
from tqdm import tqdm
from Crypto.Util.number import *
# CTF brute force: search for the PRNG seed that reproduces the challenge's
# observed random outputs. The exact sequence of seed()/random() calls below
# mirrors the challenge script, so statement order must not be changed.
for seed in tqdm(range(10000000)):
    random.seed(seed)
    toBreak = False
    # Re-seed 19 times from the generator's own output, as the challenge does.
    for i in range(19):
        random.seed(random.random())
    seedtosave = random.random()
    # Check candidate quickly: the first draw after seeding with seedtosave
    # must match the known leaked value, otherwise skip this seed.
    for add in range(0, 1000):
        random.seed(seedtosave+add)
        for i in range(0, 100):
            temp = random.random()
            if add == 0 and i == 0 and temp != 0.5327486342598738:
                toBreak = True
                break
        if toBreak:
            break
    if toBreak:
        continue
    # Replays the challenge's draw pattern (values are discarded;
    # presumably kept to mirror the original script -- verify).
    for add in range(0, 1000):
        random.seed(seedtosave-add)
        for i in range(0, 1000):
            random.random()
    # Final confirmation: one of the first 100 scaled draws must equal the
    # second leaked value; if so, the flag is the seed.
    random.seed(seedtosave)
    for i in range(0, 100):
        t = random.random()*100
        if t == 83.74981977975804:
            print("idek{", seed, "}", sep="")
            exit()
# Known result: idek{103123}
src/twitterclient.py | jameslawler/synonymly-standalone | 0 | 12757301 | #twitterclient
import twitter
from configuration import configuration
class twitterclient:
    """Thin wrapper around ``twitter.Api`` configured from config.ini."""

    def __init__(self):
        """Read Twitter credentials from config.ini and build the API client."""
        config = configuration("config.ini")
        self.api = twitter.Api(consumer_key=config.getTwitterConsumerKey(),
                          consumer_secret=config.getTwitterConsumerSecret(),
                          access_token_key=config.getTwitterAccessToken(),
                          access_token_secret=config.getTwitterAccessTokenSecret())

    def tweet(self, message):
        """Post *message* as a status update on the configured account."""
        self.api.PostUpdate(message)
bsbolt/Index/RRBSIndex.py | NuttyLogic/BSBolt | 10 | 12757302 | <reponame>NuttyLogic/BSBolt
import re
from typing import List, Tuple
from bsbolt.Index.RRBSCutSites import ProcessCutSites
from bsbolt.Index.IndexOutput import IndexOutput
from bsbolt.Utils.FastaIterator import OpenFasta
from bsbolt.Utils.UtilityFunctions import get_external_paths
class RRBSBuild:
    """Format reference sequence inputs for processing by BWA. In silico digests reference sequence and
    return mappable regions that are within the fragment boundary. Fragments are relative to the restriction cut site
    if provided, or the complete restriction sequence is considered as part of the mappable fragment.:

    Params:

    * *reference_file (str)*: path to reference file in fasta format
    * *genome_database (str)*: directory to output processed datafiles
    * *block_size (int)*: bwa indexing block size, increases indexing speed but increases memory consumption
    * *lower_bound (int)*: smallest mappable fragment size
    * *upper_bound (int)*: largest mappable fragment size
    * *cut_format (str)*: Comma separated list of restriction sites, - represent cut break
    * *ignore_alt (bool)*: ignore alt contigs when constructing alignment index

    Usage:

    ```python
    index = RRBSBuild(**kwargs)
    index.generate_rrbs_database()
    ```
    """

    def __init__(self, reference_file: str = None, genome_database: str = None,
                 lower_bound: int = 30, upper_bound: int = 500, cut_format: str = 'C-CGG',
                 block_size: int = None, ignore_alt: bool = False):
        bwa_path, _, _ = get_external_paths()
        self.reference_file = OpenFasta(fasta=reference_file)
        self.index_output = IndexOutput(genome_database=genome_database, block_size=block_size)
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.ignore_alt = ignore_alt
        self.cut_sites = ProcessCutSites(cut_format=cut_format)
        # (removed stray debug print of self.cut_sites.restriction_site_dict)
        self.mappable_regions = []
        self.contig_size_dict = {}

    def generate_rrbs_database(self):
        """ Wrapper for class functions to process and build mapping indices.
        """
        contig_id = None
        contig_sequence = []
        # return true, line if '>' in fasta line
        for is_contig_label, sequence in self.reference_file:
            if is_contig_label:
                if contig_id:
                    self.process_contig_region(contig_id, contig_sequence)
                    contig_sequence = []
                # set contig_id
                contig_id = sequence.replace('>', '').split()[0]
            else:
                contig_sequence.append(sequence.upper())
        # process remaining sequence after iterator exhausted
        self.process_contig_region(contig_id, contig_sequence)
        self.index_output.database_output.close()
        # launch build commands
        self.index_output.build_index()
        # output mappable regions in bed format
        self.index_output.output_mappable_regions(self.mappable_regions)
        # output contig size index
        self.index_output.output_contig_sequence('genome_index', self.contig_size_dict)

    def process_contig_region(self, contig_id: str, contig_sequence: List[str]):
        """Given a contig_id will output a pickle file of the whole sequence and output a masked version of the
        the sequence where only mappable regions are reported.

        Params:

        * *contig_id (str)*: contig label
        * *contig_sequence (list)*: a list of of string containing DNA Sequence
        """
        if self.ignore_alt and 'alt' in contig_id.lower():
            return
        # join contig_sequence to get ease downstream processing
        contig_str: str = ''.join(contig_sequence)
        # save contig size to dict
        self.contig_size_dict[contig_id] = len(contig_str)
        # serialize contig sequence
        self.index_output.output_contig_sequence(contig_id, contig_str)
        # retrieve list of mappable regions
        mappable_regions: list = self.process_rrbs_sequence(contig_str)
        # check if contig regions empty, if so unmask first 80 bases for indexing
        if not mappable_regions:
            mappable_regions.append((1, 80))
        # get masked contig sequence
        masked_contig_sequence: str = self.mask_contig(contig_str, mappable_regions)
        self.mappable_regions.extend([f'{contig_id}\t{region[0]}\t{region[1]}\n' for region in mappable_regions])
        # perform sanity check, if AssertionError the region designation process is bad
        assert len(contig_str) == len(masked_contig_sequence), 'Contig Length != Masked Contig Length'
        # write contig sequence to output files
        self.index_output.write_contig_sequence(contig_id, masked_contig_sequence)

    def process_rrbs_sequence(self, contig_str: str) -> List[Tuple[int, int]]:
        """Designate mappable regions by finding all occurrences of the restriction site string in the passed DNA
        sequence. Merge restriction map into regions by considering pairs of downstream and upstream restriction sites
        that pass the size limits.

        Params:

        * *contig_str (str)*: STR of continuous DNA sequence

        Returns:

        * *mappable_regions (list)*: List of tuples the contain the start and end position of fragments that
                                     are with the size limits
        """
        restriction_site_locations = []
        # get the position of the all occurrences of the restriction site pattern in the DNA sequence and add to list
        # (fixed local name typo: restrication_site -> restriction_site)
        for restriction_site, offset in self.cut_sites.restriction_site_dict.items():
            restriction_site_locations.extend([(m.start(), offset, restriction_site) for m in
                                               re.finditer(restriction_site, contig_str)])
        # sort list so fragments are in ordered by start position
        restriction_site_locations.sort(key=lambda x: x[0])
        mappable_regions = []
        # iterate through list to get the cut site plus upstream cut site, will terminate at the second to last site
        for downstream_site, upstream_site in zip(restriction_site_locations, restriction_site_locations[1:]):
            # set default downstream site that includes the restriction site
            down_start = downstream_site[0]
            # if cut site indicated shift start position
            if downstream_site[1]:
                down_start = downstream_site[0] + downstream_site[1]
            # set site end to include restriction site
            up_end = upstream_site[0] + len(upstream_site[2])
            # correct fragment if cut site provided
            if upstream_site[1]:
                up_end = upstream_site[0] + upstream_site[1]
            site_range = up_end - down_start
            # check fragment length
            if self.lower_bound <= site_range <= self.upper_bound:
                # extend mappable boundaries by two to assess methylation context relative to reference sequence
                mappable_regions.append((down_start - 2, up_end + 2))
        return mappable_regions

    def mask_contig(self, contig_str: str, mappable_regions: List[Tuple[int, int]]) -> str:
        """Given a list of mappable regions, if cut site isn't designated merges mappable fragments, returns a string
        of DNA sequence with masked unmappable regions

        Params:

        * *contig_id (str)*: contig label
        * *contig_str (str)*: str of DNA sequence
        * *mappable_regions (list)*: list of mappable regions

        Returns:

        * *masked_contig_sequence (str)*: str of DNA sequence with un-mappable regions masked
        """
        contig_mappable_regions = iter(mappable_regions)
        start, end = next(contig_mappable_regions)
        masked_contig = []
        for position, bp in enumerate(contig_str):
            if position > end:
                try:
                    start, end = next(contig_mappable_regions)
                except StopIteration:
                    # no more regions; remaining positions fail the range test
                    pass
            if start <= position <= end:
                masked_contig.append(bp)
            else:
                masked_contig.append('-')
        return ''.join(masked_contig)
| 2.34375 | 2 |
main.py | technokowski/snowbot3000 | 0 | 12757303 | <gh_stars>0
from speaker import Speaker
# Manual smoke test for the custom Speaker class.
# Kept as a milestone / usage example.
my_speak = Speaker()
# Default voice and rate
my_speak.speak('f yeah')
my_speak.speak('I love this')
# NOTE(review): rate passed as a string -- confirm Speaker.change_rate
# expects str rather than int.
my_speak.change_rate('175')
my_speak.speak('f yeah')
my_speak.speak('I love this')
# Switch to the 'Fred' system voice
my_speak.change_voice('Fred')
my_speak.speak('f yeah')
my_speak.speak('I love this')
my_speak.speak("I don't know")
main.py | AshishKapoor/learning-fastapi | 0 | 12757304 | <reponame>AshishKapoor/learning-fastapi
from imp import reload
from operator import truediv
import uvicorn
if __name__ == "__main__":
uvicorn.run("app.api:app", port="8000", reload=True)
| 2 | 2 |
parts/app/tests/forms/tests_baseform.py | heljhumenad/parts-arrival | 3 | 12757305 | from django import forms
from django.utils.translation import ugettext_lazy as _
from parts.app.forms import baseform
from parts.app.tests.base import TestCase
class TestBaseForm(TestCase):
    """Unit tests for parts.app.forms.baseform.BaseForms."""

    def setUp(self):
        super().setUp()
        self._base_form = baseform.BaseForms()

    def test_instance_for_base_form(self):
        # BUG FIX: the original assertion was syntactically invalid
        # (unbalanced parentheses, single-argument isinstance); assert that
        # the fixture is a BaseForms instance instead.
        self.assertIsInstance(self._base_form, baseform.BaseForms)
helper.py | nvb5140/AuditClassification | 0 | 12757306 | <filename>helper.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 21 17:49:21 2021
@author: nickbenelli
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn.metrics
from sklearn.metrics import confusion_matrix
def map_string_values_ints(df, string_colnames=None):
    '''
    Converts strings to ints in data frame.

    Values parseable as integers keep their numeric value; non-numeric
    values are assigned ids counting down from 99, skipping ids already
    taken by numeric values. The mapping is applied to *df* in place.

    Parameters
    ----------
    df : pandas.DataFrame
        data frame with string values.
    string_colnames : list, optional
        List of columns with string variables. The default is None
        (no columns converted).

    Returns
    -------
    df : pandas.DataFrame
        data frame with the listed columns converted to ints.
    map_dict : dict
        per-column mapping of original strings to ints.
    '''
    # BUG FIX: previously used a mutable default argument ([]).
    string_colnames = [] if string_colnames is None else string_colnames
    map_dict = {}
    non_number_starter = 99
    for colname in string_colnames:
        map_dict[colname] = {}
        non_number_idx = non_number_starter
        # First pass: keep values that parse as integers.
        for item in set(df.loc[:, colname]):
            try:
                map_dict[colname][item] = int(item)
            except ValueError:
                pass
        # Second pass: assign ids to non-numeric values, counting down and
        # skipping ids already used by numeric values.
        for item in set(df.loc[:, colname]):
            if item not in map_dict[colname].keys():
                while non_number_idx in map_dict[colname].values():
                    non_number_idx -= 1
                map_dict[colname][item] = non_number_idx
        df.loc[:, colname] = df.loc[:, colname].map(map_dict[colname])
    # (removed leftover debug print of map_dict)
    return df, map_dict
def fill_nas_df(df, use_mean=True):
    '''
    Replaces NAs in each numeric data frame column with that column's
    mean or median; non-numeric columns are skipped.

    Parameters
    ----------
    df : pandas.DataFrame
        data frame possibly containing NAs.
    use_mean : bool, optional
        True uses the mean, False the median. The default is True.

    Returns
    -------
    df : pandas.DataFrame
        the same data frame, with NAs filled where possible.
    '''
    for column in df.columns:
        try:
            fill_value = (df.loc[:, column].mean() if use_mean
                          else df.loc[:, column].median())
            df.loc[:, column].fillna(value=fill_value, inplace=True)
        except TypeError:
            # Column is not numeric; leave it untouched.
            continue
    return df
def convert_int_to_float_df(df):
    '''
    Downcasts every numeric column of the data frame to float; columns
    that cannot be parsed as numbers are left unchanged.

    Parameters
    ----------
    df : pandas.DataFrame
        df input.

    Returns
    -------
    df : pandas.DataFrame
        df output with numeric columns converted.
    '''
    for column in df.columns:
        try:
            df.loc[:, column] = pd.to_numeric(df.loc[:, column], downcast='float')
        except ValueError:
            # Non-numeric column; skip.
            continue
    return df
def dummy_string_var(df, string_colnames):
    '''
    Replaces each listed string column with one-hot (dummy) indicator
    columns, inserted at the original column's position. Dummy columns
    are named "<col>_<value>". The input frame is not modified.

    Parameters
    ----------
    df : pandas.DataFrame
        data frame.
    string_colnames : list
        list of string columns in data frame.

    Returns
    -------
    df_new : pandas.DataFrame
        data frame with new dummy variables in place of the originals.
    '''
    # (removed dead code: an unused `col = string_colnames[0]` assignment
    # and a `dummy_dict` accumulator that was built but never used)
    df_new = df.copy()
    for col in string_colnames:
        df_dummy = pd.get_dummies(df_new.loc[:, col])
        # Prefix dummy columns with the source column name.
        df_dummy.columns = ['_'.join([col, name]) for name in df_dummy.columns]
        # Splice the dummy block in at the original column's position.
        col_index_place = df_new.columns.get_loc(col)
        df_before_dummy = df_new.iloc[:, :col_index_place]
        df_after_dummy = df_new.iloc[:, (col_index_place+1):]
        df_new = pd.concat([df_before_dummy, df_dummy, df_after_dummy], axis=1)
    return df_new
# ---------------------------------------------------------------------------
# Accuracy helpers
# ---------------------------------------------------------------------------
def accuracy_table(y_test, y_predicted, test_name= ''):
    '''
    Builds confusion matrix and determines accuracy of a binary classifier.

    Prints a classification report and accuracy, draws the confusion
    matrix as a seaborn heatmap on the CURRENT matplotlib figure (side
    effect), and returns a summary dictionary.

    Parameters
    ----------
    y_test : pd.Series or np.darray
        actual values in dataset (binary labels).
    y_predicted : pd.Series or np.darray
        predicted values of dataset.
    test_name : str, optional
        Name of test run to predict. The default is ''.

    Returns
    -------
    accuracy_dict : dict
        keys: 'Test Name', 'TP', 'TN', 'FP', 'FN', 'Accuracy',
        'Precision', 'Recall', 'F1 Score', 'Confusion Matrix'.
    '''
    class_report = sklearn.metrics.classification_report(y_test, y_predicted)
    accuracy = sklearn.metrics.accuracy_score(y_test, y_predicted)
    print("{} Classification Report: \n\n".format(test_name), class_report)
    print("{} Accuracy: {}".format(test_name, round(accuracy, 4)))
    print()
    conf_mat = confusion_matrix(y_test, y_predicted)
    #print('Confusion Matrix:\n{}'.format(conf_mat))
    # Plot confusion matrix on the current matplotlib axes.
    #sns.set(font_scale = 0.8)
    sns.heatmap(conf_mat, square=True, annot=True, cbar=False, fmt='g')
    plt.xlabel('predicted value')
    plt.ylabel('true value')
    # Unpack counts; sklearn's binary confusion matrix flattens to
    # [TN, FP, FN, TP] row-major order.
    TN, FP, FN, TP = conf_mat.flatten()
    round_digit = 4
    precision = round(sklearn.metrics.precision_score(y_test, y_predicted), round_digit)
    recall = round(sklearn.metrics.recall_score(y_test, y_predicted), round_digit)
    f1_sc = round(sklearn.metrics.f1_score(y_test, y_predicted), round_digit)
    accuracy_dict = {
        'Test Name' : test_name,
        'TP' : TP, 'TN' : TN, 'FP': FP, 'FN' : FN,
        'Accuracy' : round(accuracy, round_digit),
        'Precision' : precision, 'Recall' : recall, 'F1 Score' : f1_sc,
        'Confusion Matrix' : conf_mat}
    return accuracy_dict
def confusion_matrix_and_tests(y, y_predicted):
    '''
    Uses the confusion matrix to find the specificity and sensitivity of a model
    Parameters
    ----------
    y : pd.Series or np.darray
        actual output values of dataset.
    y_predicted : pd.Series or np.darray
        predicted output values of dataset.
    Returns
    -------
    conf_matrix_dict : dict
        dictionary with the TN/FN/TP/FP counts plus the specificity and
        sensitivity (as fractions and as percentages rounded to 2 places).
    '''
    # NOTE(review): indexing assumes a binary problem, i.e. a 2x2 matrix in
    # sklearn's layout [[TN, FP], [FN, TP]].
    conf_matrix = confusion_matrix(y, y_predicted)
    conf_matrix_dict = {
        'TN' : conf_matrix[0, 0],
        'FN' : conf_matrix[1, 0],
        'TP' : conf_matrix[1, 1],
        'FP' : conf_matrix[0, 1],
    }
    # sensitivity = recall = TP / (TP + FN); specificity = TN / (TN + FP).
    conf_matrix_dict['sensitivity'] = conf_matrix_dict['TP'] / (conf_matrix_dict['TP'] + conf_matrix_dict['FN'])
    conf_matrix_dict['specificity'] = conf_matrix_dict['TN'] / (conf_matrix_dict['TN'] + conf_matrix_dict['FP'])
    conf_matrix_dict['sensitivity_percent'] = round(conf_matrix_dict['sensitivity'] * 100, 2)
    conf_matrix_dict['specificity_percent'] = round(conf_matrix_dict['specificity'] * 100, 2)
    return conf_matrix_dict
setup.py | bizarrechaos/gdoh-client | 0 | 12757307 | <filename>setup.py
from setuptools import setup
setup(
name='gdoh-client',
version='1.0',
author='bizarrechaos',
packages=['gdoh'],
license='Apache License Version 2.0',
description='Libraries and command line tool for Google\'s DNS-over-HTTPS',
install_requires=['docopt==0.6.2',
'requests==2.11.1',
'prettytable==0.7.2'],
url='https://github.com/bizarrechaos/gdoh-client',
entry_points='''
[console_scripts]
gdoh = gdoh.__main__:main
'''
)
| 1.328125 | 1 |
gpytorch/mlls/variational_elbo.py | harvineet/gpytorch | 0 | 12757308 | <gh_stars>0
#!/usr/bin/env python3
import torch
from .marginal_log_likelihood import MarginalLogLikelihood
from .. import settings
class VariationalELBO(MarginalLogLikelihood):
    def __init__(self, likelihood, model, num_data, combine_terms=True):
        """
        A special MLL designed for variational inference
        Args:
        - likelihood: (Likelihood) - the likelihood for the model
        - model: (Module) - the variational GP model
        - num_data: (int) - the total number of training data points (necessary for SGD)
        - combine_terms: (bool) - whether or not to sum the expected NLL with the KL terms (default True)
        """
        super(VariationalELBO, self).__init__(likelihood, model)
        self.combine_terms = combine_terms
        self.num_data = num_data
    def forward(self, variational_dist_f, target, **kwargs):
        """
        Compute the (minibatch) variational ELBO.
        Args:
        - variational_dist_f: variational posterior over function values q(f)
        - target: observed targets for this minibatch
        Returns:
        - a single ELBO value if ``combine_terms``; otherwise the tuple
          (log_likelihood, kl_divergence, log_prior) so the caller can
          combine the terms itself.
        """
        # Number of function-value events in this minibatch; used to average
        # the expected log likelihood per data point.
        num_batch = variational_dist_f.event_shape.numel()
        variational_dist_u = self.model.variational_strategy.variational_distribution.variational_distribution
        prior_dist = self.model.variational_strategy.prior_distribution
        # Broadcast q(u) across the prior's batch dimensions if needed.
        if len(variational_dist_u.batch_shape) < len(prior_dist.batch_shape):
            variational_dist_u = variational_dist_u.expand(prior_dist.batch_shape)
        log_likelihood = self.likelihood.variational_log_probability(variational_dist_f, target, **kwargs).div(
            num_batch
        )
        kl_divergence = self.model.variational_strategy.kl_divergence()
        # Collapse any extra KL batch dimension so it matches the likelihood term.
        if kl_divergence.dim() > log_likelihood.dim():
            kl_divergence = kl_divergence.sum(-1)
        if log_likelihood.numel() == 1:
            kl_divergence = kl_divergence.sum()
        # Scale the KL by 1/num_data so minibatch objectives are comparable
        # to the full-data ELBO (per the num_data SGD note in __init__).
        kl_divergence = kl_divergence.div(self.num_data)
        if self.combine_terms:
            res = log_likelihood - kl_divergence
            # Hyperparameter priors contribute log p(theta) / num_data.
            for _, prior, closure, _ in self.named_priors():
                res.add_(prior.log_prob(closure()).sum().div(self.num_data))
            return res
        else:
            log_prior = torch.zeros_like(log_likelihood)
            for _, prior, closure, _ in self.named_priors():
                log_prior.add_(prior.log_prob(closure()).sum())
            return log_likelihood, kl_divergence, log_prior.div(self.num_data)
class VariationalELBOEmpirical(VariationalELBO):
    def __init__(self, likelihood, model, num_data):
        """
        A special MLL designed for variational inference.
        This computes an empirical (rather than exact) estimate of the KL divergence
        Args:
        - likelihood: (Likelihood) - the likelihood for the model
        - model: (Module) - the variational GP model
        - num_data: (int) - the total number of training data points (necessary for SGD)
        """
        super(VariationalELBOEmpirical, self).__init__(likelihood, model, num_data, combine_terms=True)
    def forward(self, variational_dist_f, target, **kwargs):
        """
        Compute the minibatch ELBO with a Monte Carlo estimate of the KL term.
        The KL(q(u) || p(u)) is estimated by averaging log q(u_s) - log p(u_s)
        over samples u_s ~ q(u), instead of using the closed form.
        """
        num_batch = variational_dist_f.event_shape[0]
        variational_dist_u = self.model.variational_strategy.variational_distribution.variational_distribution
        prior_dist = self.model.variational_strategy.prior_distribution
        log_likelihood = self.likelihood.variational_log_probability(variational_dist_f, target, **kwargs)
        log_likelihood = log_likelihood.div(num_batch)
        # Sample count is taken from the global gpytorch settings context.
        num_samples = settings.num_likelihood_samples.value()
        # rsample keeps the estimate differentiable w.r.t. q's parameters.
        variational_samples = variational_dist_u.rsample(torch.Size([num_samples]))
        kl_divergence = (
            variational_dist_u.log_prob(variational_samples) - prior_dist.log_prob(variational_samples)
        ).mean(0)
        # Same 1/num_data scaling as the parent class, for minibatch SGD.
        kl_divergence = kl_divergence.div(self.num_data)
        res = log_likelihood - kl_divergence
        for _, prior, closure, _ in self.named_priors():
            res.add_(prior.log_prob(closure()).sum().div(self.num_data))
        return res
| 2.375 | 2 |
enrichmentmanager/templatetags/enrichment_extra.py | rectory-school/rectory-apps | 0 | 12757309 | from enrichmentmanager.models import EnrichmentSignup, EnrichmentOption
from enrichmentmanager.lib import canEditSignup
from io import StringIO
from datetime import date
from django import template
register = template.Library()
@register.assignment_tag(takes_context=True)
def select_for(context, slot, student):
    """Render the signup control for one (slot, student) cell.

    Returns raw HTML: a read-only <p> when the student has a lockout or the
    requesting user may not edit; otherwise a <select> of enrichment options
    (options taught by the student's associated teachers listed first), plus
    an admin-lock checkbox for users with the can_set_admin_lock permission.
    """
    if student.lockout:
        return '<p class="readOnlySignup">{lockout}</p>'.format(lockout=student.lockout)
    canEdit = canEditSignup(context.request.user, slot, student)
    # Form-field key shared by the option <select> and the admin-lock checkbox.
    key = "slot_{slot_id}_{student_id}".format(slot_id=slot.id, student_id=student.id)
    if canEdit:
        out = StringIO()
        choices = context['slotChoices'][slot]
        out.write('<select name="{key}_option" class="slotSelector saveTrack">'.format(key=key))
        out.write('<option value="">--</option>')
        # Pre-fetched current signup (option id) for this cell, if any.
        selected = context['relatedSignups'].get(key)
        # Options taught by one of the student's associated teachers are
        # grouped before all other options, separated by a "--" divider.
        preferredChoices = StringIO()
        otherChoices = StringIO()
        associatedTeachers = student.associated_teachers.all()
        for choice in choices:
            if choice.teacher in associatedTeachers:
                if selected == choice.id:
                    preferredChoices.write('<option value="{choice_id}" selected="selected">{choice_name}</option>'.format(choice_id=choice.id, choice_name=str(choice)))
                else:
                    preferredChoices.write('<option value="{choice_id}">{choice_name}</option>'.format(choice_id=choice.id, choice_name=str(choice)))
            else:
                if selected == choice.id:
                    otherChoices.write('<option value="{choice_id}" selected="selected">{choice_name}</option>'.format(choice_id=choice.id, choice_name=str(choice)))
                else:
                    otherChoices.write('<option value="{choice_id}">{choice_name}</option>'.format(choice_id=choice.id, choice_name=str(choice)))
        preferredChoices = preferredChoices.getvalue()
        otherChoices = otherChoices.getvalue()
        if preferredChoices and otherChoices:
            out.write(preferredChoices)
            out.write('<option value="">--</option>')
            out.write(otherChoices)
        else:
            out.write(preferredChoices)
            out.write(otherChoices)
        out.write("</select>")
        if context.request.user.has_perm("enrichmentmanager.can_set_admin_lock"):
            #TODO: Horribly ineffecient
            # One extra query per cell to look up the existing signup's lock flag.
            try:
                signup = EnrichmentSignup.objects.get(slot=slot, student=student)
            except EnrichmentSignup.DoesNotExist:
                signup = None
            if signup and signup.admin_lock:
                out.write('<input type="checkbox" title="Admin Lockout" name="{key}_adminlock" class="saveTrack adminLock" checked />'.format(key=key))
            else:
                out.write('<input type="checkbox" title="Admin Lockout" name="{key}_adminlock" class="saveTrack adminLock" />'.format(key=key))
        return out.getvalue()
    else:
        # Read-only view: show the currently selected option, if any.
        selectedID = context['relatedSignups'].get(key)
        if selectedID:
            #Ineffecient, will generate many queries if viewing in read only mode
            selectedChoice = EnrichmentOption.objects.get(pk=selectedID)
            return '<p class="readOnlySignup">{option}</p>'.format(option=selectedChoice)
        return ""
@register.assignment_tag(takes_context=True)
def display_for(context, slot, student):
    """Render a read-only HTML label for a student's signup in the given slot."""
    lockout = student.lockout
    if lockout:
        return "<em title='Lockout assigned'>{}</em>".format(lockout)
    signup_key = "slot_{slot_id}_{student_id}".format(slot_id=slot.id, student_id=student.id)
    option_id = context['relatedSignups'].get(signup_key, "")
    if not option_id:
        return "<strong>Unassigned</strong>"
    return str(EnrichmentOption.objects.get(pk=option_id))
| 2.296875 | 2 |
old_tests/test_knmi_rain.py | acequia-package/Acequia | 0 | 12757310 |
"""
testing module knmi_rain from acequia
"""
import acequia as aq
def hdr(msg):
    """Print a section header: *msg* between two '# ----' rule lines,
    padded with a blank line above and below."""
    rule = '-' * 50
    print()
    print('#', rule)
    print(msg)
    print('#', rule)
    print()
if __name__ == '__main__':
    # Manual smoke tests for aq.KnmiRain; expects the KNMI test data file below.
    hdr('# read valid file')
    srcpath = r'.\testdata\knmi\neerslaggeg_EENRUM_154.txt'
    prc = aq.KnmiRain(srcpath)
    n = len(prc.rawdata)
    print(f'Number of data rows is {n}')
    hdr('# try to read invalid filepath')
    # An invalid path should yield an empty dataset rather than raise.
    prc2 = aq.KnmiRain('dummy')
    n = len(prc2.rawdata)
    print(f'Number of data rows is {n}')
    hdr('# get table with definitions and units')
    tbl = prc.units()
    print(tbl)
    hdr('# get all possible variables and one not possible')
    for name in ['prc','dummy']:
        n = len(prc.timeseries(name))
        print(f'Number of {name}: {n}')
| 2.5625 | 3 |
Backend/accounts/urls.py | iamrameshkumar/Django-React-Blog | 1 | 12757311 | from django.conf.urls import url, include
from rest_framework_jwt.views import obtain_jwt_token
from accounts.views import (
UserCreateView,
)
app_name = 'accounts'
urlpatterns = [
url(r'^register/$',UserCreateView.as_view(),name='accounts'),
url(r'^home/login/token/$',obtain_jwt_token),
]
| 1.742188 | 2 |
utils/modeling/migrating.py | koursaros-ai/microservices | 13 | 12757312 | <reponame>koursaros-ai/microservices<filename>utils/modeling/migrating.py
import sys
import transformers
from fairseq.models import roberta
from fairseq.data.data_utils import collate_tokens
import time
import torch.nn.functional as F
import torch.hub
# def roberta_to_transformer(path_to_roberta, path_to_yaml):
#     model = RobertaModel.from_pretrained(path_to_roberta, checkpoint_file='model.pt')
#     model.eval()
# Tokenization settings shared by both prediction paths below.
MAX_LENGTH = 256  # hard cap (in tokens) applied to every encoded sequence
PAD = True  # when True, transformer inputs are zero-padded up to MAX_LENGTH
def predict_transformers(model, tokenizer):
    """Return a predict(*text_args) closure over a HuggingFace model/tokenizer.

    The closure encodes the text pairs in a batch, runs the model, and
    returns log-softmax scores as a nested Python list.
    """
    def predict_fn(*args):
        # Each positional arg is a sequence of strings; encoding is timed.
        inputs = time_fn(transformers_encode_batch, tokenizer, *args)
        inputs_dict = {
            'input_ids': inputs[0],
            'attention_mask': inputs[1],
            'token_type_ids': inputs[2]
        }
        outputs = model(**inputs_dict)
        # First element of the model output tuple holds the logits.
        logits = outputs[0]
        preds = F.log_softmax(logits, dim=-1)
        return preds.tolist()
    return predict_fn
def predict_roberta(model):
    """Return a predict(*text_args) closure over a fairseq RoBERTa model."""
    def pred_fn(*args):
        # Encode each sample, truncate to MAX_LENGTH, and pad-collate (pad id 1).
        batch = time_fn(collate_tokens, [model.encode(*arg)[:MAX_LENGTH] for arg in zip(*args)], pad_idx=1)
        # NOTE(review): `*batch` unpacks the collated tensor row-by-row into
        # predict() -- confirm this matches fairseq's predict() signature.
        labels = model.predict('mnli', *batch).tolist()
        return labels
    return pred_fn
def benchmark(pred_fn, n):
    """Invoke *pred_fn* *n* times on a fixed sentence pair.

    Parameters
    ----------
    pred_fn : callable
        Prediction function taking (premise, hypothesis) strings.
    n : int
        Number of repetitions.

    Raises
    ------
    AssertionError
        If a call does not return a list.
    """
    args = 'All work and no play.', 'Make jack a very dull boy.'
    for _ in range(n):
        # isinstance is the idiomatic type check (also accepts list subclasses,
        # unlike the previous `type(...) == list`).
        assert isinstance(pred_fn(*args), list)
def benchmark_mnli(samples):
    """Download RoBERTa-large-MNLI via torch.hub and transformers, then time
    *samples* predictions through each framework's prediction closure.

    Parameters
    ----------
    samples : int
        Number of prediction calls to time per framework.
    """
    torch_hub_model = time_fn(torch.hub.load, 'pytorch/fairseq','roberta.large.mnli')
    try:
        transformers_model = time_fn(transformers.RobertaModel.from_pretrained,
                                     'roberta-large-mnli')
    # Fix: the original bare `except:` also swallowed KeyboardInterrupt /
    # SystemExit; only retry (with a forced re-download, for a corrupted
    # cache) on ordinary errors.
    except Exception:
        transformers_model = time_fn(transformers.RobertaModel.from_pretrained,
                                     'roberta-large-mnli', force_download=True)
    transformers_tokenizer = time_fn(transformers.RobertaTokenizer.from_pretrained, 'roberta-large-mnli')
    pred_functions = {
        'transformers' : predict_transformers(transformers_model, transformers_tokenizer),
        'torch_hub' : predict_roberta(torch_hub_model)
    }
    for framework, pred_fn in pred_functions.items():
        print(f'Benchmarking {framework} with {samples} samples')
        time_fn(benchmark, pred_fn, samples)
### HELPERS
def time_fn(fn, *args, **kwargs):
    """Call *fn* with the given arguments, print its wall-clock duration,
    and return whatever it returned."""
    started = time.time()
    result = fn(*args, **kwargs)
    elapsed = time.time() - started
    print(f'Took {elapsed} seconds to run {fn.__name__}')
    return result
def transformer_to_features(tokenizer, *args):
    """Encode one text (or text pair) into model-ready feature lists.

    Returns a (input_ids, attention_mask, token_type_ids) tuple, each a
    list of ints truncated to MAX_LENGTH and, when PAD is set, left-padded
    with zeros up to MAX_LENGTH (attention_mask is 0 on padding, 1 on
    real tokens).
    """
    inputs = tokenizer.encode_plus(
        *args,
        add_special_tokens=True,
        max_length=MAX_LENGTH,
        truncate_first_sequence=True
    )
    # Defensive re-truncation in case the tokenizer returned longer lists.
    input_ids, token_type_ids = inputs["input_ids"][:MAX_LENGTH], \
                                inputs["token_type_ids"][:MAX_LENGTH]
    attention_mask = [1] * len(input_ids)
    # Zero-pad up to the sequence length.
    if PAD:
        padding_length = MAX_LENGTH - len(input_ids)
        input_ids = ([0] * padding_length) + input_ids
        attention_mask = ([0] * padding_length) + attention_mask
        token_type_ids = ([0] * padding_length) + token_type_ids
    return (input_ids, attention_mask, token_type_ids)
def transformers_encode_batch(tokenizer, *args):
    """Encode parallel sequences of texts into batched feature lists.

    Each positional arg is a sequence; samples are formed by zipping them
    (e.g. premises and hypotheses). Returns three parallel lists:
    (all_input_ids, all_attention_mask, all_token_type_ids).
    """
    all_input_ids = []
    all_attention_mask = []
    all_token_type_ids = []
    for sample in zip(*args):
        input_ids, attention_mask, token_type_ids = transformer_to_features(tokenizer, *sample)
        all_input_ids.append(input_ids)
        all_attention_mask.append(attention_mask)
        all_token_type_ids.append(token_type_ids)
    return all_input_ids, all_attention_mask, all_token_type_ids
if __name__ == '__main__':
    # Run a short comparative benchmark (10 samples) of both frameworks.
    benchmark_mnli(10)
0119. Pascal's Triangle II/solution.py | furutuki/LeetCodeSolution | 0 | 12757313 | from typing import List
class Solution:
    def getRow(self, rowIndex: int) -> List[int]:
        """Return row *rowIndex* (0-based) of Pascal's triangle.

        Uses a single list updated in place -- O(rowIndex) extra space --
        instead of keeping the whole previous row, and needs no special
        cases for rows 0 and 1.

        :param rowIndex: non-negative row index.
        :return: list of rowIndex + 1 binomial coefficients.
        """
        row = [1] * (rowIndex + 1)
        for i in range(2, rowIndex + 1):
            # Sweep right-to-left so each row[j - 1] read still holds the
            # previous row's value when it is consumed.
            for j in range(i - 1, 0, -1):
                row[j] += row[j - 1]
        return row
# Smoke test run on import: prints row 2 of Pascal's triangle ([1, 2, 1]).
s = Solution()
print(s.getRow(2))
| 3.515625 | 4 |
sdk/python/pulumi_wavefront/service_account.py | pulumi/pulumi-wavefront | 1 | 12757314 | <filename>sdk/python/pulumi_wavefront/service_account.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ServiceAccountArgs', 'ServiceAccount']
@pulumi.input_type
class ServiceAccountArgs:
    """Input argument bundle for constructing a ServiceAccount resource.

    NOTE: tfgen-generated code -- the properties below are thin wrappers
    around pulumi.get/pulumi.set; regenerate rather than hand-edit.
    """
    def __init__(__self__, *,
                 identifier: pulumi.Input[str],
                 active: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 ingestion_policy: Optional[pulumi.Input[str]] = None,
                 permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 user_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ServiceAccount resource.
        :param pulumi.Input[str] identifier: The (unique) identifier of the service account to create. Must start with sa::
        :param pulumi.Input[bool] active: Whether or not the service account is active
        :param pulumi.Input[str] description: The description of the service account
        :param pulumi.Input[str] ingestion_policy: ID of ingestion policy
        :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: List of permission to grant to this service account. Valid options are
               `agent_management`, `alerts_management`, `dashboard_management`, `embedded_charts`, `events_management`, `external_links_management`,
               `host_tag_management`, `metrics_management`, `user_management`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] user_groups: List of user groups for this service account
        """
        pulumi.set(__self__, "identifier", identifier)
        if active is not None:
            pulumi.set(__self__, "active", active)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if ingestion_policy is not None:
            pulumi.set(__self__, "ingestion_policy", ingestion_policy)
        if permissions is not None:
            pulumi.set(__self__, "permissions", permissions)
        if user_groups is not None:
            pulumi.set(__self__, "user_groups", user_groups)
    @property
    @pulumi.getter
    def identifier(self) -> pulumi.Input[str]:
        """
        The (unique) identifier of the service account to create. Must start with sa::
        """
        return pulumi.get(self, "identifier")
    @identifier.setter
    def identifier(self, value: pulumi.Input[str]):
        pulumi.set(self, "identifier", value)
    @property
    @pulumi.getter
    def active(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not the service account is active
        """
        return pulumi.get(self, "active")
    @active.setter
    def active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "active", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the service account
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="ingestionPolicy")
    def ingestion_policy(self) -> Optional[pulumi.Input[str]]:
        """
        ID of ingestion policy
        """
        return pulumi.get(self, "ingestion_policy")
    @ingestion_policy.setter
    def ingestion_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ingestion_policy", value)
    @property
    @pulumi.getter
    def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of permission to grant to this service account. Valid options are
        `agent_management`, `alerts_management`, `dashboard_management`, `embedded_charts`, `events_management`, `external_links_management`,
        `host_tag_management`, `metrics_management`, `user_management`
        """
        return pulumi.get(self, "permissions")
    @permissions.setter
    def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "permissions", value)
    @property
    @pulumi.getter(name="userGroups")
    def user_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of user groups for this service account
        """
        return pulumi.get(self, "user_groups")
    @user_groups.setter
    def user_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "user_groups", value)
@pulumi.input_type
class _ServiceAccountState:
    """State bundle used when looking up / importing an existing ServiceAccount.

    NOTE: tfgen-generated code -- all fields are optional because lookups may
    filter on any subset of them; regenerate rather than hand-edit.
    """
    def __init__(__self__, *,
                 active: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 identifier: Optional[pulumi.Input[str]] = None,
                 ingestion_policy: Optional[pulumi.Input[str]] = None,
                 permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 user_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering ServiceAccount resources.
        :param pulumi.Input[bool] active: Whether or not the service account is active
        :param pulumi.Input[str] description: The description of the service account
        :param pulumi.Input[str] identifier: The (unique) identifier of the service account to create. Must start with sa::
        :param pulumi.Input[str] ingestion_policy: ID of ingestion policy
        :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: List of permission to grant to this service account. Valid options are
               `agent_management`, `alerts_management`, `dashboard_management`, `embedded_charts`, `events_management`, `external_links_management`,
               `host_tag_management`, `metrics_management`, `user_management`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] user_groups: List of user groups for this service account
        """
        if active is not None:
            pulumi.set(__self__, "active", active)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if identifier is not None:
            pulumi.set(__self__, "identifier", identifier)
        if ingestion_policy is not None:
            pulumi.set(__self__, "ingestion_policy", ingestion_policy)
        if permissions is not None:
            pulumi.set(__self__, "permissions", permissions)
        if user_groups is not None:
            pulumi.set(__self__, "user_groups", user_groups)
    @property
    @pulumi.getter
    def active(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether or not the service account is active
        """
        return pulumi.get(self, "active")
    @active.setter
    def active(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "active", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the service account
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def identifier(self) -> Optional[pulumi.Input[str]]:
        """
        The (unique) identifier of the service account to create. Must start with sa::
        """
        return pulumi.get(self, "identifier")
    @identifier.setter
    def identifier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "identifier", value)
    @property
    @pulumi.getter(name="ingestionPolicy")
    def ingestion_policy(self) -> Optional[pulumi.Input[str]]:
        """
        ID of ingestion policy
        """
        return pulumi.get(self, "ingestion_policy")
    @ingestion_policy.setter
    def ingestion_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ingestion_policy", value)
    @property
    @pulumi.getter
    def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of permission to grant to this service account. Valid options are
        `agent_management`, `alerts_management`, `dashboard_management`, `embedded_charts`, `events_management`, `external_links_management`,
        `host_tag_management`, `metrics_management`, `user_management`
        """
        return pulumi.get(self, "permissions")
    @permissions.setter
    def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "permissions", value)
    @property
    @pulumi.getter(name="userGroups")
    def user_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of user groups for this service account
        """
        return pulumi.get(self, "user_groups")
    @user_groups.setter
    def user_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "user_groups", value)
class ServiceAccount(pulumi.CustomResource):
    """Wavefront Service Account resource (create/update/delete/import).

    NOTE: tfgen-generated code -- regenerate rather than hand-edit.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 active: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 identifier: Optional[pulumi.Input[str]] = None,
                 ingestion_policy: Optional[pulumi.Input[str]] = None,
                 permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 user_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """
        Provides a Wavefront Service Account Resource. This allows service accounts to be created, updated, and deleted.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_wavefront as wavefront
        basic = wavefront.ServiceAccount("basic",
            active=True,
            identifier="sa::tftesting")
        ```
        ## Import
        Service accounts can be imported using `identifier`, e.g.
        ```sh
         $ pulumi import wavefront:index/serviceAccount:ServiceAccount basic sa::tftesting
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] active: Whether or not the service account is active
        :param pulumi.Input[str] description: The description of the service account
        :param pulumi.Input[str] identifier: The (unique) identifier of the service account to create. Must start with sa::
        :param pulumi.Input[str] ingestion_policy: ID of ingestion policy
        :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: List of permission to grant to this service account. Valid options are
               `agent_management`, `alerts_management`, `dashboard_management`, `embedded_charts`, `events_management`, `external_links_management`,
               `host_tag_management`, `metrics_management`, `user_management`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] user_groups: List of user groups for this service account
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ServiceAccountArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Wavefront Service Account Resource. This allows service accounts to be created, updated, and deleted.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_wavefront as wavefront
        basic = wavefront.ServiceAccount("basic",
            active=True,
            identifier="sa::tftesting")
        ```
        ## Import
        Service accounts can be imported using `identifier`, e.g.
        ```sh
         $ pulumi import wavefront:index/serviceAccount:ServiceAccount basic sa::tftesting
        ```
        :param str resource_name: The name of the resource.
        :param ServiceAccountArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (resource_name, args, opts) and the
        # keyword-argument overloads above.
        resource_args, opts = _utilities.get_resource_args_opts(ServiceAccountArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 active: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 identifier: Optional[pulumi.Input[str]] = None,
                 ingestion_policy: Optional[pulumi.Input[str]] = None,
                 permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 user_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ServiceAccountArgs.__new__(ServiceAccountArgs)
            __props__.__dict__["active"] = active
            __props__.__dict__["description"] = description
            if identifier is None and not opts.urn:
                raise TypeError("Missing required property 'identifier'")
            __props__.__dict__["identifier"] = identifier
            __props__.__dict__["ingestion_policy"] = ingestion_policy
            __props__.__dict__["permissions"] = permissions
            __props__.__dict__["user_groups"] = user_groups
        super(ServiceAccount, __self__).__init__(
            'wavefront:index/serviceAccount:ServiceAccount',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            active: Optional[pulumi.Input[bool]] = None,
            description: Optional[pulumi.Input[str]] = None,
            identifier: Optional[pulumi.Input[str]] = None,
            ingestion_policy: Optional[pulumi.Input[str]] = None,
            permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            user_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ServiceAccount':
        """
        Get an existing ServiceAccount resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] active: Whether or not the service account is active
        :param pulumi.Input[str] description: The description of the service account
        :param pulumi.Input[str] identifier: The (unique) identifier of the service account to create. Must start with sa::
        :param pulumi.Input[str] ingestion_policy: ID of ingestion policy
        :param pulumi.Input[Sequence[pulumi.Input[str]]] permissions: List of permission to grant to this service account. Valid options are
               `agent_management`, `alerts_management`, `dashboard_management`, `embedded_charts`, `events_management`, `external_links_management`,
               `host_tag_management`, `metrics_management`, `user_management`
        :param pulumi.Input[Sequence[pulumi.Input[str]]] user_groups: List of user groups for this service account
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ServiceAccountState.__new__(_ServiceAccountState)
        __props__.__dict__["active"] = active
        __props__.__dict__["description"] = description
        __props__.__dict__["identifier"] = identifier
        __props__.__dict__["ingestion_policy"] = ingestion_policy
        __props__.__dict__["permissions"] = permissions
        __props__.__dict__["user_groups"] = user_groups
        return ServiceAccount(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def active(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether or not the service account is active
        """
        return pulumi.get(self, "active")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        The description of the service account
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def identifier(self) -> pulumi.Output[str]:
        """
        The (unique) identifier of the service account to create. Must start with sa::
        """
        return pulumi.get(self, "identifier")
    @property
    @pulumi.getter(name="ingestionPolicy")
    def ingestion_policy(self) -> pulumi.Output[Optional[str]]:
        """
        ID of ingestion policy
        """
        return pulumi.get(self, "ingestion_policy")
    @property
    @pulumi.getter
    def permissions(self) -> pulumi.Output[Sequence[str]]:
        """
        List of permission to grant to this service account. Valid options are
        `agent_management`, `alerts_management`, `dashboard_management`, `embedded_charts`, `events_management`, `external_links_management`,
        `host_tag_management`, `metrics_management`, `user_management`
        """
        return pulumi.get(self, "permissions")
    @property
    @pulumi.getter(name="userGroups")
    def user_groups(self) -> pulumi.Output[Sequence[str]]:
        """
        List of user groups for this service account
        """
        return pulumi.get(self, "user_groups")
| 1.84375 | 2 |
ZipFolders -GUI.py | mshambharkar/Scripts | 0 | 12757315 | import os
import shutil
import send2trash
import tkinter
import tkinter.filedialog
# Archive extensions whose extracted sibling folders we want to clean up.
definitions=['.zip','.tar','.rar']
cur_dir='C:\\Users\\Zombie\\Downloads'
#processedobjects
compressedlist=list()
extractedfolders=list()
# NOTE(review): the hard-coded default path above is immediately overwritten
# by the user's choice here.
cur_dir = tkinter.filedialog.askdirectory(initialdir="/",title='Please select a directory')
print(cur_dir)
def ValidateFile(fileItem):
    """If *fileItem* has a known archive extension, record its base name
    (filename without the final extension) in the global compressedlist."""
    extension = os.path.splitext(fileItem)[1].lower()
    if extension in definitions:
        base = os.path.basename(fileItem)
        archive_name = '.'.join(base.split('.')[:-1])
        compressedlist.append(archive_name)
def ValidateFolder(folderName):
    """Return True if *folderName* matches the base name of a seen archive
    (i.e. it looks like an extracted copy of that archive)."""
    # The membership test already yields the boolean; no if/else needed.
    return folderName in compressedlist
def IterateChildZip(folder):
    """Recursively scan *folder*, recording every archive file found
    (via ValidateFile into the global compressedlist).

    NOTE(review): paths are joined with a literal backslash, so this is
    Windows-only; os.path.join would be portable.
    """
    for fitem in os.listdir(folder):
        if os.path.isdir(folder+'\\'+fitem):
            IterateChildZip(folder+'\\'+fitem)
        else:
            ValidateFile(folder+'\\'+fitem)
def IterateChildFolder(folder):
    """Recursively collect directories whose name matches a seen archive's
    base name (into the global extractedfolders).

    Matching folders are not descended into -- they are recorded whole.
    """
    for fitem in os.listdir(folder):
        if os.path.isdir(folder+'\\'+fitem):
            if ValidateFolder(fitem):
                extractedfolders.append(folder+'\\'+fitem)
            else:
                IterateChildFolder(folder+'\\'+fitem)
#loop through each definition
# Pass 1: collect archive base names; pass 2: find matching extracted folders.
IterateChildZip(cur_dir)
IterateChildFolder(cur_dir)
print(compressedlist)
print(extractedfolders)
print('Do you want to proceed with delete ? Y/N')
proceed=input()
if proceed.lower() == 'y':
    # Folders go to the recycle bin (recoverable), not permanent deletion.
    for folder in extractedfolders:
        try:
            send2trash.send2trash(folder)
            print('Moved to recycle bin-'+folder)
        except Exception as e:
            print(e)
print('done enter any key to exit')
input()
| 3.09375 | 3 |
d25/part1.py | Jamie-Chang/advent2021 | 0 | 12757316 | from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable, Iterator
def read() -> Iterator[str]:
    """Yield the stripped lines of the puzzle input file."""
    with open("d25/input.txt") as f:
        yield from (line.strip() for line in f)
def parse_row(line: str):
    """Translate one input line into board cells: None for '.', a fresh
    SeaCucumber heading south for 'v' or east for '>'.

    Any other character yields nothing, matching the original match
    statement with no default case.
    """
    east = Vector(x=1, y=0)
    south = Vector(x=0, y=1)
    for character in line:
        if character == ".":
            yield None
        elif character == "v":
            yield SeaCucumber(south)
        elif character == ">":
            yield SeaCucumber(east)
def load(lines: Iterable[str]):
    """Build a Board from the textual puzzle rows."""
    rows = [list(parse_row(line)) for line in lines]
    return Board(rows)
@dataclass(frozen=True, eq=True)
class Vector:
    """An immutable 2-D integer vector / grid position."""

    x: int
    y: int

    def __add__(self, other: Vector) -> Vector:
        """Component-wise addition.

        >>> Vector(1, 2) + Vector(2, 1)
        Vector(x=3, y=3)
        """
        return Vector(x=self.x + other.x, y=self.y + other.y)

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise subtraction.

        >>> Vector(1, 2) - Vector(2, 1)
        Vector(x=-1, y=1)
        """
        return Vector(x=self.x - other.x, y=self.y - other.y)

    def __iter__(self) -> Iterator[int]:
        """Iterate the components as (x, y)."""
        return iter((self.x, self.y))

    def __mod__(self, other: Vector) -> Vector:
        """Component-wise modulo (used for toroidal wrap-around).

        >>> Vector(-1, -1) % Vector(5, 5)
        Vector(x=4, y=4)
        >>> Vector(6, 6) % Vector(5, 5)
        Vector(x=1, y=1)
        """
        return Vector(*(a % b for a, b in zip(self, other)))

    def __getitem__(self, p: int) -> int:
        """Index access: 0 -> x, 1 -> y; anything else raises IndexError."""
        if p not in (0, 1):
            raise IndexError(p)
        return self.x if p == 0 else self.y
@dataclass
class SeaCucumber:
    """A single sea cucumber on the grid.

    ``direction`` is the unit step it attempts each turn:
    Vector(1, 0) for the east-facing herd, Vector(0, 1) for the south-facing.
    """
    direction: Vector
@dataclass
class Board:
    """A toroidal grid of sea cucumbers, indexed by Vector positions."""

    board: list[list[SeaCucumber | None]]

    def __setitem__(self, key: Vector, value: SeaCucumber | None):
        """Place *value* at grid position *key*."""
        self.board[key.y][key.x] = value

    def __getitem__(self, key: Vector) -> SeaCucumber | None:
        """Return whatever occupies grid position *key* (None if empty)."""
        return self.board[key.y][key.x]

    def pop(self, key: Vector) -> SeaCucumber | None:
        """Remove and return the occupant of *key*, leaving the cell empty."""
        occupant = self[key]
        self[key] = None
        return occupant

    def __iter__(self) -> Iterator[Vector]:
        """Yield every grid position, row by row, left to right."""
        columns, rows = self.size
        return (Vector(x, y) for y in range(rows) for x in range(columns))

    @property
    def size(self) -> Vector:
        """Board dimensions as Vector(width, height); Vector(0, 0) when empty."""
        if self.board:
            return Vector(len(self.board[0]), len(self.board))
        return Vector(0, 0)
def movements(board: Board, direction: Vector) -> dict[Vector, Vector]:
    """Map each cucumber of *direction* to the free cell it can move into."""
    return {source: target for source, target in _movements(board, direction)}
def _movements(board: Board, direction: Vector) -> Iterator[tuple[Vector, Vector]]:
    """Yield (from, to) pairs for cucumbers facing *direction* whose next
    cell (wrapping around the board edges) is currently free."""
    for position in board:
        occupant = board[position]
        if occupant is None or occupant.direction != direction:
            continue
        target = (position + occupant.direction) % board.size
        if board[target] is None:
            yield position, target
def step(board: Board):
    """Advance the board one turn — east herd first, then south — and
    report whether any cucumber moved."""
    moved = False
    # The east herd moves completely before the south herd's moves are computed.
    for herd in (Vector(1, 0), Vector(0, 1)):
        for source, target in movements(board, herd).items():
            moved = True
            board[target] = board.pop(source)
    return moved
def move(board: Board) -> Iterator[Board]:
    """Yield the board after every step that changed it, until it settles."""
    while True:
        if not step(board):
            return
        yield board
def ilen(it: Iterable) -> int:
    """Count the items in *it* by exhausting it."""
    count = 0
    for _ in it:
        count += 1
    return count
def display(value: SeaCucumber | Board | None) -> str:
    """Render one cell — or recursively a whole board — in the puzzle's
    text notation ('.', '>', 'v')."""
    if value is None:
        return "."
    if isinstance(value, SeaCucumber):
        if value.direction == Vector(0, 1):
            return "v"
        if value.direction == Vector(1, 0):
            return ">"
        assert False, value
    if isinstance(value, Board):
        rendered_rows = ("".join(display(cell) for cell in row) for row in value.board)
        return "\n".join(rendered_rows)
    assert False, value
if __name__ == "__main__":
    # Part 1 answer: steps until no cucumber can move. ilen(move(...)) counts
    # the steps that changed the board; +1 accounts for the final, unchanged step.
    print(ilen(move(load(read()))) + 1)
| 3.578125 | 4 |
ensembl_prodinf/__init__.py | dbolser-ebi/ensembl-prodinf-core | 0 | 12757317 | <reponame>dbolser-ebi/ensembl-prodinf-core
from ensembl_prodinf.hive import Analysis
from ensembl_prodinf.hive import Result
from ensembl_prodinf.hive import LogMessage
from ensembl_prodinf.hive import Job
from ensembl_prodinf.hive import HiveInstance
from ensembl_prodinf.utils import dict_to_perl_string, list_to_perl_string, escape_perl_string
from ensembl_prodinf.db_utils import list_databases
from ensembl_prodinf.server_utils import get_status
from ensembl_prodinf.reporting import get_logger, set_logger_context
from ensembl_prodinf.email_celery_app import app as email_celery_app
from ensembl_prodinf.handover_celery_app import app as handover_celery_app
from ensembl_prodinf.event_celery_app import app as event_celery_app
from ensembl_prodinf.reporting import QueueAppenderHandler, ContextFilter, JsonFormatter
| 1.015625 | 1 |
events/importer/harrastushaku.py | jannetasa/linkedevents | 20 | 12757318 | import logging
import traceback
from collections import namedtuple
from copy import deepcopy
from datetime import datetime, timedelta
from functools import lru_cache, partial
import pytz
import requests
from django.db import transaction
from django.utils.dateparse import parse_time
from django.utils.timezone import now
from django_orghierarchy.models import Organization
from events.importer.sync import ModelSyncher
from events.importer.util import clean_text
from events.importer.yso import KEYWORDS_TO_ADD_TO_AUDIENCE
from events.keywords import KeywordMatcher
from events.models import DataSource, Event, Keyword, Place
from .base import Importer, register_importer
# Per module logger
logger = logging.getLogger(__name__)
HARRASTUSHAKU_API_BASE_URL = 'http://nk.hel.fi/harrastushaku/api/'
TIMEZONE = pytz.timezone('Europe/Helsinki')
# Safety cap: recurring activities spanning more than this are rejected.
MAX_RECURRING_EVENT_LENGTH = 366  # days
# Harrastushaku main-category id -> YSO keyword ids attached to every event
# in that category (see get_event_keywords_from_main_categories).
MAIN_CATEGORY_KEYWORDS = {
    '1': {'yso:p3466'},
    '2': {'yso:p916', 'yso:p6062'},
    '3': {'yso:p13084', 'yso:p2023'},
    '4': {'yso:p2445', 'yso:p20405'},
    '5': {'yso:p1808'},
    '7': {'yso:p2851'},
    '8': {'yso:p1278'},
    '9': {'yso:p6940'},
    '11': {'yso:p143', 'yso:p9270'},
}
# ((min_age, max_age), audience keyword ids): an activity whose age span
# overlaps the range gets the keywords (see get_event_audiences_from_ages).
AUDIENCE_BY_AGE_RANGE = (
    ((0, 6), {'yso:p4354'}),
    ((7, 16), {'yso:p16485'}),
    ((10, 18), {'yso:p11617'}),
)
# Start/end datetimes of a single occurrence of a recurring course.
SubEventTimeRange = namedtuple('SubEventTimeRange', ['start', 'end'])
class HarrastushakuException(Exception):
    """Raised when a Harrastushaku location/activity cannot be imported."""
    pass
@register_importer
class HarrastushakuImporter(Importer):
    """Importer for Harrastushaku (nk.hel.fi) locations and activities."""

    name = 'harrastushaku'
    supported_languages = ['fi']

    def setup(self):
        """Initialize data sources, the publisher organization and lookup caches."""
        logger.debug('Running Harrastushaku importer setup...')
        self.data_source, _ = DataSource.objects.get_or_create(id=self.name, defaults={'name': 'Harrastushaku'})
        self.tprek_data_source = DataSource.objects.get(id='tprek')
        self.ahjo_data_source, _ = DataSource.objects.get_or_create(id='ahjo', defaults={'name': 'Ahjo'})
        self.organization, _ = Organization.objects.get_or_create(origin_id='u48040030',
                                                                  data_source=self.ahjo_data_source)
        self.tprek_ids = {place.origin_id for place in Place.objects.filter(data_source=self.tprek_data_source)}
        self.keywords = {keyword.id: keyword for keyword in Keyword.objects.all()}
        self.keyword_matcher = KeywordMatcher()

    def import_places(self):
        """Import Harrastushaku locations as Places

        - If we can find a close-enough match for the location object coming from Harrastushaku in Toimipisterekisteri,
        we do not import that location object, as this this will cause duplicate location issue due to
        Harrastushaku data being of low quality.

        - If, however, we cannot find a match, location object will be imported with data source "harrastushaku".
        """
        logger.info('Importing places...')
        locations = self.fetch_locations()
        logger.debug('Handling {} locations...'.format(len(locations)))
        self.location_id_to_place_id = self.map_harrastushaku_location_ids_to_tprek_ids(locations)
        for location in locations:
            try:
                self.handle_location(location)
            except Exception as e:  # noqa
                # One bad location must not abort the whole import run.
                message = e if isinstance(e, HarrastushakuException) else traceback.format_exc()
                logger.error('Error handling location {}: {}'.format(location.get('id'), message))

    def map_harrastushaku_location_ids_to_tprek_ids(self, harrastushaku_locations):
        '''Map every Harrastushaku location id to either a matching tprek
        Place id or, failing that, a "harrastushaku:<id>" id.

        Example mapped dictionary result:
        {
            '95': 'harrastushaku:95',
            '953': 'harrastushaku:953',
            '968': 'tprek:20479',
            '97': 'tprek:8062',
            '972': 'tprek:9079',
            '987': 'harrastushaku:987',
            '99': 'tprek:8064',
        }
        '''
        result = dict()
        for harrastushaku_location in harrastushaku_locations:
            harrastushaku_location_id = harrastushaku_location['id']
            # Try an exact match (including name) first, then relax to an
            # address-only match before giving up on tprek.
            strict_filters = {
                'id__startswith': self.tprek_data_source,
                'name': harrastushaku_location['name'],
                'address_locality': harrastushaku_location['city'],
                'postal_code': harrastushaku_location['zip'],
                'street_address': harrastushaku_location['address'],
            }
            flexible_filters = {
                'id__startswith': self.tprek_data_source,
                'address_locality': harrastushaku_location['city'],
                'postal_code': harrastushaku_location['zip'],
                'street_address': harrastushaku_location['address'],
            }
            tprek_place = (Place.objects.filter(**strict_filters).first() or
                           Place.objects.filter(**flexible_filters).first())
            if tprek_place:
                result[harrastushaku_location_id] = tprek_place.id
            else:
                result[harrastushaku_location_id] = '{}:{}'.format(self.data_source.id, harrastushaku_location_id)
        return result

    def import_courses(self):
        """Import Harrastushaku activities as Courses

        Activities having "active" anything else than "1" or "K" will be
        ignored.

        When importing and an existing course isn't present in imported data:
          - If the course's end time is in the past, the course will be left as
            it is.
          - If the course's end time is not in the past, the course will be soft
            deleted alongside its sub events.

        If an activity has something in field "timetables", it will be imported
        as a recurring event, otherwise as a one-time event.

        A recurring course will have a super event which includes the course's
        whole time period, and sub events which will represent individual course
        occurrences. Other than start and end times, a super event and its sub
        events will all contain the same data.

        A recurring course's sub event start and end datetimes will be build using
        the activity's "timetables". The time tables contain info out weekday,
        times, and repetition which means number of days there is between
        occurrences (basically a multiple of 7).

        A recurring course's sub events will be given an ID that has the
        activity's ID and start and end times of the sub event in a compressed
        form. This also means that between imports only sub events that are
        happening exactly at the same time are considered to be the same instance,
        so if a sub event's begin or end time changes at all, a new sub event will
        be created instead of updating an old one (because there is no unambiguous
        way to determine which old sub event the new one corresponds to).

        A course's keywords will come from both of the following:
          - The activity's main category. There are hardcoded keywords for every
            main category.
          - The activity's sub category's "searchwords". Those are manually
            entered words, which are mapped to keywords using KeywordMatcher
            (from events.keywords).

        A course's audience will come from both of the following:
          - The activity's "audience_max_age" and "audience_min_age" using
            hardcoded keywords for certain age ranges.
          - The course's keywords, adding the ones that are present in
            KEYWORDS_TO_ADD_TO_AUDIENCE (from events.importer.yso).
        """
        logger.info('Importing courses...')

        locations = self.fetch_locations()
        if not locations:
            logger.warning('No location data fetched, aborting course import.')
            return
        self.location_id_to_place_id = self.map_harrastushaku_location_ids_to_tprek_ids(locations)

        activities = self.fetch_courses()
        if not activities:
            logger.info('No activity data fetched.')
            return

        def event_delete(event):
            # Keep already-finished courses; soft delete only ongoing/future ones.
            if event.end_time < now():
                return
            event.soft_delete()
            for sub_event in event.sub_events.all():
                sub_event.soft_delete()

        self.event_syncher = ModelSyncher(
            Event.objects.filter(data_source=self.data_source, super_event=None),
            lambda event: event.id,
            event_delete,
        )

        num_of_activities = len(activities)
        logger.debug('Handling {} activities...'.format(num_of_activities))

        for i, activity in enumerate(activities, 1):
            try:
                self.handle_activity(activity)
            except Exception as e:  # noqa
                message = e if isinstance(e, HarrastushakuException) else traceback.format_exc()
                logger.error('Error handling activity {}: {}'.format(activity.get('id'), message))
            if not i % 10:
                logger.debug('{} / {} activities handled.'.format(i, num_of_activities))

        self.event_syncher.finish(force=True)
        logger.info('Course import finished.')

    def fetch_locations(self):
        """Fetch all locations from the Harrastushaku API; [] on failure."""
        logger.debug('Fetching locations...')
        try:
            url = '{}location/'.format(HARRASTUSHAKU_API_BASE_URL)
            response = requests.get(url)
            response.raise_for_status()
            return response.json()
        except requests.RequestException as e:
            logger.error('Cannot fetch locations: {}'.format(e))
            return []

    def fetch_courses(self):
        """Fetch all activities from the Harrastushaku API; [] on failure."""
        logger.debug('Fetching courses...')
        try:
            url = '{}activity/'.format(HARRASTUSHAKU_API_BASE_URL)
            response = requests.get(url)
            response.raise_for_status()
            return response.json()['data']
        except requests.RequestException as e:
            logger.error('Cannot fetch courses: {}'.format(e))
            return []

    @transaction.atomic
    def handle_location(self, location_data):
        """Save a Harrastushaku location unless it was mapped to a tprek place."""
        harrastushaku_location_id = location_data.get('id')
        harrastushaku_location_mapped_id = self.location_id_to_place_id.get(harrastushaku_location_id)
        if harrastushaku_location_mapped_id.startswith(self.tprek_data_source.id):
            # A tprek match exists; importing would create a duplicate place.
            return
        else:
            self.handle_non_tprek_location(location_data)

    def handle_non_tprek_location(self, location_data):
        """Create/update a Place with data source "harrastushaku"."""
        get_string = bind_data_getters(location_data)[0]
        place_data = {
            'name': get_string('name', localized=True),
            'info_url': get_string('url', localized=True),
            'street_address': get_string('address', localized=True),
            'address_locality': get_string('city', localized=True),
            'postal_code': get_string('zip'),
            'data_source': self.data_source,
            'origin_id': location_data['id'],
            'publisher': self.organization,
        }
        self.save_place(place_data)

    @transaction.atomic
    def handle_activity(self, activity_data):
        """Import one activity as a one-time or recurring course."""
        if activity_data.get('active') not in ('1', 'K'):
            logger.debug('Skipping inactive activity {}'.format(activity_data.get('id')))
            return
        event_data = self.get_event_data(activity_data)
        if event_data['start_time'] > event_data['end_time']:
            raise HarrastushakuException('Start time after end time')
        time_tables = activity_data.get('timetables', [])
        if time_tables:
            self.handle_recurring_event(event_data, time_tables)
        else:
            self.handle_one_time_event(event_data)

    def create_registration_links(self, activity_data):
        # Harrastushaku has own registration links which should be created in the imported events as well
        if activity_data.get('regavailable', 0) and '1' in activity_data['regavailable']:
            # regstart and regend sometimes take "false" value which seem to mean in the cases regavailable=='1' that
            # the registration is going on indefinitely
            reg_start = activity_data['regstartdate'] if isinstance(activity_data['regstartdate'], int) else 0
            reg_end = activity_data['regenddate'] if isinstance(activity_data['regenddate'], int) else 9999999999
            if datetime.utcfromtimestamp(reg_start) <= datetime.utcnow() <= datetime.utcfromtimestamp(reg_end):
                return {'fi': {'registration': f"https://harrastushaku.fi/register/{activity_data['id']}"}}
        return ''

    def get_event_data(self, activity_data):
        """Build the event dict for save_event() from raw activity data."""
        get_string, get_int, get_datetime = bind_data_getters(activity_data)

        keywords = self.get_event_keywords(activity_data)
        audience = self.get_event_audiences_from_ages(activity_data) | self.get_event_audiences_from_keywords(keywords)
        keywords |= audience

        event_data = {
            'name': get_string('name', localized=True),
            'description': get_string('description', localized=True),
            'audience_max_age': get_int('agemax'),
            'audience_min_age': get_int('agemin'),
            'start_time': get_datetime('startdate'),
            'end_time': get_datetime('enddate'),
            'date_published': get_datetime('publishdate'),
            'external_links': self.create_registration_links(activity_data),
            'organizer_info': self.get_organizer_info(activity_data),
            'extension_course': {
                'enrolment_start_date': get_datetime('regstartdate'),
                'enrolment_end_date': get_datetime('regenddate'),
                'maximum_attendee_capacity': get_int('maxentries'),
                'remaining_attendee_capacity': get_int('regavailable'),
            },
            'data_source': self.data_source,
            'origin_id': activity_data['id'],
            'publisher': self.organization,
            'location': self.get_event_location(activity_data),
            'keywords': keywords,
            'in_language': self.get_event_languages(activity_data),
            'images': self.get_event_images(activity_data),
            'offers': self.get_event_offers(activity_data),
            'audience': audience,
        }

        return event_data

    def handle_recurring_event(self, event_data, time_tables):
        """Save a recurring course as one super event plus its occurrences."""
        start_date, end_date = self.get_event_start_and_end_dates(event_data)
        if not start_date:
            raise HarrastushakuException('No start time')
        if not end_date:
            raise HarrastushakuException('No end time')
        if end_date - start_date > timedelta(days=MAX_RECURRING_EVENT_LENGTH):
            raise HarrastushakuException('Too long recurring activity')
        sub_event_time_ranges = self.build_sub_event_time_ranges(start_date, end_date, time_tables)
        if not sub_event_time_ranges:
            raise HarrastushakuException('Erroneous time tables: {}'.format(time_tables))
        super_event = self.save_super_event(event_data)
        self.save_sub_events(event_data, sub_event_time_ranges, super_event)

    def handle_one_time_event(self, event_data):
        """Save a course that has no timetable as a single event."""
        event_data['has_start_time'] = False
        event_data['has_end_time'] = False
        event = self.save_event(event_data)
        self.event_syncher.mark(event)

    def get_event_keywords(self, activity_data):
        """Combine main-category keywords with matched search-word keywords."""
        keywords = (self.get_event_keywords_from_main_categories(activity_data) |
                    self.get_event_keywords_from_search_words(activity_data))
        return keywords

    def get_event_keywords_from_main_categories(self, activity_data):
        """Resolve hardcoded keywords for the activity's main categories."""
        main_category_ids = {c.get('maincategory_id') for c in activity_data.get('categories', [])}
        keyword_ids = set()
        for main_category_id in main_category_ids:
            keyword_ids |= MAIN_CATEGORY_KEYWORDS.get(main_category_id, set())
        return {self.keywords.get(kw_id) for kw_id in keyword_ids if kw_id in self.keywords}

    def get_event_keywords_from_search_words(self, activity_data):
        """Map the comma-separated "searchwords" string to keywords."""
        keywords = set()
        # BUGFIX: "searchwords" is a string in the API; default to '' so a
        # missing field does not crash on .split() (the old default [] would).
        search_words = activity_data.get('searchwords', '')
        cleaned_search_words = [s.strip().lower() for s in search_words.split(',') if s.strip()]
        for kw in cleaned_search_words:
            matches = self.match_keyword(kw)
            if matches:
                keywords |= set(matches)
        return keywords

    def get_event_languages(self, activity_data):
        """Match free-text "languages" against the importer's known languages."""
        # "or ''" also guards against an explicit null value in the source data.
        language_text = (activity_data.get('languages') or '').lower()
        languages = {obj for code, obj in self.languages.items() if obj.name_fi and obj.name_fi in language_text}
        return languages

    def get_event_start_and_end_dates(self, event_data):
        """Return (start date, end date), either possibly None."""
        start_datetime = event_data.get('start_time')
        start_date = start_datetime.date() if start_datetime else None
        end_datetime = event_data.get('end_time')
        end_date = end_datetime.date() if end_datetime else None
        return start_date, end_date

    def get_organizer_info(self, activity_data):
        """Combine cleaned registration and organiser details into one string."""
        org_details = clean_text(activity_data.get('organiserdetails', ''), strip_newlines=True, parse_html=True)
        reg_details = clean_text(activity_data.get('regdetails', ''), strip_newlines=True, parse_html=True)
        return {'fi': f'{reg_details} {org_details}'.strip()} if org_details or reg_details else ''

    def build_sub_event_time_ranges(self, start_date, end_date, time_tables):
        """Expand the activity's timetables into concrete occurrence times."""
        sub_event_time_ranges = []

        for time_table in time_tables:
            current_date = start_date
            weekday = int(time_table.get('weekday'))
            start_time = parse_time(time_table.get('starttime'))
            end_time = parse_time(time_table.get('endtime'))
            repetition = int(time_table.get('repetition'))
            if repetition == 0:
                repetition = 7  # assume repetition 0 and 7 mean the same thing
            if not (weekday and repetition) or start_time >= end_time:
                continue
            # Advance to the first occurrence of the timetable's weekday.
            while current_date.isoweekday() != weekday:
                current_date += timedelta(days=1)
            while current_date <= end_date:
                # BUGFIX: localize() pins the naive wall-clock time to the
                # Helsinki timezone regardless of the server's local timezone
                # (astimezone() on a naive datetime assumes the system zone).
                sub_event_time_ranges.append(SubEventTimeRange(
                    TIMEZONE.localize(datetime.combine(current_date, start_time)),
                    TIMEZONE.localize(datetime.combine(current_date, end_time)),
                ))
                current_date += timedelta(days=repetition)

        return sub_event_time_ranges

    def save_super_event(self, event_data):
        """Save the recurring super event covering the whole course period."""
        super_event_data = deepcopy(event_data)
        super_event_data['super_event_type'] = Event.SuperEventType.RECURRING
        event = self.save_event(super_event_data)
        self.event_syncher.mark(event)
        return event

    def save_sub_events(self, event_data, sub_event_time_ranges, super_event):
        """Save one sub event per occurrence and sync away stale ones."""
        super_event._changed = False

        def delete_sub_event(obj):
            logger.debug('{} deleted'.format(obj))
            obj.deleted = True
            obj.save()

        sub_event_syncher = ModelSyncher(
            super_event.sub_events.filter(deleted=False), lambda o: o.id, delete_func=delete_sub_event)

        sub_event_data = deepcopy(event_data)
        sub_event_data['super_event'] = super_event

        for sub_event_time_range in sub_event_time_ranges:
            sub_event_data['start_time'] = sub_event_time_range.start
            sub_event_data['end_time'] = sub_event_time_range.end
            # Occurrence identity is its exact time span; see import_courses().
            sub_event_data['origin_id'] = (
                event_data['origin_id'] + self.create_sub_event_origin_id_suffix(sub_event_time_range))
            sub_event = self.save_event(sub_event_data)
            if sub_event._changed:
                super_event._changed = True
            sub_event_syncher.mark(sub_event)

        old_sub_event_count = super_event.sub_events.count()
        sub_event_syncher.finish(force=True)
        if super_event.sub_events.count() != old_sub_event_count:
            super_event._changed = True

        if super_event._changed:
            super_event.save()

    def create_sub_event_origin_id_suffix(self, sub_event_time_range):
        """Compress an occurrence's times into an origin id suffix."""
        start, end = sub_event_time_range
        assert start.date() == end.date()
        date = start.date().strftime('%Y%m%d')
        times = '{}{}'.format(*(time.time().strftime('%H%M') for time in (start, end)))
        return '_{}{}'.format(date, times)

    def get_event_images(self, activity_data):
        """Build image dicts from the activity's "images" mapping."""
        image_data = activity_data.get('images')
        if not isinstance(image_data, dict):
            return []
        event_image_data = [{
            'name': image_datum.get('name', ''),
            'url': image_datum.get('filename', ''),
        } for image_datum in image_data.values()]
        return event_image_data

    def get_event_location(self, activity_data):
        """Resolve the event's place id via the location mapping, or None."""
        location_id = activity_data.get('location_id')
        if not location_id:
            return None
        return {'id': self.location_id_to_place_id.get(location_id)}

    def get_event_offers(self, activity_data):
        """Build offer dicts from the activity's price entries."""
        offers = []
        for price_data in activity_data.get('prices', ()):
            get_string = bind_data_getters(price_data)[0]
            price = get_string('price', localized=False)
            description = get_string('description', localized=True)
            is_free = price is not None and price == '0'
            if not description and len(activity_data['prices']) == 1:
                description = get_string('pricedetails', localized=True)
            offers.append({
                'price': price if not is_free else None,
                'is_free': is_free,
                'description': description,
            })
        return offers

    def get_event_audiences_from_ages(self, activity_data):
        """Map the activity's age span to audience keywords."""
        audience_keyword_ids = set()
        age_min = get_int_from_data(activity_data, 'agemin') or 0
        age_max = get_int_from_data(activity_data, 'agemax') or 200
        for age_range, keyword_ids in AUDIENCE_BY_AGE_RANGE:
            if ranges_overlap(age_min, age_max, age_range[0], age_range[1]):
                audience_keyword_ids |= keyword_ids
        return {self.keywords.get(k_id) for k_id in audience_keyword_ids if k_id in self.keywords}

    def get_event_audiences_from_keywords(self, keywords):
        """Pick the keywords that double as audience keywords."""
        return {kw for kw in keywords if kw.id in KEYWORDS_TO_ADD_TO_AUDIENCE}

    # NOTE(review): lru_cache on a method keeps self alive for the cache's
    # lifetime; presumably acceptable for a one-shot importer instance.
    @lru_cache()
    def match_keyword(self, text):
        return self.keyword_matcher.match(text)
def get_string_from_data(data, field, localized=False):
    """Fetch *field* from *data* as a cleaned string.

    Returns None for missing, non-string or blank values; with
    ``localized=True`` the value is wrapped as a Finnish translation dict.
    """
    raw = data.get(field)
    if not isinstance(raw, str):
        return None
    cleaned = clean_text(raw)
    if cleaned:
        return {'fi': cleaned} if localized else cleaned
    return None
def get_int_from_data(data, field):
    """Fetch *field* from *data* as an int; None for missing/blank/false values.

    The membership test uses ==, so an integer 0 is also filtered to None
    (0 == False), mirroring the source API's use of false-y placeholders.
    """
    raw = data.get(field)
    return None if raw in (None, False, '') else int(raw)
def get_datetime_from_data(data, field):
    """Fetch *field* (a unix timestamp) as a Helsinki-zone datetime, or None."""
    raw = data.get(field)
    if raw in (None, False, ''):
        return None
    utc_moment = datetime.utcfromtimestamp(int(raw)).replace(tzinfo=pytz.utc)
    return utc_moment.astimezone(TIMEZONE)
def bind_data_getters(data):
    """Return (get_string, get_int, get_datetime) pre-bound to *data*."""
    getters = (get_string_from_data, get_int_from_data, get_datetime_from_data)
    return tuple(partial(getter, data) for getter in getters)
def ranges_overlap(x1, x2, y1, y2):
    """True when the closed ranges [x1, x2] and [y1, y2] share a point."""
    return not (y2 < x1 or x2 < y1)
| 1.804688 | 2 |
helper/omim_ps_constructor.py | PEDIA-Charite/PEDIA-workflow | 9 | 12757319 | # snipplets from omim api for phenotypic series creation
def api_query_phenotype_mapping(self, mim_pheno: str):
'''Map phenotype to phenotypic series number.'''
entry_list = self.api_query_entry(mim_pheno)
phenotypic_numbers = [
v['phenotypeMap']['phenotypicSeriesNumber']
for e in entry_list
if 'phenotypeMapList' in e
for v in e['phenotypeMapList']
if 'phenotypicSeriesExists' in e and e['phenotypicSeriesExists']
]
return phenotypic_numbers
def construct_phenotypic_series_mapping(self):
    '''Construct a dict with mapping of phenotypic omim ids to
    phenotypic series ids.

    Returns None (doing nothing) when a cached mapping built from the
    same morbidmap file already exists on disk; otherwise queries the
    OMIM API per unique phenotype MIM number and writes the result to
    mim_to_ps.json.
    '''
    output_path = os.path.join(self._mimdir, "mim_to_ps.json")
    if os.path.exists(output_path):
        with open(output_path, "r") as old_json:
            old = json.load(old_json)
        # A matching morbidmap hash means the cached mapping is still current.
        if old['morbidmap_hash'] == self.morbidmap_hash:
            LOGGER.warning(
                ("Phenotypic series mapping with same base "
                 "morbidmap has already been constructed.")
            )
            return
    # Unique phenotype MIM numbers (NaNs dropped) to query against the API.
    mim_numbers = self.morbidmap[
        "phen_mim_number"].dropna().drop_duplicates()
    mim_ps_mapping = {
        num: self.api_query_phenotype_mapping(num)
        for num in mim_numbers
    }
    # Store the morbidmap hash next to the mapping so the cache check above
    # can detect staleness.
    mim_ps_data = {
        'morbidmap_hash': self.morbidmap_hash,
        'mapping': mim_ps_mapping
    }
    with open(output_path, "w") as new_json:
        json.dump(mim_ps_data, new_json)
    return mim_ps_data
| 2.328125 | 2 |
13-protocol-abc/double/double_test.py | SeirousLee/example-code-2e | 990 | 12757320 | <filename>13-protocol-abc/double/double_test.py
from typing import TYPE_CHECKING
import pytest
from double_protocol import double
def test_double_int() -> None:
    # double() on an int behaves like multiplying by two.
    given = 2
    result = double(given)
    assert result == given * 2
    if TYPE_CHECKING:
        # Static-only: shows mypy's inferred types for the protocol call.
        reveal_type(given)
        reveal_type(result)
def test_double_str() -> None:
    # Strings support * 2 (repetition), so they satisfy the protocol too.
    given = 'A'
    result = double(given)
    assert result == given * 2
    if TYPE_CHECKING:
        reveal_type(given)
        reveal_type(result)
def test_double_fraction() -> None:
    # Fractions implement __mul__, so they satisfy the protocol as well.
    from fractions import Fraction
    given = Fraction(2, 5)
    result = double(given)
    assert result == given * 2
    if TYPE_CHECKING:
        reveal_type(given)
        reveal_type(result)
def test_double_array() -> None:
    # No runtime assertion here: this case only exercises the static types
    # (array * 2 repeats the array rather than doubling elements).
    from array import array
    given = array('d', [1.0, 2.0, 3.14])
    result = double(given)
    if TYPE_CHECKING:
        reveal_type(given)
        reveal_type(result)
def test_double_nparray() -> None:
    import numpy as np  # type: ignore
    given = np.array([[1, 2], [3, 4]])
    result = double(given)
    # == on ndarrays is element-wise, hence the .all() reduction.
    comparison = result == given * 2
    assert comparison.all()
    if TYPE_CHECKING:
        reveal_type(given)
        reveal_type(result)
def test_double_none() -> None:
    # None has no __mul__, so double() must raise TypeError at runtime.
    given = None
    with pytest.raises(TypeError):
        double(given)
| 2.921875 | 3 |
get_negmovie_directornum.py | wangzhen21/neural_collaborative_filtering | 0 | 12757321 | <reponame>wangzhen21/neural_collaborative_filtering
#coding=utf-8
# Convert the files to new IDs; the new files are ml-1m.test.rating.stance and ml-1m.train.rating.stance
import cPickle
from tqdm import tqdm
import cPickle
import utils.tools as tools
def outfile(file, stritem):
    """Append *stritem* plus a trailing newline to *file*.

    Mode 'a' creates the file when missing and appends otherwise; the old
    mode string "aw+" was not a valid mode and only worked because fopen()
    ignored the extra flag characters.
    """
    with open(file, "a") as f:
        f.write(stritem + "\n")
# Load the old<->new movie ID maps, per-movie director metadata and per-user
# director view counts pickled by earlier pipeline steps (assumed).
newid_oldid = cPickle.load(open("Data/newid_oldid.p","rb"))
oldmovie_newid = cPickle.load(open("Data/oldmovie_newid.p","rb"))
movielid_dir_name_full_movie = cPickle.load(open("Data/movielid_dir_name_full_movie.p","rb"))
user_director_count_all = cPickle.load(open("Data/user_director_count_all.d","rb"))
# Remove any stale output file before the append-only writes below.
tools.deletefilesmovies(["Data/ml-1m.test.negtive.flag.stance.newindex"])
test_id_index = {}
def get_saw_dire_num(user_id, movie_id_new):
    """Return, as a string, 1 + the number of movies by the director of
    *movie_id_new* that *user_id* has already seen; "1" when the director
    is unknown or unseen.
    """
    director_count = user_director_count_all[int(user_id)]
    movie_id_old = newid_oldid[movie_id_new]
    try:
        dir_index = movielid_dir_name_full_movie[movie_id_old][0]
    except (KeyError, IndexError):
        # Narrowed from a bare except: only a missing movie entry or an
        # empty director record should fall back to the default count.
        return "1"
    if dir_index in director_count:
        return str(director_count[dir_index] + 1)
    return "1"
# Earlier (disabled) step that indexed the test ratings file:
# with open("Data/ml-1m.test.rating.stance","rb") as f:
#     for line in f:
#         line_split = line.strip().split("\t")
#         test_id_index[int(line_split[0])] = line_split[1]
# Each input line looks like "(user,movie)\tneg1\tneg2\t...": for every
# negative movie, append the user's director-exposure count to the output.
with open("Data/ml-1m.test.negtive.stance.newindex","rb") as f:
    for line in tqdm(f):
        later = ""
        line_split = line.strip().split(")")
        id_pair = line_split[0]
        # [1:...] strips the leading '(' from "(user" to get the user id.
        user_id = (id_pair.split(","))[0][1:len((id_pair.split(","))[0])]
        negtive_movie_id_list = line_split[1].strip().split("\t")
        for item in negtive_movie_id_list:
            new_dire_count = get_saw_dire_num(user_id,int(item))
            later = later + "\t" + new_dire_count
        outfile("Data/ml-1m.test.negtive.flag.stance.newindex",id_pair + ")" + later)
src/simmate/calculators/deepmd/inputs/type_and_set.py | jacksund/simmate | 9 | 12757322 | <reponame>jacksund/simmate<filename>src/simmate/calculators/deepmd/inputs/type_and_set.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import numpy
from sklearn.model_selection import train_test_split
from django_pandas.io import read_frame
from simmate.toolkit import Structure, Composition
from simmate.utilities import get_directory
class DeepmdDataset:
"""
This class works simply as a converter. You provide it a list of
IonicStepStructures that have forces and energies, and it will create a
deepmd input for you.
The input consists of 2 "type" files and then a subfolder made of 4 files for
the actaul data. An example folder looks like this:
type.raw
type_map.raw
set.000
box.npy
coord.npy
energy.npy
force.npy
The "type" files are very simple. "type_map.raw" is just a list of elements
and the "type.raw" is our composition listing what each site is. For example,
H20 (water) would just be...
type_map.raw --> H O
type.raw --> 0 0 1
These .npy are "numpy" files, where the data included in each is...
box = lattice matrixes
coord = site's cartesian coordinates
energy = calculated energies
force = calculated forces for sites
All data is collapsed to 1D arrays. For example, this means the 3x3 lattice matrix
becomes a 1x9 list of numbers and the entire file is a list of matricies in this
format. The same is done to forces, coords, and energies.
Note this is the folder/file format for EACH composition. So this class creates
many of these folders if you provide input structures that have varying
compositions (such as CoO2, Co2O4, Co3O6, etc.).
Further, we split the input structures into training and test datasets. So
the final folder setup will look like...
deepmd_data
CoO2_train
CoO2_test
Co2O4_train
Co2O4_test
<< etc. >>
All data required is available from an IonicStepStructure in our database, so
this is our current input format.
"""
# TODO: currently we use the IonicStepStructure from our relaxation database
# but in the future this should be a core class that extends the pymatgen
# Structure object. So for the moment, the ionic_step_structures can be
# provide like so:
#
# from simmate.database import connect
# from simmate.database.workflow_results import MITRelaxationStructure
# ionic_step_structures = MITRelaxationStructure.objects.filter(
# energy__isnull=False, site_forces__isnull=False
# ).all()
#
# from simmate.calculators.deepmd.inputs.set import DeepmdSet
# DeepmdSet.to_folder(ionic_step_structures)
#
@staticmethod
def to_file(
    ionic_step_structures,
    directory="deepmd_data",
    test_size=0.2,
):
    """Write DeePMD train/test input folders from a queryset of structures.

    Args:
        ionic_step_structures: Django queryset of structures that carry
            ``structure_string`` (POSCAR), ``site_forces`` and ``energy``.
        directory: root folder to create the per-composition subfolders in.
        test_size: fraction of structures held out for the test split.

    Returns:
        (folders_train, folders_test): lists of created folder paths.
    """
    # Grab the path to the desired directory and create it if it doesn't exist
    directory = get_directory(directory)
    # convert the ionic_step_structures queryset to a pandas dataframe
    structures_dataframe = read_frame(ionic_step_structures)
    # because we are using the database model, we first want to convert to
    # pymatgen structures objects and add a column to the dataframe for these
    #
    # structures_dataframe["structure"] = [
    #     structure.to_toolkit() for structure in ionic_step_structures
    # ]
    #
    # BUG: the read_frame query creates a new query, so it may be a different
    # length from ionic_step_structures. For this reason, we can't iterate
    # through the queryset like in the commented out code above. Instead,
    # we need to iterate through the dataframe rows.
    # See https://github.com/chrisdev/django-pandas/issues/138 for issue
    structures_dataframe["structure"] = [
        Structure.from_str(s.structure_string, fmt="POSCAR")
        for _, s in structures_dataframe.iterrows()
    ]
    # split the structures into test and training sets randomly
    dataframe_train, dataframe_test = train_test_split(
        structures_dataframe,
        test_size=test_size,
    )
    # The process for creating files is the same for both the test and training
    # sets, where the only difference is the folder suffix we add to each. Other
    # methods (such as creating the input.json for DeePMD) require the names
    # of the folders created here -- so we also store them in lists to return
    # at the end of the function too.
    folders_train = []
    folders_test = []
    for folder_suffix, dataframe_set, folder_list in [
        ("train", dataframe_train, folders_train),
        ("test", dataframe_test, folders_test),
    ]:
        # grab a list of the unique compositions in our set of structures
        unique_compositions = dataframe_set.formula_full.unique()
        # for each composition, we want to filter off those structures and
        # then write them to an individual folder for deepmd.
        # note these compositions are just strings right now.
        for composition_str in unique_compositions:
            # convert to a pymatgen composition object
            composition = Composition(composition_str)
            # Let's establish where this folder will be and also store it
            composition_directory = os.path.join(
                directory, composition_str + "_" + folder_suffix
            )
            folder_list.append(composition_directory)
            # this creates the directory or grabs the full path
            composition_directory = get_directory(composition_directory)
            # first let's write the type_map file, which is just a list of elements
            mapping_filename = os.path.join(composition_directory, "type_map.raw")
            with open(mapping_filename, "w") as file:
                for element in composition:
                    file.write(str(element) + "\n")
            # Now we can write the type file while also establishing the mapping.
            # Note the mapping is just the index (0, 1, 2, ...) of each element.
            type_filename = os.path.join(
                directory, composition_str + "_" + folder_suffix, "type.raw"
            )
            with open(type_filename, "w") as file:
                for mapping_value, element in enumerate(composition):
                    # one line per site of this element in the composition
                    for i in range(int(composition[element])):
                        file.write(str(mapping_value) + "\n")
            # Now we need the relevant structures. Let's filter off the
            # structures that have this composition for us to use below
            dataframe_subset = dataframe_set[
                dataframe_set["formula_full"] == composition_str
            ]
            # We iterate through each structure (and its data) to compile everything
            # into the deepmd list format (flattened 1D arrays per structure).
            lattices = []
            coordinates = []
            forces = []
            energies = []
            # Note the "_," here is because the row index is returned but we don't need it
            for _, structure_data in dataframe_subset.iterrows():
                # grab the pymatgen structure
                structure = structure_data.structure
                # flatten the 3x3 lattice matrix to a 1x9 array and add to our list
                lattice_flat = structure.lattice.matrix.flatten()
                lattices.append(lattice_flat)
                # flatten the cartesian coordinates to a 1D array and add to our list
                coords_flat = structure.cart_coords.flatten()
                coordinates.append(coords_flat)
                # flatten the forces to a 1D array and add to our list
                forces_flat = numpy.array(structure_data.site_forces).flatten()
                forces.append(forces_flat)
                # the energy is just a single value so we can add it to our list
                energies.append(structure_data.energy)
            # Now we want to convert all lists to numpy.
            lattices = numpy.array(lattices)
            coordinates = numpy.array(coordinates)
            forces = numpy.array(forces)
            energies = numpy.array(energies)
            # Note, dtype=object could be added as a kwarg here in order to clear
            # a warning that prints when we have arrays of different lengths. This
            # happens when we have structures with different nsites. However, that
            # causes errors with DeepMD, which cannot load object-type arrays.
            # for now we assume the dataset is written to set.000, so we
            # make that folder first.
            set_directory = os.path.join(composition_directory, "set.000")
            set_directory = get_directory(set_directory)
            # now write our numpy files to the folder specified
            for filename, filedata in [
                ("box.npy", lattices),
                ("coord.npy", coordinates),
                ("energy.npy", energies),
                ("force.npy", forces),
            ]:
                filename_full = os.path.join(set_directory, filename)
                with open(filename_full, "wb") as file:
                    numpy.lib.format.write_array(fp=file, array=filedata)
    # all of the folders have been created and we return a list of where
    return folders_train, folders_test
| 2.625 | 3 |
main.py | kaushalvivek/twitter-sentiment-analysis | 0 | 12757323 | import tweepy
from textblob import TextBlob

# Twitter API credentials (fill in before running).
con_key = ""
con_secret = ""
access_token = ""
access_token_secret = ""

auth = tweepy.OAuthHandler(con_key, con_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

search_term = input("Enter term to analyse:\n")
tweets = api.search(search_term)

# Accumulate sentiment over all returned tweets.
polarity = 0.0
subjectivity = 0.0
for tweet in tweets:
    analysis = TextBlob(tweet.text)
    polarity += analysis.sentiment.polarity
    subjectivity += analysis.sentiment.subjectivity

# Bug fix: guard against an empty result set, which previously raised
# ZeroDivisionError when averaging.
if tweets:
    subjectivity /= len(tweets)
    polarity /= len(tweets)
    print("\n"+search_term+"'s public perception is "+str(round(polarity,2))+" on a scale of -1 to 1")
    print("Tweets on "+search_term+" are "+str(round(subjectivity*100,2))+" percent subjective.")
else:
    print("No tweets found for " + search_term)
print("Tweets on "+search_term+" are "+str(round(subjectivity*100,2))+" percent subjective.") | 3.5 | 4 |
agents/value_function.py | schuderer/bprl | 2 | 12757324 | <reponame>schuderer/bprl<filename>agents/value_function.py
# Stdlib imports
from collections import defaultdict
import logging
# Third party imports
import numpy as np
# Application imports
from .discretizer import create_discretizers
logger = logging.getLogger(__name__)
class QFunction:
    """Tabular Q-Value (State-Action-Value) Approximator"""

    def __init__(
        self, env, default_value=0, discretize_bins=12, discretize_log=False
    ):
        """Initialize a Tabular Q-Value (State-Action-Value) Approximator.

        Params:
            env: the openai gym compatible environment to use
            default_value=0: initialize state-action value (Q) table with this value
            discretize_bins: discretize any non-discrete state dimensions with this
                             number of bins
            discretize_log: use exponentially increasing instead of equal bin sizes
                            (the further from zero, the coarser the bin size)
        """
        # q_table: initial q table (optional, for continuing with pre-filled approximator,
        # empty by default)
        self.env = env
        self.default_value = default_value
        self.state_disc, self.action_disc = create_discretizers(
            self.env, discretize_bins, discretize_log
        )
        # One default Q value per discrete action; each unseen state gets a
        # fresh copy of this row (see the defaultdict factory below).
        state_value_default = [
            default_value for _ in range(self.action_disc.space.n)
        ]
        # NOTE: .copy() in the factory is essential -- without it all states
        # would share (and mutate) the same value row.
        self.q_table = defaultdict(lambda: state_value_default.copy(), {})
        # Cache of state keys saved/loaded by name via select_action/update_value.
        self.remembered_state_keys = {}

    def select_action(
        self, observation, policy, policy_params, save=None, load=None
    ):
        """Get action/value tuple of selected action.

        If ``load`` is given, reuse a previously remembered state key instead
        of discretizing ``observation``; if ``save`` is given, remember the
        computed key under that name for later reuse.
        """
        if load is None:
            discrete_state = self.state_disc.discretize(observation)
            state_key = self._stateKeyFor(discrete_state)
            if save is not None:
                self.remembered_state_keys[save] = state_key
        else:
            state_key = self.remembered_state_keys[load]
        action_values = self._get_action_values(state_key, self.q_table)
        # policy picks an index into the discrete action values
        action_index, action_value = policy(action_values, policy_params)
        action = int(self.action_disc.undiscretize(action_index))
        return action, action_value

    def update_value(self, observation, action, value, save=None, load=None):
        """Update value of observation-action pair (same save/load semantics
        as select_action)."""
        if load is None:
            discrete_state = self.state_disc.discretize(observation)
            state_key = self._stateKeyFor(discrete_state)
            if save is not None:
                self.remembered_state_keys[save] = state_key
        else:
            state_key = self.remembered_state_keys[load]
        discrete_action = self.action_disc.discretize(action)
        self.q_table[state_key][int(discrete_action)] = value

    # def print_q(self, q_table=None):
    #     q_table = q_table or self.q_table
    #     for s in range(self.state_disc.space.n):
    #         if self.state_disc.grid is None:
    #             s = np.array(s)
    #         else:  # todo
    #             # indices = np.unravel_index(range(statesDisc.n), state_grid.shape)
    #             s = self.state_disc.grid[s]
    #         logger.info(
    #             [
    #                 q_table[self._stateKeyFor(s)][a]
    #                 for a in range(self.action_disc.space.n)
    #             ]
    #         )
    #
    # def print_q_frozenlake(self, q_table=None):
    #     q_table = q_table or self.q_table
    #     for s in range(self.state_disc.space.n):
    #         s = np.array(s)
    #         print(
    #             [
    #                 q_table[self._stateKeyFor(s)][a]
    #                 for a in range(self.action_disc.space.n)
    #             ]
    #         )

    @staticmethod
    def _stateKeyFor(discreteObs):
        # Scalar observations are promoted to 1-element arrays so that the
        # join below works uniformly.
        if discreteObs.shape == ():
            d_obs = np.reshape(discreteObs, (1,))
        else:
            d_obs = discreteObs
        # slow using timeit():
        return "-".join(map(str, d_obs))
        # 100x faster using timeit(), but only a few percent faster in reality:
        # return reduce(lambda a, v: str(a) + '-' + str(v), discreteObs)

    def _get_action_values(self, key_start, q_table):
        # defaultdict lookup: unseen keys yield the default value row.
        return q_table[key_start]
| 2.265625 | 2 |
dpkt/edp.py | Vito-Swift/dpkt | 924 | 12757325 | <gh_stars>100-1000
"""Extreme Discovery Protocol."""
from __future__ import absolute_import
import dpkt
class EDP(dpkt.Packet):
    """Extreme Discovery Protocol packet header."""

    __hdr__ = (
        ('version', 'B', 1),
        ('reserved', 'B', 0),
        ('hlen', 'H', 0),
        ('sum', 'H', 0),
        ('seq', 'H', 0),
        ('mid', 'H', 0),
        ('mac', '6s', b'')
    )

    def __bytes__(self):
        # Compute the checksum lazily: only when it has not been set yet.
        if self.sum == 0:
            raw = dpkt.Packet.__bytes__(self)
            self.sum = dpkt.in_cksum(raw)
        return dpkt.Packet.__bytes__(self)
class TestEDP(object):
    """
    Test basic EDP functionality.
    """

    @classmethod
    def setup_class(cls):
        # Build a reference EDP frame from its hex dump and parse it once
        # for all test methods.
        from binascii import unhexlify
        cls.buf = unhexlify(
            '01'      # version
            '00'      # reserved
            '013c'    # hlen
            '9e76'    # sum
            '001b'    # seq
            '0000'    # mid
            '080027'  # mac
            '2d90ed990200240000000000000000000000000f020207000000000000000000000000000000009901010445584f532d32000000000000000'
            '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
            '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
            '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
            '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
            '00000000000000000000000000000000099000004'
        )
        cls.p = EDP(cls.buf)

    def test_version(self):
        """Header field: version byte."""
        assert (self.p.version == 1)

    def test_reserved(self):
        """Header field: reserved byte."""
        assert (self.p.reserved == 0)

    def test_hlen(self):
        """Header field: header length."""
        assert (self.p.hlen == 316)

    def test_sum(self):
        """Header field: checksum as parsed from the buffer."""
        assert (self.p.sum == 40566)

    def test_seq(self):
        """Header field: sequence number."""
        assert (self.p.seq == 27)

    def test_mid(self):
        """Header field: machine id."""
        assert (self.p.mid == 0)

    def test_mac(self):
        """Header field: 6-byte MAC address."""
        assert (self.p.mac == b"\x08\x00'-\x90\xed")

    def test_bytes(self):
        """Round-trip: serialization reproduces the original buffer, and the
        checksum is recomputed correctly when zeroed."""
        assert bytes(self.p) == self.buf
        # force recalculation of the checksum
        edp = EDP(self.buf)
        edp.sum = 0
        assert edp.sum == 0
        assert bytes(edp) == self.buf
| 2.296875 | 2 |
voiceplay/player/tasks/actions.py | tb0hdan/voiceplay | 2 | 12757326 | <reponame>tb0hdan/voiceplay
#-*- coding: utf-8 -*-
""" Track actions task module """
from voiceplay.database import voiceplaydb
from voiceplay.webapp.baseresource import APIV1Resource
from voiceplay.utils.helpers import SingleQueueDispatcher
from .basetask import BasePlayerTask
class CurrentTrackResource(APIV1Resource):
    """
    Current track API endpoint
    """
    route_base = '/api/v1/tracks/current'
    queue = None

    def get(self):
        """
        HTTP GET handler
        """
        if not self.queue:
            # No dispatcher queue configured: report a timeout.
            return {'status': 'timeout', 'message': ''}
        dispatcher = SingleQueueDispatcher(queue=self.queue)
        reply = dispatcher.send_and_wait('current_track')
        return {'status': 'ok', 'message': reply}
class LoveTrackResource(APIV1Resource):
    """
    Love track API endpoint
    """
    route_base = '/api/v1/tracks/love'
    queue = None

    def get(self):
        """
        HTTP GET handler
        """
        if not self.queue:
            # No dispatcher queue configured: report a timeout.
            return {'status': 'timeout', 'message': ''}
        dispatcher = SingleQueueDispatcher(queue=self.queue)
        reply = dispatcher.send_and_wait('love')
        return {'status': 'ok', 'message': reply}
class BanTrackResource(APIV1Resource):
    """
    Ban track API endpoint
    """
    route_base = '/api/v1/tracks/ban'
    queue = None

    def get(self):
        """
        HTTP GET handler
        """
        if not self.queue:
            # No dispatcher queue configured: report a timeout.
            return {'status': 'timeout', 'message': ''}
        dispatcher = SingleQueueDispatcher(queue=self.queue)
        reply = dispatcher.send_and_wait('ban')
        return {'status': 'ok', 'message': reply}
class CurrentTrackTask(BasePlayerTask):
    """
    Get current track
    """
    __group__ = ['current_track']
    __regexp__ = ['^current(.+)?$']
    __priority__ = 190

    @classmethod
    def process(cls, regexp, message):
        """
        Run task - get current track
        """
        cls.logger.debug('Message: %r matches %r, running %r', message, regexp, cls.__name__)
        track = cls.get_current_track()
        cls.say('Current track is %s' % track)
        return track
class LoveTrackTask(BasePlayerTask):
    """
    Love current track
    """
    __group__ = ['love', 'like', 'unban']
    __regexp__ = ['^love(.+)?$', '^like(.+)?$', '^unban(.+)?$']
    __priority__ = 210

    @classmethod
    def process(cls, regexp, message):
        """
        Run task - get current track and set status to 'love'
        """
        cls.logger.debug('Message: %r matches %r, running %r', message, regexp, cls.__name__)
        track = cls.get_current_track()
        if track:
            voiceplaydb.set_track_status(track, 'loved')
            cls.say('Track %s was marked as loved' % track)
        return None
class BanTrackTask(BasePlayerTask):
    """
    Ban current track
    """
    __group__ = ['ban', 'hate', 'dislike']
    __regexp__ = ['^ban(.+)?$', '^hate(.+)?$', '^dislike(.+)?$']
    __priority__ = 220

    @classmethod
    def process(cls, regexp, message):
        """
        Run task - get current track and set status to 'ban'
        """
        cls.logger.debug('Message: %r matches %r, running %r', message, regexp, cls.__name__)
        track = cls.get_current_track()
        if track:
            voiceplaydb.set_track_status(track, 'banned')
            cls.say('Track %s was marked as banned' % track)
        return None
| 2.234375 | 2 |
tests/test_inverse.py | d21d3q/thermalprinter | 28 | 12757327 | # coding: utf-8
def test_default_value(printer):
    """A fresh printer starts with inverse mode disabled."""
    assert printer._inverse is False
def test_changing_no_value(printer):
    """Calling inverse() with no argument leaves inverse mode off."""
    printer.inverse()
    assert printer._inverse is False
def test_changing_state_on(printer):
    """inverse(True) enables inverse mode."""
    printer.inverse(True)
    assert printer._inverse is True
def test_changing_state_off(printer):
    """inverse(False) disables inverse mode."""
    printer.inverse(False)
    assert printer._inverse is False
def test_reset_value(printer):
    """reset() restores the default (disabled) inverse mode."""
    printer.reset()
    assert printer._inverse is False
| 2.34375 | 2 |
app/main/errors.py | kennjr/Pitch | 0 | 12757328 | from flask import render_template
from urllib import error
# @app.errorhandler(404)
from app.main import main
@main.app_errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page for unknown routes."""
    return render_template('404_page.html'), 404
@main.app_errorhandler(error.HTTPError)
def http_error(exc):
    """Render the custom 404 page when an urllib HTTPError bubbles up.

    The parameter is named ``exc`` (Flask passes the exception positionally)
    so it no longer shadows the imported ``urllib.error`` module.
    """
    return render_template('404_page.html'), 404
| 2.5625 | 3 |
Custom/__init__.py | Grim-es/udon-pie-auto-completion | 0 | 12757329 | from UdonPie import GameObject
from UdonPie import Transform
# Module-level stub objects for the UdonPie auto-completion tooling.
# NOTE(review): these appear to mirror UdonSharp's implicit `transform` /
# `gameObject` members -- confirm against the generator.
this_trans = Transform()
this_gameObj = GameObject()
# Placeholder for a void return type.
Void = None
def instantiate(arg1):
    '''Auto-completion stub; intentionally has no runtime behavior.

    :param arg1: GameObject
    :type arg1: GameObject
    '''
    pass
| 2.125 | 2 |
data.py | wy1211/NiuTensor_2 | 1 | 12757330 | # 数据处理
# pickle是一个将任意复杂的对象转成对象的文本或二进制表示的过程
# 也可以将这些字符串、文件或任何类似于文件的对象 unpickle 成原来的对象
import pickle
import os
import random
import numpy as np
# Tag-to-index dictionary for the BIO NER labels (PER/LOC/ORG entities).
tag2label = {"O": 0,
             "B-PER": 1, "I-PER": 2,
             "B-LOC": 3, "I-LOC": 4,
             "B-ORG": 5, "I-ORG": 6
             }
def read_corpus(corpus_path):
    """Read a char/tag training corpus into a list of sentences.

    Each input line is "<char> <tag>"; sentences are separated by blank
    lines ("\n").

    Args:
        corpus_path: path to the UTF-8 corpus file.

    Returns:
        list of (chars, tags) tuples, one per sentence.
    """
    data = []
    with open(corpus_path, encoding='utf-8') as fr:
        lines = fr.readlines()  # one list element per line
    sent_, tag_ = [], []
    for line in lines:
        if line != '\n':
            [char, label] = line.strip().split()
            sent_.append(char)
            tag_.append(label)
        else:
            # blank line terminates the current sentence
            data.append((sent_, tag_))
            sent_, tag_ = [], []
    # Bug fix: keep the final sentence even when the file does not end
    # with a blank separator line (it was silently dropped before).
    if sent_:
        data.append((sent_, tag_))
    return data
# Build a vocabulary dict from the training corpus:
# {'char': [id, frequency], ...}, drop low-frequency characters, then
# reassign dense ids and pickle the final {char: id} mapping to vocab_path
# (saved as word2id.pkl via pickle.dump).
def vocab_build(vocab_path, corpus_path, min_count):  # min_count is typically 3
    """Build and pickle a character->id vocabulary from the corpus."""
    data = read_corpus(corpus_path)
    word2id = {}
    for sent_, tag_ in data:
        for word in sent_:
            if word.isdigit():  # digits collapse to a single token
                word = '<NUM>'
            elif ('\u0041' <= word <= '\u005a') or ('\u0061' <= word <= '\u007a'):  # ASCII letters
                word = '<ENG>'
            if word not in word2id:  # first occurrence: assign id, count 1
                word2id[word] = [len(word2id)+1, 1]
            else:  # seen before: bump the frequency
                word2id[word][1] += 1
    low_freq_words = []  # characters below the frequency threshold
    for word, [word_id, word_freq] in word2id.items():
        if word_freq < min_count and word != '<NUM>' and word != '<ENG>':
            low_freq_words.append(word)
    for word in low_freq_words:
        del word2id[word]  # drop low-frequency characters
    new_id = 1  # reassign contiguous ids after the deletions
    for word in word2id.keys():
        word2id[word] = new_id
        new_id += 1
    word2id['<UNK>'] = new_id  # unknown-character token
    word2id['<PAD>'] = 0       # padding token is always id 0
    print(len(word2id))
    with open(vocab_path, 'wb') as fw:
        pickle.dump(word2id, fw)  # serialize the mapping to vocab_path
def sentence2id(sent, word2id):
    """Map each character of a sentence to its vocabulary id.

    Digits collapse to '<NUM>', ASCII letters to '<ENG>', and characters
    absent from the vocabulary to '<UNK>'.
    """
    def _normalize(ch):
        # Fold character classes onto their special vocabulary tokens.
        if ch.isdigit():
            return '<NUM>'
        if '\u0041' <= ch <= '\u005a' or '\u0061' <= ch <= '\u007a':
            return '<ENG>'
        return ch

    sentence_id = []
    for ch in sent:
        token = _normalize(ch)
        if token not in word2id:
            token = '<UNK>'
        sentence_id.append(word2id[token])
    return sentence_id
def read_dictionary(vocab_path):
    """Load (unpickle) the character->id vocabulary stored at vocab_path."""
    full_path = os.path.join(vocab_path)
    with open(full_path, 'rb') as fr:
        word2id = pickle.load(fr)
    print('vocab_size:', len(word2id))
    return word2id
def random_embedding(vocab, embedding_dim):
    """Return a float32 (len(vocab), embedding_dim) matrix sampled from U(-0.25, 0.25)."""
    shape = (len(vocab), embedding_dim)
    embedding_mat = np.float32(np.random.uniform(-0.25, 0.25, shape))
    # each row is the initial embedding vector for one vocabulary entry
    return embedding_mat
def pad_sequences(sequences, pad_mark=0):
    """Pad every sequence to the length of the longest one.

    Args:
        sequences: iterable of token-id sequences, e.g.
            [[33, 12, 17], [52, 19, 14, 48]].
        pad_mark: filler value appended to shorter sequences.

    Returns:
        (seq_list, seq_len_list): the padded sequences and the original
        (pre-padding) length of each sequence -- both are fed to feed_dict.
    """
    sequences = list(sequences)
    # Bug fix: max() over an empty iterable raises ValueError; an empty
    # batch should simply produce empty outputs.
    if not sequences:
        return [], []
    max_len = max(map(lambda x: len(x), sequences))
    seq_list, seq_len_list = [], []
    for seq in sequences:
        seq = list(seq)
        # right-pad with pad_mark up to the batch maximum length
        seq_ = seq[:max_len] + [pad_mark] * max(max_len - len(seq), 0)
        seq_list.append(seq_)
        seq_len_list.append(min(len(seq), max_len))
    return seq_list, seq_len_list
''' seqs的形状为二维矩阵,形状为[[33,12,17,88,50....]...第一句话
[52,19,14,48,66....]...第二句话
]
labels的形状为二维矩阵,形状为[[0, 0, 3, 4]....第一句话
[0, 0, 3, 4]...第二句话
]
'''
def batch_yield(data, batch_size, vocab, tag2label, shuffle=False):
    """Generator yielding (seqs, labels) batches of id-encoded sentences."""
    if shuffle:  # optionally randomize sample order
        random.shuffle(data)
    seqs, labels = [], []
    for (sent_, tag_) in data:
        sent_ = sentence2id(sent_, vocab)  # characters -> vocabulary ids
        label_ = [tag2label[tag] for tag in tag_]  # tags -> label indices
        if len(seqs) == batch_size:
            # batch is full: emit it (yield makes this a generator)
            yield seqs, labels
            seqs, labels = [], []
        seqs.append(sent_)
        labels.append(label_)
    if len(seqs) != 0:
        # emit the final, possibly smaller, batch
        yield seqs, labels
| 2.703125 | 3 |
FunctionWorker/python/StateUtils.py | hcngac/knix_dev | 1 | 12757331 | <filename>FunctionWorker/python/StateUtils.py
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import copy
from datetime import datetime
import json
import socket
import time
import threading
import anytree
from thriftpy2.transport import TFramedTransportFactory, TServerSocket
from thriftpy2.protocol import TCompactProtocolFactory
from thriftpy2.server import TSimpleServer
from thriftpy2.thrift import TProcessor
from ujsonpath import parse, tokenize
import py3utils
from DataLayerClient import DataLayerClient
class StateUtils:
    """Helpers for executing ASL (Amazon States Language) states in MFn."""

    # Supported ASL state-type identifiers ('Task_SAND' is the MFn default).
    defaultStateType = 'Task_SAND'
    taskStateType = 'Task'
    choiceStateType = 'Choice'
    passStateType = 'Pass'
    succeedStateType = 'Succeed'
    failStateType = 'Fail'
    waitStateType = 'Wait'
    parallelStateType = 'Parallel'
    mapStateType = 'Map'
    # NOTE(review): class-level dict, shared across instances -- confirm
    # this sharing is intended for Map-state output collection.
    mapFunctionOutput = {}
def __init__(self, functionstatetype=defaultStateType, functionstatename='', functionstateinfo='{}', functionruntime="", logger=None, workflowid=None, sandboxid=None, functiontopic=None, datalayer=None, storage_userid=None, internal_endpoint=None):
    """Set up per-state bookkeeping and parse the state's JSON description.

    Args:
        functionstatetype: ASL state type (see class attributes).
        functionstatename: name of this state in the workflow.
        functionstateinfo: JSON string describing the state.
        functionruntime: runtime id, e.g. "java" or a python version.
        logger: logger instance used throughout.
        workflowid/sandboxid/functiontopic/datalayer/storage_userid/
        internal_endpoint: MFn deployment identifiers and endpoints.
    """
    # ASL comparison operator names and their python equivalents
    # (kept index-aligned: operators[i] maps to operators_python[i]).
    self.operators = ['And', 'BooleanEquals', 'Not', 'NumericEquals', 'NumericGreaterThan', 'NumericGreaterThanEquals',\
        'NumericLessThan', 'NumericLessThanEquals', 'Or', 'StringEquals', 'StringGreaterThan',\
        'StringGreaterThanEquals', 'StringLessThan', 'StringLessThanEquals', 'TimestampEquals', 'TimestampGreaterThan',\
        'TimestampGreaterThanEquals', 'TimestampLessThan', 'TimestampLessThanEquals']
    self.operators_python = ['and', '==', 'not', '==', '>', '>=', '<', '<=', 'or', '==', '>', '>=', '<', '<=', '==', '>', '>=', '<', '<=']
    self.operators_set = set(self.operators)
    # Predefined ASL error codes recognized by Retry/Catch handling.
    self.asl_errors = ("States.ALL", "States.Timeout", "States.TaskFailed", "States.Permissions", "States.ResultPathMatchFailure", "States.BranchFailed", "States.NoChoiceMatched")
    self.nodelist = []
    self.parsed_trees = []
    self.default_next_choice = []
    # Path selectors parsed from the state definition (InputPath, ItemsPath,
    # ResultPath, OutputPath, Parameters).
    self.input_path_dict = {}
    self.items_path_dict = {}
    self.result_path_dict = {}
    self.output_path_dict = {}
    self.parameters_dict = {}
    self.functionstatetype = functionstatetype
    self.functionstatename = functionstatename
    self.functionstateinfo = functionstateinfo
    self.functiontopic = functiontopic
    self._datalayer = datalayer
    self._storage_userid = storage_userid
    self._internal_endpoint = internal_endpoint
    self._function_runtime = functionruntime
    if self._function_runtime == "java":
        # if java, this is the unix-domain-socket address we'll send requests
        # to be handled by the java worker
        self._java_handler_address = "/tmp/java_handler_" + self.functionstatename + ".uds"
    self.parsedfunctionstateinfo = {}
    self.workflowid = workflowid
    self.sandboxid = sandboxid
    self.choiceNext = ''
    self.mapStateCounter = 0
    self.evaluateCounter = 0
    # Retry/Catch entries extracted from the state definition.
    self.catcher_list = []
    self.retry_list = []
    self._logger = logger
    # populate the parsed state info and the path/retry/catch dicts above
    self.parse_function_state_info()
    # Map-state bookkeeping for branch outputs and deferred items.
    self.function_output_batch_list = []
    self.tobeProcessedlater = []
    self.outputMapStatebatch = []
    self.mapPartialResult = {}
def call_counter(func):
    """Decorator that counts invocations on the wrapper's ``calls`` attribute.

    NOTE(review): defined in the class body without ``self`` -- it is used as
    a plain decorator, not as an instance method.
    """
    def helper(*args, **kwargs):
        helper.calls += 1
        return func(*args, **kwargs)
    helper.calls = 0
    # preserve the wrapped function's name for logging/debugging
    helper.__name__ = func.__name__
    return helper
def find_cat_data(self, err, cat_list):
    """Look up the Catch entry matching *err*.

    Returns (Next, ResultPath); Next is [] and ResultPath is "$" when no
    catcher matches. The last matching entry wins.
    """
    cat_result = "$"  # default ResultPath
    cat_next = []     # default: no catcher target
    for entry in cat_list:
        if "ErrorEquals" not in entry:
            continue
        names = entry["ErrorEquals"]
        if str(err) in names or err.__class__.__name__ in names:
            cat_next = entry['Next']
            if "ResultPath" in entry:
                cat_result = entry['ResultPath']
    return cat_next, cat_result
def find_ret_data(self, err, ret_list):
    """Return (MaxAttempts, IntervalSeconds, BackoffRate) for *err*.

    Falls back to (1, 1, 1.0) when no Retry entry matches; the last
    matching entry wins.
    """
    max_attempts, interval_seconds, backoff_rate = 1, 1, 1.0  # defaults
    for entry in ret_list:
        names = entry['ErrorEquals']
        if err in names or err.__class__.__name__ in names:
            if "MaxAttempts" in entry:
                max_attempts = entry['MaxAttempts']
            if "IntervalSeconds" in entry:
                interval_seconds = entry['IntervalSeconds']
            if "BackoffRate" in entry:
                backoff_rate = entry['BackoffRate']
    return max_attempts, interval_seconds, backoff_rate
def isMapState(self):
    """Return True when this state is an ASL Map state."""
    return self.functionstatetype == StateUtils.mapStateType
def isTaskState(self):
    """Return True for Task states (including the MFn 'Task_SAND' default)."""
    return self.functionstatetype == StateUtils.taskStateType or self.functionstatetype == StateUtils.defaultStateType
def applyParameters(self, raw_state_input):
#2c. Apply Parameters, if available and applicable (The Parameters field is used in Map to select values in the input)
# in = raw_state_input
# if Parameters:
# in = raw_state_input[ItemsPath]
#
try:
function_input = raw_state_input
self._logger.debug("inside applyParameters: " + str(self.parameters_dict) + ", raw_state_input: " + str(raw_state_input))
if self.parameters_dict:
function_input = self.process_parameters(self.parameters_dict, function_input)
return function_input
except Exception:
raise Exception("Parameters processing exception")
def applyItemsPath(self, raw_state_input):
#2a. Apply ItemsPath, if available and applicable (The ItemsPath field is used in Map to select an array in the input)
# in = raw_state_input
# if ItemsPath:
# in = raw_state_input[ItemsPath]
#
try:
function_input = raw_state_input
if self.items_path_dict and 'ItemsPath' in self.items_path_dict:
function_input = self.process_items_path(self.items_path_dict, function_input)
return function_input
except Exception:
raise Exception("Items path processing exception")
def applyInputPath(self, raw_state_input):
#2. Apply InputPath, if available (Extract function_input from raw_state_input)
# in = raw_state_input
# if InputPath:
# in = raw_state_input[InputPath]
#
try:
function_input = raw_state_input
if self.input_path_dict and 'InputPath' in self.input_path_dict:
function_input = self.process_input_path(self.input_path_dict, function_input)
return function_input
except Exception:
raise Exception("Input path processing exception")
# send a request to the java worker over its unix domain socket and get the result
def _send_java_request(self, java_input, java_output, api_server, server_socket):
    """Forward *java_input* to the java worker and fill *java_output*.

    Runs in a helper thread. On completion it closes *api_server* and
    *server_socket* so the main thread (serving API calls) can proceed to
    publish the output. *java_output* is mutated in place with keys
    'functionResult', 'hasError', 'errorType', 'errorTrace'.
    """
    # get a connection to the java worker
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # send the request; retry the connect with linearly growing backoff
    max_num_tries = 10
    num_tries = 0
    trying = True
    has_error = False
    while trying:
        try:
            sock.connect(self._java_handler_address)
            trying = False
        except socket.error as msg:
            num_tries += 1
            if num_tries > max_num_tries:
                self._logger.debug("cannot open connection to java worker: %s", msg)
                trying = False
                has_error = True
            else:
                self._logger.debug("will retry connection to java worker...")
                time.sleep(0.05*num_tries)
    if not has_error:
        try:
            sock.sendall(java_input.encode())
            # half-close: signals end-of-request to the java side
            sock.shutdown(socket.SHUT_WR)
            # receive the response until the peer closes the stream
            chunks = []
            while True:
                data = sock.recv(4096)
                if not data:
                    sock.close()
                    break
                chunks.append(data.decode())
            output_data = "".join(chunks)
            self._logger.debug("received output_data: " + output_data)
            try:
                output_data = json.loads(output_data)
                if not output_data["hasError"]:
                    java_output["functionResult"] = output_data["functionResult"]
                    java_output["hasError"] = False
                    java_output["errorType"] = ""
                    java_output["errorTrace"] = ""
                else:
                    java_output["hasError"] = output_data["hasError"]
                    java_output["errorType"] = output_data["errorType"]
                    java_output["errorTrace"] = output_data["errorTrace"]
            except Exception as exc:
                # response was not valid JSON; leave java_output unset
                self._logger.debug("Problem in received output_data: " + output_data)
                pass
            # close the api server in the main thread, so that we can continue with publishing the output
            api_server.close()
            server_socket.close()
        except socket.error as msg:
            self._logger.debug("cannot send request to java worker: %s", msg)
            #os._exit(1)
def _exec_function(self, runtime, exec_arguments, sapi):
    """Execute the state's function for the given runtime.

    For python the function is called directly; for java a thrift API
    server is started for the function instance and the request is relayed
    over a unix domain socket. Returns the function output; for java,
    raises when the worker reported an error (so Retry/Catch can act).
    """
    if runtime == "python 3.6":
        func = exec_arguments["function"]
        args = exec_arguments["function_input"]
        function_output = func(args, sapi)
    elif runtime == "java":
        # open the API server for this request
        api_uds = exec_arguments["api_uds"]
        thriftAPIService = exec_arguments["thriftAPIService"]
        java_input = exec_arguments["function_input"]
        processor = TProcessor(thriftAPIService, sapi)
        server_socket = TServerSocket(unix_socket=api_uds, client_timeout=None)
        # no need for any other type of server; there will only be a single client: the java function instance
        api_server = TSimpleServer(processor, server_socket,
                                   iprot_factory=TCompactProtocolFactory(),
                                   itrans_factory=TFramedTransportFactory())
        self._logger.debug("API server at: " + api_uds)
        self._logger.debug("starting with java_input: " + java_input)
        # access to the output for the thread via a shared, mutated dict
        java_output = {}
        # send it to the java worker in a thread
        # (thread has access to api_server object and server_socket to stop it)
        # (thread has also access to the output to set it in the main thread of execution)
        try:
            t = threading.Thread(target=self._send_java_request, args=(java_input, java_output, api_server, server_socket,))
            t.start()
        except Exception as exc:
            pass
        # meanwhile, the main thread listens and serves API requests
        # when the execution is finished, the api server will be stopped
        try:
            self._logger.debug("API server serving...")
            api_server.serve()
        except Exception as exc:
            # serve() raises when the helper thread closes the sockets
            #raise exc
            pass
        # when the java worker function returns, it stops the API server and sets the output that was produced
        # get the output
        has_error = java_output["hasError"]
        error_type = java_output["errorType"]
        error_trace = java_output["errorTrace"]
        if not has_error:
            function_output = java_output["functionResult"]
        else:
            # _XXX_: need to raise the exception, so that the catcher and retryer can have a chance
            raise Exception(error_type)
    return function_output
#@retry(ZeroDivisionError, tries=10, delay=1) # ToDo: parse parameters of of retryers and catchers
#@retry([x[0] for x in self.asl_errors], tries=3, delay=2) # ToDo: parse parameters of of retryers and catchers
#@retry("States.ALL", tries=3, delay=2)
def exec_function_catch_retry(self, runtime, exec_arguments, sapi):
    """Execute the state function with ASL Retry/Catch semantics.

    Uses self.retry_list (Retry) and self.catcher_list (Catch) parsed from
    the state definition. Retry entries re-invoke the function with
    IntervalSeconds/BackoffRate delays up to MaxAttempts; Catch entries
    redirect matched errors to their 'Next' state via sapi.add_dynamic_next.
    Returns the function output, or an {"Error", "Cause"} dict when an
    error was absorbed by a retryer/catcher.
    """
    retryer = self.retry_list
    catcher = self.catcher_list
    ret_error_list = []
    ret_interval_seconds = 0
    ret_backoff_rate = 0
    ret_max_attempts = 0
    cat_next = ""
    ret_value = []
    for ret in retryer:
        ret_error_list = ret['ErrorEquals']
        self._logger.debug("[StateUtils] found a ASL workflow retryer, retry for: " + str(ret_error_list))
        try:
            ret_value = self._exec_function(runtime, exec_arguments, sapi)
            return ret_value
        except Exception as exc:
            self._logger.debug("[StateUtils] retryer just caught an error: " + ", " + str(exc) + ", " + str(exc.__class__.__name__) + ", " + str(retryer))
            # get the retry parameters for this error
            ret_max_attempts, ret_interval_seconds, ret_backoff_rate = self.find_ret_data(exc, retryer)
            delay = int(ret_interval_seconds)
            max_attempts = int(ret_max_attempts)
            backoff_rate = float(ret_backoff_rate)
            # start retrying on this error
            while max_attempts:
                try:
                    ret_value = self._exec_function(runtime, exec_arguments, sapi)
                    return ret_value
                except Exception as e_retry:
                    # only retry when the raised error matches this Retry entry
                    if (any(str(e_retry) in s0 for s0 in ret_error_list) or any(e_retry.__class__.__name__ in s1 for s1 in ret_error_list)):
                        self._logger.debug("[StateUtils] MFn ASL retryer just caught an error:" + str(e_retry) + str(retryer))
                        self._logger.debug("[StateUtils] retrying for Error: " + str(e_retry) + ", remaining attempts: " + str(max_attempts))
                        max_attempts -= 1
                        if not max_attempts:
                            ret_value = {"Error": str(exc), "Cause": "Error not caught by MFn ASL Workflow retryer"}
                            self._logger.error("[StateUtils] Error not caught by MFn ASL Workflow retryer!")
                            return ret_value
                            #raise # max retries have been reached
                        self._logger.warning('%s, retrying in %s seconds... ' % (e_retry, str(delay)))
                        time.sleep(delay)
                        # exponential backoff between attempts
                        delay *= backoff_rate
    if catcher:
        self._logger.debug("[StateUtils] found a ASL workflow catcher")
        # there was no retry information provided for this function, proceed with catch
        ret_value = {"Error": "Catcher", "Cause": "error caught by MFn ASL Workflow catcher"}
        try:
            ret_value = self._exec_function(runtime, exec_arguments, sapi)
            return ret_value
        except Exception as exc:
            exc_msg = str(exc)
            self._logger.error("[StateUtils] catcher just caught an error: " + exc_msg + " " + str(catcher))
            cat_next, cat_result = self.find_cat_data(exc, catcher)
            if cat_next != []:
                self._logger.error("[StateUtils] matching catch list entry target and result for this error: " + str(cat_next) + " " + str(cat_result))
                # the catcher's ResultPath replaces the state's ResultPath
                self.result_path_dict['ResultPath'] = cat_result
                ret_value = {"Error": exc_msg, "Cause": "this error caught by MFn ASL Workflow catcher!"}
                if runtime == "java":
                    # do an extra serialization, because we were expecting a java output,
                    # but got a python object
                    val = {}
                    val["value"] = exc_msg
                    exc_msg = json.dumps(val)
                sapi.add_dynamic_next(cat_next, exc_msg)
                return ret_value
            else: # no catcher could be found for this error
                self._logger.error("[StateUtils] Error not caught by MFn ASL Workflow catcher!")
                raise exc
    else: # neither catcher nor retryers are set
        ret_value = self._exec_function(runtime, exec_arguments, sapi)
        return ret_value
def getChoiceResults(self, value_output):
    """Return the evaluated Choice-state target as a one-element list.

    If this state is a Choice state and a next state has been selected by
    evaluateChoiceConditions(), the result is a single-entry list of
    {"next": <target state>, "value": <value_output>}; otherwise an empty list.
    """
    is_resolved_choice = (
        self.functionstatetype == self.choiceStateType and self.choiceNext != ''
    )
    if is_resolved_choice:
        return [{"next": self.choiceNext, "value": value_output}]
    return []
def evaluateChoiceConditions(self, function_input):
    """Evaluate the Choice-state rules and store the target in self.choiceNext."""
    # Clear any previous selection first so a failing evaluation does not
    # leave a stale target behind.
    self.choiceNext = ''
    selected_next = self.evaluateNextState(function_input)
    self.choiceNext = selected_next
    self._logger.debug("[StateUtils] Evaluated Choice condition: " + str(self.choiceNext))
def evaluateMapState(self, function_input, key, metadata, sapi):
    """Fan out a Map state: launch one iterator branch per input element.

    Creates a triggerable counter in the data layer (fired by the storage
    hook when branches finish), stores the workflow metadata needed by the
    post-map step, records per-execution map bookkeeping in ``metadata``,
    and finally schedules the iterator's StartAt state once per element of
    ``function_input``.

    Args:
        function_input: list of elements; one branch is started per element.
        key: execution id used to derive storage/counter key names.
        metadata: workflow metadata dict; extended with a "<state>_<key>_map_info"
            entry and persisted to the data layer.
        sapi: sandbox API used for data-layer puts and branch scheduling.

    Returns:
        (function_input, metadata) — the (unmodified) input and the extended
        metadata.

    Raises:
        Exception: propagated when the triggerable counter cannot be created.
    """
    name_prefix = self.functiontopic + "_" + key
    if "MaxConcurrency" in self.parsedfunctionstateinfo:
        maxConcurrency = self.parsedfunctionstateinfo["MaxConcurrency"]
    else:
        maxConcurrency = 0
        self.parsedfunctionstateinfo["MaxConcurrency"] = maxConcurrency
    # BUGFIX: both branches now assign the same name (was 'mapParamters' in
    # the if-branch, a typo). NOTE(review): the variable is currently unused
    # in this method -- confirm whether Parameters handling is still pending.
    if "Parameters" in self.parsedfunctionstateinfo:
        mapParameters = self.parsedfunctionstateinfo["Parameters"]
    else:
        mapParameters = {}
    self._logger.debug("[StateUtils] evaluateMapState, maxConcurrency: " + str(maxConcurrency))
    self._logger.debug("[StateUtils] evaluateMapState metadata: " + str(metadata))
    counter_name_topic = self.functionstatename + "-" + self.sandboxid
    total_branch_count = len(function_input) # all branches executed concurrently
    klist = [total_branch_count]
    self.parsedfunctionstateinfo["BranchCount"] = int(total_branch_count) # overwrite parsed BranchCount with new value
    self._logger.debug("[StateUtils] evaluateMapState, total_branch_count: " + str(total_branch_count))
    # prepare counter metadata
    counter_metadata = {}
    counter_metadata["__state_action"] = "post_map_processing"
    counter_metadata["__async_execution"] = metadata["__async_execution"]
    workflow_instance_metadata_storage_key = name_prefix + "_workflow_metadata"
    counter_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
    counter_metadata["CounterValue"] = 0 # this should be updated by riak hook
    counter_metadata["Klist"] = klist
    counter_metadata["TotalBranches"] = total_branch_count
    counter_metadata["ExecutionId"] = key
    counter_metadata["FunctionTopic"] = self.functiontopic
    counter_metadata["Endpoint"] = self._internal_endpoint
    iterator = self.parsedfunctionstateinfo["Iterator"]
    counter_name_trigger_metadata = {"k-list": klist, "total-branches": total_branch_count}
    # dynamic values used for generation of branches
    counter_name_key = key
    branch_out_keys = []
    for i in range(total_branch_count):
        branch_out_key = key + "-branch-" + str(i+1)
        branch_out_keys.append(branch_out_key)
    # prepare counter name value metadata
    counter_name_value_metadata = copy.deepcopy(metadata)
    counter_name_value_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
    counter_name_value_metadata["CounterValue"] = 0 # this should be updated by riak hook
    counter_name_value_metadata["__state_action"] = "post_map_processing"
    counter_name_value_metadata["state_counter"] = metadata["state_counter"]
    self._logger.debug("[StateUtils] evaluateMapState, metadata[state_counter]: " + str(metadata["state_counter"]))
    self.mapStateCounter = int(metadata["state_counter"])
    counter_name_value = {"__mfnmetadata": counter_name_value_metadata, "__mfnuserdata": '{}'}
    #CounterName = json.dumps([str(counter_name_topic), str(counter_name_key), counter_name_trigger_metadata, counter_name_value])
    CounterName = str(counter_name_topic) + "-" + str(total_branch_count) + "-" + str(counter_name_key)
    # prepare mapInfo metadata
    workflow_instance_outputkeys_set_key = key +"_"+ self.functionstatename + "_outputkeys_set"
    mapInfo = {}
    mapInfo["CounterTopicName"] = counter_name_topic
    mapInfo["CounterNameKey"] = counter_name_key
    mapInfo["TriggerMetadata"] = counter_name_trigger_metadata
    mapInfo["CounterNameValueMetadata"] = counter_name_value_metadata
    mapInfo["BranchOutputKeys"] = branch_out_keys
    mapInfo["CounterName"] = CounterName
    mapInfo["MaxConcurrency"] = maxConcurrency
    mapInfo["BranchOutputKeysSetKey"] = workflow_instance_outputkeys_set_key
    mapInfo["Klist"] = klist
    mapInfo_key = self.functionstatename + "_" + key + "_map_info"
    metadata[mapInfo_key] = mapInfo
    # create counter for Map equivalent Parallel state
    assert py3utils.is_string(CounterName)
    counterName = str(mapInfo["CounterName"])
    counter_metadata_key_name = counterName + "_metadata"
    try:
        dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
        # create a triggerable counter to start the post-parallel when parallel state finishes
        dlc.createCounter(CounterName, 0, tableName=dlc.countertriggerstable)
        dlc.put(counter_metadata_key_name, json.dumps(counter_metadata), tableName=dlc.countertriggersinfotable)
    except Exception as exc:
        self._logger.error("Exception in creating counter: " + str(exc))
        self._logger.error(exc)
        raise
    finally:
        dlc.shutdown()
    assert py3utils.is_string(workflow_instance_metadata_storage_key)
    self._logger.debug("[StateUtils] full_metadata_encoded put key: " + str(workflow_instance_metadata_storage_key))
    sapi.put(workflow_instance_metadata_storage_key, json.dumps(metadata))
    # Now provide each branch with its own input
    branch = self.parsedfunctionstateinfo["Iterator"] # this is just one set
    # launch a branch for each input element
    startat = str(branch["StartAt"])
    for i in range(len(function_input)):
        sapi.add_dynamic_next(startat, function_input[i]) # Alias for add_workflow_next(self, next, value)
        sapi.put(name_prefix + "_" + "mapStateInputValue", str(function_input[i]))
        sapi.put(name_prefix + "_" + "mapStateInputIndex", str(i))
        self._logger.debug("\t Map State StartAt:" + startat)
        self._logger.debug("\t Map State input:" + str(function_input[i]))
    return function_input, metadata
def evaluatePostMap(self, function_input, key, metadata, sapi):
    """Collect finished Map-branch results and forward the aggregated output.

    Triggered by the data-layer post-commit hook once enough branches have
    incremented the triggerable counter.  Gathers each branch output from the
    data layer (busy-waiting until the value appears), appends the batch to
    the persistent partial-result list, and -- once all map inputs have been
    processed -- applies ResultPath/OutputPath, schedules Next/End, and cleans
    up the per-execution bookkeeping keys.

    Args:
        function_input: hook payload containing "CounterValue" and
            "WorkflowInstanceMetadataStorageKey".
        key: execution id used to derive storage key names.
        metadata: metadata delivered with the hook; must carry
            __state_action == "post_map_processing".
        sapi: sandbox API for data-layer access and scheduling.

    Returns:
        (post_map_output_values, full_metadata) — the aggregated branch outputs
        (or the final published output on the last batch) and the workflow
        metadata restored from the data layer.
    """
    name_prefix = self.functiontopic + "_" + key
    # function is triggered by post-commit hook with metadata containing information about state results in buckets.
    # It collects these results and returns metadata and post_map_output_results
    action = metadata["__state_action"]
    assert action == "post_map_processing"
    counterValue = function_input["CounterValue"]
    state_counter = 0
    if "state_counter" in metadata:
        state_counter = metadata["state_counter"]
    self._logger.debug("\t metadata:" + json.dumps(metadata))
    workflow_instance_metadata_storage_key = str(function_input["WorkflowInstanceMetadataStorageKey"])
    assert py3utils.is_string(workflow_instance_metadata_storage_key)
    full_metadata_encoded = sapi.get(workflow_instance_metadata_storage_key)
    self._logger.debug("[StateUtils] full_metadata_encoded get: " + str(full_metadata_encoded))
    full_metadata = json.loads(full_metadata_encoded)
    full_metadata["state_counter"] = state_counter
    mapInfoKey = self.functionstatename + "_" + key + "_map_info"
    mapInfo = full_metadata[mapInfoKey]
    branchOutputKeysSetKey = str(mapInfo["BranchOutputKeysSetKey"])
    branchOutputKeysSet = sapi.retrieveSet(branchOutputKeysSetKey)
    self._logger.debug("\t branchOutputKeysSet: " + str(branchOutputKeysSet))
    if not branchOutputKeysSet:
        self._logger.error("[StateUtils] branchOutputKeysSet is empty")
        raise Exception("[StateUtils] branchOutputKeysSet is empty")
    klist = mapInfo["Klist"]
    #self._logger.debug("\t action: " + action)
    #self._logger.debug("\t counterValue:" + str(counterValue))
    #self._logger.debug("\t WorkflowInstanceMetadataStorageKey:" + metadata["WorkflowInstanceMetadataStorageKey"])
    #self._logger.debug("\t full_metadata:" + full_metadata_encoded)
    #self._logger.debug("\t mapInfoKey: " + mapInfoKey)
    #self._logger.debug("\t mapInfo:" + json.dumps(mapInfo))
    #self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
    #self._logger.debug("\t branchOutputKeysSet:" + str(branchOutputKeysSet))
    #self._logger.debug("\t klist:" + str(klist))
    # The hook delivers the counter value; its magnitude is the number of
    # branches that have completed so far.
    NumBranchesFinished = abs(counterValue)
    self._logger.debug("\t NumBranchesFinished:" + str(NumBranchesFinished))
    do_cleanup = False
    if klist[-1] == NumBranchesFinished:
        do_cleanup = True
    self._logger.debug("\t do_cleanup:" + str(do_cleanup))
    counterName = str(mapInfo["CounterName"])
    counter_metadata_key_name = counterName + "_metadata"
    assert py3utils.is_string(counterName)
    if do_cleanup:
        assert py3utils.is_string(counterName)
        try:
            dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
            # done with the triggerable counter
            dlc.deleteCounter(counterName, tableName=dlc.countertriggerstable)
            dlc.delete(counter_metadata_key_name, tableName=dlc.countertriggersinfotable)
        except Exception as exc:
            self._logger.error("Exception deleting counter: " + str(exc))
            self._logger.error(exc)
            raise
        finally:
            dlc.shutdown()
    post_map_output_values = []
    self._logger.debug("\t mapInfo_BranchOutputKeys:" + str(mapInfo["BranchOutputKeys"]))
    self._logger.debug("\t mapInfo_BranchOutputKeys length: " + str(len(mapInfo["BranchOutputKeys"])))
    for outputkey in mapInfo["BranchOutputKeys"]:
        outputkey = str(outputkey)
        if outputkey in branchOutputKeysSet: # mapInfo["BranchOutputKeys"]:
            self._logger.debug("\t BranchOutputKey:" + outputkey)
            # busy-wait: the branch has registered its key but the value
            # write may not have landed yet
            while sapi.get(outputkey) == "":
                time.sleep(0.1) # wait until value is available
            branchOutput = sapi.get(outputkey)
            branchOutput_decoded = json.loads(branchOutput)
            self._logger.debug("\t branchOutput(type):" + str(type(branchOutput)))
            self._logger.debug("\t branchOutput:" + branchOutput)
            self._logger.debug("\t branchOutput_decoded(type):" + str(type(branchOutput_decoded)))
            self._logger.debug("\t branchOutput_decoded:" + str(branchOutput_decoded))
            post_map_output_values = post_map_output_values + [branchOutput_decoded]
            if do_cleanup:
                sapi.delete(outputkey) # cleanup the key from data layer
                self._logger.debug("\t cleaned output key:" + outputkey)
        else:
            # branch has not (yet) delivered a result; placeholder keeps
            # output positions aligned with input positions
            post_map_output_values = post_map_output_values + [None]
            self._logger.debug("\t this_BranchOutputKeys is not contained: " + str(outputkey))
    self._logger.debug("\t post_map_output_values:" + str(post_map_output_values))
    while (sapi.get(name_prefix + "_" + "mapStatePartialResult")) == "":
        time.sleep(0.1) # wait until value is available
    mapStatePartialResult = ast.literal_eval(sapi.get(name_prefix + "_" + "mapStatePartialResult"))
    mapStatePartialResult += post_map_output_values
    sapi.put(name_prefix + "_" + "mapStatePartialResult", str(mapStatePartialResult))
    # now apply ResultPath and OutputPath
    if do_cleanup:
        sapi.deleteSet(branchOutputKeysSetKey)
    if ast.literal_eval(sapi.get(name_prefix + "_" + "mapInputCount")) == len(mapStatePartialResult):
        # we are ready to publish but need to honour ResultPath and OutputPath
        res_raw = ast.literal_eval(sapi.get(name_prefix + "_" +"mapStatePartialResult"))
        # remove unwanted keys from input before publishing
        function_input = {}
        function_input_post_result = self.applyResultPath(function_input, res_raw)
        function_input_post_output = self.applyResultPath(function_input_post_result, function_input_post_result)
        if "Next" in self.parsedfunctionstateinfo:
            if self.parsedfunctionstateinfo["Next"]:
                sapi.add_dynamic_next(self.parsedfunctionstateinfo["Next"], function_input_post_output )
        if "End" in self.parsedfunctionstateinfo:
            if self.parsedfunctionstateinfo["End"]:
                sapi.add_dynamic_next("end", function_input_post_output)
        # all inputs processed: drop the per-execution bookkeeping keys
        sapi.delete(name_prefix + "_" + "mapInputCount")
        sapi.delete(name_prefix + "_" + "mapStateInputIndex")
        sapi.delete(name_prefix + "_" + "mapStateInputValue")
        sapi.delete(name_prefix + "_" + "mapStatePartialResult")
        sapi.delete(name_prefix + "_" + "tobeProcessedlater")
        post_map_output_values = function_input_post_output
    return post_map_output_values, full_metadata
def evaluateParallelState(self, function_input, key, metadata, sapi):
    """Fan out a Parallel state: start every branch with the same input.

    Validates the optional WaitForNumBranches list (sorted integers, none
    larger than the branch count), creates a triggerable counter in the data
    layer that fires post-parallel processing when branches complete, records
    per-execution parallel bookkeeping in ``metadata``, persists the metadata,
    and schedules each branch's StartAt state.

    Args:
        function_input: state input, passed unchanged to every branch.
        key: execution id used to derive storage/counter key names.
        metadata: workflow metadata dict; extended with a
            "<state>_<key>_parallel_info" entry and persisted.
        sapi: sandbox API used for data-layer puts and branch scheduling.

    Returns:
        (function_input, metadata) — the (unmodified) input and the extended
        metadata.

    Raises:
        Exception: on invalid WaitForNumBranches or counter-creation failure.
    """
    name_prefix = self.functiontopic + "_" + key
    total_branch_count = self.parsedfunctionstateinfo["BranchCount"]
    assert total_branch_count == len(self.parsedfunctionstateinfo["Branches"])
    klist = []
    if "WaitForNumBranches" in self.parsedfunctionstateinfo:
        klist = self.parsedfunctionstateinfo["WaitForNumBranches"]
        if not isinstance(klist, list):
            self._logger.info("(StateUtils) WaitForNumBranches must be a sorted list with 1 or more integers")
            raise Exception("(StateUtils) WaitForNumBranches must be a sorted list with 1 or more integers")
        klist.sort()
        for k in klist:
            if not isinstance(k, int):
                self._logger.info("(StateUtils) Values inside WaitForNumBranches must be integers")
                raise Exception("(StateUtils) Values inside WaitForNumBranches must be integers")
            if k > total_branch_count:
                self._logger.info("(StateUtils) Values inside WaitForNumBranches list cannot be greater than the number of branches in the parallel state")
                raise Exception("(StateUtils) Values inside WaitForNumBranches list cannot be greater than the number of branches in the parallel state")
    else:
        # default: wait for all branches
        klist.append(total_branch_count)
    counter_name_topic = self.functionstatename + "-" + self.sandboxid
    counter_name_trigger_metadata = {"k-list": klist, "total-branches": total_branch_count}
    counter_name_key = key
    # dynamic values
    branch_out_keys = []
    for i in range(total_branch_count):
        branch_out_key = name_prefix + "_branch_" + str(i+1)
        branch_out_keys.append(branch_out_key)
    # prepare counter metadata
    counter_metadata = {}
    counter_metadata["__state_action"] = "post_parallel_processing"
    counter_metadata["__async_execution"] = metadata["__async_execution"]
    workflow_instance_metadata_storage_key = name_prefix + "_workflow_metadata"
    counter_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
    counter_metadata["CounterValue"] = 0 # this should be updated by riak hook
    counter_metadata["Klist"] = klist
    counter_metadata["TotalBranches"] = total_branch_count
    counter_metadata["ExecutionId"] = key
    counter_metadata["FunctionTopic"] = self.functiontopic
    counter_metadata["Endpoint"] = self._internal_endpoint
    # prepare counter name value metadata
    counter_name_value_metadata = copy.deepcopy(metadata)
    counter_name_value_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
    counter_name_value_metadata["CounterValue"] = 0 # this should be updated by riak hook
    counter_name_value_metadata["__state_action"] = "post_parallel_processing"
    counter_name_value_metadata["state_counter"] = metadata["state_counter"]
    counter_name_value = {"__mfnmetadata": counter_name_value_metadata, "__mfnuserdata": '{}'}
    #CounterName = json.dumps([str(counter_name_topic), str(counter_name_key), counter_name_trigger_metadata, counter_name_value])
    CounterName = str(counter_name_topic) + "-" + str(total_branch_count) + "-" + str(counter_name_key)
    #CounterName = name_prefix + "_counter"
    counter_metadata_key_name = CounterName + "_metadata"
    workflow_instance_outputkeys_set_key = name_prefix + "_outputkeys_set"
    # prepare parallelInfo metadata
    parallelInfo = {}
    parallelInfo["CounterName"] = CounterName
    parallelInfo["BranchOutputKeys"] = branch_out_keys
    parallelInfo["BranchOutputKeysSetKey"] = workflow_instance_outputkeys_set_key
    parallelInfo["Klist"] = klist
    parallelInfo["TotalBranches"] = total_branch_count
    parallelInfo["ExecutionId"] = key
    parallelInfo["FunctionTopic"] = self.functiontopic
    parallelInfo["Endpoint"] = self._internal_endpoint
    parallelInfo_key = self.functionstatename + "_" + key + "_parallel_info"
    metadata[parallelInfo_key] = parallelInfo
    assert py3utils.is_string(CounterName)
    try:
        dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
        # create a triggerable counter to start the post-parallel when parallel state finishes
        dlc.createCounter(CounterName, 0, tableName=dlc.countertriggerstable)
        dlc.put(counter_metadata_key_name, json.dumps(counter_metadata), tableName=dlc.countertriggersinfotable)
    except Exception as exc:
        self._logger.error("Exception in creating counter: " + str(exc))
        self._logger.error(exc)
        raise
    finally:
        dlc.shutdown()
    assert py3utils.is_string(workflow_instance_metadata_storage_key)
    sapi.put(workflow_instance_metadata_storage_key, json.dumps(metadata))
    branches = self.parsedfunctionstateinfo["Branches"]
    for branch in branches:
        startat = str(branch["StartAt"])
        sapi.add_dynamic_next(startat, function_input)
    return function_input, metadata
def processBranchTerminalState(self, key, value_output, metadata, sapi):
    """Publish a terminal branch's output and bump the parent's counter.

    Called when a state marked End terminates inside a Parallel or Map branch.
    Looks up the parent's bookkeeping (parallel_info / map_info) in
    ``metadata``, stores ``value_output`` under the branch's output key,
    registers the key in the output-key set, and increments the parent's
    triggerable counter so post-processing can fire.

    Args:
        key: execution id used to derive the parent's info key.
        value_output: serialized branch output to publish.
        metadata: workflow metadata carrying the parent info entry.
        sapi: sandbox API for data-layer writes.

    Raises:
        Exception: when the expected parent info entry is missing, or on
            counter-increment failure.
    """
    if 'End' not in self.parsedfunctionstateinfo:
        return
    if self.parsedfunctionstateinfo["End"] and "ParentParallelInfo" in self.parsedfunctionstateinfo:
        parentParallelInfo = self.parsedfunctionstateinfo["ParentParallelInfo"]
        parallelName = parentParallelInfo["Name"]
        branchCounter = parentParallelInfo["BranchCounter"]
        #self._logger.debug("[StateUtils] processBranchTerminalState: ")
        #self._logger.debug("\t ParentParallelInfo:" + json.dumps(parentParallelInfo))
        #self._logger.debug("\t parallelName:" + parallelName)
        #self._logger.debug("\t branchCounter: " + str(branchCounter))
        #self._logger.debug("\t key:" + key)
        #self._logger.debug("\t metadata:" + json.dumps(metadata))
        #self._logger.debug("\t value_output(type):" + str(type(value_output)))
        #self._logger.debug("\t value_output:" + value_output)
        parallelInfoKey = parallelName + "_" + key + "_parallel_info"
        #self._logger.debug("\t parallelInfoKey:" + parallelInfoKey)
        if parallelInfoKey in metadata:
            parallelInfo = metadata[parallelInfoKey]
            counterName = str(parallelInfo["CounterName"])
            branchOutputKeys = parallelInfo["BranchOutputKeys"]
            # BranchCounter is 1-based; BranchOutputKeys is 0-indexed
            branchOutputKey = str(branchOutputKeys[branchCounter-1])
            branchOutputKeysSetKey = str(parallelInfo["BranchOutputKeysSetKey"])
            #self._logger.debug("\t branchOutputKey:" + branchOutputKey)
            #self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
            assert py3utils.is_string(branchOutputKey)
            sapi.put(branchOutputKey, value_output)
            assert py3utils.is_string(branchOutputKeysSetKey)
            sapi.addSetEntry(branchOutputKeysSetKey, branchOutputKey)
            assert py3utils.is_string(counterName)
            try:
                dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
                # increment the triggerable counter
                dlc.incrementCounter(counterName, 1, tableName=dlc.countertriggerstable)
            except Exception as exc:
                self._logger.error("Exception incrementing counter: " + str(exc))
                self._logger.error(exc)
                raise
            finally:
                dlc.shutdown()
        else:
            self._logger.error("[StateUtils] processBranchTerminalState Unable to find ParallelInfo")
            raise Exception("processBranchTerminalState Unable to find ParallelInfo")
    if self.parsedfunctionstateinfo["End"] and "ParentMapInfo" in self.parsedfunctionstateinfo:
        parentMapInfo = self.parsedfunctionstateinfo["ParentMapInfo"]
        mapName = parentMapInfo["Name"]
        mapInfoKey = mapName + "_" + key + "_map_info"
        branchCounter = parentMapInfo["BranchCounter"]
        #self._logger.debug("[StateUtils] processBranchTerminalState: ")
        #self._logger.debug("\t ParentMapInfo:" + json.dumps(parentMapInfo))
        #self._logger.debug("\t mapName:" + mapName)
        #self._logger.debug("\t branchCounter: " + str(branchCounter))
        #self._logger.debug("\t key:" + key)
        #self._logger.debug("\t metadata:" + json.dumps(metadata))
        #self._logger.debug("\t value_output(type):" + str(type(value_output)))
        #self._logger.debug("\t value_output:" + value_output)
        if mapInfoKey in metadata:
            mapInfo = metadata[mapInfoKey]
            rest = metadata["__function_execution_id"].split("_")[1:]
            # NOTE(review): if no segment contains "-M", current_index below is
            # never bound and the next use raises NameError -- confirm every
            # map-branch execution id always carries an "-M" marker.
            for codes in rest: # find marker for map state and use it to calculate current index
                if "-M" in codes:
                    index = rest.index(codes)
                    current_index = int(rest[index].split("-M")[0])
            self._logger.debug("[StateUtils] current_index: " + str(current_index))
            if mapInfo["MaxConcurrency"] != 0:
                # with bounded concurrency the index wraps per batch
                current_index = current_index % int(mapInfo["MaxConcurrency"])
            counterName = str(mapInfo["CounterName"])
            branchOutputKeys = mapInfo["BranchOutputKeys"]
            branchOutputKey = str(branchOutputKeys[current_index])
            branchOutputKeysSetKey = str(mapInfo["BranchOutputKeysSetKey"])
            self._logger.debug("\t branchOutputKey:" + branchOutputKey)
            self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
            assert py3utils.is_string(branchOutputKey)
            sapi.put(branchOutputKey, value_output)
            assert py3utils.is_string(branchOutputKeysSetKey)
            sapi.addSetEntry(branchOutputKeysSetKey, branchOutputKey)
            assert py3utils.is_string(counterName)
            try:
                dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
                # increment the triggerable counter
                dlc.incrementCounter(counterName, 1, tableName=dlc.countertriggerstable)
            except Exception as exc:
                self._logger.error("Exception incrementing counter: " + str(exc))
                self._logger.error(exc)
                raise
            finally:
                dlc.shutdown()
        else:
            self._logger.error("[StateUtils] processBranchTerminalState Unable to find MapInfo")
            raise Exception("processBranchTerminalState Unable to find MapInfo")
def evaluatePostParallel(self, function_input, key, metadata, sapi):
    """Collect finished Parallel-branch results and forward them to Next/End.

    Triggered by the data-layer post-commit hook. Restores the workflow
    metadata, reads every branch's output key (busy-waiting until each value
    appears, None for branches without a registered key), optionally cleans up
    the counter and keys once all branches finished, and schedules the
    aggregated result list to the Next state and/or "end".

    Args:
        function_input: hook payload with "CounterValue" and
            "WorkflowInstanceMetadataStorageKey".
        key: execution id used to derive the parallel-info key.
        metadata: hook metadata; must carry
            __state_action == "post_parallel_processing".
        sapi: sandbox API for data-layer access and scheduling.

    Returns:
        (function_input, full_metadata) — the hook payload and the restored
        workflow metadata.
    """
    action = metadata["__state_action"]
    assert action == "post_parallel_processing"
    counterValue = function_input["CounterValue"]
    workflow_instance_metadata_storage_key = str(function_input["WorkflowInstanceMetadataStorageKey"])
    assert py3utils.is_string(workflow_instance_metadata_storage_key)
    full_metadata_encoded = sapi.get(workflow_instance_metadata_storage_key)
    full_metadata = json.loads(full_metadata_encoded)
    parallelInfoKey = self.functionstatename + "_" + key + "_parallel_info"
    parallelInfo = full_metadata[parallelInfoKey]
    branchOutputKeysSetKey = str(parallelInfo["BranchOutputKeysSetKey"])
    branchOutputKeysSet = sapi.retrieveSet(branchOutputKeysSetKey)
    if not branchOutputKeysSet:
        self._logger.error("[StateUtils] branchOutputKeysSet is empty")
        raise Exception("[StateUtils] branchOutputKeysSet is empty")
    klist = parallelInfo["Klist"]
    # counter magnitude == number of branches completed so far
    NumBranchesFinished = abs(counterValue)
    do_cleanup = False
    if klist[-1] == NumBranchesFinished:
        do_cleanup = True
    counterName = str(parallelInfo["CounterName"])
    assert py3utils.is_string(counterName)
    counter_metadata_key_name = counterName + "_metadata"
    if do_cleanup:
        assert py3utils.is_string(counterName)
        try:
            dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
            # done with the triggerable counter
            dlc.deleteCounter(counterName, tableName=dlc.countertriggerstable)
            dlc.delete(counter_metadata_key_name, tableName=dlc.countertriggersinfotable)
        except Exception as exc:
            self._logger.error("Exception deleting counter: " + str(exc))
            self._logger.error(exc)
            raise
        finally:
            dlc.shutdown()
        sapi.delete(workflow_instance_metadata_storage_key)
    post_parallel_output_values = []
    for outputkey in parallelInfo["BranchOutputKeys"]:
        outputkey = str(outputkey)
        if outputkey in branchOutputKeysSet:
            # busy-wait: key is registered but the value write may lag
            while sapi.get(outputkey) == "":
                time.sleep(0.1) # wait until value is available
            branchOutput = sapi.get(outputkey)
            branchOutput_decoded = json.loads(branchOutput)
            post_parallel_output_values = post_parallel_output_values + [branchOutput_decoded]
            if do_cleanup:
                sapi.delete(outputkey) # cleanup the key from data layer
        else:
            # unfinished branch: placeholder keeps positions aligned
            post_parallel_output_values = post_parallel_output_values + [None]
    if do_cleanup:
        sapi.deleteSet(branchOutputKeysSetKey)
    if "Next" in self.parsedfunctionstateinfo:
        sapi.add_dynamic_next(self.parsedfunctionstateinfo["Next"], post_parallel_output_values)
    if "End" in self.parsedfunctionstateinfo:
        if self.parsedfunctionstateinfo["End"]:
            sapi.add_dynamic_next("end", post_parallel_output_values)
    return function_input, full_metadata
def evaluateNonTaskState(self, function_input, key, metadata, sapi):
    """Dispatch execution of a non-Task ASL state.

    Handles Choice, Wait, Pass, Succeed, Fail, Parallel and Map state types
    based on self.functionstatetype. Choice evaluates its rules and records
    the target; Wait sleeps according to Seconds/SecondsPath/Timestamp/
    TimestampPath; Pass forwards the input (or its Result field); Parallel
    and Map delegate to the fan-out or post-processing helpers depending on
    metadata["__state_action"].

    Args:
        function_input: the state's input value.
        key: execution id, forwarded to the Parallel/Map helpers.
        metadata: workflow metadata; may carry "__state_action" markers.
        sapi: sandbox API, forwarded to the Parallel/Map helpers.

    Returns:
        (function_output, metadata) for the evaluated state.

    Raises:
        Exception: on missing Wait fields, past timestamps, unmatched JSON
            paths, or an unknown state/action type.
    """
    # 3. Evaluate Non Task states
    #self._logger.debug("[StateUtils] NonTask state type: " + str(self.functionstatetype))
    #self._logger.debug("[StateUtils] Welcome to evaluateNonTaskState! Current key:" + str(key))
    function_output = None
    if self.functionstatetype == StateUtils.choiceStateType:
        #self._logger.debug("[StateUtils] Choice state info:" + str(self.functionstateinfo))
        self.evaluateChoiceConditions(function_input) # this sets chosen Next state
        #self._logger.debug("[StateUtils] Choice state Next:" + str(self.choiceNext))
        function_output = function_input # output of the Choice state
    elif self.functionstatetype == StateUtils.waitStateType:
        #self._logger.debug("[StateUtils] Wait state info:" + str(self.functionstateinfo))
        function_output = function_input
        if "Seconds" in list(json.loads(self.functionstateinfo).keys()):
            wait_state_seconds = json.loads(self.functionstateinfo)['Seconds']
            #self._logger.debug("[StateUtils] Wait state seconds:" + str(wait_state_seconds))
            time.sleep(float(wait_state_seconds))
        elif "SecondsPath" in list(json.loads(self.functionstateinfo).keys()):
            wait_state_secondspath = json.loads(self.functionstateinfo)['SecondsPath']
            #self._logger.debug("[StateUtils] Wait state secondspath:" + str(wait_state_secondspath))
            # resolve the JSONPath against the state input
            wait_state_secondspath_data = [match.value for match in parse(wait_state_secondspath).find(function_input)]
            if wait_state_secondspath_data == []:
                #self._logger.exception("[StateUtils] Wait state timestamppath does not match: " + str(wait_state_secondspath))
                raise Exception("Wait state timestamppath does not match")
            #self._logger.debug("[StateUtils] Wait state timestamppath data parsed:" + str(wait_state_secondspath_data[0]))
            time.sleep(float(wait_state_secondspath_data[0]))
        elif "Timestamp" in list(json.loads(self.functionstateinfo).keys()):
            wait_state_timestamp = json.loads(self.functionstateinfo)['Timestamp']
            #self._logger.debug("[StateUtils] Wait state timestamp:" + str(wait_state_timestamp))
            target_time = datetime.strptime(str(wait_state_timestamp), "%Y-%m-%dT%H:%M:%SZ")
            current_time = datetime.utcnow()
            #self._logger.debug("[StateUtils] Wait state timestamp difference" + str(current_time) + str(target_time))
            remaining = (target_time - current_time).total_seconds()
            #self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining))
            remaining_time = float(remaining)
            if remaining_time > 0:
                time.sleep(remaining_time)
            else:
                # NOTE: past Timestamp only logs; past TimestampPath raises
                self._logger.error("[StateUtils] Wait state timestamp target lies in the past!" + str(wait_state_timestamp))
        elif "TimestampPath" in list(json.loads(self.functionstateinfo).keys()):
            wait_state_timestamppath = json.loads(self.functionstateinfo)['TimestampPath']
            self._logger.debug("[StateUtils] Wait state timestamppath:" + str(wait_state_timestamppath))
            # need to communicate with datalayer for definition of trigger for hibernating/resuming task
            wait_state_timestamppath_data = [match.value for match in parse(wait_state_timestamppath).find(function_input)]
            if wait_state_timestamppath_data == []:
                #self._logger.exception("[StateUtils] Wait state timestamp_path does not match: " + str(wait_state_timestamppath))
                raise Exception("Wait state timestamp_path does not match")
            self._logger.debug("[StateUtils] Wait state timestamppath data parsed:" + str(wait_state_timestamppath_data[0]))
            target_time = datetime.strptime(str(wait_state_timestamppath_data[0]), "%Y-%m-%dT%H:%M:%SZ")
            self._logger.debug("[StateUtils] Wait state timestamp data" + str(target_time))
            current_time = datetime.utcnow()
            self._logger.debug("[StateUtils] Wait state timestamp difference" + str(current_time) + str(target_time))
            remaining = (target_time - current_time).total_seconds()
            self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining))
            remaining_time = float(remaining)
            self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining_time))
            if remaining_time > 0:
                time.sleep(remaining_time)
            else:
                self._logger.error("[StateUtils] Wait state timestamp target lies in the past!" + str(wait_state_timestamppath_data[0]))
                raise Exception("Wait state timestamp target lies in the past!" + str(wait_state_timestamppath_data[0]))
        else:
            raise Exception("Wait state: Missing required field")
    elif self.functionstatetype == StateUtils.passStateType:
        self._logger.debug("[StateUtils] Pass state handling, received value:" + str(function_input))
        function_output = function_input
        # NOTE: substring check on the raw JSON text, not a parsed key lookup
        if "Result" in self.functionstateinfo:
            pass_state_result = json.loads(self.functionstateinfo)['Result']
            self._logger.debug("[StateUtils] Pass state result:" + str(pass_state_result))# self.functionstateinfo['Result']))
            function_output = pass_state_result
    elif self.functionstatetype == StateUtils.succeedStateType:
        function_output = function_input
    elif self.functionstatetype == StateUtils.failStateType:
        self._logger.debug("[StateUtils] Fail state handling, received value:" + str(function_input))
        self._logger.debug("[StateUtils] Fail state handling, received metadata:" + str(metadata))
        if "Cause" in self.functionstateinfo:
            fail_state_cause = json.loads(self.functionstateinfo)['Cause']
            self._logger.debug("[StateUtils] Fail state cause info:" + str(fail_state_cause))
        if "Error" in self.functionstateinfo:
            error_state_error = json.loads(self.functionstateinfo)['Error']
            self._logger.debug("[StateUtils] Fail state error info:" + str(error_state_error))
        function_output = function_input
    elif self.functionstatetype == StateUtils.parallelStateType:
        self._logger.debug("[StateUtils] Parallel state handling function_input: " + str(function_input))
        self._logger.debug("[StateUtils] Parallel state handling metadata: " + str(metadata))
        self._logger.debug("[StateUtils] Parallel state handling")
        if "__state_action" not in metadata or metadata["__state_action"] != "post_parallel_processing":
            function_output, metadata = self.evaluateParallelState(function_input, key, metadata, sapi)
        else:
            if metadata["__state_action"] == "post_parallel_processing":
                function_output, metadata = self.evaluatePostParallel(function_input, key, metadata, sapi)
    elif self.functionstatetype == StateUtils.mapStateType:
        name_prefix = self.functiontopic + "_" + key
        self._logger.debug("[StateUtils] Map state handling function_input: " + str(function_input))
        self._logger.debug("[StateUtils] Map state handling metadata: " + str(metadata))
        if "MaxConcurrency" in self.parsedfunctionstateinfo.keys():
            maxConcurrency = int(self.parsedfunctionstateinfo["MaxConcurrency"])
        else:
            maxConcurrency = 0
        self._logger.debug("[StateUtils] Map state maxConcurrency: " + str(maxConcurrency))
        self._logger.debug("[StateUtils] Map state handling")
        if "__state_action" not in metadata or metadata["__state_action"] != "post_map_processing":
            # here we start the iteration process on a first batch
            if maxConcurrency != 0:
                tobeProcessednow = function_input[:maxConcurrency] # take the first maxConcurrency elements
                tobeProcessedlater = function_input[maxConcurrency:] # keep the remaining elements for later
            else:
                tobeProcessednow = function_input
                tobeProcessedlater = []
            self._logger.debug("[StateUtils] Map state function_input split:" + str(tobeProcessednow) + " " + str(tobeProcessedlater))
            sapi.put(name_prefix + "_" + "tobeProcessedlater", str(tobeProcessedlater)) # store elements to be processed on DL
            sapi.put(name_prefix + "_" + "mapStatePartialResult", "[]") # initialise the collector variable
            sapi.put(name_prefix + "_" + "mapInputCount", str(len(function_input)))
            function_output, metadata = self.evaluateMapState(tobeProcessednow, key, metadata, sapi)
        elif metadata["__state_action"] == "post_map_processing":
            tobeProcessedlater = ast.literal_eval(sapi.get(name_prefix + "_" + "tobeProcessedlater")) # get all elements that have not yet been processed
            self._logger.debug("[StateUtils] Map state post_map processing input:" + str(tobeProcessedlater))
            # we need to decide at this point if there is a need for more batches. if so:
            if len(tobeProcessedlater) > 0: # we need to start another batch
                function_output, metadata2 = self.evaluatePostMap(function_input, key, metadata, sapi) # take care not to overwrite metadata
                function_output, metadata = self.evaluateMapState(tobeProcessedlater[:maxConcurrency], key, metadata, sapi) # start a new batch
                sapi.put(name_prefix + "_" + "tobeProcessedlater", str(tobeProcessedlater[maxConcurrency:])) # store remaining elements to be processed on DL
            else:# no more batches required. we are at the iteration end, publish the final result
                self._logger.debug("[StateUtils] Map state input final stage: " + str(function_input))
                function_output, metadata = self.evaluatePostMap(function_input, key, metadata, sapi)
        else:
            raise Exception("Unknow action type in map state")
    else:
        raise Exception("Unknown state type")
    return function_output, metadata
def applyResultPath(self, raw_state_input, function_output):
    """Apply the state's ResultPath (ASL semantics) to combine input and output.

    Per self.result_path_dict['ResultPath']:
      '$'  -> the task output replaces the state data (default)
      None -> the task output is discarded, raw input is kept
      path -> the task output is grafted into raw_state_input at that path
    With no ResultPath configured, the task output passes through unchanged.

    Raises:
        Exception: wrapping any failure from the path processing, with the
        original exception chained as the cause.
    """
    try:
        if self.result_path_dict and 'ResultPath' in self.result_path_dict:
            return self.process_result_path(self.result_path_dict, raw_state_input, function_output)
        # No ResultPath configured: the task output becomes the state data.
        return function_output
    except Exception as exc:
        # Chain the original exception so the root cause survives the re-wrap.
        raise Exception("Result path processing exception: " + str(exc)) from exc
def applyOutputPath(self, raw_state_input_midway):
    """Apply the state's OutputPath (ASL semantics) to filter the state output.

    Per self.output_path_dict['OutputPath']:
      '$'           -> pass raw_state_input_midway through unchanged (default)
      None          -> produce an empty dict
      existing path -> select that sub-document
      missing path  -> the path processor raises
    With no OutputPath configured, the input passes through unchanged.

    Raises:
        Exception: wrapping any failure from the path processing, with the
        original exception chained as the cause.
    """
    try:
        if self.output_path_dict and 'OutputPath' in self.output_path_dict:
            return self.process_output_path(self.output_path_dict, raw_state_input_midway)
        # No OutputPath configured: state data passes through unchanged.
        return raw_state_input_midway
    except Exception as exc:
        # Chain the original exception so the root cause survives the re-wrap.
        raise Exception("Output path processing exception: " + str(exc)) from exc
def parse_function_state_info(self):
    """Parse self.functionstateinfo (an ASL state definition in JSON) into this
    object's per-state-type fields: the InputPath/OutputPath/ResultPath dicts,
    Parameters, Catch/Retry lists and — for Choice states — one anytree
    expression tree per choice rule (appended to self.parsed_trees).

    No-op for the default (plain task) state type.
    """
    if self.functionstatetype == StateUtils.defaultStateType:
        #self._logger.debug("Task_SAND state parsing. Not parsing further")
        return
    else:
        self.parsedfunctionstateinfo = json.loads(self.functionstateinfo)
    statedef = self.parsedfunctionstateinfo
    statetype = self.functionstatetype
    # Sanity check: declared type must match the definition's 'Type' field.
    assert statetype == statedef['Type']
    if statetype == StateUtils.waitStateType:
        self._logger.debug("Wait state parsing")
    if statetype == StateUtils.failStateType:
        self._logger.debug("Fail state parsing")
    if statetype == StateUtils.succeedStateType:
        self._logger.debug("Succeed state parsing")
    if statetype == StateUtils.taskStateType:
        #self._logger.debug("Task state parsing")
        if "InputPath" in statedef: # read the I/O Path dicts
            self.input_path_dict['InputPath'] = statedef['InputPath']
            #self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
        if "OutputPath" in statedef:
            self.output_path_dict['OutputPath'] = statedef['OutputPath']
            #self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
        if "ResultPath" in statedef:
            self.result_path_dict['ResultPath'] = statedef['ResultPath']
        if "Parameters" in statedef:
            self.parameters_dict['Parameters'] = statedef['Parameters']
            self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
        if "Catch" in statedef:
            self.catcher_list = statedef['Catch']
            # parse it once and store it
            self.catcher_list = ast.literal_eval(str(self.catcher_list))
            #self._logger.debug("found Catchers: " + str(self.catcher_list))
        if "Retry" in statedef:
            self.retry_list = statedef['Retry']
            # parse it once and store it
            self.retry_list = ast.literal_eval(str(self.retry_list))
            #self._logger.debug("found Retry: " + str(self.retry_list))
    if statetype == StateUtils.choiceStateType:
        #self._logger.debug("Choice state parsing")
        if "InputPath" in statedef:
            self.input_path_dict['InputPath'] = statedef['InputPath']
            self._logger.debug("found InputPath: " + json.dumps(statedef['InputPath']))
        if "OutputPath" in statedef:
            self.output_path_dict['OutputPath'] = statedef['OutputPath']
            self._logger.debug("found OutputPath: " + json.dumps(statedef['OutputPath']))
        if "ResultPath" in statedef:
            self.result_path_dict['ResultPath'] = statedef['ResultPath']
            self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
        self._logger.debug("Choice state rules: " + json.dumps(statedef))
        if "Default" in statedef:
            self.default_next_choice.append(statedef["Default"])
            self._logger.debug("DefaultTarget: " + str(self.default_next_choice))
        choices_list = statedef['Choices'] # get the choice rule list for this state
        self._logger.debug("Choice state rules list: " + str(choices_list))
        key_dict = {} # parse the choice rule list into an expression tree
        for choices in choices_list:
            self._logger.debug("Choice state rule element processed: " + json.dumps(list(choices.keys())))
            #self._logger.debug("converted_function_output: " + str(converted_function_output))
            operator_counter = 0
            if ("Not" in list(choices.keys())) or ("And" in list(choices.keys())) or ("Or" in list(choices.keys())):
                operator_counter += 1
            if operator_counter == 0: # No operators, so no recursive evaluation required
                # Leaf rule: self.traverse records "<Next>/<rule-dict>" in nodelist,
                # from which a two-node tree (target -> rule) is built.
                self.traverse(choices['Next'], choices)
                hostname = self.nodelist[-1].split("/")[0]
                childname = self.nodelist[-1].split("/")[1]
                previousnode = anytree.Node(choices['Next'])
                root = previousnode
                key_dict[hostname] = previousnode
                previousnode = anytree.Node(childname, parent=previousnode) # key_dict[hostname])
                #evalname = ast.literal_eval(str(previousnode.name))
            else: # operator detected, we need to traverse the choice rule tree
                self.traverse(choices['Next'], choices)
                nodename = self.nodelist[-1].split("/")[0]
                previousnode = anytree.Node(nodename)
                root = previousnode
                key_dict[self.nodelist[-1].split("/{")[0]] = previousnode
                no_childs = 1 # we already have attached the root
                for i in range(len(self.nodelist)): # count the nodes in the choice rule tree which do not have childs
                    children = self.nodelist[-(i+1)].split("/")[-1]
                    # NOTE(review): strip("") strips nothing (empty char set) —
                    # presumably strip() for whitespace was intended; confirm.
                    if children.strip("") == "{}":
                        no_childs += 1
                for i in range(no_childs):
                    nodename = self.nodelist[-(i+2)].split("/")[i+1]
                    previousnode = anytree.Node(nodename, parent=previousnode)
                    key_dict[self.nodelist[-(i+2)].split("/{")[0]] = previousnode
                # from now on we have to attach the children expressions
                for i in range(len(self.nodelist)-no_childs):
                    childname = self.nodelist[-(i+no_childs+1)].split("/")[-1]
                    hostname = self.nodelist[-(i+no_childs+1)].split("/{")[0]
                    previousnode = anytree.Node(childname, key_dict[hostname])
            #self._logger.debug("Resulting Rendered Tree: " + str(anytree.RenderTree(root)))
            self.parsed_trees.append(root)
    if statetype == StateUtils.passStateType:
        self._logger.debug("[StateUtils] Pass state parsing")
        if "InputPath" in statedef:
            self.input_path_dict['InputPath'] = statedef['InputPath']
            self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
        if "OutputPath" in statedef:
            self.output_path_dict['OutputPath'] = statedef['OutputPath']
            self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
        if "ResultPath" in statedef:
            self.result_path_dict['ResultPath'] = statedef['ResultPath']
            self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
        if "Parameters" in statedef:
            self.parameters_dict['Parameters'] = statedef['Parameters']
            self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
    if statetype == StateUtils.parallelStateType:
        #self._logger.debug("[StateUtils] Parallel state parsing")
        if "InputPath" in statedef:
            self.input_path_dict['InputPath'] = statedef['InputPath']
            self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
        if "OutputPath" in statedef:
            self.output_path_dict['OutputPath'] = statedef['OutputPath']
            self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
        if "ResultPath" in statedef:
            self.result_path_dict['ResultPath'] = statedef['ResultPath']
            self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
        if "Parameters" in statedef:
            self.parameters_dict['Parameters'] = statedef['Parameters']
            self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
    if statetype == StateUtils.mapStateType:
        #self._logger.debug("[StateUtils] Parallel state parsing")
        if "InputPath" in statedef:
            self.input_path_dict['InputPath'] = statedef['InputPath']
            self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
        if "ItemsPath" in statedef:
            self.items_path_dict['ItemsPath'] = statedef['ItemsPath']
            self._logger.debug("found ItemsPath: " + json.dumps(self.items_path_dict['ItemsPath']))
        if "ResultPath" in statedef:
            self.result_path_dict['ResultPath'] = statedef['ResultPath']
            self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
        if "OutputPath" in statedef:
            self.output_path_dict['OutputPath'] = statedef['OutputPath']
            self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
        if "Parameters" in statedef:
            self.parameters_dict['Parameters'] = statedef['Parameters']
            self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
def EvaluateNode(self, node):
    """
    Recursively parse the expression tree starting from given node into a
    python boolean-expression string.

    Leaf nodes hold a stringified choice-rule dict; operator nodes are named
    "Not", "And" or "Or". Returns a parenthesized expression string suitable
    for eval() in evaluateNextState().
    """
    if not node.children: # this is a leaf node
        # Round-trip through ast/json so the single-quoted repr becomes
        # canonical double-quoted JSON that self.evaluate() can json.loads().
        evalname = json.dumps(ast.literal_eval(str(node.name)))
        #type(evalname) == int or type(evalname) == float:
        ev_expr = "(" + self.evaluate(evalname) + ")"
        return ev_expr
    else: #node is an operator
        if node.name == "Not": # there can be only one child
            # NOTE(review): this branch evaluates the child directly instead of
            # recursing via EvaluateNode, so a nested operator under "Not"
            # (e.g. Not(And(...))) would not be handled — confirm whether that
            # rule shape can occur in practice.
            child = node.children[0]
            evalname = json.dumps(ast.literal_eval(str(child.name)))
            ev_expr = self.evaluate(evalname)
            return "not (%s)" % ev_expr
        if node.name == "And": # collect all children recursively
            child_and_array = []
            for child in node.children:
                child_and_array.append(self.EvaluateNode(child))
            returnstr = "(" + " and ".join(child_and_array) + ")"
            return returnstr
        if node.name == "Or": # collect all children recursively
            child_or_array = []
            for child in node.children:
                child_or_array.append(self.EvaluateNode(child))
            returnstr = "(" + " or ".join(child_or_array) + ")"
            return returnstr
        else: #unknown operator found here. Thow some error!
            # The else binds to the "Or" check; "Not"/"And" have already returned.
            raise Exception("Parse Error: unknown operator found: ", node.name)
def evaluate(self, expression):
    """Translate one AWS Choice-rule comparison (JSON string) into a Python
    expression string, e.g. '{"Variable": "$.count", "NumericEquals": 7}'
    becomes 'count == 7'. Numeric operands are emitted bare, everything else
    is single-quoted so strings compare against strings.
    """
    rule = json.loads(expression)
    self._logger.debug(expression)
    parts = []
    if "Variable" in rule:
        # Strip the leading "$." JSONPath prefix to get the bare variable name.
        parts.append(rule["Variable"].split("$.")[1])
    # First matching comparison operator wins; map it to its Python symbol.
    for position, operator_name in enumerate(self.operators):
        if operator_name in rule:
            parts.append(self.operators_python[position])
            parts.append(rule[operator_name])
            break
    variable, symbol, operand = parts[0], parts[1], parts[2]
    if isinstance(operand, (int, float)):
        return "%s %s %s" % (variable, symbol, operand)
    return "%s %s '%s'" % (variable, symbol, operand)
def process_parameters(self, parameters, state_data):
    """
    Evaluate the Map state's JSONPath 'Parameters' block against state_data and
    build the per-item input handed to the Iterator.

    Recognizes the context-object selectors '$$.Map.Item.Value' and
    '$$.Map.Item.Index' and rewrites each element of the input collection into
    a dict keyed by the selected names.

    NOTE(review): several branches return early from inside the key loop, so
    later Parameters keys can be silently ignored — confirm this is intended.
    """
    parameters = parameters['Parameters']
    ret_value = None
    ret_item_value = None  # key name chosen for '$$.Map.Item.Value', if any
    if parameters == "$": # return unfiltered input data
        ret_value = state_data
    elif parameters is None: #return empty json
        ret_value = {}
    else: # contains a parameter filter, get it and return selected kv pairs
        ret_value = {}
        ret_index = {}  # key name chosen for '$$.Map.Item.Index', if any
        for key in parameters.keys(): # process parameters keys
            if key.casefold() == "comment".casefold(): # ignore
                ret_value[key] = parameters[key]
            elif parameters[key] == "$$.Map.Item.Value": # get Items key
                value_key = key.split(".$")[0]
                ret_value = value_key
                ret_item_value = value_key
            elif parameters[key] == "$$.Map.Item.Index": # get Index key
                index_key = key.split(".$")[0]
                ret_index = index_key
            else: # processing more complex Parameters values
                if isinstance(parameters[key], dict): # parameters key refers to dict value
                    ret_value[key] = {}
                    for k in parameters[key]: # get nested keys
                        if not k.split(".")[-1] == "$": # parse static value
                            # NOTE(review): debug print left in production code.
                            print (parameters[key][k])
                            ret_value[key][k] = parameters[key][k]
                        else:
                            new_key = k.split(".$")[0] # use the json paths in paramters to match
                            ret_value[key][new_key] = [match.value for match in parse(parameters[key][k]).find(state_data)][0]
                    return ret_value
                if isinstance(parameters[key], str): # parameters key refers to string value
                    ret_value = {}
                    new_key = key.split(".$")[0] # get the parameters key
                    query_key = parameters[key].split("$.")[1] # correct the correspondig value
                    new_value = state_data[query_key] # save the actual value before replacing the key
                    for kk in state_data.keys():
                        if isinstance(state_data[kk], dict): # value encapsulates dict
                            ret_value[new_key] = new_value
                            if ret_item_value != None:
                                ret_value[ret_item_value] = state_data[kk]
                            else:
                                raise Exception("Error: item value is not set!")
                            ret_value_dict = {}
                            ret_value_dict[kk] = ret_value
                            return ret_value_dict
                        if isinstance(state_data[kk], list): # value encapsulates list
                            ret_value_list = []
                            for data in state_data[kk]:
                                ret_value_list.append({new_key: new_value, ret_item_value: data})
                            ret_value_dict = {}
                            ret_value_dict[kk] = ret_value_list
                            return ret_value_dict
                else:
                    # NOTE(review): message typos ("invaldid Parmeters") left
                    # unchanged — it is a runtime string.
                    raise Exception("Error: invaldid Parmeters format: " + str(parameters[key]))
        # calculate transformed state output provided to Iterator
        ret_total = []
        ret_total_dict = {}
        if isinstance(state_data, dict):
            for kk in state_data.keys():
                for key in state_data[kk]:
                    if ret_value != {} and ret_index == {}:
                        ret_total.append({ret_value: key})
                    elif ret_value == {} and ret_index != {}:
                        ret_total.append({ret_index: state_data[kk].index(key) })
                    elif ret_value != {} and ret_index != {}:
                        ret_total.append({ret_value: key, ret_index: state_data[kk].index(key) })
                    else:
                        raise Exception("Map State Parameters parse error on dict input: " + str(state_data))
                ret_total_dict[kk] = ret_total
            ret_value = ret_total_dict
        elif isinstance(state_data, list):
            for key in state_data:
                if ret_value != {} and ret_index == {}:
                    ret_total.append({ret_value: key})
                elif ret_value == {} and ret_index != {}:
                    ret_total.append({ret_index: state_data.index(key) })
                elif ret_value != {} and ret_index != {}:
                    ret_total.append({ret_value: key, ret_index: state_data.index(key) })
                else:
                    raise Exception("Map State Parameters parse error on list input: " + str(list))
            ret_value = ret_total
        else:
            raise Exception("Map state parse error: invalid state input")
    return ret_value
def process_items_path(self, path_fields, state_data):
    """Select the collection an ASL Map state iterates over, per ItemsPath.

    '$' (the default when the field is absent) passes state_data through,
    None yields an empty list, and any other value is treated as a JSONPath
    filter whose first match is returned; no match raises.
    """
    items_path = path_fields.setdefault('ItemsPath', "$")
    if items_path == "$": # unfiltered input
        return state_data
    if items_path is None: # explicit null: empty item list
        return []
    self._logger.debug("seeing items_path filter: " + str(items_path) + " " + str(state_data))
    matches = [m.value for m in parse(items_path).find(state_data)]
    if not matches:
        raise Exception("Items Path processing exception: no match with map state item, invalid path!")
    return matches[0]
def process_input_path(self, path_fields, state_data):
    """Select the state's effective input per the ASL InputPath field.

    '$' (the default when the field is absent) passes state_data through,
    None yields an empty dict, and any other value is treated as a JSONPath
    filter whose first match is returned; no match raises.
    """
    input_path = path_fields.setdefault('InputPath', "$")
    if input_path == "$": # unfiltered input
        return state_data
    if input_path is None: # explicit null: empty input
        return {}
    self._logger.debug("seeing input_path filter: " + str(input_path) + " " + str(state_data))
    matches = [m.value for m in parse(input_path).find(state_data)]
    self._logger.debug("after seeing input_path filter: " + str(matches))
    if not matches:
        raise Exception("Input Path processing exception: no match with state input item, invalid path!")
    return matches[0]
def nested_dict(self, keys, value):
    """Wrap *value* in nested single-key dicts, outermost key first:
    (['a', 'b'], v) -> {'a': {'b': v}}.

    Iterative (no recursion-depth limit for very deep paths); an empty key
    list returns *value* unchanged instead of raising IndexError.
    """
    result = value
    for key in reversed(keys):
        result = {key: result}
    return result
def process_result_path(self, path_fields, state_data, task_output):
    """Combine task output with state input per the ASL ResultPath field.

    '$' (the default when absent) keeps state_data, None discards everything,
    and any other path grafts task_output into state_data at that location
    (existing state_data keys win on collision).
    """
    result_path = path_fields.setdefault('ResultPath', "$")
    if result_path == "$":
        return state_data
    if result_path is None:
        return {}
    self._logger.debug("inside ResultPath processing: " + str(result_path) + " " + str(task_output) )
    path_keys = list(tokenize(result_path)) # e.g. '$.a.b' -> ['$', 'a', 'b']
    grafted = self.nested_dict(path_keys[1:], task_output) # drop the '$' root token
    if isinstance(state_data, dict):
        # Merge grafted output with the existing state; state_data keys override.
        return dict(list(grafted.items()) + list(state_data.items()))
    return grafted
def process_output_path(self, path_fields, raw_state_input_midway):
    """Filter the state's effective output per the ASL OutputPath field.

    '$' (the default when absent) passes the value through, None yields an
    empty dict, and any other value is a JSONPath that must match; the
    sub-document named by the path's last node is returned.
    """
    output_path = path_fields.setdefault('OutputPath', "$")
    if output_path == "$":
        return raw_state_input_midway
    if output_path is None:
        return {}
    compiled = parse(output_path)
    if not [m.value for m in compiled.find(raw_state_input_midway)]:
        raise Exception("Exception: no match with state input item, invalid path!")
    # Index by the final path segment's field name.
    key = str(compiled.nodes[-1].value[0])
    return raw_state_input_midway[key]
def traverse(self, path, obj):
    """Walk dicts/lists recursively, appending one "path/{leaf k:v pairs}"
    string per dict to self.nodelist. Container-valued entries recurse with
    the key appended to the path; list elements recurse under the same path.
    """
    if isinstance(obj, dict):
        leaves = {}
        for key, val in list(obj.items()):
            if isinstance(val, (dict, list)):
                # Containers descend; their leaves are recorded at the deeper path.
                self.traverse(path + "/" + key, val)
            else:
                leaves[key] = val
        self.nodelist.append(path + "/" + str(leaves))
    if isinstance(obj, list):
        for element in obj:
            if isinstance(element, (dict, list)):
                self.traverse("{path}".format(path=path), element)
def evaluateNextState(self, function_input):
    """Decide the next state for a Choice state by evaluating each parsed
    choice-rule tree against function_input; falls back to the Default target
    when no rule matches.
    """
    # this should be called for Choice state only
    # for the rest the next values are statically defined and are parsed by hostagent
    # NOTE(review): if default_next_choice is empty AND no rule matches,
    # `nextfunc` is never bound and the final return raises UnboundLocalError.
    if len(self.default_next_choice) > 0:
        nextfunc = self.default_next_choice[-1]
    self._logger.debug("[StateUtils] choice_function_input: " + str(function_input))
    for tree in self.parsed_trees:
        ##self._logger.debug("Resulting Rendered Tree: " + str(anytree.RenderTree(tree.root)))
        ##self._logger.debug("Resulting Rendered Tree Root: " + str(tree.root))
        # Render the rule tree into a python boolean-expression string.
        test = self.EvaluateNode(tree.children[0])
        self._logger.debug("[StateUtils] choice test: " + str(test))
        self._logger.debug("Resulting Parsed Expression: " + str(test))
        self._logger.debug("Current Value String: " + json.dumps(function_input))
        # Sample value input to choice {"Comment": "Test my Iterator function", "iterator": {"count": 10, "index": 5, "step": 1}}
        for key in list(function_input.keys()):
            new_test = "False"
            key = str(key)
            if key == "Comment":
                continue
            #if "iterator.continue" == str(key):
            self._logger.debug("[StateUtils] choice value key under test: " + key)
            #keys = "continue"
            if key in str(test):
                val = function_input[key]
                self._logger.debug("[StateUtils] choice val: " + str(val))
                if isinstance(val, (int, float)): # calculate new_test value, no additional processing of values
                    self._logger.debug("[StateUtils] choice key/val: " + key + "/" + str(val))
                    new_test = test.replace(key, str(val))
                    self._logger.debug("[StateUtils] choice eval new_test: " + str(eval(str(new_test))))
                elif "." in test: # need to process the json path of this variable name
                    test2 = "$." + test.lstrip('(').rstrip(')').split("==")[0] # rebuild the json path for the variable
                    jsonpath_expr = parse(test2)
                    choice_state_path_data = [match.value for match in jsonpath_expr.find(function_input)]
                    new_test = str(choice_state_path_data[0])
                else:
                    new_test = test.replace(key, "'" + str(val)+"'") # need to add high colons to key to mark as string inside the expression
                # SECURITY NOTE(review): eval() runs an expression assembled from
                # workflow input — acceptable only if workflow definitions and
                # their inputs are trusted; confirm that assumption.
                if eval(str(new_test)):
                    nextfunc = tree.root.name.strip("/")
                    self._logger.debug("now calling: " + str(nextfunc))
                    return nextfunc # {"next":nextfunc, "value": post_processed_value}
    # if no choice rule applied, return the last one (assigned at the beginning)
    self._logger.debug("now calling: " + str(nextfunc))
    return nextfunc
| 1.71875 | 2 |
src/data/datamodule.py | ChristianHinge/geometric | 3 | 12757332 | import pytorch_lightning as pl
import torch
from torch.utils.data import random_split
from torch_geometric import datasets
from torch_geometric.data import DataLoader
from src.settings.paths import CLEANED_DATA_PATH, NOT_CLEANED_DATA_PATH
class MUTANGDataModule(pl.LightningDataModule):
    """LightningDataModule wrapping the MUTAG graph-classification TUDataset.

    prepare_data() downloads the dataset; setup() re-loads it and splits it
    into train/val/test subsets per `split`, seeded for reproducibility.
    """

    def __init__(
        self,
        batch_size: int = 32,
        cleaned: bool = False,
        split=None,
        num_workers: int = 0,
        seed: int = 0,
    ):
        """
        Args:
            batch_size: mini-batch size used by all dataloaders.
            cleaned: use the "cleaned" TUDataset variant (also picks the data dir).
            split: [train, val, test] fractions summing to 1; defaults to [0.6, 0.2, 0.2].
            num_workers: dataloader worker processes.
            seed: torch manual seed applied before the random split.

        Raises:
            ValueError: if the split fractions do not sum to 1.
        """
        super().__init__()
        import math  # local import so this fix stays self-contained in the class

        if split is None:
            split = [0.6, 0.2, 0.2]
        self.split = split
        self.batch_size = batch_size
        self.train_set = None
        self.test_set = None
        self.val_set = None
        self.cleaned = cleaned
        self.num_workers = num_workers
        self.seed = seed
        # Float fractions rarely sum to exactly 1.0 (e.g. 0.7 + 0.2 + 0.1),
        # so compare with a tolerance instead of an exact `!=` check.
        if not math.isclose(sum(self.split), 1.0):
            raise ValueError("Expected split list to sum to 1")

    def prepare_data(self):
        # Download (if needed). Lightning calls this once per node, before setup().
        return datasets.TUDataset(
            root=CLEANED_DATA_PATH if self.cleaned else NOT_CLEANED_DATA_PATH,
            name="MUTAG",
            cleaned=self.cleaned,
            pre_transform=None,
        )

    def setup(self, stage: str = None):
        # Seed so the random split is reproducible across runs/processes.
        torch.manual_seed(self.seed)
        self.full_set = datasets.TUDataset(
            root=CLEANED_DATA_PATH if self.cleaned else NOT_CLEANED_DATA_PATH,
            name="MUTAG",
            cleaned=self.cleaned,
            pre_transform=None,
        )
        # The third subset takes the remainder so the three sizes always sum
        # to len(full_set) despite rounding.
        self.train_set, self.val_set, self.test_set = random_split(
            self.full_set,
            [
                round(len(self.full_set) * self.split[0]),
                round(len(self.full_set) * self.split[1]),
                len(self.full_set)
                - round(len(self.full_set) * self.split[0])
                - round(len(self.full_set) * self.split[1]),
            ],
        )

    def train_dataloader(self):
        return DataLoader(
            self.train_set,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_set,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_workers,
        )

    def test_dataloader(self):
        # Never shuffle the test split: evaluation order should be stable.
        return DataLoader(
            self.test_set,
            batch_size=self.batch_size,
            shuffle=False,
            num_workers=self.num_workers,
        )
| 2.328125 | 2 |
src/figcli/commands/config/generate.py | figtools/figgy-cli | 36 | 12757333 | from os import getcwd
from typing import Tuple
from prompt_toolkit import prompt
from figcli.commands.config_context import ConfigContext
from figcli.commands.types.config import ConfigCommand
from figcli.io.input import Input
from figcli.svcs.observability.anonymous_usage_tracker import AnonymousUsageTracker
from figcli.svcs.observability.version_tracker import VersionTracker
from figcli.utils.utils import *
log = logging.getLogger(__name__)
class Generate(ConfigCommand):
    """`figgy config generate`: derive a new, version-incremented service config
    from an existing figgy.json, replicating parameters from the old namespace
    into the new one."""

    def __init__(self, colors_enabled: bool, config_context: ConfigContext):
        super().__init__(generate, colors_enabled, config_context)
        self._from_path = config_context.ci_config_path if config_context.ci_config_path else Utils.find_figgy_json()
        self._utils = Utils(colors_enabled)
        self._errors_detected = False
        self.example = f"{self.c.fg_bl}{CLI_NAME} config {self.command_printable} " \
                       f"--env dev --config /path/to/config{self.c.rs}"

    @staticmethod
    def _get_service_name_and_version(service_name: str) -> Tuple[str, int]:
        """Split 'name<NN>' into ('name', NN); names without a numeric suffix
        get version 1.

        The name group is lazy (`+?`) so the ENTIRE trailing digit run is the
        version: 'service12' -> ('service', 12). A greedy group would split it
        as ('service1', 2) and mis-increment multi-digit versions.
        """
        base_matcher = re.compile(r"^([A-Za-z0-9_-]+?)([0-9]+)$")
        result = base_matcher.match(service_name)
        base_name = result.group(1) if result else service_name
        version = int(result.group(2)) if result else 1
        return base_name, version

    def _generate(self):
        """Interactively build and write the new versioned config file."""
        from_config = self._utils.get_ci_config(self._from_path)
        service_name = self._utils.get_namespace(from_config).split('/')[2]
        current_ns = self._utils.get_namespace(from_config)
        base_name, version = self._get_service_name_and_version(service_name)
        # Drop a trailing hyphen left over from names like 'service-2'.
        base_name = base_name if not base_name.endswith('-') else base_name[:-1]
        new_service_name = f'{base_name}-{version + 1}'
        new_name = Input.input(f'Please select a new service name, it CANNOT be: {service_name}: ',
                               default=new_service_name)
        self._utils.validate(new_name != service_name, f"You must select a new service name that differs from the one"
                                                       f"designated in your source figgy.json file. "
                                                       f"(NOT {service_name})")
        new_ns = f'{self.context.defaults.service_ns}/{new_name}/'

        # Update all configs destinations to leverage new namespace. Easiest to search/replace across everything.
        output_string = json.dumps(from_config)
        output_string = output_string.replace(current_ns[:-1], new_ns[:-1])
        new_config = json.loads(output_string)

        # Remove existing configs that will be replicated
        new_config[CONFIG_KEY] = []

        # Configure replicate_from block so the new service mirrors the old one.
        new_config[REPL_FROM_KEY] = {
            SOURCE_NS_KEY: from_config.get(REPL_FROM_KEY, {}).get(SOURCE_NS_KEY, current_ns),
            PARAMETERS_KEY: from_config.get(REPL_FROM_KEY, {}).get(PARAMETERS_KEY, [])
        }

        for param in from_config.get(CONFIG_KEY, []):
            new_config[REPL_FROM_KEY][PARAMETERS_KEY].append(self._utils.get_parameter_only(param))

        formatted_config = self._utils.format_config(new_config)
        current_dir = getcwd()
        output_file = prompt(f'Write new config here?: ', default=f'{current_dir}/{new_name}-config.json')
        self._utils.is_valid_input(output_file, "output_file", True)

        with open(output_file, "w") as file:
            file.write(json.dumps(formatted_config, sort_keys=False, indent=4))

        print(f'{self.c.fg_gr}New config successfully generated at location: {output_file}{self.c.rs}')

    @VersionTracker.notify_user
    @AnonymousUsageTracker.track_command_usage
    def execute(self):
        self._generate()
| 1.898438 | 2 |
dataent/tests/test_translation.py | dataent/dataent | 0 | 12757334 | # Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import dataent, unittest, os
import dataent.translate
# class TestTranslations(unittest.TestCase):
# def test_doctype(self, messages=None):
# if not messages:
# messages = dataent.translate.get_messages_from_doctype("Role")
# self.assertTrue("Role Name" in messages)
#
# def test_page(self, messages=None):
# if not messages:
# messages = dataent.translate.get_messages_from_page("finder")
# self.assertTrue("Finder" in messages)
#
# def test_report(self, messages=None):
# if not messages:
# messages = dataent.translate.get_messages_from_report("ToDo")
# self.assertTrue("Test" in messages)
#
# def test_include_js(self, messages=None):
# if not messages:
# messages = dataent.translate.get_messages_from_include_files("dataent")
# self.assertTrue("History" in messages)
#
# def test_server(self, messages=None):
# if not messages:
# messages = dataent.translate.get_server_messages("dataent")
# self.assertTrue("Login" in messages)
# self.assertTrue("Did not save" in messages)
#
# def test_all_app(self):
# messages = dataent.translate.get_messages_for_app("dataent")
# self.test_doctype(messages)
# self.test_page(messages)
# self.test_report(messages)
# self.test_include_js(messages)
# self.test_server(messages)
#
# def test_load_translations(self):
# dataent.translate.clear_cache()
# self.assertFalse(dataent.cache().hget("lang_full_dict", "de"))
#
# langdict = dataent.translate.get_full_dict("de")
# self.assertEqual(langdict['Row'], 'Reihe')
#
# def test_write_csv(self):
# tpath = dataent.get_pymodule_path("dataent", "translations", "de.csv")
# if os.path.exists(tpath):
# os.remove(tpath)
# dataent.translate.write_translations_file("dataent", "de")
# self.assertTrue(os.path.exists(tpath))
# self.assertEqual(dict(dataent.translate.read_csv_file(tpath)).get("Row"), "Reihe")
#
# def test_get_dict(self):
# dataent.local.lang = "de"
# self.assertEqual(dataent.get_lang_dict("doctype", "Role").get("Role"), "Rolle")
# dataent.local.lang = "en"
#
# if __name__=="__main__":
# dataent.connect("site1")
# unittest.main()
| 2 | 2 |
client/training.py | rcbyron/2048-ai | 10 | 12757335 | """ Used for training hyperparameters and running multiple simulations """
import time
from threading import Thread
from ai import simulate, show
# # Tuning parameters and weights
# MAX_DEPTH = 4
# EMPTY_TILE_POINTS = 12
# SMOOTHNESS_WEIGHT = 30
# EDGE_WEIGHT = 30
# LOSS_PENALTY = -200000
# MONOTONICITY_POWER = 3.0
# MONOTONICITY_WEIGHT = 27.0
# SUM_POWER = 3.5
# SUM_WEIGHT = 11.0
# EMPTY_WEIGHT = 270.0
# Shared simulation statistics, mutated by the worker threads below.
test_cases = 10   # number of simulations to run concurrently
best_board = 0    # board of the highest-scoring run (0 until the first result)
best_score = 0    # highest score observed so far
best_moves = 0    # move count of the best-scoring run
avg_score = 0     # accumulated score total; divided by test_cases when reporting
avg_moves = 0     # accumulated move total; divided by test_cases when reporting
prog = 0          # completed-simulation counter for the progress indicator
def progress_bar():
    """Increment the shared progress counter and print completion percentage."""
    # NOTE(review): `prog += 1` is an unsynchronized read-modify-write on a
    # module global; concurrent workers could lose increments — confirm
    # whether a lock is warranted here.
    global prog
    prog += 1
    print(str((prog/test_cases)*100)+"%")
def worker():
    """Run one simulation on a separate thread and record its statistics."""
    # NOTE(review): all updates below are unsynchronized read-modify-writes on
    # module globals; concurrent workers can interleave and lose updates (and
    # best_board/best_score/best_moves can mix values from different runs) —
    # confirm whether a lock is needed.
    global avg_moves, avg_score, best_board, best_score, best_moves
    board, s, m = simulate()
    avg_score += s   # accumulate totals; averaged in multi_simulate()
    avg_moves += m
    if s > best_score:   # track the best-scoring run seen so far
        best_board = board
        best_score = s
        best_moves = m
    progress_bar()
def multi_simulate():
    """Run `test_cases` simulation workers concurrently and report the results.

    Prints the best run's score/moves/board, the averages across all runs,
    and the total wall-clock time.
    """
    # perf_counter() is a monotonic high-resolution timer; time.clock() was
    # deprecated and removed in Python 3.8.
    start_time = time.perf_counter()
    workers = []
    print("0%")
    for _ in range(test_cases):
        t = Thread(target=worker, args=())
        t.start()
        workers.append(t)
    # Block until all threads finished.
    for w in workers:
        w.join()
    print("\nBest Score:", best_score, "Best Moves:", best_moves)
    show(best_board, show_best_tile=True)
    print("\nAvg Score:", (avg_score/test_cases), "Avg Moves:", (avg_moves/test_cases))
    print("Time:", (time.perf_counter()-start_time), "seconds")
multi_simulate()
| 3.296875 | 3 |
lightil/util/exception.py | codertx/lightil | 1 | 12757336 | <reponame>codertx/lightil<filename>lightil/util/exception.py
from rest_framework import status
from rest_framework.views import exception_handler
from rest_framework.exceptions import APIException, NotAcceptable
def custom_exception_handler(exc, context):
    """DRF exception hook: delegate to the default handler, then mirror the
    exception's `detail` onto the response payload under the 'detail' key."""
    response = exception_handler(exc, context)
    if response is None:
        # Non-API exception: DRF's default 500 handling takes over.
        return None
    response.data['detail'] = exc.detail
    return response
class ClientException(APIException):
    """Generic 400 error raised for malformed client requests."""
    # HTTP status returned to the client.
    status_code = status.HTTP_400_BAD_REQUEST
    # Message used when the raiser supplies no detail of its own.
    default_detail = 'Invalid request'
| 2.234375 | 2 |
timed.py | Arif064001/multifocus_multiview_stereo_reconstruction | 2 | 12757337 | <filename>timed.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 29 16:11:10 2019
@author: pi
"""
import logging
import time
from functools import wraps
logger = logging.getLogger(__name__)

# Misc logger setup so a debug log statement gets printed on stdout.
logger.setLevel("DEBUG")
handler = logging.StreamHandler()
log_format = "%(asctime)s %(levelname)s -- %(message)s"
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
# Guard against attaching duplicate handlers if this module is re-imported.
if not logger.handlers:
    logger.addHandler(handler)
def timed(func):
    '''This decorator logs the execution time for the decorated function.

    Uses time.perf_counter() — a monotonic, high-resolution clock — so the
    measurement is unaffected by system clock adjustments.
    '''
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - start
        # Look up the module logger at call time (same logger as the
        # module-level `logger`, both are getLogger(__name__)); lazy %-style
        # args avoid formatting when DEBUG is disabled.
        logging.getLogger(__name__).debug("%s ran in %ss", func.__name__, round(elapsed, 2))
        return result
    return wrapper
| 3.265625 | 3 |
scd/scd_app/views.py | felipetomm/POX-Django | 1 | 12757338 | <gh_stars>1-10
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, render_to_response
from django.http import HttpResponse
from django.template import RequestContext
from django.db.models import Count
from array import *
from .models import *
from django.db.models import F
from datetime import timedelta
from django.utils import timezone
from django.core import serializers
from scd_coletor import scd_flows
def index(request):
    """Render the landing page."""
    # render() builds the RequestContext itself; this replaces the deprecated
    # render_to_response(..., context_instance=RequestContext(...)) pattern
    # (removed in modern Django). `render` is already imported at file top.
    return render(request, 'index.html')
def sobre(request):
    # "About" page view.
    # NOTE(review): renders 'index.html', the same template as index() --
    # confirm this is intentional and not a missing 'sobre.html'.
    return render_to_response('index.html', context_instance=RequestContext(request))
def switches(request):
    # List every switch, ordered by switch id, for the switches.html template.
    switches = ScdComutador.objects.all().order_by('comut_id')
    # COSMETIC - (dead code below) would list the total number of rules and the
    # total number of conflicting rules per switch.
    """lista_regras=[]
    for switch in switches:
        flows_count = ScdFlow.objects.all().Count('fl_id').filter(id_comutador=switch.comut_id)
        flows = ScdFlow.objects.all().order_by('fl_id').filter(id_comutador=switch.comut_id)
        for flow in flows:
            regras = scd_flows()
            conflitos = ScdConflito.objects.all().order_by('con_flow_principal').filter(con_flow_principal=flow.fl_id)
            for conflito in conflitos:
                regras.conflito.append(conflito.con_flow_analisada.fl_id)"""
    return render_to_response('switches.html', RequestContext(request,{'switches':switches}))
def regras(request):
    """List flow rules, optionally filtered by switch and by conflict policy.

    GET parameters:
        opt_switch: switch id to filter on (0 / absent = all switches).
        opt_politica: 0 / absent = all rules, 1 = only rules with conflicts,
            2 = only conflict-free rules.
    """
    switches = ScdComutador.objects.all().order_by('comut_nome')
    if (request.GET.get('opt_politica')): slc_politica = int(request.GET.get('opt_politica'))
    else: slc_politica = 0
    if (request.GET.get('opt_switch')): slc_switch = int(request.GET.get('opt_switch'))
    else: slc_switch = 0
    lista_regras=[]
    # Filter by switch (0 / None means no filter).
    if ((slc_switch != 0) and (slc_switch != None)):
        flows = ScdFlow.objects.all().order_by('fl_id').filter(id_comutador=slc_switch)
    else:
        flows = ScdFlow.objects.all().order_by('fl_id')
    for flow in flows:
        # Copy the ORM row into a plain scd_flows object for the template.
        # NOTE(review): if scd_flows declares `conflito` & friends as
        # class-level lists, the appends below would be shared across all
        # instances -- verify they are initialized per-instance in __init__.
        regras = scd_flows()
        regras.flow_id = flow.fl_id
        regras.table = flow.fl_flowtable
        regras.switch = flow.id_comutador.comut_nome
        regras.priority = flow.fl_priority
        regras.idle_timeout = flow.fl_idle_timeout
        regras.hard_timeout = flow.fl_hard_timeout
        regras.in_port = flow.fl_in_port
        regras.actions = flow.fl_actions
        regras.dl_dst = flow.fl_dl_dst
        regras.dl_src = flow.fl_dl_src
        regras.dl_type = flow.fl_dl_type
        regras.dl_vlan = flow.fl_dl_vlan
        regras.nw_src = flow.fl_nw_src
        regras.nw_dst = flow.fl_nw_dst
        regras.nw_tos = flow.fl_nw_tos
        regras.nw_proto = flow.fl_nw_proto
        regras.tp_dst = flow.fl_tp_dst
        regras.tp_src = flow.fl_tp_src
        # Attach every conflict recorded against this flow.
        conflitos = ScdConflito.objects.all().order_by('con_flow_principal').filter(con_flow_principal=flow.fl_id)
        for conflito in conflitos:
            regras.conflito.append(conflito.con_flow_analisada.fl_id)
            regras.conflito_sugestao.append(conflito.con_sugestao)
            regras.conflito_nivel.append(conflito.con_nivel)
        # Apply the policy filter based on whether the rule has conflicts.
        if ((slc_politica == 0) or (slc_politica == None)): lista_regras.append(regras)
        elif ((slc_politica == 1) and (regras.conflito.__len__() > 0)): lista_regras.append(regras)
        elif ((slc_politica == 2) and (regras.conflito.__len__() < 1)): lista_regras.append(regras)
        regras = None
    return render_to_response('regras.html', RequestContext(request,{'regras':lista_regras, 'switches':switches}))
| 2.046875 | 2 |
state.py | Interpause/not-just-gomoku | 1 | 12757339 | <gh_stars>1-10
from copy import deepcopy
from game_exceptions import *
from itertools import product
#TODO: make setters raise errors regarding whether move was successful
class state():
    '''The game state for an m,n,k-style board game (e.g. gomoku).

    Attributes:
        __winnum (int): Number of pieces in a line required to win.
        __height (int): The height of the grid.
        __length (int): The length of the grid.
        __pieces (str[]): The player pieces used in the grid.
        __empty ((int,int)[]): The list of empty coordinates.
        __board (dict): The list of coordinates occupied by each piece.
        __grid (str[][]): The grid of pieces placed, indexed [y][x].
    '''
    def __init__(self,height,length,winnum,pieces):
        '''The constructor for the game state.

        Args:
            height (int): The height of the grid.
            length (int): The length of the grid.
            winnum (int): Number of pieces in a line required to win.
            pieces (str[]): The player pieces used in the grid.
        '''
        self.__winnum = winnum
        self.__height = height
        self.__length = length
        self.__pieces = pieces
        # All coordinates start empty.
        self.__empty = list(product(range(length),range(height)))
        self.__board = dict([(piece,[]) for piece in pieces])
        self.__grid = [[None for x in range(length)] for y in range(height)]
    def forgetmefornow(self,memo):
        '''NOTE(review): dead/broken code -- references an undefined name
        `other`, so calling this raises NameError; `cls` and `memo` are unused.
        It looks like an abandoned __deepcopy__ implementation; presumably it
        should copy from another state instance -- confirm intent or remove.
        '''
        cls = self.__class__
        self.__winnum = other.winnum()
        self.__height = other.height()
        self.__length = other.length()
        self.__pieces = other.pieces()
        self.__empty = other.spaces()
        self.__board = other.board()
        self.__grid = other.grid()
    def __str__(self):
        # ASCII rendering of the board plus a short status footer.
        msg = ""
        msg += "Pieces are: %s"%str(self.__pieces).strip('[]')
        msg += "\n"
        msg += ("*-"*self.__length+"*")
        for y in range(self.__height):
            row = ""
            for x in range(self.__length):
                # Find which piece (if any) occupies (x, y).
                for piece, coords in self.__board.items():
                    if (x,y) in coords:
                        row += ("|"+piece)
                        break
                else:
                    row += "| "
            msg += ("\n"+row+"|")
        msg += ("\n"+"*-"*self.__length+"*")
        msg += ("\nSpaces left: %d"%len(self.__empty))
        msg += ("\nWin condition: %d"%self.__winnum)
        return msg
    #Helper functions
    def isEmpty(self,coord):
        '''Whether a coord on the grid is empty.

        Raises:
            OutOfBoundsException: if coord lies outside the grid.
        '''
        if coord[0] < 0 or coord[1] < 0 or coord[0] >= self.__length or coord[1] >= self.__height:
            raise OutOfBoundsException("Coordinates (%d,%d) are out of range of a (%d,%d) board."%(coord+(self.__length,self.__height)))
        if self.__grid[coord[1]][coord[0]] == None:
            return True
        else:
            return False
    def isWin(self):
        '''Checks whether there is a winner.

        Returns:
            str: the winning piece, if some piece has __winnum in a row
                (horizontal, vertical, or either diagonal);
            True: if the board is full with no winner (a draw);
            False: otherwise (game still in progress).
        '''
        for piece,coords in self.__board.items():
            if len(coords) < self.__winnum:
                continue
            else:
                # Treat each placed coordinate as the potential start of a line.
                for coord in coords:
                    #TODO: find way to limit coords to those with potential
                    #check horizontal
                    won = True
                    for i in range(self.__winnum):
                        x = coord[0]+i
                        y = coord[1]
                        if x >= self.__length:
                            won = False
                            break
                        if self.__grid[y][x] != piece:
                            won = False
                            break
                    if won:
                        return piece
                    #check vertical
                    won = True
                    for i in range(self.__winnum):
                        x = coord[0]
                        y = coord[1]+i
                        if y >= self.__height:
                            won = False
                            break
                        if self.__grid[y][x] != piece:
                            won = False
                            break
                    if won:
                        return piece
                    #check right diagonal
                    won = True
                    for i in range(self.__winnum):
                        x = coord[0]+i
                        y = coord[1]+i
                        if x >= self.__length or y >= self.__height:
                            won = False
                            break
                        if self.__grid[y][x] != piece:
                            won = False
                            break
                    if won:
                        return piece
                    #check left diagonal
                    won = True
                    for i in range(self.__winnum):
                        x = coord[0]+i
                        y = coord[1]-i
                        if x >= self.__length or y < 0:
                            won = False
                            break
                        if self.__grid[y][x] != piece:
                            won = False
                            break
                    if won:
                        return piece
        # Full board with no winning line: draw.
        if len(self.__empty) == 0:
            return True
        return False
    #safe getters (all return copies so callers cannot mutate internal state)
    def board(self):
        return deepcopy(self.__board)
    def grid(self):
        return deepcopy(self.__grid)
    def height(self):
        return int(self.__height)
    def length(self):
        return int(self.__length)
    def winnum(self):
        return int(self.__winnum)
    def pieces(self):
        return list(self.__pieces)
    def full(self):
        return len(self.__empty) == 0
    def spaces(self):
        return list(self.__empty)
    #unsafe setters
    def overwrite(self,piece,coord):
        '''Overwrites the coord with the piece (erases any occupant first).'''
        self.erase(coord)
        self.__board[piece].append(coord)
        self.__empty.remove(coord)
        self.__grid[coord[1]][coord[0]] = piece
        return
    def erase(self,coord):
        '''Erases the piece on that coord (no-op if the coord is empty).'''
        self.__grid[coord[1]][coord[0]] = None
        for piece in self.__board.values():
            try:
                piece.remove(coord)
                # Only re-add to the empty list if a piece was actually there.
                self.__empty.append(coord)
                return
            except ValueError:
                continue
        return
    #all in one function
    def place(self,piece,coord):
        '''Places a piece at that coord if it is empty.

        Raises:
            OutOfSpaceException: if the board is already full.
            SpaceTakenException: if the coord is occupied.
        '''
        if len(self.__empty) == 0:
            raise OutOfSpaceException("The board is out of space. isWin() should be called.")
        if self.isEmpty(coord):
            self.__board[piece].append(coord)
            self.__empty.remove(coord)
            self.__grid[coord[1]][coord[0]] = piece
        else:
            raise SpaceTakenException("(%d,%d) is taken already."%coord)
        return
if __name__ == "__main__":
    def test():
        """Smoke-test the state class: placement, overwrite, erase, win logic.

        Fixes over the original script:
        - placing on an occupied square is expected to raise
          SpaceTakenException; it is now caught instead of crashing the test;
        - removed calls to state.update_space(), a method that does not exist
          (they raised AttributeError).
        """
        print("Simple functionality Test")
        import random
        import string
        h = random.randint(3,10)
        l = random.randint(3,10)
        w = random.randint(3,min(h,l))
        p = random.randint(2,8)
        print("Creating disboard of height %d, length %d, win number of %d and %d pieces..."%(h,l,w,p))
        disboard = state(h,l,w,random.sample(string.ascii_lowercase,p))
        print("Checking pretty print...")
        print(disboard)
        p1 = (random.randint(0,l-1),random.randint(0,h-1))
        c1 = random.choice(disboard.pieces())
        print("Placing %s at random point x=%d, y=%d..." % ((c1,)+p1))
        disboard.place(c1,p1)
        print(disboard)
        l2 = disboard.pieces()
        l2.remove(c1)
        c3 = random.choice(l2)
        print("Placing %s at same point x=%d, y=%d..." % ((c3,)+p1))
        try:
            disboard.place(c3,p1)
        except SpaceTakenException as e:
            # Expected: the square is already occupied.
            print("Got expected SpaceTakenException: %s" % e)
        print(disboard)
        l1 = disboard.pieces()
        l1.remove(c3)
        c4 = random.choice(l1)
        print("Overwriting with %s at previous point x=%d, y=%d..." % ((c4,)+p1))
        disboard.overwrite(c4,p1)
        print(disboard)
        p2 = (random.randint(0,l-1),random.randint(0,h-1))
        c2 = random.choice(disboard.pieces())
        print("Overwriting with %s at random point x=%d, y=%d..." % ((c2,)+p2))
        disboard.overwrite(c2,p2)
        print(disboard)
        print("Erasing previous point x=%d, y=%d..." % p1)
        disboard.erase(p1)
        print(disboard)
        p3 = (random.randint(0,l-1),random.randint(0,h-1))
        print("Erasing random point x=%d, y=%d..." % p3)
        disboard.erase(p3)
        print(disboard)
        print("Testing if erased point is empty...")
        print(disboard.isEmpty(p1))
        print("Testing if previous point is empty...")
        print(disboard.isEmpty(p2))
        print("\nWin function Test")
        for i in range(10):
            h = random.randint(3,10)
            l = random.randint(3,10)
            w = random.randint(3,min(h,l))
            p = random.randint(2,8)
            disboard = state(h,l,w,random.sample(string.ascii_lowercase,p))
            prev = disboard.pieces()[0]
            # Fill the board with random pieces (never repeating the previous
            # one) until someone wins or the board is full.
            for y in range(h):
                for x in range(l):
                    playable = disboard.pieces()
                    playable.remove(prev)
                    prev = random.choice(playable)
                    disboard.place(prev,(x,y))
                    if disboard.isWin() != False:
                        break
                else:
                    continue
                break
            if disboard.isWin() == True:
                print(disboard)
                print("DRAW!")
            else:
                print(disboard)
                print("%s WINS!"%disboard.isWin())
    test()
| 3.28125 | 3 |
scripts/metrics.py | bzshang/yelp-photo-classification | 10 | 12757340 | """
Custom metric for mxnet
"""
__author__ = 'bshang'
from sklearn.metrics import f1_score
from sklearn import preprocessing
def f1(label, pred):
    """Micro-averaged F1 score after thresholding probabilities at 0.5.

    Args:
        label: ground-truth binary label array.
        pred: predicted probabilities; binarized at 0.5 before scoring.
    """
    binary_pred = preprocessing.binarize(pred, threshold=0.5)
    return f1_score(label, binary_pred, average='micro')
| 2.515625 | 3 |
hw1_4.py | eddylau328/speech-recognition-system-exercise | 0 | 12757341 | <gh_stars>0
from matplotlib.patches import Rectangle
import matplotlib.pyplot as plt
import numpy as np
import wave
def sgn(value):
    """Signum with sgn(0) == 1, the convention the zero-crossing count relies on."""
    return 1 if value >= 0 else -1
def split_frame(signal, size, step):
    """Split *signal* into overlapping frames of length *size*, hop *step*.

    The last frame is zero-padded up to *size*. Returns a 2-D float array.
    """
    values = signal.tolist()
    frames = [values[start:start + size]
              for start in range(0, len(values) - step, step)]
    tail = frames[-1]
    frames[-1] = tail + [0] * (size - len(tail))
    return np.array(frames, dtype='float')
def get_zero_crossing_rate(signal, size, step):
    """Frame-wise zero-crossing rate over windows of *size* samples, hop *step*.

    Each frame's value is sum(|sgn(x[i]) - sgn(x[i-1])|) / size, so a sign flip
    contributes 2/size. NOTE(review): the index bookkeeping below (rewinding
    ``index`` by ``step`` when a frame closes) is fragile -- confirm the frame
    boundaries line up with split_frame() before relying on alignment.
    """
    # zero_crossings = np.nonzero(np.diff(signal > 0))[0]
    acc, index, start, end = 0, 1, 0, size
    results = []
    while (index <= len(signal)):
        if (start <= index <= end and index < len(signal)):
            # Inside the current window: accumulate sign changes.
            acc += np.absolute(sgn(signal[index]) - sgn(signal[index - 1]))
        else:
            # Window complete: emit the normalized count, slide the window by
            # one hop, and rewind the scan position into the overlap region.
            results.append(acc / size)
            acc = 0
            start += step
            end += step
            if (index < len(signal)):
                index = index - step
            else:
                break
        index += 1
    return np.array(results, 'float')
def normalized(signal):
    """Scale raw 16-bit PCM samples into roughly [-1.0, 1.0]."""
    full_scale = 32767.0
    return np.array(signal, dtype='float') / full_scale
def detect_endpoint(filename, raw_signal, frame_rate):
    """Locate a speech segment in a raw 16-bit mono signal.

    Uses short-time energy and zero-crossing rate over 20 ms frames with a
    10 ms hop, requiring several successive qualifying frames to mark the
    start/end of speech.

    Fix: ``start_time``/``end_time`` were previously only assigned inside the
    success branch, so the final ``return`` raised NameError whenever no
    endpoint was found. They are now initialized to None.

    Side effects: draws three subplots and saves the figure under visualize/.

    Returns:
        (signal, normalized_signal, start_time, end_time); the times are in
        seconds, or None when detection fails.
    """
    # Convert the raw byte buffer into a 16-bit sample array.
    signal = np.frombuffer(raw_signal, "int16")
    normalized_signal = normalized(signal)
    plt.subplot(3, 1, 1)
    # NOTE(review): "(unknown)" is a literal placeholder -- .format(filename=...)
    # is a no-op because the string contains no {filename} field.
    plt.title("(unknown) Signal".format(filename=filename))
    plt.plot(
        np.array([i/frame_rate for i in range(len(signal))]), normalized_signal)
    # 20 ms analysis frames with a 10 ms hop, expressed in samples.
    size = np.floor((20 / 1000) / (1 / frame_rate)).astype('int')
    step = np.floor((10 / 1000) / (1 / frame_rate)).astype('int')
    split_signal_frame = split_frame(normalized_signal, size, step)
    square_split_signal_frame = np.square(split_signal_frame)
    energy_level = np.sum(square_split_signal_frame, axis=1)
    plt.subplot(3, 1, 2)
    plt.title("(unknown) Energy Level".format(filename=filename))
    plt.plot(energy_level)
    zero_crossing_rate = get_zero_crossing_rate(
        normalized_signal, size, step)
    plt.subplot(3, 1, 3)
    plt.title("(unknown) Zero Crossing Rate".format(filename=filename))
    plt.plot(zero_crossing_rate)
    plt.tight_layout()
    # Empirical decision thresholds -- TODO confirm against the recordings used.
    energy_thershold = 1.5
    zero_crossing_rate_thershold = (0.02, 0.1)
    start_successive_frame, end_successive_frame = 5, 8
    start, end = -1, -1
    start_time, end_time = None, None  # fix: defined even when detection fails
    for i in range(len(energy_level)):
        if (start == -1):
            # Looking for the start: count successive frames above the energy
            # threshold and within the ZCR band.
            count = 0
            for j in range(i, len(energy_level)):
                is_energy_exceed = energy_level[i] >= energy_thershold
                is_within_zero_crossing_rate_thershold = zero_crossing_rate_thershold[
                    0] <= zero_crossing_rate[j] <= zero_crossing_rate_thershold[1]
                if (is_energy_exceed and is_within_zero_crossing_rate_thershold):
                    count += 1
                elif (count >= start_successive_frame):
                    break
                else:
                    break
            if (count >= start_successive_frame):
                start = i
            else:
                count = 0
        elif (end == -1):
            # Looking for the end: count successive low-energy frames within
            # the ZCR band.
            count = 0
            for j in range(i, len(energy_level)):
                is_energy_below = energy_level[i] < energy_thershold
                is_within_zero_crossing_rate_thershold = zero_crossing_rate_thershold[
                    0] <= zero_crossing_rate[j] <= zero_crossing_rate_thershold[1]
                if (is_energy_below and is_within_zero_crossing_rate_thershold):
                    count += 1
                elif (count >= end_successive_frame):
                    break
                else:
                    break
            if (count >= end_successive_frame):
                end = i
            else:
                count = 0
    if (start != -1 and end != -1):
        start_time = start * step / frame_rate
        end_time = end * step / frame_rate
        print('End Point: start_frame = {} , end_frame = {}'.format(start, end))
        print('End Point: start_time = {}, end_time = {}'.format(
            np.round(start_time, 2), np.round(end_time, 2)))
        # Mark the detected segment on all three subplots.
        ax = plt.subplot(3, 1, 1)
        ax.add_patch(Rectangle((start_time, -500), end_time -
                               start_time, 1000, edgecolor='red', facecolor='white'))
        ax = plt.subplot(3, 1, 2)
        ax.add_patch(Rectangle((start, -500), end -
                               start, 1000, edgecolor='red', facecolor='white'))
        ax = plt.subplot(3, 1, 3)
        ax.add_patch(Rectangle((start, -500), end - start,
                               1000, edgecolor='red', facecolor='white'))
    else:
        print('End Point cannot find')
    plt.savefig('visualize/{}-end-point-detection.jpg'.format(filename))
    plt.show()
    return signal, normalized_signal, start_time, end_time
def extract_segment(signal, start_time, end_time, frame_rate):
    """Cut [start_time, end_time) out of *signal*, then keep only the first
    20 ms worth of samples of that cut. Times are seconds; returns floats."""
    begin = int(start_time * frame_rate)
    finish = int(end_time * frame_rate)
    samples = np.array([signal[i] for i in range(begin, finish)], 'float')
    window = int(0.02 * frame_rate)
    return np.array([samples[i] for i in range(0, window)], 'float')
def plot_fourier_transform(filename, signal):
    """Plot the magnitude spectrum of *signal* via a direct O(N^2) DFT.

    Only the first N/2 bins are computed (a real input's spectrum is
    conjugate-symmetric). Saves the figure to plot/fourier_<filename>.jpg.
    """
    N = len(signal)
    X_real = []
    X_img = []
    for m in range(int(N / 2)):
        # Accumulate the m-th DFT bin: X[m] = sum_k x[k] * e^{-j*2*pi*k*m/N}.
        tmp_real, tmp_img = 0, 0
        for k in range(N):
            theta = 2 * np.pi * k * m / N
            tmp_real += signal[k] * np.cos(theta)
            tmp_img -= signal[k] * np.sin(theta)
        X_real.append(tmp_real)
        X_img.append(tmp_img)
    # Magnitude spectrum.
    result = np.sqrt(np.square(X_real) + np.square(X_img))
    # NOTE(review): "(unknown)" literal -- .format(filename=...) has no effect.
    plt.title("(unknown) Fourier Transform".format(filename=filename))
    plt.plot(result)
    plt.savefig('plot/fourier_{}.jpg'.format(filename))
    plt.show()
def pre_emphasis(filename, signal, pre_emphasis_constant):
    """Apply a first-order pre-emphasis filter y[k] = x[k] - a*x[k-1].

    Plots the original and filtered segments, saves the figure under plot/,
    and returns the filtered signal as a float numpy array.
    """
    # y[0] = x[0]; no previous sample to subtract.
    result = [signal[0]]
    for k in range(1, len(signal)):
        result.append(signal[k] - pre_emphasis_constant * signal[k-1])
    pre_emphasis_signal = np.array(result, 'float')
    # NOTE(review): "(unknown)" literal -- .format(filename=...) has no effect.
    plt.title(
        "(unknown) Pre-emphasis Signal & Signal Segment".format(filename=filename))
    plt.plot(signal)
    plt.plot(pre_emphasis_signal)
    plt.savefig('plot/pre_emphasis_{}.jpg'.format(filename))
    plt.show()
    return pre_emphasis_signal
def auto_correlation(signal, order):
    """Return autocorrelation lags r[0..order] of *signal* as a float array.

    r[lag] = sum_{i=lag}^{N-1} signal[i] * signal[i-lag]
    """
    lags = []
    for lag in range(order + 1):
        total = 0
        for idx in range(lag, len(signal)):
            total += signal[idx] * signal[idx - lag]
        lags.append(total)
    return np.array(lags, 'float')
def find_lpc(signal, order):
    """Solve the LPC normal equations R a = r for *order* coefficients.

    Builds the (order x order) symmetric Toeplitz autocorrelation matrix from
    lags r[0..order-1] and solves against r[1..order].

    Fix: the coefficients were previously only printed; they are now also
    returned (backward compatible -- callers that ignored the implicit None
    return are unaffected).

    Returns:
        numpy.ndarray: the LPC coefficient vector of length *order*.
    """
    auto_coeff = auto_correlation(signal, order)
    # Row i of the Toeplitz matrix is [r[i], ..., r[1], r[0], ..., r[order-1-i]].
    matrix = []
    for i in range(order):
        row = []
        for j in range(i, 0, -1):
            row.append(auto_coeff[j])
        for j in range(order - i):
            row.append(auto_coeff[j])
        matrix.append(row)
    matrix = np.array(matrix, 'float')
    # a = R^-1 r
    lpc_parameter = np.dot(np.linalg.inv(matrix), np.transpose(auto_coeff[1:]))
    print('LPC parameters: {}'.format(lpc_parameter))
    return lpc_parameter
def answer_q4(path, filenames=[]):
    """Run the full homework pipeline on each WAV file under *path*.

    Steps: (4a) endpoint detection, (4b) 20 ms segment extraction, (4c) DFT
    plot, (4d) pre-emphasis, (4e) LPC parameter estimation.

    NOTE(review): the default ``filenames=[]`` is a mutable default argument;
    harmless here since it is never mutated, but worth replacing with None.
    """
    for filename in filenames:
        # NOTE(review): "(unknown)" literal in the path template -- the
        # .format(path=..., filename=...) call only substitutes {path}, so the
        # file opened is literally "<path>/(unknown).wav". Presumably this was
        # meant to be "{path}/{filename}.wav" -- confirm.
        spf = wave.open(
            "{path}/(unknown).wav".format(path=path, filename=filename), "r")
        # Extract Raw Audio from Wav File
        raw_signal = spf.readframes(-1)
        frame_rate = spf.getframerate()
        total_frame = spf.getnframes()
        print('Target: {}.wav'.format(filename))
        print('framerate: {}'.format(frame_rate))
        print('Total frames: {}'.format(total_frame))
        print()
        # 4a
        signal, normalized_signal, start_time, end_time = detect_endpoint(
            filename, raw_signal, frame_rate)
        # 4b
        segment = extract_segment(
            normalized_signal, start_time, end_time, frame_rate)
        # 4c
        plot_fourier_transform(filename, segment)
        # 4d
        pre_emphasis_segment = pre_emphasis(filename, segment, 0.945)
        # 4e
        find_lpc(pre_emphasis_segment, 10)
if (__name__ == '__main__'):
    answer_q4('recordings', ['s1A'])
| 2.625 | 3 |
cohesity_management_sdk/models/rename_object_param_proto.py | chandrashekar-cohesity/management-sdk-python | 1 | 12757342 | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class RenameObjectParamProto(object):
    """Implementation of the 'RenameObjectParamProto' model.

    Message specifying the prefix/suffix used to rename an object. At least
    one of prefix or suffix must be specified; both may be given together.

    Attributes:
        prefix (string): Prefix to be added to a name.
        suffix (string): Suffix to be added to a name.
    """

    # Mapping from Model property names to API property names.
    _names = {
        "prefix": 'prefix',
        "suffix": 'suffix'
    }

    def __init__(self, prefix=None, suffix=None):
        """Constructor for the RenameObjectParamProto class."""
        self.prefix = prefix
        self.suffix = suffix

    @classmethod
    def from_dictionary(cls, dictionary):
        """Create an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this class, or None when *dictionary* is None.
        """
        if dictionary is None:
            return None
        return cls(dictionary.get('prefix'), dictionary.get('suffix'))
| 3.265625 | 3 |
scripts/setJson.py | TadeasPilar/KiKit | 784 | 12757343 | <reponame>TadeasPilar/KiKit
#!/usr/bin/env python3
import click
import json
from collections import OrderedDict
def setKey(obj, path, value):
    """Set a value at a (possibly nested) location in a JSON-like structure.

    *path* is a list of keys; when the current container is a list, the key
    string is converted to an integer index.
    """
    head, rest = path[0], path[1:]
    index = head if isinstance(obj, dict) else int(head)
    if not rest:
        obj[index] = value
    else:
        setKey(obj[index], rest, value)
@click.command()
@click.argument("input", type=click.File("r"))
@click.argument("output", type=click.File("w"))
@click.option("--property", "-s", type=str, multiple=True, help="<path>=<value>")
def run(input, output, property):
    """
    Set a key to a value in JSON.

    Each --property is "<dot.separated.path>=<json value>"; the value part is
    parsed as JSON, so strings need quoting. Key order is preserved via
    OrderedDict and the result is written with 4-space indentation.
    """
    obj = json.load(input, object_pairs_hook=OrderedDict)
    for p in property:
        # NOTE(review): split("=") breaks for JSON values containing '=' --
        # consider p.split("=", 1) if such values are expected.
        path, value = tuple(p.split("="))
        path = path.split(".")
        value = json.loads(value, object_pairs_hook=OrderedDict)
        setKey(obj, path, value)
    json.dump(obj, output, indent=4)
if __name__ == "__main__":
    run()
| 2.8125 | 3 |
test/functional/feature_mandatory_coinbase.py | nondejus/elements | 947 | 12757344 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mandatory coinbase feature"""
from binascii import b2a_hex
from test_framework.blocktools import create_coinbase
from test_framework.messages import CBlock, CProof, CTxOutValue, CTxOut
from test_framework.script import CScript, OP_RETURN
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
# Key pair / address whose script every coinbase must pay to in this test.
mandatory_privkey = "<KEY>"
mandatory_address = "XP3bwB9jSxt58frSa3cJismgGL3F57ukUy"
#mandatory_pubkey = "<KEY>"
# Hex scriptPubKey passed to -con_mandatorycoinbase on node0.
mandatory_script = "a914804b9fd9d6939c2e960b7aa31124a5d532f4e59c87"
def b2x(b):
    """Return the lowercase hex encoding of bytes *b* as a str."""
    # bytes.hex() is equivalent to b2a_hex(b).decode('ascii').
    return b.hex()
def assert_template(node, block, expect, rehash=True):
    """Submit *block* to getblocktemplate in proposal mode and assert the
    node's verdict equals *expect* (None means accepted)."""
    if rehash:
        # Recompute the merkle root after any transaction changes.
        block.hashMerkleRoot = block.calc_merkle_root()
    rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal', 'rules': 'segwit'})
    assert_equal(rsp, expect)
class MandatoryCoinbaseTest(BitcoinTestFramework):
    """Functional test: node0 enforces a mandatory coinbase scriptPubKey,
    node1 does not. Verifies template acceptance/rejection on both."""
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        # Non-zero coinbase outputs *must* match this. Not setting it means anything is allowed
        self.extra_args = [["-con_mandatorycoinbase="+mandatory_script], []]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node0 = self.nodes[0]
        node1 = self.nodes[1]

        node0.importprivkey(mandatory_privkey)

        self.log.info("generatetoaddress: Making blocks of various kinds, checking for rejection")
        # Create valid blocks to get out of IBD and get some funds (subsidy goes to permitted addr)
        node0.generatetoaddress(101, mandatory_address)

        # Generating for another address will not work
        assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed: bad-coinbase-txos", node0.generatetoaddress, 1, node0.getnewaddress())

        # Have non-mandatory node make a template
        self.sync_all()
        tmpl = node1.getblocktemplate({'rules': ['segwit']})

        # We make a block with OP_TRUE coinbase output that will fail on node0
        coinbase_tx = create_coinbase(height=int(tmpl["height"]))
        # sequence numbers must not be max for nLockTime to have effect
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
        coinbase_tx.rehash()

        block = CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.proof = CProof(bytearray.fromhex('51'))
        block.vtx = [coinbase_tx]
        block.block_height = int(tmpl["height"])
        block.hashMerkleRoot = block.calc_merkle_root()

        self.log.info("getblocktemplate: Test block on both nodes")
        # node1 (no restriction) accepts; node0 rejects the non-mandatory payout.
        assert_template(node1, block, None)
        assert_template(node0, block, 'bad-coinbase-txos')

        self.log.info("getblocktemplate: Test non-subsidy block on both nodes")
        # Without block reward anything goes, this allows commitment outputs like segwit
        coinbase_tx.vout[0].nValue = CTxOutValue(0)
        coinbase_tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
        coinbase_tx.rehash()
        block.vtx = [coinbase_tx]
        assert_template(node0, block, None)
        assert_template(node1, block, None)

        #
        # Also test that coinbases can't have fees.
        self.sync_all()
        tmpl = node1.getblocktemplate({'rules': ['segwit']})
        coinbase_tx = create_coinbase(height=int(tmpl["height"]))
        # sequence numbers must not be max for nLockTime to have effect
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
        # Add fee output.
        coinbase_tx.vout[0].nValue.setToAmount(coinbase_tx.vout[0].nValue.getAmount() - 1)
        coinbase_tx.vout.append(CTxOut(1))
        coinbase_tx.rehash()

        block = CBlock()
        block.nVersion = tmpl["version"]
        block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
        block.nTime = tmpl["curtime"]
        block.nBits = int(tmpl["bits"], 16)
        block.nNonce = 0
        block.proof = CProof(bytearray.fromhex('51'))
        block.vtx = [coinbase_tx]
        block.block_height = int(tmpl["height"])
        block.hashMerkleRoot = block.calc_merkle_root()

        # should not be accepted on either node: coinbase claims a fee.
        assert_template(node0, block, "bad-cb-fee")
        assert_template(node1, block, "bad-cb-fee")

if __name__ == '__main__':
    MandatoryCoinbaseTest().main()
| 2.1875 | 2 |
code/convNet.py | UKPLab/scienceie2017 | 40 | 12757345 | <reponame>UKPLab/scienceie2017
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from extras import VSM, read_and_map
from representation import VeryStupidCBOWMapper, CharMapper
import sys, numpy as np,os
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from keras.layers import Dense, Dropout, Activation, Embedding
from keras.models import Sequential
from keras.utils.np_utils import to_categorical
from keras.layers import Convolution1D, GlobalMaxPooling1D, Lambda, Merge
from keras.preprocessing import sequence
from keras import backend as K
# NOTE(review): the first four maxlen assignments are dead -- maxlen is
# unconditionally recomputed from L+M+R below. Kept for history only.
maxlen=50
maxlen=100
maxlen=150
maxlen=50+2*30
# Left/center/right context window lengths, optionally taken from argv[5..7].
try:
    L = int(sys.argv[5])
    M = int(sys.argv[6])
    R = int(sys.argv[7])
except IndexError:
    L = 30
    M = 50
    R = 30
maxlen=L+M+R
# this is a simple cnn
# if you would want to use it below, you would have to do
# X_train = X_train.reshape(len(X_train),input_shape[0],input_shape[1])
def build_cnn(input_shape, output_dim,nb_filter):
    """Build a simple 1-D CNN classifier over pre-embedded input vectors.

    Legacy Keras 1.x API (Convolution1D/nb_filter). Unused in __main__; the
    char-level models below are used instead.
    """
    clf = Sequential()
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=4,border_mode="valid",activation="relu",subsample_length=1,input_shape=input_shape))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
# just one filter
def build_cnn_char(input_dim, output_dim,nb_filter):
    """Char-level CNN: embedding -> single conv (width 3) -> global max pool.

    input_dim is the character-vocabulary size; input length is the module
    global `maxlen`. Legacy Keras 1.x API.
    """
    clf = Sequential()
    clf.add(Embedding(input_dim,
                      32, # character embedding size
                      input_length=maxlen,
                      dropout=0.2))
    clf.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=3,border_mode="valid",activation="relu",subsample_length=1))
    clf.add(GlobalMaxPooling1D())
    clf.add(Dense(100))
    clf.add(Dropout(0.2))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
# just one filter
def build_cnn_char_threeModels(input_dim, output_dim,nb_filter,filter_size=3):
    """Three parallel char-CNN towers over the left (L), center (M) and right
    (R) context windows, concatenated before the softmax layer.

    The towers are structurally identical except for their input lengths
    (module globals L, M, R). Legacy Keras 1.x API (Merge layer).
    """
    # Left-context tower.
    left = Sequential()
    left.add(Embedding(input_dim,
                      32, # character embedding size
                      input_length=L,
                      dropout=0.2))
    left.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=filter_size,border_mode="valid",activation="relu",subsample_length=1))
    left.add(GlobalMaxPooling1D())
    left.add(Dense(100))
    left.add(Dropout(0.2))
    left.add(Activation("tanh"))

    # Center (mention) tower.
    center = Sequential()
    center.add(Embedding(input_dim,
                      32, # character embedding size
                      input_length=M,
                      dropout=0.2))
    center.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=filter_size,border_mode="valid",activation="relu",subsample_length=1))
    center.add(GlobalMaxPooling1D())
    center.add(Dense(100))
    center.add(Dropout(0.2))
    center.add(Activation("tanh"))

    # Right-context tower.
    right = Sequential()
    right.add(Embedding(input_dim,
                      32, # character embedding size
                      input_length=R,
                      dropout=0.2))
    right.add(Convolution1D(nb_filter=nb_filter,
                          filter_length=filter_size,border_mode="valid",activation="relu",subsample_length=1))
    right.add(GlobalMaxPooling1D())
    right.add(Dense(100))
    right.add(Dropout(0.2))
    right.add(Activation("tanh"))

    clf = Sequential()
    clf.add(Merge([left,center,right],mode="concat"))

    clf.add(Dense(output_dim=output_dim, activation='softmax'))

    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
def max_1d(X):
    # Max over the time axis (axis 1) -- backend op used as a pooling Lambda.
    return K.max(X,axis=1)
# multiple filters
def build_cnn_char_complex(input_dim, output_dim,nb_filter):
    """Char-CNN with multiple filter widths (2- and 3-grams) over a shared
    embedding, max-pooled and concatenated. Legacy Keras 1.x API.

    NOTE(review): `randomEmbeddingLayer` and `poolingLayer` instances are
    shared between the two Sequential branches -- layer sharing semantics in
    Keras 1.x Sequential models are fragile; confirm this trains as intended.
    """
    randomEmbeddingLayer = Embedding(input_dim,32, input_length=maxlen,dropout=0.1)
    poolingLayer = Lambda(max_1d, output_shape=(nb_filter,))
    conv_filters = []
    for n_gram in range(2,4):
        ngramModel = Sequential()
        ngramModel.add(randomEmbeddingLayer)
        ngramModel.add(Convolution1D(nb_filter=nb_filter,
                                     filter_length=n_gram,
                                     border_mode="valid",
                                     activation="relu",
                                     subsample_length=1))
        ngramModel.add(poolingLayer)
        conv_filters.append(ngramModel)

    clf = Sequential()
    clf.add(Merge(conv_filters,mode="concat"))
    clf.add(Activation("relu"))
    clf.add(Dense(100))
    clf.add(Dropout(0.1))
    clf.add(Activation("tanh"))
    clf.add(Dense(output_dim=output_dim, activation='softmax'))
    clf.compile(optimizer='adagrad',
                loss='categorical_crossentropy',
                metrics=['accuracy'])
    return clf
def acc(correct, total):
    """Fraction of correct predictions (true division, Python 2 safe)."""
    return float(correct) / total
# example argline:
# python convNet.py ../scienceie2017_train/train2 ../scienceie2017_dev/dev ../resources/vsm/glove.6B/glove.6B.100d.txt
# Python 2 script entry point (print statements, xrange below).
# example argline:
# python convNet.py ../scienceie2017_train/train2 ../scienceie2017_dev/dev ../resources/vsm/glove.6B/glove.6B.100d.txt
if __name__=="__main__":
    train_src = sys.argv[1]
    dev_src = sys.argv[2]
    # vsm_path = sys.argv[3]
    # NOTE(review): vsm_path is hard-coded to None, overriding the argv[3]
    # documented above -- the VSM is effectively unused.
    vsm_path = None
    print("Loading VSM")
    vsm = VSM(vsm_path)

    # NOTE(review): this try/except looks inverted -- `csize = 2` cannot raise
    # IndexError, so the argv[4] branch is dead code. Presumably the argv read
    # belongs in the try and the constant in the except.
    try:
        csize = 2
    except IndexError:
        csize = int(sys.argv[4])

    try:
        n_filter = int(sys.argv[8])
    except IndexError:
        n_filter = 250

    try:
        filter_size = int(sys.argv[9])
    except IndexError:
        filter_size = 3

    # "document" disables sentence-boundary handling in the mapper.
    if len(sys.argv)>10 and sys.argv[10]=="document":
        SB = False
    else:
        SB = True

    mapper = CharMapper(vsm,csize,L=L,M=M,R=R,sentence_boundaries=SB)
    print("Reading training data")
    X_train, y_train, y_values, _ = read_and_map(train_src, mapper)
    X_dev, y_dev_gold, _, estrings = read_and_map(dev_src, mapper, y_values)
    vocabSize = mapper.curVal

    print(X_train.shape)
    print(y_train.shape)
    #sys.exit(1)

    print("Trainig a model")
    timesteps = 2*csize + 1 # left, right, center
    context_dim = 100
    input_shape = (timesteps,context_dim)

    # NOTE(review): the first model is built and immediately discarded -- only
    # the three-tower model is trained.
    clf = build_cnn_char(vocabSize+1, len(y_values)+1,n_filter)
    clf = build_cnn_char_threeModels(vocabSize+1, len(y_values)+1,n_filter)

    # Split the concatenated char windows back into left/center/right.
    X_left = X_train[:,:L]
    X_center = X_train[:,L:L+M]
    X_right = X_train[:,L+M:L+M+R]
    print L,M,R,X_train.shape,X_left.shape,X_center.shape,X_right.shape,y_train,y_values
    clf.fit([X_left,X_center,X_right], to_categorical(y_train, len(y_values)+1), verbose=1, nb_epoch=15)

    print("Reading test data")

    print("Testing")
    X_dev_left = X_dev[:,:L]
    X_dev_center = X_dev[:,L:L+M]
    X_dev_right = X_dev[:,L+M:L+M+R]
    print(X_dev.shape,X_dev_left.shape,X_dev_center.shape,X_dev_right.shape)
    y_dev_auto = clf.predict_classes([X_dev_left,X_dev_center,X_dev_right]) # for LogisticRegression just do predict()
    print "==PREDICTING=="
    for i in xrange(len(y_dev_auto)):
        print y_values[y_dev_auto[i]]
| 2.796875 | 3 |
chatApp/server/server.py | Mark-Suckau/ChatApp | 2 | 12757346 | <gh_stars>1-10
import socket, queue, select, json, traceback
from datetime import datetime
from hashlib import sha256
from chatapp.shared import exceptions, message
from chatapp.server import db_connector as db_conn
from chatapp.server import load_server_config
# Load server configuration once at import time; split out the sections for
# the Python chat server and the PostgreSQL backend.
server_config = load_server_config.load_config()
server_config_python_server = server_config['python_server']
server_config_postgres_server = server_config['postgres_server']
class TCP_Nonblocking_Server:
    """Single-threaded, non-blocking TCP chat server.

    Uses select.select() to multiplex the listening socket and all client
    sockets on one thread. Clients must verify (or sign up) with a
    username/password stored in PostgreSQL before their chat messages are
    broadcast to the other clients.
    """

    def __init__(self, host, port, verbose_output=True):
        self.host = host
        self.port = port
        self.sock = None  # listening socket; created in configure_server()
        self.timeout = 1  # timeout for select.select() in listen_for_connections()
        self.format = 'utf-8'
        self.verbose_output = verbose_output  # determines if anything is logged to terminal
        self.client_list = []  # used for storing sockets
        self.client_info = {}  # used for storing info about sockets (ex. address, etc.)
        # Holds messages received from clients until they are broadcast to all other clients.
        self.client_messages = queue.Queue()
        self.db_connector = db_conn.DB_Connector(server_config_postgres_server['ip'],
                                                 server_config_postgres_server['port'],
                                                 server_config_postgres_server['dbname'],
                                                 server_config_postgres_server['user'],
                                                 server_config_postgres_server['password'])
        self.configure_server()

    def print_tstamp(self, msg):
        """Print msg prefixed with a timestamp when verbose output is enabled."""
        if self.verbose_output:
            # BUG FIX: month is '%m'; the original used '%M', which is minutes.
            current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            print(f'[{current_time}] [SERVER] {msg}')

    def configure_server(self):
        """Prepare the database and create/bind the non-blocking listening socket."""
        self.print_tstamp('Initializing database...')
        self.db_connector.create_tables_if_needed()

        self.print_tstamp('Creating socket...')
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setblocking(False)

        self.print_tstamp(f'Binding socket to [{self.host}] on port [{self.port}]...')
        self.sock.bind((self.host, self.port))
        # The listening socket lives in client_list so select() watches it too.
        self.client_list.append(self.sock)

    def accept_client_socket(self):
        """Accept a pending connection and register the new client socket."""
        client_sock, client_addr = self.sock.accept()
        self.print_tstamp(f'Accepted new connection from {client_addr}')
        client_sock.setblocking(False)
        # New clients start unverified; they must authenticate before chatting.
        self.client_info[client_sock] = {'address': client_addr, 'verified': False}
        self.client_list.append(client_sock)

    def close_client_socket(self, client_sock):
        """Remove client_sock from all bookkeeping structures and close it."""
        client_addr = self.client_info[client_sock]['address']
        self.print_tstamp(f'Closing socket from address {client_addr}...')
        # BUG FIX: the original removed entries from client_list while
        # iterating over it; list.remove() drops the single match directly.
        if client_sock in self.client_list:
            self.client_list.remove(client_sock)
        del self.client_info[client_sock]
        client_sock.close()
        self.print_tstamp(f'Socket closed from address {client_addr}')

    def receive_message(self, client_sock):
        """Read one message from client_sock and dispatch it.

        Unverified clients may only send verification or signup requests;
        text messages from verified clients are queued for broadcasting.
        An empty read means the client disconnected.
        """
        try:
            data_encoded = client_sock.recv(1024)
            client_addr = self.client_info[client_sock]['address']
            client_verified = self.client_info[client_sock]['verified']

            if not data_encoded:
                # no data sent from client thus client must have disconnected
                self.close_client_socket(client_sock)
                self.print_tstamp(f'{client_addr} disconnected')
                return

            data = data_encoded.decode(self.format)  # decoding from utf-8 bytes to string
            data = json.loads(data)  # converting from json string to python dict

            # check if client is verified
            if not client_verified:
                # check if client is sending a valid verification request
                # TODO replace if statement with try catch using InvalidMessageFormattingError exception
                if message.is_type(data, message.config_msg_types['VERIFICATION_REQUEST']):
                    # TODO remove repeating code for sending response messages
                    # attempt to verify client
                    could_verify = self.verify_client(client_sock, data['username'], data['password'])
                    msg = message.create_message(type=message.config_msg_types['VERIFICATION_RESPONSE'], success=could_verify, error_msg='a', status_code='a')
                    msg = json.dumps(msg)
                    msg = msg.encode(self.format)
                    client_sock.send(msg)
                    # close connection if couldnt verify username and/or password
                    if not could_verify:
                        self.close_client_socket(client_sock)
                elif message.is_type(data, message.config_msg_types['SIGNUP_REQUEST']):
                    could_signup = self.sign_up_client(data['username'], data['password'])
                    msg = message.create_message(type=message.config_msg_types['SIGNUP_RESPONSE'], success=could_signup, error_msg='', status_code='')
                    msg = json.dumps(msg)
                    msg = msg.encode(self.format)
                    client_sock.send(msg)
                else:
                    # closes connection if verification message is not in correct format
                    self.close_client_socket(client_sock)
            else:
                # Verified clients: only correctly formatted text messages are
                # accepted; anything else is silently ignored.
                if message.is_type(data, message.config_msg_types['CLIENT_TEXT']):
                    self.print_tstamp(f'{client_addr} client says: [{data}]')
                    self.client_messages.put(data)
        except json.JSONDecodeError:
            self.print_tstamp('Encountered error: Could not decode JSON')
            traceback.print_exc()
            self.close_client_socket(client_sock)
        except OSError as err:
            # BUG FIX: the original f-string never interpolated the error.
            self.print_tstamp(f'Encountered error: {err}')
            traceback.print_exc()
            self.close_client_socket(client_sock)

    def verify_client(self, client_sock, username, password):
        """Return True (and mark the socket verified) if the credentials match
        the stored record; return False otherwise."""
        try:
            user_info = self.db_connector.get_user_info(username)
        except exceptions.ClientLookupError:
            return False

        password_hash = sha256(password.encode('utf-8')).hexdigest()
        # user_info layout (per the indexing below, presumably):
        # (user_id, username, password_hash) — TODO confirm against DB_Connector.
        if username == user_info[1] and password_hash == user_info[2]:
            self.client_info[client_sock]['verified'] = True
            self.client_info[client_sock]['user_id'] = user_info[0]
            self.client_info[client_sock]['username'] = user_info[1]
            return True
        return False

    def sign_up_client(self, username, password):
        # signs up new clients, client must then separately verify to be able to use server
        # BUG FIX: the hash call was corrupted ('<PASSWORD>56'); it must be sha256,
        # matching verify_client above.
        password_hash = sha256(password.encode('utf-8')).hexdigest()
        try:
            self.db_connector.insert_user(username, password_hash)
            return True
        except exceptions.DuplicateUserError as err:
            self.print_tstamp(err.message)
            return False

    def broadcast_message(self, client_socks):
        # broadcasts messages sent from other clients to other clients
        # takes a list of client sockets to broadcast message to
        try:
            msg = self.client_messages.get_nowait()  # get message sent from a client
            msg = json.dumps(msg)  # convert from python dict to json string
            msg = msg.encode(self.format)  # encode from json string to utf-8 bytes
            # BUG FIX: the listening socket is kept in client_list and may show
            # up in the writable set; send() on it would raise OSError.
            recipients = [s for s in client_socks if s is not self.sock]
            self.print_tstamp(f'Broadcasting message to {len(recipients)} clients...')
            for s in recipients:
                s.send(msg)
            self.print_tstamp(f'Broadcasted message to {len(recipients)} clients')
        except queue.Empty:
            pass

    def send_message(self, client_sock, msg_content):
        # respond to client with a predefined message from the server
        try:
            client_addr = self.client_info[client_sock]['address']
            client_username = self.client_info[client_sock]['username']
            msg = message.create_message(type=message.config_msg_types['SERVER_TEXT'], msg_body=msg_content, username=client_username)
            msg = json.dumps(msg)  # convert msg from python dict to json string
            msg = msg.encode(self.format)  # encoding msg from json string to utf-8 bytes
            send_info = client_sock.send(msg)  # sending msg
            self.print_tstamp(f'Sent {send_info} bytes to {client_addr}')
        except KeyError:
            # handles a condition where client socket is in the writable list
            # even though it's been removed from the dictionary
            pass
        except OSError as err:
            self.print_tstamp(f'Encountered error: {err}')

    def handle_exception_socket(self, client_sock):
        """Close a socket that select() reported in exceptional condition."""
        client_addr = self.client_info[client_sock]['address']
        self.close_client_socket(client_sock)
        self.print_tstamp(f'Closed exception socket from address {client_addr}')

    def listen_for_connections(self):
        # main function which handles connections and dispatches them to appropriate functions to be accepted, responded to, received from, etc.
        self.print_tstamp('Listening for connections...')
        self.sock.listen(10)

        try:
            while True:
                readable_socks, writable_socks, error_socks = select.select(self.client_list, self.client_list, self.client_list, self.timeout)
                # read from readable sockets
                for sock in readable_socks:
                    # if the socket is server socket, accept the connection
                    if sock is self.sock:
                        self.accept_client_socket()
                    # otherwise receive the client request
                    else:
                        self.receive_message(sock)
                if writable_socks:
                    self.broadcast_message(writable_socks)
                for sock in error_socks:
                    self.handle_exception_socket(sock)
        except KeyboardInterrupt:
            self.print_tstamp('Shutting down server...')
            self.shutdown_server()
            self.print_tstamp('Server shut down')

    def shutdown_server(self):
        """Close every socket (the listener included) and drop bookkeeping."""
        for s in self.client_list:
            s.close()
        del self.client_list
        del self.client_info
        # The listener was already closed via client_list; a second close()
        # on a closed socket object is a no-op.
        self.sock.close()
def run_server():
    """Create a chat server on localhost:8080 and serve until interrupted."""
    server = TCP_Nonblocking_Server('localhost', 8080)
    server.listen_for_connections()


if __name__ == '__main__':
    # BUG FIX: the original line carried fused dataset residue ('| 2.5 | 2')
    # after the call, which is not valid Python.
    run_server()
python/orca/src/bigdl/orca/automl/xgboost/auto_xgb.py | EmiCareOfCell44/BigDL | 0 | 12757347 | <filename>python/orca/src/bigdl/orca/automl/xgboost/auto_xgb.py
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.orca.automl.xgboost.XGBoost import XGBoostModelBuilder
from bigdl.orca.automl.auto_estimator import AutoEstimator
class AutoXGBClassifier(AutoEstimator):
    def __init__(self,
                 logs_dir="/tmp/auto_xgb_classifier_logs",
                 cpus_per_trial=1,
                 name=None,
                 **xgb_configs
                 ):
        """
        An AutoEstimator that searches hyperparameters for an xgboost classifier.

        Example:
            >>> search_space = {"n_estimators": hp.grid_search([50, 1000]),
                                "max_depth": hp.grid_search([2, 15]),
                                "lr": hp.loguniform(1e-4, 1e-1)}
            >>> auto_xgb_clf = AutoXGBClassifier(cpus_per_trial=4,
                                                 name="auto_xgb_classifier",
                                                 **config)
            >>> auto_xgb_clf.fit(data=(X_train, y_train),
                                 validation_data=(X_val, y_val),
                                 metric="error",
                                 metric_mode="min",
                                 n_sampling=1,
                                 search_space=search_space)
            >>> best_model = auto_xgb_clf.get_best_model()

        :param logs_dir: Local directory for logs and results. Defaults to
               "/tmp/auto_xgb_classifier_logs".
        :param cpus_per_trial: Int. Number of cpus per trial, 1 by default.
               This value is also passed to xgboost as n_jobs (the number of
               parallel threads xgboost uses).
        :param name: Name of the auto xgboost classifier.
        :param xgb_configs: Any other scikit-learn style xgboost parameters; see
               https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
               for the accepted names. Since cpus_per_trial is forwarded as
               n_jobs, do not pass n_jobs yourself.
        """
        # None cpus_per_trial means no explicit per-trial resource request.
        trial_resources = {"cpu": cpus_per_trial} if cpus_per_trial else None
        model_builder = XGBoostModelBuilder(model_type='classifier',
                                            cpus_per_trial=cpus_per_trial,
                                            **xgb_configs)
        super().__init__(model_builder=model_builder,
                         logs_dir=logs_dir,
                         resources_per_trial=trial_resources,
                         name=name)
class AutoXGBRegressor(AutoEstimator):
    def __init__(self,
                 logs_dir="/tmp/auto_xgb_regressor_logs",
                 cpus_per_trial=1,
                 name=None,
                 **xgb_configs
                 ):
        """
        An AutoEstimator that searches hyperparameters for an xgboost regressor.

        Example:
            >>> search_space = {"n_estimators": hp.grid_search([800, 1000]),
                                "max_depth": hp.grid_search([10, 15]),
                                "lr": hp.loguniform(1e-4, 1e-1),
                                "min_child_weight": hp.choice([1, 2, 3]),
                                }
            >>> auto_xgb_reg = AutoXGBRegressor(cpus_per_trial=2,
                                                name="auto_xgb_regressor",
                                                **config)
            >>> auto_xgb_reg.fit(data=(X_train, y_train),
                                 validation_data=(X_val, y_val),
                                 metric="rmse",
                                 n_sampling=1,
                                 search_space=search_space)
            >>> best_model = auto_xgb_reg.get_best_model()

        :param logs_dir: Local directory for logs and results. Defaults to
               "/tmp/auto_xgb_regressor_logs".
        :param cpus_per_trial: Int. Number of cpus per trial, 1 by default.
               This value is also passed to xgboost as n_jobs (the number of
               parallel threads xgboost uses).
        :param name: Name of the auto xgboost regressor.
        :param xgb_configs: Any other scikit-learn style xgboost parameters; see
               https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
               for the accepted names. Since cpus_per_trial is forwarded as
               n_jobs, do not pass n_jobs yourself.
        """
        # None cpus_per_trial means no explicit per-trial resource request.
        trial_resources = {"cpu": cpus_per_trial} if cpus_per_trial else None
        model_builder = XGBoostModelBuilder(model_type='regressor',
                                            cpus_per_trial=cpus_per_trial,
                                            **xgb_configs)
        super().__init__(model_builder=model_builder,
                         logs_dir=logs_dir,
                         resources_per_trial=trial_resources,
                         name=name)
| 2.125 | 2 |
tools/translation/helper/sanity_check.py | zealoussnow/chromium | 14,668 | 12757348 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sanity checking for grd_helper.py. Run manually before uploading a CL."""
import io
import os
import subprocess
import sys
# Add the parent dir so that we can import from "helper".
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from helper import grd_helper
from helper import translation_helper
# Use the |git.bat| wrapper from depot_tools/ on Windows, plain |git| elsewhere.
GIT = 'git.bat' if sys.platform.startswith('win') else 'git'

# Repository root, three levels above this file's directory.
here = os.path.dirname(os.path.realpath(__file__))
repo_root = os.path.normpath(os.path.join(here, '..', '..', '..'))
def list_files_in_repository(repo_path, pattern):
  """Returns the tracked files matching |pattern| in the git repo at
  |repo_path|, one path per list element."""
  # git performs its own glob expansion, so no shell is involved here.
  cmd = [GIT, 'ls-files', '--', pattern]
  raw = subprocess.check_output(cmd, cwd=repo_path)
  return raw.decode('utf-8').strip().splitlines()
def read_file_as_text(path):
  """Returns the entire contents of the file at |path| decoded as UTF-8."""
  with open(path, mode='r', encoding='utf-8') as f:
    return f.read()
# Sanity checks to ensure that we can parse all grd and grdp files in the repo.
# Must not fail.
def Run():
  """Parses every .grd/.grdp file in the repo; raises if any fails to parse."""
  grd_paths = list_files_in_repository(repo_root, '*.grd')
  grdp_paths = list_files_in_repository(repo_root, '*.grdp')
  print(f'Found {len(grd_paths)} grds, {len(grdp_paths)} grdps in the repo.')

  # Grd files are parsed via their file path.
  # This test-data file is intentionally missing an include, so skip it.
  skipped_grd = os.path.join('tools', 'translation', 'testdata', 'internal.grd')
  for grd in grd_paths:
    if grd == skipped_grd:
      continue
    grd_path = os.path.join(repo_root, grd)
    grd_helper.GetGrdMessages(grd_path, os.path.dirname(grd_path))

  # Grdp files are parsed using file contents instead of path.
  for grdp in grdp_paths:
    grdp_contents = read_file_as_text(os.path.join(repo_root, grdp))
    grd_helper.GetGrdpMessagesFromString(grdp_contents)
  print('Successfully parsed all .grd and .grdp files in the repo.')

  # Additional check for translateable grds. Translateable grds are a subset
  # of all grds so this checks some files twice, but it exercises the
  # get_translatable_grds() path and also doesn't need to skip internal.grd.
  expectations_path = os.path.join(repo_root, 'tools', 'gritsettings',
                                   'translation_expectations.pyl')
  translateable = translation_helper.get_translatable_grds(
      repo_root, grd_paths, expectations_path)
  print(f'Found {len(translateable)} translateable .grd files in translation '
        'expectations.')
  for grd in translateable:
    grd_path = os.path.join(repo_root, grd.path)
    grd_helper.GetGrdMessages(grd_path, os.path.dirname(grd_path))
  print('Successfully parsed all translateable_grds .grd files in translation '
        'expectations.')
  print('DONE')
# Allow running the sanity check directly as a script.
if __name__ == '__main__':
  Run()
| 2.328125 | 2 |
test/test_cassandra.py | claytonbrown/simhash-db-py | 51 | 12757349 | <filename>test/test_cassandra.py
#! /usr/bin/env python
'''Make sure the Cassandra client is sane'''
import unittest
from test import BaseTest
from simhash_db import Client
class CassandraTest(BaseTest, unittest.TestCase):
    '''Exercises the Cassandra-backed client via the shared BaseTest suite'''

    def make_client(self, name, num_blocks, num_bits):
        # Factory hook used by BaseTest: build a Cassandra simhash client.
        client = Client('cassandra', name, num_blocks, num_bits)
        return client
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 2.375 | 2 |
hackerrank/flipping-the-matrix/solution.py | SamProkopchuk/coding-problems | 0 | 12757350 | <filename>hackerrank/flipping-the-matrix/solution.py
from itertools import product
# For each query: read a 2n x 2n matrix; every cell (i, j) of the upper-left
# n x n quadrant can be swapped (via row/column reversals) with any of its
# three mirror cells, so the best achievable quadrant sum is the sum of the
# per-cell maxima over the four mirror positions.
for _ in range(int(input())):
    size = int(input())
    grid = [[int(v) for v in input().split()] for _ in range(2 * size)]
    total = 0
    for row, col in product(range(size), repeat=2):
        mirror_row = 2 * size - row - 1
        mirror_col = 2 * size - col - 1
        total += max(grid[row][col],
                     grid[mirror_row][col],
                     grid[row][mirror_col],
                     grid[mirror_row][mirror_col])
    print(total)
| 3.34375 | 3 |