content stringlengths 5 1.05M |
|---|
import os
import json
import pytest
from click.testing import CliRunner
from skedulord.common import HEARTBEAT_PATH
from skedulord.web.app import create_app
from skedulord.cli import history, summary, version
from skedulord import version as lord_version
@pytest.fixture()
def clean_start_small():
    """Provide a fresh skedulord state with three successful jobs.

    Nukes any prior state, initialises, runs foo/bar/buz, then nukes
    again on teardown.
    """
    setup_commands = (
        "lord nuke --really --yes",
        "lord init",
        "lord run foo 'python jobs/pyjob.py'",
        "lord run bar 'python jobs/pyjob.py'",
        "lord run buz 'python jobs/pyjob.py'",
    )
    for command in setup_commands:
        os.system(command)
    yield 1
    os.system("lord nuke --really --yes")
@pytest.fixture()
def dirty_start_small():
    """Provide a skedulord state containing one good and one failing job.

    Nukes any prior state, initialises, runs buz (succeeds) and bad
    (fails), then nukes again on teardown.
    """
    for command in (
        "lord nuke --yes --really",
        "lord init",
        "lord run buz 'python jobs/pyjob.py'",
        "lord run bad 'python jobs/badpyjob.py'",
    ):
        os.system(command)
    yield 1
    os.system("lord nuke --yes --really")
def test_basic_heartbeat_file(clean_start_small):
    """The heartbeat file holds one JSON line per job that ran."""
    with open(HEARTBEAT_PATH, "r") as f:
        jobs = [json.loads(line) for line in f]
    assert len(jobs) == 3
    assert {job['name'] for job in jobs} == {'foo', 'bar', 'buz'}
def test_basic_summary(clean_start_small):
    """`lord summary` exits cleanly and lists every job that ran.

    The leftover debug `print(result.output)` from the original has been
    removed -- it only polluted pytest's captured output.
    """
    runner = CliRunner()
    result = runner.invoke(summary)
    assert result.exit_code == 0
    # three jobs plus table chrome -> 8 lines of output
    assert len(result.output.split("\n")) == 8
    for job_name in ('foo', 'bar', 'buz'):
        assert job_name in result.output
def test_basic_history(clean_start_small):
    """`lord history` exits cleanly, lists every job, and shows success marks."""
    result = CliRunner().invoke(history)
    assert result.exit_code == 0
    assert len(result.output.split("\n")) == 8
    for expected in ('foo', 'bar', 'buz', '✅'):
        assert expected in result.output
def test_basic_heartbeat_server(clean_start_small):
    """The heartbeat API reports one entry per job that ran."""
    client = create_app().test_client()
    heartbeats = client.get("/api/test_heartbeats").get_json()
    assert len(heartbeats) == 3
    assert {beat['name'] for beat in heartbeats} == {'foo', 'bar', 'buz'}
def test_adv_heartbeat_file(dirty_start_small):
    """Failed jobs are recorded in the heartbeat file too."""
    with open(HEARTBEAT_PATH, "r") as f:
        jobs = [json.loads(line) for line in f]
    assert len(jobs) == 2
    assert {job['name'] for job in jobs} == {'buz', 'bad'}
def test_adv_history(dirty_start_small):
    """`lord history` shows both the success and the failure marker."""
    result = CliRunner().invoke(history)
    assert result.exit_code == 0
    assert len(result.output.split("\n")) == 7
    for expected in ('bad', 'buz', '✅', '❌'):
        assert expected in result.output
def test_adv_summary(dirty_start_small):
    """`lord summary` lists both jobs even when one of them failed.

    Bug fix: the original invoked `history` here (copy-paste from
    test_adv_history), so the summary command was never exercised on a
    dirty state.
    """
    runner = CliRunner()
    result = runner.invoke(summary)
    assert result.exit_code == 0
    # two jobs plus table chrome -> 7 lines of output
    assert len(result.output.split("\n")) == 7
    assert 'bad' in result.output
    assert 'buz' in result.output
def test_adv_heartbeat_server(dirty_start_small):
    """The heartbeat API includes failed jobs as well."""
    client = create_app().test_client()
    heartbeats = client.get("/api/test_heartbeats").get_json()
    assert len(heartbeats) == 2
    assert {beat['name'] for beat in heartbeats} == {'buz', 'bad'}
def test_version():
    """`lord version` prints the package version and exits cleanly."""
    result = CliRunner().invoke(version)
    assert result.exit_code == 0
    assert lord_version in result.output
|
from itertools import cycle
class DummySsmClient:
    """Stand-in for a boto3 SSM client.

    Cycles endlessly through three canned versions of 'TestParameter'
    and counts how many times get_parameters() has been invoked.
    """

    def __init__(self):
        canned_responses = [
            {
                'Name': 'TestParameter',
                'Type': 'String',
                'Value': 'StringValue1',
                'Version': 10,
            },
            {
                'Name': 'TestParameter',
                'Type': 'StringList',
                'Value': 'StringValue2',
                'Version': 20,
            },
            {
                'Name': 'TestParameter',
                'Type': 'SecureString',
                'Value': 'StringValue3',
                'Version': 30,
            },
        ]
        self.__dummy_params = cycle(canned_responses)
        # public counter inspected by callers/tests
        self.get_parameters_function_calls = 0

    def get_parameters(self, *args, **kwargs):
        """Mimic SSM get_parameters: one canned parameter, nothing invalid."""
        self.get_parameters_function_calls += 1
        response = {
            'Parameters': [next(self.__dummy_params)],
            'InvalidParameters': []
        }
        return response
|
from typing import Dict
from types import FunctionType
# Registry mapping upper-cased "PATH+METHOD" keys to handler functions.
routes: Dict[str, FunctionType] = {}


def add(path: str, method: str) -> FunctionType:
    """Return a decorator that registers a handler under *path*/*method*.

    The key is path.upper() + method.upper(); the decorated function is
    returned unchanged so it can still be called directly.
    """
    def update(func: FunctionType) -> FunctionType:
        # Mutating the module-level dict needs no `global` declaration;
        # the original's `global routes` was a no-op and has been removed.
        routes[path.upper() + method.upper()] = func
        return func
    return update
# coding=utf-8
"""BinaryNet implemented in Keras."""
import keras
from scripts.ann_architectures.BinaryConnect import binarization
# This class extends the Lasagne DenseLayer to support BinaryConnect
# This class extends the Lasagne DenseLayer to support BinaryConnect
class Dense(keras.layers.Dense):
    """Binary Dense layer.

    During the forward pass the real-valued weights are temporarily
    swapped for a binarized copy (BinaryConnect-style training).
    """
    def __init__(self, num_units, binary=True, stochastic=True, h=1., **kwargs):
        # binary: when False, `binarization` presumably passes weights
        # through unchanged -- TODO confirm against its implementation.
        self.binary = binary
        # stochastic: selects stochastic vs deterministic rounding in
        # `binarization`.
        self.stochastic = stochastic
        # H: scale handed to `binarization` along with the weights.
        self.H = h
        # W / Wb: real weights and their binarized copy.
        # NOTE(review): keras.layers.Dense does not itself define W/Wb;
        # this mirrors the Lasagne API -- confirm what assigns self.W.
        self.W = None
        self.Wb = None
        super(Dense, self).__init__(num_units, **kwargs)
    def get_output_for(self, input_node, deterministic=False, **kwargs):
        """Forward pass computed with binarized weights.

        Parameters
        ----------
        input_node :
            Input tensor to the layer.
        deterministic :
            Forwarded to `binarization`; presumably disables stochastic
            rounding at inference time -- TODO confirm.
        kwargs :
            Forwarded to the parent implementation.

        Returns
        -------
        The parent layer's output, evaluated with self.W temporarily
        replaced by its binarized copy.
        """
        self.Wb = binarization(self.W, self.H, self.binary, deterministic,
                               self.stochastic)
        # Swap in the binarized weights, run the parent forward pass,
        # then restore the real weights -- statement order matters here.
        wr = self.W
        self.W = self.Wb
        rvalue = super(Dense, self).get_output_for(input_node, **kwargs)
        self.W = wr
        return rvalue
# This class extends the Lasagne Conv2DLayer to support BinaryConnect
class Conv2D(keras.layers.Conv2D):
    """Binary convolution layer.

    Same pattern as the binary Dense layer: the convolution is evaluated
    with a temporarily swapped-in binarized weight copy.
    """
    def __init__(self, filters, kernel_size, binary=True, stochastic=True,
                 h=1., **kwargs):
        # binary / stochastic / H: forwarded to `binarization` at forward
        # time (see get_output_for in the Dense counterpart).
        self.binary = binary
        self.stochastic = stochastic
        self.H = h
        # W / Wb: real weights and their binarized copy.
        # NOTE(review): keras.layers.Conv2D does not define W/Wb itself;
        # confirm what assigns self.W before `convolve` runs.
        self.W = None
        self.Wb = None
        super(Conv2D, self).__init__(filters, kernel_size, **kwargs)
    def convolve(self, input_node, deterministic=False, **kwargs):
        """Convolution operation.

        Parameters
        ----------
        input_node :
            Input tensor to convolve.
        deterministic :
            Forwarded to `binarization`; presumably disables stochastic
            rounding at inference time -- TODO confirm.
        kwargs :
            Forwarded to the parent implementation.

        Returns
        -------
        The parent convolution result, evaluated with self.W temporarily
        replaced by its binarized copy.
        """
        self.Wb = binarization(self.W, self.H, self.binary, deterministic,
                               self.stochastic)
        # Swap, convolve, restore -- statement order matters here.
        wr = self.W
        self.W = self.Wb
        rvalue = super(Conv2D, self).convolve(input_node, **kwargs)
        self.W = wr
        return rvalue
|
#!/usr/bin/env python
""" A unittest script for the MIMS module. """
import unittest
from cutlass import MIMS
class MimsTest(unittest.TestCase):
    """ Unit tests for the cutlass MIMS class.

    Fixes two assertion messages from the original: the required-fields
    test claimed "returns a list" while asserting a tuple, and the
    insufficient-data test reused the superfluous-data message.
    """

    def testRequiredFields(self):
        """ Test the required_fields() static method. """
        required = MIMS.required_fields()

        self.assertEqual(type(required), tuple,
                         "required_fields() returns a tuple.")

        self.assertTrue(len(required) > 0,
                        "required_fields() return is not empty.")

    def testInsufficientData(self):
        """ Test the MIMS class with insufficient data. """
        too_little = {"adapters": "test_adapters"}

        valid = MIMS.check_dict(too_little)
        self.assertFalse(valid, "False result for insufficient data.")

    def testSuperfluousData(self):
        """ Test the MIMS class with excess data. """
        too_much = {
            "adapters": "test_adapters",
            "annot_source": "test_annot_source",
            "assembly": "test_assembly",
            "assembly_name": "test_assembly_name",
            "biome": "test_biome",
            "collection_date": "test_collection_date",
            "env_package": "test_env_package",
            "extrachrom_elements": "test_extrachrom_elements",
            "encoded_traits": "test_encoded_traits",
            "experimental_factor": "test_experimental_factor",
            "feature": "test_feature",
            "findex": "test_findex",
            "finishing_strategy": "test_finishing_strategy",
            "geo_loc_name": "test_geo_loc_name",
            "investigation_type": "test_investigation_type",
            "lat_lon": "test_lat_long",
            "lib_const_meth": "test_lib_const_meth",
            "lib_reads_seqd": "test_lib_reads_seqd",
            "lib_screen": "test_lib_screen",
            "lib_size": 2000,
            "lib_vector": "test_lib_vector",
            "material": "test_material",
            "nucl_acid_amp": "test_nucl_acid_amp",
            "nucl_acid_ext": "test_nucl_acid_ext",
            "project_name": "test_project_name",
            "rel_to_oxygen": "test_rel_to_oxygen",
            "rindex": "test_rindex",
            "samp_collect_device": "test_samp_collect_device",
            "samp_mat_process": "test_samp_map_process",
            "samp_size": "test_samp_size",
            "seq_meth": "test_seq_meth",
            "sop": ["a", "b", "c"],
            "source_mat_id": ["a", "b", "c"],
            "submitted_to_insdc": True,
            "url": ["a", "b", "c"],
            # And now for some spurious data
            "ned": "stark",
            "sansa": "stark",
            "ramsay": "bolton"
        }

        valid = MIMS.check_dict(too_much)
        self.assertFalse(valid, "False result for superfluous data.")

    def testValidData(self):
        """ Test the MIMS class with completely valid data. """
        just_right = {
            "adapters": "test_adapters",
            "annot_source": "test_annot_source",
            "assembly": "test_assembly",
            "assembly_name": "test_assembly_name",
            "biome": "test_biome",
            "collection_date": "test_collection_date",
            "env_package": "test_env_package",
            "extrachrom_elements": "test_extrachrom_elements",
            "encoded_traits": "test_encoded_traits",
            "experimental_factor": "test_experimental_factor",
            "feature": "test_feature",
            "findex": "test_findex",
            "finishing_strategy": "test_finishing_strategy",
            "geo_loc_name": "test_geo_loc_name",
            "investigation_type": "test_investigation_type",
            "lat_lon": "test_lat_long",
            "lib_const_meth": "test_lib_const_meth",
            "lib_reads_seqd": "test_lib_reads_seqd",
            "lib_screen": "test_lib_screen",
            "lib_size": 2000,
            "lib_vector": "test_lib_vector",
            "material": "test_material",
            "nucl_acid_amp": "test_nucl_acid_amp",
            "nucl_acid_ext": "test_nucl_acid_ext",
            "project_name": "test_project_name",
            "rel_to_oxygen": "test_rel_to_oxygen",
            "rindex": "test_rindex",
            "samp_collect_device": "test_samp_collect_device",
            "samp_mat_process": "test_samp_map_process",
            "samp_size": "test_samp_size",
            "seq_meth": "test_seq_meth",
            "sop": ["a", "b", "c"],
            "source_mat_id": ["a", "b", "c"],
            "submitted_to_insdc": True,
            "url": ["a", "b", "c"]
        }

        valid = MIMS.check_dict(just_right)
        self.assertTrue(valid, "True result for valid data.")


if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
import argparse
from pathlib import Path
import numpy as np
from tqdm import tqdm
from carla_constants import *
def make_spherical_lut(lut_file, output_width, output_height):
    """Build an equirectangular (spherical) lookup table into the six
    concatenated CARLA cube-face images and save it with np.save.

    The saved array has shape (output_height, output_width, 3): source x,
    source y (both in the six-face strip, faces offset horizontally by
    input_width) and a per-pixel depth multiplier. Also writes
    'spherical_intrinsics.txt' next to the LUT containing
    (f_theta, c_theta, f_phi, c_phi).

    Parameters
    ----------
    lut_file :
        Open binary file object the LUT is np.save'd into; its .name is
        also used to locate the intrinsics file.
    output_width, output_height :
        Panorama size in pixels.
    """
    lutx = np.zeros((output_height, output_width))
    luty = np.zeros((output_height, output_width))
    depth_multiplier = np.zeros((output_height, output_width))
    local_thetas = np.zeros((output_height, output_width))
    local_phis = np.zeros((output_height, output_width))
    # one longitude per column, one latitude per row
    thetas = np.linspace(-np.pi, np.pi, num=output_width, endpoint=False)
    phis = np.linspace(-np.pi / 2, np.pi / 2, num=output_height,
                       endpoint=False)
    # CARLA parameters
    input_width = IMAGE_WIDTH
    input_height = IMAGE_HEIGHT
    fov = FOV
    # pinhole focal length from the horizontal FOV (FOV is in degrees)
    focal = input_width / (2 * np.tan(fov * np.pi / 360))
    c_x = input_width * 0.5
    c_y = input_height * 0.5
    for r in tqdm(range(output_height), desc='Height', unit='pixel'):
        phi = phis[r]
        for c in range(output_width):
            theta = thetas[c]
            # get XYZ point
            Z = np.sin(theta) * np.cos(phi)
            Y = np.sin(phi)
            X = np.cos(theta) * np.cos(phi)
            # translate to x, y, mag coords for proper face
            # input/index of form: Back, Left, Front, Right, Top, Bottom
            largest = np.argmax(np.abs([X, Y, Z]))
            if largest == 0:
                if X <= 0: # back
                    ind = 0
                    x = -Z
                    y = Y
                    mag = X
                else: # front
                    ind = 2
                    x = Z
                    y = Y
                    mag = X
            elif largest == 1:
                if Y <= 0: # top
                    ind = 4
                    x = Z
                    y = X
                    mag = Y
                else: # bottom
                    ind = 5
                    x = Z
                    y = -X
                    mag = Y
            else:
                if Z <= 0: # left
                    ind = 1
                    x = X
                    y = Y
                    mag = Z
                else: # right
                    ind = 3
                    x = -X
                    y = Y
                    mag = Z
            # local_theta = np.arctan2(x, mag)
            # local_phi = np.arcsin(
            #     y / np.sqrt(x ** 2 + y ** 2 + mag ** 2))
            # project back to raw
            x = focal * x / np.abs(mag) + c_x
            y = focal * y / np.abs(mag) + c_y
            # angles of this pixel within its pinhole face; the
            # multiplier converts planar depth to ray length
            local_theta = np.arctan2(x - c_x, focal)
            local_phi = np.arctan2(y - c_y, focal)
            depth_multiplier[r, c] = 1 / (
                np.cos(local_theta) * np.cos(local_phi))
            # offset for given image
            x = x + ind * input_width
            # store in lookup table
            lutx[r, c] = x
            luty[r, c] = y
            local_thetas[r, c] = local_theta
            local_phis[r, c] = local_phi
    lut = np.concatenate(
        [np.expand_dims(lutx, axis=-1), np.expand_dims(luty, axis=-1),
         np.expand_dims(depth_multiplier, axis=-1)], axis=-1)
    np.save(lut_file, lut)
    # solve for theta intrinsics (linear fit of column index vs theta)
    x0 = 0
    x1 = len(thetas) - 1
    theta0 = thetas[0]
    theta1 = thetas[-1]
    c_theta = (theta0 * x1 - theta1 * x0) / (theta0 - theta1)
    f_theta = (x0 - x1) / (theta0 - theta1)
    # solve for phi intrinsics
    y0 = 0
    y1 = len(phis) - 1
    phi0 = phis[0]
    phi1 = phis[-1]
    c_phi = (phi0 * y1 - phi1 * y0) / (phi0 - phi1)
    f_phi = (y0 - y1) / (phi0 - phi1)
    # NOTE(review): relies on lut_file being a real on-disk file so that
    # Path(lut_file.name) points next to the LUT -- confirm callers never
    # pass an in-memory buffer.
    intrinsics_file = Path(lut_file.name).with_name("spherical_intrinsics.txt")
    # save intrinsics to file
    with intrinsics_file.open('w') as f:
        f.write('%.15f %.15f %.15f %.15f' % (f_theta, c_theta, f_phi, c_phi))
def load_spherical_intrinsics(intrinsics_file: Path = Path("./spherical_intrinsics.txt")):
    """Load the spherical intrinsics written by make_spherical_lut.

    :param intrinsics_file: path to the space-separated intrinsics file.
    :return: (f_theta, c_theta, f_phi, c_phi) as a tuple of np.float32
    """
    # Use a context manager so the handle is closed (the original leaked
    # it), and return a concrete tuple as documented rather than a
    # one-shot generator.
    with intrinsics_file.open("r") as f:
        txt = f.read()
    return tuple(np.float32(t) for t in txt.split(' '))
def make_cylindrical_lut(lut_file, output_width, output_height):
    """Build a cylindrical panorama lookup table into the four horizontal
    CARLA cube faces and save it with np.save.

    The saved array has shape (output_height, output_width, 2): source x
    and source y in the concatenated face strip (faces offset by
    input_width). Also writes 'cylindrical_intrinsics.txt' next to the
    LUT containing (f_theta, c_theta, f_Z, c_Z).

    Parameters
    ----------
    lut_file :
        Open binary file object the LUT is np.save'd into; its .name is
        also used to locate the intrinsics file.
    output_width, output_height :
        Panorama size in pixels.
    """
    # vertical extent of the cylinder in world units
    bottom = -0.5
    top = 0.5
    lutx = np.zeros((output_height, output_width))
    luty = np.zeros((output_height, output_width))
    thetas = np.linspace(-np.pi, np.pi, num=output_width, endpoint=False)
    heights = np.linspace(bottom, top, num=output_height,
                          endpoint=True)
    # CARLA parameters
    input_width = IMAGE_WIDTH
    input_height = IMAGE_HEIGHT
    fov = FOV
    # pinhole focal length from the horizontal FOV (FOV is in degrees)
    focal = input_width / (2 * np.tan(fov * np.pi / 360))
    c_x = input_width * 0.5
    c_y = input_height * 0.5
    for r in tqdm(range(output_height), desc='Height', unit='pixel'):
        height = heights[r]
        for c in range(output_width):
            theta = thetas[c]
            # select raw image: pick the face whose optical axis is
            # closest, and rotate theta into that face's frame
            if -3 * np.pi / 4 <= theta < -np.pi / 4:
                ind = 1
                theta_offset = np.pi / 2 # left
            elif -np.pi / 4 <= theta < np.pi / 4:
                ind = 2
                theta_offset = 0 # forward
            elif np.pi / 4 <= theta < 3 * np.pi / 4:
                ind = 3
                theta_offset = -np.pi / 2 # right
            else:
                ind = 0
                theta_offset = -np.pi # backwards
            # get XYZ point
            X = np.sin(theta + theta_offset)
            Y = height
            Z = np.cos(theta + theta_offset)
            # project to raw image
            x = focal * X / Z + c_x
            y = focal * Y / Z + c_y
            # offset for given image
            x = x + ind * input_width
            # store in lookup table
            lutx[r, c] = x
            luty[r, c] = y
    lut = np.concatenate(
        [np.expand_dims(lutx, axis=-1), np.expand_dims(luty, axis=-1)], axis=-1)
    np.save(lut_file, lut)
    # solve for theta intrinsics (linear fit of column index vs theta)
    x0 = 0
    x1 = len(thetas) - 1
    theta0 = thetas[0]
    theta1 = thetas[-1]
    c_theta = (theta0 * x1 - theta1 * x0) / (theta0 - theta1)
    f_theta = (x0 - x1) / (theta0 - theta1)
    # solve for Z intrinsics
    y0 = 0
    y1 = len(heights) - 1
    Z0 = heights[0]
    Z1 = heights[-1]
    c_Z = (Z0 * y1 - Z1 * y0) / (Z0 - Z1)
    f_Z = (y0 - y1) / (Z0 - Z1)
    # NOTE(review): relies on lut_file being a real on-disk file so that
    # Path(lut_file.name) points next to the LUT -- confirm callers never
    # pass an in-memory buffer.
    intrinsics_file = Path(lut_file.name).with_name("cylindrical_intrinsics.txt")
    # save intrinsics to file
    with intrinsics_file.open('w') as f:
        f.write('%.15f %.15f %.15f %.15f' % (f_theta, c_theta, f_Z, c_Z))
def load_cylindrical_intrinsics(intrinsics_file: Path = Path("./cylindrical_intrinsics.txt")):
    """Load the cylindrical intrinsics written by make_cylindrical_lut.

    :param intrinsics_file: path to the space-separated intrinsics file.
    :return: (f_theta, c_theta, f_Z, c_Z) as a tuple of np.float32
    """
    # Use a context manager so the handle is closed (the original leaked
    # it), and return a concrete tuple as documented rather than a
    # one-shot generator.
    with intrinsics_file.open("r") as f:
        txt = f.read()
    return tuple(np.float32(t) for t in txt.split(' '))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--cylindrical_lut", "-c",
                        help="Cylindrical lookup table file.")
    parser.add_argument("--spherical_lut", "-s",
                        help="Spherical lookup table file.")
    parser.add_argument("--output_width", "-ow",
                        type=int,
                        default=2048,
                        help="Output width (in pixels)")
    parser.add_argument("--output_height", "-oh",
                        type=int,
                        default=1024,
                        help="Output height (in pixels)")
    args = parser.parse_args()
    # Bug fix: `'cylindrical_lut' in args` tests attribute *presence* on
    # the Namespace, which is always True (argparse sets a None default),
    # so both branches always ran and crashed on open(None). Test the
    # value instead.
    if args.cylindrical_lut is not None:
        print("Making cylindrical lookup table...")
        make_cylindrical_lut(open(args.cylindrical_lut, 'wb+'), args.output_width, args.output_height)
    if args.spherical_lut is not None:
        print("Making spherical lookup table...")
        make_spherical_lut(open(args.spherical_lut, 'wb+'), args.output_width, args.output_height)
|
# Simple neural network with multiple inputs numpy code
import numpy as np
# One weight per input feature: [toes, win/loss record, fans].
weights = np.array([0.1, 0.2, 0])


def neural_network(input_vector, weights):
    """Return the prediction: the dot product of inputs and weights.

    Renamed the parameter from `input` to avoid shadowing the builtin.
    """
    return input_vector.dot(weights)


toes = np.array([8.5, 9.5, 9.9, 9.0])
wlrec = np.array([0.65, 0.8, 0.8, 0.9])
nfans = np.array([1.2, 1.3, 0.5, 1.0])

# first game's features only
features = np.array([toes[0], wlrec[0], nfans[0]])
pred = neural_network(features, weights)
print(pred)
""" File: P1_utility_functions.py
Utility functions used in Phase 1 Pre-processing of the raw chat (.csv) file
"""
import os.path
import re
import nltk
import gensim
from nltk.stem import WordNetLemmatizer
def getStopWords(stopWordFileName):
    """Reads stop-words text file which is assumed to have one word per line.
    Returns stopWordDict mapping each lower-cased word to None (used as a set).
    """
    stopWordDict = {}
    # `with` guarantees the handle is closed; the original leaked it.
    with open(stopWordFileName, 'r') as stopWordFile:
        for line in stopWordFile:
            stopWordDict[line.strip().lower()] = None
    return stopWordDict
def getFileName(prompt):
    """Prompt repeatedly until the user names an existing file; return it."""
    while True:
        candidate = input(prompt + " ")
        if not os.path.exists(candidate):
            print("File not found! Make sure that the file is inside this directory.")
            continue
        return candidate
def readRawChats(inFile):
    """Read the raw chat .csv and split it into per-chat transcripts.

    A new transcript starts on any line whose first six characters hold
    two '/' (a date such as '1/2/21'). Everything before the first date
    line (the column-headings row) is discarded. Returns a list of lists
    of stripped lines, one inner list per chat.

    Fixes from the original: the file handle is now closed via `with`,
    and the parameter is no longer shadowed by the handle.
    """
    transcriptList = []
    currentTranscriptLines = []
    dateAtStartCount = 0
    # NOTE: .csv file assumed to have a column-headings line
    with open(inFile, "r") as chatFile:
        for line in chatFile:
            if line[:6].count("/") == 2:
                dateAtStartCount += 1
                if dateAtStartCount > 1:
                    # flush the finished transcript before starting the next;
                    # the very first date line instead drops the header lines
                    transcriptList.append(currentTranscriptLines)
                currentTranscriptLines = [line.strip()]
            else:
                currentTranscriptLines.append(line.strip())
    transcriptList.append(currentTranscriptLines)
    return transcriptList
def findInitialQuestion(transList, transIndex):
    """Return the 'Initial Question' field from the first line of a chat.

    The field sits between the second and third commas of transList[0];
    when there is no third comma the slice stops one character short of
    the end of the line. Returns None when the field is missing or
    empty. (transIndex is accepted but unused.)
    """
    header = transList[0]
    firstCommaIndex = header.find(",")
    if firstCommaIndex == -1:
        print("First comma not found")
        return None
    secondCommaIndex = header.find(",", firstCommaIndex + 1)
    if secondCommaIndex == -1:
        print("Second comma not found")
        return None
    thirdCommaIndex = header.find(",", secondCommaIndex + 1)
    if thirdCommaIndex == -1:
        thirdCommaIndex = len(header) - 1
    #print(secondCommaIndex, thirdCommaIndex)
    if secondCommaIndex + 1 == thirdCommaIndex:
        return None
    return header[secondCommaIndex + 1:thirdCommaIndex]
def generateTranscriptDialogList(trans):
    """Split a transcript (list of lines) into per-utterance strings.

    The lines are merged into one string, which is then cut at every
    HH:MM:SS time-stamp so each piece starts with its time-stamp. A
    transcript containing no time-stamps comes back as a single piece.
    """
    transStr = " ".join(trans)  # merge transcript back to a single string
    # indexes two characters before each ##:##:## pattern
    stampStarts = [
        i - 2
        for i in range(2, len(transStr) - 6)
        if transStr[i] == ":" and transStr[i + 3] == ":"
        and transStr[i + 1:i + 3].isdigit() and transStr[i + 4:i + 6].isdigit()
    ]
    if not stampStarts:
        return [transStr]
    dialogList = [transStr[a:b] for a, b in zip(stampStarts, stampStarts[1:])]
    dialogList.append(transStr[stampStarts[-1]:])
    return dialogList
def findInitialQuestionInDialog(dialogList, chatIndex):
    """ If the 'Initial question' column in the .csv file was empty, this
    function is called to find and return the initial question from the
    chat dialog.

    Skips greeter lines ("help you", "welcome", "info desk") and anything
    shorter than 40 characters; returns None when nothing qualifies.
    (chatIndex is unused; kept for interface compatibility.)
    """
    greeterMarkers = ("help you", "welcome", "info desk")
    for line in dialogList:
        lowered = line.lower()
        # The try/except that wrapped this test in the original was dead
        # code: none of these operations can raise for string elements.
        if len(line) >= 40 and not any(m in lowered for m in greeterMarkers):
            return line
    return None
def removeTags(fileStr):
    """
    Removes all tags from the chat that start with '<xyz' and end with '</xyz'.

    Scans for '<name ...' openers, builds the matching '</name>' closer
    and deletes the whole span. Openers without a following space, or
    without a matching closer, are left in place and the scan continues.
    """
    current = 0
    while True:
        #print("Next char:",fileStr[current])
        openAngleBracketIndex = fileStr.find('<',current)
        if openAngleBracketIndex == -1:
            break  # no more '<' anywhere: done
        spaceIndex = fileStr.find(' ', openAngleBracketIndex+1)
        if spaceIndex == -1:
            break  # '<' with no later space: nothing more strippable
        else:
            current = spaceIndex
            # e.g. '<a href...' -> closing tag '</a>'
            endStr = "</"+fileStr[openAngleBracketIndex+1:spaceIndex]+'>'
            endIndex = fileStr.find(endStr, spaceIndex)
            if endIndex == -1:
                # unmatched opener: leave it, keep scanning past the space
                current = spaceIndex
            else:
                endIndex = endIndex+len(endStr)
                #print(openAngleBracketIndex, endStr, endIndex+len(endStr))
                # splice out the whole <name ...>...</name> span, then
                # re-scan from the same position (tags may be adjacent)
                fileStr = fileStr[:openAngleBracketIndex]+ \
                    fileStr[endIndex:]
                #print(fileStr)
                current = openAngleBracketIndex
    return fileStr
"""
NOTE: The nltk.pos_tag function returns the Penn Treebank tag for the word but we just want
whether the word is a noun, verb, adjective or adverb. We need a short simplification routine to translate from
the Penn tag to a simpler tag.
"""
def simplify(penn_tag):
    """ Simplify Penn tags to n (NOUN), v (VERB), a (ADJECTIVE) or r (ADVERB).

    Unknown tags fall back to 'r' so the result is always a valid
    WordNetLemmatizer pos argument. The original's trailing
    `return 'other'` was unreachable and has been removed.
    """
    mapping = {'J': 'a', 'R': 'r', 'V': 'v', 'N': 'n'}
    return mapping.get(penn_tag[0], 'r')
def preprocess(text, stop_words, POS_list):
    """ Preprocesses the text to remove stopwords, lemmatizes each word and
    only includes words whose simplified part-of-speech is in POS_list."""
    tokens = gensim.utils.simple_preprocess(str(text), deacc=True)
    lemmatizer = WordNetLemmatizer()
    kept = []
    for token, penn in nltk.pos_tag(tokens):
        pos = simplify(penn)
        if token not in stop_words and pos in POS_list:
            kept.append(lemmatizer.lemmatize(token, pos))
    return kept
def writeInitialQuestion(chatIndexInCSV, questionFile, wholeChatsFileTxt, question, questionCount, stopWordsDict, POS_list):
    """ Write a cleaned up version of the initial question to the question file.

    Returns questionCount, incremented when a non-empty cleaned question
    was written. (chatIndexInCSV is unused; kept for interface
    compatibility.)
    """
    lemmatizer = nltk.WordNetLemmatizer()
    question = question.lower()
    # strip the leading "##:##:## - person:" prefix when present
    colonCount = question.count(":")
    if colonCount >= 3:  # time-stamp ##:##:## - person: question
        colonOneIndex = question.find(":")
        colonTwoIndex = question.find(":", colonOneIndex+1)
        colonThreeIndex = question.find(":", colonTwoIndex+1)
        question = question[colonThreeIndex+1:]
    elif colonCount >= 1:
        colonOneIndex = question.find(":")
        question = question[colonOneIndex+1:]
    # Decode the HTML entities the chat export leaves behind. The source
    # file was mangled here: the entity strings had themselves been
    # entity-decoded, which made the first replace a syntax error
    # (''' instead of '&#39;'). Restored the intended entities.
    question = question.replace('&#39;', "'")
    question = question.replace('/', " ")
    question = question.replace('&nbsp;', " ")
    question = question.replace('&quot;', '"')
    ### HERE CLEAN UP <xyz ......</xyz>, e.g., <a href.....</a>, <span ... </span>
    question = removeTags(question)
    # unify sentence terminators onto a single sentinel, then split
    question = question.replace('.', 'Z')
    question = question.replace('!', 'Z')
    question = question.replace('?', 'Z')
    masterWordList = []
    for sentence in question.split("Z"):
        cleanQuestion = ""
        for word in sentence.split():
            # keep only ascii lower-case letters
            cleanWord = "".join(char for char in word if 'a' <= char <= 'z')
            if 0 < len(cleanWord) < 30:  # upper bound to eliminate url's
                cleanQuestion += lemmatizer.lemmatize(cleanWord) + " "
        masterWordList.extend(preprocess(cleanQuestion, stopWordsDict, POS_list))
    chatCleaned = " ".join(masterWordList)
    if len(chatCleaned) > 0:
        questionFile.write(chatCleaned)
        wholeChatsFileTxt.write(chatCleaned)
        questionCount += 1
    return questionCount
def writeChatDialog(excelLineNumber, wholeChatsFile, wholeChatsFileTxt, dialogList, stopWordsDict, POS_list):
    """ Writes a chat's dialog to a line in the text file. """
    for utterance in dialogList:
        writeInitialQuestion(excelLineNumber, wholeChatsFile, wholeChatsFileTxt, utterance, 0, stopWordsDict, POS_list)
        # separate end of this line with start of next line
        wholeChatsFile.write(" ")
        wholeChatsFileTxt.write(" ")
def writeWholeChatsToFile(transcriptDialogList, dataFileName, stopWordsDict, POS_list):
    """ Writes a whole chat's dialog one per line to a text file. Removed from
    the line of text is:
    1) time-stamps and names: e.g., '13:45:42 - Jordan:'
    2) all punctuations
    """
    wholeChatsCount = 0
    # context managers guarantee the output files are closed even if a
    # chat raises part-way through (the original leaked them on error)
    with open(dataFileName+".csv", "w") as wholeChatsFile, \
            open(dataFileName+".txt", "w") as wholeChatsFileTxt:
        for transcriptDialog in transcriptDialogList:
            if transcriptDialog[1] is not None:
                wholeChatsFile.write(str(transcriptDialog[0])+",")
                # check to see if initial question is already in the chat dialog
                timeStampAndNameList = re.findall(r'[0-9][0-9]:[0-9][0-9]:[0-9][0-9] - [\w\s]+:', transcriptDialog[1])
                if len(timeStampAndNameList) == 0:  # no time-stamp so from 'initial question' column of .csv
                    # write initial question to file since it is not part of the chat dialog
                    writeInitialQuestion(transcriptDialog[0], wholeChatsFile, wholeChatsFileTxt, transcriptDialog[1], 0, stopWordsDict, POS_list)
                    wholeChatsFile.write(" ")
                    wholeChatsFileTxt.write(" ")
                writeChatDialog(transcriptDialog[0], wholeChatsFile, wholeChatsFileTxt, transcriptDialog[2], stopWordsDict, POS_list)
                wholeChatsCount += 1
                wholeChatsFile.write("\n")
                wholeChatsFileTxt.write("\n")
    print("Whole Chats Count:", wholeChatsCount, "written to", dataFileName+".txt")
def writeQuestionsOnlyToFile(transcriptDialogList, dataFileName, stopWordsDict, POS_list):
    """ Writes only the initial questions one per line to a text file.
    """
    questionCount = 0
    # context managers guarantee the output files are closed even on
    # error (the original leaked them if a question raised)
    with open(dataFileName+".csv", "w") as questionFile, \
            open(dataFileName+".txt", "w") as questionTxtFile:
        for transcriptDialog in transcriptDialogList:
            if transcriptDialog[1] is not None:
                currentCount = questionCount
                questionCount = writeInitialQuestion(transcriptDialog[0], questionFile, questionTxtFile, transcriptDialog[1], questionCount, stopWordsDict, POS_list)
                # only terminate the line when something was written
                if currentCount < questionCount:
                    questionFile.write("\n")
                    questionTxtFile.write("\n")
    print("Total Question Count:", questionCount, "written to", dataFileName+".txt")
|
from __future__ import absolute_import
from .startup import set_line, set_teams, set_draft, draft
from .reporting import standings
from .utils import *
from .config import *
# Explicit public API: `from <package> import *` exposes the submodule
# names listed here.
__all__ = ['startup', 'reporting', 'utils', 'config']
|
from discord.ext import commands
from .responder import ButtonResponse, SelectMenuResponse
class ComponentCog(commands.Cog):
    """Cog that turns raw component interactions into bot events."""

    def __init__(self, bot):
        # The original declared `global Guild_settings, Texts, get_txt`
        # here without ever assigning them -- the declarations were
        # no-ops and have been removed.
        self.bot = bot

    @commands.Cog.listener()
    async def on_socket_response(self, pl):
        """Dispatch button_click / menu_select from raw gateway payloads.

        Only INTERACTION_CREATE payloads of interaction type 3 (message
        components) are handled; component_type 2 is a button and 3 a
        select menu.
        """
        if pl["t"] != "INTERACTION_CREATE":
            return
        data = pl["d"]
        if data["type"] != 3:
            return
        if data["data"]["component_type"] == 2:
            resp = ButtonResponse(self.bot, data, self.bot._get_state())
            await resp._fetch()
            self.bot.dispatch("button_click", resp)
        elif data["data"]["component_type"] == 3:
            resp = SelectMenuResponse(self.bot, data, self.bot._get_state())
            await resp._fetch()
            self.bot.dispatch("menu_select", resp)
def setup(_bot):
    # discord.py extension entry point: keep a module-level reference to
    # the bot and register the cog on it.
    global bot
    bot = _bot
    _bot.add_cog(ComponentCog(_bot))
|
#!/usr/bin/env python3
# This is free and unencumbered software released into the public
# domain.
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a
# compiled binary, for any purpose, commercial or non-commercial, and
# by any means.
# In jurisdictions that recognize copyright laws, the author or
# authors of this software dedicate any and all copyright interest in
# the software to the public domain. We make this dedication for the
# benefit of the public at large and to the detriment of our heirs
# and successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to
# this software under copyright law.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# For more information, please refer to <http://unlicense.org>
"""Update ends of markdown files."""
import posixpath
import re
import common
# Footer appended to every markdown file. `{toplevel}` is the relative
# path back to the repository root; `{extralinks}` carries the
# previous/next navigation line for chapter files (empty otherwise);
# `{readmeheader}` is the README anchor to link back to.
END_TEMPLATE = """\
If you have trouble with this tutorial please [tell me about
it]({toplevel}/contact-me.md) and I'll make this tutorial better. If you
like this tutorial, please [give it a
star]({toplevel}/README.md#how-can-i-thank-you-for-writing-and-sharing-this-tutorial).
You may use this tutorial freely at your own risk. See
[LICENSE]({toplevel}/LICENSE).
{extralinks}[List of contents]({toplevel}/README.md#{readmeheader})
"""
# Matches numbered list items that link to chapter files, e.g.
# "1. [Title](section/file.md)"; group 1 captures the .md path.
CHAPTER_LINK_REGEX = r'^\d+\. \[.*\]\((.*\.md)\)$'
def get_filenames():
    """Get chapter files and other files from README.

    Return a two-tuple of chapter file names and other file names as
    iterables of strings.
    """
    chapters = []
    with open('README.md', 'r') as readme:
        # skip everything up to the content-list heading
        while readme.readline().strip() != "## List of contents":
            pass
        # now read the content list itself
        for raw_line in readme:
            stripped = raw_line.strip()
            if stripped.startswith('## '):
                break  # next heading ends the content list
            if not stripped:
                continue  # blank line inside the list
            match = re.search(CHAPTER_LINK_REGEX, stripped)
            if match is not None:
                # it's a link to a chapter
                chapters.append(match.group(1))
    others = set(common.get_markdown_files()) - set(chapters)
    return chapters, others
def update_end(filename, end):
    """Add *** and end to a file if it doesn't have them already.

    filename should be relative to the toplevel using / as a path
    separator.
    """
    end = '\n***\n\n' + end
    with common.slashfix_open(filename, 'r') as f:
        content = f.read()
    if content.endswith(end):
        # nothing to do
        print(" Has correct end:", filename)
        return
    marker = '\n***\n'
    if marker in content:
        # strip the stale footer before appending the new one
        print(" Removing old end:", filename)
        truncated = content[:content.index(marker)]
        with common.slashfix_open(filename, 'w') as f:
            f.write(truncated)
    print(" Adding end:", filename)
    with common.slashfix_open(filename, 'a') as f:
        f.write(end)
def main():
    """Refresh the footer of every markdown file listed in the README."""
    chapter_files, other_files = get_filenames()
    # previous of the first chapter and next of the last chapter both
    # point back at the README
    prevs = ['README.md'] + chapter_files[:-1]
    nexts = chapter_files[1:] + ['README.md']
    print("Chapter files:")
    for prevpath, thispath, nextpath in zip(prevs, chapter_files, nexts):
        # all paths should be like 'section/file.md'
        where = posixpath.dirname(thispath)
        links = "[Previous](%s) | [Next](%s) |\n" % (
            posixpath.relpath(prevpath, where),
            posixpath.relpath(nextpath, where),
        )
        update_end(thispath, END_TEMPLATE.format(
            toplevel='..', extralinks=links, readmeheader=where))
    print()
    print("Other files:")
    for filename in other_files:
        where = posixpath.dirname(filename)
        update_end(filename, END_TEMPLATE.format(
            toplevel=posixpath.relpath('.', where),
            extralinks="", readmeheader='list-of-contents'))


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch, ImproperlyConfigured
from elasticsearch_dsl import Search
from rest_framework import viewsets
from rest_framework_elasticsearch import es_views, es_filters
from api.pagination import (
LargeResultsSetPagination,
SmallResultsSetPagination
)
from api.serializers import (
CategoryListSerializer,
ItemSerializer,
BusinessAreaListSerializer,
PeopleSerializer,
ItemListSerializer,
CategorySerializer,
BusinessAreaSerializer
)
from person.models import Person
from register.models import Category, Item, BusinessArea
from search.indexes import ItemIndex, ItemSearch
from search.pagination import ElasticPageNumberPagination
class BaseNestedModelViewSet(viewsets.ModelViewSet):
    """ModelViewSet that picks a per-action serializer and, for the
    `list` action, restricts the queryset to root-level objects."""

    serializers = {}

    def get_serializer_class(self):
        # fall back to the regular serializer_class when the action has
        # no dedicated entry in `serializers`
        return self.serializers.get(self.action, self.serializer_class)

    def get_queryset(self):
        queryset = super().get_queryset()
        if self.action != 'list':
            return queryset
        return queryset.filter(level=0)
class CategoryViewSet(viewsets.ModelViewSet):
    """
    View set for Category
    retrieve:
    Detail view of a single Category
    list:
    List view of Categorys
    create:
    Add new Category
    update:
    Update existing Category
    patch:
    Partially update existing Category
    delete:
    Delete a Category
    """
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    # NOTE(review): `serializers` is only consulted by
    # BaseNestedModelViewSet.get_serializer_class, but this class extends
    # viewsets.ModelViewSet directly, so the mapping appears unused --
    # confirm whether BaseNestedModelViewSet was the intended base class.
    serializers = {
        'list': CategoryListSerializer,
    }
    pagination_class = LargeResultsSetPagination
class ItemViewSet(viewsets.ModelViewSet):
    """
    View set for Item
    retrieve:
    Detail view of a single Item
    list:
    List view of Items
    create:
    Add new Item
    update:
    Update existing Item
    patch:
    Partially update existing Item
    delete:
    Delete a Item
    """
    queryset = Item.objects.all()
    serializer_class = ItemSerializer
    # NOTE(review): `serializers` is only consulted by
    # BaseNestedModelViewSet.get_serializer_class; with ModelViewSet as
    # the base this mapping appears unused -- confirm intended base class.
    serializers = {
        'list': ItemListSerializer,
    }
    pagination_class = SmallResultsSetPagination
class BusinessAreaViewSet(viewsets.ModelViewSet):
    """
    View set for BusinessArea
    retrieve:
    Detail view of a single BusinessArea
    list:
    List view of BusinessAreas
    create:
    Add new BusinessArea
    update:
    Update existing BusinessArea
    patch:
    Partially update existing BusinessArea
    delete:
    Delete a BusinessArea
    """
    queryset = BusinessArea.objects.all()
    serializer_class = BusinessAreaSerializer
    # NOTE(review): `serializers` is only consulted by
    # BaseNestedModelViewSet.get_serializer_class; with ModelViewSet as
    # the base this mapping appears unused -- confirm intended base class.
    serializers = {
        'list': BusinessAreaListSerializer,
    }
    pagination_class = LargeResultsSetPagination
class PeopleViewSet(viewsets.ModelViewSet):
    """
    View set for People
    retrieve:
    Detail view of one Person
    list:
    List view of People
    create:
    Add a new Person
    update:
    Update an existing Person
    patch:
    Partially update an existing Person
    delete:
    Delete a Person
    """
    pagination_class = LargeResultsSetPagination
    serializer_class = PeopleSerializer
    queryset = Person.objects.all()
class ItemSearchView(es_views.ListElasticAPIView):
    """Elasticsearch-backed list endpoint over Item documents, with field
    filters (area/category/owner) and free-text search."""
    es_paginator = ElasticPageNumberPagination()
    queryset = Item.objects.all()
    es_client = Elasticsearch()
    es_model = ItemIndex
    es_filter_backends = (
        es_filters.ElasticFieldsFilter,
        es_filters.ElasticSearchFilter,
    )
    es_filter_fields = (
        es_filters.ESFieldFilter('area', 'areas'),
        es_filters.ESFieldFilter('category', 'categories'),
        es_filters.ESFieldFilter('owner', 'owner'),
    )
    es_search_fields = (
        'name',
        'description',
        'owner',
        'areas',
        'categories',
    )

    def get_es_search(self):
        """Build a Search bound to this view's client and the es_model index."""
        if self.es_model is None:
            msg = "Cannot use %s on a view which does not have the 'es_model'"
            raise ImproperlyConfigured(msg % self.__class__.__name__)
        client = self.get_es_client()
        return Search(using=client, index=self.es_model()._get_index())
|
"""Module for interacting with Evergreen."""
import logging
from copy import deepcopy
from dsi.evergreen import helpers
# Fallback base URL used by Client when no configuration supplies "ui_server_host".
DEFAULT_EVERGREEN_URL = "https://evergreen.mongodb.com"
"""The default Evergreen URL."""
class EvergreenError(Exception):
    """Base exception for errors raised while talking to Evergreen."""


class Empty(EvergreenError):
    """Raised when Evergreen unexpectedly returns an empty response."""
class Client:
    """Allows for interaction with an Evergreen server.
    2020-01-10 Henrik Ingo: I deleted all methods that weren't used. See git history if you need
    something. The idea is to get rid of all this and migrate to
    https://pypi.org/project/evergreen.py/
    """

    def __init__(self, configuration=None, verbose=True):
        """Create a new handle to an Evergreen server.
        :param dict configuration: (optional) contents of personal Evergreen YAML config file
        :param bool verbose: (optional) Control the verbosity of logging statements
        """
        # Route construction through __setstate__ so normal construction and
        # unpickling share a single initialization path.
        self.__setstate__({"configuration": configuration, "verbose": verbose})

    def __getstate__(self):
        """
        Get state for pickle support.
        Multiprocessor uses pickle to serialize and deserialize data to sub processes. However,
        complex types cannot be pickled. They can be recreated with the core state (and
        this is what this calls does).
        :return: The pickled state.
        """
        return {"configuration": self.configuration, "verbose": self.verbose}

    def __setstate__(self, state):
        """
        Set state for pickle support.
        Clear the lazy params so that the are recreated on demand.
        :param dict state: The pickled state.
        """
        self.configuration = state["configuration"]
        self.verbose = state["verbose"]
        self.logger = logging.getLogger("evergreen")
        self.logger.level = logging.INFO if self.verbose else logging.WARNING
        # Parse the config file. EAFP: a missing/None configuration or missing
        # keys drops us to anonymous access against the default server.
        try:
            self.headers = {
                "api-user": self.configuration["user"],
                "api-key": self.configuration["api_key"],
            }
            self.base_url = self.configuration["ui_server_host"]
        except (TypeError, KeyError):
            self.logger.warning("Using default evergreen credentials.")
            self.base_url = DEFAULT_EVERGREEN_URL
            self.headers = {}

    def _redact_copy(self):
        """
        Get a copy of the state and redact any sensitive info.
        :returns: A redacted copy of the state.
        """
        copy = deepcopy(self.__getstate__())
        configuration = copy.get("configuration")
        # Bug fix: configuration may be None (the constructor default); the
        # previous unconditional membership tests ("api_key" in None) raised
        # TypeError, which made __str__/__repr__ crash for default clients.
        if configuration:
            if "api_key" in configuration:
                configuration["api_key"] = "XXXXXXXXX"
            if "evergreen" in configuration and "api_key" in configuration["evergreen"]:
                configuration["evergreen"]["api_key"] = "XXXXXXXXX"
            if "github" in configuration and "token" in configuration["github"]:
                configuration["github"]["token"] = "XXXXXXXXX"
        return copy

    def __str__(self):
        """
        Get a readable string for this job.
        :returns: A readable string.
        """
        copy = self._redact_copy()
        return str(copy)

    def __repr__(self):
        """
        Get an unambiguous string for this job.
        :returns: An unambiguous string.
        """
        copy = self._redact_copy()
        # Bug fix: the old format string "<{}{}(...)>" fused the module and
        # class names together (e.g. "<dsi.evergreen.clientClient(...)>").
        return "<{}.{}({!r})>".format(self.__module__, self.__class__.__name__, copy)

    def query_project_history(self, project):
        """Gets all the information on the most recent revisions.
        Evergreen endpoint: /rest/v1/projects/{project_name}/versions
        :param str project: The name of the Evergreen project (e.g. 'performance', 'sys-perf')
        :rtype: dict
        """
        return helpers.get_as_json(
            "{}/rest/v1/projects/{}/versions".format(self.base_url, project), headers=self.headers
        )

    def query_revision(self, revision_id):
        """Get information on a given revision.
        Evergreen endpoint: /rest/v1/versions/{revision_id}
        :param str revision_id: The Evergreen ID of a particular revision or version
        :rtype: dict
        """
        return helpers.get_as_json(
            "{}/rest/v1/versions/{}".format(self.base_url, revision_id), headers=self.headers
        )

    def query_build_variant(self, build_variant_id):
        """Get information on a particular build variant.
        Evergreen endpoint: /rest/v1/builds/{build_id}
        :param str build_variant_id: The Evergreen ID of a particular build variant
        :rtype: dict
        """
        return helpers.get_as_json(
            "{url}/rest/v1/builds/{build_id}".format(url=self.base_url, build_id=build_variant_id),
            headers=self.headers,
        )

    def query_perf_results(self, task_id):
        """Get the 'perf.json' performance results for given task_id
        Evergreen endpoint: /plugin/json/task/{task_id}/perf/
        (Tested on sys-perf, but mongo-perf should be the same.)
        :param str task_id: task_id of a specific build+variant
        :rtype: dict
        """
        return helpers.get_as_json(
            "{}/plugin/json/task/{}/perf/".format(self.base_url, task_id), headers=self.headers
        )
|
from __future__ import print_function
import os
import cv2
import pickle
import gzip
import numpy as np
from retinaface import RetinaFace
# Detection / runtime configuration.
thresh = 0.7      # minimum detection confidence
flip = False      # also run detection on the flipped image?
im_scale = 0.5    # input downscale factor passed to the detector
img_dir = "pic"   # directory of numerically named images (e.g. 1.jpg, 2.jpg, ...)
gpuid = 0

detector = RetinaFace('./model/R50', 0, gpuid, 'net3')

data = []
img_list = os.listdir(img_dir)
# Sort numerically on the file stem so "10.jpg" follows "9.jpg".
img_list.sort(key=lambda x: int(os.path.splitext(x)[0]))
for idx, img_name in enumerate(img_list):
    img_path = os.path.join(img_dir, img_name)
    img = cv2.imread(img_path)
    if img is None:
        # Bug fix: cv2.imread returns None for unreadable or non-image files;
        # skip them instead of crashing inside detector.detect.
        print("[{}/{}]".format(idx + 1, len(img_list)), img_name, "unreadable, skipped")
        continue
    faces, landmarks = detector.detect(img, thresh, scales=[im_scale], do_flip=flip)
    data.append((img_name, faces, landmarks))
    num = faces.shape[0]
    print("[{}/{}]".format(idx + 1, len(img_list)), img_name, num, faces.shape, landmarks.shape)

# Persist all (name, faces, landmarks) tuples as a gzipped pickle.
with gzip.open('pred.pkl.gz', "wb") as fp:
    pickle.dump(data, fp)
|
import os

# Home directory. expanduser("~") is robust even when $HOME is unset (e.g. on
# Windows or in stripped-down environments), where os.getenv("HOME") would
# return None and make every path concatenation below raise TypeError.
USER_HOME = os.path.expanduser("~")

# index dir
index_sent_cache_dir = os.path.join(USER_HOME, "a_path_to")

# concept net
conceptnet_dir = os.path.join(USER_HOME, "a_path_to")
rel2idx_path = os.path.join(conceptnet_dir, "assertions.csv_en.csv_rel2idx.txt")
triplet2template_path = os.path.join(conceptnet_dir, "template.json")

# raw corpus
omcs_dir = os.path.join(USER_HOME, "a_path_to")
arc_dir = os.path.join(USER_HOME, "a_path_to")
wikipedia_dir = os.path.join(USER_HOME, "a_path_to")
openbookqa_dir = os.path.join(USER_HOME, "a_path_to")
bookcorpus_dir = os.path.join(USER_HOME, "a_path_to")

# KG-BERT's DATAIDR
kgbert_data_dir = os.path.join(USER_HOME, "a_path_to")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from object import Object
from separator import Separator
from size import Size
from objects import *
class Box(Object):
    """This class represents a box that can be subdivided by separators."""
    __name__ = "Box"

    def __init__(self):
        Object.__init__(self)
        # Separators splitting the box; each owns a control point that is
        # appended to self.handler.control after the eight fixed handles.
        self.separators = list()

    def get_properties(self):
        """Expose 'separators' in addition to the base Object properties."""
        return Object.get_properties(self) + ["separators"]

    def post(self):
        """Synchronize the eight resize handles and the four magneto anchors
        with the current x/y/width/height geometry."""
        # Corner handles.
        self.handler.control[NORTHWEST].x = self.x
        self.handler.control[NORTHWEST].y = self.y
        self.handler.control[NORTHEAST].x = self.x + self.width
        self.handler.control[NORTHEAST].y = self.y
        self.handler.control[SOUTHWEST].x = self.x
        self.handler.control[SOUTHWEST].y = self.y + self.height
        self.handler.control[SOUTHEAST].x = self.x + self.width
        self.handler.control[SOUTHEAST].y = self.y + self.height
        # Edge-midpoint handles.
        self.handler.control[NORTH].x = self.x + self.width / 2
        self.handler.control[NORTH].y = self.y
        self.handler.control[SOUTH].x = self.x + self.width / 2
        self.handler.control[SOUTH].y = self.y + self.height
        self.handler.control[WEST].x = self.x
        self.handler.control[WEST].y = self.y + self.height / 2
        self.handler.control[EAST].x = self.x + self.width
        self.handler.control[EAST].y = self.y + self.height / 2
        # Magnetos follow the edge-midpoint handles.
        self.magnetos[LEFT].x = self.handler.control[WEST].x
        self.magnetos[LEFT].y = self.handler.control[WEST].y
        self.magnetos[RIGHT].x = self.handler.control[EAST].x
        self.magnetos[RIGHT].y = self.handler.control[EAST].y
        self.magnetos[TOP].x = self.handler.control[NORTH].x
        self.magnetos[TOP].y = self.handler.control[NORTH].y
        self.magnetos[BOTTOM].x = self.handler.control[SOUTH].x
        self.magnetos[BOTTOM].y = self.handler.control[SOUTH].y

    def draw(self, context):
        """Render the box (fill, then stroke) and all of its separators."""
        dash = list()
        context.set_dash(dash)
        context.set_line_width(self.thickness)
        context.save()
        context.new_path()
        context.translate(self.x, self.y)
        if (self.width > 0) and (self.height > 0):
            # Draw a unit rectangle under a (width, height) scale so that
            # gradient sources map onto the full box.
            context.scale(self.width, self.height)
            context.rectangle(0, 0, 1, 1)
            if self.fill_style == GRADIENT:
                context.set_source(self.gradient.gradient)
            elif self.fill_style == COLOR:
                context.set_source_rgba(self.fill_color.red, self.fill_color.green,
                                        self.fill_color.blue, self.fill_color.alpha)
            context.fill_preserve()
        context.restore()
        context.set_source_rgba(self.stroke_color.red, self.stroke_color.green,
                                self.stroke_color.blue, self.stroke_color.alpha)
        context.stroke()
        for i, separator in enumerate(self.separators):
            separator.synchronize(self)
            separator.draw(context)
            # Keep the separator's control point registered after the fixed ones.
            self.handler.control[ANONIMOUS + i] = separator.control
        Object.draw(self, context)

    def transform(self, x, y):
        """Move the separator addressed by the active drag direction to (x, y),
        clamped to the box bounds."""
        direction = self.direction
        if len(self.separators) > 0:
            if direction >= ANONIMOUS:
                separator = self.separators[direction - ANONIMOUS]
                if separator.direction == VERTICAL and x >= self.x and x - self.x <= self.width:
                    separator.position = x - self.x
                elif separator.direction == HORIZONTAL and y >= self.y and y - self.y <= self.height:
                    separator.position = y - self.y

    def add_separator_vertical(self, position):
        """Add a vertical separator at the given x-offset within the box."""
        separator = Separator()
        separator.position = position
        separator.direction = VERTICAL
        self.separators.append(separator)
        self.handler.control.append(separator.control)

    def add_separator_horizontal(self, position):
        """Add a horizontal separator at the given y-offset within the box."""
        separator = Separator()
        separator.position = position
        separator.direction = HORIZONTAL
        self.separators.append(separator)
        self.handler.control.append(separator.control)

    def remove_separator(self):
        """Remove the most recently added separator and its control point.

        Bug fix: the old code called self.separators.pop() unguarded, raising
        IndexError when no separators existed, and only popped the matching
        control point when the popped separator happened to be truthy (which
        could desynchronize the two lists).
        """
        if self.separators:
            self.separators.pop()
            self.handler.control.pop()

    def resize(self, x, y):
        """Resize via the base Object, then rescale separator positions
        proportionally to the size change."""
        source = Size()
        source.width = self.width
        source.height = self.height
        Object.resize(self, x, y)
        target = Size()
        target.width = self.width
        target.height = self.height
        if len(self.separators) > 0:
            for separator in self.separators:
                if separator.direction == HORIZONTAL and source.height and target.height:
                    separator.position = separator.position / source.height * target.height
                    separator.hidden = False
                elif separator.direction == VERTICAL and source.width and target.width:
                    separator.position = separator.position / source.width * target.width
                    separator.hidden = False
                else:
                    # A zero-sized source or target makes rescaling undefined;
                    # hide the separator instead.
                    separator.hidden = True
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
# Operator defaults shared by every task in the DAG below.
default_args = dict(
    owner="Google",
    depends_on_past=False,
    start_date="2021-03-01",
)
with DAG(
dag_id="cms_medicare.inpatient_charges",
default_args=default_args,
max_active_runs=1,
schedule_interval="@once",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
inpatient_2011_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="inpatient_2011_transform_csv",
startup_timeout_seconds=600,
name="cms_medicare_inpatient_charges_2011",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.cms_medicare.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/Medicare-Provider-Charge-Data/Downloads/Inpatient_Data_2011_CSV.zip",
"SOURCE_FILE": "files/data.zip",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/cms_medicare/inpatient_charges_2011/data_output.csv",
"CSV_HEADERS": '["provider_id","provider_name","provider_street_address","provider_city","provider_state","provider_zipcode","drg_definition","hospital_referral_region_description","total_discharges","average_covered_charges","average_total_payments","average_medicare_payments"]',
"RENAME_MAPPINGS": '{"Provider Id": "provider_id","Provider Name": "provider_name","Provider Street Address": "provider_street_address","Provider City": "provider_city","Provider State": "provider_state","Provider Zip Code": "provider_zipcode","DRG Definition": "drg_definition","Hospital Referral Region (HRR) Description": "hospital_referral_region_description","Total Discharges": "total_discharges","Average Covered Charges": "average_covered_charges","Average Total Payments": "average_total_payments","Average Medicare Payments": "average_medicare_payments"}',
"PIPELINE_NAME": "inpatient_charges_2011",
},
resources={"limit_memory": "2G", "limit_cpu": "1"},
)
# Run CSV transform within kubernetes pod
inpatient_2012_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="inpatient_2012_transform_csv",
startup_timeout_seconds=600,
name="cms_medicare_inpatient_charges_2012",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.cms_medicare.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/Medicare-Provider-Charge-Data/Downloads/Inpatient_Data_2012_CSV.zip",
"SOURCE_FILE": "files/data.zip",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/cms_medicare/inpatient_charges_2012/data_output.csv",
"CSV_HEADERS": '["provider_id","provider_name","provider_street_address","provider_city","provider_state","provider_zipcode","drg_definition","hospital_referral_region_description","total_discharges","average_covered_charges","average_total_payments","average_medicare_payments"]',
"RENAME_MAPPINGS": '{"Provider Id": "provider_id","Provider Name": "provider_name","Provider Street Address": "provider_street_address","Provider City": "provider_city","Provider State": "provider_state","Provider Zip Code": "provider_zipcode","DRG Definition": "drg_definition","Hospital Referral Region (HRR) Description": "hospital_referral_region_description","Total Discharges": "total_discharges","Average Covered Charges": "average_covered_charges","Average Total Payments": "average_total_payments","Average Medicare Payments": "average_medicare_payments"}',
"PIPELINE_NAME": "inpatient_charges_2012",
},
resources={"limit_memory": "2G", "limit_cpu": "1"},
)
# Run CSV transform within kubernetes pod
inpatient_2013_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="inpatient_2013_transform_csv",
startup_timeout_seconds=600,
name="cms_medicare_inpatient_charges_2013",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.cms_medicare.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/Medicare-Provider-Charge-Data/Downloads/Inpatient_Data_2013_CSV.zip",
"SOURCE_FILE": "files/data.zip",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/cms_medicare/inpatient_charges_2013/data_output.csv",
"CSV_HEADERS": '["provider_id","provider_name","provider_street_address","provider_city","provider_state","provider_zipcode","drg_definition","hospital_referral_region_description","total_discharges","average_covered_charges","average_total_payments","average_medicare_payments"]',
"RENAME_MAPPINGS": '{"Provider Id": "provider_id","Provider Name": "provider_name","Provider Street Address": "provider_street_address","Provider City": "provider_city","Provider State": "provider_state","Provider Zip Code": "provider_zipcode","DRG Definition": "drg_definition","Hospital Referral Region (HRR) Description": "hospital_referral_region_description","Total Discharges": "total_discharges","Average Covered Charges": "average_covered_charges","Average Total Payments": "average_total_payments","Average Medicare Payments": "average_medicare_payments"}',
"PIPELINE_NAME": "inpatient_charges_2013",
},
resources={"limit_memory": "2G", "limit_cpu": "1"},
)
# Run CSV transform within kubernetes pod
inpatient_2014_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="inpatient_2014_transform_csv",
startup_timeout_seconds=600,
name="cms_medicare_inpatient_charges_2014",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.cms_medicare.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/Medicare-Provider-Charge-Data/Downloads/Inpatient_Data_2014_CSV.zip",
"SOURCE_FILE": "files/data.zip",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/cms_medicare/inpatient_charges_2014/data_output.csv",
"CSV_HEADERS": '["provider_id","provider_name","provider_street_address","provider_city","provider_state","provider_zipcode","drg_definition","hospital_referral_region_description","total_discharges","average_covered_charges","average_total_payments","average_medicare_payments"]',
"RENAME_MAPPINGS": '{"Provider Id": "provider_id","Provider Name": "provider_name","Provider Street Address": "provider_street_address","Provider City": "provider_city","Provider State": "provider_state","Provider Zip Code": "provider_zipcode","DRG Definition": "drg_definition","Hospital Referral Region (HRR) Description": "hospital_referral_region_description","Total Discharges": "total_discharges","Average Covered Charges": "average_covered_charges","Average Total Payments": "average_total_payments","Average Medicare Payments": "average_medicare_payments"}',
"PIPELINE_NAME": "inpatient_charges_2014",
},
resources={"limit_memory": "2G", "limit_cpu": "1"},
)
# Run CSV transform within kubernetes pod
inpatient_2015_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="inpatient_2015_transform_csv",
startup_timeout_seconds=600,
name="cms_medicare_inpatient_charges_2015",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.cms_medicare.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/Medicare-Provider-Charge-Data/Downloads/Inpatient_Data_2015_CSV.zip",
"SOURCE_FILE": "files/data.zip",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/cms_medicare/inpatient_charges_2015/data_output.csv",
"CSV_HEADERS": '["provider_id","provider_name","provider_street_address","provider_city","provider_state","provider_zipcode","drg_definition","hospital_referral_region_description","total_discharges","average_covered_charges","average_total_payments","average_medicare_payments"]',
"RENAME_MAPPINGS": '{"Provider Id": "provider_id","Provider Name": "provider_name","Provider Street Address": "provider_street_address","Provider City": "provider_city","Provider State": "provider_state","Provider Zip Code": "provider_zipcode","DRG Definition": "drg_definition","Hospital Referral Region (HRR) Description": "hospital_referral_region_description","Total Discharges": "total_discharges","Average Covered Charges": "average_covered_charges","Average Total Payments": "average_total_payments","Average Medicare Payments": "average_medicare_payments"}',
"PIPELINE_NAME": "inpatient_charges_2015",
},
resources={"limit_memory": "2G", "limit_cpu": "1"},
)
# Task to load CSV data to a BigQuery table
load_inpatient_2011_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_inpatient_2011_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/cms_medicare/inpatient_charges_2011/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="cms_medicare.inpatient_charges_2011",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"description": "The CMS Certification Number (CCN) of the provider billing for outpatient hospital services",
"name": "provider_id",
"type": "STRING",
"mode": "REQUIRED",
},
{
"description": "The name of the provider",
"name": "provider_name",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The street address in which the provider is physically located",
"name": "provider_street_address",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The city in which the provider is physically located",
"name": "provider_city",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The state in which the provider is physically located",
"name": "provider_state",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The zip code in which the provider is physically located",
"name": "provider_zipcode",
"type": "INTEGER",
"mode": "NULLABLE",
},
{
"description": "The code and description identifying the MS-DRG. MS-DRGs are a classification system that groups similar clinical conditions (diagnoses) and the procedures furnished by the hospital during the stay",
"name": "drg_definition",
"type": "STRING",
"mode": "REQUIRED",
},
{
"description": "The Hospital Referral Region (HRR) in which the provider is physically located",
"name": "hospital_referral_region_description",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The number of discharges billed by the provider for inpatient hospital services",
"name": "total_discharges",
"type": "INTEGER",
"mode": "NULLABLE",
},
{
"description": "The provider's average charge for services covered by Medicare for all discharges in the MS-DRG. These will vary from hospital to hospital because of differences in hospital charge structures",
"name": "average_covered_charges",
"type": "FLOAT",
"mode": "NULLABLE",
},
{
"description": "The average total payments to all providers for the MS-DRG including the MSDRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Also included 5 in average total payments are co-payment and deductible amounts that the patient is responsible for and any additional payments by third parties for coordination of benefits",
"name": "average_total_payments",
"type": "FLOAT",
"mode": "NULLABLE",
},
{
"description": "The average amount that Medicare pays to the provider for Medicare's share of the MS-DRG. Average Medicare payment amounts include the MS-DRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Medicare payments DO NOT include beneficiary co-payments and deductible amounts nor any additional payments from third parties for coordination of benefits",
"name": "average_medicare_payments",
"type": "FLOAT",
"mode": "NULLABLE",
},
],
)
# Task to load CSV data to a BigQuery table
load_inpatient_2012_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_inpatient_2012_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/cms_medicare/inpatient_charges_2012/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="cms_medicare.inpatient_charges_2012",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"description": "The CMS Certification Number (CCN) of the provider billing for outpatient hospital services",
"name": "provider_id",
"type": "STRING",
"mode": "REQUIRED",
},
{
"description": "The name of the provider",
"name": "provider_name",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The street address in which the provider is physically located",
"name": "provider_street_address",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The city in which the provider is physically located",
"name": "provider_city",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The state in which the provider is physically located",
"name": "provider_state",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The zip code in which the provider is physically located",
"name": "provider_zipcode",
"type": "INTEGER",
"mode": "NULLABLE",
},
{
"description": "The code and description identifying the MS-DRG. MS-DRGs are a classification system that groups similar clinical conditions (diagnoses) and the procedures furnished by the hospital during the stay",
"name": "drg_definition",
"type": "STRING",
"mode": "REQUIRED",
},
{
"description": "The Hospital Referral Region (HRR) in which the provider is physically located",
"name": "hospital_referral_region_description",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The number of discharges billed by the provider for inpatient hospital services",
"name": "total_discharges",
"type": "INTEGER",
"mode": "NULLABLE",
},
{
"description": "The provider's average charge for services covered by Medicare for all discharges in the MS-DRG. These will vary from hospital to hospital because of differences in hospital charge structures",
"name": "average_covered_charges",
"type": "FLOAT",
"mode": "NULLABLE",
},
{
"description": "The average total payments to all providers for the MS-DRG including the MSDRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Also included 5 in average total payments are co-payment and deductible amounts that the patient is responsible for and any additional payments by third parties for coordination of benefits",
"name": "average_total_payments",
"type": "FLOAT",
"mode": "NULLABLE",
},
{
"description": "The average amount that Medicare pays to the provider for Medicare's share of the MS-DRG. Average Medicare payment amounts include the MS-DRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Medicare payments DO NOT include beneficiary co-payments and deductible amounts nor any additional payments from third parties for coordination of benefits",
"name": "average_medicare_payments",
"type": "FLOAT",
"mode": "NULLABLE",
},
],
)
# Task to load CSV data to a BigQuery table
load_inpatient_2013_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_inpatient_2013_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/cms_medicare/inpatient_charges_2013/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="cms_medicare.inpatient_charges_2013",
skip_leading_rows=1,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{
"description": "The CMS Certification Number (CCN) of the provider billing for outpatient hospital services",
"name": "provider_id",
"type": "STRING",
"mode": "REQUIRED",
},
{
"description": "The name of the provider",
"name": "provider_name",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The street address in which the provider is physically located",
"name": "provider_street_address",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The city in which the provider is physically located",
"name": "provider_city",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The state in which the provider is physically located",
"name": "provider_state",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The zip code in which the provider is physically located",
"name": "provider_zipcode",
"type": "INTEGER",
"mode": "NULLABLE",
},
{
"description": "The code and description identifying the MS-DRG. MS-DRGs are a classification system that groups similar clinical conditions (diagnoses) and the procedures furnished by the hospital during the stay",
"name": "drg_definition",
"type": "STRING",
"mode": "REQUIRED",
},
{
"description": "The Hospital Referral Region (HRR) in which the provider is physically located",
"name": "hospital_referral_region_description",
"type": "STRING",
"mode": "NULLABLE",
},
{
"description": "The number of discharges billed by the provider for inpatient hospital services",
"name": "total_discharges",
"type": "INTEGER",
"mode": "NULLABLE",
},
{
"description": "The provider's average charge for services covered by Medicare for all discharges in the MS-DRG. These will vary from hospital to hospital because of differences in hospital charge structures",
"name": "average_covered_charges",
"type": "FLOAT",
"mode": "NULLABLE",
},
{
"description": "The average total payments to all providers for the MS-DRG including the MSDRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Also included 5 in average total payments are co-payment and deductible amounts that the patient is responsible for and any additional payments by third parties for coordination of benefits",
"name": "average_total_payments",
"type": "FLOAT",
"mode": "NULLABLE",
},
{
"description": "The average amount that Medicare pays to the provider for Medicare's share of the MS-DRG. Average Medicare payment amounts include the MS-DRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Medicare payments DO NOT include beneficiary co-payments and deductible amounts nor any additional payments from third parties for coordination of benefits",
"name": "average_medicare_payments",
"type": "FLOAT",
"mode": "NULLABLE",
},
],
)
# Task to load the transformed 2014 inpatient-charges CSV from GCS into the
# cms_medicare.inpatient_charges_2014 BigQuery table (full overwrite).
load_inpatient_2014_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
    task_id="load_inpatient_2014_to_bq",
    bucket="{{ var.value.composer_bucket }}",
    source_objects=["data/cms_medicare/inpatient_charges_2014/data_output.csv"],
    source_format="CSV",
    destination_project_dataset_table="cms_medicare.inpatient_charges_2014",
    skip_leading_rows=1,  # skip the CSV header row
    write_disposition="WRITE_TRUNCATE",  # replace the table on every run
    schema_fields=[
        {
            "description": "The CMS Certification Number (CCN) of the provider billing for outpatient hospital services",
            "name": "provider_id",
            "type": "STRING",
            "mode": "REQUIRED",
        },
        {
            "description": "The name of the provider",
            "name": "provider_name",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The street address in which the provider is physically located",
            "name": "provider_street_address",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The city in which the provider is physically located",
            "name": "provider_city",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The state in which the provider is physically located",
            "name": "provider_state",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The zip code in which the provider is physically located",
            "name": "provider_zipcode",
            "type": "INTEGER",
            "mode": "NULLABLE",
        },
        {
            "description": "The code and description identifying the MS-DRG. MS-DRGs are a classification system that groups similar clinical conditions (diagnoses) and the procedures furnished by the hospital during the stay",
            "name": "drg_definition",
            "type": "STRING",
            "mode": "REQUIRED",
        },
        {
            "description": "The Hospital Referral Region (HRR) in which the provider is physically located",
            "name": "hospital_referral_region_description",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The number of discharges billed by the provider for inpatient hospital services",
            "name": "total_discharges",
            "type": "INTEGER",
            "mode": "NULLABLE",
        },
        {
            "description": "The provider's average charge for services covered by Medicare for all discharges in the MS-DRG. These will vary from hospital to hospital because of differences in hospital charge structures",
            "name": "average_covered_charges",
            "type": "FLOAT",
            "mode": "NULLABLE",
        },
        # NOTE(review): "Also included 5 in" below looks like a transcription
        # artifact from the upstream CMS data dictionary — confirm wording.
        {
            "description": "The average total payments to all providers for the MS-DRG including the MSDRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Also included 5 in average total payments are co-payment and deductible amounts that the patient is responsible for and any additional payments by third parties for coordination of benefits",
            "name": "average_total_payments",
            "type": "FLOAT",
            "mode": "NULLABLE",
        },
        {
            "description": "The average amount that Medicare pays to the provider for Medicare's share of the MS-DRG. Average Medicare payment amounts include the MS-DRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Medicare payments DO NOT include beneficiary co-payments and deductible amounts nor any additional payments from third parties for coordination of benefits",
            "name": "average_medicare_payments",
            "type": "FLOAT",
            "mode": "NULLABLE",
        },
    ],
)
# Task to load the transformed 2015 inpatient-charges CSV from GCS into the
# cms_medicare.inpatient_charges_2015 BigQuery table (full overwrite).
load_inpatient_2015_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
    task_id="load_inpatient_2015_to_bq",
    bucket="{{ var.value.composer_bucket }}",
    source_objects=["data/cms_medicare/inpatient_charges_2015/data_output.csv"],
    source_format="CSV",
    destination_project_dataset_table="cms_medicare.inpatient_charges_2015",
    skip_leading_rows=1,  # skip the CSV header row
    write_disposition="WRITE_TRUNCATE",  # replace the table on every run
    schema_fields=[
        {
            "description": "The CMS Certification Number (CCN) of the provider billing for outpatient hospital services",
            "name": "provider_id",
            "type": "STRING",
            "mode": "REQUIRED",
        },
        {
            "description": "The name of the provider",
            "name": "provider_name",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The street address in which the provider is physically located",
            "name": "provider_street_address",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The city in which the provider is physically located",
            "name": "provider_city",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The state in which the provider is physically located",
            "name": "provider_state",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The zip code in which the provider is physically located",
            "name": "provider_zipcode",
            "type": "INTEGER",
            "mode": "NULLABLE",
        },
        {
            "description": "The code and description identifying the MS-DRG. MS-DRGs are a classification system that groups similar clinical conditions (diagnoses) and the procedures furnished by the hospital during the stay",
            "name": "drg_definition",
            "type": "STRING",
            "mode": "REQUIRED",
        },
        {
            "description": "The Hospital Referral Region (HRR) in which the provider is physically located",
            "name": "hospital_referral_region_description",
            "type": "STRING",
            "mode": "NULLABLE",
        },
        {
            "description": "The number of discharges billed by the provider for inpatient hospital services",
            "name": "total_discharges",
            "type": "INTEGER",
            "mode": "NULLABLE",
        },
        {
            "description": "The provider's average charge for services covered by Medicare for all discharges in the MS-DRG. These will vary from hospital to hospital because of differences in hospital charge structures",
            "name": "average_covered_charges",
            "type": "FLOAT",
            "mode": "NULLABLE",
        },
        # NOTE(review): "Also included 5 in" below looks like a transcription
        # artifact from the upstream CMS data dictionary — confirm wording.
        {
            "description": "The average total payments to all providers for the MS-DRG including the MSDRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Also included 5 in average total payments are co-payment and deductible amounts that the patient is responsible for and any additional payments by third parties for coordination of benefits",
            "name": "average_total_payments",
            "type": "FLOAT",
            "mode": "NULLABLE",
        },
        {
            "description": "The average amount that Medicare pays to the provider for Medicare's share of the MS-DRG. Average Medicare payment amounts include the MS-DRG amount, teaching, disproportionate share, capital, and outlier payments for all cases. Medicare payments DO NOT include beneficiary co-payments and deductible amounts nor any additional payments from third parties for coordination of benefits",
            "name": "average_medicare_payments",
            "type": "FLOAT",
            "mode": "NULLABLE",
        },
    ],
)
# Wire each year's CSV transform task to its corresponding BigQuery load
# task: the transform must finish before the load starts.
inpatient_2011_transform_csv >> load_inpatient_2011_to_bq
inpatient_2012_transform_csv >> load_inpatient_2012_to_bq
inpatient_2013_transform_csv >> load_inpatient_2013_to_bq
inpatient_2014_transform_csv >> load_inpatient_2014_to_bq
inpatient_2015_transform_csv >> load_inpatient_2015_to_bq
|
#!/usr/bin/python
"""Download a YouTube video's audio as a tagged MP3.

Usage: script.py <url>

Fetches video metadata with youtube-dl, downloads the audio (128k MP3)
plus the thumbnail, tags the file with eyeD3 (artist/title/cover art),
then moves it into the music library directory.
"""
import json
import tempfile
import subprocess
import os
import shutil
import sys

# Destination library for finished files.
MUSIC_DIR = '/home/rpetti/Music'

mypwd = os.getcwd()
tempdir = tempfile.mkdtemp()
os.chdir(tempdir)
try:
    url = sys.argv[1]
    # Ask youtube-dl for the video metadata as JSON (-j: no download yet).
    json_data = subprocess.check_output(["youtube-dl", "-j", url])
    metadata = json.loads(json_data)
    title = metadata['title']
    artist = metadata['uploader']
    vid = metadata['id']
    print(vid)  # py3-compatible (was a Python-2 print statement)
    if subprocess.call(["youtube-dl", "--id", "--write-thumbnail", "-x",
                        "--audio-format", "mp3", "--audio-quality", "128K",
                        url]) != 0:
        raise Exception('could not download')
    if subprocess.call(["eyeD3", "-a", artist, "-t", title,
                        "--set-encoding=utf8",
                        "--add-image=" + vid + ".jpg:FRONT_COVER",
                        vid + ".mp3"]) != 0:
        raise Exception('could not tag')
    # A '/' in the title would be treated as a path separator and break
    # the move — replace it before building the destination name.
    safe_title = title.replace('/', '_')
    # shutil.move, not os.rename: the temp dir is usually on a different
    # filesystem than the music library, where rename raises EXDEV.
    shutil.move(vid + '.mp3', os.path.join(MUSIC_DIR, safe_title + '.mp3'))
finally:
    # Always restore the working directory and remove the temp dir, even
    # when the download or tagging fails (previously leaked on error).
    os.chdir(mypwd)
    shutil.rmtree(tempdir)
|
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
__all__ = [x.replace('.py', '') for x in os.listdir(dir_path) if
x.endswith('.py') and x != '__init__.py']
|
import asyncio
from unittest import mock
def async_mock(*args, **kwargs):
    """Build a coroutine function backed by a MagicMock.

    All constructor arguments are forwarded to MagicMock, and the mock is
    exposed on the returned coroutine function as ``.mock`` so tests can
    inspect calls and configure return values/side effects.
    """
    inner = mock.MagicMock(*args, **kwargs)

    async def coro_wrapper(*call_args, **call_kwargs):
        # Delegate every awaited call to the underlying mock.
        return inner(*call_args, **call_kwargs)

    coro_wrapper.mock = inner
    return coro_wrapper
def async_test(coro):
    """Decorator that runs an async test function synchronously.

    Each call creates a fresh event loop, runs the coroutine to
    completion, and returns its result.
    """
    def wrapper(*args, **kwargs):
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(coro(*args, **kwargs))
        finally:
            # Fix: the loop was previously never closed, leaking its
            # resources (and emitting ResourceWarnings) on every call.
            loop.close()
    return wrapper
# class MockResponse:
# def __init__(self, text, status):
# self._text = text
# self.status = status
#
# async def text(self):
# return self._text
#
# async def __aexit__(self, exc_type, exc, tb):
# pass
#
# async def __aenter__(self):
# return self
|
# https://pythonhosted.org/setuptools/setuptools.html#namespace-packages
# __import__('pkg_resources').declare_namespace(__name__)
import logging

import mapzen.whosonfirst.pip
import mapzen.whosonfirst.placetypes
# Required by append_hierarchy_and_parent, which calls
# mapzen.whosonfirst.utils.load() but previously never imported it.
import mapzen.whosonfirst.utils
import shapely.geometry
def reverse_geocoordinates(feature):
    """Return a (lat, lon) tuple for a GeoJSON-style feature.

    Preference order: 'lbl:' coordinates, then 'geom:' coordinates, then
    the centroid of the feature's geometry (computed with shapely).
    """
    props = feature['properties']
    lat = props.get('lbl:latitude', None)
    lon = props.get('lbl:longitude', None)
    # Fix: compare against None explicitly — 0.0 is a valid coordinate
    # (equator / prime meridian) but is falsy, so the previous
    # `if not lat or not lon` wrongly discarded it.
    if lat is None or lon is None:
        lat = props.get('geom:latitude', None)
        lon = props.get('geom:longitude', None)
    if lat is None or lon is None:
        # Last resort: derive the centroid from the geometry itself.
        shp = shapely.geometry.asShape(feature['geometry'])
        coords = shp.centroid
        lat = coords.y
        lon = coords.x
    return lat, lon
# please rename me
# test with 18.48361, -77.53057
def whereami(feature, **kwargs):
    """Unfinished: locate a feature's ancestors via the PIP server.

    A draft implementation is preserved in the string literal below.
    """
    # Fix: `raise Exception, "..."` is Python-2-only syntax and a
    # SyntaxError under Python 3. NotImplementedError is the conventional
    # exception for a stub and is still caught by `except Exception`.
    raise NotImplementedError("Please finish me")
"""
lat, lon = reverse_geocoordinates(feature)
props = feature['properties']
placetype = props['wof:placetype']
# see also : https://github.com/whosonfirst/go-whosonfirst-pip#wof-pip-server
pip = mapzen.whosonfirst.pip.proxy()
pt = mapzen.whosonfirst.placetypes.placetype(placetype)
for ancestor in pt.ancestors():
    ancestor = str(ancestor)
    # TO DO: some kind of 'ping' to make sure the server is actually
    # there... (20151221/thisisaaronland)
    # print "%s : %s,%s" % (parent, lat, lon)
    try:
        rsp = pip.reverse_geocode(ancestor, lat, lon)
    except Exception, e:
        logging.warning("failed to reverse geocode %s @%s,%s" % (parent, lat, lon))
        continue
    if len(rsp):
        _rsp = rsp
        break
    pass
"""
def append_hierarchy_and_parent_pip(feature, **kwargs):
    """Backwards-compatible alias for append_hierarchy_and_parent."""
    return append_hierarchy_and_parent(feature, **kwargs)
def append_hierarchy_and_parent(feature, **kwargs):
    """Reverse-geocode `feature` against the WOF PIP server and attach
    'wof:parent_id' and 'wof:hierarchy' to its properties.

    kwargs:
        data_root: root directory used to load parent WOF documents.

    Returns True; the feature is modified in place. parent_id is -1 when
    no (or more than one) parent could be determined.
    """
    props = feature['properties']
    placetype = props['wof:placetype']
    lat, lon = reverse_geocoordinates(feature)
    # see also : https://github.com/whosonfirst/go-whosonfirst-pip#wof-pip-server
    pip = mapzen.whosonfirst.pip.proxy()
    pt = mapzen.whosonfirst.placetypes.placetype(placetype)
    _hiers = []
    _rsp = []
    parents = pt.parents()
    logging.debug("feature is a %s, parents are %s" % (placetype, parents))
    # Walk the placetype's parents until one of them reverse-geocodes.
    for parent in parents:
        parent = str(parent)
        # TO DO: some kind of 'ping' to make sure the server is actually
        # there... (20151221/thisisaaronland)
        logging.debug("reverse geocode for %s w/ %s,%s" % (parent, lat, lon))
        try:
            rsp = pip.reverse_geocode(parent, lat, lon, exclude=["superseded", "deprecated"])
        except Exception:  # fix: `except Exception, e` is py2-only syntax (e was unused)
            logging.debug("failed to reverse geocode %s @%s,%s" % (parent, lat, lon))
            continue
        if len(rsp):
            _rsp = rsp
            break
    wofid = props.get('wof:id', None)
    for r in _rsp:
        parent_wofid = r['Id']  # renamed from `id`, which shadowed the builtin
        # mapzen.whosonfirst.utils must be imported at module level (it was
        # previously used here without an import).
        pf = mapzen.whosonfirst.utils.load(kwargs.get('data_root', ''), parent_wofid)
        pp = pf['properties']
        ph = pp['wof:hierarchy']
        if len(ph) == 0:
            logging.warning("parent (%s) returned an empty hierarchy so making a truncated mock" % parent_wofid)
            pt = pp['wof:placetype']
            pt = "%s_id" % pt
            ph = [ {pt: parent_wofid} ]
        for h in ph:
            if wofid:
                h[ "%s_id" % placetype ] = wofid
            _hiers.append(h)
    parent_id = -1
    if len(_rsp) == 0:
        logging.debug("Failed to reverse geocode any parents for %s, %s" % (lat, lon))
    elif len(_rsp) > 1:
        logging.debug("Multiple reverse geocoding possibilities %s, %s" % (lat, lon))
    else:
        parent_id = _rsp[0]['Id']
    props['wof:parent_id'] = parent_id
    props['wof:hierarchy'] = _hiers
    feature['properties'] = props
    return True
|
import argparse
import os
import time
from os import path
import hashlib
from dateutil.parser import isoparse
import jsonpickle
import datetime
import json
from core_data_modules.cleaners import swahili, Codes
from core_data_modules.traced_data import Metadata, TracedData
from core_data_modules.traced_data.io import TracedDataJsonIO, TracedDataCSVIO
from core_data_modules.util import IOUtils, PhoneNumberUuidTable
if __name__ == "__main__":
    # CLI entry point: cleans survey responses and exports each variable to
    # Coda files for manual verification and coding.
    parser = argparse.ArgumentParser(description="Cleans the surveys and exports variables to Coda for "
                                                 "manual verification and coding")
    parser.add_argument("user", help="User launching this program, for use by TracedData Metadata")
    parser.add_argument("json_input_path", metavar="json-input-path",
                        help="Path to input file, containing a list of serialized TracedData objects as JSON")
    parser.add_argument("prev_coded_path", metavar="prev-coded-path",
                        help="Directory containing Coda files generated by a previous run of this pipeline stage. "
                             "New data will be appended to this file.")
    parser.add_argument("json_output_path", metavar="json-output-path",
                        help="Path to a JSON file to write processed TracedData messages to")
    parser.add_argument("coded_output_path", metavar="coding-output-path",
                        help="Directory to write coding files to")
    parser.add_argument("flow_name", metavar="flow-name")
    args = parser.parse_args()
    user = args.user
    json_input_path = args.json_input_path
    # NOTE(review): prev_coded_path and CONTROL_CODES are assigned but never
    # used below — confirm whether appending to previous Coda files was
    # dropped intentionally.
    prev_coded_path = args.prev_coded_path
    json_output_path = args.json_output_path
    coded_output_path = args.coded_output_path
    flow_name = args.flow_name
    CONTROL_CODES = ["NA", "NC", "WS"]

    class CleaningPlan:
        # Describes how one raw survey field is handled: raw_field ->
        # clean_field, plus the Coda file name, the cleaning function and
        # the Coda scheme id (cleaner/scheme_id may be None).
        def __init__(self, raw_field, clean_field, coda_name, cleaner, scheme_id):
            self.raw_field = raw_field
            self.clean_field = clean_field
            self.coda_name = coda_name
            self.cleaner = cleaner
            self.scheme_id = scheme_id

    # One list of cleaning plans per supported flow.
    cleaning_plans = {
        "call_center":
            [CleaningPlan("informationcc_raw_radio_q1_why", "informationcc_radio_q1_why_clean", "RadioQ1",
                          None, None),
             CleaningPlan("informationcc_raw_radio_q2_why", "informationcc_radio_q2_why_clean", "RadioQ2",
                          None, None)
             ]
    }
    cleaning_plan = cleaning_plans[flow_name]

    # Load data from JSON file
    with open(json_input_path, "r") as f:
        data = TracedDataJsonIO.import_json_to_traced_data_iterable(f)

    # Mark missing entries in the raw data as true missing
    for td in data:
        missing = dict()
        for plan in cleaning_plan:
            if td[plan.raw_field] == "n/a":
                missing[plan.raw_field] = Codes.TRUE_MISSING
        td.append_data(missing, Metadata(user, Metadata.get_call_location(), time.time()))

    # Exclude missing data
    for plan in cleaning_plan:
        data = [td for td in data if td[plan.raw_field] not in {Codes.TRUE_MISSING, Codes.SKIPPED, Codes.NOT_LOGICAL}]

    # Clean all responses, add MessageID and Labels
    for td in data:
        # NOTE(review): `cleaned` is appended but never populated (the plans
        # above carry cleaner=None) — confirm this is intentional.
        cleaned = dict()
        message_id = dict()
        labels = dict()
        for plan in cleaning_plan:
            # MessageID is a stable SHA-256 of the raw response text, used
            # to de-duplicate messages in the Coda export below.
            hash_object = hashlib.sha256()
            hash_object.update(td[plan.raw_field].encode('utf-8'))
            message_id_string = hash_object.hexdigest()
            message_id_key = "{} MessageID".format(plan.raw_field)
            message_id[message_id_key] = message_id_string
            labels_key = "{} Labels".format(plan.raw_field)
            labels[labels_key] = []
        td.append_data(cleaned, Metadata(user, Metadata.get_call_location(), time.time()))
        td.append_data(message_id, Metadata(user, Metadata.get_call_location(), time.time()))
        td.append_data(labels, Metadata(user, Metadata.get_call_location(), time.time()))

    # Write json output
    IOUtils.ensure_dirs_exist_for_file(json_output_path)
    with open(json_output_path, "w") as f:
        TracedDataJsonIO.export_traced_data_iterable_to_json(data, f, pretty_print=True)

    # Output for manual verification + coding: one Coda JSON file per plan.
    IOUtils.ensure_dirs_exist(coded_output_path)
    for plan in cleaning_plan:
        coded_output_file_path = path.join(coded_output_path, "{}.json".format(plan.coda_name))
        message_ids = list()
        messages_to_code = list()
        for td in data:
            output = dict()
            output["Labels"] = td["{} Labels".format(plan.raw_field)]
            output["MessageID"] = td["{} MessageID".format(plan.raw_field)]
            output["Text"] = str(td[plan.raw_field])
            output["CreationDateTimeUTC"] = isoparse(td["start"]).isoformat()
            # Export each distinct message only once (de-dup on MessageID).
            if output["MessageID"] not in message_ids:
                messages_to_code.append(output)
                message_ids.append(output["MessageID"])
            output["avf_phone_id"] = td["avf_phone_id"]
        with open(coded_output_file_path, "w") as f:
            jsonpickle.set_encoder_options("json", sort_keys=True)
            f.write(jsonpickle.dumps(messages_to_code))
            f.write("\n")
|
#!/usr/bin/env python3
################################################################################
#
# <PROJ> Create repo labels
# <FILE> make_repo_labels.py
# <AUTH> Benjamin Skinner (btskinner@virginia.edu)
# <INIT> 19 November 2018
#
################################################################################
# modules
import argparse
import json
import requests
# --------------------------------------
# FUNCTIONS
# --------------------------------------
# command line parser
def set_cli():
    """Parse and return command-line options for the label-management tool."""
    p = argparse.ArgumentParser()
    p.add_argument('-i', '--id', metavar="ID", required=True,
                   help='GitHub ID')
    p.add_argument('-t', '--token', metavar='TOKEN', required=True,
                   type=argparse.FileType('r'),
                   help='GitHub authorization token in file')
    p.add_argument('-r', '--repo', metavar='REPO', required=True,
                   help='Repository name')
    p.add_argument('-o', '--org', metavar='ORG',
                   help='Organization name')
    p.add_argument('-l', '--labels', metavar='LABELS',
                   type=argparse.FileType('r'),
                   help='Labels in JSON file')
    p.add_argument('-c', '--check_existing', action='store_true',
                   help='Flag to check existing labels')
    p.add_argument('-d', '--drop_existing', action='store_true',
                   help='Flag to drop existing labels')
    return p.parse_args()
# pretty print
def pretty_print(dict_list, header, key_list):
    """Print an underlined, upper-cased header followed by the selected
    keys of each dict, with values aligned to the longest key name."""
    widest = max(key_list, key=len)
    rule = '-' * len(header)
    print(rule)
    print(header.upper())
    print(rule, end='\n\n')
    for entry in dict_list:
        for key in key_list:
            pad = ' ' * (len(widest) - len(key) + 1)
            print(' {}:{} {}'.format(key.upper(), pad, entry[key]))
        print('')
# read json labels
def read_json(args):
    """Parse the label definitions from the --labels JSON file handle."""
    return json.load(args.labels)
# set url
def set_url(args):
    """Build the GitHub labels endpoint URL for the target repository.

    The repo owner is the organization when --org was given, otherwise
    the user's own GitHub ID.
    """
    owner = args.org if args.org else args.id
    return f'https://api.github.com/repos/{owner}/{args.repo}/labels'
# create authorization tuple
def set_auth(args):
    """Return a (user, token) basic-auth tuple.

    The token is read from the --token file handle with surrounding
    whitespace stripped.
    """
    token = args.token.read().strip()
    return (args.id, token)
# check label list
def check_label_list(label_list):
    """Exit the program early when `label_list` is empty.

    No-op when labels exist; raises SystemExit (via sys.exit) otherwise.
    """
    if not label_list:
        print('')
        print('No current labels!', end='\n\n')
        # Fix: use sys.exit() rather than the site-provided exit() builtin,
        # which is not guaranteed to exist (e.g. under `python -S` or in a
        # frozen executable). Local import keeps module deps unchanged.
        import sys
        sys.exit()
# retrieve and print current labels
def get_current(url, auth):
    """Fetch the repository's existing labels as a list of dicts."""
    response = requests.get(url, auth=auth)
    return json.loads(response.text)
# drop current labels
def drop_current(url, auth):
    """Interactively delete every existing label on the repository.

    Shows the labels, asks for confirmation, then issues one DELETE per
    label. Exits early (via check_label_list) when there is nothing to drop.
    """
    label_list = get_current(url, auth)
    check_label_list(label_list)
    pretty_print(label_list, 'current labels to be dropped', ['name'])
    proceed = input('Are you sure you want to drop these labels? [Y/N]: ')
    proceed = (proceed.lower() in ['y','yes'])
    if proceed:
        print('Dropping:')
        for item in label_list:
            # GitHub API: DELETE /repos/:owner/:repo/labels/:name
            lab_url = url + '/' + item['name']
            r = requests.delete(lab_url, auth=auth)
            print(' - {}'.format(item['name']))
# add from file
def add_labels(args, url, auth):
    """Interactively create the labels defined in the --labels JSON file.

    Shows the labels, asks for confirmation, then issues one POST per
    label against the repo's labels endpoint.
    """
    new_labels = read_json(args)
    pretty_print(new_labels, 'new labels to be added', ['name','color'])
    proceed = input('Are you sure you want to add these labels? [Y/N]: ')
    proceed = (proceed.lower() in ['y','yes'])
    if proceed:
        print('Adding:')
        for item in new_labels:
            r = requests.post(url, auth=auth, json=item)
            print(' - {}'.format(item['name']))
# --------------------------------------
# RUN
# --------------------------------------
if __name__ == "__main__":
    args = set_cli()
    url = set_url(args)
    auth = set_auth(args)
    # --check_existing: just list the current labels and stop.
    if args.check_existing:
        label_list = get_current(url, auth)
        check_label_list(label_list)
        pretty_print(label_list, 'current labels', ['name','color','default'])
        exit()
    # --drop_existing: interactively remove everything currently on the repo.
    if args.drop_existing:
        drop_current(url, auth)
    # --labels FILE: interactively add the labels defined in the JSON file.
    if args.labels:
        add_labels(args, url, auth)
|
# Date-stamped release version of this package.
version = "2018-01-29"
from django.urls import path
from atlassian_jwt_auth.frameworks.django.tests import views
# Test URL routes exercising the ASAP JWT-auth decorators/middleware.
urlpatterns = [
    path('asap/expected', views.expected_view, name='expected'),
    # NOTE(review): django.urls.path() (unlike url()/re_path()) treats its
    # pattern literally, so the '^' in the next two routes is part of the
    # URL itself ('/^asap/unexpected', '/^asap/decorated'). This looks like
    # a leftover from a url()->path() migration — confirm whether the tests
    # resolve these views by name only, in which case it is harmless.
    path(r'^asap/unexpected', views.unexpected_view, name='unexpected'),
    path('^asap/decorated', views.decorated_view, name='decorated'),
    path('asap/settings', views.settings_view, name='settings'),
    path('asap/subject_does_not_need_to_match_issuer',
         views.subject_does_not_need_to_match_issuer_view,
         name='subject_does_not_need_to_match_issuer'),
    path('asap/subject_does_need_to_match_issuer_view',
         views.subject_does_need_to_match_issuer_view,
         name='subject_does_need_to_match_issuer'),
    path('asap/subject_does_not_need_to_match_issuer_from_settings',
         views.subject_does_not_need_to_match_issuer_from_settings_view,
         name='subject_does_not_need_to_match_issuer_from_settings'),
    path('asap/needed', views.needed_view, name='needed'),
    path(r'asap/unneeded', views.unneeded_view, name='unneeded'),
    path(r'asap/restricted_issuer', views.restricted_issuer_view,
         name='restricted_issuer'),
    path('asap/restricted_subject', views.restricted_subject_view,
         name='restricted_subject'),
]
|
"""This is rc-update support."""
__revision__ = '$Revision$'
import os
import Bcfg2.Client.Tools
import Bcfg2.Client.XML
class RcUpdate(Bcfg2.Client.Tools.SvcTool):
    """RcUpdate (Gentoo rc-update/rc-status) support for Bcfg2."""
    name = 'RcUpdate'
    __execs__ = ['/sbin/rc-update', '/bin/rc-status']
    __handles__ = [('Service', 'rc-update')]
    __req__ = {'Service': ['name', 'status']}

    def VerifyService(self, entry, _):
        """
        Verify Service status for entry.
        Assumes we run in the "default" runlevel.

        Returns True when the enabled/running state matches the requested
        'status'; otherwise records 'current_status' on the entry and
        returns False.
        """
        # check if service is enabled
        # NOTE(review): the service name is interpolated into a shell
        # pipeline; names are expected to come from trusted Bcfg2 config.
        cmd = '/sbin/rc-update show default | grep %s'
        rc = self.cmd.run(cmd % entry.get('name'))[0]
        is_enabled = (rc == 0)
        if entry.get('mode', 'default') == 'supervised':
            # check if init script exists
            try:
                os.stat('/etc/init.d/%s' % entry.get('name'))
            except OSError:
                self.logger.debug('Init script for service %s does not exist' %
                                  entry.get('name'))
                return False
            # check if service is currently running
            cmd = '/etc/init.d/%s status | grep started'
            # Consistency fix: use entry.get('name') like everywhere else in
            # this class (was entry.attrib['name'], which raises KeyError
            # instead of returning None when the attribute is missing).
            rc = self.cmd.run(cmd % entry.get('name'))[0]
            is_running = (rc == 0)
        else:
            # we don't care
            is_running = is_enabled
        if entry.get('status') == 'on' and not (is_enabled and is_running):
            entry.set('current_status', 'off')
            return False
        elif entry.get('status') == 'off' and (is_enabled or is_running):
            entry.set('current_status', 'on')
            return False
        return True

    def InstallService(self, entry):
        """
        Install Service entry.
        In supervised mode we also take care it's (not) running.

        Returns True when the rc-update add/del command succeeded.
        """
        # don't take any actions for mode='manual'
        if entry.get('mode', 'default') == 'manual':
            self.logger.info("Service %s mode set to manual. Skipping "
                             "installation." % (entry.get('name')))
            return False
        self.logger.info('Installing Service %s' % entry.get('name'))
        if entry.get('status') == 'on':
            # make sure it's running if in supervised mode
            if entry.get('mode', 'default') == 'supervised' \
               and entry.get('current_status') == 'off':
                self.start_service(entry)
            # make sure it's enabled
            cmd = '/sbin/rc-update add %s default'
            rc = self.cmd.run(cmd % entry.get('name'))[0]
            return (rc == 0)
        elif entry.get('status') == 'off':
            # make sure it's not running if in supervised mode
            if entry.get('mode', 'default') == 'supervised' \
               and entry.get('current_status') == 'on':
                self.stop_service(entry)
            # make sure it's disabled
            cmd = '/sbin/rc-update del %s default'
            rc = self.cmd.run(cmd % entry.get('name'))[0]
            return (rc == 0)
        # Unknown/missing 'status' value: nothing to do.
        return False

    def FindExtra(self):
        """Locate active services not specified in the configuration."""
        cmd = '/bin/rc-status -s | grep started'
        allsrv = [line.split()[0] for line in self.cmd.run(cmd)[1]]
        self.logger.debug('Found active services:')
        self.logger.debug(allsrv)
        specified = [srv.get('name') for srv in self.getSupportedEntries()]
        return [Bcfg2.Client.XML.Element('Service',
                                         type='rc-update',
                                         name=name) \
                for name in allsrv if name not in specified]
|
'''调度器'''
from .Process import ProcessPool
from .Thread import ThreadMap, ThreadPool, Thread
from .Asyncio import Gevent, Asyncio
from .typing import Union, Callable, RunCalculation, RunIOintensive, RunTask
from .Communication import broadcast, Processbroadcast, asyncbroadcast, Lock
from .publictool import tasksequence, dict_slice, taskmethodput
import asyncio
from queue import Empty
import time
# CPU-bound tasks
class Calculation(ProcessPool):
    '''Wrapper for CPU-bound (computation-intensive) tasks, executed on a
    process pool.'''
    Preferred : RunCalculation = RunCalculation.APPLYASYNC
    '''`Calculation` default `method`'''
    def __init__(self, func: Union[list, Callable], *args, kwargs=[]):
        super().__init__(func, *args, kwargs=kwargs)
    def run(self, method : Union[int, RunCalculation] = None, *args, **kwargs):
        ''' Run this CPU-bound task instance.
        Args:
            `method` : `Union[int, RunCalculation]` execution method; falls
                back to `Calculation.Preferred` when not given
            `args` : `tuple` ProcessPool args
            `kwargs` : `dict` ProcessPool kwargs
        Returns:
            `List` : list of task return values
        '''
        # Normalize the chosen method (enum member or plain int) to an int.
        if method:
            if isinstance(method, int):
                value = method
            else:
                value = method.value
        else:
            if isinstance(Calculation.Preferred, int):
                value = Calculation.Preferred
            else:
                value = Calculation.Preferred.value
        # Dispatch to the matching ProcessPool execution strategy.
        if value == 0 or value == RunCalculation.APPLY:
            return self.apply(*args, **kwargs)
        elif value == 1 or value == RunCalculation.APPLYASYNC:
            return self.apply_async(*args, **kwargs)
        elif value == 2 or value == RunCalculation.MAP:
            return self.map(*args, **kwargs)
        elif value == 3 or value == RunCalculation.MAPASYNC:
            return self.map_async(*args, **kwargs)
        elif value == 4 or value == RunCalculation.EXECUTOR:
            return self.Executor(*args, **kwargs)
        else:
            raise TypeError(
                f"Calculation.Preferred({Calculation.Preferred}) or Calculation.run.method({method}) not in Enum RunCalculation!")
# IO-bound tasks
class IOintensive():
    '''Wrapper for IO-bound tasks; dispatches to a coroutine/thread/gevent
    based executor according to the chosen method.'''
    Preferred = RunIOintensive.RunThreadMap_MAP
    '''`IOintensive` default `method`'''
    def __init__(self, func: Union[list, Callable], *args, kwargs=[]):
        # kwargs=[] mirrors the convention used by the executor classes;
        # the default list is only passed through, never mutated here.
        self.func = func
        self.args = args
        self.kwargs = kwargs
    def run(self, method : Union[int, RunIOintensive] = None, *args, **kwargs):
        ''' Run this IO-bound task instance.
        Args:
            `method` : `Union[int, RunIOintensive]` execution method; falls
                back to `IOintensive.Preferred` when not given
            `args` : `tuple` executor args
            `kwargs` : `dict` executor kwargs
        Returns:
            `List` : list of task return values
        '''
        # Normalize the chosen method (enum member or plain int) to an int.
        if method:
            if isinstance(method, int):
                value = method
            else:
                value = method.value
        else:
            if isinstance(IOintensive.Preferred, int):
                value = IOintensive.Preferred
            else:
                value = IOintensive.Preferred.value
        # Each executor owns a decade of method codes: 1x Asyncio,
        # 2x ThreadMap, 3x ThreadPool, 4x Gevent.
        if value < 20:
            return Asyncio(self.func, *self.args, kwargs = self.kwargs).run(value - 10)
        elif value < 30:
            return ThreadMap(self.func, *self.args, kwargs = self.kwargs).run(value - 20)
        elif value < 40:
            return ThreadPool(self.func, *self.args, kwargs = self.kwargs).run(value - 30)
        elif value < 50:
            return Gevent(self.func, *self.args, kwargs = self.kwargs).run(value - 40)
        else:
            # Fix: the message previously interpolated Calculation.Preferred
            # instead of IOintensive.Preferred.
            raise TypeError(
                f"IOintensive.Preferred({IOintensive.Preferred}) or IOintensive.run.method({method}) not in Enum RunIOintensive!")
# Tasks
class Task:
    '''A schedulable task: a function (or list of functions) plus its
    arguments and the execution method used to run it.'''
    Calculation = Calculation
    IOintensive = IOintensive
    def __init__(self, func: Union[list, Callable], *args, kwargs=[], method : Union[int, RunTask] = None):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        # taskmethodput maps the method to the wrapper class name
        # ('Calculation' or 'IOintensive').
        self.runm = taskmethodput(method)
        self.method = method
    def run(self, method : Union[int, RunTask] = None):
        ''' Run this task.
        Args:
            `method` : `Union[int, RunTask]` execution method; overrides the
                `method` given at construction time when provided
        Returns:
            `List` : list of task return values
        '''
        # Idiom fix: compare against None with `is`, not `==`.
        if method is None:
            method = self.method
            if method is None:
                raise TypeError(f'Task({self}) method is None')
            task = getattr(Task, self.runm)(self.func,*self.args, kwargs = self.kwargs)
        else:
            task = getattr(Task, taskmethodput(method))(self.func,*self.args, kwargs = self.kwargs)
        return task.run(method)
    @staticmethod
    def threads(func, *args, **kwargs):
        '''Run `func` asynchronously on a new thread and return the thread.'''
        task = Thread(target = func, args = (*args, ), kwargs = kwargs)
        task.start()
        return task
    def length(self):
        '''Number of individual sub-tasks represented by this Task.'''
        return tasksequence(self.func, self.args, self.kwargs)
# Scheduler
class Scheduler():
    '''Scheduler: chains tasks into stages and relays each stage's results
    to the next one through per-stage locks and `medium` dicts.'''
    Calculation = Calculation
    IOintensive = IOintensive
    def __init__(self):
        self.tasks = []  # task chain (one Task per stage)
        self.sequence = 0  # length of the task chain
        self.length = 0  # number of sub-tasks per stage
        self.lock = []  # per-stage communication locks/queues
        self.medium = []  # per-stage result channels ({index: result})
        self.waits = []  # repeater/listener threads
        self.results = []  # ordered results per stage
    def addIO(self, func: Union[list, Callable], *args, kwargs = [], method : Union[int, RunIOintensive] = None):
        ''' Add an `IOintensive` stage to the chain.
        Args:
            `func` : Union[list, Callable] task function(s)
            `args` : `tuple` positional arguments
            `kwargs` : `dict` keyword arguments
            `method` : `Union[int, RunIOintensive]` execution method;
                overrides `IOintensive.Preferred` when given
        Returns:
            None
        '''
        if method == None:
            method = IOintensive.Preferred
        else:
            if not taskmethodput(method)=='IOintensive':
                raise TypeError(
                    f"Task.method({method}) not in Enum RunIOintensive!")
        if self.tasks:
            self.tasks.append(Task(func, *args, kwargs = kwargs, method = method))
        else:
            # First stage also fixes the per-stage sub-task count.
            self.tasks.append(Task(func, *args, kwargs = kwargs, method = method))
            self.length = tasksequence(func, args, kwargs)
    def addCal(self, func: Union[list, Callable], *args, kwargs = [], method : Union[int, RunCalculation] = None):
        ''' Add a `Calculation` stage to the chain.
        Args:
            `func` : Union[list, Callable] task function(s)
            `args` : `tuple` positional arguments
            `kwargs` : `dict` keyword arguments
            `method` : `Union[int, RunCalculation]` execution method;
                overrides `Calculation.Preferred` when given
        Returns:
            None
        '''
        if method == None:
            method = Calculation.Preferred
        else:
            if not taskmethodput(method)=='Calculation':
                raise TypeError(
                    f"Task.method({method}) not in Enum RunCalculation!")
        if self.tasks:
            self.tasks.append(Task(func, *args, kwargs = kwargs, method = method))
        else:
            # First stage also fixes the per-stage sub-task count.
            self.tasks.append(Task(func, *args, kwargs = kwargs, method = method))
            self.length = tasksequence(func, args, kwargs)
    def run(self, join : bool = True):
        ''' Run the scheduled task chain.
        Args:
            `join` : bool whether to wait for the whole schedule; must be
                True when the chain contains a `Calculation` stage
        Returns:
            List : ordered results for each stage
        '''
        if self.tasks:
            self.sequence = len(self.tasks)
            self.lock = Lock.get_lock(self.tasks, join)
            self.medium = [{} for _ in range(self.sequence)]
            # Wrap each stage's function(s) with the matching broadcast
            # decorator (coroutine / process / thread) so results are
            # published into the stage's medium, and prepend the sequence
            # indices to the first stage's args.
            for i,j in enumerate(self.tasks):
                if isinstance(j.func, list):
                    if asyncio.iscoroutinefunction(j.func[0]):
                        j.func = [asyncbroadcast(j.func[z], self.lock[i], self.medium[i]) for z in range(self.length)]
                    else:
                        if j.runm == 'Calculation':
                            j.func = [Processbroadcast(j.func[z], self.lock[i], self.medium[i]) for z in range(self.length)]
                        else:
                            j.func = [broadcast(j.func[z], self.lock[i], self.medium[i]) for z in range(self.length)]
                else:
                    if asyncio.iscoroutinefunction(j.func):
                        j.func = asyncbroadcast(j.func, self.lock[i], self.medium[i])
                    else:
                        if j.runm == 'Calculation':
                            j.func = Processbroadcast(j.func, self.lock[i], self.medium[i])
                        else:
                            j.func = broadcast(j.func, self.lock[i], self.medium[i])
                if i==0:
                    j.args = tuple([list(range(self.length))] + list(j.args))
            # Kick off the first stage, then start one repeater thread per
            # stage to forward results to the next stage; the final stage
            # gets the completion listener instead.
            for i,j in enumerate(self.tasks):
                if i==0:
                    task0 = getattr(Scheduler, j.runm)(j.func, *j.args,kwargs = j.kwargs)
                    self.threads(task0.run, j.method)
                if i < self.sequence - 1:
                    if self.tasks[i].runm == 'Calculation':
                        self.waits.append(self.threads(self.CalRepeater, self.tasks[i+1], self.length, self.lock[i], self.medium[i]))
                    else:
                        self.waits.append(self.threads(self.IORepeater, self.tasks[i+1], self.length, self.lock[i], self.medium[i]))
                else:
                    self.waits.append(self.threads(self.Interrupt, j, self.length, self.lock[i], self.medium[i]))
        else:
            raise TypeError("Scheduler tasks is null!")
        if join:
            self.wait()
        # Collect each stage's medium as a result list ordered by sub-task
        # index.
        for i in self.medium:
            self.results.append(list(map(lambda x:x[1], sorted(i.items(), key=lambda x:x[0]))))
        return self.results
    def IORepeater(self, task, l, lock, medium):
        '''Thread relay: watches `medium` under `lock` and forwards newly
        arrived results (as index/value lists) to the next stage.'''
        i = 0
        while i<l:
            with lock:
                a = len(medium)
                if a > i:
                    res = dict_slice(medium, i, a)
                    i = a
                    task.args = (list(res.keys()), list(res.values()))
                    task0 = getattr(Scheduler, task.runm)(task.func, *task.args)
                    self.threads(task0.run, task.method)
            time.sleep(0.1)
    def CalRepeater(self, task, l, lock, medium):
        '''Process relay: drains the result queue into `medium` and forwards
        newly arrived results to the next stage.'''
        i = 0
        while i<l:
            while True:
                try:
                    medium.update(lock.get(timeout = 0.1))
                except Empty:
                    # Queue momentarily empty: hand off anything new.
                    a = len(medium)
                    if a > i:
                        res = dict_slice(medium, i, a)
                        i = a
                        task.args = (list(res.keys()), list(res.values()))
                        task0 = getattr(Scheduler, task.runm)(task.func, *task.args)
                        self.threads(task0.run, task.method)
                    break
            time.sleep(0.1)
    def Interrupt(self, task, l, lock, medium):
        '''Completion listener for the final stage: returns once all of its
        `l` results have arrived in `medium`.'''
        if task.runm == 'Calculation':
            i = 0
            while i<l:
                while True:
                    try:
                        medium.update(lock.get(timeout = 0.1))
                    except Empty:
                        a = len(medium)
                        if a > i:
                            i = a
                        break
                time.sleep(0.1)
        else:
            i = 0
            while i<l:
                with lock:
                    a = len(medium)
                    if a > i:
                        i = a
                time.sleep(0.1)
    def wait(self):
        '''Block until every repeater/listener thread has finished.'''
        for i in self.waits:
            i.join()
    @staticmethod
    def threads(func, *args, **kwargs):
        '''Run `func` asynchronously on a new thread and return the thread.'''
        task = Thread(target = func, args = (*args, ), kwargs = kwargs)
        task.start()
        return task
import sys, requests, getpass, json, argparse, re
class FFSQuery:
"""
FFSQuery provides a wrapper for creating FFS queries and returning results
The FFS Query class provides a number of methods for logging into Code42, building a query,
and conducting a search
"""
    def __init__(self, base_url):
        """
        Initialization method for FFSQuery
        :param base_url: Base URL for Code42 API queries
        :returns: Returns an FFSQuery object
        """
        # Create a new request session object to persist a session
        self.s = requests.Session()
        # Auth token used for querying (populated by do_login)
        self.auth_token = None
        # Flag if we are logged in
        self.logged_in = False
        # Set base URL for queries
        self.base_url = base_url
    def _get_auth_token(self, sts_url, username, password):
        """
        Internal method to get the Code42 v3 auth token
        :param sts_url: URL for STS API calls
        :param username: Username to log in as
        :param password: Password for account
        :returns: Returns the v3 auth token if successful, or None if authentication has failed
        """
        url = 'https://{}/c42api/v3/auth/jwt?useBody=true'.format(sts_url)
        # Basic-auth GET; any non-200 response is treated as a failure.
        response = self.s.get(url, auth=(username,password))
        if response.status_code != 200:
            return None
        return json.loads(response.text)['data']['v3_user_token']
    def do_login(self, sts_url, username, password):
        """
        Login to the Code42 application
        :param sts_url: URL for STS API calls
        :param username: Username to log in as
        :param password: Password for account
        :returns: Returns True if login was successful, False otherwise
        """
        # Get authentication token; stays None on failure so later calls
        # can detect the unauthenticated state.
        self.auth_token = self._get_auth_token(sts_url, username, password)
        if self.auth_token is None:
            return False
        else:
            # Toggle flag
            self.logged_in = True
            return True
def build_query_payload(self, search_type, search_values, source, max_results, events_before=None, events_after=None):
"""
Build a query payload based on search type and values
:param search_type: Type of search you want to conduct, will be mapped to FFS API field types
:param search_values: list of values to search for
:param source: source of data (Endpoint, Google Drive, OneDrive, All)
:param max_results: Max number of results to return, limited to 10000 or less
:param events_before: Optional date in YYYY-MM-DD format for returning events on or before that date
:param events_after: Optional date in YYYY-MM-DD format for returning events on or after that date
:returns: Returns the query payload if the query build was successful
"""
# Map out the supported search fields to FFS API field names
mapper = {
'md5' : 'md5Checksum',
'sha256' : 'sha256Checksum',
'filename' : 'fileName',
'hostname' : 'osHostName',
'filepath' : 'filePath',
'fileowner' : 'fileOwner',
'actor' : 'actor',
'sharedwith' : 'sharedWith',
'event_id' : 'eventId',
'exposure' : 'exposure',
'device_vendor' : 'removableMediaVendor',
'device_name' : 'removableMediaName',
'device_sn' : 'removableMediaSerialNumber',
'process_owner' : 'processOwner',
'process_name' : 'processName',
'sync_destination' : 'syncDestination'
}
source_mapper = {
'google' : 'GoogleDrive',
'onedrive': 'OneDrive',
'endpoint': 'Endpoint'
}
exposure_mapper = {
'removable_media' : 'RemovableMedia',
'application_read' : 'ApplicationRead',
'cloud_storage' : 'CloudStorage'
}
# Start with blank query
self.query_payload = {}
# If a master group does not exist in the query yet, add it.
if 'groups' not in self.query_payload:
self.query_payload['groups'] = []
ffs_filters = {}
for search_value in search_values:
ffs_filter = {}
ffs_filter['operator'] = 'IS'
ffs_filter['term'] = mapper[search_type]
# Map exposure types
if search_type == 'exposure':
try:
ffs_filter['value'] = exposure_mapper[search_value]
except:
raise KeyError('An invalid exposure type was selected. Permitted values are {}'.format(exposure_mapper.keys()))
else:
ffs_filter['value'] = search_value
if 'filters' not in ffs_filters:
ffs_filters['filters'] = []
ffs_filters['filters'].append(ffs_filter)
ffs_filters['filterClause'] = 'OR'
self.query_payload['groups'].append(ffs_filters)
if source != 'all' and source is not None:
source_filters = {}
source_filter = {}
source_filter['operator'] = 'IS'
source_filter['term'] = 'source'
source_filter['value'] = source_mapper[source]
if 'filters' not in source_filters:
source_filters['filters'] = []
source_filters['filters'].append(source_filter)
self.query_payload['groups'].append(source_filters)
self.query_payload['groupClause'] = 'AND'
if events_before:
before_filters = {}
before_filter = {}
before_filter['operator'] = 'ON_OR_BEFORE'
before_filter['term'] = 'eventTimestamp'
before_filter['value'] = events_before
if 'filters' not in before_filters:
before_filters['filters'] = []
before_filters['filters'].append(before_filter)
self.query_payload['groups'].append(before_filters)
self.query_payload['groupClause'] = 'AND'
if events_after:
after_filters = {}
after_filter = {}
after_filter['operator'] = 'ON_OR_AFTER'
after_filter['term'] = 'eventTimestamp'
after_filter['value'] = events_after
if 'filters' not in after_filters:
after_filters['filters'] = []
after_filters['filters'].append(after_filter)
self.query_payload['groups'].append(after_filters)
self.query_payload['groupClause'] = 'AND'
self.query_payload['pgNum'] = 1
self.query_payload['pgSize'] = max_results
return self.query_payload
def load_query_payload_from_json(self, json_payload):
"""
Load a query payload from a JSON object.
:param json_payload: JSON object that is a valid payload for the FFS API
:returns: Returns True if the query load was successful
"""
# Make sure the json_payload is a dict
if isinstance(json_payload, dict):
# Simply copy the payload into the query_payload variable
self.query_payload = json_payload
return True
else:
return False
def do_search(self):
"""
Conduct a search using FFS API.
:returns: Returns the search result, or None if there is an error
"""
if self.query_payload is None:
return None
url = 'https://{}/forensic-search/queryservice/api/v1/fileevent'.format(self.base_url)
headers = {'authorization' : 'v3_user_token {}'.format(self.auth_token)}
headers ['Content-Type'] = 'application/json'
response = self.s.post(url,headers=headers, json=self.query_payload)
if response.status_code != 200:
return None
return json.loads(response.text)
def read_in_file(in_file, search_type):
    """
    Read search values from a file.

    :param in_file: Path to the input file
    :param search_type: For 'raw', the whole file is returned as one string
        (a JSON query payload); otherwise one value per line is returned
    :returns: str for 'raw', list of str otherwise, or None if the file
        could not be read
    """
    try:
        # "with" guarantees the handle is closed (the original leaked it).
        with open(in_file, "r") as value_file:
            # If search type is raw, read the entire file and return the
            # string, otherwise return a list of lines.
            if search_type == 'raw':
                return value_file.read()
            return value_file.read().splitlines()
    except OSError:
        # Preserve the original contract: signal failure with None.
        return None
def write_out_json(out_file, results):
    """
    Write search results to *out_file* as pretty-printed JSON.

    :param out_file: Path of the output file
    :param results: JSON-serializable results object
    :returns: 0 on success, 1 on failure
    """
    try:
        print('Writing results to file {}...'.format(out_file))
        # "with" guarantees the handle is closed and data flushed
        # (the original leaked the handle).
        with open(out_file, "w+") as results_file:
            json.dump(results, results_file, indent=4)
        print('Write complete!')
        return 0
    except (OSError, TypeError, ValueError):
        # OSError: file problems; TypeError/ValueError: unserializable input.
        return 1
def write_out_count(out_file, count):
    """
    Write a result count to *out_file* as plain text.

    :param out_file: Path of the output file
    :param count: Count value; written via str()
    :returns: 0 on success, 1 on failure
    """
    try:
        print('Writing count to file {}...'.format(out_file))
        # "with" guarantees the handle is closed and data flushed
        # (the original leaked the handle).
        with open(out_file, "w+") as results_file:
            results_file.write(str(count))
        print('Write complete!')
        return 0
    except OSError:
        return 1
def filter_results(results, out_filter):
    """
    Reduce full event records to a flat list of a single attribute.

    :param results: FFS search response containing a 'fileEvents' list
    :param out_filter: Which attribute to keep ('md5' or 'sha256')
    :returns: List holding the chosen attribute of every event
    """
    # Translate the CLI filter name into the FFS event attribute name.
    mapper = {
        'md5': 'md5Checksum',
        'sha256': 'sha256Checksum'
    }
    return [event[mapper[out_filter]] for event in results['fileEvents']]
def main():
    """Command-line entry point for Code42 Forensic File Search.

    Parses arguments, validates them, logs in, builds (or loads) a query,
    runs the search, and writes results to stdout or a file. All error
    paths exit with status 1 (the original exited with 0, hiding failures
    from calling scripts).
    """
    # Define args
    parser = argparse.ArgumentParser(description='Code42 Forensic File Search', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--username', help='Local user for with Security Event Viewer rights', required=True)
    parser.add_argument('--password', help='Local user password')
    parser.add_argument('--sts_url', default='console.us.code42.com', help='STS URL for retrieving authentication token, defaults to console.us.code42.com')
    parser.add_argument('--base_url', default='forensicsearch-east.us.code42.com', help='API URL for search, defaults to forensicsearch-east.us.code42.com')
    parser.add_argument('--search_type', choices = ['md5', 'sha256', 'filename', 'filepath', 'fileowner', 'hostname', 'actor', 'sharedwith', 'event_id', 'exposure', 'device_vendor', 'device_name', 'device_sn', 'process_owner', 'process_name', 'sync_destination', 'raw'], help='Type of attribute to search for.\nA \'raw\' search will take a JSON string as a value and use that as the query payload for complex queries.\nFor \'exposure\' searches, allowable values are \'removable_media\', \'application_read\', or \'cloud_storage\'.', required=True)
    parser.add_argument('--source', choices = ['google', 'onedrive', 'endpoint', 'all'], default='all', help='Source of file events, defaults to All')
    parser.add_argument('--values', nargs='*', help='One or more values of attribute search_type to search for', metavar=('value1', 'value2'))
    parser.add_argument('--max_results', help='Max results to return, must be 10000 or less, default is 100', default=100, type=int)
    parser.add_argument('--events_before', help='Retrieve events on or before specific date in YYYY-MM-DD format', default=None)
    parser.add_argument('--events_after', help='Retrieve events on or after specific date in YYYY-MM-DD format', default=None)
    parser.add_argument('--count', help='Return count of results only', dest='count_only', action='store_true')
    parser.add_argument('--in_file', help='Input file containing values (one per line) or raw JSON query payload')
    parser.add_argument('--out_file', help='Output file for results')
    parser.add_argument('--out_filter', choices = ['md5','sha256'], help='Selected attribute to export instead of all attributes for each event')
    # Parse passed args
    args = parser.parse_args()
    # Sanity checks for options
    if args.values is None and args.in_file is None:
        print('Error: You must pass either one or more --values or an --in_file with values to search. Quitting...')
        sys.exit(1)
    if args.values and len(args.values) > 1024:
        print('Error: There is a limit of 1024 values per query, you have {} values. Quitting...'.format(len(args.values)))
        sys.exit(1)
    if args.count_only and args.out_filter:
        print('Error: --count and --out_filter are mutually exclusive options. Quitting...')
        sys.exit(1)
    if args.max_results > 10000:
        print('Error: --max_results cannot be greater than 10000. Quitting...')
        sys.exit(1)
    date_pattern = re.compile(r"^\d{4}\-\d{2}\-\d{2}")
    if args.events_before:
        if not date_pattern.match(args.events_before):
            print('Error: --events_before not in YYYY-MM-DD format. Quitting...')
            sys.exit(1)
    if args.events_after:
        if not date_pattern.match(args.events_after):
            print('Error: --events_after not in YYYY-MM-DD format. Quitting...')
            sys.exit(1)
    # Parse in_file if passed, otherwise read Values
    if args.in_file:
        query_values = read_in_file(args.in_file, args.search_type)
        if query_values is None:
            print('Error parsing values in in_file. Quitting...')
            sys.exit(1)
    else:
        if args.search_type == 'raw':
            query_values = args.values[0]
        else:
            query_values = args.values
    # Get password if it is not passed on the command line
    if args.password is None:
        password = getpass.getpass('Enter password for account {}:'.format(args.username))
    else:
        password = args.password
    # Create FFSQuery object
    ffs_query = FFSQuery(args.base_url)
    response = ffs_query.do_login(args.sts_url, args.username, password)
    if not response:
        print('Could not log in. Quitting...')
        sys.exit(1)
    # Build query payload
    if args.search_type == 'raw':
        # Convert the raw value to a JSON object
        try:
            response = ffs_query.load_query_payload_from_json(json.loads(query_values))
        except Exception as e:
            print('Error parsing JSON input, message: \'{}\'. Quitting...'.format(str(e)))
            sys.exit(1)
    else:
        try:
            ffs_query.build_query_payload(args.search_type, query_values, args.source, args.max_results, args.events_before, args.events_after)
        except Exception as e:
            print('Error parsing values, message: \'{}\'. Quitting...'.format(str(e)))
            sys.exit(1)
    # Do the search
    results = ffs_query.do_search()
    if results is None:
        print('Error returning results. Quitting...')
        sys.exit(1)
    # Filter results if there is an out_filter
    if args.out_filter:
        results = filter_results(results, args.out_filter)
    # If --count is selected, just return the count
    if args.count_only:
        # If --out_file is specified, output to file, otherwise print to stdout
        if args.out_file:
            success = write_out_count(args.out_file, results['totalCount'])
            if success != 0:
                print('Error writing out_file. Quitting...')
                sys.exit(1)
        else:
            # Print results
            print(results['totalCount'])
    else:
        # If --out_file is specified, output to file, otherwise print to stdout
        if args.out_file:
            success = write_out_json(args.out_file, results)
            if success != 0:
                print('Error writing out_file. Quitting...')
                sys.exit(1)
        else:
            # Print results
            print(json.dumps(results, indent=4, sort_keys=True))
# Run the CLI only when executed as a script, not when imported.
if __name__=='__main__':
    main()
|
import json
import requests
def getStatusByName(data, name):
    """Return the value of the door_locked sensor called *name*.

    :param data: Parsed status.json structure
    :param name: Sensor name to look up
    :returns: The matching sensor's 'value' field; True when the sensor or
        the surrounding structure is missing (fail-locked default)
    """
    try:
        # Bug fix: the original used filter(...)[0], which raises TypeError
        # on Python 3 (filter returns an iterator), and returned the
        # undefined name "true" instead of True.
        return next(
            x for x in data['sensors']['door_locked'] if x['name'] == name
        )['value']
    except (KeyError, StopIteration):
        return True
def getDoorstatus():
    """Fetch the realraum status feed and return the door sensor state.

    :returns: Tuple (locked, ajar) from the TorwaechterLock and
        TorwaechterAjarSensor entries of http://realraum.at/status.json
    """
    status = requests.get(url="http://realraum.at/status.json").json()
    locked = getStatusByName(status, 'TorwaechterLock')
    kontakted = getStatusByName(status, 'TorwaechterAjarSensor')
    return (locked, kontakted)
|
"""Script used to estimate discovery potential quickly. It uses some pretty
absurd approximations, but is REALLY REALLY fast (equivalent or faster than a
single trial, and is good to within ~30-50%). Use with some caution to
quickly guess appropriate flux scales, or understand trends, without full
calculations.
"""
import numpy as np
from flarestack.core.llh import LLH
from flarestack.core.astro import angular_distance
from flarestack.shared import k_to_flux
from flarestack.utils.catalogue_loader import load_catalogue, calculate_source_weight
from scipy.stats import norm
import logging
def estimate_discovery_potential(seasons, inj_dict, sources, llh_dict, raw_scale=1.0):
    """Function to estimate discovery potential given an injection model. It
    assumes an optimal LLH construction, i.e aligned time windows and correct
    energy weighting etc.

    :param seasons: Mapping of seasons (datasets) to be used
    :param inj_dict: Dictionary describing the injection model
    :param sources: Sources to be evaluated
    :param llh_dict: Dictionary describing the likelihood construction
    :param raw_scale: Flux scale at which signal is injected for the estimate
        (the result is converted back to flux units at the end)
    :return: An estimate for the discovery potential
    """
    logging.info("Trying to guess scale using AsimovEstimator.")

    # Total weight of the catalogue, used to normalise per-source weights.
    weight_scale = calculate_source_weight(sources)

    livetime = 0.0
    n_s_tot = 0.0
    n_tot = 0.0
    n_tot_coincident = 0.0

    # Running totals of signalness-weighted signal and background counts.
    new_n_s = 0.0
    new_n_bkg = 0.0

    for season in seasons.values():
        # Assume the optimal LLH: a fixed-energy likelihood matching the
        # injected spectrum.
        new_llh_dict = dict(llh_dict)
        new_llh_dict["llh_name"] = "fixed_energy"
        new_llh_dict["llh_energy_pdf"] = inj_dict["injection_energy_pdf"]
        llh = LLH.create(season, sources, new_llh_dict)

        data = season.get_background_model()
        n_tot += np.sum(data["weight"])
        # Livetime converted from days to seconds.
        livetime += llh.bkg_time_pdf.livetime * 60 * 60 * 24

        def signalness(sig_over_background):
            """Converts a signal over background ratio into a signal
            probability. This is ratio/(1 + ratio)

            :param sig_over_background: Ratio of signal to background
                probability
            :return: Percentage probability of signal
            """
            return sig_over_background / (1.0 + sig_over_background)

        n_sigs = []
        n_bkgs = []
        ts_vals = []
        n_s_season = 0.0

        # Effective injection time and weight per source, combined into a
        # per-source fluence.
        sig_times = np.array(
            [llh.sig_time_pdf.effective_injection_time(x) for x in sources]
        )
        source_weights = np.array([calculate_source_weight(x) for x in sources])
        mean_time = np.sum(sig_times * source_weights) / weight_scale

        fluences = (
            np.array([x * sig_times[i] for i, x in enumerate(source_weights)])
            / weight_scale
        )

        # Bin the fluence in sin(dec); each occupied band is approximated by
        # a single "dummy" source at the band centre.
        res = np.histogram(
            np.sin(sources["dec_rad"]), bins=season.sin_dec_bins, weights=fluences
        )

        dummy_sources = []
        bounds = []
        n_eff_sources = []

        for i, w in enumerate(res[0]):
            if w > 0:
                lower = res[1][i]
                upper = res[1][i + 1]
                mid = np.mean([upper, lower])
                mask = np.logical_and(
                    np.sin(sources["dec_rad"]) > lower,
                    np.sin(sources["dec_rad"]) < upper,
                )
                # Kish effective number of sources in this declination band.
                n_eff_sources.append(
                    (np.sum(fluences[mask]) ** 2.0 / np.sum(fluences[mask] ** 2))
                )
                dummy_sources.append(
                    (np.arcsin(mid), res[0][i], 1.0, 1.0, "dummy_{0}".format(mid))
                )
                bounds.append((lower, upper))

        # Bug fix: np.float / np.str were deprecated aliases of the builtins
        # and were removed in NumPy 1.24; the builtins behave identically.
        dummy_sources = np.array(
            dummy_sources,
            dtype=np.dtype(
                [
                    ("dec_rad", float),
                    ("base_weight", float),
                    ("distance_mpc", float),
                    ("injection_weight_modifier", float),
                    ("source_name", str),
                ]
            ),
        )

        inj = season.make_injector(dummy_sources, **inj_dict)

        for j, dummy_source in enumerate(dummy_sources):
            lower, upper = bounds[j]
            n_eff = n_eff_sources[j]  # currently unused

            source_mc = inj.calculate_single_source(dummy_source, scale=raw_scale)

            if len(source_mc) == 0:
                # Bug fix: the original message had no format placeholders,
                # so the declination bounds were silently dropped.
                logging.warning(
                    "No MC found for dummy source at declination between "
                    "{0:.3f} and {1:.3f} rad".format(
                        np.arcsin(lower), np.arcsin(upper)
                    )
                )
                ts_vals.append(0.0)
                n_sigs.append(0.0)
                n_bkgs.append(0.0)
            else:
                # Gives the solid angle coverage of the sky for the band
                omega = 2.0 * np.pi * (upper - lower)  # currently unused

                data_mask = np.logical_and(
                    np.greater(data["dec"], np.arcsin(lower)),
                    np.less(data["dec"], np.arcsin(upper)),
                )
                local_data = data[data_mask]

                data_weights = (
                    signalness(llh.energy_weight_f(local_data)) * local_data["weight"]
                )
                mc_weights = signalness(llh.energy_weight_f(source_mc))

                true_errors = angular_distance(
                    source_mc["ra"],
                    source_mc["dec"],
                    source_mc["trueRa"],
                    source_mc["trueDec"],
                )

                median_sigma = np.mean(local_data["sigma"])
                area = (
                    np.pi * (2.0 * median_sigma) ** 2 / np.cos(dummy_source["dec_rad"])
                )  # currently unused
                local_rate = np.sum(data_weights)  # currently unused

                n_bkg = np.sum(local_data["weight"])
                n_tot_coincident += n_bkg

                ratio_time = livetime / mean_time

                # Spatial signalness of each MC event: 2D-Gaussian PSF over
                # the background spatial PDF.
                sig_spatial = signalness(
                    (
                        1.0
                        / (2.0 * np.pi * source_mc["sigma"] ** 2.0)
                        * np.exp(-0.5 * ((true_errors / source_mc["sigma"]) ** 2.0))
                    )
                    / llh.spatial_pdf.background_spatial(source_mc)
                )

                # Average background spatial signalness, factorised into
                # declination and right-ascension components.
                ra_steps = np.linspace(-np.pi, np.pi, 100)
                dec_steps = np.linspace(lower, upper, 10)
                mean_dec = np.mean(
                    signalness(
                        norm.pdf(
                            dec_steps,
                            scale=median_sigma / np.cos(dummy_source["dec_rad"]),
                            loc=np.mean([lower, upper]),
                        )
                        * (upper - lower)
                    )
                )
                mean_ra = np.mean(
                    signalness(
                        norm.pdf(ra_steps, scale=median_sigma, loc=0.0) * 2.0 * np.pi
                    )
                )
                bkg_spatial = mean_dec * mean_ra

                n_s_tot += np.sum(source_mc["ow"])
                n_s_season += np.sum(source_mc["ow"])

                med_sig = (
                    np.mean(sig_spatial * mc_weights)
                    * signalness(ratio_time)
                    * np.sum(source_mc["ow"])
                )
                med_bkg = (
                    np.mean(bkg_spatial * data_weights)
                    * (1.0 - signalness(ratio_time))
                    * n_bkg
                )

                new_n_s += med_sig
                new_n_bkg += med_bkg

    scaler_ratio = new_n_s / n_s_tot
    # NOTE(review): the line below immediately overwrites the value computed
    # above — preserved as-is, but this looks like a bug worth confirming.
    scaler_ratio = new_n_bkg / n_tot_coincident

    print("Scaler Ratio", scaler_ratio)

    # Counts needed for a 5-sigma deviation above the estimated background.
    disc_count = norm.ppf(norm.cdf(5.0), loc=0.0, scale=np.sqrt(new_n_bkg))

    print("Disc count", disc_count, disc_count / scaler_ratio)

    scale = disc_count / new_n_s
    print(scale)

    # Convert from scale factor to flux units
    scale = k_to_flux(scale) * raw_scale

    logging.info(
        "Estimated Discovery Potential is: {:.3g} GeV sr^-1 s^-1 cm^-2".format(scale)
    )

    return scale
class AsimovEstimator(object):
    """Convenience wrapper around estimate_discovery_potential().

    Holds the seasons, injection model and likelihood configuration so that
    the discovery potential can be estimated for multiple catalogues.
    """

    def __init__(self, seasons, inj_dict, llh_dict):
        self.seasons = seasons
        # Bug fix: keep the injection dictionary — it is required by
        # estimate_discovery_potential() (previously it was discarded).
        self.inj_dict = inj_dict
        self.llh_dict = llh_dict
        self.injectors = dict()
        for season in self.seasons.values():
            self.injectors[season.season_name] = season.make_injector([], **inj_dict)

    def guess_discovery_potential(self, source_path):
        """Load a catalogue and return its estimated discovery potential.

        :param source_path: Path to the source catalogue
        :return: Estimated discovery potential flux
        """
        sources = load_catalogue(source_path)
        for inj in self.injectors.values():
            inj.update_sources(sources)
        # Bug fix: the original call passed (injectors, sources, llh_dict),
        # omitting inj_dict and supplying injectors where
        # estimate_discovery_potential() expects the seasons mapping.
        return estimate_discovery_potential(
            self.seasons, self.inj_dict, sources, self.llh_dict
        )
|
import unittest
# Can't use from... import directly since the file is into another folder
import os, sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
from warmup.repeatedString import NewString
class TestRepeatedString(unittest.TestCase):
    """Unit tests for the NewString helper from warmup.repeatedString."""

    def setUp(self):
        # A fresh NewString instance before every test.
        self.myString = NewString()

    def test_randomString(self):
        """randomString(n) produces an n-character string containing 'a'."""
        self.myString.randomString(9)
        self.assertEqual(len(self.myString.string), 9)
        self.assertTrue('a' in self.myString.string)

    def test_repeatSubstringExact(self):
        """Repeating to an exact multiple of the base length works."""
        self.myString.randomString(length=3)
        self.myString.repeatSubstring(27)
        self.assertEqual(len(self.myString.substring), 27)

    def test_repeatSubstringModule(self):
        """Repeating also works when n is not a multiple of the base length."""
        self.myString.randomString(length=4)
        self.myString.repeatSubstring(89)
        self.assertEqual(len(self.myString.substring), 89)

    def test_countLetter(self):
        """countLetter tallies occurrences within the repeated substring."""
        self.myString.string = 'baba'
        self.myString.repeatSubstring(15)
        self.myString.countLetter('a')
        self.assertEqual(self.myString.letter, 7)

    def test_printResult(self):
        """printResult reports the count in a human-readable sentence."""
        self.myString.string = 'baba'
        self.myString.repeatSubstring(15)
        self.myString.countLetter('a')
        self.assertEqual(
            self.myString.printResult(),
            'There are 7 occurrences of a letter',
        )
|
from auto_schema.replica_set import ReplicaSet

# Target datacenter and database section for this maintenance run.
dc = 'eqiad'
section = 's5'
# Don't add set session sql_log_bin=0;
# Revoke DROP on all wiki databases from wikiadmin connecting from 10.%.
command = 'REVOKE DROP ON \\`%wik%\\`.* FROM wikiadmin@\'10.%\';'
# DO NOT FORGET to set the right port if it's not 3306
# NOTE(review): replicas=None presumably lets ReplicaSet resolve the hosts
# of the section itself — confirm against ReplicaSet's constructor.
replicas = None
replica_set = ReplicaSet(replicas, section, dc)
# Run the statement on every replica: no ticket, no downtime, no depooling.
replica_set.sql_on_each_replica(
    command, ticket=None, downtime_hours=None, should_depool=None)
|
import numpy as np
import math
from scipy.linalg import lu, qr, solve_triangular, inv, solve, svd
from numpy.linalg import cond
from numalgsolve.polynomial import Polynomial, MultiCheb, MultiPower, is_power
from scipy.sparse import csr_matrix, vstack
from numalgsolve.utils import Term, row_swap_matrix, clean_zeros_from_matrix, inverse_P, triangular_solve, divides, slice_top, mon_combos
import matplotlib.pyplot as plt
from collections import defaultdict
import numalgsolve.utils as utils
def Macaulay(initial_poly_list, global_accuracy = 1.e-10):
    """
    Accepts a list of polynomials and uses them to construct a Macaulay
    matrix, which is reduced to a new basis of polynomials for root finding.

    Parameters
    --------
    initial_poly_list: list
        Polynomials for Macaulay construction.
    global_accuracy : float
        Round-off parameter: values within global_accuracy of zero are rounded to zero. Defaults to 1e-10.

    Returns
    -------
    final_polys : list
        Reduced Macaulay matrix that can be passed into the root finder.
    """
    # True for the power basis (MultiPower), False otherwise.
    Power = is_power(initial_poly_list)
    poly_coeff_list = []
    # find_degree/add_polys/create_matrix/get_good_rows/get_polys_from_matrix
    # are presumably defined elsewhere in this module — not visible here.
    degree = find_degree(initial_poly_list)
    # Collect coefficient rows for each input polynomial up to that degree.
    for i in initial_poly_list:
        poly_coeff_list = add_polys(degree, i, poly_coeff_list)
    matrix, matrix_terms = create_matrix(poly_coeff_list)
    # NOTE(review): debug visualization — plt.show() blocks until the window
    # is closed; consider removing for non-interactive use.
    plt.matshow(matrix)
    plt.show()
    #rrqr_reduce2 and rrqr_reduce same pretty matched on stability, though I feel like 2 should be better.
    matrix = utils.rrqr_reduce2(matrix, global_accuracy = global_accuracy) # here
    matrix = clean_zeros_from_matrix(matrix)
    # Drop rows that were reduced to all zeros.
    non_zero_rows = np.sum(np.abs(matrix),axis=1) != 0
    matrix = matrix[non_zero_rows,:] #Only keeps the non_zero_polymonials
    matrix = triangular_solve(matrix)
    matrix = clean_zeros_from_matrix(matrix)
    #The other reduction option. I thought it would be really stable but seems to be the worst of the three.
    #matrix = matrixReduce(matrix, triangular_solve = True, global_accuracy = global_accuracy)
    rows = get_good_rows(matrix, matrix_terms)
    final_polys = get_polys_from_matrix(matrix, matrix_terms, rows, Power)
    return final_polys
|
import pdb

import numpy as np
import torch
from torch import nn

import spconv
import spconv.functional as Fsp
from spconv import ops

from detectron2.layers import Conv2d
from detectron2.layers.batch_norm import get_norm

from .partialconv2d import PartialConv2d
from .deform_conv import DFConv2d

# from spconv import ops, SparseConvTensor
class SparseConvolutionWS(spconv.conv.SparseConvolution):
    """spconv SparseConvolution whose kernel weights are standardized
    (mean-subtracted, std-normalized) before every forward pass.

    NOTE(review): the forward pass references ``torch`` and ``np`` but this
    module does not visibly import them — confirm they are imported at
    module level, otherwise this raises NameError at runtime.
    """
    # Attributes exposed as constants (TorchScript convention).
    __constants__ = [
        'stride', 'padding', 'dilation', 'groups', 'bias', 'subm', 'inverse',
        'transposed', 'output_padding', 'fused_bn'
    ]
    def forward(self, input):
        # Only sparse tensors are supported.
        assert isinstance(input, spconv.SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        # Determine the output spatial shape; submanifold convs preserve it.
        if not self.subm:
            if self.transposed:
                out_spatial_shape = ops.get_deconv_output_size(
                    spatial_shape, self.kernel_size, self.stride, self.padding,
                    self.dilation, self.output_padding)
            else:
                out_spatial_shape = ops.get_conv_output_size(
                    spatial_shape, self.kernel_size, self.stride, self.padding,
                    self.dilation)
        else:
            out_spatial_shape = spatial_shape
        # input.update_grid(out_spatial_shape)
        # t = time.time()
        ## Weight Standardization
        # Subtract the mean over dims 1-3 and divide by the per-filter std
        # (1e-5 added for numerical stability).
        weight = self.weight
        weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2,
            keepdim=True).mean(dim=3, keepdim=True)
        weight = weight - weight_mean
        std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
        weight = weight / std.expand_as(weight)
        if self.conv1x1:
            # 1x1 convolution reduces to a dense matrix multiply on features.
            features = torch.mm(
                input.features,
                weight.view(self.in_channels, self.out_channels))
            if self.bias is not None:
                features += self.bias
            out_tensor = spconv.SparseConvTensor(features, input.indices,
                                                 input.spatial_shape,
                                                 input.batch_size)
            out_tensor.indice_dict = input.indice_dict
            out_tensor.grid = input.grid
            return out_tensor
        # Reuse cached indice pairs for this key when available.
        datas = input.find_indice_pair(self.indice_key)
        if self.inverse:
            assert datas is not None and self.indice_key is not None
            _, outids, indice_pairs, indice_pair_num, out_spatial_shape = datas
            assert indice_pair_num.shape[0] == np.prod(
                self.kernel_size
            ), "inverse conv must have same kernel size as its couple conv"
        else:
            if self.indice_key is not None and datas is not None:
                outids, _, indice_pairs, indice_pair_num, _ = datas
            else:
                # First use of this indice_key: compute pairs and cache them
                # on the input tensor for downstream layers.
                outids, indice_pairs, indice_pair_num = ops.get_indice_pairs(
                    indices,
                    batch_size,
                    spatial_shape,
                    self.kernel_size,
                    self.stride,
                    self.padding,
                    self.dilation,
                    self.output_padding,
                    self.subm,
                    self.transposed,
                    grid=input.grid,
                    use_hash=self.use_hash)
                input.indice_dict[self.indice_key] = (outids, indices,
                                                      indice_pairs,
                                                      indice_pair_num,
                                                      spatial_shape)
        # Dispatch to the appropriate spconv kernel.
        if self.fused_bn:
            assert self.bias is not None
            out_features = ops.fused_indice_conv(features, weight,
                                                 self.bias,
                                                 indice_pairs.to(device),
                                                 indice_pair_num,
                                                 outids.shape[0], self.inverse,
                                                 self.subm)
        else:
            if self.subm:
                out_features = Fsp.indice_subm_conv(features, weight,
                                                    indice_pairs.to(device),
                                                    indice_pair_num,
                                                    outids.shape[0], self.algo)
            else:
                if self.inverse:
                    out_features = Fsp.indice_inverse_conv(
                        features, weight, indice_pairs.to(device),
                        indice_pair_num, outids.shape[0], self.algo)
                else:
                    out_features = Fsp.indice_conv(features, weight,
                                                   indice_pairs.to(device),
                                                   indice_pair_num,
                                                   outids.shape[0], self.algo)
            if self.bias is not None:
                out_features += self.bias
        out_tensor = spconv.SparseConvTensor(out_features, outids,
                                             out_spatial_shape, batch_size)
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
class SubMConv2dWS(SparseConvolutionWS):
    """2D submanifold sparse convolution with weight standardization.

    Thin constructor wrapper: the leading positional "2" is the spatial
    dimensionality; the positional "True" is presumably the subm flag —
    confirm against spconv.conv.SparseConvolution's signature.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 indice_key=None,
                 use_hash=False,
                 algo=ops.ConvAlgo.Native):
        super(SubMConv2dWS, self).__init__(2,
                                           in_channels,
                                           out_channels,
                                           kernel_size,
                                           stride,
                                           padding,
                                           dilation,
                                           groups,
                                           bias,
                                           True,
                                           indice_key=indice_key,
                                           use_hash=use_hash,
                                           algo=algo)
# class ConvAlgo(Enum):
# Native = 0 # small memory cost, faster when number of points is large.
# Batch = 1 # high memory cost, faster when number of points is small (< 50000)
# BatchGemmGather = 2 # high memory cost, faster when number of points medium
# from spconv.conv import (SparseConv2d, SparseConv3d, SparseConvTranspose2d,
# SparseConvTranspose3d, SparseInverseConv2d,
# SparseInverseConv3d, SubMConv2d, SubMConv3d)
def sparse_conv_with_kaiming_uniform(
        norm=None, activation=None, use_sep=False, use_submconv=True, use_deconv=False, use_weight_std=False):
    """Build a factory for sparse conv layers with Kaiming-uniform init.

    :param norm: Normalisation to append ("BN" supported; "GN" not implemented)
    :param activation: If not None, append an inplace ReLU
    :param use_sep: Depthwise (grouped) conv; requires in_channels == out_channels
    :param use_submconv: Use submanifold convolution
    :param use_deconv: Use transposed convolution
    :param use_weight_std: Use the weight-standardized SubMConv2dWS variant
    :return: make_conv(in_channels, out_channels, kernel_size, ...) factory
    """
    def make_conv(
        in_channels, out_channels, kernel_size,
        stride=1, padding=0, dilation=1, indice_key="subm0"
    ):
        if use_weight_std:
            assert use_submconv and not use_deconv, "WS are not added to others spconv layers yet"
        # Select the concrete conv class from the flag combination.
        if use_submconv:
            if use_deconv:
                conv_func = spconv.SparseConvTranspose2d
                # conv_func = spconv.SparseInverseConv2d
            elif use_weight_std:
                conv_func = SubMConv2dWS
            else:
                conv_func = spconv.SubMConv2d
        else:
            if use_deconv:
                conv_func = spconv.SparseConvTranspose2d
            else:
                conv_func = spconv.SparseConv2d
        if use_sep:
            assert in_channels == out_channels
            groups = in_channels
        else:
            groups = 1
        try:
            conv = conv_func(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
                bias=(norm is None),
                indice_key=indice_key,
                algo=spconv.ops.ConvAlgo.Native
            )
        except TypeError:
            # Some layer classes do not accept stride/padding/dilation/groups;
            # retry with the minimal signature. The original bare "except:"
            # also swallowed unrelated construction errors.
            conv = conv_func(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                bias=(norm is None),
                indice_key=indice_key,
                algo=spconv.ops.ConvAlgo.Native
            )
        # Caffe2 implementation uses XavierFill, which in fact
        # corresponds to kaiming_uniform_ in PyTorch
        nn.init.kaiming_uniform_(conv.weight, a=1)
        if norm is None:
            nn.init.constant_(conv.bias, 0)
        module = [conv]
        if norm is not None and len(norm) > 0:
            if norm == "GN":
                # Unreachable dead code that followed this raise was removed.
                raise NotImplementedError
            elif norm == "BN":
                norm_module = nn.BatchNorm1d(out_channels)
            else:
                raise NotImplementedError
            module.append(norm_module)
        if activation is not None:
            module.append(nn.ReLU(inplace=True))
        if len(module) > 1:
            return spconv.SparseSequential(*module)
        return conv
    return make_conv
|
#!/usr/bin/env python
from __future__ import print_function
from sys import argv, exit
from os.path import join, exists
from numpy import array,genfromtxt,atleast_2d
from numpy.linalg import norm
def extract_eigvals(folder):
    '''
    Reads eigvals.dat in folder and extracts raw data as a
    two-dimensional array of double-precision floating point numbers.

    :param folder: Directory expected to contain an "eigvals.dat" file
    :returns: 2-D numpy array of doubles (single rows are promoted to 2-D)
    :raises FileNotFoundError: if eigvals.dat does not exist
    '''
    filename = join(folder, "eigvals.dat")
    # Raise explicitly instead of using assert, which is stripped under -O.
    if not exists(filename):
        raise FileNotFoundError("File does not exist {}".format(filename))
    rawdata = atleast_2d(genfromtxt(filename, dtype="d"))
    return rawdata
if __name__ == '__main__':
    # Compare the 4th column of eigvals.dat from two folders; exit non-zero
    # when their norm difference exceeds an optional tolerance (argv[3]).
    eigvals1 = extract_eigvals(argv[1])[:, 3]
    eigvals2 = extract_eigvals(argv[2])[:, 3]
    tol = 1.0e-16
    if len(argv) >= 4:
        # Bug fix: argv entries are strings; the original compared a float
        # against a str, which raises TypeError on Python 3.
        tol = float(argv[3])
    err = norm(eigvals1 - eigvals2) > tol
    exit(int(err))
|
# """
# ckwg +31
# Copyright 2020 by Kitware, Inc.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ==============================================================================
# Tests for Python interface to vital::polygon
# """
from kwiver.vital.types import Polygon
import nose.tools as nt
import numpy as np
class TestVitalPolygon(object):
    """Exercises construction, access and mutation of vital::polygon."""

    def _create_points(self):
        # The four vertices shared by every test case.
        return [
            np.array([10, 10]),
            np.array([10, 50]),
            np.array([50, 50]),
            np.array([30, 30]),
        ]

    def _create_polygons(self):
        # One empty polygon and one pre-seeded with the standard points.
        return (Polygon(), Polygon(self._create_points()))

    def test_new(self):
        # Both constructor forms should succeed without raising.
        Polygon()
        Polygon(self._create_points())

    def test_at_out_of_bounds(self):
        empty, seeded = self._create_polygons()
        nt.assert_raises(IndexError, empty.at, 0)
        nt.assert_raises(IndexError, seeded.at, 4)

    def test_at_with_initial_points(self):
        pts = self._create_points()
        _, seeded = self._create_polygons()
        for idx, pt in enumerate(pts):
            np.testing.assert_array_equal(pt, seeded.at(idx))
        nt.assert_equal(seeded.num_vertices(), 4)

    def test_at_point_not_in_array(self):
        _, seeded = self._create_polygons()
        stranger = np.array([17, 200])
        # No stored vertex should compare equal to an unrelated point.
        for idx in range(4):
            nt.assert_raises(
                AssertionError,
                np.testing.assert_array_equal,
                stranger,
                seeded.at(idx),
            )

    def test_initial_num_vertices(self):
        empty, seeded = self._create_polygons()
        nt.assert_equal(empty.num_vertices(), 0)
        nt.assert_equal(seeded.num_vertices(), 4)

    def test_push_back(self):
        empty, seeded = self._create_polygons()
        pts = self._create_points()
        # Grow the empty polygon: two vertices as arrays, two as x/y pairs.
        empty.push_back(pts[0])
        nt.assert_equal(empty.num_vertices(), 1)
        empty.push_back(pts[1])
        nt.assert_equal(empty.num_vertices(), 2)
        empty.push_back(pts[2][0], pts[2][1])
        nt.assert_equal(empty.num_vertices(), 3)
        empty.push_back(pts[3][0], pts[3][1])
        nt.assert_equal(empty.num_vertices(), 4)
        for idx, pt in enumerate(pts):
            np.testing.assert_array_equal(pt, empty.at(idx))
        # Appending to the pre-seeded polygon keeps extending the tail.
        extra = np.array([40, 35])
        seeded.push_back(extra)
        nt.assert_equal(seeded.num_vertices(), 5)
        x, y = 20, 10
        seeded.push_back(x, y)
        nt.assert_equal(seeded.num_vertices(), 6)
        np.testing.assert_array_equal(extra, seeded.at(4))
        np.testing.assert_array_equal(np.array([x, y]), seeded.at(5))

    def test_contains(self):
        pts = self._create_points()
        empty, seeded = self._create_polygons()
        # An empty polygon contains nothing.
        nt.assert_false(empty.contains(pts[0]))
        nt.assert_false(empty.contains(pts[1]))
        nt.assert_false(empty.contains(pts[2][0], pts[2][1]))
        nt.assert_false(empty.contains(pts[3][0], pts[3][1]))
        # The seeded polygon contains its own vertices...
        nt.ok_(seeded.contains(pts[0]))
        nt.ok_(seeded.contains(pts[1]))
        nt.ok_(seeded.contains(pts[2][0], pts[2][1]))
        nt.ok_(seeded.contains(pts[3][0], pts[3][1]))
        # ...as well as interior and boundary points.
        nt.ok_(seeded.contains(25, 40))   # x/y coord inside
        nt.ok_(seeded.contains(35, 50))   # x/y coord on the boundary
        nt.ok_(seeded.contains(np.array([20, 30])))  # array inside
        nt.ok_(seeded.contains(np.array([10, 30])))  # array on the boundary

    def test_get_vertices(self):
        pts = self._create_points()
        empty, seeded = self._create_polygons()
        np.testing.assert_array_equal(empty.get_vertices(), [])
        for pt in pts:
            empty.push_back(pt)
        np.testing.assert_array_equal(empty.get_vertices(), pts)
        np.testing.assert_array_equal(seeded.get_vertices(), pts)
        extra = np.array([40, 35])
        seeded.push_back(extra)
        np.testing.assert_array_equal(seeded.get_vertices(), pts + [extra])
|
import librosa.display
import matplotlib.pyplot as plt
import json
import torch
import torchaudio
import hifigan
def manual_logging(logger, item, idx, tag, global_step, data_type, config):
    """Log one element of a batch to a TensorBoard-style *logger*.

    data_type "audio" logs a waveform; "image" logs a spectrogram figure;
    anything else raises NotImplementedError.
    """
    if data_type == "audio":
        waveform = item[idx, ...].detach().cpu().numpy()
        logger.add_audio(
            tag,
            waveform,
            global_step,
            sample_rate=config["preprocess"]["sampling_rate"],
        )
    elif data_type == "image":
        spectrogram = item[idx, ...].detach().cpu().numpy()
        fig, ax = plt.subplots()
        _ = librosa.display.specshow(
            spectrogram,
            x_axis="time",
            y_axis="linear",
            sr=config["preprocess"]["sampling_rate"],
            hop_length=config["preprocess"]["frame_shift"],
            fmax=config["preprocess"]["sampling_rate"] // 2,
            ax=ax,
        )
        logger.add_figure(tag, fig, global_step)
    else:
        raise NotImplementedError(
            "Data type given to logger should be [audio] or [image]"
        )
def load_vocoder(config):
    """Build a frozen HiFi-GAN generator from the checkpoint named in *config*."""
    cfg_path = "hifigan/config_{}.json".format(config["general"]["feature_type"])
    with open(cfg_path, "r") as f:
        hifigan_cfg = hifigan.AttrDict(json.load(f))
    vocoder = hifigan.Generator(hifigan_cfg)
    state = torch.load(config["general"]["hifigan_path"])["generator"]
    vocoder.load_state_dict(state)
    vocoder.remove_weight_norm()
    # Inference only: freeze all generator weights.
    for param in vocoder.parameters():
        param.requires_grad = False
    return vocoder
def get_conv_padding(kernel_size, dilation=1):
    """Return the 'same'-style padding for a 1D convolution.

    Uses pure integer arithmetic instead of float division + int() — the
    value (kernel_size - 1) * dilation / 2 is integral for odd kernels and
    floor-division matches the old int() truncation for even ones.
    """
    return (kernel_size - 1) * dilation // 2
def plot_and_save_mels(wav, save_path, config):
    """Render *wav* (1-D tensor) as a log-mel spectrogram image at *save_path*.

    All STFT/mel parameters come from config["preprocess"].
    """
    spec_module = torchaudio.transforms.MelSpectrogram(
        sample_rate=config["preprocess"]["sampling_rate"],
        n_fft=config["preprocess"]["fft_length"],
        win_length=config["preprocess"]["frame_length"],
        hop_length=config["preprocess"]["frame_shift"],
        f_min=config["preprocess"]["fmin"],
        f_max=config["preprocess"]["fmax"],
        n_mels=config["preprocess"]["n_mels"],
        power=1,
        center=True,
        norm="slaney",
        mel_scale="slaney",
    )
    spec = spec_module(wav.unsqueeze(0))
    # Clamp before log to avoid log(0); comp_factor is a compression gain.
    log_spec = torch.log(
        torch.clamp_min(spec, config["preprocess"]["min_magnitude"])
        * config["preprocess"]["comp_factor"]
    )
    fig, ax = plt.subplots()
    _ = librosa.display.specshow(
        log_spec.squeeze(0).numpy(),
        x_axis="time",
        y_axis="linear",
        sr=config["preprocess"]["sampling_rate"],
        hop_length=config["preprocess"]["frame_shift"],
        fmax=config["preprocess"]["sampling_rate"] // 2,
        ax=ax,
        cmap="viridis",
    )
    fig.savefig(save_path, bbox_inches="tight", pad_inches=0)
    # BUGFIX: close the figure; otherwise repeated calls leak matplotlib
    # figures and memory grows without bound.
    plt.close(fig)
def plot_and_save_mels_all(wavs, keys, save_path, config):
    """Render up to 9 waveforms from *wavs* (dict of batched tensors, first
    item of each batch) as a 3x3 grid of log-mel spectrograms at *save_path*.

    NOTE(review): the grid is fixed at 3x3, so len(keys) > 9 would index out
    of bounds — confirm callers never pass more than nine keys.
    """
    spec_module = torchaudio.transforms.MelSpectrogram(
        sample_rate=config["preprocess"]["sampling_rate"],
        n_fft=config["preprocess"]["fft_length"],
        win_length=config["preprocess"]["frame_length"],
        hop_length=config["preprocess"]["frame_shift"],
        f_min=config["preprocess"]["fmin"],
        f_max=config["preprocess"]["fmax"],
        n_mels=config["preprocess"]["n_mels"],
        power=1,
        center=True,
        norm="slaney",
        mel_scale="slaney",
    )
    fig, ax = plt.subplots(nrows=3, ncols=3, figsize=(18, 18))
    for i, key in enumerate(keys):
        wav = wavs[key][0, ...].cpu()
        spec = spec_module(wav.unsqueeze(0))
        log_spec = torch.log(
            torch.clamp_min(spec, config["preprocess"]["min_magnitude"])
            * config["preprocess"]["comp_factor"]
        )
        ax[i // 3, i % 3].set(title=key)
        _ = librosa.display.specshow(
            log_spec.squeeze(0).numpy(),
            x_axis="time",
            y_axis="linear",
            sr=config["preprocess"]["sampling_rate"],
            hop_length=config["preprocess"]["frame_shift"],
            fmax=config["preprocess"]["sampling_rate"] // 2,
            ax=ax[i // 3, i % 3],
            cmap="viridis",
        )
    fig.savefig(save_path, bbox_inches="tight", pad_inches=0)
    # BUGFIX: close the figure to avoid leaking it across calls.
    plt.close(fig)
def configure_args(config, args):
    """Overlay command-line *args* onto *config* (mutated in place) and return both.

    Only options the user actually supplied (non-None) override the config;
    the boolean flags load_pretrained/early_stopping are always copied.
    """
    # Path-like/general options are stored as strings under "general".
    for key in ["stage", "corpus_type", "source_path", "aux_path", "preprocessed_path"]:
        if getattr(args, key) is not None:
            config["general"][key] = str(getattr(args, key))
    for key in ["n_train", "n_val", "n_test"]:
        if getattr(args, key) is not None:
            config["preprocess"][key] = getattr(args, key)
    for key in ["alpha", "beta", "learning_rate", "epoch"]:
        if getattr(args, key) is not None:
            config["train"][key] = getattr(args, key)
    # Boolean flags always carry a value, so copy them unconditionally.
    for key in ["load_pretrained", "early_stopping"]:
        config["train"][key] = getattr(args, key)
    if args.feature_loss_type is not None:
        config["train"]["feature_loss"]["type"] = args.feature_loss_type
    for key in ["pretrained_path"]:
        if getattr(args, key) is not None:
            config["train"][key] = str(getattr(args, key))
    return config, args
|
from syned.beamline.beamline_element import BeamlineElement
from wofrywise2.beamline.wise_optical_element import WiseOpticalElement
class WiseBeamlineElement(BeamlineElement):
    """Beamline element for WISE 2; coordinates are unsupported by design."""

    def __init__(self, optical_element=None):
        # BUGFIX: the old default `optical_element=WiseOpticalElement()` was
        # evaluated once at class-definition time, so every element built with
        # the default silently shared the SAME optical-element instance.
        if optical_element is None:
            optical_element = WiseOpticalElement()
        super(WiseBeamlineElement, self).__init__(optical_element=optical_element, coordinates=None)

    def get_coordinates(self):
        """Coordinates are not meaningful in WISE 2."""
        raise NotImplementedError("this method cannot be used in WISE 2")
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Author: Andrey Skopenko <andrey@scopenco.net>
'''
This module is a set of funcs and classes
for ispmanager plugin creation.
'''
# configs
# Absolute path of the ispmanager log file this module appends debug lines to.
LOG_FILE = '/usr/local/ispmgr/var/ispmgr.log'
from os import getpid
from xml.dom import minidom
class ExitOk(Exception):
    """Raised to terminate plugin processing without signalling an error."""
    pass
# module 'logging' IMHO is not so convenient for this task
class Log(object):
    '''Class used for add debug to ispmgr.log'''

    def __init__(self, plugin=None, output=LOG_FILE):
        import time
        # Timestamp is captured once, at construction time.
        self.timef = time.strftime("%b %d %H:%M:%S", time.localtime(time.time()))
        self.log = output
        self.plugin_name = plugin
        self.fsock = open(self.log, 'a+')
        self.pid = getpid()
        self.script_name = __file__

    def write(self, desc):
        '''Append one coloured log record; a trailing newline is added when missing.'''
        if desc == "\n":
            return  # ignore bare newlines
        record = '%s [%s] ./%s \033[36;40mPLUGIN %s :: %s\033[0m' % (
            self.timef, self.pid, self.script_name,
            self.plugin_name, desc)
        if not desc.endswith("\n"):
            record += "\n"
        self.fsock.write(record)

    def close(self):
        '''Close the underlying log file handle.'''
        self.fsock.close()
def xml_doc(elem=None, text=None):
    '''base xml output <doc>...</doc>'''
    document = minidom.Document()
    root = document.createElement('doc')
    document.appendChild(root)
    if elem:
        child = document.createElement(elem)
        root.appendChild(child)
        if text:
            child.appendChild(document.createTextNode(text))
    return document.toxml('UTF-8')
def xml_error(text, code_num=None):
    '''base xml error output <doc><error>...</error></doc>

    code_num 2/3/6 -> text goes into an "obj" attribute,
    code_num 4/5   -> text goes into a "val" attribute,
    otherwise text becomes the element body (code attribute kept if given).
    '''
    xmldoc = minidom.Document()
    doc = xmldoc.createElement('doc')
    xmldoc.appendChild(doc)
    error = xmldoc.createElement('error')
    doc.appendChild(error)
    if code_num:
        # setAttribute alone creates the attribute node; the old
        # createAttribute/setAttributeNode pair was redundant.
        error.setAttribute('code', str(code_num))
        if code_num in (2, 3, 6):
            error.setAttribute('obj', str(text))
            return xmldoc.toxml('UTF-8')
        if code_num in (4, 5):
            error.setAttribute('val', str(text))
            return xmldoc.toxml('UTF-8')
    # BUGFIX: unconditional text.decode('utf-8') only worked on Python 2
    # byte strings; decode only when we actually hold bytes.
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    error.appendChild(xmldoc.createTextNode(text))
    return xmldoc.toxml('UTF-8')
def domain_to_idna(dom):
    '''convert domain to idna format

    Accepts UTF-8 bytes or text. BUGFIX: the old `unicode(dom, 'utf-8')`
    only existed on Python 2; decoding conditionally works on both 2 and 3
    and also accepts already-decoded text.
    '''
    if isinstance(dom, bytes):
        dom = dom.decode('utf-8')
    return dom.encode('idna')
if __name__ == "__main__":
    # Print module usage. The parenthesized form works on both Python 2
    # (prints a plain expression) and Python 3 (function call); the old
    # `print __doc__` statement was Python-2-only syntax.
    print(__doc__)
|
def algo_AES(string, do, key):
    """Encrypt/decrypt *string* with a Fernet key derived from *key* via PBKDF2.

    do: "encrypt" or "decrypt"; any other value raises Exception.
    Returns the resulting text.
    """
    import base64
    try:
        from cryptography.hazmat.primitives import hashes
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
        from cryptography.fernet import Fernet
    except ModuleNotFoundError:
        print("Cryptography module is missing :'( ")
        print("pip install cryptography")
        # BUGFIX: previously execution continued after the hint and crashed
        # later with a confusing NameError; re-raise the real import error.
        raise

    def make_key():
        # NOTE(review): static salt means the same passphrase always yields
        # the same key — acceptable for obfuscation, not password storage.
        salt = b'some salt'
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=100000,
            backend=default_backend(),
        )
        return base64.urlsafe_b64encode(kdf.derive(key.encode()))

    def get_fernet():
        return Fernet(make_key())

    if do == "encrypt":
        return get_fernet().encrypt(string.encode()).decode()
    elif do == "decrypt":
        return get_fernet().decrypt(string.encode()).decode()
    else:
        raise Exception(f"INVALID OPERATION: '{do}'")
# x = algo_AES("hello", "encrypt", "thisKEY")
# y = algo_AES(x, "decrypt", "thisKEY")
# print("x: ",x)
# print("y: ",y) |
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class BlogApphook(CMSApp):
    """django-cms apphook that mounts the blog URLconf on a CMS page."""
    name = _("Blog Apphook")  # label shown in the CMS admin
    urls = ["cmsplugin_blog.urls"]


# Register at import time so the CMS discovers the hook.
apphook_pool.register(BlogApphook)
"""
Module for Serialization and Deserialization of a KNX Disconnect Request information.
Disconnect requests are used to disconnect a tunnel from a KNX/IP device.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from xknx.exceptions import CouldNotParseKNXIP
from .body import KNXIPBody
from .hpai import HPAI
from .knxip_enum import KNXIPServiceType
if TYPE_CHECKING:
from xknx.xknx import XKNX
class DisconnectRequest(KNXIPBody):
    """Representation of a KNX Disconnect Request."""

    SERVICE_TYPE = KNXIPServiceType.DISCONNECT_REQUEST

    def __init__(
        self,
        xknx: XKNX,
        communication_channel_id: int = 1,
        control_endpoint: HPAI | None = None,
    ):
        """Initialize DisconnectRequest object."""
        super().__init__(xknx)
        self.communication_channel_id = communication_channel_id
        # BUGFIX: the old default `HPAI()` in the signature was evaluated once
        # and shared by every DisconnectRequest; build a fresh HPAI instead.
        self.control_endpoint = (
            control_endpoint if control_endpoint is not None else HPAI()
        )

    def calculated_length(self) -> int:
        """Get length of KNX/IP body."""
        return 2 + HPAI.LENGTH

    def from_knx(self, raw: bytes) -> int:
        """Parse/deserialize from KNX/IP raw data. Returns bytes consumed."""
        if len(raw) < 2:
            raise CouldNotParseKNXIP("Disconnect info has wrong length")
        self.communication_channel_id = raw[0]
        # raw[1] is reserved
        return self.control_endpoint.from_knx(raw[2:]) + 2

    def to_knx(self) -> bytes:
        """Serialize to KNX/IP raw data."""
        return (
            bytes((self.communication_channel_id, 0x00))  # 2nd byte is reserved
            + self.control_endpoint.to_knx()
        )

    def __str__(self) -> str:
        """Return object as readable string."""
        return (
            "<DisconnectRequest "
            f'CommunicationChannelID="{self.communication_channel_id}" '
            f'control_endpoint="{self.control_endpoint}" />'
        )
|
# Simple smoke-test script: emit three status lines.
for _line in (
    "Hello World!",
    "I'm modiftying this file and pushing it",
    "done the changes",
):
    print(_line)
|
# Face detection by Dlib
## Dlib installation steps
# 1. brew install boost
# 2. brew install boost-python
# 3. Install XQuartz-2.7.9
# sudo ln -s /opt/X11/include/X11 /usr/local/include
# 4. Download dlib
# cd dlib-18.18/
# sudo python setup.py install
#
# Running samle
# a. pip install scikit-image
# b. Download https://sourceforge.net/projects/dclib/files/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2
# c. python ./face_landmark_detection.py shape_predictor_68_face_landmarks.dat ../examples/faces
## reference
# - http://dlib.net/face_detector.py.html
import dlib
import cv2
predictor_path = "./dlib_data/shape_predictor_68_face_landmarks.dat"
class FaceDetector():
    """Wraps dlib's frontal face detector and 68-point landmark predictor."""

    def __init__(self):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(predictor_path)

    def detectFace(self, gray):
        """Return detected face boxes as (left, top, right, bottom) tuples."""
        boxes = []
        for det in self.detector(gray, 1):
            boxes.append((det.left(), det.top(), det.right(), det.bottom()))
        return boxes

    def detectParts(self, gray, x, y, x2, y2):
        """Predict the 68 facial landmarks inside the given box."""
        return self.predictor(gray, dlib.rectangle(x, y, x2, y2))

    def drawFacialParts(self, disp, parts):
        """Connect consecutive landmark points with lines on *disp*."""
        if parts.num_parts != 68:
            return
        p = parts.part
        # Index ranges for jaw, nose bridge, brows, nose, eyes and mouth.
        segment_ranges = [
            range(1, 17), range(28, 31), range(18, 22), range(23, 27),
            range(31, 36), range(37, 42), range(43, 48), range(49, 60),
            range(60, 68),
        ]
        for seg in segment_ranges:
            for i in seg:
                cv2.line(disp, (p(i).x, p(i).y), (p(i - 1).x, p(i - 1).y),
                         (100, 100, 255), 1)

    def detectAndDraw(self, disp, frame):
        '''Detect face and eyes from the frame, and draw results on the disp.'''
        # NOTE(review): frame is used as-is; the grayscale conversion was
        # intentionally disabled upstream.
        gray = frame
        for (x, y, x2, y2) in self.detectFace(gray):
            parts = self.detectParts(gray, x, y, x2, y2)
            self.drawFacialParts(disp, parts)
            cv2.rectangle(disp, (x, y), (x2, y2), (0, 0, 255), 2)
|
"""SoulfulArt_Platform URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import re_path
from django.urls import include
from Controllers import HomeController
from Controllers import CreateDepController
from Controllers import CreateFuncController
from Controllers import ProjectController
from Controllers import BugTrackerController
from Controllers import SignInController
from Controllers import SignOutController
from Controllers import UserPageController
from Controllers import ProfilePageController
from Controllers import ChangePassController
from Controllers import SignUpController
# URL routing table: maps each path to its controller function.
urlpatterns = [
    path('admin', admin.site.urls),
    path('Projects/', ProjectController.index),
    # Auth pages are registered with and without a trailing slash.
    path('SignIn', SignInController.index),
    path('SignIn/', SignInController.index),
    path('SignUp', SignUpController.index),
    path('SignUp/', SignUpController.index),
    path('create_user', SignUpController.create_user),
    path('SignOut', SignOutController.index),
    path('SignOut/', SignOutController.index),
    path('User_Platform/BugTracker/', BugTrackerController.index),
    path('User_Platform/Profile/', ProfilePageController.index),
    path('User_Platform/Profile/ChangePass', ChangePassController.index),
    path('User_Platform/Profile/change_pass', ChangePassController.change_pass),
    path('User_Platform/Profile/update_form', ProfilePageController.update_user),
    path('HomePage/', HomeController.index),
    path('CreateDep/', CreateDepController.index),
    path('CreateFunc/', CreateFuncController.index),
    path('Projects/update_form', ProjectController.funcform),
    path('User_Platform/BugTracker/update_form', BugTrackerController.bugtrackerform),
    # NOTE(review): in a regex, `/*` means "zero or more slashes", so this
    # pattern does NOT capture arbitrary path segments — `.*` was probably
    # intended. Confirm against dowload_bug_file's expected arguments.
    re_path(r'User_Platform/BugTracker/download/*/*', BugTrackerController.dowload_bug_file),
    path('CreateDep/update_form', CreateDepController.deptrackerform),
    path('CreateFunc/update_form', CreateFuncController.funcform),
    path('', HomeController.index),
    path('User_Platform/UserPage', UserPageController.index),
]
|
import pytest
def test_index(client):
    """The API root should list the collection endpoints."""
    response = client.get('/api/')
    expected = {
        'links': 'http://127.0.0.1:5000/api/links',
        'users': 'http://127.0.0.1:5000/api/users',
        'tags': 'http://127.0.0.1:5000/api/tags',
    }
    assert response.json == expected
|
try:
import drawBot as db
except:
pass
from coldtype.pens.draftingpen import DraftingPen
from coldtype.pens.draftingpens import DraftingPens
from coldtype.geometry import Rect, Edge, Point
from coldtype.pens.drawablepen import DrawablePenMixin
from coldtype.color import Color, Gradient
def get_image_rect(src):
    """Return a Rect at the origin matching the pixel size of the image *src*."""
    width, height = db.imageSize(str(src))
    return Rect(0, 0, width, height)
class DrawBotPen(DrawablePenMixin, DraftingPen):
    """Renders a DraftingPen recording onto the active DrawBot canvas."""

    def __init__(self, dat, rect=None):
        super().__init__()
        self.rect = rect
        self.dat = dat
        # Replay the recorded drawing into a native DrawBot bezier path.
        self.bp = db.BezierPath()
        self.dat.replay(self.bp)

    def fill(self, color):
        """Set the current fill from a Color/Gradient; falsy disables filling."""
        if color:
            if isinstance(color, Gradient):
                self.gradient(color)
            elif isinstance(color, Color):
                db.fill(color.r, color.g, color.b, color.a)
        else:
            db.fill(None)

    def stroke(self, weight=1, color=None, dash=None):
        """Set stroke width/dash/color; falsy color disables stroking."""
        db.strokeWidth(weight)
        if dash:
            db.lineDash(dash)
        if color:
            if isinstance(color, Gradient):
                pass # possible?
            elif isinstance(color, Color):
                db.stroke(color.r, color.g, color.b, color.a)
        else:
            db.stroke(None)

    def image(self, src=None, opacity=1, rect=None, rotate=0, repeating=False, scale=True):
        """Draw (and optionally tile) image *src* within this pen's bounds.

        repeating=True tiles rect-sized copies over the bounds; scale may be
        True (fit width), a scalar, or an (sx, sy) pair.
        """
        bounds = self.dat.bounds()
        src = str(src)
        if not rect:
            rect = bounds
        try:
            img_w, img_h = db.imageSize(src)
        except ValueError:
            print("DrawBotPen: No image")
            return
        x = bounds.x
        y = bounds.y
        if repeating:
            x_count = bounds.w / rect.w
            y_count = bounds.h / rect.h
        else:
            x_count = 1
            y_count = 1
        _x = 0
        while x <= (bounds.w+bounds.x) and _x < x_count:
            _x += 1
            # BUGFIX: reset the row counter for every column. Previously `_y`
            # was initialized once before the outer loop, so after the first
            # column the inner loop never ran and a repeating grid drew only
            # its first column of tiles.
            _y = 0
            while y <= (bounds.h+bounds.y) and _y < y_count:
                _y += 1
                with db.savedState():
                    r = Rect(x, y, rect.w, rect.h)
                    #db.fill(1, 0, 0.5, 0.05)
                    #db.oval(*r)
                    if scale == True:
                        db.scale(rect.w/img_w, center=r.point("SW"))
                    elif scale:
                        try:
                            db.scale(scale[0], scale[1], center=r.point("SW"))
                        except TypeError:
                            db.scale(scale, center=r.point("SW"))
                    db.rotate(rotate)
                    db.image(src, (r.x, r.y), alpha=opacity)
                y += rect.h
            # NOTE(review): resets to 0, not bounds.y — confirm this is the
            # intended behavior when bounds does not start at the origin.
            y = 0
            x += rect.w

    def shadow(self, clip=None, radius=10, alpha=0.3, color=Color.from_rgb(0,0,0,1)):
        """Apply a drop shadow, optionally clipped to the *clip* pen's path."""
        if clip:
            cp = DraftingPen(clip).f(None)
            bp = db.BezierPath()
            cp.replay(bp)
            db.clipPath(bp)
        #elif self.rect:
        #    cp = DATPen(fill=None).rect(self.rect).xor(self.dat)
        #    bp = db.BezierPath()
        #    cp.replay(bp)
        #    db.clipPath(bp)
        db.shadow((0, 0), radius*3, list(color.with_alpha(alpha)))

    def gradient(self, gradient):
        """Install a two-stop linear gradient as the current fill."""
        stops = gradient.stops
        db.linearGradient(stops[0][1], stops[1][1], [list(s[0]) for s in stops], [0, 1])

    def draw(self, scale=1, style=None):
        """Draw this pen (or recurse into a pen set) onto the canvas."""
        if hasattr(self.dat, "_pens"):
            # NOTE(review): `style` is not forwarded to child pens — confirm
            # whether nested pen sets should inherit the styled attrs.
            for p in self.dat._pens:
                DrawBotPen(p, rect=self.rect).draw(scale=scale)
        else:
            with db.savedState():
                db.scale(scale)
                for attrs, attr in self.findStyledAttrs(style):
                    self.applyDATAttribute(attrs, attr)
                db.drawPath(self.bp)
        return self

    def draw_with_filters(self, rect, filters):
        """Draw into an offscreen ImageObject, apply Core Image filters, blit."""
        im = db.ImageObject()
        with im:
            db.size(*rect.wh())
            self.draw()
        for filter_name, filter_kwargs in filters:
            getattr(im, filter_name)(**filter_kwargs)
        x, y = im.offset()
        db.image(im, (x, y))
        return self
from bs4 import BeautifulSoup
import requests
# Fetch the search-results page once and parse it.
source = requests.get('https://www.indeed.com/jobs?q=python+developer&l=').text
soup = BeautifulSoup(source, 'html5lib')


def _field_text(getter):
    """Return the stripped text of the node produced by *getter*, or None
    when the node is missing or malformed (DRYs up five identical
    try/except blocks)."""
    try:
        return getter().text.strip()
    except Exception:
        return None


for jobs in soup.find_all(class_='result'):
    print('Job Title:', _field_text(lambda: jobs.h2))
    print('Company:', _field_text(lambda: jobs.span))
    print('Location:', _field_text(lambda: jobs.find('span', class_='location')))
    print('Summary:', _field_text(lambda: jobs.find('span', class_='summary')))
    print('salary:', _field_text(lambda: jobs.find('span', class_='no-wrap')))
    print('------------------')
|
#!/usr/bin/env python
"""
CLASS imu_visualizer
purpose:
- Class definition of 'IMUVisualizer' to be used in Visualizer node
"""
# essential modules
import numpy as np
import math
import rospy
import tf_conversions
import tf2_ros
import geometry_msgs.msg
from sensor_msgs.msg import Temperature, Imu
from nav_msgs.msg import Odometry
def handle_imu_pose(msg):
    """Re-publish an Odometry message as a plane -> imu_link TF transform."""
    # Reuse one broadcaster across callbacks: recreating a
    # TransformBroadcaster per message gives the underlying publisher no time
    # to register, so transforms can be silently dropped.
    br = getattr(handle_imu_pose, "_broadcaster", None)
    if br is None:
        br = tf2_ros.TransformBroadcaster()
        handle_imu_pose._broadcaster = br
    t = geometry_msgs.msg.TransformStamped()
    t.header.stamp = rospy.Time.now()
    t.header.frame_id = "plane"
    t.child_frame_id = "imu_link"
    # Only height is taken from odometry; x/y are pinned to the plane origin.
    t.transform.translation.x = 0
    t.transform.translation.y = 0
    t.transform.translation.z = msg.pose.pose.position.z
    t.transform.rotation.x = msg.pose.pose.orientation.x
    t.transform.rotation.y = msg.pose.pose.orientation.y
    t.transform.rotation.z = msg.pose.pose.orientation.z
    t.transform.rotation.w = msg.pose.pose.orientation.w
    br.sendTransform(t)  # debug print removed
if __name__ == '__main__':
    # Broadcast robot_0's IMU pose as a TF transform for every odometry update.
    rospy.init_node('tf_broadcaster_imu')
    rospy.Subscriber('/robot_0/odom', Odometry, handle_imu_pose)
    rospy.spin()
import pytest
def func(x):
    """Return x incremented by one."""
    return x + 1
def test_answer():
    """func increments its argument: func(3) is 4.

    BUGFIX: the previous expectation `func(3) == 5` could never pass,
    since func returns x + 1.
    """
    assert func(3) == 4
def f():
    # Simulates a fatal exit path; exercised by test_runner below.
    raise SystemExit(1)
def test_runner():
    """f() must terminate via SystemExit."""
    with pytest.raises(SystemExit):
        f()
|
#coding=utf-8
'''
Created on 2016-1-18
@author: Devuser
'''
from django import template
from doraemon.auth_extend.user.templatetags.auth_required_node import LogoutRequiredNode,LoginRequiredNode,UserRequiredNode,ManagerRequiredNode,AdminRequiredNode
register = template.Library()
@register.tag()
def admin_required(parser, token):
    """{% admin_required %}...{% end_admin %}: render body for admins only."""
    body_nodes = parser.parse(('end_admin',))
    parser.delete_first_token()  # consume the end tag
    return AdminRequiredNode(body_nodes)
@register.tag()
def manager_required(parser, token):
    """{% manager_required %}...{% end_manager %}: render body for managers only."""
    body_nodes = parser.parse(('end_manager',))
    parser.delete_first_token()  # consume the end tag
    return ManagerRequiredNode(body_nodes)
@register.tag()
def user_required(parser, token):
    """{% user_required %}...{% end_user %}: render body for regular users only."""
    body_nodes = parser.parse(('end_user',))
    parser.delete_first_token()  # consume the end tag
    return UserRequiredNode(body_nodes)
@register.tag()
def login_required(parser, token):
    """{% login_required %}...{% end_login %}: render body for authenticated users."""
    body_nodes = parser.parse(('end_login',))
    parser.delete_first_token()  # consume the end tag
    return LoginRequiredNode(body_nodes)
@register.tag()
def logout_required(parser, token):
    """{% logout_required %}...{% end_logout %}: render body for anonymous users."""
    body_nodes = parser.parse(('end_logout',))
    parser.delete_first_token()  # consume the end tag
    return LogoutRequiredNode(body_nodes)
import Blender
from Blender import *
import Blender
# Shorthand for Blender's rotation-matrix constructor.
RotationMatrix= Blender.Mathutils.RotationMatrix
# 3x3 identity used as the multiplication seed in eulerRotateOrder().
MATRIX_IDENTITY_3x3 = Blender.Mathutils.Matrix([1.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,1.0])
def eulerRotateOrder(x,y,z):
    """Compose per-axis rotations in BVH order (Z, then X, then Y) and
    return the resulting euler angles."""
    # Clamp all values between 0 and 360, values outside this raise an error.
    x, y, z = x % 360, y % 360, z % 360
    rot_x = RotationMatrix(x, 3, 'x')
    rot_y = RotationMatrix(y, 3, 'y')
    rot_z = RotationMatrix(z, 3, 'z')
    # Standard BVH multiplication order; change the order here if needed.
    combined = rot_y * (rot_x * (rot_z * MATRIX_IDENTITY_3x3))
    return combined.toEuler()
def bone_locations():
    # Exploratory Python-2 Blender script: dumps pose-bone attributes of the
    # object named 'Armature' to the console. Most of the body is
    # intentionally commented-out experimentation kept for reference.
    print "---------------------"
    ob_armature = Blender.Object.Get('Armature')
    ob = Object.Get('Armature')
    armature_data = ob.getData()
    #armature_mat = armature_data.matrixWorld()
    #print dir(ob_armature.getPose())
    ob_pose = ob_armature.getPose()
    #print dir(ob_pose.bones.keys() )
    ob_keys = ob_pose.bones.keys()
    # Inspect the translate attribute of the first pose bone.
    print dir(ob_keys[0].translate)
    armature_bones = armature_data.bones.values()
    #print dir(armature_bones.index)
    #print armature_mat
    #for bone in armature_bones:
    #print bone.name
    #print dir(bone)
    #print bone.name
    #print bone.weight
    #print bone.head
    #print bone.rotation
    #bone_mat= bone.matrix['ARMATURESPACE']
    #print bone_mat #hm
    #bone_mat_world= bone_mat*arm_mat
    #ob_empty= Object.New('Empty', bone.name)
    #scn.link(ob_empty)
    #ob_empty.setMatrix(bone_mat_world)
    #ob_empty.sel= 1
    #Bone Name Print
    #print (ob_armature)
    #print (type(ob_armature))
    #print (dir(ob_armature.bones.keys())) #This print out the functions
    #bones_keys = ob_armature.bones.keys()
    #print dir(ob_armature.bones.values)
    #print dir(bones_keys[0])
    #print bones_keys[0].BonePos
    #pose = ob_armature.getPose()
    #print "-----------------"
    #bone_list = pose.bones.keys()
    #print help(bone_list[0]) #Crash for blender
    #type(bone_list)
def bone_list_name():
    # Print the name of every pose bone of the object named 'Armature'.
    ob = Object.Get('Armature')
    #Bone Name Print
    pose = ob.getPose()
    print "-----------------"
    for bonename in pose.bones.keys():
        #print pose.bones[bonename]
        print bonename
        #bone = pose.bones[bonename]
        #print bone.constraints,bone.name
        #print "loc ",bonename.loc
#bone_list_name()
# Run the bone dump when the script is executed inside Blender.
bone_locations()
from __future__ import absolute_import
from keras import regularizers
from keras.regularizers import l1,l2,l1_l2
from keras.utils import multi_gpu_model
from keras.models import Sequential, Model
from keras.layers import Input, Dropout, Concatenate, Embedding, concatenate
from keras.layers import Dense, Bidirectional, LSTM, GRU, CuDNNLSTM, CuDNNGRU
from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D, GlobalMaxPooling1D, GlobalAveragePooling1D, Cropping1D, Reshape, BatchNormalization, SpatialDropout1D
from lib.utils import GlobalZeroMaskedAveragePooling1D, GlobalSumPooling1D, Attention
def rcnn(embedding_matrix, num_classes, max_seq_len, rnn_dim = 128, num_filters=64, l2_weight_decay=0.0001, dropout_val=0.25, dense_dim=32, auxiliary = False, dropout=0.2, recurrent_dropout=0.2, add_sigmoid=True, train_embeds=False, gpus=0, add_embeds=True, rnn_type='gru'):
    """Bi-RNN + Conv1D classifier with avg/max pooling heads.

    Returns an uncompiled keras Model (multi-GPU-wrapped when gpus > 0).
    Interface mirrors charrnn()/cnn()/rnn() in this module.
    """
    if rnn_type == 'lstm':
        RNN = CuDNNLSTM if gpus > 0 else LSTM
    elif rnn_type == 'gru':
        RNN = CuDNNGRU if gpus > 0 else GRU
    else:
        RNN = CuDNNLSTM if gpus > 0 else LSTM
    # BUGFIX: CuDNN layers do not accept dropout kwargs — passing them on the
    # GPU path raised a TypeError. Apply them only on CPU, as rnn() does.
    rnn_kwargs = {}
    if gpus == 0:
        rnn_kwargs['dropout'] = dropout
        rnn_kwargs['recurrent_dropout'] = recurrent_dropout
    input_ = Input(shape=(max_seq_len,))
    embeds = Embedding(embedding_matrix.shape[0],
                       embedding_matrix.shape[1],
                       weights=[embedding_matrix],
                       input_length=max_seq_len,
                       trainable=train_embeds)(input_)
    x = SpatialDropout1D(dropout_val)(embeds)
    x = Bidirectional(RNN(rnn_dim, return_sequences=True, **rnn_kwargs))(x)
    x = Conv1D(num_filters, kernel_size=2, padding="valid", kernel_initializer="he_uniform")(x)
    avg_pool = GlobalAveragePooling1D()(x)
    max_pool = GlobalMaxPooling1D()(x)
    x = concatenate([avg_pool, max_pool])
    if auxiliary:
        auxiliary_input = Input(shape=(5,), name='aux_input')
        x = Concatenate()([x, auxiliary_input])
    x = Dense(num_classes, activation="sigmoid")(x)
    if auxiliary:
        model = Model(inputs=[input_, auxiliary_input], outputs=x)
    else:
        model = Model(inputs=input_, outputs=x)
    if gpus > 0:
        model = multi_gpu_model(model, gpus=gpus)
    return model
def charrnn(embedding_matrix, num_classes, max_seq_len, rnn_dim = 128, num_filters=64, auxiliary = True, l2_weight_decay=0.0001, dropout_val=0.25, dropout= 0.2,recurrent_dropout=0.2, dense_dim=32, add_sigmoid=True, train_embeds=False, gpus=0, n_cnn_layers=1, pool='max', add_embeds=True, rnn_type='lstm'):
    """Character-level CNN + Bi-RNN classifier with pooled heads.

    Returns an uncompiled keras Model (multi-GPU-wrapped when gpus > 0).
    """
    if rnn_type == 'lstm':
        RNN = CuDNNLSTM if gpus > 0 else LSTM
    elif rnn_type == 'gru':
        RNN = CuDNNGRU if gpus > 0 else GRU
    else:
        # BUGFIX: unknown rnn_type previously left RNN undefined (NameError);
        # fall back to LSTM like rcnn() does.
        RNN = CuDNNLSTM if gpus > 0 else LSTM
    if pool == 'max':
        Pooling = MaxPooling1D
    elif pool == 'avg':
        Pooling = AveragePooling1D
    else:
        # BUGFIX: unknown pool previously crashed later with NameError.
        raise ValueError(f"Unknown pool type: {pool!r}")
    # BUGFIX: CuDNN layers do not accept dropout kwargs — passing them on the
    # GPU path raised a TypeError. Apply them only on CPU, as rnn() does.
    rnn_kwargs = {}
    if gpus == 0:
        rnn_kwargs['dropout'] = dropout
        rnn_kwargs['recurrent_dropout'] = recurrent_dropout
    input_ = Input(shape=(max_seq_len,))
    embeds = Embedding(embedding_matrix.shape[0],
                       embedding_matrix.shape[1],
                       weights=[embedding_matrix],
                       input_length=max_seq_len,
                       trainable=train_embeds)(input_)
    x = SpatialDropout1D(dropout_val)(embeds)
    # Convolutional branch: three conv/pool/BN stages over the embeddings.
    w = Conv1D(filters=16, kernel_size=7, padding='same', activation='relu')(x)
    w = Pooling(pool_size=2, strides=1)(w)
    w = BatchNormalization()(w)
    w = Conv1D(filters=32, kernel_size=5, padding='same', activation='relu')(w)
    w = Pooling(pool_size=2, strides=1)(w)
    w = BatchNormalization()(w)
    w = Conv1D(filters=32, kernel_size=3, padding='same', activation='relu')(w)
    w = Pooling(pool_size=2, strides=1)(w)
    w = BatchNormalization()(w)
    w1 = GlobalMaxPooling1D()(w)
    w2 = GlobalAveragePooling1D()(w)
    # Recurrent branch over the same embeddings.
    x = Bidirectional(RNN(rnn_dim, return_sequences=True, **rnn_kwargs))(x)
    z1 = GlobalMaxPooling1D()(x)
    z2 = GlobalAveragePooling1D()(x)
    x = Concatenate()([z1, z2, w1, w2])
    x = Dropout(dropout_val)(x)
    if auxiliary:
        auxiliary_input = Input(shape=(5,), name='aux_input')
        x = Concatenate()([x, auxiliary_input])
    x = Dense(dense_dim, activation="relu", kernel_regularizer=regularizers.l2(l2_weight_decay))(x)
    if add_sigmoid:
        x = Dense(num_classes, activation="sigmoid")(x)
    if auxiliary:
        model = Model(inputs=[input_, auxiliary_input], outputs=x)
    else:
        model = Model(inputs=input_, outputs=x)
    if gpus > 0:
        model = multi_gpu_model(model, gpus=gpus)
    return model
def cnn(embedding_matrix, num_classes, max_seq_len, num_filters=64, l2_weight_decay=0.0001, dropout_val=0.5,
        dense_dim=32, add_sigmoid=True, train_embeds=False, auxiliary=True, gpus=0, n_cnn_layers=1, pool='max',
        add_embeds=False):
    """Deep inception-style Conv1D classifier over word embeddings.

    Three stacked stages, each with five parallel conv towers (kernel sizes
    1/3/5/7 combinations) concatenated and pooled, followed by a final conv
    head, dense layer and optional sigmoid output. Returns an uncompiled
    keras Model (multi-GPU-wrapped when gpus > 0).
    """
    if pool == 'max':
        Pooling = MaxPooling1D
        GlobalPooling = GlobalMaxPooling1D
    elif pool == 'avg':
        Pooling = AveragePooling1D
        GlobalPooling = GlobalAveragePooling1D
    input_ = Input(shape=(max_seq_len,))
    embeds = Embedding(embedding_matrix.shape[0],
                       embedding_matrix.shape[1],
                       weights=[embedding_matrix],
                       input_length=max_seq_len,
                       trainable=train_embeds)(input_)
    # x = embeds
    x = SpatialDropout1D(0.2)(embeds)
    # --- Stage 1: five parallel conv towers over the embeddings ---
    x0 = Conv1D(num_filters, 1, activation='relu', padding='same')(x)
    x0 = Pooling(3)(x0)
    x1 = Conv1D(num_filters, 1, activation='relu', padding='same')(x)
    x1 = Conv1D(num_filters, 3, activation='relu', padding='same')(x1)
    x1 = Conv1D(num_filters, 5, activation='relu', padding='same')(x1)
    x1 = Conv1D(num_filters, 7, activation='relu', padding='same')(x1)
    x1 = Pooling(3)(x1)
    x2 = Conv1D(num_filters, 3, activation='relu', padding='same')(x)
    x2 = Conv1D(num_filters, 5, activation='relu', padding='same')(x2)
    x2 = Pooling(3)(x2)
    x3 = Conv1D(num_filters, 4, activation='relu', padding='same')(x)
    x3 = Conv1D(num_filters, 7, activation='relu', padding='same')(x3)
    x3 = Pooling(3)(x3)
    x4 = Conv1D(num_filters, 5, activation='relu', padding='same')(x)
    x4 = Conv1D(num_filters, 3, activation='relu', padding='same')(x4)
    x4 = Pooling(3)(x4)
    x = Concatenate()([x0, x1, x2, x3, x4])
    for i in range(n_cnn_layers - 1):
        x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
    x = Pooling(3)(x)
    x = BatchNormalization()(x)
    x = Dropout(dropout_val)(x)
    # --- Stage 2: same five-tower pattern on the pooled features ---
    x_0 = Conv1D(num_filters, 1, activation='relu', padding='same')(x)
    x_0 = Pooling(3)(x_0)
    x_1 = Conv1D(num_filters, 1, activation='relu', padding='same')(x)
    x_1 = Conv1D(num_filters, 3, activation='relu', padding='same')(x_1)
    x_1 = Conv1D(num_filters, 5, activation='relu', padding='same')(x_1)
    x_1 = Conv1D(num_filters, 7, activation='relu', padding='same')(x_1)
    x_1 = Pooling(3)(x_1)
    x_2 = Conv1D(num_filters, 3, activation='relu', padding='same')(x)
    x_2 = Conv1D(num_filters, 5, activation='relu', padding='same')(x_2)
    x_2 = Conv1D(num_filters, 7, activation='relu', padding='same')(x_2)
    x_2 = Pooling(3)(x_2)
    x_3 = Conv1D(num_filters, 4, activation='relu', padding='same')(x)
    x_3 = Conv1D(num_filters, 7, activation='relu', padding='same')(x_3)
    x_3 = Pooling(3)(x_3)
    x_4 = Conv1D(num_filters, 5, activation='relu', padding='same')(x)
    x_4 = Conv1D(num_filters, 3, activation='relu', padding='same')(x_4)
    x_4 = Pooling(3)(x_4)
    x = Concatenate()([x_0, x_1, x_2, x_3, x_4])
    for i in range(n_cnn_layers - 1):
        x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
    # NOTE(review): this pooling uses the layer default size, unlike the
    # explicit Pooling(3) elsewhere — confirm whether that is intentional.
    x = Pooling()(x)
    # --- Stage 3: five towers again, then pool/BN/dropout ---
    z_0 = Conv1D(num_filters, 1, activation='relu', padding='same')(x)
    z_0 = Pooling(3)(z_0)
    z_1 = Conv1D(num_filters, 1, activation='relu', padding='same')(x)
    z_1 = Conv1D(num_filters, 3, activation='relu', padding='same')(z_1)
    z_1 = Conv1D(num_filters, 5, activation='relu', padding='same')(z_1)
    z_1 = Conv1D(num_filters, 7, activation='relu', padding='same')(z_1)
    z_1 = Pooling(3)(z_1)
    z_2 = Conv1D(num_filters, 3, activation='relu', padding='same')(x)
    z_2 = Conv1D(num_filters, 5, activation='relu', padding='same')(z_2)
    z_2 = Conv1D(num_filters, 7, activation='relu', padding='same')(z_2)
    z_2 = Pooling(3)(z_2)
    z_3 = Conv1D(num_filters, 4, activation='relu', padding='same')(x)
    z_3 = Conv1D(num_filters, 7, activation='relu', padding='same')(z_3)
    z_3 = Pooling(3)(z_3)
    z_4 = Conv1D(num_filters, 5, activation='relu', padding='same')(x)
    z_4 = Conv1D(num_filters, 3, activation='relu', padding='same')(z_4)
    z_4 = Pooling(3)(z_4)
    x = Concatenate()([z_0, z_1, z_2, z_3, z_4])
    for i in range(n_cnn_layers - 1):
        x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
    x = Pooling(2)(x)
    x = BatchNormalization()(x)
    x = Dropout(dropout_val)(x)
    # --- Final multi-kernel conv head, then global pooling ---
    w_2 = Conv1D(num_filters, 3, activation='relu', padding='same')(x)
    w_2 = Conv1D(num_filters, 5, activation='relu', padding='same')(w_2)
    w_2 = Conv1D(num_filters, 7, activation='relu', padding='same')(w_2)
    w_3 = Conv1D(num_filters, 3, activation='relu', padding='same')(x)
    w_3 = Conv1D(num_filters, 3, activation='relu', padding='same')(w_3)
    w_4 = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
    w_4 = Conv1D(num_filters, 5, activation='relu', padding='same')(w_4)
    w_4 = Conv1D(num_filters, 3, activation='relu', padding='same')(w_4)
    x = Concatenate()([w_2, w_3, w_4])
    x = Conv1D(num_filters, 7, activation='relu', padding='same')(x)
    x = GlobalPooling()(x)
    if add_embeds:
        # Optional shortcut branch straight from the embeddings.
        x1 = Conv1D(num_filters, 7, activation='relu', padding='same')(embeds)
        x1 = GlobalPooling()(x1)
        x = Concatenate()([x, x1])
    x = Dropout(dropout_val)(x)
    if auxiliary:
        auxiliary_input = Input(shape=(5,), name='aux_input')
        x = Concatenate()([x, auxiliary_input])
    x = BatchNormalization()(x)
    x = Dropout(dropout_val)(x)
    x = Dense(dense_dim, activation='relu', kernel_regularizer=regularizers.l2(l2_weight_decay))(x)
    if add_sigmoid:
        x = Dense(num_classes, activation='sigmoid')(x)
    if auxiliary:
        model = Model(inputs=[input_, auxiliary_input], outputs=x)
    else:
        model = Model(inputs=input_, outputs=x)
    if gpus > 0:
        model = multi_gpu_model(model, gpus=gpus)
    return model
def _get_regularizer(regularizer_name, weight):
if regularizer_name is None:
return None
if regularizer_name == 'l1':
return l1(weight)
if regularizer_name == 'l2':
return l2(weight)
if regularizer_name == 'l1_l2':
return l1_l2(weight)
return None
def rnn(embedding_matrix, num_classes, max_seq_len, l2_weight_decay=0.0001, rnn_dim=100, dropout_val=0.3, dense_dim=32, n_branches=0, n_rnn_layers=1, n_dense_layers=1, add_sigmoid=True, train_embeds=False, gpus=0, rnn_type='lstm', mask_zero=True, auxiliary=True, kernel_regularizer=None, recurrent_regularizer=None, activity_regularizer=None, dropout=0.2, recurrent_dropout=0.2):
    """Build a bidirectional RNN text classifier as an uncompiled Keras Model.

    Pipeline: Embedding -> SpatialDropout -> optional parallel BiRNN branches
    -> BiRNN -> four pooled views (last step, max, average, attention) ->
    dropout -> optional auxiliary features -> dense stack -> optional sigmoid.

    Args:
        embedding_matrix: (vocab_size, embed_dim) array used to initialise the
            Embedding layer.
        num_classes: size of the sigmoid output layer.
        max_seq_len: input sequence length.
        rnn_type: 'lstm' or 'gru'.
            NOTE(review): any other value leaves RNN unbound and raises
            NameError below — confirm callers only pass these two.
        gpus: when > 0, CuDNN layer variants are used and the model is wrapped
            with multi_gpu_model.
        auxiliary: when True, a 5-wide 'aux_input' is concatenated before the
            dense stack and the model takes two inputs.
        add_sigmoid: append the final sigmoid classification layer.

    Returns:
        An uncompiled keras Model (multi-GPU wrapped when gpus > 0).
    """
    # Regularizers shared by every recurrent layer below.
    rnn_regularizers = {'kernel_regularizer': _get_regularizer(kernel_regularizer, l2_weight_decay),
                        'recurrent_regularizer': _get_regularizer(recurrent_regularizer, l2_weight_decay),
                        'activity_regularizer': _get_regularizer(activity_regularizer, l2_weight_decay)}
    # CuDNN implementations do not support in-cell dropout, so only pass it
    # on the CPU path.
    if gpus == 0:
        rnn_regularizers['dropout'] = dropout
        rnn_regularizers['recurrent_dropout'] = recurrent_dropout
    if rnn_type == 'lstm':
        RNN = CuDNNLSTM if gpus > 0 else LSTM
    elif rnn_type == 'gru':
        RNN = CuDNNGRU if gpus > 0 else GRU
    # CuDNN layers do not support masking either.
    mask_zero = mask_zero and gpus == 0
    input_ = Input(shape=(max_seq_len,))
    embeds = Embedding(embedding_matrix.shape[0],
                       embedding_matrix.shape[1],
                       weights=[embedding_matrix],
                       input_length=max_seq_len,
                       mask_zero=mask_zero,
                       trainable=train_embeds)(input_)
    embeds = SpatialDropout1D(0.2)(embeds)
    # Optional parallel BiRNN branches over the shared embeddings.
    branches = []
    for _ in range(n_branches):
        branch = Bidirectional(RNN(rnn_dim, return_sequences=True, **rnn_regularizers))(embeds)
        branch = Dropout(dropout_val)(branch)
        branches.append(branch)
    if n_branches > 1:
        x = Concatenate()(branches)
    elif n_branches == 1:
        x = branches[0]
    else:
        x = embeds
    # NOTE(review): dead code kept from an earlier multi-layer variant
    # (n_rnn_layers is currently unused) — consider removing.
    '''
    for _ in range(n_rnn_layers):
        x = Bidirectional(RNN(rnn_dim, return_sequences=True, **rnn_regularizers))(x)
        z0 = Cropping1D(cropping=(-1, 0))(x)
        z1 = GlobalMaxPooling1D()(x)
        z2 = GlobalAveragePooling1D()(x)
        x = Concatenate()([z0, z1, z2])
        x = Dropout(dropout_val)(x)
    '''
    x = Bidirectional(RNN(rnn_dim, return_sequences=True, **rnn_regularizers))(x)
    # z0: last timestep of the BiRNN output, flattened to (rnn_dim * 2,).
    z0 = Cropping1D(cropping=(max_seq_len - 1, 0))(x)
    z0 = Reshape([rnn_dim * 2])(z0)
    z1 = GlobalMaxPooling1D()(x)
    z2 = GlobalAveragePooling1D()(x)
    z3 = Attention(max_seq_len)(x)
    x = Concatenate()([z0, z1, z2, z3])
    x = Dropout(dropout_val)(x)
    if auxiliary:
        auxiliary_input = Input(shape=(5,), name='aux_input')
        x = Concatenate()([x, auxiliary_input])
    # Dense stack: n_dense_layers - 1 hidden layers, each followed by dropout,
    # plus one final dropout before the classifier.
    for _ in range(n_dense_layers-1):
        x = Dense(dense_dim, activation="relu")(x)
        x = Dropout(dropout_val)(x)
    x = Dropout(dropout_val)(x)
    if add_sigmoid:
        x = Dense(num_classes, activation="sigmoid")(x)
    if auxiliary:
        model = Model(inputs=[input_, auxiliary_input], outputs=x)
    else:
        model = Model(inputs=input_, outputs=x)
    if gpus > 0:
        model = multi_gpu_model(model, gpus=gpus)
    return model
def dense(embedding_matrix, num_classes, max_seq_len, dense_dim=100, n_layers=10, concat=0, dropout_val=0.5, l2_weight_decay=0.0001, pool='max', add_sigmoid=True, train_embeds=False, gpus=0):
    """Build a pooled-embedding MLP classifier as an uncompiled Keras Model.

    Pipeline: Embedding -> global pooling (one pooling type or a concatenation
    of several) -> n_layers dense+dropout blocks with optional periodic
    skip-concatenation -> L2-regularised dense layer -> optional sigmoid.

    Args:
        pool: one of 'avg'/'max'/'sum', or a list of those names to pool with
            each and concatenate.
            NOTE(review): a single-element list falls into the else-branch and
            would index GlobalPool with a list (TypeError) — confirm callers
            pass a plain string in that case.
        concat: when > 0, every `concat`-th layer concatenates all previously
            saved activations back into the input (dense-net style skips).
        gpus: when > 0 the model is wrapped with multi_gpu_model.

    Returns:
        An uncompiled keras Model.
    """
    # Name -> pooling layer class used to collapse the sequence dimension.
    GlobalPool = {
        'avg': GlobalZeroMaskedAveragePooling1D,
        'max': GlobalMaxPooling1D,
        'sum': GlobalSumPooling1D
    }
    input_ = Input(shape=(max_seq_len,))
    embeds = Embedding(embedding_matrix.shape[0],
                       embedding_matrix.shape[1],
                       weights=[embedding_matrix],
                       input_length=max_seq_len,
                       trainable=train_embeds)(input_)
    if isinstance(pool, list) and len(pool) > 1:
        # Pool with each requested strategy and concatenate the results.
        to_concat = []
        for p in pool:
            to_concat.append(GlobalPool[p]()(embeds))
        x = Concatenate()(to_concat)
    else:
        x = GlobalPool[pool]()(embeds)
    # Activations saved for periodic skip-concatenation when concat > 0.
    prev = []
    for i in range(n_layers):
        if concat > 0:
            if i == 0:
                # First iteration only records the pooled input; no dense
                # layer is applied (note the continue).
                prev.append(x)
                continue
            elif i % concat == 0:
                prev.append(x)
                x = Concatenate(axis=-1)(prev)
        x = Dense(dense_dim, activation="relu")(x)
        x = Dropout(dropout_val)(x)
    output_ = Dense(dense_dim, activation="relu", kernel_regularizer=regularizers.l2(l2_weight_decay))(x)
    if add_sigmoid:
        output_ = Dense(num_classes, activation="sigmoid")(output_)
    model = Model(inputs=input_, outputs=output_)
    if gpus > 0:
        model = multi_gpu_model(model, gpus=gpus)
    return model
def capsule(embedding_matrix, num_classes, max_seq_len, Dim_capsule=16, Num_capsule=10, Routings=5, l2_weight_decay=0.0001, rnn_dim=128, dropout_val=0.3, dense_dim=32, n_branches=0, n_rnn_layers=1, n_dense_layers=1, add_sigmoid=True, train_embeds=False, gpus=0, rnn_type='lstm', mask_zero=True, auxiliary=True, kernel_regularizer=None, recurrent_regularizer=None, activity_regularizer=None, dropout=0.2, recurrent_dropout=0.2):
    """Build a BiRNN + Capsule text classifier as an uncompiled Keras Model.

    Pipeline: Embedding -> SpatialDropout -> Bidirectional RNN -> Capsule ->
    Flatten -> Dropout -> optional auxiliary features -> sigmoid output.

    Args:
        Dim_capsule / Num_capsule / Routings: Capsule layer configuration.
        rnn_type: 'gru' for (CuDNN)GRU; 'lstm' or any other value falls back
            to (CuDNN)LSTM, matching the original behaviour.
        gpus: when > 0, CuDNN layer variants are used and the model is
            wrapped with multi_gpu_model.
        auxiliary: when True, a 5-wide 'aux_input' is concatenated before the
            output layer and the model takes two inputs.

    Returns:
        An uncompiled keras Model (multi-GPU wrapped when gpus > 0).
    """
    if rnn_type == 'gru':
        RNN = CuDNNGRU if gpus > 0 else GRU
    else:
        RNN = CuDNNLSTM if gpus > 0 else LSTM
    # BUG FIX (consistent with rnn() above): CuDNN layers support neither
    # masking nor the activation/dropout keyword arguments, so the original
    # crashed whenever gpus > 0. Disable them on the GPU path; the CPU path
    # is unchanged.
    mask_zero = mask_zero and gpus == 0
    rnn_kwargs = {}
    if gpus == 0:
        rnn_kwargs['activation'] = 'relu'
        rnn_kwargs['dropout'] = dropout
        rnn_kwargs['recurrent_dropout'] = recurrent_dropout
    input_ = Input(shape=(max_seq_len,))
    embeds = Embedding(embedding_matrix.shape[0],
                       embedding_matrix.shape[1],
                       weights=[embedding_matrix],
                       input_length=max_seq_len,
                       mask_zero=mask_zero,
                       trainable=train_embeds)(input_)
    embeds = SpatialDropout1D(0.25)(embeds)
    x = Bidirectional(RNN(rnn_dim, return_sequences=True, **rnn_kwargs))(embeds)
    capsule = Capsule(num_capsule=Num_capsule, dim_capsule=Dim_capsule, routings=Routings,
                      share_weights=True)(x)
    capsule = Flatten()(capsule)
    x = Dropout(dropout_val)(capsule)
    if auxiliary:
        auxiliary_input = Input(shape=(5,), name='aux_input')
        x = Concatenate()([x, auxiliary_input])
    x = Dense(num_classes, activation="sigmoid")(x)
    if auxiliary:
        model = Model(inputs=[input_, auxiliary_input], outputs=x)
    else:
        model = Model(inputs=input_, outputs=x)
    if gpus > 0:
        model = multi_gpu_model(model, gpus=gpus)
    return model
def save_predictions(df, predictions, target_labels, additional_name=None):
    """Attach one prediction column per target label to *df*.

    Column ``i`` of *predictions* is stored under ``target_labels[i]``,
    optionally prefixed with ``<additional_name>_``.
    """
    for column_index, base_label in enumerate(target_labels):
        if additional_name is not None:
            column_name = '{}_{}'.format(additional_name, base_label)
        else:
            column_name = base_label
        df[column_name] = predictions[:, column_index]
|
import torch
import torch.nn as nn
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
# Class id to name mapping
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
# Class definition for the model
class ObjectDetectionModel(object):
    '''
    The blackbox object detection model (Faster RCNN for those who want to know).
    Given an image as a numpy array (3, H, W), it detects objects and returns
    their bounding boxes, class names and confidence scores.
    '''

    def __init__(self):
        # Load a COCO-pretrained Faster R-CNN (downloads weights on first use)
        # and switch it to inference mode.
        self.model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
        self.model.eval()

    def __call__(self, input):
        '''
        Run detection on a single image.

        Arguments:
            input (numpy array): A (3, H, W) array of numbers in [0, 1] representing the image.
        Returns:
            pred_boxes (list): [[(x1, y1), (x2, y2)], ...] where (x1, y1) is the
                top-left and (x2, y2) the bottom-right corner of each detection.
            pred_class (list): predicted class names (COCO categories).
            pred_score (list): confidence of each detection.
        '''
        # The model expects a float batch of shape (1, 3, H, W).
        batch = torch.from_numpy(input).type(torch.FloatTensor).unsqueeze(0)
        detections = self.model(batch)[0]
        pred_class = [COCO_INSTANCE_CATEGORY_NAMES[label] for label in detections['labels'].numpy()]
        pred_boxes = [[(box[0], box[1]), (box[2], box[3])] for box in detections['boxes'].detach().numpy()]
        pred_score = list(detections['scores'].detach().numpy())
        return pred_boxes, pred_class, pred_score
|
#
# Copyright 2012-2014 John Whitlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define Route model for rows in routes.txt
Google documentation from
https://developers.google.com/transit/gtfs/reference
routes.txt is required
- route_id (required)
The route_id field contains an ID that uniquely identifies a route. The
route_id is dataset unique.
- agency_id (optional)
The agency_id field defines an agency for the specified route. This value is
referenced from the agency.txt file. Use this field when you are providing data
for routes from more than one agency.
- route_short_name (required)
The route_short_name contains the short name of a route. This will often be a
short, abstract identifier like "32", "100X", or "Green" that riders use to
identify a route, but which doesn't give any indication of what places the
route serves. If the route does not have a short name, please specify a
route_long_name and use an empty string as the value for this field.
See a Google Maps screenshot highlighting the route_short_name:
http://bit.ly/yIS1sa
- route_long_name (required)
The route_long_name contains the full name of a route. This name is generally
more descriptive than the route_short_name and will often include the route's
destination or stop. If the route does not have a long name, please specify a
route_short_name and use an empty string as the value for this field.
See a Google Maps screenshot highlighting the route_long_name:
http://bit.ly/wZw5yH
- route_desc (optional)
The route_desc field contains a description of a route. Please provide useful,
quality information. Do not simply duplicate the name of the route. For
example, "A trains operate between Inwood-207 St, Manhattan and Far
Rockaway-Mott Avenue, Queens at all times. Also from about 6AM until about
midnight, additional A trains operate between Inwood-207 St and Lefferts
Boulevard (trains typically alternate between Lefferts Blvd and Far Rockaway)."
- route_type (required)
The route_type field describes the type of transportation used on a route.
Valid values for this field are:
0 - Tram, Streetcar, Light rail. Any light rail or street level system
within a metropolitan area.
1 - Subway, Metro. Any underground rail system within a metropolitan area.
2 - Rail. Used for intercity or long-distance travel.
3 - Bus. Used for short- and long-distance bus routes.
4 - Ferry. Used for short- and long-distance boat service.
5 - Cable car. Used for street-level cable cars where the cable runs
beneath the car.
6 - Gondola, Suspended cable car. Typically used for aerial cable cars
where the car is suspended from the cable.
7 - Funicular. Any rail system designed for steep inclines.
See a Google Maps screenshot highlighting the route_type:
http://bit.ly/wSt2h0
- route_url (optional)
The route_url field contains the URL of a web page about that particular route.
This should be different from the agency_url.
The value must be a fully qualified URL that includes http:// or https://, and
any special characters in the URL must be correctly escaped. See
http://www.w3.org/Addressing/URL/4_URI_Recommentations.html
for a description of how to create fully qualified URL values.
- route_color (optional)
In systems that have colors assigned to routes, the route_color field defines a
color that corresponds to a route. The color must be provided as a
six-character hexadecimal number, for example, 00FFFF. If no color is
specified, the default route color is white (FFFFFF).
The color difference between route_color and route_text_color should provide
sufficient contrast when viewed on a black and white screen. The W3C Techniques
for Accessibility Evaluation And Repair Tools document offers a useful
algorithm for evaluating color contrast:
http://www.w3.org/TR/AERT#color-contrast
There are also helpful online tools for choosing contrasting colors, including
the snook.ca Color Contrast Check application:
http://snook.ca/technical/colour_contrast/colour.html
- route_text_color (optional)
The route_text_color field can be used to specify a legible color to use for
text drawn against a background of route_color. The color must be provided as a
six-character hexadecimal number, for example, FFD700. If no color is
specified, the default text color is black (000000).
The color difference between route_color and route_text_color should provide
sufficient contrast when viewed on a black and white screen.
"""
from __future__ import unicode_literals
from django.contrib.gis.geos import MultiLineString
from django.utils.encoding import python_2_unicode_compatible
from jsonfield import JSONField
from multigtfs.models.base import models, Base
@python_2_unicode_compatible
class Route(Base):
    """A transit route (one row of GTFS routes.txt)."""
    feed = models.ForeignKey('Feed')
    route_id = models.CharField(
        max_length=255, db_index=True,
        help_text="Unique identifier for route.")
    agency = models.ForeignKey(
        'Agency', null=True, blank=True, help_text="Agency for this route.")
    short_name = models.CharField(
        max_length=63,
        help_text="Short name of the route")
    long_name = models.CharField(
        max_length=255,
        help_text="Long name of the route")
    desc = models.TextField(
        "description",
        blank=True,
        help_text="Long description of a route")
    rtype = models.IntegerField(
        "route type",
        choices=((0, 'Tram, Streetcar, or Light rail'),
                 (1, 'Subway or Metro'),
                 (2, 'Rail'),
                 (3, 'Bus'),
                 (4, 'Ferry'),
                 (5, 'Cable car'),
                 (6, 'Gondola or Suspended cable car'),
                 (7, 'Funicular')),
        help_text='Type of transportation used on route')
    url = models.URLField(
        blank=True, help_text="Web page about for the route")
    color = models.CharField(
        max_length=6, blank=True,
        help_text="Color of route in hex")
    text_color = models.CharField(
        max_length=6, blank=True,
        help_text="Color of route text in hex")
    geometry = models.MultiLineStringField(
        null=True, blank=True,
        help_text='Geometry cache of Trips')
    # BUG FIX: `default={}` makes every instance share a single mutable dict;
    # passing the `dict` constructor gives each row its own fresh default.
    extra_data = JSONField(default=dict)

    def update_geometry(self):
        """Update the cached geometry from this route's Trips."""
        original = self.geometry
        trips = self.trip_set.exclude(geometry=None)
        # De-duplicate trips tracing identical coordinate sequences so the
        # cached MultiLineString holds each distinct path only once.
        unique_coords = set()
        unique_geom = list()
        for t in trips:
            coords = t.geometry.coords
            if coords not in unique_coords:
                unique_coords.add(coords)
                unique_geom.append(t.geometry)
        self.geometry = MultiLineString(unique_geom)
        # Only hit the database when the cached geometry actually changed.
        if self.geometry != original:
            self.save()

    def __str__(self):
        return "%d-%s" % (self.feed.id, self.route_id)

    class Meta:
        db_table = 'route'
        app_label = 'multigtfs'

    # GTFS column name -> model field mapping used by the Base import/export
    # machinery.
    _column_map = (
        ('route_id', 'route_id'),
        ('agency_id', 'agency__agency_id'),
        ('route_short_name', 'short_name'),
        ('route_long_name', 'long_name'),
        ('route_desc', 'desc'),
        ('route_type', 'rtype'),
        ('route_url', 'url'),
        ('route_color', 'color'),
        ('route_text_color', 'text_color')
    )
    _filename = 'routes.txt'
    _sort_order = ('route_id', 'short_name')
    _unique_fields = ('route_id',)
|
import logging
import pandas as pd
import time
from quartic_sdk.model.BaseQuarticModel import BaseQuarticModel
from quartic_sdk.utilities import constants
class SupportedModel(BaseQuarticModel):
    """
    Example model used in the model tests.

    A valid model that can be saved to the Quartic platform: ``predict``
    returns a ``pd.Series`` with one integer per input row.
    """

    def __init__(self):
        super().__init__("test_BaseQuarticModel")

    def predict(self, input_df: pd.DataFrame) -> pd.Series:
        # One sequential integer per input row.
        return pd.Series(list(range(input_df.shape[0])))
class ModelThatReturnsList(BaseQuarticModel):
    """
    Example model used in the model tests.

    Identical to SupportedModel except that ``predict`` returns a plain
    list instead of a ``pd.Series``.
    """

    def __init__(self):
        super().__init__("test_BaseQuarticModel")

    def predict(self, input_df: pd.DataFrame) -> pd.Series:
        # Note: deliberately returns a list, not a Series.
        return list(range(input_df.shape[0]))
class ModelThatReturnsNone(BaseQuarticModel):
    """
    Example model used in the model tests.

    ``predict`` returns a ``pd.Series`` consisting entirely of ``None``
    values, one per input row.
    """

    def __init__(self):
        super().__init__("test_BaseQuarticModel")

    def predict(self, input_df: pd.DataFrame) -> pd.Series:
        return pd.Series([None] * input_df.shape[0])
class ModelThatReturnsString(BaseQuarticModel):
    """
    Example model used in the model tests.

    An invalid model: ``post_transform`` casts the prediction Series to
    strings, so ``predict`` returns string-typed data.
    """

    def __init__(self):
        super().__init__("test_model")

    def post_transform(self, data):
        # Cast every value to its string representation.
        return data.astype(str)

    def predict(self, input_df: pd.DataFrame) -> pd.Series:
        sequential = pd.Series(list(range(input_df.shape[0])))
        return self.post_transform(sequential)
class SlowModel(BaseQuarticModel):
    """
    Example model used in the model tests.

    An invalid model: ``pre_transform`` sleeps past the platform's maximum
    prediction processing time, so ``predict`` always exceeds the limit.
    """

    def __init__(self):
        super().__init__("test_model")

    def pre_transform(self, df):
        """
        Sleep one second past the allowed processing time, then return the
        frame unchanged.
        """
        time.sleep(constants.MAX_PREDICTION_PROCESSING_TIME + 1)
        return df

    def predict(self, input_df: pd.DataFrame) -> pd.Series:
        """
        Sample prediction: one sequential integer per input row, after the
        deliberately slow transform.
        """
        self.pre_transform(input_df)
        return pd.Series(list(range(input_df.shape[0])))
class ModelWithLog(BaseQuarticModel):
    """
    Example model used in the model tests.

    Emits one message at each of the info/debug/error levels through the
    model's logger before predicting, so logging behaviour can be asserted.
    """

    def __init__(self):
        super().__init__("test_model")

    def predict(self, input_df: pd.DataFrame) -> pd.Series:
        """
        Sample prediction that exercises the model logger at three levels.
        """
        self.log.info("This is a info Log for Testing the logger")
        self.log.debug("This is a debug Log for Testing the logger")
        self.log.error("This is an error log")
        return pd.Series(list(range(input_df.shape[0])))
class MockLoggingHandler(logging.Handler):
    """Logging handler that records messages per level for test assertions."""

    _LEVELS = ('debug', 'info', 'warning', 'error', 'critical')

    def __init__(self, *args, **kwargs):
        self.reset()
        logging.Handler.__init__(self, *args, **kwargs)

    def emit(self, record):
        # File each message under its lower-cased level name.
        self.messages[record.levelname.lower()].append(record.getMessage())

    def reset(self):
        """Clear all recorded messages."""
        self.messages = {level: [] for level in self._LEVELS}
|
import requests
import json
from flask import Blueprint, request
from responses import Result, ServerErrorResult, InvalidParameterResult
_user = Blueprint('_user', __name__, template_folder='templates')
# NOTE(review): this second blueprint reuses the blueprint name '_user';
# Flask refuses to register two blueprints with the same name on one app —
# confirm whether `cloud` should have its own name.
cloud = Blueprint('_user', __name__, url_prefix='/api/login')
@_user.route('/', methods=['POST'])
def api_login():
    """Proxy a login request to the app's /auth endpoint.

    Expects a JSON body containing 'email' and 'password'. Returns an
    InvalidParameterResult when either is missing; otherwise forwards the
    credentials and wraps the auth service's reply in a Result.
    """
    payload = request.json
    if 'email' not in payload or 'password' not in payload:
        result = InvalidParameterResult(errors=["Both email and password are required"])
        return result.http_response()
    auth_reply = requests.post(
        request.url_root + "/auth",
        data=json.dumps({'email': payload['email'], 'password': payload['password']}),
        headers={'content-type': 'application/json'},
    ).json()
    if auth_reply['meta']['code'] == 200:
        token = auth_reply['response']['user']['authentication_token']
        result = Result(200, "SUCCESS", "SUCCESS", extra_fields={'authentication_token': token})
    else:
        result = Result(auth_reply['meta']['code'], "FAILED", "FAILED", extra_fields=auth_reply['response'])
    return result.http_response()
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: ESToCSV.py
Description : 从es中导出数据到csv,构建知识库
Author : charl
date: 2018/12/12
-------------------------------------------------
Change Activity: 2018/12/12:
-------------------------------------------------
"""
from elasticsearch import Elasticsearch
from elasticsearch import helpers
def getData():
    '''
    Stream every document from the Elasticsearch knowledge-base index.

    Opens a connection and scroll-scans all documents of doc_type
    'ner_type' in index 'zhizhuxia', yielding each hit in turn.

    :return: generator of scan hits
    '''
    client = Elasticsearch(["192.168.11.211:9200"])
    match_all = {
        "query": {"match_all": {}}
    }
    scan_results = helpers.scan(client=client,
                                query=match_all,
                                scroll="3m",
                                index='zhizhuxia',
                                doc_type='ner_type',
                                timeout="3m")
    for hit in scan_results:
        yield hit
def write_file_detail(k):
    '''
    Append one ES hit to the detail CSV required by the graph database.

    Writes one comma-separated record: judgement id, defendant, court,
    keywords (joined with '、', omitted when empty) and the judgement text.

    :param k: a single scan hit from Elasticsearch
    :return: None
    '''
    with open('D:\project\Chatbot_CN\\Ner_detail.csv', 'ab') as f:
        k = dict(k)
        f.write(k['_source']['judgementId'].encode(encoding="utf-8"))  # judgement id
        f.write(b',')
        f.write(k['_source']['opponents'].encode(encoding="utf-8"))  # defendant
        f.write(b',')
        f.write(k['_source']['court'].encode(encoding="utf-8"))
        f.write(b',')
        keywords = k['_source']['keywords']
        keyw = '、'.join(keywords)
        # BUG FIX: the original used `keyw is not ''` — an identity check that
        # is unreliable for strings (and a SyntaxWarning on modern CPython);
        # compare by value instead.
        if keyw != '':
            f.write(keyw.encode(encoding="utf-8"))
        f.write(b',')
        text = k['_source']['judge_text']
        f.write(text.encode(encoding="utf-8"))
        f.write(b',')
        f.write(b'\n')
        f.flush()
def write_file_attr(k):
    '''
    Append the attribute rows required by the graph database.

    For each hit writes two records: the defendant tagged '被告' and the
    plaintiff tagged '原告', each carrying the judgement id.

    :param k: a single scan hit from Elasticsearch
    :return: None
    '''
    with open('D:\project\Chatbot_CN\\Ner_attr.csv', 'ab') as f:
        source = dict(k)['_source']
        # (party field, role label) pairs — defendant first, then plaintiff.
        for party_field, role in (('opponents', '被告'), ('proponents', '原告')):
            f.write(source[party_field].encode(encoding="utf-8"))
            f.write(b',')
            f.write(source['judgementId'].encode(encoding="utf-8"))
            f.write(b',')
            f.write(role.encode(encoding="utf-8"))
            f.write(b',')
            f.write(b'\n')
def write_file_relation(k):
    '''
    Append the relation rows required by the graph database.

    Writes one record per hit: defendant, the literal relation 'has', and
    the '、'-joined location entities.

    :param k: a single scan hit from Elasticsearch
    :return: None
    '''
    with open('D:\project\Chatbot_CN\\Ner_relation.csv', 'ab') as f:
        source = dict(k)['_source']
        fields = (source['opponents'], 'has', '、'.join(source['LOC']))
        for value in fields:
            f.write(value.encode(encoding="utf-8"))
            f.write(b',')
        f.write(b'\n')
        f.flush()
def write_file_node(k):
    '''
    Append the node rows for the graph database (verified manually).

    Writes the defendant on its own line, then one line per location
    entity found in the hit.

    :param k: a single scan hit from Elasticsearch
    :return: None
    '''
    with open('D:\project\Chatbot_CN\\Ner_node.csv', 'ab') as f:
        source = dict(k)['_source']
        f.write(source['opponents'].encode(encoding="utf-8"))  # defendant node
        f.write(b'\n')
        locations = source['LOC']  # location nodes
        if locations:
            for location in locations:
                f.write(location.encode(encoding="utf-8"))
                f.write(b'\n')
            print(locations)
        f.flush()
if __name__=='__main__':
    # Stream every document from ES and export it. The other writers are
    # kept commented out so a single run exports exactly one table.
    datas = getData()
    for index, k in enumerate(datas, 1):
        # write_file_detail(k)
        # write_file_relation(k)
        # write_file_node(k)
        write_file_attr(k)
        print('正在导出' + str(index) + '条数据')
"""
workflows of AiiDA-KKR
"""
# import all workflows here to expose them in `aiida_kkr.workflows` directly
from .voro_start import kkr_startpot_wc
from .dos import kkr_dos_wc, parse_dosfiles
from .kkr_scf import kkr_scf_wc
from .eos import kkr_eos_wc, rescale, get_primitive_structure
from .gf_writeout import kkr_flex_wc
from .kkr_imp_sub import kkr_imp_sub_wc
from .kkr_imp import kkr_imp_wc
from .kkr_imp_dos import kkr_imp_dos_wc
|
class LinearAveragesService:
    """Compute the per-class mean feature vector of a dataset.

    Each row of the dataset must expose ``match`` (its class label) and
    ``data`` (a sequence of numeric features, same length for every row of
    a class).
    """

    def __init__(self, dataset):
        self.dataset = dataset

    def call(self):
        """Return a dict mapping each class label to its mean feature list."""
        totals = {}
        counts = {}
        for row in self.dataset:
            label = row.match
            if label not in totals:
                # First row of this class: start a zeroed accumulator.
                totals[label] = [0.0] * len(row.data)
                counts[label] = 0
            # Fold this row's features into the running sums.
            for idx, value in enumerate(row.data):
                totals[label][idx] += float(value)
            counts[label] += 1
        # Convert the per-class sums into means.
        return {
            label: [component / counts[label] for component in sums]
            for label, sums in totals.items()
        }
|
import base64
import random
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import IS_POPUP_VAR
from django.contrib.contenttypes.models import ContentType
from django.contrib.messages.storage.fallback import FallbackStorage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http import Http404
from django.test import LiveServerTestCase, RequestFactory, TestCase, TransactionTestCase
from django.utils import six
from django.utils.timezone import now
from ..admin import FileAdmin, VideoAdmin
from ..models import File, Label, Video
from ..forms import mime_check
class BrokenFile:
    """
    A special class designed to raise an IOError the second time its `file`
    attribute is accessed. Used to test sorl.

    The first access returns the wrapped File's `file` field; later
    accesses return `file.file`, which is expected to fail for a path that
    does not exist on disk.
    """
    # Class-level default; flipped to True (as an instance attribute) on the
    # first `file` access.
    name_requested = False

    def __getattr__(self, name):
        # __getattr__ only fires for attributes not found normally, so every
        # other attribute is proxied straight from the wrapped File.
        if name == 'file':
            if not self.name_requested:
                self.name_requested = True
                return self.obj.file
            return self.obj.file.file
        return getattr(self.obj, name)

    def __init__(self, *args, **kwargs):
        # Create and wrap a real File row; kwargs are the File field values.
        self.obj = File.objects.create(**kwargs)
class MockSuperUser:
    """Minimal stand-in for an active staff user that holds every permission."""
    pk = 1
    is_active = True
    is_staff = True

    @staticmethod
    def has_perm(perm):
        # Grant any requested permission unconditionally.
        return True
class TestVideoAdmin(TestCase):
    """Tests for the VideoAdmin admin class."""

    def setUp(self):
        # Build a VideoAdmin on a standalone AdminSite with a GET request
        # issued by a mock superuser.
        self.site = AdminSite()
        self.video_admin = VideoAdmin(Video, self.site)
        factory = RequestFactory()
        self.request = factory.get('/')
        self.request.user = MockSuperUser

    def test_videoadmin_to_field_allowed(self):
        """Only 'id' is an allowed to_field reference."""
        self.assertTrue(self.video_admin.to_field_allowed(self.request, 'id'))
        self.assertFalse(self.video_admin.to_field_allowed(self.request, 'foo'))
class TestFileAdminBase(TransactionTestCase):
    """Tests for the FileAdmin admin class: labels, previews, views, mime."""

    def setUp(self):
        # FileAdmin on a standalone AdminSite, with a mock-superuser request
        # and a clean File table.
        self.site = AdminSite()
        self.file_admin = FileAdmin(File, self.site)
        File.objects.all().delete()
        self.factory = RequestFactory()
        self.request = self.factory.get('/')
        self.request.user = MockSuperUser()
        # An invalid JPEG (contents are not image data)
        self.name_1 = '{}-{}.jpg'.format(
            now().strftime('%Y-%m-%d_%H-%M-%S'),
            random.randint(0, six.MAXSIZE)
        )
        self.obj_1 = File.objects.create(
            title="Foo",
            file=SimpleUploadedFile(self.name_1, b"data", content_type="image/jpeg")
        )
        # A valid GIF (1x1 pixel, base64-encoded).
        self.name_2 = '{}-{}.gif'.format(
            now().strftime('%Y-%m-%d_%H-%M-%S'),
            random.randint(0, six.MAXSIZE)
        )
        base64_string = b'R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
        self.obj_2 = File.objects.create(
            title="Foo 2",
            file=SimpleUploadedFile(self.name_2, base64.b64decode(base64_string), content_type="image/gif")
        )
        # For mime_check: an image whose contents match the given extension
        self.file_1 = SimpleUploadedFile(self.name_2, base64.b64decode(base64_string), content_type="image/gif")
        # ...and one whose contents do not...
        self.file_2 = SimpleUploadedFile(self.name_2, base64.b64decode(base64_string), content_type="image/jpeg")
        # ...and one we should never check (we only care about images)
        self.file_3 = SimpleUploadedFile(self.name_2, base64.b64decode(base64_string), content_type="text/html")
        self.label = Label.objects.create(
            name="Foo"
        )

    def tearDown(self):
        # Remove the uploaded file from storage before deleting the row.
        self.obj_1.file.delete(False)
        self.obj_1.delete()
        # self.obj_2.file.delete(False)
        # self.obj_2.delete()

    def test_fileadminbase_to_field_allowed(self):
        """Only 'id' is an allowed to_field reference."""
        self.assertTrue(self.file_admin.to_field_allowed(self.request, 'id'))
        self.assertFalse(self.file_admin.to_field_allowed(self.request, 'foo'))

    def test_fileadminbase_add_label_action(self):
        """The add-label action attaches the label to every selected file."""
        self.assertEqual(self.obj_1.labels.count(), 0)
        self.file_admin.add_label_action(self.request, File.objects.all(), self.label)
        self.assertEqual(self.obj_1.labels.count(), 1)

    def test_fileadminbase_remove_label_action(self):
        """The remove-label action detaches the label from selected files."""
        self.assertEqual(self.obj_1.labels.count(), 0)
        self.obj_1.labels.add(self.label)
        self.assertEqual(self.obj_1.labels.count(), 1)
        self.file_admin.remove_label_action(self.request, File.objects.all(), self.label)
        self.assertEqual(self.obj_1.labels.count(), 0)

    def test_fileadminbase_get_actions(self):
        """Bulk actions are available normally but hidden in popup mode."""
        actions = self.file_admin.get_actions(self.request)
        self.assertEqual(len(actions), 2)
        self.request = self.factory.get('/?{}'.format(IS_POPUP_VAR))
        self.request.user = MockSuperUser()
        actions = self.file_admin.get_actions(self.request)
        self.assertEqual(len(actions), 0)

    def test_fileadminbase_get_size(self):
        """get_size reports the stored size, or '0 bytes' for a missing file."""
        # Why this has to use a unicode space, I don't know..
        self.assertEqual(self.file_admin.get_size(self.obj_1), '4\xa0bytes')
        obj = File.objects.create(
            title="Foo",
            file='media/not/a/real.file'
        )
        self.assertEqual(self.file_admin.get_size(obj), '0 bytes')

    def test_fileadminbase_get_preview(self):
        """get_preview renders a thumbnail or a type-specific fallback icon."""
        # Invalid image data falls back to the generic image icon.
        self.assertEqual(
            self.file_admin.get_preview(self.obj_1),
            '<img cms:permalink="/r/{}-{}/" src="/static/media/img/image-x-generic.png" width="56" height="66" alt="" title="Foo"/>'.format(
                ContentType.objects.get_for_model(File).pk,
                self.obj_1.pk
            )
        )
        # We can't do an `assertEqual` here as the generated src URL is dynamic.
        preview = self.file_admin.get_preview(self.obj_2)
        self.assertIn(
            '<img cms:permalink="/r/{}-{}/"'.format(
                ContentType.objects.get_for_model(File).pk,
                self.obj_2.pk
            ),
            preview,
        )
        self.assertIn(
            'width="66" height="66" alt="" title="Foo 2"/>',
            preview,
        )
        # A file whose thumbnailing raises (BrokenFile) also falls back.
        obj = BrokenFile(
            title="Foo",
            file='media/not/a/real.png'
        )
        preview = self.file_admin.get_preview(obj)
        self.assertEqual(preview, '<img cms:permalink="/r/{}-{}/" src="/static/media/img/image-x-generic.png" width="56" height="66" alt="" title="Foo"/>'.format(
            ContentType.objects.get_for_model(File).pk,
            obj.pk
        ))
        # A non-image file gets the generic template icon.
        obj = File.objects.create(
            title="Foo",
            file='media/not/a/real.file'
        )
        preview = self.file_admin.get_preview(obj)
        self.assertEqual(preview, '<img cms:permalink="/r/{}-{}/" src="/static/media/img/text-x-generic-template.png" width="56" height="66" alt="" title="Foo"/>'.format(
            ContentType.objects.get_for_model(File).pk,
            obj.pk
        ))
        obj.delete()

    def test_fileadminbase_get_title(self):
        self.assertEqual(self.file_admin.get_title(self.obj_1), 'Foo')

    def test_fileadminbase_response_add(self):
        """response_add redirects normally, renders inline for TinyMCE."""
        # Allow the messages framework to work.
        setattr(self.request, 'session', 'session')
        messages = FallbackStorage(self.request)
        setattr(self.request, '_messages', messages)
        self.request.user = MockSuperUser()
        response = self.file_admin.response_add(self.request, self.obj_1)
        self.assertEqual(response.status_code, 302)
        # With the _tinymce flag the admin returns a 200 page instead.
        self.request = self.factory.get('/?_tinymce')
        self.request.user = MockSuperUser()
        setattr(self.request, 'session', 'session')
        messages = FallbackStorage(self.request)
        setattr(self.request, '_messages', messages)
        self.request.user = MockSuperUser()
        self.request.pages = {}
        response = self.file_admin.response_add(self.request, self.obj_1)
        self.assertEqual(response.status_code, 200)

    def test_fileadminbase_changelist_view(self):
        """The changelist renders and passes extra_context through."""
        self.request.user = MockSuperUser()
        view = self.file_admin.changelist_view(self.request)
        self.assertEqual(view.status_code, 200)
        self.assertEqual(view.template_name, 'admin/media/file/change_list.html')
        self.assertNotIn('foo', view.context_data)
        view = self.file_admin.changelist_view(self.request, extra_context={'foo': 'bar'})
        self.assertEqual(view.status_code, 200)
        self.assertEqual(view.template_name, 'admin/media/file/change_list.html')
        self.assertIn('foo', view.context_data)

    def test_fileadminbase_mime_check(self):
        """mime_check flags extension/content mismatches for images only."""
        self.assertEqual(mime_check(self.file_1), True)
        self.assertEqual(mime_check(self.file_2), False)
        # Non-image uploads are not checked and always pass.
        self.assertEqual(mime_check(self.file_3), True)
class LiveServerTestFileAdminBase(LiveServerTestCase):
    """FileAdmin tests that need a live server (the remote-fetch view)."""

    def setUp(self):
        self.site = AdminSite()
        self.file_admin = FileAdmin(File, self.site)
        self.factory = RequestFactory()
        self.request = self.factory.get('/')
        self.request.user = MockSuperUser
        # An invalid JPEG (contents are not image data)
        self.name_1 = '{}-{}.jpg'.format(
            now().strftime('%Y-%m-%d_%H-%M-%S'),
            random.randint(0, six.MAXSIZE)
        )
        self.obj_1 = File.objects.create(
            title="Foo",
            file=SimpleUploadedFile(self.name_1, b"data", content_type="image/jpeg")
        )

    def tearDown(self):
        # Remove the uploaded file from storage before deleting the row.
        self.obj_1.file.delete(False)
        self.obj_1.delete()

    def test_fileadminbase_remote_view(self):
        """remote_view: POST-only, permission-gated, fetches a remote URL."""
        self.request.user = MockSuperUser()
        view = self.file_admin.remote_view(self.request, self.obj_1.pk)
        # 405: Method not allowed. We have to POST to this view.
        self.assertEqual(view.status_code, 405)
        self.request.method = 'POST'
        # No URL supplied.
        with self.assertRaises(Http404):
            view = self.file_admin.remote_view(self.request, self.obj_1.pk)
        # No permissions.
        self.request.user.has_perm = lambda x: False
        view = self.file_admin.remote_view(self.request, self.obj_1.pk)
        self.assertEqual(view.status_code, 403)
        self.request.user.has_perm = lambda x: True
        # Allow the messages framework to work.
        setattr(self.request, 'session', 'session')
        messages = FallbackStorage(self.request)
        setattr(self.request, '_messages', messages)
        self.request.user = MockSuperUser()
        # A valid URL served by this test's live server succeeds.
        self.request.POST = {
            'url': self.live_server_url + '/static/media/img/text-x-generic.png'
        }
        view = self.file_admin.remote_view(self.request, self.obj_1.pk)
        self.assertEqual(view.content, b'{"status": "ok"}')
        self.assertEqual(view.status_code, 200)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-29 08:05
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: attaches API-level CRUD permissions to the
    # ResultPage model's Meta options (an options-only change; no database
    # schema alteration).

    dependencies = [
        ('cms', '0018_add_group_permissions'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='resultpage',
            options={'permissions': (('api_read_resultpage', 'Can view result pages through the API'), ('api_add_resultpage', 'Can add result pages through the API'), ('api_change_resultpage', 'Can change result pages through the API'), ('api_delete_resultpage', 'Can delete result pages through the API'))},
        ),
    ]
|
# Renamed the import alias: ``long`` shadowed the Python 2 builtin and said
# nothing about what the module does.
import kass_nn.level_2.characteristics.min_vs_long_req as min_vs_long_req

if __name__ == '__main__':
    # Run the min-vs-long-request characteristic on the big test log.
    min_vs_long_req.main("BIG_TEST_TRANS_min_long.log")
import tensorflow as tf
import modules
import math
class CrazyCluster(modules.BaseModule):
    """Per-cluster soft attention over frames; forward() concatenates the
    per-cluster summaries along the frame axis."""

    def __init__(self, feature_size, hidden_size, num_frames, last_layer, num_cluster, do_shift=True):
        # feature_size: int, per-frame feature dimensionality
        # hidden_size:  stored but not used by this class
        # num_frames:   int, frames per example
        # last_layer:   stored but not used by this class
        # num_cluster:  int, number of attention clusters
        # do_shift:     stored but not used by this class
        self.feature_size = feature_size
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_cluster = num_cluster
        self.last_layer = last_layer
        self.do_shift = do_shift

    def normal_attention(self, inputs, cluster_id):
        """
        One attention cluster: a dense layer yields a num_frames x num_frames
        attention map that re-weights the frames; the result is mean-pooled
        over frames, affinely shifted (learned alpha/beta), l2-normalized and
        scaled by 1/sqrt(num_cluster).

        :param inputs: batch_size x num_frames x feature_size
        :param cluster_id: int, only namespaces this cluster's variables
        :return: batch_size x 1 x feature_size cluster summary
        """
        with tf.variable_scope("cluster{}".format(str(cluster_id))):
            attention_weights = tf.layers.dense(inputs, self.num_frames, activation=None, name="attention")
            # Temperature scaling by sqrt(feature_size) before the softmax.
            float_cpy = tf.cast(self.feature_size, dtype=tf.float32)
            attention = tf.divide(attention_weights, tf.sqrt(float_cpy))
            attention = tf.nn.softmax(attention)
            output = tf.matmul(attention, inputs)
            # keep_dims (TF1 spelling) keeps the pooled frame axis as length 1.
            output = tf.reduce_mean(output, axis=1, keep_dims=True)
            # Learned scalar affine shift applied before normalization.
            alpha = \
                tf.get_variable("alpha",
                                [1],
                                initializer=tf.constant_initializer(1))
            beta = \
                tf.get_variable("beta",
                                [1],
                                initializer=tf.constant_initializer(0))
            activation = alpha * output
            activation = activation + beta
            activation = tf.nn.l2_normalize(activation)
            # Pre-scale so the later concat over num_cluster summaries stays bounded.
            float_cpy = tf.cast(self.num_cluster, dtype=tf.float32)
            activation = tf.divide(activation, tf.sqrt(float_cpy))
            return activation

    def forward(self, inputs, **unused_params):
        """Concatenate all cluster summaries on axis 1
        -> batch_size x num_cluster x feature_size."""
        result = self.normal_attention(inputs, cluster_id=0)
        for i in range(1, self.num_cluster):
            output = self.normal_attention(inputs, cluster_id=i)
            result = tf.concat([result, output], 1)
        return result
class CrazyFeedForward(modules.BaseModule):
    """ Feed Forward Network with residual connection and layer normalization. """

    def __init__(self, feature_size, filter_size, relu_dropout,
                 is_train, scope_id):
        """ Initialize class CrazyFeedForward.
        :param feature_size: int, input/output dimensionality
        :param filter_size: int, hidden (expansion) dimensionality
        :param relu_dropout: float, dropout rate (currently unused; see forward)
        :param is_train: bool
        :param scope_id: String, suffix used to name the dense layers
        """
        self.feature_size = feature_size
        self.filter_size = filter_size
        self.relu_dropout = relu_dropout
        self.is_train = is_train
        self.scope_id = scope_id

    def forward(self, inputs, **unused_params):
        """ Forward method for CrazyFeedForward.
        :param inputs: 3D Tensor with size 'batch_size x num_feature x feature_size'
        :return: 3D Tensor with size 'batch_size x num_feature x feature_size'
        """
        filter_output = tf.layers.dense(inputs, self.filter_size,
                                        use_bias=True,
                                        activation=tf.nn.relu,
                                        name="filter_output{}".format(self.scope_id))
        # Dropout deliberately left disabled; relu_dropout is kept for
        # interface compatibility.
        # if self.is_train:
        #     filter_output = tf.nn.dropout(filter_output, 1.0 - self.relu_dropout)
        output = tf.layers.dense(filter_output, self.feature_size,
                                 use_bias=True,
                                 activation=tf.nn.relu,
                                 name="ff_output{}".format(self.scope_id))
        # Residual connection followed by layer normalization.
        output = output + inputs
        output = tf.contrib.layers.layer_norm(output)
        return output
class CrazyMultiHead(modules.BaseModule):
    """Multi-head self-attention; heads are concatenated on the feature axis,
    projected back to feature_size and layer-normalized."""

    def __init__(self, feature_size, num_heads, max_frames, is_training):
        # feature_size: int, per-frame feature dimensionality
        # num_heads:    int, number of attention heads
        # max_frames:   int, frames per example
        # is_training:  bool (stored; not read in this class)
        self.feature_size = feature_size
        self.num_heads = num_heads
        self.max_frames = max_frames
        self.is_training = is_training

    def self_attention(self, inputs, head_id):
        """
        One full-width attention head: Q/K/V projections to feature_size,
        scaled dot-product attention, learned alpha/beta shift, l2
        normalization and a 1/sqrt(num_heads) scale.

        :param inputs: batch_size x max_frames x feature_size
        :param head_id: int, only namespaces this head's variables
        :return: batch_size x max_frames x feature_size
        """
        with tf.variable_scope("head{}".format(head_id)):
            Q = tf.layers.dense(inputs, self.feature_size, use_bias=False, activation=None)
            K = tf.layers.dense(inputs, self.feature_size, use_bias=False, activation=None)
            V = tf.layers.dense(inputs, self.feature_size, use_bias=False, activation=None)
            attention = tf.matmul(Q, tf.transpose(K, perm=[0, 2, 1]))
            # Temperature scaling by sqrt(feature_size) before the softmax.
            float_cpy = tf.cast(self.feature_size, dtype=tf.float32)
            attention = tf.divide(attention, tf.sqrt(float_cpy))
            attention = tf.nn.softmax(attention)
            activation = tf.matmul(attention, V)
            # output: -> batch_size x max_frames x num_units
            # Learned scalar affine shift before normalization.
            alpha = \
                tf.get_variable("alpha",
                                [1],
                                initializer=tf.constant_initializer(1))
            beta = \
                tf.get_variable("beta",
                                [1],
                                initializer=tf.constant_initializer(0))
            activation = activation * alpha
            activation = activation + beta
            activation = tf.nn.l2_normalize(activation)
            # Pre-scale so concatenating num_heads heads stays bounded.
            float_cpy = tf.cast(self.num_heads, dtype=tf.float32)
            activation = tf.divide(activation, tf.sqrt(float_cpy))
            return activation

    def forward(self, inputs, **unused_params):
        """Concatenate all heads on the last axis, project to feature_size and
        layer-normalize."""
        result = self.self_attention(inputs, head_id=0)
        for i in range(1, self.num_heads):
            output = self.self_attention(inputs, head_id=i)
            result = tf.concat([result, output], 2)
        output = tf.layers.dense(result, self.feature_size, use_bias=False, activation=None)
        output = tf.contrib.layers.layer_norm(output)
        return output
class JuhanBlock(modules.BaseModule):
    """Composite encoder block: multi-head self-attention, feed-forward,
    attention-based clustering, then a second feed-forward network."""

    def __init__(self, feature_size, filter_size, num_cluster, num_units, max_frames,
                 is_training, last_layer, block_id):
        # feature_size: int, per-frame feature dimensionality
        # filter_size:  int, feed-forward expansion size
        # num_cluster:  int, used both as head count and cluster count
        # num_units:    int, attention projection size
        # max_frames:   int, frames per example
        # last_layer:   forwarded unchanged to OneFcAttentionV9
        # block_id:     namespaces all variables of this block
        self.feature_size = feature_size
        self.filter_size = filter_size
        self.num_cluster = num_cluster
        self.num_units = num_units
        self.max_frames = max_frames
        self.is_training = is_training
        self.last_layer = last_layer
        self.block_id = block_id
        self.multi_head = MultiHeadAttentionV2(feature_size=feature_size,
                                               num_heads=num_cluster,
                                               num_units=num_units,
                                               max_frames=max_frames,
                                               block_id=block_id)
        self.ff1 = FeedForwardNetwork(feature_size=feature_size,
                                      filter_size=filter_size,
                                      relu_dropout=0.1,
                                      is_train=is_training,
                                      scope_id=block_id)
        self.attention_cluster = OneFcAttentionV9(feature_size=feature_size,
                                                  hidden_size=num_units,
                                                  num_frames=max_frames,
                                                  num_cluster=num_cluster,
                                                  last_layer=last_layer,
                                                  do_shift=True)
        self.ff2 = FeedForwardNetwork(feature_size=feature_size,
                                      filter_size=filter_size,
                                      relu_dropout=0.1,
                                      is_train=is_training,
                                      scope_id=block_id)

    def forward(self, inputs, **unused_params):
        """ Forward method
        :param inputs: 3D Tensor with size 'batch_size x max_frames x feature_size'
        :return: 3D Tensor with size 'batch_size x num_cluster x feature_size'
        """
        with tf.variable_scope("block{}".format(str(self.block_id))):
            with tf.variable_scope("multi_head"):
                mh_output = self.multi_head.forward(inputs)
                # -> batch_size x max_frames x feature_size
            with tf.variable_scope("ff1"):
                ff1_output = self.ff1.forward(mh_output)
                # -> batch_size x max_frames x feature_size
            with tf.variable_scope("one_attention"):
                mh2_output = self.attention_cluster.forward(ff1_output)
                # -> batch_size x cluster_size x feature_size
            with tf.variable_scope("ff2"):
                ff2_output = self.ff2.forward(mh2_output)
                # -> batch_size x cluster_size x feature_size
            return ff2_output
class MultiHeadAttentionV2(modules.BaseModule):
    """Multi-head scaled dot-product self-attention; heads are concatenated on
    the feature axis, projected back to feature_size and layer-normalized."""

    def __init__(self, feature_size, num_heads, num_units, max_frames, block_id):
        """
        :param feature_size: int, output feature dimensionality
        :param num_heads: Number of self-attention modules
        :param num_units: last dimension of Q, K, V
        :param max_frames: int, frames per example
        :param block_id: used in the per-head variable scope names
        """
        self.feature_size = feature_size
        self.num_heads = num_heads
        self.num_units = num_units
        self.max_frames = max_frames
        self.block_id = block_id

    def self_attention(self, inputs, scope_id):
        """
        Scaled dot-product self-attention for a single head.

        :param inputs: batch_size x max_frames x feature_size
        :param scope_id: int, head index used in the variable scope name
        :return: batch_size x max_frames x num_units (alpha/beta-shifted,
                 l2-normalized, divided by sqrt(num_heads))
        """
        with tf.variable_scope("Block{}Layer{}".format(self.block_id, scope_id)):
            # Calculate query, key, value pair
            Q = tf.layers.dense(inputs, self.num_units, use_bias=False, activation=None)
            K = tf.layers.dense(inputs, self.num_units, use_bias=False, activation=None)
            V = tf.layers.dense(inputs, self.num_units, use_bias=False, activation=None)
            # Self-attention
            attention = tf.matmul(Q, tf.transpose(K, perm=[0, 2, 1]))
            # attention: -> batch_size x max_frames x max_frames
            # Temperature scaling by sqrt(num_units) before the softmax.
            float_cpy = tf.cast(self.num_units, dtype=tf.float32)
            attention = tf.nn.softmax(tf.divide(attention, tf.sqrt(float_cpy)))
            output = tf.matmul(attention, V)
            # output: -> batch_size x max_frames x num_units
            # Learned scalar affine shift before normalization.
            alpha = \
                tf.get_variable("alpha",
                                [1],
                                initializer=tf.constant_initializer(1))
            beta = \
                tf.get_variable("beta",
                                [1],
                                initializer=tf.constant_initializer(0))
            reshaped_activation = alpha * output
            reshaped_activation = reshaped_activation + beta
            output = tf.nn.l2_normalize(reshaped_activation)
            # Pre-scale so concatenating num_heads heads stays bounded.
            float_cpy = tf.cast(self.num_heads, dtype=tf.float32)
            output = tf.divide(output, tf.sqrt(float_cpy))
            return output

    def forward(self, inputs, **unused_params):
        """Run all heads, concat on the last axis, project to feature_size and
        layer-normalize -> batch_size x max_frames x feature_size."""
        result = self.self_attention(inputs, scope_id=0)
        for i in range(1, self.num_heads):
            output = self.self_attention(inputs, scope_id=i)
            result = tf.concat([result, output], 2)
        output = tf.layers.dense(result, self.feature_size, use_bias=False, activation=None)
        output = tf.contrib.layers.layer_norm(output)
        return output
class OneFcAttentionV9(modules.BaseModule):
    """Attention clustering: each cluster has its own key/value projections and
    attention map; the cluster summaries are concatenated, projected back to
    feature_size and layer-normalized."""

    def __init__(self, feature_size, hidden_size, num_frames, last_layer, num_cluster, do_shift=True):
        # feature_size: int, per-frame feature dimensionality
        # hidden_size:  stored but not used by this class
        # num_frames:   int, frames per example
        # last_layer:   stored but not used by this class
        # num_cluster:  int, number of clusters
        # do_shift:     stored but not used by this class
        self.feature_size = feature_size
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_cluster = num_cluster
        self.last_layer = last_layer
        self.do_shift = do_shift

    def normal_attention(self, inputs, cluster_id):
        """
        One cluster: project inputs to keys/values, derive a num_frames x
        num_frames attention map from the keys, aggregate the values,
        mean-pool over frames, apply a learned alpha/beta shift, l2-normalize
        and scale by 1/sqrt(num_cluster).

        :param inputs: batch_size x num_frames x feature_size
        :param cluster_id: int, only namespaces this cluster's variables
        :return: batch_size x 1 x feature_size cluster summary
        """
        with tf.variable_scope("cluster{}".format(str(cluster_id))):
            keys = tf.layers.dense(inputs, self.feature_size, use_bias=False, activation=None, name="k")
            values = tf.layers.dense(inputs, self.feature_size, use_bias=False, activation=None, name="v")
            attention_weights = tf.layers.dense(keys, self.num_frames, activation=None, name="attention")
            # Temperature scaling by sqrt(feature_size) before the softmax.
            float_cpy = tf.cast(self.feature_size, dtype=tf.float32)
            attention = tf.nn.softmax(tf.divide(attention_weights, tf.sqrt(float_cpy)))
            output = tf.matmul(attention, values)
            # keep_dims (TF1 spelling) keeps the pooled frame axis as length 1.
            output = tf.reduce_mean(output, axis=1, keep_dims=True)
            # output: -> batch_size x max_frames x num_units
            # Learned scalar affine shift before normalization.
            alpha = \
                tf.get_variable("alpha",
                                [1],
                                initializer=tf.constant_initializer(1))
            beta = \
                tf.get_variable("beta",
                                [1],
                                initializer=tf.constant_initializer(0))
            reshaped_activation = alpha * output
            reshaped_activation = reshaped_activation + beta
            reshaped_activation = tf.nn.l2_normalize(reshaped_activation)
            # Pre-scale so the concat over num_cluster summaries stays bounded.
            float_cpy = tf.cast(self.num_cluster, dtype=tf.float32)
            output = tf.divide(reshaped_activation, tf.sqrt(float_cpy))
            return output

    def forward(self, inputs, **unused_params):
        """Concatenate cluster summaries on axis 1, project to feature_size and
        layer-normalize -> batch_size x num_cluster x feature_size."""
        result = self.normal_attention(inputs, cluster_id=0)
        for i in range(1, self.num_cluster):
            output = self.normal_attention(inputs, cluster_id=i)
            result = tf.concat([result, output], 1)
        output = tf.layers.dense(result, self.feature_size, use_bias=False, activation=None)
        output = tf.contrib.layers.layer_norm(output)
        return output
class OneFcAttentionV3(modules.BaseModule):
    """Single-FC soft clustering: num_cluster attention distributions over the
    frames aggregate the inputs into num_cluster summary vectors."""

    def __init__(self, feature_size, num_frames, num_cluster, do_shift=True):
        # do_shift is stored but not read by this class.
        self.feature_size = feature_size
        self.num_frames = num_frames
        self.num_cluster = num_cluster
        self.do_shift = do_shift

    def forward(self, inputs, **unused_params):
        """
        :param inputs: batch_size x num_frames x feature_size
        :return: batch_size x num_cluster x feature_size
        """
        # Per-frame logits for each cluster, temperature-scaled by
        # sqrt(feature_size), softmaxed over the frame axis after transposing.
        logits = tf.layers.dense(inputs, self.num_cluster, activation=None)
        feature_scale = tf.sqrt(tf.cast(self.feature_size, dtype=tf.float32))
        weights = tf.nn.softmax(tf.divide(logits, feature_scale))
        weights = tf.transpose(weights, perm=[0, 2, 1])
        # Weighted sum of frames per cluster.
        clusters = tf.matmul(weights, inputs)
        # -> batch_size x num_cluster x feature_size
        projected = tf.layers.dense(clusters, self.feature_size, activation=None)
        normalized = tf.nn.l2_normalize(projected)
        cluster_scale = tf.sqrt(tf.cast(self.num_cluster, dtype=tf.float32))
        return tf.divide(normalized, cluster_scale)
class TransformerEncoderBlockV2(modules.BaseModule):
    """One encoder block: multi-head self-attention, a ReLU projection back to
    feature_size, a residual connection and layer normalization."""

    def __init__(self, is_training, num_units, max_frames, feature_size, num_heads, block_id):
        """
        :param is_training: bool
        :param num_units: Number of hidden units of fully connected layers
        :param max_frames: int, frames per example
        :param feature_size: int, input/output feature dimensionality
        :param num_heads: int, number of attention heads
        :param block_id: identifier used for variable scoping
        """
        self.is_training = is_training
        self.num_units = num_units
        self.max_frames = max_frames
        self.feature_size = feature_size
        self.num_heads = num_heads
        self.block_id = block_id

    def forward(self, inputs, **unused_params):
        """
        One block of encoder containing one self-attention layer and one fully
        connected layer.
        :param inputs: (batch_size * max_frames) x feature_size
        :return: layer-normalized tensor of the same shape as `inputs`
        """
        # Bug fix: MultiHeadAttentionV2.__init__ takes feature_size as its
        # first argument; it was previously omitted, shifting every argument
        # one position over (a TypeError at graph construction time).
        multi_head_layer = MultiHeadAttentionV2(self.feature_size, self.num_heads,
                                                self.num_units, self.max_frames,
                                                self.block_id)
        attention_output = multi_head_layer.forward(inputs)
        # output: -> batch_size x max_frames x (num_units * num_heads)
        # NOTE(review): MultiHeadAttentionV2.forward actually projects its
        # result back to feature_size, so this reshape is only valid when
        # feature_size == num_units * num_heads — confirm with callers.
        attention_output = tf.reshape(attention_output, [-1, self.num_units * self.num_heads])
        # output: -> (batch_size * max_frames) x (num_units * num_heads)
        attention_output = tf.layers.dense(attention_output, self.feature_size, activation=tf.nn.relu)
        # output: -> (batch_size * max_frames) x feature_size
        # Residual connection & layer normalization.  (A second, redundant
        # back-to-back layer_norm was removed here.)
        attention_output += inputs
        output = tf.contrib.layers.layer_norm(attention_output)
        # output = tf.reshape(output, [-1, self.feature_size])
        return output
class TransformerEncoder(modules.BaseModule):
    """Transformer encoder layer: multi-head attention and a feed-forward
    network, each wrapped with a residual connection and layer norm."""

    def __init__(self, feature_size, hidden_size, num_heads, attention_dropout,
                 ff_filter_size, ff_relu_dropout,
                 is_train, scope_id):
        # feature_size:      int, model dimensionality
        # hidden_size:       int, total attention projection size
        # num_heads:         int
        # attention_dropout: float (dropout is currently disabled inside
        #                    MultiHeadAttention; kept for interface stability)
        # ff_filter_size:    int, feed-forward expansion size
        # ff_relu_dropout:   float
        # is_train:          bool
        # scope_id:          String, suffix for the feed-forward layer names
        self.feature_size = feature_size
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.attention_dropout = attention_dropout
        self.ff_filter_size = ff_filter_size
        self.ff_relu_dropout = ff_relu_dropout
        self.is_train = is_train
        self.scope_id = scope_id
        self.multi_head_attention = MultiHeadAttention(feature_size,
                                                       hidden_size,
                                                       num_heads,
                                                       attention_dropout,
                                                       is_train)
        self.ff_network = FeedForwardNetwork(feature_size,
                                             ff_filter_size,
                                             ff_relu_dropout,
                                             is_train,
                                             self.scope_id)

    def forward(self, inputs, **unused_params):
        """
        :param inputs: [batch_size, input_length, hidden_size]
        :param unused_params:
        :return: encoded tensor of the same shape as `inputs`
        """
        attention = self.multi_head_attention.forward(inputs, inputs)
        # Residual connection + layer norm around the attention sub-layer.
        attention = attention + inputs
        attention = tf.contrib.layers.layer_norm(attention)
        # NOTE(review): FeedForwardNetwork.forward already adds its own
        # residual and layer norm internally, so both are applied twice
        # here — confirm this duplication is intended.
        ff_output = self.ff_network.forward(attention)
        ff_output = ff_output + attention
        ff_output = tf.contrib.layers.layer_norm(ff_output)
        return ff_output
class TransformerEncoderMod(modules.BaseModule):
    """Encoder variant: batch-normalized multi-head attention plus a
    feed-forward network with configurable output size (no final residual)."""

    def __init__(self, feature_size, hidden_size, num_heads, attention_dropout,
                 ff_filter_size, ff_relu_dropout,
                 is_train, scope_id, final_size):
        """
        :param feature_size: int, input feature dimensionality
        :param hidden_size: int, total attention projection size
        :param num_heads: int
        :param attention_dropout: float, fraction of attention outputs to DROP
        :param ff_filter_size: int, feed-forward expansion size
        :param ff_relu_dropout: float
        :param is_train: bool
        :param scope_id: String
        :param final_size: int, output size of the feed-forward network
        """
        self.feature_size = feature_size
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.attention_dropout = attention_dropout
        self.ff_filter_size = ff_filter_size
        self.ff_relu_dropout = ff_relu_dropout
        self.is_train = is_train
        self.scope_id = scope_id
        self.final_size = final_size
        self.multi_head_attention = MultiHeadAttentionBN(feature_size,
                                                         hidden_size,
                                                         num_heads,
                                                         attention_dropout,
                                                         is_train)
        self.ff_network = FeedForwardNetworkMod(feature_size,
                                                ff_filter_size,
                                                ff_relu_dropout,
                                                is_train,
                                                self.scope_id,
                                                final_size)

    def forward(self, inputs, **unused_params):
        """
        :param inputs: [batch_size, input_length, hidden_size]
        :param unused_params:
        :return: feed-forward output with final_size on the last axis
        """
        attention = self.multi_head_attention.forward(inputs, inputs)
        # Bug fix: tf.layers.dropout's `rate` IS the drop probability (unlike
        # tf.nn.dropout's keep_prob).  `attention_dropout` is used as a drop
        # probability elsewhere in this file (see the commented-out
        # `tf.nn.dropout(weights, 1.0 - self.attention_dropout)` in
        # MultiHeadAttention), so the previous `rate=1.0 - attention_dropout`
        # dropped nearly everything for small dropout settings.
        attention = tf.layers.dropout(attention, rate=self.attention_dropout, training=self.is_train)
        attention = attention + inputs
        # Final residual connection removed:
        attention = tf.contrib.layers.layer_norm(attention)
        ff_output = self.ff_network.forward(attention)
        return ff_output
class TransformerDecoder(modules.BaseModule):
    """Transformer decoder layer: masked-style self-attention over `inputs`,
    cross-attention over `encoder_inputs`, then a feed-forward network; each
    sub-layer gets a residual connection and layer norm."""

    def __init__(self, feature_size, hidden_size, num_heads, attention_dropout,
                 ff_filter_size, ff_relu_dropout,
                 is_train, scope_id):
        # feature_size:      int, model dimensionality
        # hidden_size:       int, total attention projection size
        # num_heads:         int
        # attention_dropout: float (dropout currently disabled inside
        #                    MultiHeadAttention; kept for interface stability)
        # ff_filter_size:    int, feed-forward expansion size
        # ff_relu_dropout:   float
        # is_train:          bool
        # scope_id:          String, suffix for the feed-forward layer names
        self.feature_size = feature_size
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.attention_dropout = attention_dropout
        self.ff_filter_size = ff_filter_size
        self.ff_relu_dropout = ff_relu_dropout
        self.is_train = is_train
        self.scope_id = scope_id
        self.multi_head_attention1 = MultiHeadAttention(feature_size,
                                                        hidden_size,
                                                        num_heads,
                                                        attention_dropout,
                                                        is_train)
        self.multi_head_attention2 = MultiHeadAttention(feature_size,
                                                        hidden_size,
                                                        num_heads,
                                                        attention_dropout,
                                                        is_train)
        self.ff_network = FeedForwardNetwork(feature_size,
                                             ff_filter_size,
                                             ff_relu_dropout,
                                             is_train,
                                             scope_id)

    def forward(self, inputs, encoder_inputs, **unused_params):
        """
        :param inputs: decoder-side tensor [batch_size, length, hidden_size]
        :param encoder_inputs: encoder output attended to by the second MHA
        :return: tensor of the same shape as `inputs`
        """
        # Self-attention sub-layer with residual + layer norm.
        with tf.variable_scope("first_mha"):
            attention1 = self.multi_head_attention1.forward(inputs, inputs)
            attention1 = attention1 + inputs
            attention1 = tf.contrib.layers.layer_norm(attention1)
        # Cross-attention sub-layer (queries from the decoder, keys/values
        # from the encoder) with residual + layer norm.
        with tf.variable_scope("second_mha"):
            attention2 = self.multi_head_attention2.forward(attention1, encoder_inputs)
            attention2 = attention2 + attention1
            attention2 = tf.contrib.layers.layer_norm(attention2)
        # NOTE(review): FeedForwardNetwork.forward already applies its own
        # residual and layer norm, so both happen twice here — confirm intended.
        ff_output = self.ff_network.forward(attention2)
        ff_output = ff_output + attention2
        ff_output = tf.contrib.layers.layer_norm(ff_output)
        return ff_output
class MultiHeadAttention(modules.BaseModule):
    """Standard multi-head attention with head splitting/combining.  Queries
    come from `queries`; keys and values both come from `keys`."""

    def __init__(self, feature_size, hidden_size, num_heads, attention_dropout, is_train):
        """ Initialize class MultiHeadAttention.
        :param feature_size: int, output dimensionality of the final projection
        :param hidden_size: int, total projection size shared across all heads
        :param num_heads: int
        :param attention_dropout: float (dropout currently disabled; see forward)
        :param is_train: bool
        """
        self.feature_size = feature_size
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.attention_dropout = attention_dropout
        self.is_train = is_train

    def split_heads(self, inputs):
        """ Split x into different heads, and transpose the resulting value.
        The tensor is transposed to insure the inner dimensions hold the correct
        values during the matrix multiplication.
        :param inputs: 3D Tensor with shape 'batch_size x length x hidden_size'
        :return: 4D Tensor 'batch_size x num_heads x length x hidden_size/num_heads'
        """
        with tf.name_scope("split_heads"):
            batch_size = tf.shape(inputs)[0]
            length = tf.shape(inputs)[1]
            # Calculate depth of last dimension after it has been split.
            depth = (self.hidden_size // self.num_heads)
            # Split the last dimension
            x = tf.reshape(inputs, [batch_size, length, self.num_heads, depth])
            # Transpose the result
            return tf.transpose(x, [0, 2, 1, 3])

    def combine_heads(self, inputs):
        """ Combine tensor that has been split.
        :param inputs: 4D Tensor with shape 'batch_size x num_heads, num_feature, hidden_size/num_heads'
        :return: 3D Tensor with shape 'batch_size x length x hidden_size'
        """
        with tf.name_scope("combine_heads"):
            batch_size = tf.shape(inputs)[0]
            length = tf.shape(inputs)[2]
            x = tf.transpose(inputs, [0, 2, 1, 3])  # --> [batch, length, num_heads, depth]
            return tf.reshape(x, [batch_size, length, self.hidden_size])

    def forward(self, queries, keys):
        """ Forward method for MultiHeadAttention
        :param queries: 3D Tensor with shape 'batch_size x length x hidden_size'
        :param keys: 3D Tensor with shape 'batch_size x length x hidden_size'
        :return: 3D Tensor with shape 'batch_size x length x feature_size'
        """
        # Layers for linearly projecting the queries, keys, and values.
        q = tf.layers.dense(queries, self.hidden_size, use_bias=False, name="q")
        k = tf.layers.dense(keys, self.hidden_size, use_bias=False, name="k")
        v = tf.layers.dense(keys, self.hidden_size, use_bias=False, name="v")
        # Split q, k, v into heads.
        q = self.split_heads(q)
        k = self.split_heads(k)
        v = self.split_heads(v)
        # -> [batch_size, num_heads, length, hidden_size/num_heads]
        # Scale q to prevent the dot product between q and k from growing too large.
        depth = (self.hidden_size // self.num_heads)
        q *= depth ** -0.5
        logits = tf.matmul(q, k, transpose_b=True)
        weights = tf.nn.softmax(logits, name="attention_weights")
        # Attention dropout deliberately left disabled; attention_dropout is
        # kept for interface compatibility.
        # if self.is_train:
        #     weights = tf.nn.dropout(weights, 1.0 - self.attention_dropout)
        attention_output = tf.matmul(weights, v)
        # -> batch_size x length x hidden_size]
        attention_output = self.combine_heads(attention_output)
        # Final linear projection back to feature_size.
        attention_output = tf.layers.dense(attention_output,
                                           self.feature_size,
                                           use_bias=True, name="output_transform")
        return attention_output
class MultiHeadAttentionBN(modules.BaseModule):
    """Multi-head attention variant that batch-normalizes the attention logits
    (instead of scaling q) and the combined head output."""

    def __init__(self, feature_size, hidden_size, num_heads, attention_dropout, is_train):
        """ Initialize class MultiHeadAttentionBN.
        :param feature_size: int, output dimensionality of the final projection
        :param hidden_size: int, total projection size shared across all heads
        :param num_heads: int
        :param attention_dropout: float (stored; not used in forward)
        :param is_train: bool, controls the batch-norm moving statistics
        """
        self.feature_size = feature_size
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.attention_dropout = attention_dropout
        self.is_train = is_train

    def split_heads(self, inputs):
        """ Split x into different heads, and transpose the resulting value.
        The tensor is transposed to insure the inner dimensions hold the correct
        values during the matrix multiplication.
        :param inputs: 3D Tensor with shape 'batch_size x length x hidden_size'
        :return: 4D Tensor 'batch_size x num_heads x length x hidden_size/num_heads'
        """
        with tf.name_scope("split_heads"):
            batch_size = tf.shape(inputs)[0]
            # Unlike MultiHeadAttention.split_heads, this uses the STATIC
            # shape for the length axis, so the frame count must be known at
            # graph-construction time.
            length = (inputs.get_shape())[1]
            # Calculate depth of last dimension after it has been split.
            depth = (self.hidden_size // self.num_heads)
            # Split the last dimension
            x = tf.reshape(inputs, [batch_size, length, self.num_heads, depth])
            # Transpose the result
            return tf.transpose(x, [0, 2, 1, 3])

    def combine_heads(self, inputs):
        """ Combine tensor that has been split.
        :param inputs: 4D Tensor with shape 'batch_size x num_heads, num_feature, hidden_size/num_heads'
        :return: 3D Tensor with shape 'batch_size x length x hidden_size'
        """
        with tf.name_scope("combine_heads"):
            batch_size = tf.shape(inputs)[0]
            length = tf.shape(inputs)[2]
            x = tf.transpose(inputs, [0, 2, 1, 3])  # --> [batch, length, num_heads, depth]
            return tf.reshape(x, [batch_size, length, self.hidden_size])

    def forward(self, queries, keys):
        """ Forward method for MultiHeadAttentionBN
        :param queries: 3D Tensor with shape 'batch_size x length x hidden_size'
        :param keys: 3D Tensor with shape 'batch_size x length x hidden_size'
        :return: 3D Tensor with shape 'batch_size x length x feature_size'
        """
        # Layers for linearly projecting the queries, keys, and values.
        q = tf.layers.dense(queries, self.hidden_size, use_bias=False, name="q")
        k = tf.layers.dense(keys, self.hidden_size, use_bias=False, name="k")
        v = tf.layers.dense(keys, self.hidden_size, use_bias=False, name="v")
        # Split q, k, v into heads.
        q = self.split_heads(q)
        k = self.split_heads(k)
        v = self.split_heads(v)
        # -> [batch_size, num_heads, length, hidden_size/num_heads]
        # Batch norm logits instead of scaling "q":
        logits = tf.matmul(q, k, transpose_b=True)
        logits = slim.batch_norm(
            logits,
            center=True,
            scale=True,
            is_training=self.is_train,
            scope="logits_bn")
        weights = tf.nn.softmax(logits, name="attention_weights")
        attention_output = tf.matmul(weights, v)
        # -> batch_size x length x hidden_size]
        attention_output = self.combine_heads(attention_output)
        # Batch-normalize the recombined heads before the output projection.
        attention_output = slim.batch_norm(
            attention_output,
            center=True,
            scale=True,
            is_training=self.is_train,
            scope="attention_bn")
        attention_output = tf.layers.dense(attention_output,
                                           self.feature_size,
                                           use_bias=True, name="output_transform")
        return attention_output
class FeedForwardNetwork(modules.BaseModule):
    """ Feed Forward Network with residual connection and layer normalization. """

    def __init__(self, feature_size, filter_size, relu_dropout,
                 is_train, scope_id):
        """ Initialize class FeedForwardNetwork.
        :param feature_size: int, input/output dimensionality
        :param filter_size: int, hidden (expansion) dimensionality
        :param relu_dropout: float, dropout rate (currently unused; see forward)
        :param is_train: bool
        :param scope_id: String, suffix used to name the dense layers
        """
        self.feature_size = feature_size
        self.filter_size = filter_size
        self.relu_dropout = relu_dropout
        self.is_train = is_train
        self.scope_id = scope_id

    def forward(self, inputs, **unused_params):
        """ Forward method for FeedForwardNetwork.
        :param inputs: 3D Tensor with size 'batch_size x num_feature x feature_size'
        :return: 3D Tensor with size 'batch_size x num_feature x feature_size'
        """
        filter_output = tf.layers.dense(inputs, self.filter_size,
                                        use_bias=True,
                                        activation=tf.nn.relu,
                                        name="filter_output{}".format(self.scope_id))
        # Dropout deliberately left disabled; relu_dropout is kept for
        # interface compatibility.
        # if self.is_train:
        #     filter_output = tf.nn.dropout(filter_output, 1.0 - self.relu_dropout)
        output = tf.layers.dense(filter_output, self.feature_size,
                                 use_bias=True,
                                 activation=tf.nn.relu,
                                 name="ff_output{}".format(self.scope_id))
        # Residual connection followed by layer normalization.
        output = output + inputs
        output = tf.contrib.layers.layer_norm(output)
        return output
class FeedForwardNetworkMod(modules.BaseModule):
    """ Feed Forward Network variant: batch norm after each dense layer, a
    configurable output size (final_size), and no residual connection. """

    def __init__(self, feature_size, filter_size, relu_dropout,
                 is_train, scope_id, final_size):
        """ Initialize class FeedForwardNetworkMod.
        :param feature_size: int, input dimensionality (stored; the output
            projection uses final_size instead)
        :param filter_size: int, hidden (expansion) dimensionality
        :param relu_dropout: float (stored; not used in forward)
        :param is_train: bool, controls the batch-norm moving statistics
        :param scope_id: String, suffix used to name the dense layers
        :param final_size: int, output dimensionality
        """
        self.feature_size = feature_size
        self.filter_size = filter_size
        self.relu_dropout = relu_dropout
        self.is_train = is_train
        self.scope_id = scope_id
        self.final_size = final_size

    def forward(self, inputs, **unused_params):
        """ Forward method for FeedForwardNetworkMod.
        :param inputs: 3D Tensor with size 'batch_size x num_feature x feature_size'
        :return: 3D Tensor with size 'batch_size x num_feature x final_size'
        """
        filter_output = tf.layers.dense(inputs, self.filter_size,
                                        use_bias=True,
                                        activation=tf.nn.relu,
                                        name="filter_output{}".format(self.scope_id))
        # Batch norm replaces the dropout/residual scheme of FeedForwardNetwork.
        filter_output = slim.batch_norm(
            filter_output,
            center=True,
            scale=True,
            is_training=self.is_train,
            scope="filter_bn")
        output = tf.layers.dense(filter_output,
                                 self.final_size,
                                 use_bias=True,
                                 activation=tf.nn.relu,
                                 name="ff_output{}".format(self.scope_id))
        output = slim.batch_norm(
            output,
            center=True,
            scale=True,
            is_training=self.is_train,
            scope="feed_output_bn")
        return output
import pyperclip
import json
from tkinter import *
from tkinter import messagebox
from password_gen import generate_password
# ---------------------------- PASSWORD GENERATOR ------------------------------- #
def gen_password():
    """Generate a password, show it in the password field, copy to clipboard."""
    new_password = generate_password()
    inp_password.delete(0, END)
    inp_password.insert(0, new_password)
    pyperclip.copy(new_password)
# ---------------------------- SAVE PASSWORD ------------------------------- #
def save_password():
    """Validate the form, merge the new entry into day30/data.json, reset UI.

    Warns instead of saving when any field is empty.  Missing or corrupt
    JSON files are recreated from scratch with just this entry.
    """
    website = inp_website.get()
    user = inp_user.get()
    password = inp_password.get()
    new_data = {website: {"email": user, "password": password}}
    # Idiomatic emptiness checks (was: len(x) == 0).
    if not website or not user or not password:
        messagebox.showwarning(title="Oops...", message="Please don't leave any fields empty!")
    else:
        data_path = "day30/data.json"
        try:
            with open(data_path, mode="r") as file:
                data = json.load(file)
        except (FileNotFoundError, json.decoder.JSONDecodeError):
            # No usable store yet: create one containing only this entry.
            # (Removed the unused `as e` exception binding.)
            with open(data_path, mode="w") as file:
                json.dump(new_data, file, indent=4)
        else:
            data.update(new_data)
            with open(data_path, mode="w") as file:
                json.dump(data, file, indent=4)
        finally:
            reset_ui()
def reset_ui():
    """Clear all entry fields, refocus the website field and pre-fill the
    default email address."""
    inp_website.delete(0, END)
    inp_website.focus()
    inp_user.delete(0, END)
    inp_user.insert(0, "dienst@mail.org")
    inp_password.delete(0, END)
def search_passwd():
    """Look up the entered website in day30/data.json and show its credentials.

    Resolves the in-code TODO ("bad style, switch to if/else"): the inner
    try/except KeyError is replaced by a dict lookup, and the repeated
    inp_website.get() calls are hoisted into one variable.
    """
    website = inp_website.get()
    try:
        with open("day30/data.json", mode="r") as file:
            data = json.load(file)
    except FileNotFoundError:
        # Typo fix in the user-facing message ("Safe" -> "Save").
        messagebox.showerror(title="File not found", message="The data file was not found! Save your first password...")
    else:
        website_data = data.get(website)
        if website_data is None:
            messagebox.showwarning(title="Not found", message="No details for the website exists!")
        else:
            messagebox.showinfo(title=website, message=f"User: {website_data['email']} \nPassword: {website_data['password']}")
# ---------------------------- UI SETUP ------------------------------- #
app = Tk()
app.title("PyPa-Manager")
app.config(padx=30, pady=30)
#app.resizable(False, False)

# Logo canvas.  NOTE(review): the image is loaded from day29/ while the
# password data lives under day30/ — confirm the path is intentional.
canvas = Canvas(width=200, height=200, highlightthickness=0)
logo = PhotoImage(file="day29/logo.png")
canvas.create_image(100, 100, image=logo)
canvas.grid(column=1, row=0)

# Row 1: website entry plus search button.
lbl_website = Label(text="Website:")
lbl_website.grid(column=0, row=1, pady=(5, 0), sticky="e")
inp_website = Entry(width=30)
inp_website.grid(column=1, row=1, pady=(5, 0), sticky="w")
btn_search = Button(text="Search", width=8, command=search_passwd)
btn_search.grid(column=2, row=1, pady=(5, 0), sticky="e")

# Row 2: email/username entry (spans two grid columns).
lbl_user = Label(text="Email/Username:")
lbl_user.grid(column=0, row=2, pady=(5, 0), sticky="e")
inp_user = Entry(width=43)
inp_user.grid(column=1, row=2, pady=(5, 0), columnspan=2, sticky="w")

# Row 3: password entry plus generator button.
lbl_password = Label(text="Password:")
lbl_password.grid(column=0, row=3, pady=(5, 0), sticky="e")
inp_password = Entry(width=30)
inp_password.grid(column=1, row=3, pady=(5, 0), sticky="w")
btn_password = Button(text="Generate", width=8, command=gen_password)
btn_password.grid(column=2, row=3, pady=(5, 0), sticky="e")

# Row 4: add/save button.
btn_add = Button(text="Add", width=36, command=save_password)
btn_add.grid(column=1, row=4, pady=(10, 0), columnspan=2, sticky="e")

# Prime the form with defaults, then enter the event loop.
reset_ui()
app.mainloop()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file createNet.py
@author Michael Behrisch
@author Daniel Krajzewicz
@date 2008-07-04
@version $Id$
Create the XML input files for the generation of the SUMO network
of the CityMobil parking lot.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2008-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import random
import subprocess
from constants import *
# Counter of parking slots pre-occupied by a parked car (randomized below).
occupied = 0
# Open the four SUMO plain-XML input files.
nodes = open("%s.nod.xml" % PREFIX, "w")
print("<nodes>", file=nodes)
edges = open("%s.edg.xml" % PREFIX, "w")
print("<edges>", file=edges)
connections = open("%s.con.xml" % PREFIX, "w")
print("<connections>", file=connections)
routes = open("%s.rou.xml" % PREFIX, "w")
print("""<routes>
    <vType id="car" length="3" minGap=".5" guiShape="passenger" maxSpeed="50" color="0.7,0.7,0.7"/>
    <vType id="person" length=".25" minGap="0" guiShape="pedestrian" width=".25" maxSpeed="5" color="1,0.2,0.2"/>
    <vType id="cybercar" length="%s" minGap="1" guiShape="evehicle" maxSpeed="%s" color="0,1,0" emissionClass="HBEFA2/P_7_7"/>""" % (CYBER_LENGTH, CYBER_SPEED), file=routes)
# streets
# Main access road: in -> main-0 -> main0-1 -> ... -> out along y=0.
nodeID = "main-0"
print('<node id="in" x="-100" y="0"/>', file=nodes)
print('<node id="%s" x="0" y="0"/>' % nodeID, file=nodes)
print('<edge id="mainin" from="in" to="%s" numLanes="2" spreadType="center"/>' %
      nodeID, file=edges)
for row in range(DOUBLE_ROWS):
    nextNodeID = "main%s-%s" % (row, row + 1)
    if row + 1 == DOUBLE_ROWS:
        nextNodeID = "main%s-" % row
    x = (row + 1) * ROW_DIST
    print('<node id="%s" x="%s" y="0"/>' % (nextNodeID, x), file=nodes)
    print('<edge id="main%s" from="%s" to="%s" numLanes="2" spreadType="center"/>' % (
        row, nodeID, nextNodeID), file=edges)
    nodeID = nextNodeID
print('<node id="out" x="%s" y="0"/>' % (x + 100), file=nodes)
print('<edge id="mainout" from="%s" to="out" numLanes="2" spreadType="center"/>' %
      nodeID, file=edges)
# Perpendicular access roads, one per row boundary, split into slot-length
# segments so that each parking slot has its own junction node.
roads = ["road-0"]
for row in range(DOUBLE_ROWS - 1):
    roads.append("road%s-%s" % (row, row + 1))
roads.append("road%s-" % (row + 1))
for idx, road in enumerate(roads):
    nodeID = road.replace("road", "main")
    for slot in range(SLOTS_PER_ROW):
        partID = "%s-%s" % (road, slot)
        print('<node id="%st" x="%s" y="%s"/>' % (
            partID, idx * ROW_DIST, (slot + 1) * SLOT_WIDTH), file=nodes)
        print('<edge id="%s" from="%s" to="%st" numLanes="2" spreadType="center"/>' % (
            partID, nodeID, partID), file=edges)
        print('<edge id="-%s" from="%st" to="%s" numLanes="2" spreadType="center"/>' % (
            partID, partID, nodeID), file=edges)
        nodeID = "%st" % partID
# Parking slots: each row boundary serves slots on its left ("l") and the
# next boundary serves the facing slots on its right ("r").
for row in range(DOUBLE_ROWS):
    for slot in range(SLOTS_PER_ROW):
        slotID = "slot%s-%sl" % (row, slot)
        source = "%s-%st" % (roads[row], slot)
        print('<node id="%st" x="%s" y="%s"/>' % (
            slotID, row * ROW_DIST + SLOT_LENGTH, (slot + 1) * SLOT_WIDTH), file=nodes)
        print('<edge id="%s" from="%s" to="%st" spreadType="center"/>' % (
            slotID, source, slotID), file=edges)
        print('<edge id="-%s" from="%st" to="%s" spreadType="center"/>' % (
            slotID, slotID, source), file=edges)
        slotID = "slot%s-%sr" % (row, slot)
        source = "%s-%st" % (roads[row + 1], slot)
        print('<node id="%st" x="%s" y="%s"/>' % (
            slotID, (row + 1) * ROW_DIST - SLOT_LENGTH, (slot + 1) * SLOT_WIDTH), file=nodes)
        print('<edge id="%s" from="%s" to="%st" spreadType="center"/>' % (
            slotID, source, slotID), file=edges)
        print('<edge id="-%s" from="%st" to="%s" spreadType="center"/>' % (
            slotID, slotID, source), file=edges)
# footpaths
# Pedestrian network one slot-width above the top slot row; mirrors the road
# layout with a main footpath and per-slot stubs ("r"/"t"/"l" variants).
y = (SLOTS_PER_ROW + 1) * SLOT_WIDTH
print('<node id="foot" x="-100" y="%s"/>' % y, file=nodes)
print(
    '<edge id="footmainin" from="foot" to="foot0" speed="5" spreadType="center"/>', file=edges)
for row in range(DOUBLE_ROWS):
    nodeID = "foot%s" % row
    x = row * ROW_DIST + ROW_DIST / 2
    print('<node id="%s" x="%s" y="%s"/>' % (nodeID, x, y), file=nodes)
    if row > 0:
        edgeID = "footmain%sto%s" % (row - 1, row)
        print('<edge id="%s" from="foot%s" to="foot%s" speed="5" spreadType="center"/>' % (
            edgeID, row - 1, row), file=edges)
    # Walk down the footpath column from the top slot towards the main road.
    for slot in reversed(range(SLOTS_PER_ROW)):
        slotID = "foot%s-%s" % (row, slot)
        print('<node id="%s" x="%s" y="%s"/>' % (
            slotID, x, (slot + 1) * SLOT_WIDTH), file=nodes)
        print('<edge id="%sto%s" from="%s" to="%s" speed="5" spreadType="center"/>' % (
            nodeID, slot, nodeID, slotID), file=edges)
        print('<edge id="-%sto%s" from="%s" to="%s" speed="5" spreadType="center"/>' % (
            nodeID, slot, slotID, nodeID), file=edges)
        # Short side stubs to the right and left of the footpath node that
        # pedestrians use to reach the parked cars.
        print('<node id="%srt" x="%s" y="%s"/>' % (
            slotID, x + SLOT_FOOT_LENGTH, (slot + 1) * SLOT_WIDTH), file=nodes)
        print('<edge id="%sr" from="%s" to="%srt" spreadType="center"/>' % (
            slotID, slotID, slotID), file=edges)
        print('<edge id="-%sr" from="%srt" to="%s" spreadType="center"/>' % (
            slotID, slotID, slotID), file=edges)
        print('<node id="%slt" x="%s" y="%s"/>' % (
            slotID, x - SLOT_FOOT_LENGTH, (slot + 1) * SLOT_WIDTH), file=nodes)
        print('<edge id="%sl" from="%s" to="%slt" spreadType="center"/>' % (
            slotID, slotID, slotID), file=edges)
        print('<edge id="-%sl" from="%slt" to="%s" spreadType="center"/>' % (
            slotID, slotID, slotID), file=edges)
        nodeID = slotID
        vSlot = slotID.replace("foot", "slot")
        # Randomly pre-occupy the right/left slot with a parked car; otherwise
        # emit a flow of persons walking to the slot (waiting for pickup).
        if random.uniform(0, 1) < OCCUPATION_PROBABILITY:
            occupied += 1
            print("""\
    <vehicle id="v%sr" type="car" depart="0" departPos="0">
        <route edges="%sr -%sr"/>
    </vehicle>""" % (vSlot, vSlot, vSlot), file=routes)
        else:
            print("""\
    <flow id="p%sr" type="person" begin="0" period="1" number="%s">
        <route edges="%sr -%sr"/>
    </flow>""" % (slotID, CAR_CAPACITY, slotID, slotID), file=routes)
        if random.uniform(0, 1) < OCCUPATION_PROBABILITY:
            occupied += 1
            print("""\
    <vehicle id="v%sl" type="car" depart="0" departPos="0">
        <route edges="%sl -%sl"/>
    </vehicle>""" % (vSlot, vSlot, vSlot), file=routes)
        else:
            print("""\
    <flow id="p%sl" type="person" begin="0" period="1" number="%s">
        <route edges="%sl -%sl"/>
    </flow>""" % (slotID, CAR_CAPACITY, slotID, slotID), file=routes)
# Footpath exit and the "fair" entry point feeding pedestrians in.
x = DOUBLE_ROWS * ROW_DIST + ROW_DIST / 2
print('<node id="foot%s" x="%s" y="%s"/>' % (DOUBLE_ROWS, x, y), file=nodes)
edgeID = "footmain%sto%s" % (DOUBLE_ROWS - 1, DOUBLE_ROWS)
print('<edge id="%s" from="foot%s" to="foot%s" speed="5" spreadType="center"/>' % (
    edgeID, DOUBLE_ROWS - 1, DOUBLE_ROWS), file=edges)
print('<node id="footend" x="%s" y="%s"/>' % (x + 100, y), file=nodes)
print('<edge id="footmainout" from="foot%s" to="footend" speed="5" spreadType="center"/>' %
      DOUBLE_ROWS, file=edges)
print('<node id="fair" x="%s" y="%s"/>' % (x + 100, y - 10), file=nodes)
print('<edge id="footfairin" from="fair" to="foot%s" speed="5" spreadType="center"/>' %
      DOUBLE_ROWS, file=edges)
# cybercar (automated bus)
# Bidirectional shuttle track above the footpaths, with explicit lane-to-lane
# connections so the shuttle can traverse the whole row of stops.
y = (SLOTS_PER_ROW + 3) * SLOT_WIDTH
print('<node id="cyber" x="-100" y="%s"/>' % y, file=nodes)
print(
    '<edge id="cyberin" from="cyber" to="cyber0" numLanes="2" spreadType="center"/>', file=edges)
print(
    '<edge id="-cyberin" from="cyber0" to="cyber" numLanes="2" spreadType="center"/>', file=edges)
for row in range(DOUBLE_ROWS + 1):
    nodeID = "cyber%s" % row
    x = row * ROW_DIST + ROW_DIST / 2
    print('<node id="%s" x="%s" y="%s"/>' % (nodeID, x, y), file=nodes)
    if row > 0:
        edgeID = "cyber%sto%s" % (row - 1, row)
        print('<edge id="%s" from="cyber%s" to="cyber%s" numLanes="2" spreadType="center"/>' % (
            edgeID, row - 1, row), file=edges)
        print('<edge id="-%s" from="cyber%s" to="cyber%s" numLanes="2" spreadType="center"/>' % (
            edgeID, row, row - 1), file=edges)
        # Chain each segment to its successor (or to the exit edge at the end)
        # in both driving directions.
        if row < DOUBLE_ROWS:
            print('<connection from="%s" to="cyber%sto%s"/>' % (
                edgeID, row, row + 1), file=connections)
            print('<connection from="-cyber%sto%s" to="-%s"/>' % (
                row, row + 1, edgeID), file=connections)
        else:
            print('<connection from="%s" to="cyberout"/>' %
                  edgeID, file=connections)
            print('<connection from="-cyberout" to="-%s"/>' %
                  edgeID, file=connections)
print('<node id="cyberend" x="%s" y="%s"/>' % (x + 100, y), file=nodes)
print('<edge id="cyberout" from="cyber%s" to="cyberend" numLanes="2" spreadType="center"/>' %
      row, file=edges)
print('<edge id="-cyberout" from="cyberend" to="cyber%s" numLanes="2" spreadType="center"/>' %
      row, file=edges)
# Close the plain-XML files and build the network with netconvert.
print("</nodes>", file=nodes)
nodes.close()
print("</edges>", file=edges)
edges.close()
print("</connections>", file=connections)
connections.close()
subprocess.call([NETCONVERT,
                 '--no-internal-links',
                 '-n', '%s.nod.xml' % PREFIX,
                 '-e', '%s.edg.xml' % PREFIX,
                 '-x', '%s.con.xml' % PREFIX,
                 '-o', '%s.net.xml' % PREFIX])
# Shuttle fleet: big busses first, the remainder (in a separate route file)
# as smaller cybercars so either fleet can be loaded on its own.
numBusses = TOTAL_CAPACITY // BUS_CAPACITY
print("""    <flow id="b" type="cybercar" begin="0" period="100" number="%s">
        <route edges="cyberin"/>
    </flow>
</routes>""" % numBusses, file=routes)
routes.close()
routes = open("%s_cyber.rou.xml" % PREFIX, "w")
# BUG FIX: use floor division (as for numBusses above); with true division
# the flow "number" attribute is written as a float (e.g. "2.0") on Python 3,
# which is not a valid vehicle count.
print("""<routes>
    <flow id="c" type="cybercar" begin="50" period="100" number="%s">
        <route edges="cyberin"/>
    </flow>
</routes>""" % (TOTAL_CAPACITY // CYBER_CAPACITY - numBusses), file=routes)
routes.close()
# Additional infrastructure: bus stops along the shuttle track plus an
# aggregated emissions dump.
stops = open("%s.add.xml" % PREFIX, "w")
print("<additional>", file=stops)
for row in range(DOUBLE_ROWS):
    edgeID = "cyber%sto%s" % (row, row + 1)
    print('    <busStop id="%sstop" lane="%s_0"' %
          (edgeID, edgeID), end=' ', file=stops)
    # Stop long enough for two shuttle vehicles.
    print('startPos="%s" endPos="%s"/>' % (
        STOP_POS - 2 * CYBER_LENGTH - 1, STOP_POS), file=stops)
for edge in ["cyberin", "cyberout"]:
    print('    <busStop id="%sstop" lane="%s_0"' %
          (edge, edge), end=' ', file=stops)
    print('startPos="%s" endPos="%s"/>' % (
        90 - 2 * CYBER_LENGTH - 1, 90), file=stops)
print(
    '    <edgeData id="dump" freq="3600" file="aggregated.xml" excludeEmpty="true" type="emissions"/>', file=stops)
print("</additional>", file=stops)
stops.close()
# Demand scenarios: one route file and two sumocfg files per departure
# period, plus batch files that run the simple/agent managers on each.
totalSlots = 2 * DOUBLE_ROWS * SLOTS_PER_ROW
bat = open("%s.bat" % PREFIX, "w")
breakbat = open("%s_break.bat" % PREFIX, "w")
for period in range(5, 50, 5):
    routes = open("%s_demand%02i.rou.xml" % (PREFIX, period), "w")
    print("<routes>", file=routes)
    # Arriving cars fill the remaining free slots; persons arrive for the
    # cars that were placed into slots at network-generation time.
    if occupied < totalSlots:
        print("""    <flow id="v" type="car" begin="10" period="%s" number="%s">
        <route edges="mainin"/>
    </flow>""" % (period, totalSlots - occupied), file=routes)
    if occupied > 0:
        print("""    <flow id="p" type="person" begin="10" period="%s" number="%s">
        <route edges="footfairin"/>
    </flow>""" % (period, occupied * CAR_CAPACITY), file=routes)
    print("</routes>", file=routes)
    routes.close()
    config = open("%s%02i.sumocfg" % (PREFIX, period), "w")
    print("""<configuration>
    <input>
        <net-file value="%s.net.xml"/>
        <route-files value="%s.rou.xml,%s_demand%02i.rou.xml"/>
        <additional-files value="%s.add.xml"/>
        <no-step-log value="True"/>
        <time-to-teleport value="0"/>
        <remote-port value="%s"/>
    </input>
</configuration>""" % (PREFIX, PREFIX, PREFIX, period, PREFIX, PORT), file=config)
    config.close()
    print("simpleManager.py -d %s" % period, file=bat)
    print("simpleManager.py -b 120 -d %s" % period, file=breakbat)
    # Second configuration adds the cybercar fleet route file.
    config = open("%s%02i_cyber.sumocfg" % (PREFIX, period), "w")
    print("""<configuration>
    <input>
        <net-file value="%s.net.xml"/>
        <route-files value="%s.rou.xml,%s_cyber.rou.xml,%s_demand%02i.rou.xml"/>
        <additional-files value="%s.add.xml"/>
        <no-step-log value="True"/>
        <time-to-teleport value="0"/>
        <remote-port value="%s"/>
    </input>
</configuration>""" % (PREFIX, PREFIX, PREFIX, PREFIX, period, PREFIX, PORT), file=config)
    config.close()
    print("agentManager.py -c -d %s" % period, file=bat)
    print("agentManager.py -c -b 120 -d %s" % period, file=breakbat)
bat.close()
breakbat.close()
|
# Import modules
import numpy as np
import pandas as pd
from pandas_profiling import ProfileReport
print("Setup Complete")
# Load the data from the tsv file in a dataframe
file_path = "/home/apprenant/simplon_projects/foodflix/data/01_raw/en.openfoodfacts.org.products.tsv"
df = pd.read_csv(file_path, sep='\t')
# Takes a sample of 10000 rows from the dataframe
# (fixed random_state so the report is reproducible across runs)
df_sample = df.sample(n=10000, random_state=1)
profile = ProfileReport(df_sample, title='Pandas Profiling Report')
# Saves the report to a html page
profile.to_file("results/profiling_raw.html")
from __future__ import absolute_import
from ... import wesanderson as wap
def test_print_maps(capsys):
    """print_maps lists every palette; the first line is the Aquatic1 entry."""
    wap.print_maps()
    captured = capsys.readouterr()
    first_line = captured.out.split('\n')[0]
    assert first_line == 'Aquatic1 qualitative 5'
def test_get_map():
    """get_map is case-insensitive and returns a 5-colour qualitative map."""
    pal = wap.get_map('cavalcanTi')
    assert isinstance(pal, wap.wesanderson.WesAndersonMap)
    assert pal.name == 'Cavalcanti'
    assert pal.type == 'qualitative'
    assert len(pal.colors) == 5
    expected_url = ('http://wesandersonpalettes.tumblr.com/post/'
                    '79348553036/castello-cavalcanti-how-can-i-help')
    assert pal.url == expected_url
def test_get_map_reversed():
    """Reversed lookup appends '_r' to the name but keeps the metadata."""
    pal = wap.get_map('cavalcanTi', reverse=True)
    assert isinstance(pal, wap.wesanderson.WesAndersonMap)
    assert pal.name == 'Cavalcanti_r'
    assert pal.type == 'qualitative'
    assert len(pal.colors) == 5
    expected_url = ('http://wesandersonpalettes.tumblr.com/post/'
                    '79348553036/castello-cavalcanti-how-can-i-help')
    assert pal.url == expected_url
def test_palettes_loaded():
    """Palettes are exposed as module attributes in both orientations."""
    for palette in (wap.Cavalcanti_5, wap.Cavalcanti_5_r):
        assert isinstance(palette, wap.wesanderson.WesAndersonMap)
    assert wap.Cavalcanti_5.type == 'qualitative'
def test_get_all_maps():
    # Smoke tests.
    # Only checks that the private loader runs without raising.
    wap._get_all_maps()
|
import tensorflow as tf
from grace_dl.tensorflow import Compressor
class FP16Compressor(Compressor):
    """Compress all floating point gradients to 16-bit."""

    def compress(self, tensor, name):
        """Downcast *tensor* to float16.

        Returns:
            ([tensor_compressed], ctx): the (possibly) downcast tensor wrapped
            in a one-element list, plus the original dtype as the
            decompression context.
        """
        tensor_compressed = tensor
        if tensor.dtype.is_floating:
            # Only allow compression from other floating point types;
            # integer/bool gradients pass through unchanged.
            tensor_compressed = tf.cast(tensor, dtype=tf.float16)
        return [tensor_compressed], tensor.dtype

    def decompress(self, tensor, ctx):
        """Upcast the tensor back to its original dtype (stored in *ctx*)."""
        tensor_decompressed, = tensor  # compress() wraps the tensor in a list
        dtype = ctx
        if dtype.is_floating:
            # BUG FIX: cast the unpacked tensor, not the enclosing list --
            # tf.cast on the list converted it to a tensor with a spurious
            # leading dimension of size 1.
            tensor_decompressed = tf.cast(tensor_decompressed, dtype=dtype)
        return tensor_decompressed
|
"""
Tests for the coord module
"""
import os
import sys
import pytest
# Add our folder to the system path so python can find our code.
current_location = os.path.dirname(__file__)
sys.path.append(os.path.join(current_location, '..'))
from mcsim.coordinates import calculate_distance
def test_calculate_distance():
    """Points one unit apart along x are at distance 1 (no periodic box)."""
    origin = [0, 0, 0]
    unit_x = [1, 0, 0]
    assert calculate_distance(origin, unit_x) == 1
def test_calculate_distance2():
    """With a box length of 10, points 8 apart wrap to a distance of 2."""
    origin = [0, 0, 0]
    far_point = [0, 0, 8]
    assert calculate_distance(origin, far_point, box_length=10) == 2
@pytest.mark.parametrize(
    "point1, point2, expected_distance, box_length",
    [
        ([0, 0, 0], [1, 0, 0], 1, None),  # unit separation, no periodic box
        ([0, 0, 0], [8, 0, 0], 8, None),  # plain Euclidean distance
        ([0, 0, 0], [8, 0, 0], 2, 10),    # wraps around the periodic box
    ],
)
def test_calculate_distance_many(point1, point2, expected_distance, box_length):
    """calculate_distance honours the optional periodic box length."""
    result = calculate_distance(point1, point2, box_length=box_length)
    assert result == expected_distance
from __future__ import annotations
from app.constants import WINWIDTH
from app.engine import engine
from ..ui_framework import HAlignment, UIComponent
from ..ui_framework_layout import ListLayoutStyle, UILayoutType
from ..ui_framework_styling import UIMetric
from .text_component import TextComponent
class IconRow(UIComponent):
    """A UI row consisting of an icon followed by a single line of text.

    The row lays its children out horizontally (list layout) when the text is
    left-aligned; `data` is an opaque payload associated with the row.
    """
    def __init__(self, name: str = None, parent: UIComponent = None,
                 width: str = '100%', height: str = '0%', text: str = '',
                 icon: engine.Surface | UIComponent = None,
                 text_align: HAlignment = HAlignment.LEFT,
                 font: str = 'text-white', data=None):
        super().__init__(name=name, parent=parent)
        # Only left-aligned rows use the horizontal list layout; other
        # alignments keep the component's default layout.
        if text_align == HAlignment.LEFT:
            self.props.layout = UILayoutType.LIST
            self.props.list_style = ListLayoutStyle.ROW
        self.data = data
        self.text = TextComponent(text, text)
        self.text.props.font_name = font
        self.text.props.h_alignment = text_align
        self.text.max_width = WINWIDTH # ensure that text is always on one line
        # Normalize whatever was passed for `icon` into a UIComponent.
        self.icon: UIComponent = self.process_icon(icon)
        # Row height is at least the icon height, even if `height` is smaller.
        parsed_height = UIMetric.parse(height).to_pixels(self.parent.height)
        self.size = (width, max(parsed_height, self.icon.height))
        self.add_child(self.icon)
        self.add_child(self.text)
    def _reset(self, reason: str = None):
        # Re-check the font (wide vs. narrow) before the framework reset.
        self.update_font()
        super()._reset(reason=reason)
    def update_font(self):
        """Switch to the narrow font variant when icon+text overflow the row."""
        total_width = self.icon.width + self.text.width
        if total_width > self.max_width:
            # Font names look like '<family>-<color>'; keep the color part.
            [_, fcolor] = self.text.props.font_name.split('-')
            new_font_name = 'narrow-' + fcolor
            self.text.set_font(new_font_name)
    def process_icon(self, icon: UIComponent | engine.Surface | None) -> UIComponent:
        """Coerce `icon` (component, raw surface, or None) into a UIComponent."""
        if isinstance(icon, UIComponent):
            return icon
        elif isinstance(icon, engine.Surface):
            return UIComponent.from_existing_surf(icon)
        else:
            # No icon: zero-width transparent placeholder as tall as the text.
            return UIComponent.from_existing_surf(engine.create_surface((0, self.text.height), True))
    def get_text_topleft(self):
        # Positions come from the layout handler; index 1 is the text
        # component (the icon is child 0).
        return self.layout_handler.generate_child_positions(True)[1]
    def set_icon(self, icon: engine.Surface):
        """Replace the icon and rebuild the child list (icon first)."""
        self.icon = self.process_icon(icon)
        self.children.clear()
        self.add_child(self.icon)
        self.add_child(self.text)
|
import logging
from pathlib import (
Path,
)
import coloredlogs
import jinete as jit
# Shared logging setup: colorized INFO-level logs for the whole example.
level = logging.INFO
logging.basicConfig(level=level)
coloredlogs.install(level=level)
logger = logging.getLogger(__name__)
# Repository layout: <repo>/res/datasets holds the benchmark instances.
BASE_PATH = Path(__file__).parents[2]
DATASETS_PATH = BASE_PATH / "res" / "datasets"
def main():
    """Solve one cordeau-laporte instance with GRASP and store the result."""
    logger.info("Starting...")
    file_path = DATASETS_PATH / "cordeau-laporte" / "a2-16.txt"
    solver = jit.Solver(
        loader_kwargs={"file_path": file_path},
        algorithm=jit.GraspAlgorithm,
        # 5 GRASP episodes; the first solution uses 1 episode with a
        # randomized candidate list of size 2.
        algorithm_kwargs={"first_solution_kwargs": {"episodes": 1, "randomized_size": 2}, "episodes": 5},
        # Report to the prompt and draw the solution graph.
        storer=jit.StorerSet,
        storer_kwargs={"storer_cls_set": {jit.PromptStorer, jit.GraphPlotStorer}},
    )
    result = solver.solve()  # noqa
    logger.info("Finished...")
if __name__ == "__main__":
    main()
|
import tensorflow as tf
# Build a small static graph of constant integer arithmetic (TF 1.x API):
# d = a*b, e = c+b, f = d-e  =>  with a=5, b=2, c=3: d=10, e=5, f=5.
a = tf.constant(5)
b = tf.constant(2)
c = tf.constant(3)
d = tf.multiply(a, b)
e = tf.add(c, b)
f = tf.subtract(d, e)
# NOTE: Opening a session using the with clause will ensure the session is
# automatically closed once all computations are done.
with tf.Session() as sess:
    fetches = [a, b, c, d, e, f]
    # One run() evaluates all six nodes; outs preserves the fetches order.
    outs = sess.run(fetches)
    print("outs = {}".format(outs))
print(type(outs[0]))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import os
import numpy
import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .roi_mask_feature_extractors import make_roi_mask_feature_extractor
from .roi_mask_predictors import make_roi_mask_predictor
from .inference import make_roi_mask_post_processor
from .loss import make_roi_mask_loss_evaluator
def keep_only_positive_boxes(boxes):
    """
    Given a set of BoxList containing the `labels` field,
    return a set of BoxList for which `labels > 0`.

    Arguments:
        boxes (list of BoxList): per-image boxes, each carrying a "labels" field

    Returns:
        (list[BoxList], list[Tensor]): the positive-only boxes per image and,
        per image, the boolean mask selecting them from the original boxes.
    """
    assert isinstance(boxes, (list, tuple))
    assert isinstance(boxes[0], BoxList)
    assert boxes[0].has_field("labels")
    positive_boxes = []
    positive_inds = []
    # (removed unused local `num_boxes`)
    for boxes_per_image in boxes:
        labels = boxes_per_image.get_field("labels")
        inds_mask = labels > 0
        # nonzero() yields an (N, 1) index tensor; squeeze to a flat vector.
        inds = inds_mask.nonzero().squeeze(1)
        positive_boxes.append(boxes_per_image[inds])
        positive_inds.append(inds_mask)
    return positive_boxes, positive_inds
class ROIMaskHead(torch.nn.Module):
    """Mask branch of the R-CNN head: RoI features -> mask logits -> loss.

    This instrumented variant also dumps proposals, labels, mask logits and
    the loss as .npy files under ./new_dump/mask for offline debugging.
    """
    def __init__(self, cfg):
        super(ROIMaskHead, self).__init__()
        self.cfg = cfg.clone()
        self.feature_extractor = make_roi_mask_feature_extractor(cfg)
        self.predictor = make_roi_mask_predictor(cfg)
        self.post_processor = make_roi_mask_post_processor(cfg)
        self.loss_evaluator = make_roi_mask_loss_evaluator(cfg)
    def forward(self, features, proposals, targets=None):
        """
        Arguments:
            features (list[Tensor]): feature-maps from possibly several levels
            proposals (list[BoxList]): proposal boxes
            targets (list[BoxList], optional): the ground-truth targets.
        Returns:
            x (Tensor): the result of the feature extractor
            proposals (list[BoxList]): during training, the original proposals
            are returned. During testing, the predicted boxlists are returned
            with the `mask` field set
            losses (dict[Tensor]): During training, returns the losses for the
            head. During testing, returns an empty dict.
        """
        # Debug instrumentation target; created on first use.
        mask_save_dir = './new_dump/mask'
        if not os.path.exists(mask_save_dir):
            os.makedirs(mask_save_dir)
        if self.training:
            # during training, only focus on positive boxes
            all_proposals = proposals
            proposals, positive_inds = keep_only_positive_boxes(proposals)
            for i, proposals_per_im in enumerate(proposals):
                boxes = proposals_per_im.bbox
                labels = proposals_per_im.get_field('labels')
                # The tensor size is embedded in the dump file name.
                boxes_save_path = mask_save_dir + '/{}_proposals_boxes'.format(i) + '.' + str(boxes.size())
                numpy.save(boxes_save_path, boxes.cpu().detach().numpy())
                labels_save_path = mask_save_dir + '/{}_proposals_labels'.format(i) + '.' + str(labels.size())
                numpy.save(labels_save_path, labels.cpu().detach().numpy())
        if self.training and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            # Reuse the box head's features, keeping only the positive rows.
            x = features
            x = x[torch.cat(positive_inds, dim=0)]
        else:
            x = self.feature_extractor(features, proposals)
        mask_logits = self.predictor(x)
        mask_logits_save_path = mask_save_dir + '/mask_logits' + '.' + str(mask_logits.size())
        numpy.save(mask_logits_save_path, mask_logits.cpu().detach().numpy())
        if not self.training:
            result = self.post_processor(mask_logits, proposals)
            return x, result, {}
        loss_mask = self.loss_evaluator(proposals, mask_logits, targets)
        loss_save_path = mask_save_dir + '/loss' + '.' + str(loss_mask.size())
        numpy.save(loss_save_path, loss_mask.cpu().detach().numpy())
        return x, all_proposals, dict(loss_mask=loss_mask)
def build_roi_mask_head(cfg):
    """Factory: construct the ROIMaskHead for the given config."""
    return ROIMaskHead(cfg)
|
# -*- coding:utf-8 -*-
import tensorflow as tf
import time
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import mnist_train
EVAL_INTERVAL_SECS = 10
def evaluate(mnist):
    """Evaluation loop: restore the newest checkpoint and report accuracy.

    Rebuilds the inference graph without regularization, maps variables to
    their exponential moving averages, and every EVAL_INTERVAL_SECS seconds
    evaluates the latest checkpoint on the validation split.  Returns when no
    checkpoint is found.
    """
    x =tf.placeholder(
        tf.float32,
        [None,mnist_inference.INPUT_NODE],
        name = 'x-input')
    y_ = tf.placeholder(
        tf.float32,
        [None, mnist_inference.OUTPUT_NODE],
        name='y-input')
    validate_feed = {x:mnist.validation.images,
                     y_:mnist.validation.labels}
    # Forward pass only; no regularizer needed at evaluation time.
    y = mnist_inference.inference(x,None)
    correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
    # Evaluate with the shadow (moving-average) values of the weights.
    variable_averages = tf.train.ExponentialMovingAverage(
        mnist_train.MOVING_AVERAGE_DECAY)
    variable_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variable_to_restore)
    while True:
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(
                mnist_train.MODEL_SAVE_PATH
            )
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess,ckpt.model_checkpoint_path)
                # The global step is encoded in the checkpoint file name.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy,feed_dict=validate_feed)
                print("%s step,validation accuracy = %g"%(global_step,accuracy_score))
            else:
                print('No checkpoint file found')
                return
        time.sleep(EVAL_INTERVAL_SECS)
def main(argv=None):
    """Entry point: load MNIST and run the evaluation loop."""
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    # BUG FIX: this module defines evaluate(), not train(); calling the
    # undefined name train() raised NameError at runtime.
    evaluate(mnist)
if __name__ == '__main__':
    tf.app.run()
|
import re
import requests
from bs4 import BeautifulSoup
#Default reddit URLs look like this.
_reddit_url = re.compile(r'http(s)?://(www.)?reddit.com/r/learnprogramming')
def getURL(url):
    """Fetch *url* and return the response body as text.

    Raises:
        ValueError: if *url* is not a learnprogramming reddit URL.
        Exception: if reddit answers with a non-200 status code.
    """
    # Validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if not _reddit_url.match(url):
        raise ValueError("Not a learnprogramming reddit URL: {}".format(url))
    headers = {'User-Agent': 'Tarunz Reddit Search bot version 1.0'}
    r = requests.get(url, headers = headers)
    if r.status_code != 200:
        raise Exception("Non-okay status code. {}".format(r.status_code))
    return r.text
def parseRedditPost(html):
    """Return the text of the second `div.usertext-body` element.

    Index 1 skips the first usertext block -- presumably the submission body
    itself, so this yields the first comment; TODO confirm against a live
    page layout.
    """
    # Name the parser explicitly: relying on bs4's auto-detection makes the
    # parse tree depend on which parsers happen to be installed.
    bs = BeautifulSoup(html, 'html.parser')
    return bs.select('div.usertext-body')[1].text
|
"""
**************************************************************************
\file read_brdf.py
\author Alejandro Sztrajman
\date March 2018
\version 1.00
Script for the UTIA BRDF data loading and value interpolation
################################################################
run using:
python read_brdf.py
(BRDF bin files are assumed to be in subdir 'data/')
################################################################
Based on the code by Jiri Filip.
******************************************************************************
"""
import imageio
import sys
import os
import math
import numpy as np
import pdb
import struct
import matplotlib.pyplot as plt
class BRDF:
    """Loader and interpolator for UTIA BRDF database measurements.

    The binary files store `planes` colour planes of a 4D table indexed by
    (theta_i, phi_i, theta_v, phi_v) on a regular angular grid.
    """

    def __init__(self, step_t=15, step_p=7.5, nti=6, ntv=6, planes=3):  # FIXME: make all inputs the same units (radians or degrees)
        """Initialization of BRDF measurement parameters

        :param step_t (float): step of elevation angles theta_i, theta_v (degrees)
        :param step_p (float): step of azimuthal angles phi_i, phi_v (degrees)
        :param nti (int): number of theta_i directions
        :param ntv (int): number of theta_v directions
        :param planes (int): number of colour planes (3 for RGB)
        """
        self.step_t = step_t
        self.step_p = step_p
        self.nti = nti
        self.ntv = ntv
        self.npi = int(360.0/step_p)  # number of phi_i directions
        self.npv = int(360.0/step_p)  # number of phi_v directions
        self.planes = planes

    def load_brdf(self, fname):
        """Load .bin file from UTIA BRDF binary database

        :param fname (string): filename of .bin BRDF measurement file
        :returns: numpy 1D array with BRDF values
        """
        self.floats = np.fromfile(fname, dtype=np.float64)  # FIXME: maybe translate .bin files to .npy which is platform independent
        return self.floats

    def get_brdf_slice(self, theta_i, theta_v, res=None):
        """Azimuthal slice of the BRDF for fixed elevation angles.

        :param theta_i (float): incident light elevation angle (radians)
        :param theta_v (float): outgoing light elevation angle (radians)
        :param res ([int, int]): resolution / number of phi_i, phi_v directions
        :returns: numpy 3D array (res[0], res[1], 3) of interpolated values
        """
        if res is None:
            res = [self.npi, self.npv]
        refl = []
        for p_i in np.linspace(0, 2*np.pi, res[0]):
            for p_v in np.linspace(0, 2*np.pi, res[1]):
                # BUG FIX: call the method on self instead of the module-level
                # `brdf` global, which only exists when run as a script and
                # could even point at a different instance.
                refl += [self.lookup_brdf_val(theta_i, p_i, theta_v, p_v)]
        return np.array(refl).reshape(res[0], res[1], 3)

    def get_brdf_mosaic(self, res_theta=None, res_phi=None):
        """Mosaic of azimuthal slices, one patch per (theta_i, theta_v) pair.

        :param res_theta ([int, int]): number of theta_i, theta_v directions
        :param res_phi ([int, int]): number of phi_i, phi_v directions
        :returns: numpy 3D array with mosaic of BRDF values
        """
        if res_theta is None:
            res_theta = [self.nti, self.ntv]
        if res_phi is None:
            res_phi = [self.npi, self.npv]
        rows = []
        for t_i in np.linspace(0, np.pi/2, res_theta[0]):
            row = []
            for t_v in np.linspace(0, np.pi/2, res_theta[1]):
                slc = self.get_brdf_slice(t_i, t_v, res_phi)
                if len(row) == 0:
                    row = slc
                else:
                    row = np.hstack((row, slc))
            if len(rows) == 0:
                rows = row
            else:
                rows = np.vstack((rows, row))
        return rows

    def lookup_brdf_val(self, theta_i, phi_i, theta_v, phi_v):
        """Interpolated BRDF value for a single in/out direction pair.

        All four angles are in radians; elevations above pi/2 yield black.

        :returns: list of 3 floats (one per colour plane)
        """
        pi2 = np.pi/2.
        if (theta_i > pi2) or (theta_v > pi2):
            return [0, 0, 0]
        # Renamed from the misleading `d2r`: this converts radians -> degrees,
        # because the table grid is laid out in degrees.
        r2d = 180.0/np.pi
        theta_i *= r2d
        theta_v *= r2d
        phi_i *= r2d
        phi_v *= r2d
        if phi_i >= 360.0:
            phi_i = 0.0
        if phi_v >= 360.0:
            phi_v = 0.0
        # Bracketing grid indices for each of the four angles.
        iti, itv, ipi, ipv = [0, 0], [0, 0], [0, 0], [0, 0]
        iti[0] = int(math.floor(theta_i/self.step_t))
        iti[1] = iti[0]+1
        if iti[0] > self.nti-2:
            # Clamp to the last grid interval (extrapolates at the boundary).
            iti[0] = self.nti-2
            iti[1] = self.nti-1
        itv[0] = int(math.floor(theta_v/self.step_t))
        itv[1] = itv[0]+1
        if itv[0] > self.ntv-2:
            itv[0] = self.ntv-2
            itv[1] = self.ntv-1
        ipi[0] = int(math.floor(phi_i/self.step_p))
        ipi[1] = ipi[0]+1
        ipv[0] = int(math.floor(phi_v/self.step_p))
        ipv[1] = ipv[0]+1
        # Normalized interpolation weights for each bracketing index pair.
        # (`total` renamed from `sum`, which shadowed the builtin.)
        wti, wtv, wpi, wpv = [0, 0], [0, 0], [0, 0], [0, 0]
        wti[1] = theta_i - float(self.step_t*iti[0])
        wti[0] = float(self.step_t*iti[1]) - theta_i
        total = wti[0]+wti[1]
        wti[0] /= total
        wti[1] /= total
        wtv[1] = theta_v - float(self.step_t*itv[0])
        wtv[0] = float(self.step_t*itv[1]) - theta_v
        total = wtv[0]+wtv[1]
        wtv[0] /= total
        wtv[1] /= total
        wpi[1] = phi_i - float(self.step_p*ipi[0])
        wpi[0] = float(self.step_p*ipi[1]) - phi_i
        total = wpi[0]+wpi[1]
        wpi[0] /= total
        wpi[1] /= total
        wpv[1] = phi_v - float(self.step_p*ipv[0])
        wpv[0] = float(self.step_p*ipv[1]) - phi_v
        total = wpv[0]+wpv[1]
        wpv[0] /= total
        wpv[1] /= total
        # Azimuth wraps around: index npi/npv aliases index 0.
        if ipi[1] == self.npi:
            ipi[1] = 0
        if ipv[1] == self.npv:
            ipv[1] = 0
        nc = self.npv*self.ntv
        nr = self.npi*self.nti
        RGB = [0, 0, 0]
        # Quadri-linear blend over the 16 neighbouring samples per plane.
        for isp in range(self.planes):
            for i in range(2):
                for j in range(2):
                    for k in range(2):
                        for l in range(2):
                            idx = isp*nr*nc + nc*(self.npi*iti[i] + ipi[k]) + self.npv*itv[j] + ipv[l]
                            RGB[isp] += self.floats[idx] * wti[i]*wtv[j]*wpi[k]*wpv[l]
        return RGB
if __name__ == "__main__":
    binfile = 'data/m003_carpet01.bin'
    #Example 1 - Basic loading of one material and value lookup
    brdf = BRDF()
    brdf.load_brdf(binfile)
    d2r = np.pi/180.  # degrees -> radians
    theta_i = 50*d2r
    phi_i = 100*d2r
    theta_v = 50*d2r
    phi_v = 0*d2r
    rgb = brdf.lookup_brdf_val(theta_i, phi_i, theta_v, phi_v)
    print('RGB:', rgb)
    #Example 2 - restoring azimuthal subspace for fixed elevation angles theta_i=60/theta_v=60 and saving to PNG
    theta_i = 60*d2r
    theta_v = 60*d2r
    brdf = BRDF()
    brdf.load_brdf(binfile)
    slc = brdf.get_brdf_slice(theta_i, theta_v, res=[120, 120])
    # Clip to [0, 1] before quantizing to 8-bit.
    slc = np.where(slc < 0, 0, slc)
    slc = np.where(slc > 1.0, 1.0, slc)
    # NOTE(review): `write_image` / `writeEXR` are not part of the public
    # imageio package API (which exposes `imwrite`); presumably `imageio`
    # here is a local wrapper module -- verify.
    imageio.write_image(255*slc, binfile[:-4]+'.png')
    print('wrote file '+binfile[:-4]+'.png')
    #Example 3 - saving mosaic of BRDF slices to an EXR file (linear gamma RGB)
    #For more detail of the output see: http://btf.utia.cas.cz/?data_des and http://btf.utia.cas.cz/img/brdf/database/BRDF150_25.jpg
    brdf = BRDF()
    brdf.load_brdf(binfile)
    mosaic = brdf.get_brdf_mosaic()
    imageio.writeEXR(mosaic.astype(np.float32), binfile[:-4]+'.exr')
    print('wrote file '+binfile[:-4]+'.exr')
|
# coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
# -- This line is 75 characters -------------------------------------------
"""Empty Doc String""" # To Do: add documentation
# -------------------------------------------------------------------------
# built-ins
import os
import sys
#import simplejson as json
import json
# Lumberyard extensions
from azpy.env_bool import env_bool
from azpy.constants import ENVAR_DCCSI_GDEBUG
from azpy.constants import ENVAR_DCCSI_DEV_MODE
from azpy.constants import *
# 3rdparty (we provide)
from box import Box
from pathlib import Path
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# set up global space, logging etc.
_G_DEBUG = env_bool(ENVAR_DCCSI_GDEBUG, False)
_DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, False)
_PACKAGENAME = __name__
# BUG FIX: identity comparison (`is`) against a string literal depends on
# interpreter interning and emits a SyntaxWarning on Python 3.8+; use `==`.
if _PACKAGENAME == '__main__':
    _PACKAGENAME = 'DCCsi.SDK.substance.builder.atom_material'
import azpy
_LOGGER = azpy.initialize_logger(_PACKAGENAME)
# BUG FIX: `{_PACKAGENAME}` was a set literal, so the log line rendered as
# "Starting up: {'...'}." -- pass the name directly.
_LOGGER.debug('Starting up: {0}.'.format(_PACKAGENAME))
# -------------------------------------------------------------------------
# early attach WingIDE debugger (can refactor to include other IDEs later)
if _DCCSI_DEV_MODE:
    from azpy.test.entry_test import connect_wing
    foo = connect_wing()
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# previous
class AtomPBR:
    """Accessor for legacy Atom PBR .material JSON documents."""

    def __init__(self, material_file):
        """Parse *material_file* and keep a Box view of the JSON document."""
        self.material_file = material_file
        with open(self.material_file, "r") as source:
            self.input_data = source
            self.material = json.load(source)
        self.mat_box = Box(self.material)
        # Known texture slot names for this material schema.
        self.tex = ['baseColor', 'metallic', 'roughness', 'normalMap', 'opacity']
        # Per-slot texture paths, filled in later by callers.
        self.basecolor_tex = ""
        self.metallic_tex = ""
        self.roughness_tex = ""
        self.normalmap_tex = ""
        self.opacity_tex = ""

    def load(self, material_file):
        """Re-read *material_file*, replacing the current document."""
        with open(material_file, "r") as source:
            self.material = json.load(source)
        self.mat_box = Box(self.material)

    def get_map(self, tex_slot):
        """Return the textureMap currently assigned to *tex_slot*."""
        return self.mat_box.properties[tex_slot].parameters.textureMap

    def set_map(self, tex_slot, tex_map):
        """Point *tex_slot* at the texture file *tex_map*."""
        self.mat_box.properties[tex_slot].parameters.textureMap = tex_map

    def write(self, material_out):
        """Serialize the (possibly modified) document to *material_out*."""
        serialized = json.dumps(self.mat_box, indent=4)
        with open(material_out, "w+") as sink:
            sink.write(serialized)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# new?
class AtomMaterial:
    """Accessor for Atom .material JSON documents (current schema)."""

    def __init__(self, material_file):
        """Parse *material_file* and keep a Box view of the JSON document."""
        self.material_file = material_file
        with open(self.material_file, "r") as source:
            self.input_data = source
            self.material = json.load(source)
        self.mat_box = Box(self.material)
        # Known texture slot names for this material schema.
        self.tex = ['baseColor', 'metallic', 'roughness', 'specularF0', 'normal', 'opacity']
        # Maps material property group -> texture file suffix.
        self.texture_map = {'baseColor': 'baseColor',
                            'metallic': 'metallic',
                            'roughness': 'roughness',
                            'specularF0': 'specular',
                            'normal': 'normal',
                            'opacity': 'opacity'
                            }

    def load(self, material_file):
        """Re-read *material_file*, replacing the current document."""
        with open(material_file, "r") as source:
            self.material = json.load(source)
        self.mat_box = Box(self.material)

    def get_material_type(self):
        """Return the document's materialType entry."""
        return self.mat_box.materialType

    def get_map(self, tex_slot):
        """Return the textureMap currently assigned to *tex_slot*."""
        return self.mat_box.properties[tex_slot].textureMap

    def set_map(self, tex_slot, tex_map):
        """Assign *tex_map* to *tex_slot* and switch the slot to texture use."""
        self.mat_box.properties[tex_slot].textureMap = tex_map
        self.mat_box.properties[tex_slot].useTexture = True
        self.mat_box.properties[tex_slot].factor = 1.0

    def write(self, material_out):
        """Write the document to *material_out* (a pathlib.Path), creating
        parent directories as needed; returns *material_out*."""
        parent_dir = material_out.parent
        if parent_dir.exists():
            _LOGGER.info('exists: {}'.format(parent_dir))
        else:
            try:
                parent_dir.mkdir(mode=0o777, parents=True, exist_ok=True)
                _LOGGER.info('mkdir: {}'.format(parent_dir))
            except Exception as e:
                _LOGGER.error(e)
                raise(e)
        material_out.touch()
        serialized = json.dumps(self.mat_box, indent=4)
        with open(str(material_out), "w+") as sink:
            sink.write(serialized)
        return material_out
# -------------------------------------------------------------------------
###########################################################################
# Main Code Block, runs this script as main (testing)
# -------------------------------------------------------------------------
if __name__ == "__main__":
    """Run this file as main (ad-hoc test of AtomMaterial)."""
    # Bug fix: format with the string itself; {_PACKAGENAME} was a set literal.
    _LOGGER.info("Test Run:: {0}.".format(_PACKAGENAME))
    _LOGGER.info("{0} :: if __name__ == '__main__':".format(_PACKAGENAME))
    material_path = Path(Path(__file__).parent.parent, 'resources', 'atom')
    # Test material parser for the new format.
    material_01 = AtomMaterial(Path(material_path, "StandardPBR_AllProperties.material"))
    material_01.baseColor_tex = "Textures/Streaming/streaming99.dds"
    material_01.metallic_tex = "Textures/Streaming/streaming99.dds"
    material_01.set_map(material_01.tex[0], material_01.baseColor_tex)
    material_01.set_map(material_01.tex[1], material_01.metallic_tex)
    material_out = material_01.write(Path(material_path, "atom_variant00.material"))
    _LOGGER.info('materialType is:: {}'.format(material_01.get_material_type()))
    if material_out.exists():
        _LOGGER.info('Wrote material file: {}'.format(material_out))
    # remove the logger
    del _LOGGER
# ---- END ---------------------------------------------------------------
|
y = x[0, ..., 2]
|
# Single-source package version string.
__version__ = '0.2.4'
# Re-export the package's public entry point.
from .scraper import Scraper # noqa: F401
|
import os
import cv2
from paddle.v2.image import load_image
class DataGenerator(object):
    """Produce reader callables yielding (image_vector, label) pairs.

    :param char_dict: The dictionary class for labels.
    :type char_dict: class
    :param image_shape: The fixed shape of images.
    :type image_shape: tuple
    """

    def __init__(self, char_dict, image_shape):
        self.image_shape = image_shape
        self.char_dict = char_dict

    def train_reader(self, file_list):
        """Reader interface for training: labels are mapped to character ids.

        :param file_list: The path list of the image file for training.
        :type file_list: list
        """
        def reader():
            # Characters missing from the dictionary fall back to <unk>.
            unknown_id = self.char_dict['<unk>']
            for image_path, text in file_list:
                ids = [self.char_dict.get(ch, unknown_id) for ch in text]
                yield self.load_image(image_path), ids

        return reader

    def infer_reader(self, file_list):
        """Reader interface for inference: labels are passed through untouched.

        :param file_list: The path list of the image file for inference.
        :type file_list: list
        """
        def reader():
            for image_path, label in file_list:
                yield self.load_image(image_path), label

        return reader

    def load_image(self, path):
        """Load an image and transform it to a 1-dimension [0, 1] vector.

        :param path: The path of the image data.
        :type path: str
        """
        image = load_image(path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        if self.image_shape:
            # Resize all images to a fixed shape.
            image = cv2.resize(
                image, self.image_shape, interpolation=cv2.INTER_CUBIC)
        return image.flatten() / 255.
|
import abc
from typing import Optional
class PluginInstaller(abc.ABC):
    """
    Abstract interface for installing plugins.
    """

    def setup(self) -> None:
        """
        Hook for any preparation required before plugins are installed.
        """

    def teardown(self) -> None:
        """
        Hook for releasing any resource acquired in setup().
        """

    @abc.abstractmethod
    def install(self, name: str, version: Optional[str] = None) -> None:
        """
        Install the named plugin, optionally pinned to an exact version;
        version ranges are not accepted.
        """
        raise NotImplementedError
|
import sys
sys.path.append("../")
from api.metrics.rent_burdened import RentBurdenedMetrics
from api.utils.testing import create_test_db
...
def test_RBU():
    """End-to-end check of RentBurdenedMetrics against a small fixture table.

    Seeds an in-memory test DB with raw percentages (0-100) per
    (area_number, period_end_year); the metrics return fractions (0-1).
    """
    rent_burdened_households_table = [
        {
            "area_number": 1,
            "period_end_year": 2019,
            "segment": "all",
            "value": 52.7029065195125
        },
        {
            "area_number": 1,
            "period_end_year": 2018,
            "segment": "all",
            "value": 41.6541886411246
        },
        {
            "area_number": 2,
            "period_end_year": 2019,
            "segment": "all",
            "value": 56.2945422971736
        },
        {
            "area_number": 3,
            "period_end_year": 2014,
            "segment": "all",
            "value": 51.4589913593531
        },
        {
            "area_number": 3,
            "period_end_year": 2018,
            "segment": "all",
            "value": 48.790639029861
        },
        {
            "area_number": 3,
            "period_end_year": 2017,
            "segment": "all",
            "value": 33.9090007521057
        }
    ]
    # Build the schema from the pipeline DDL and load the fixture rows.
    con, cur = create_test_db(
        scripts=[
            "./pipeline/load/rent_burdened_households.sql"
        ],
        tables={
            "rent_burdened_households": rent_burdened_households_table
        }
    )
    metric = RentBurdenedMetrics(con)
    # Area 3 has no 2019 row, so only areas 1 and 2 appear.
    assert metric.rent_burdened(year=2019, segment="all") == [
        { "area_number": 1, "value": 0.527029065195125 },
        { "area_number": 2, "value": 0.562945422971736 }
    ], "Should have two results for 2019."
    # Max/min/average are taken per area across all years present.
    assert metric.rent_max_burdened() == [
        { 'value': 0.527029065195125, 'area_number': 1},
        { 'value': 0.562945422971736, 'area_number': 2},
        { 'value': 0.514589913593531, 'area_number': 3}
    ], "Double check the max result."
    assert metric.rent_min_burdened() == [
        { "area_number": 1, "value": 0.416541886411246},
        { "area_number": 2, "value": 0.562945422971736},
        { "area_number": 3, "value": 0.339090007521057}
    ], "Double check the min result."
    assert metric.rent_average_burden_area() == [
        {'value': 0.47178547580318553, 'area_number': 1},
        {'value': 0.562945422971736, 'area_number': 2},
        {'value': 0.4471954371377327, 'area_number': 3}
    ], "Check if the results are correct"
#! /usr/bin/env python
from gazebo_msgs.srv import GetModelState
import rospy
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import Pose
class Block:
    """Associates a gazebo model name with a lookup key."""

    def __init__(self, name):
        # Leading underscore: consumers treat this as read-only.
        self._name = name
class Robot:
    """Plain record of a robot's number and cartesian position."""

    def __init__(self, number, x, y, z):
        # NOTE: the attribute is called `name` although it holds the number.
        self.name = number
        self.x = x
        self.y = y
        self.z = z
class State:
    """Re-publishes each husky's gazebo pose and can print positions on demand."""

    # Lookup table of gazebo model names, keyed by an arbitrary block id.
    _blockListDict = {
        'block_a': Block('husky1'),
        'block_b': Block('husky2'),
        'block_c': Block('husky3'),
        'block_d': Block('husky4'),
    }

    def __init__(self):
        rospy.init_node('get_robot_positions', anonymous=True)
        # One pose publisher per husky; index i publishes husky{i+1}.
        self.pub_list = []
        self.pub_list.append(rospy.Publisher("husky1/robot/position", Pose, queue_size=1))
        self.pub_list.append(rospy.Publisher("husky2/robot/position", Pose, queue_size=1))
        self.pub_list.append(rospy.Publisher("husky3/robot/position", Pose, queue_size=1))
        self.pub_list.append(rospy.Publisher("husky4/robot/position", Pose, queue_size=1))
        rospy.Subscriber("/gazebo/model_states", ModelStates, self.callback, queue_size=1)

    def get_positions(self):
        """Fetch one ModelStates message and print each known robot's position."""
        data = rospy.wait_for_message("/gazebo/model_states", ModelStates)
        # Bug fix: dict.itervalues() is Python 2 only; values() works on 2 and 3.
        for block in self._blockListDict.values():
            blockName = str(block._name)
            if blockName in data.name:
                robot_index = data.name.index(blockName)
                current_robot = Robot(number=int(blockName[-1]),
                                      x=data.pose[robot_index].position.x,
                                      y=data.pose[robot_index].position.y,
                                      z=data.pose[robot_index].position.z)
                print("Robot number : {}".format(current_robot.name))
                print("Robot x : {}".format(current_robot.x))
                print("Robot y : {}".format(current_robot.y))
                print("Robot z : {}".format(current_robot.z))

    def callback(self, data):
        """Re-publish each husky's pose from the incoming ModelStates message."""
        for i in range(0, 4):
            robot_index = data.name.index('husky{}'.format(i + 1))
            pose = data.pose[robot_index]
            self.pub_list[i].publish(pose)
if __name__ == '__main__':
    # Instantiating State registers the publishers/subscriber; spin() then
    # services callbacks until ROS shutdown.
    show = State()
    rospy.spin()
    # show.get_positions()
|
from django.shortcuts import render
from django.shortcuts import redirect
from .forms import BookForm, ChangePassword, DeleteAccount, FutureBook
from .settings import validate_password, change_password, delete_account
from .drones import get_vehicles_of_user, get_all_drone_data, get_all_base_data, form_time, get_future_bookings_of_user, plan_route, plan_time, plan_future_time, book_journey, convert_vehicles_to_ids, convert_vehicle_ids_to_vehicles, convert_locations_to_ids, convert_locations_ids_to_locations, serialize_datetime, deserialize_datetime, get_locations_data
# Create your views here.
def dashboard(request):
    """Render the dashboard with the user's vehicles and future bookings."""
    user = request.session['username']
    context = {
        "username": user,
        "vehicles_data": get_vehicles_of_user(user),
        "future_booking_data": get_future_bookings_of_user(user),
    }
    return render(request, '../../drone_system/templates/drone_system/dashboard.html', context)
def information(request):
    """Render the information page listing all drones and bases."""
    context = {
        "drones_data": get_all_drone_data(),
        "bases_data": get_all_base_data(),
    }
    return render(request, '../../drone_system/templates/drone_system/information.html', context)
def locations(request):
    """Render the page listing all bookable locations."""
    context = {"locations_data": get_locations_data()}
    return render(request, '../../drone_system/templates/drone_system/locations.html', context)
def book(request):
    """Immediate booking view.

    Three POST actions share this view: "Book" plans a route (and either books
    it directly or stashes it in the session for confirmation), "Yes" books the
    stashed journey, "No" discards it. GET just renders an empty form.
    """
    book_status = ""
    vehicles = []
    vehicles_types = []
    route_locations = []
    start_times = []
    end_times = []
    if request.method == "POST":
        form = BookForm(request.POST)
        if form.is_valid():
            # Keep only the location id (the text before the first comma).
            form.cleaned_data['origin'] = str(form.cleaned_data['origin'])
            form.cleaned_data['origin'] = (form.cleaned_data['origin'].split(','))[0]
            form.cleaned_data['destination'] = str(form.cleaned_data['destination'])
            form.cleaned_data['destination'] = (form.cleaned_data['destination'].split(','))[0]
            if request.POST.get("Book"):
                if form.cleaned_data['origin'] != form.cleaned_data['destination']:
                    vehicles, vehicles_types, route_locations, book_status = plan_route(form.cleaned_data['origin'], form.cleaned_data['destination'])
                    if None in vehicles:
                        # No vehicle available for some leg of the route.
                        book_status = "None"
                    elif book_status == "":
                        # Plan succeeded without caveats: book immediately.
                        start_times, durations, end_times = plan_time(vehicles, route_locations)
                        book_journey(vehicles, vehicles_types, route_locations, start_times, durations, end_times, request.session['username'])
                        book_status = "Booked"
                    else:
                        # Plan needs user confirmation: serialize the journey
                        # into the session until the Yes/No POST arrives.
                        start_times, durations, end_times = plan_time(vehicles, route_locations)
                        vehicles_ids = convert_vehicles_to_ids(vehicles)
                        route_locations_ids = convert_locations_to_ids(route_locations)
                        serialized_start_times = serialize_datetime(start_times)
                        serialized_end_times = serialize_datetime(end_times)
                        request.session['vehicles_ids'] = vehicles_ids
                        request.session['vehicles_types'] = vehicles_types
                        request.session['route_locations_ids'] = route_locations_ids
                        request.session['start_times'] = serialized_start_times
                        request.session['durations'] = durations
                        request.session['end_times'] = serialized_end_times
                else:
                    book_status = "Same"
            elif request.POST.get("Yes"):
                # User confirmed: rebuild the journey from the session and book it.
                vehicles = convert_vehicle_ids_to_vehicles(request.session['vehicles_ids'], request.session['vehicles_types'])
                route_locations = convert_locations_ids_to_locations(request.session['route_locations_ids'])
                start_times = deserialize_datetime(request.session['start_times'])
                end_times = deserialize_datetime(request.session['end_times'])
                book_journey(vehicles, request.session['vehicles_types'], route_locations, start_times, request.session['durations'], end_times, request.session['username'])
                book_status = "Booked"
                form = BookForm()
                del request.session['vehicles_ids']
                del request.session['vehicles_types']
                del request.session['route_locations_ids']
                del request.session['start_times']
                del request.session['durations']
                del request.session['end_times']
            elif request.POST.get("No"):
                # User declined: just drop the stashed journey.
                del request.session['vehicles_ids']
                del request.session['vehicles_types']
                del request.session['route_locations_ids']
                del request.session['start_times']
                del request.session['durations']
                del request.session['end_times']
                form = BookForm()
    else:
        form = BookForm()
    context = {
        "form": form,
        "book_status": book_status,
        "vehicles": vehicles,
        "vehicles_types": vehicles_types,
        "route_locations": route_locations,
        "start_times": start_times,
        "end_times": end_times,
    }
    return render(request, '../../drone_system/templates/drone_system/book.html', context)
def futurebook(request):
    """Future booking view: plans and books a route for a user-chosen time."""
    time_validation = ""
    book_status = ""
    if request.method == "POST":
        form = FutureBook(request.POST)
        if form.is_valid():
            # Keep only the location id (the text before the first comma).
            form.cleaned_data['origin'] = str(form.cleaned_data['origin'])
            form.cleaned_data['origin'] = (form.cleaned_data['origin'].split(','))[0]
            form.cleaned_data['destination'] = str(form.cleaned_data['destination'])
            form.cleaned_data['destination'] = (form.cleaned_data['destination'].split(','))[0]
            if form.cleaned_data['origin'] != form.cleaned_data['destination']:
                time = form_time(form.cleaned_data['time'])
                time_validation = True
                vehicles, vehicles_types, route_locations, book_status = plan_route(form.cleaned_data['origin'], form.cleaned_data['destination'])
                if None in vehicles:
                    # No vehicle available for some leg of the route.
                    book_status = "None"
                else:
                    start_times, durations, end_times = plan_future_time(vehicles, route_locations, time)
                    print(start_times)
                    if None in start_times:
                        print("None in start times")
                        book_status = "None"
                    else:
                        # Trailing True flags this as a future (pre-scheduled) journey.
                        book_journey(vehicles, vehicles_types, route_locations, start_times, durations, end_times, request.session['username'], True)
                        book_status = "Booked"
                        form = FutureBook()
            else:
                book_status = "Same"
        else:
            # Invalid form data (e.g. unparseable time).
            time_validation = False
    else:
        form = FutureBook()
    context = {
        "form": form,
        "time_validation": time_validation,
        "book_status": book_status,
    }
    return render(request, '../../drone_system/templates/drone_system/futurebook.html', context)
def help(request):  # noqa: A001 - view name mirrors the URL, shadows builtin help
    """Render the static help page."""
    return render(request, '../../drone_system/templates/drone_system/help.html')
def settings(request):
    """Render the static settings landing page."""
    return render(request, '../../drone_system/templates/drone_system/settings.html')
def changepassword(request):
    """Render the change-password form and apply a valid password change."""
    form = ChangePassword(request.POST or None)
    status = ""
    if form.is_valid():
        username = request.session['username']
        if not validate_password(username, form.cleaned_data['password']):
            # Current password did not check out.
            status = "Wrong"
        elif form.cleaned_data['new_password'] != form.cleaned_data['repeat_password']:
            status = "Not same"
        else:
            change_password(username, form.cleaned_data['new_password'])
            status = "Changed"
        # Present a fresh, empty form after any submission.
        form = ChangePassword()
    context = {
        "form": form,
        "status": status
    }
    return render(request, '../../drone_system/templates/drone_system/changepassword.html', context)
def deleteaccount(request):
    """Render the delete-account form; delete and log out on a correct password."""
    form = DeleteAccount(request.POST or None)
    status = ""
    if form.is_valid():
        username = request.session['username']
        if validate_password(username, form.cleaned_data['password']):
            delete_account(username)
            return redirect('/dashboard/logout')
        # Wrong password: report it and reset the form.
        status = "Wrong"
        form = DeleteAccount()
    context = {
        "form": form,
        "status": status
    }
    return render(request, '../../drone_system/templates/drone_system/deleteaccount.html', context)
def logout(request):
    """Drop the session's username and return to the home page."""
    del request.session['username']
    return redirect('/home/')
|
#!/usr/bin/env python3
#homework5 - Advanced
import matplotlib.pyplot as plt
import pandas as pd
def printme(name, to_print):
    """Print *to_print* under a ``name:`` heading, followed by two blank lines."""
    print(f'{name}:\n{to_print}\n\n')
# Load the dataset once; every statement below reads from this frame.
insurance = pd.read_csv('data/insurance.csv')
printme("insurance dataframe", insurance.to_string())
printme("insurance dataframe", insurance.columns)
printme("insurance dataframe", insurance.info())
printme("insurance dataframe", insurance.describe().to_string())
printme("age field", insurance['age'])
printme("imp 3 fields", insurance[['age', 'children', 'charges']])
printme("insurance dataframe", insurance.loc[0:4, ['age', 'children', 'charges']])
printme("Average value", insurance["charges"].mean())
printme("Min value", insurance["charges"].min())
printme("Max value", insurance["charges"].max())
# Age and sex of the person who paid exactly 10797.3362.
tempvalue = insurance.loc[insurance['charges'] == 10797.3362]
print(tempvalue[['age', 'sex']])
# Bug fix: select the age of the row holding the maximum charge; the original
# printed the entire age column next to the max charge.
print("Age of the person who paid max charge",
      insurance.loc[insurance['charges'].idxmax(), 'age'],
      insurance["charges"].max())
print("insured people we have for each region", insurance.groupby(['region']).size())
printme("NO of Insured people are children", insurance[insurance['children'] > 0].count())
# numeric_only avoids a TypeError on the string columns with pandas >= 2.0.
printme("correlation between fields", insurance.corr(numeric_only=True).to_string())
plt.close()
|
from tilepack.outputter.mbtiles import MbtilesOutput
from tilepack.outputter.zipfile import ZipfileOutput
from tilepack.outputter.null import NullOutput
|
#!/usr/bin/python
#
## mrquery
##
## This program provides an interface to send arbitrary queries to Moira.
#
import common
import re, argparse, json
import pymoira
class QueryInfo:
    """Parsed description of a Moira query, obtained via the `_help` query."""

    def __init__(self, name):
        info, = client.query('_help', (name,))
        # FIXME: the format Moira server uses is horrible; until a saner API
        # exists, parse the human-readable help text with a regex.
        text = ' '.join(info)
        parsed = re.match(r"\s*(\S+), (\S+) \((.+)\)( => (.+))?", text)
        if parsed is None:
            raise pymoira.UserError( "Unable to parse the Moira query description" )
        self.name, self.shortname, arg_spec, _, out_spec = parsed.groups()
        self.inputs = arg_spec.split(' ')
        self.outputs = out_spec.split(' ') if out_spec else []
def setup_arguments(argparser):
    """Register mrquery's command-line arguments on *argparser*."""
    argparser.add_argument('query', help='The Moira query name to execute')
    argparser.add_argument('arg', nargs='*', help='The arguments to the query')
    argparser.add_argument('-j', '--json', action='store_true',
                           help='Output the results of the query in JSON')
def do_query():
    """Execute args.query and return (rows, QueryInfo), rows as name->value dicts."""
    info = QueryInfo(args.query)
    if len(args.arg) != len(info.inputs):
        raise pymoira.UserError( "Query argument count mismatch (%i expected, %i supplied)" % (len(info.inputs), len(args.arg)) )
    result = []
    for raw_row in client.query(info.name, args.arg):
        if len(raw_row) != len(info.outputs):
            raise pymoira.UserError("Moira server returned unexpected amount of columns in a row")
        result.append(dict(zip(info.outputs, raw_row)))
    return result, info
def show_help():
    # Print the server-side help text for the query named in args.arg[0].
    # NOTE: Python 2 print statement — this module is Python 2 code.
    try:
        query = args.arg[0]
    except IndexError:
        common.error( "Query name was not specified" )
        return
    info, = client.query('_help', (query,))
    print ' '.join(info).strip()
def show_queries_list():
    # Print every query name the Moira server advertises, one per line.
    queries = client.query('_list_queries', ())
    for query, in queries:
        print query
def show_user_list():
    # Print each connected user; every row is a 5-tuple fed to the %-format.
    users = client.query('_list_users', ())
    for user in users:
        print "%s from %s %s, since %s %s" % user
def handle_query():
    """Outputs the results of the query or an error message."""
    if args.json:
        # JSON mode: always emit a single {status, ...} object, even on error.
        try:
            result, info = do_query()
            print json.dumps({ 'status' : 'ok', 'result' : result })
        except pymoira.BaseError as e:
            print json.dumps({ 'status' : 'error', 'message' : str(e) })
    else:
        try:
            # Those are not real queries, and QueryInfo() would fail for them
            if args.query == '_help':
                show_help()
                return
            if args.query == '_list_queries':
                show_queries_list()
                return
            if args.query == '_list_users':
                show_user_list()
                return
            result, info = do_query()
            # Human-readable mode: one field block per row, blank line between.
            for row in result:
                fields = [ (field_name, row[field_name]) for field_name in info.outputs ]
                common.show_fields(*fields)
                print ""
        except pymoira.BaseError as err:
            common.error(err)
if __name__ == '__main__':
    # common.init parses arguments (via setup_arguments) and connects to Moira,
    # providing the module-level `client` and `args` the functions above use.
    client, args = common.init('mrquery', 'Send raw queries to Moira', setup_arguments)
    handle_query()
|
#!/usr/bin/env python
import os
import requests
from bs4 import BeautifulSoup
import json
import random
class Quotes:
    """Caches movie quotes scraped from an IMDb trivia page.

    Scraped quotes are persisted to quotes.json so later instances skip the
    network fetch; the cache file is removed when an instance is collected.
    """

    def __init__(self):
        if os.path.isfile('quotes.json'):
            # Reuse the cached scrape; `with` closes the handle deterministically.
            with open('quotes.json', 'r') as fin:
                self.quotes = json.load(fin)
        else:
            self.quotes = []
            html = requests.get(
                'http://www.imdb.com/title/tt0080339/trivia?tab=qt&ref_=tt_trv_qu',
                headers={'User-Agent': 'Mozilla'})
            soup = BeautifulSoup(html.text, 'html.parser')
            for div in soup.findAll('div', {"class": "sodatext"}):
                self.quotes.append(repr(div))
            with open('quotes.json', 'w') as fout:
                json.dump(self.quotes, fout, indent=4)

    def __del__(self):
        # Best-effort cleanup of the cache file on garbage collection.
        if os.path.isfile('quotes.json'):
            os.remove('quotes.json')

    def random(self):
        """Return a uniformly random cached quote."""
        # random.choice is the idiomatic equivalent of
        # quotes[randint(0, len(quotes) - 1)].
        return random.choice(self.quotes)
if __name__ == "__main__":
    # Scrape (or load the cached) quotes and print one at random.
    print(Quotes().random())
|
# 3. With a given integral number n, write a program to generate a dictionary that contains (i, i x i)
# such that is an integral number between 1 and n (both included). and then the program
# should print the dictionary.Suppose the following input is supplied to the program: 8
# Then, the output should be: {1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64}
# Read n from stdin, then print {i: i*i for i in 1..n}.
n = int(input())
# Avoid shadowing the builtin `dict`; a comprehension replaces the manual loop.
squares = {i: i * i for i in range(1, n + 1)}
print(squares)
|
import hashlib
import json
import os
import signal
import subprocess
import time
from contextlib import contextmanager
from typing import List, Set
import requests
def getDefaultExpectedHash() -> bytes:
    """Return the ascii-hex SHA-1 of b"uniquestring" as bytes.

    "uniquestring" is stored in /etc/unique of the SSH server. The tests run
    `sha1sum /etc/unique` via kssh and assert the output contains this digest,
    which proves the command really executed on the remote server.
    """
    digest = hashlib.sha1(b"uniquestring").hexdigest()
    return digest.encode("utf-8")
class TestConfig:
    """Bundle of environment-derived settings shared by the kssh test suite."""

    # Not actually a test class, so tell pytest not to collect it.
    __test__ = False

    def __init__(
        self,
        subteam,
        subteam_secondary,
        username,
        bot_username,
        expected_hash,
        subteams,
    ):
        self.subteam: str = subteam
        self.subteam_secondary: str = subteam_secondary
        self.username: str = username
        self.bot_username: str = bot_username
        self.expected_hash: bytes = expected_hash
        self.subteams: List[str] = subteams

    @staticmethod
    def getDefaultTestConfig():
        """Build a TestConfig from the environment variables set by the harness."""
        base_team = os.environ["SUBTEAM"]
        suffixes = (".ssh.prod", ".ssh.staging", ".ssh.root_everywhere")
        return TestConfig(
            base_team,
            os.environ["SUBTEAM_SECONDARY"],
            os.environ["KSSH_USERNAME"],
            os.environ["BOT_USERNAME"],
            getDefaultExpectedHash(),
            [base_team + suffix for suffix in suffixes],
        )
def run_put_kvstore_command(team: str, config: str) -> bytes:
    """
    Run a "keybase kvstore api" command to write the kssh_config for the
    given team
    :param team: The team to write the config for
    :return: The stdout of the process
    """
    # the extra json.dumps is to add the expected quotation marks
    payload = (
        f'echo \'{{"method":"put", "params": {{'
        f'"options": {{"team": "{team}", '
        f'"namespace": "__sshca", "entryKey": "kssh_config",'
        f'"entryValue": {json.dumps(config)} }}}}}}\' | '
        f"xargs -0 -I put keybase kvstore api -m put"
    )
    return run_command(payload)
def run_delete_kvstore_command(team: str) -> bytes:
    """
    Run a "keybase kvstore api" command to delete the kssh_config from the
    given team
    :param team: The team to delete the config from
    :return: The stdout of the process
    """
    # trailing `|| true` makes deletion idempotent when no entry exists
    payload = (
        f'echo \'{{"method": "del", '
        f'"params": {{"options": {{"team": '
        f'"{team}", "namespace": "__sshca", '
        f'"entryKey": "kssh_config"}}}}}}\' | '
        f"xargs -0 -I del keybase kvstore api -m del || true"
    )
    return run_command(payload)
def run_command_with_agent(cmd: str) -> bytes:
    """
    Run the given command in a shell session with a running ssh-agent
    :param cmd: The command to run
    :return: The stdout of the process
    """
    agent_prefix = "eval `ssh-agent` && "
    return run_command(agent_prefix + cmd)
def run_command(cmd: str, timeout: int = 15) -> bytes:
    """
    Run the given command in a shell with the given timeout
    :param cmd: The command to run
    :param timeout: The timeout in seconds
    :return: The stdout of the process
    :raises subprocess.CalledProcessError: on a non-zero exit status
    :raises subprocess.TimeoutExpired: if the command outlives *timeout*
    """
    # In order to properly run a command with a timeout and shell=True, we use
    # Popen with a shell and group all child processes (preexec_fn=os.setsid
    # makes the shell a process-group leader) so we can kill all of them. See:
    # - https://stackoverflow.com/questions/36952245/subprocess-timeout-failure
    # - https://stackoverflow.com/questions/4789837/how-to-terminate-a-
    # python-subprocess-launched-with-shell-true
    with subprocess.Popen(
        cmd, shell=True, stdout=subprocess.PIPE, preexec_fn=os.setsid
    ) as process:
        try:
            stdout, stderr = process.communicate(timeout=timeout)
            if process.returncode != 0:
                print(f"Output before return: {repr(stdout)}, {repr(stderr)}")
                raise subprocess.CalledProcessError(
                    process.returncode, cmd, stdout, stderr
                )
            return stdout
        except subprocess.TimeoutExpired as e:
            # Signal the whole group so shell children don't outlive the timeout.
            os.killpg(process.pid, signal.SIGINT)
            print(f"Output before timeout: {process.communicate()[0]}")
            raise e
def read_file(filename: str) -> List[bytes]:
    """
    Read the contents of the given filename to a list of byte strings. If it
    is a normal file, uses the standard open() function. Otherwise, uses
    `keybase fs read`. This is because fuse is not running in the container so
    a normal open call will not work for KBFS.
    :param filename: The name of the file to read
    :return: A list of lines in the file
    """
    if filename.startswith("/keybase/"):
        # Bug fix: the f-string was missing its {filename} placeholder, so the
        # literal text "(unknown)" was passed to `keybase fs read`.
        # NOTE(review): this branch strips newlines (splitlines) while the
        # open() branch keeps them (readlines) — confirm callers expect that.
        return run_command(f"keybase fs read {filename}").splitlines()
    with open(filename, "rb") as f:
        return f.readlines()
def clear_keys():
    """Remove all keys generated by kssh (best effort)."""
    try:
        run_command("rm -rf ~/.ssh/keybase-signed-key*")
    except subprocess.CalledProcessError:
        # Nothing to delete (or rm failed) — either way we proceed.
        pass
def clear_local_config():
    """Remove kssh's local config file (best effort)."""
    try:
        run_command("rm -rf ~/.ssh/kssh-config.json")
    except subprocess.CalledProcessError:
        # Config file absent — nothing to do.
        pass
def load_env(filename: str):
    """Ask the ca-bot to load the environment named after *filename*'s basename.

    The caller passes the path of the running python test script; its stem
    (basename without extension) selects the environment.
    """
    env_name = os.path.basename(filename).split(".")[0]
    response = requests.get(f"http://ca-bot:8080/load_env?filename={env_name}")
    return response.content == b"OK"
def assert_contains_hash(expected_hash: bytes, output: bytes):
    """Assert that *output* contains *expected_hash* (see getDefaultExpectedHash)."""
    assert expected_hash in output
@contextmanager
def simulate_two_teams(tc: TestConfig):
    """Context manager simulating an environment with two teams set up.

    Copies the primary staging team's kssh_config into the secondary team
    (with its own teamname/botname) and removes it again on exit.
    """
    get_res = run_command(
        f'echo \'{{"method": "get", "params": {{"options": {{"team": '
        f'"{tc.subteam}.ssh.staging", "namespace": "__sshca", '
        f'"entryKey": "kssh_config"}}}}}}\' | '
        f"xargs -0 -I get keybase kvstore api -m get"
    )
    # Double-decode: the api wraps the JSON config in a JSON envelope.
    secondary_config = json.loads(json.loads(get_res)["result"]["entryValue"])
    secondary_config["teamname"] = tc.subteam_secondary
    secondary_config["botname"] = "otherbotname"
    run_put_kvstore_command(tc.subteam_secondary, json.dumps(secondary_config))
    try:
        yield
    finally:
        run_delete_kvstore_command(tc.subteam_secondary)
@contextmanager
def outputs_audit_log(tc: TestConfig, filename: str, expected_number: int):
    # A context manager that asserts that the code run inside it triggers
    # expected_number of audit-log lines to be appended to the log at the
    # given filename.
    # Make a set of the lines in the audit log before we ran
    before_lines = set(read_file(filename))
    # Then run the code inside the context manager
    yield
    # And sleep to give KBFS some time
    time.sleep(2.5)
    # Then see if there are new lines using set difference. This is only
    # safe/reasonable since we include a timestamp in audit log lines.
    after_lines = set(read_file(filename))
    new_lines = after_lines - before_lines
    cnt = 0
    for line in new_lines:
        line = line.decode("utf-8")
        # NOTE: the two adjacent f-strings below are implicitly concatenated
        # into a single expected substring before the `in line` test.
        if (
            line
            and f"Processing SignatureRequest from user={tc.username}" in line
            and (
                f"principals:{tc.subteam}.ssh.staging,"
                f"{tc.subteam}.ssh.root_everywhere, expiration:+1h, pubkey:"
            )
            in line
        ):
            cnt += 1
    if cnt != expected_number:
        assert (
            False
        ), f"Found {cnt} audit log entries, expected {expected_number}! \
            New audit logs: {new_lines}"
def get_principals(certificateFilename: str) -> Set[str]:
    """Parse the Principals section out of an ssh certificate via ssh-keygen."""
    found: Set[str] = set()
    in_principals = False
    # ssh-keygen indents every principal line by exactly 16 spaces.
    principals_indent = 16
    listing = run_command(
        f"cat {certificateFilename} | ssh-keygen -L -f /dev/stdin"
    ).splitlines()
    for line in listing:
        if line.strip().startswith(b"Principals:"):
            in_principals = True
            continue
        if len(line) - len(line.lstrip()) != principals_indent:
            # Indentation changed: we've left the Principals section.
            in_principals = False
        if in_principals:
            found.add(line.strip().decode("utf-8"))
    return found
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import keras
import numpy as np
import warnings, os, sys
from keras.models import Model
from keras import initializers, regularizers, constraints
from keras.utils import conv_utils
from keras.utils.data_utils import get_file
from keras.engine.topology import get_source_inputs
from keras.engine import InputSpec
from keras.applications import imagenet_utils
from keras.applications.imagenet_utils import decode_predictions
from keras import backend as K
from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
# from sklearn.cross_validation import train_test_split
from keras.layers import Input, Activation, Dropout, Flatten, Dense, Reshape, BatchNormalization
from keras.layers import Convolution2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D, Conv2D
from keras_applications.imagenet_utils import _obtain_input_shape
from sklearn.model_selection import train_test_split
def relu6(x):
    """ReLU activation capped at 6, as used by the MobileNet family."""
    return K.relu(x, max_value=6)
def load_sheit():
    """Load the basic landmark dataset from .npz archives one directory up.

    Returns (X, y) where y is reshaped to 136 values (68 landmark points)
    per sample.
    """
    root = "../"
    image_archive = np.load(root + "basic_dataset_img.npz")
    points_archive = np.load(root + "basic_dataset_pts.npz")
    X = image_archive['arr_0']
    y = points_archive['arr_0'].reshape(-1, 136)
    return X, y
def preprocess_input(x):
    """Preprocess a batch of images in 'tf' mode (scales RGB to [-1, 1]).

    # Arguments
        x: a 4D numpy array consists of RGB values within [0, 255].
    # Returns
        Preprocessed array.
    """
    return imagenet_utils.preprocess_input(x, mode='tf')
class DepthwiseConv2D(Conv2D):
    def __init__(self,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 depth_multiplier=1,
                 data_format=None,
                 activation=None,
                 use_bias=True,
                 depthwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        """Depthwise 2D convolution: one spatial filter per input channel.

        Mirrors Conv2D's constructor but replaces the pointwise-kernel options
        with depthwise_* equivalents; `filters` is forced to None because the
        output channel count is input_channels * depth_multiplier (see build).
        """
        super(DepthwiseConv2D, self).__init__(
            filters=None,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            activation=activation,
            use_bias=use_bias,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            bias_constraint=bias_constraint,
            **kwargs)
        # Depthwise-specific options are kept on this subclass rather than
        # forwarded to Conv2D; build() uses them to create the kernel.
        self.depth_multiplier = depth_multiplier
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
        self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape):
if len(input_shape) < 4:
raise ValueError('Inputs to `DepthwiseConv2D` should have rank 4. '
'Received input shape:', str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs to '
'`DepthwiseConv2D` '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1],
input_dim, self.depth_multiplier)
self.depthwise_kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.depthwise_initializer,
name='depthwise_kernel',
regularizer=self.depthwise_regularizer,
constraint=self.depthwise_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(input_dim * self.depth_multiplier, ),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
self.built = True
def call(self, inputs, training=None):
outputs = K.depthwise_conv2d(
inputs,
self.depthwise_kernel,
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate,
data_format=self.data_format)
if self.bias:
outputs = K.bias_add(
outputs, self.bias, data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
out_filters = input_shape[3] * self.depth_multiplier
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding, self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], out_filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, out_filters)
def get_config(self):
config = super(DepthwiseConv2D, self).get_config()
config.pop('filters')
config.pop('kernel_initializer')
config.pop('kernel_regularizer')
config.pop('kernel_constraint')
config['depth_multiplier'] = self.depth_multiplier
config['depthwise_initializer'] = initializers.serialize(
self.depthwise_initializer)
config['depthwise_regularizer'] = regularizers.serialize(
self.depthwise_regularizer)
config['depthwise_constraint'] = constraints.serialize(
self.depthwise_constraint)
return config
def MobileNet(input_shape=None,
              alpha=1.0,
              depth_multiplier=1,
              dropout=1e-3,
              include_top=True,
              weights='imagenet',
              input_tensor=None,
              pooling=None,
              classes=1000,
              shallow=False):
    """Instantiate the MobileNet architecture (TensorFlow backend only).

    Args:
        input_shape: optional shape tuple; validated/derived via
            `_obtain_input_shape`.
        alpha: width multiplier applied to the filter counts.
        depth_multiplier: depthwise-convolution channel multiplier.
        dropout: dropout rate for the classifier head.
        include_top: whether to include the classification head.
        weights: `None` (random init) or 'imagenet'.
        input_tensor: optional Keras tensor to use as the model input.
        pooling: optional 'avg'/'max' global pooling when include_top=False.
        classes: number of output classes (must be 1000 for imagenet weights).
        shallow: if True, skip the five stride-1, 512-filter blocks (7-11).

    Returns:
        A Keras `Model`.

    Raises:
        RuntimeError: if the backend is not TensorFlow.
        ValueError: for invalid weights / classes / shape combinations.
    """
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only TensorFlow backend is currently supported, '
                           'as other backends do not support '
                           'depthwise convolution.')
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')
    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as ImageNet with `include_top` '
                         'as true, `classes` should be 1000')
    # Determine proper input shape and default size.
    if input_shape is None:
        default_size = 224
    else:
        if K.image_data_format() == 'channels_first':
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]
        if rows == cols and rows in [128, 160, 192, 224]:
            default_size = rows
        else:
            default_size = 224
    input_shape = _obtain_input_shape(
        input_shape,
        default_size=default_size,
        min_size=32,
        data_format=K.image_data_format(),
        require_flatten=include_top,
        weights=weights)
    if K.image_data_format() == 'channels_last':
        row_axis, col_axis = (0, 1)
    else:
        row_axis, col_axis = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]
    # Pretrained weights only exist for specific widths and square sizes.
    if weights == 'imagenet':
        if depth_multiplier != 1:
            raise ValueError('If imagenet weights are being loaded, '
                             'depth multiplier must be 1')
        if alpha not in [0.25, 0.50, 0.75, 1.0]:
            raise ValueError('If imagenet weights are being loaded, '
                             'alpha can be one of'
                             '`0.25`, `0.50`, `0.75` or `1.0` only.')
        if rows != cols or rows not in [128, 160, 192, 224]:
            raise ValueError('If imagenet weights are being loaded, '
                             'input must have a static square shape (one of '
                             '(128,128), (160,160), (192,192), or (224, 224)).'
                             ' Input shape provided = %s' % (input_shape, ))
    if K.image_data_format() != 'channels_last':
        # The layer names/weights assume channels_last; switch temporarily and
        # restore the caller's data format before returning.
        warnings.warn('The MobileNet family of models is only available '
                      'for the input data format "channels_last" '
                      '(width, height, channels). '
                      'However your settings specify the default '
                      'data format "channels_first" (channels, width, height).'
                      ' You should set `image_data_format="channels_last"` '
                      'in your Keras config located at ~/.keras/keras.json. '
                      'The model being returned right now will expect inputs '
                      'to follow the "channels_last" data format.')
        K.set_image_data_format('channels_last')
        old_data_format = 'channels_first'
    else:
        old_data_format = None
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Backbone: stem conv followed by depthwise-separable blocks; stride-2
    # blocks halve the spatial resolution.
    x = _conv_block(img_input, 32, alpha, strides=(2, 2))
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
    if not shallow:
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
        x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=12)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=13)
    if include_top:
        if K.image_data_format() == 'channels_first':
            shape = (int(512 * alpha), 1, 1)
        else:
            shape = (1, 1, int(512 * alpha))
        x = GlobalAveragePooling2D()(x)
        x = Reshape(shape, name='reshape_1')(x)
        x = Dropout(dropout, name='dropout')(x)
        x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
        x = Activation('softmax', name='act_softmax')(x)
        x = Reshape((classes, ), name='reshape_2')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            # Fixed message: it is the channels_first format that has no
            # published weights (the original text said "channels_last").
            raise ValueError('Weights for "channels_first" format '
                             'are not available.')
        if alpha == 1.0:
            alpha_text = '1_0'
        elif alpha == 0.75:
            alpha_text = '7_5'
        elif alpha == 0.50:
            alpha_text = '5_0'
        else:
            alpha_text = '2_5'
        if include_top:
            model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
            weigh_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(
                model_name, weigh_path, cache_subdir='models')
        else:
            model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
            weigh_path = BASE_WEIGHT_PATH + model_name
            weights_path = get_file(
                model_name, weigh_path, cache_subdir='models')
        model.load_weights(weights_path)
    if old_data_format:
        K.set_image_data_format(old_data_format)
    return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Stem block: full convolution, batch norm, then ReLU6."""
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
    width = int(filters * alpha)
    y = Conv2D(width,
               kernel,
               padding='same',
               use_bias=False,
               strides=strides,
               name='conv1')(inputs)
    y = BatchNormalization(momentum=0.6, axis=bn_axis, name='conv1_bn')(y)
    return Activation(relu6, name='conv1_relu')(y)
def _depthwise_conv_block(inputs,
                          pointwise_conv_filters,
                          alpha,
                          depth_multiplier=1,
                          strides=(1, 1),
                          block_id=1):
    """Depthwise-separable block: 3x3 depthwise conv + 1x1 pointwise conv,
    each followed by batch norm and ReLU6."""
    bn_axis = 1 if K.image_data_format() == 'channels_first' else -1
    pw_filters = int(pointwise_conv_filters * alpha)

    # Depthwise stage.
    y = DepthwiseConv2D((3, 3),
                        padding='same',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id)(inputs)
    y = BatchNormalization(momentum=0.6,
                           axis=bn_axis, name='conv_dw_%d_bn' % block_id)(y)
    y = Activation(relu6, name='conv_dw_%d_relu' % block_id)(y)

    # Pointwise (channel-mixing) stage.
    y = Conv2D(pw_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id)(y)
    y = BatchNormalization(momentum=0.6,
                           axis=bn_axis, name='conv_pw_%d_bn' % block_id)(y)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(y)
# Shallow MobileNet backbone on 64x64 single-channel input: no classifier
# head (include_top=False), global max pooling produces the feature vector.
model_pre = MobileNet(input_shape=(64,64,1),alpha=1.0,depth_multiplier=1
,include_top=False,weights=None,pooling='max',shallow=True)
# 136 regression outputs (68 landmark points x 2 coordinates, per the
# reshape(-1, 136) in load_sheit).
num_outputs = 136
from keras.models import Model
from keras.layers import Dense, Input
# NOTE(review): 'global_max_pooling2d_1' relies on Keras' auto-generated layer
# naming counter — confirm the name if other models exist in the session.
last_layer = model_pre.get_layer('global_max_pooling2d_1').output
out = Dense(num_outputs, name='fc_14')(last_layer)
model = Model(model_pre.input, out)
# Dataset is loaded once at module level; main() consumes these globals.
X,y = load_sheit()
def main():
    """Train the landmark model: split the global X/y, compile with the
    smoothL1 loss (defined elsewhere in this file), fit with checkpointing,
    and save the final model to disk."""
    print("Define X and Y")
    print("=======================================")
    # Split train / test dataset
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    print("Success of getting train / test dataset")
    print("=======================================")
    print("X_train: ", X_train.shape)
    print("y_train: ", y_train.shape)
    print("X_test: ", X_test.shape)
    print("y_test: ", y_test.shape)
    print("=======================================")
    # MAPE is tracked as a metric alongside the smooth-L1 training loss.
    model.compile(loss=smoothL1, optimizer=keras.optimizers.Adam(lr=1e-3), metrics=['mape'])
    print(model.summary())
    # checkpoint: keep only the best (lowest val_loss) weights per epoch.
    filepath="./mobilenet_checkpoints/smooth_L1-{epoch:02d}-{val_mean_absolute_percentage_error:.5f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]
    history = model.fit(X_train, y_train, batch_size=2, epochs=2, shuffle=True,\
        verbose=1, validation_data=(X_test, y_test), callbacks=callbacks_list)
    # Save model
    model.save("./model/face_landmark_dnn.h5")
    print("=======================================")
    print("Save Final Model")
    print("=======================================")

if __name__ == "__main__":
    main()
|
from onesignal_sdk.response import OneSignalResponse
from .mocks import MockHttpxResponse
class TestOneSignalResponse:
    def test_sets_body_and_status_code(self):
        """The wrapper must expose the HTTP status code and body of the
        underlying response unchanged."""
        raw = MockHttpxResponse(201, {'created': True})
        wrapped = OneSignalResponse(raw)
        assert wrapped.status_code == 201
        assert wrapped.body == raw.body
|
import pandas

# Drop rows whose "id" column does not parse as an integer, then rewrite the
# CSV in place.
user_df = pandas.read_csv('processed_data/prj_user.csv')
ids = list(user_df["id"].values)
error_ids = []
for i, raw_id in enumerate(ids):  # raw_id: avoid shadowing the builtin `id`
    try:
        int(raw_id)
    except (ValueError, TypeError):
        # Narrowed from a bare `except:` — only int() parse failures should
        # mark a row as bad (a bare except would also hide KeyboardInterrupt
        # and genuine bugs).
        error_ids.append(i)
user_df = user_df.drop(user_df.index[error_ids])
user_df.to_csv("processed_data/prj_user.csv", index=False)
|
import gc
from twisted.trial.unittest import TestCase
from axiom.store import Store
from axiom.upgrade import registerUpgrader, registerAttributeCopyingUpgrader
from axiom.item import Item, declareLegacyItem
from axiom.attributes import integer, reference
from axiom.errors import BrokenReference, DeletionDisallowed
class Referee(Item):
    """Target item that the *Referent test items point at via reference()."""
    schemaVersion = 1
    typeName = "test_reference_referee"
    # Arbitrary payload used to verify a reference resolves to the right row.
    topSecret = integer()
class SimpleReferent(Item):
    """Holds a plain reference() with the default whenDeleted behaviour."""
    schemaVersion = 1
    typeName = "test_reference_referent"
    ref = reference()
class BreakingReferent(Item):
    """Reference that is set to None when its target is deleted (NULLIFY)."""
    schemaVersion = 1
    typeName = "test_reference_breaking_referent"
    ref = reference(whenDeleted=reference.NULLIFY)
class DependentReferent(Item):
    """Deleted together with its Referee target (CASCADE)."""
    ref = reference(whenDeleted=reference.CASCADE, reftype=Referee)
class DisallowReferent(Item):
    """Blocks deletion of its Referee target (DISALLOW)."""
    ref = reference(whenDeleted=reference.DISALLOW, reftype=Referee)
class BadReferenceTestCase(TestCase):
    """
    Tests for reference() attributes whose targets are deleted: the
    whenDeleted policies (NULLIFY / CASCADE / DISALLOW), transaction
    rollback, and references involving legacy (upgradeable) items.
    """
    # Number of create/verify/delete rounds performed by testSanity.
    ntimes = 10

    def testSanity(self):
        # Create one referent/referee pair, check the reference resolves to
        # the right target, then delete both; repeat so no state leaks
        # between rounds.
        store = Store()
        for i in xrange(self.ntimes):  # NOTE(review): xrange — Python 2 code
            SimpleReferent(store=store, ref=Referee(store=store, topSecret=i))
            (referee,) = list(store.query(Referee))
            (referent,) = list(store.query(SimpleReferent))
            self.assertEqual(referent.ref.topSecret, referee.topSecret)
            referee.deleteFromStore()
            referent.deleteFromStore()

    def testBadReferenceNone(self):
        """
        Test that accessing a broken reference on an Item that has already been
        loaded into memory correctly nullifies the attribute.
        """
        store = Store()
        referee = Referee(store=store, topSecret=0)
        referent = SimpleReferent(store=store, ref=referee)
        referee.deleteFromStore()
        # Drop the in-memory referee so the next access must re-resolve.
        referee = None
        gc.collect()
        (referent,) = list(store.query(SimpleReferent))
        self.assertEqual(referent.ref, None)

    def testBadReferenceNoneLoading(self):
        """
        Test that accessing a broken reference on an Item that has not yet been
        loaded correctly nullifies the attribute.
        """
        store = Store()
        referee = Referee(store=store, topSecret=0)
        referent = SimpleReferent(store=store, ref=referee)
        referee.deleteFromStore()
        # Drop both items so the referent itself is re-loaded by the query.
        referee = None
        referent = None
        gc.collect()
        (referent,) = list(store.query(SimpleReferent))
        self.assertEqual(referent.ref, None)

    def test_brokenReferenceException(self):
        """
        Test that an exception is raised when a broken reference is detected
        when this should be impossible (ie. CASCADE or NULLIFY).
        """
        store = Store()
        referee = Referee(store=store, topSecret=0)
        referent = BreakingReferent(store=store, ref=referee)
        referee.deleteFromStore()
        referent = None
        gc.collect()
        referent = store.findFirst(BreakingReferent)
        # Pretend the attribute was CASCADE all along; the dangling row must
        # then surface as a BrokenReference instead of a silent None.
        self.patch(BreakingReferent.ref, 'whenDeleted', reference.CASCADE)
        self.assertRaises(BrokenReference, lambda: referent.ref)

    def testBadReferenceNoneRevert(self):
        # A deletion rolled back by a failing transaction must restore the
        # reference, both on the live objects and after a reload.
        store = Store()
        referee = Referee(store=store, topSecret=0)
        referent = SimpleReferent(store=store, ref=referee)
        def txn():
            referee.deleteFromStore()
            self.assertEqual(referent.ref, None)
            # Force the transaction to fail and roll back.
            1 / 0
        self.assertRaises(ZeroDivisionError, store.transact, txn)
        self.assertEqual(referent.ref, referee)
        referent = None
        referee = None
        gc.collect()
        referent = store.findUnique(SimpleReferent)
        referee = store.findUnique(Referee)
        self.assertEqual(referent.ref, referee)

    def testBrokenReferenceDisallow(self):
        """
        Test that deleting an item referred to by a whenDeleted == DISALLOW
        reference raises an exception.
        """
        store = Store()
        referee = Referee(store=store, topSecret=0)
        referent = DisallowReferent(store=store, ref=referee)
        self.assertRaises(DeletionDisallowed, referee.deleteFromStore)
        self.assertRaises(DeletionDisallowed, store.query(Referee).deleteFromStore)

    def testReferenceQuery(self):
        # Joining a reference column against storeID matches nothing when no
        # referent rows exist.
        store = Store()
        referee = Referee(store=store, topSecret=0)
        self.assertEqual(
            list(store.query(SimpleReferent,
                             SimpleReferent.ref == Referee.storeID)),
            [])

    def testReferenceDeletion(self):
        # CASCADE: deleting the referee also deletes the dependent item.
        store = Store()
        referee = Referee(store=store, topSecret=0)
        dep = DependentReferent(store=store,
                                ref=referee)
        sid = dep.storeID
        self.assertIdentical(store.getItemByID(sid), dep) # sanity
        referee.deleteFromStore()
        self.assertRaises(KeyError, store.getItemByID, sid)

    def testBatchReferenceDeletion(self):
        """
        Test that batch deletion removes dependent items correctly.
        """
        store = Store()
        referee = Referee(store=store, topSecret=0)
        dep = DependentReferent(store=store,
                                ref=referee)
        sid = dep.storeID
        store.query(Referee).deleteFromStore()
        self.assertRaises(KeyError, store.getItemByID, sid)

    def test_dummyItemReference(self):
        """
        Getting the value of a reference attribute which has previously been
        set to a legacy item results in an instance of the most recent type for
        that item.
        """
        store = Store()
        referent = SimpleReferent(store=store)
        oldReferee = nonUpgradedItem(store=store)
        referent.ref = oldReferee
        newReferee = referent.ref
        self.assertTrue(
            isinstance(newReferee, UpgradedItem),
            "{!r} was instance of {!r}, expected {!r}".format(newReferee,
                                                              type(newReferee),
                                                              UpgradedItem))

    def test_dummyItemReferenceUpgraded(self):
        """
        Getting the value of a reference attribute which has been set to a
        legacy item, which is then upgraded while the reference is "live",
        results in an instance of the most recent type for that item.
        """
        store = Store()
        referent = SimpleReferent(store=store)
        oldReferee = nonUpgradedItem2(store=store)
        referent.ref = oldReferee
        # Manually run the upgrader on this specific legacy item. This is the
        # same as if the SimpleReferent item had been created in an upgrader
        # for UpgradedItem, except that we can keep a strong reference to
        # oldReferee to ensure it is not garbage collected (this would
        # otherwise happen nondeterministically on platforms like PyPy).
        newReferee = item2to3(oldReferee)
        self.assertIsInstance(newReferee, UpgradedItem)
        self.assertIdentical(referent.ref, newReferee)

    def test_dummyItemReferenceInUpgrade(self):
        """
        Setting the value of a reference attribute to a legacy item during an
        upgrade results in the same value being set on the upgraded item.
        """
        store = Store()
        def tx():
            oldReferent = nonUpgradedItem(store=store)
            oldReferee = nonUpgradedItem(store=store)
            newReferent = oldReferent.upgradeVersion(
                UpgradedItem.typeName, 1, 2)
            newReferee = oldReferee.upgradeVersion(
                UpgradedItem.typeName, 1, 2, ref=newReferent)
            self.assertIdentical(newReferee.ref, newReferent)
        store.transact(tx)

    def test_dummyItemGetItemByID(self):
        """
        Instantiating a dummy item and then getting it by its storeID should
        upgrade it.
        """
        store = Store()
        t = nonUpgradedItem(store=store)
        self.assertEquals(t.__legacy__, True)
        # The legacy instance must not already be cached under its storeID.
        self.assertRaises(KeyError, store.objectCache.get, t.storeID)
        t2 = store.getItemByID(t.storeID)
        self.assertNotIdentical(t, t2)
        self.assertTrue(isinstance(t2, UpgradedItem))
class UpgradedItem(Item):
    """
    A simple item which is the current version of L{nonUpgradedItem}.
    """
    schemaVersion = 3
    # Reference attribute carried through all schema versions (1 -> 2 -> 3).
    ref = reference()
# Version-1 and version-2 legacy declarations of UpgradedItem; instantiating
# these creates "dummy" legacy rows which must be upgraded on access.
nonUpgradedItem = declareLegacyItem(
    UpgradedItem.typeName, 1,
    dict(ref=reference()))

nonUpgradedItem2 = declareLegacyItem(
    UpgradedItem.typeName, 2,
    dict(ref=reference()))

# The 1 -> 2 upgrade just copies attributes across unchanged.
registerAttributeCopyingUpgrader(UpgradedItem, 1, 2)
def item2to3(old):
    """
    Upgrade a version-2 legacy item to the current UpgradedItem (version 3),
    carrying its `ref` attribute across.
    """
    upgraded = old.upgradeVersion(UpgradedItem.typeName, 2, 3, ref=old.ref)
    return upgraded

registerUpgrader(item2to3, UpgradedItem.typeName, 2, 3)
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: vraj@reciprocitylabs.com
# Maintained By: vraj@reciprocitylabs.com
from ggrc import db
from .mixins import Identifiable, created_at_args
class Event(Identifiable, db.Model):
    """Row in the 'events' table: records an HTTP method, the resource it
    touched, the acting person, and the revisions produced."""
    __tablename__ = 'events'

    # Every event must name its actor.
    person_id = db.Column(db.Integer, db.ForeignKey('people.id'), nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, **created_at_args())
    # Request metadata and the affected resource.
    http_method = db.Column(db.String, nullable=False)
    resource_id = db.Column(db.Integer, nullable=False)
    resource_type = db.Column(db.String, nullable=False)
    # Eagerly loaded because the revisions are always needed alongside the event.
    events = db.relationship('Revision', backref='event', lazy='subquery')
    person = db.relationship('Person')
|
# Try and Except Blocks
#
# Example 1: without try/except, dividing by zero raises an uncaught
# ZeroDivisionError and execution stops. Shown commented out — the bare
# `Example 1` line was a SyntaxError and the live `5 / 0` would prevent the
# examples below from ever running. Uncomment to see the traceback:
# result = 5 / 0
# print(result)
# Example 2
# Catching the specific exception type lets the program continue running.
try:
    result = 5 / 0
    print(result)
except ZeroDivisionError:
    print("Cannot divide by zero")
# Example 3
# Binding the exception with "as" gives access to Python's own message.
try:
    print(unknown_variable)
except NameError as error:
    print("An error occurred, this is the detailed error message from Python: " + str(error))
# Example 4
# Multiple except clauses: the first matching one runs. The division fails
# first, so the ZeroDivisionError branch is taken and the NameError line is
# never reached; `except Exception` is the catch-all fallback.
try:
    result = 5 / 0
    print(unknown_variable)
except NameError as error:
    print(error)
except ZeroDivisionError as error:
    print(error)
except Exception:
    print("An error happened :(")
|
from rest_framework import routers
from libs.components.permissions.resources import AppResources
from library.v1 import views as library_views
# Declare version 1 resources for our app 'library'
app_resources = AppResources(1, 'library')
create_resource = app_resources.create_resource
# Define a resource for books using the BookViewSet
app_resources.books = create_resource(
    viewset=library_views.BookViewSet,
)
# Define a resource for checkouts using the CheckoutViewSet
# (fixed copy-paste comment: this resource is checkouts, not books)
app_resources.checkouts = create_resource(
    viewset=library_views.CheckoutViewSet,
)
# Default router configuration and iteration that occurs at the bottom of each urls.py file
# This handles registering each resource from above with their specified viewset
router = routers.DefaultRouter(trailing_slash=False)
for resource in app_resources.get_resources():
    router.register(resource.noun, resource.viewset)
urlpatterns = router.urls
from functools import wraps
from flask import request, jsonify, abort
from app import app
def check_key(key, ip):
    """
    Summary: Check API Key and IP.
    Author: Hans Husurianto

    @param key: API Key to check.
    @param ip: IP of the requester.
    @return: boolean — True only when both the key and the IP match the
             application configuration.
    """
    # Missing inputs can never authenticate.
    if key is None or ip is None:
        return False
    expected_key = app.config['API_KEY']
    if expected_key is None:
        # No key configured means nothing can match.
        return False
    return expected_key == key and ip == app.config['IP']
def requires_auth(f):
    """
    Summary: Decorator to check API Key and IP. Bypassed entirely in DEBUG.
    @param f: flask function
    @return: decorator, return the wrapped function
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        # Use .get() so a request missing the 'key' header produces the
        # intended 401 below rather than an unhandled KeyError (HTTP 500/400).
        if app.config["DEBUG"] or check_key(request.headers.get('key'), request.remote_addr):
            return f(*args, **kwargs)
        else:
            abort(401, "Unauthorized API key. Please check your API key and that you have spelt it correctly.")
    return decorated
def devEnvironment(f):
    """
    Summary: Decorator to check if the app is in development environment.
    @param f: flask function
    @return: decorator, return the wrapped function
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        # Guard clause: reject outright unless running in DEBUG mode.
        if not app.config["DEBUG"]:
            abort(403, "Forbidden. This endpoint is not available in production environment.")
        return f(*args, **kwargs)
    return decorated
import numpy as np
import cv2
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from numpy import dot, sqrt
from numpy.linalg import norm, inv
from scipy.optimize import least_squares
from scipy.optimize import minimize
from scipy.linalg import rq
from src.sfm_pipeline.axis_angle import (rotation_mat_2_angle_axis, \
rotate_points_axis_angle)
import time
def unpack_func_result(result, ncameras, N):
    """Split a flat optimizer vector back into motion and structure arrays.

    Inverse of `pack_func_args` (both use Fortran/column-major ordering).

    Args:
        result: 1-D array of length ncameras*6 + N*3.
        ncameras: number of cameras.
        N: number of 3-D points.

    Returns:
        tuple: motion of shape (ncameras, 3, 2) — axis-angle rotation in
        [:, :, 0], translation in [:, :, 1] — and structure of shape (N, 3).
    """
    mot_slice = ncameras * 3 * 2
    motion = np.reshape(result[0:mot_slice], (ncameras, 3, 2), order='F')
    structure = np.reshape(result[mot_slice:], (N, 3), order='F')
    return motion, structure
def pack_func_args(motion, structure):
    """Flatten motion and structure into a single optimizer vector.

    Inverse of `unpack_func_result`; both arrays are flattened in
    Fortran/column-major order and concatenated (motion first).

    Args:
        motion: array of shape (ncameras, 3, 2).
        structure: array of 3-D points.

    Returns:
        1-D concatenated parameter vector.
    """
    motion_vec = motion.flatten(order='F')
    structure_vec = structure.flatten(order='F')
    return np.concatenate((motion_vec, structure_vec))
def unpack_graph(graph):
    """Extract motion (axis-angle + translation), structure, focal length and
    problem sizes from a reconstruction graph.

    Returns:
        tuple: (motion_aa, structure, f, ncameras, N) where motion_aa has
        shape (ncameras, 3, 2) — axis-angle in column 0, translation in
        column 1.
    """
    ncameras = len(graph.frame_idxs)
    N = graph.kpts.shape[1]
    structure = graph.structure
    # Convert each camera's rotation matrix to a 3-vector axis-angle form.
    motion_aa = np.zeros((ncameras, 3, 2))
    for cam in range(ncameras):
        rotation = graph.motion[cam, :, :-1]
        translation = graph.motion[cam, :, -1]
        motion_aa[cam, :, 0] = rotation_mat_2_angle_axis(rotation)
        motion_aa[cam, :, 1] = translation
    return motion_aa, structure, graph.f, ncameras, N
def reprojection_residuals(kpts_idxs, kpts, motion, structure, f, px, py):
    """Compute reprojection residuals for every camera/point observation.

    For each camera the 3-D points are rotated (axis-angle), translated,
    projected with focal length `f` and principal point (px, py), and
    compared against the observed keypoints.

    Returns:
        1-D array: stacked (dx, dy) residuals, flattened row-wise.
    """
    # Seed with an empty (0, 2) array so zero cameras yields an empty result.
    per_camera = [np.zeros((0, 2))]
    for cam in range(motion.shape[0]):
        axis_angle = motion[cam, :, 0]
        translation = motion[cam, :, 1]
        rotated = rotate_points_axis_angle(axis_angle, structure.T)
        cam_x = rotated[0, :] + translation[0]
        cam_y = rotated[1, :] + translation[1]
        cam_z = rotated[2, :] + translation[2]
        # Perspective projection into pixel coordinates.
        x_hat = f * (cam_x / cam_z) + px
        y_hat = f * (cam_y / cam_z) + py
        residual = np.zeros((len(x_hat), 2))
        residual[:, 0] = x_hat - kpts[cam, :, 0]
        residual[:, 1] = y_hat - kpts[cam, :, 1]
        per_camera.append(residual)
    return np.vstack(per_camera).flatten()
def error_SSD(residuals):
    """Scalar reprojection error: 2 * ||residuals||_2 / len(residuals).

    Args:
        residuals: 1-D residual vector (as returned by
            `reprojection_residuals`).

    Returns:
        float: the error score.
    """
    return 2 * sqrt(np.sum(residuals ** 2, axis=0)) / len(residuals)
def objective_func(mot_str_vec, kpts_idxs, kpts, ncameras, N, f, px, py):
    """Residual callback for `scipy.optimize.least_squares`.

    Unpacks the flat parameter vector into motion/structure arrays and
    returns the corresponding reprojection residuals.

    Args:
        mot_str_vec: flat parameter vector (see `pack_func_args`).
        kpts_idxs, kpts: keypoint indices and observations.
        ncameras, N: problem sizes.
        f, px, py: focal length and principal point.

    Returns:
        1-D residual vector.
    """
    motion, structure = unpack_func_result(mot_str_vec, ncameras, N)
    return reprojection_residuals(kpts_idxs, kpts, motion, structure, f, px, py)
def bundle_adjustment(graph, px=0, py=0):
    """Refine camera motion and 3-D structure by Levenberg-Marquardt
    minimization of the reprojection residuals.

    Args:
        graph: reconstruction graph providing kpts_idxs, kpts, motion,
            structure and focal length f (see `unpack_graph`).
        px (int, optional): principal point x. Defaults to 0.
        py (int, optional): principal point y. Defaults to 0.

    Returns:
        The input graph.

    NOTE(review): the optimized `motion`/`structure` from the solver are
    unpacked and used only for the final error printout — they are never
    written back into `graph`, which is returned unchanged. Confirm whether
    that is intentional.
    """
    start_time = time.time()
    motion, structure, f, ncameras, Npts = unpack_graph(graph)
    print('structure shape: ', structure.shape)
    # NOTE(review): structure is transposed here but passed untransposed after
    # optimization below, while pack/unpack assume an (N, 3) layout — verify
    # the intended orientation of graph.structure.
    residuals = reprojection_residuals(graph.kpts_idxs, graph.kpts, motion, structure.T, f, px, py)
    print('residuals shape: ', residuals.shape)
    print('reprojection error before: ', error_SSD(residuals))
    mot_str_vec = pack_func_args(motion, structure)
    print('mot_str_vec: ', mot_str_vec.shape)
    fun = lambda x : objective_func(x, graph.kpts_idxs, graph.kpts, ncameras, Npts, f, px, py)
    # Levenberg-Marquardt on the packed motion+structure vector.
    result = least_squares(fun=fun, x0=mot_str_vec, method='lm',
                           ftol=1e-08, xtol=1e-08, gtol=1e-08,
                           max_nfev=1000, verbose=2) #initial_guess jac='2-point', args=(graph.kpts_idxs, graph.kpts, ncameras, Npts, f, px, py)
    motion, structure = unpack_func_result(result['x'], ncameras, Npts)
    print('structure shape: ', structure.shape)
    residuals = reprojection_residuals(graph.kpts_idxs, graph.kpts, motion, structure, f, px, py)
    print('reprojection error final: ', error_SSD(residuals))
    print("Time since optimization start", time.time() - start_time)
    return graph
def adjust_focal_length(graph):
    """Return the graph's focal length unchanged (no refinement performed)."""
    return graph.f
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.