import csv
import json
from itertools import zip_longest
with open('../energy_detectors/data/new_stackoverflow_data.json') as f:
stackoverflow_data = json.load(f)
stackoverflow_url = [item.get('url') for item in stackoverflow_data]
stackoverflow_question = [item.get('post_content')
for item in stackoverflow_data]
stackoverflow_answer = [item.get('answer') for item in stackoverflow_data]
stackoverflow_qcode = [item.get('question_code')
for item in stackoverflow_data]
stackoverflow_acode = [item.get('answer_code') for item in stackoverflow_data]
stackoverflow_title = [item.get('title') for item in stackoverflow_data]
stackoverflow_id = []
stackoverflow_battery = []
stackoverflow_energy = []
stackoverflow_sustain = []
stackoverflow_power = []
stackoverflow_green = []
stackoverflow_question_new = []
stackoverflow_answer_new = []
stackoverflow_question_code_new = []
stackoverflow_answer_code_new = []
collection_name = []
raw_contents = []
for i in range(len(stackoverflow_url)):
y = "SO" + str(i)
stackoverflow_id.append(y)
for i in range(len(stackoverflow_url)):
collection_name.append("StackOverflow")
for questions in stackoverflow_question:
questions = ''.join(questions)
stackoverflow_question_new.append(questions)
for answers in stackoverflow_answer:
answers = ''.join(answers)
stackoverflow_answer_new.append(answers)
for qcode in stackoverflow_qcode:
try:
qcode = ''.join(qcode)
stackoverflow_question_code_new.append(qcode)
except TypeError:
qcode = ''
stackoverflow_question_code_new.append(qcode)
for acode in stackoverflow_acode:
try:
acode = ''.join(acode)
stackoverflow_answer_code_new.append(acode)
except TypeError:
acode = ''
stackoverflow_answer_code_new.append(acode)
# print(len(stackoverflow_question_new))
# print(len(stackoverflow_answer_new))
# print(len(stackoverflow_question_code_new))
# print(len(stackoverflow_answer_code_new))
for i in range(32):  # NOTE: only the first 32 posts are concatenated and scanned below
    # join the fields with spaces so words are not glued together across field boundaries
    rcontents = (stackoverflow_question_new[i] + ' ' + stackoverflow_question_code_new[i]
                 + ' ' + stackoverflow_answer_new[i] + ' ' + stackoverflow_answer_code_new[i])
    raw_contents.append(rcontents)
power_keyword = 'power'
battery_keyword = 'battery'
energy_keyword = 'energy'
sustain_keyword = 'sustainab'
green_keyword = 'green'
raw_contents_final = []
for rc in raw_contents:
if (power_keyword in rc):
a, b = rc.split(power_keyword, 1)
a = a[-45:]
b = b[0:45]
power_string = a + power_keyword + b
raw_contents_final.append(power_string)
elif (battery_keyword in rc):
a, b = rc.split(battery_keyword, 1)
a = a[-45:]
b = b[0:45]
battery_string = a + battery_keyword + b
raw_contents_final.append(battery_string)
elif (energy_keyword in rc):
a, b = rc.split(energy_keyword, 1)
a = a[-45:]
b = b[0:45]
energy_string = a + energy_keyword + b
raw_contents_final.append(energy_string)
elif (sustain_keyword in rc):
a, b = rc.split(sustain_keyword, 1)
a = a[-45:]
b = b[0:45]
sustain_string = a + sustain_keyword + b
raw_contents_final.append(sustain_string)
elif (green_keyword in rc):
a, b = rc.split(green_keyword, 1)
a = a[-45:]
b = b[0:45]
green_string = a + green_keyword + b
raw_contents_final.append(green_string)
for battery in raw_contents:
b = battery.count('batter')
stackoverflow_battery.append(b)
for power in raw_contents:
p = power.count('power')
stackoverflow_power.append(p)
for energy in raw_contents:
e = energy.count('energy')
stackoverflow_energy.append(e)
for sustainab in raw_contents:
s = sustainab.count('sustainab')
stackoverflow_sustain.append(s)
for green in raw_contents:
g = green.count('green')
stackoverflow_green.append(g)
stackoverflow_list = [stackoverflow_id,
stackoverflow_url,
collection_name,
stackoverflow_title,
raw_contents_final,
stackoverflow_battery,
stackoverflow_energy,
stackoverflow_power,
stackoverflow_sustain,
stackoverflow_green
]
export_data = zip_longest(*stackoverflow_list, fillvalue='')
# the context manager closes the file, so no explicit close() is needed
with open('data/energy_data_new.csv', 'a', newline='') as myfile:
    wr = csv.writer(myfile)
    wr.writerows(export_data)
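# Illustrative helper (not part of the original pipeline): a reusable version of
# the context-window extraction above. It returns 45 characters on either side
# of the first keyword found, or None when no keyword occurs; keyword priority
# and window size mirror the script's behaviour.
def keyword_context(text, keywords=('power', 'battery', 'energy', 'sustainab', 'green'), window=45):
    for kw in keywords:
        if kw in text:
            before, after = text.split(kw, 1)
            return before[-window:] + kw + after[:window]
    return None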
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-05 16:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('driver', '0003_location_vehicle'),
]
operations = [
migrations.AlterField(
model_name='driverprofile',
name='profpic',
field=models.ImageField(blank=True, upload_to='driver-profpic/'),
),
]
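# Illustrative note (not part of the generated file): a migration like this is
# typically produced with `python manage.py makemigrations driver` and applied
# with `python manage.py migrate driver`. The AlterField operation redefines
# driverprofile.profpic as an optional ImageField uploading to 'driver-profpic/'.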
|
from kazoo.retry import (
KazooRetry,
RetryFailedError,
ForceRetryError
)
from kazoo.client import KazooClient
import json
import functools
import logging
logging.basicConfig()
class TwoPCState(object):
BEGIN = 'begin'
PREPARE = 'prep'
COMMIT = 'commit'
ACK_COMMIT = 'ack_commit'
ABORT = 'abort'
ACK_ABORT = 'ack_abort'
STATUSES = (BEGIN, PREPARE, COMMIT, ACK_COMMIT, ABORT, ACK_ABORT)
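# Rough flow of the protocol as implemented below (illustrative summary, not
# original documentation): the Coordinator waits until every cohort member has
# registered an ephemeral znode under the shared tx path, publishes the query
# with state BEGIN, and watches the per-member znodes for their votes. It
# decides COMMIT only if no member reported ABORT, publishes the decision, and
# then waits for the members' ACK_COMMIT / ACK_ABORT acknowledgements.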
class TwoPCConstraintError(Exception):
    pass
class Coordinator(object):
tx_path = '/zone'
def __init__(self, client, nodes, path, query):
self.client = client
self.path = path
self.tx_path = self.path + '/' + 'tx'
self.query = str(query).encode('utf-8')
self.wake_event = client.handler.event_object()
self.commit_evt = None
self.tx_created = False
self.nodes = nodes
self.intermediate_results = [None] * len(nodes)
def begin_2pc(self):
self.cnt = 0
threshold = len(self.nodes)
self._inner_2pc(self.nodes, threshold)
self._clean_everything()
def _inner_2pc(self, nodes, threshold):
# initial
self.client.ensure_path(self.path)
if not self.client.exists(self.tx_path):
self.client.create(self.tx_path)
self.wait_for_cohort(nodes, threshold)
self.reset_state(nodes)
# prepare (phase 1)
print '------------PREPARE------------'
print 'All parties are ready. Begin transaction'
self.client.set(self.tx_path, json.dumps({
'query': self.query,
'state': TwoPCState.BEGIN}))
self.wake_event.wait()
# commit (phase 2)
print '------------COMMIT------------'
        print 'All parties have executed the transaction.', self.intermediate_results
decision = self._make_decision(self.intermediate_results)
print 'Coordinator decided', decision
self.client.set(self.tx_path, json.dumps({
'status': decision}))
self.reset_state(nodes)
print '------------COMMIT ACK------------'
self.wake_event.wait()
print 'Coordinator finished', self.intermediate_results
self.reset_state(nodes)
def reset_state(self, nodes):
self.wake_event.clear()
self.cnt = 0
self.intermediate_results = []
self._register_watchers(nodes)
def _make_decision(self, results):
raw_results = list(r['status'] for r in results)
print raw_results
try:
raw_results.index(TwoPCState.ABORT)
except ValueError:
return TwoPCState.COMMIT
else:
return TwoPCState.ABORT
def _register_watchers(self, nodes):
for node in nodes:
node_path = self.tx_path + '/' + node
fn = functools.partial(self._on_node_tx_status, node_path)
self.client.get(node_path, fn)
def _on_node_tx_status(self, node_path, evt):
if evt.type == 'CHANGED':
value = json.loads(self.client.get(node_path)[0])
if value['status'] in TwoPCState.STATUSES:
self.cnt += 1
self.intermediate_results.append(value)
if self.cnt == len(self.nodes):
self.wake_event.set()
def wait_for_cohort(self, nodes, threshold):
self.wake_event.clear()
self.cnt = 0
def on_node_presence(node_path, state):
print state
if state.type == 'CREATED':
self.cnt += 1
elif state.type == 'DELETED':
fn = functools.partial(on_node_presence, node_path)
self.client.exists(node_path, fn)
if self.cnt == threshold:
self.wake_event.set()
for node in nodes:
node_path = self.tx_path + '/' + node
on_node_create_or_delete = functools.partial(
on_node_presence, node_path)
self.client.exists(node_path, on_node_create_or_delete)
print 'Waiting'
self.wake_event.wait()
self.cnt = 0
return True
def _clean_everything(self):
self.client.delete(self.path, recursive=True)
# print self.clinet.delete(self.path, )
class Member(object):
def __init__(self, client, path, name):
self.client = client
self.path = path
self.tx_path = self.path + '/' + 'tx'
self.prefix = name
self.create_path = self.tx_path + '/' + self.prefix
self.create_tried = False
self.wake_up = self.client.handler.event_object()
self.initialize()
def initialize(self):
self.client.ensure_path(self.path)
self.wake_up.clear()
def on_changed_presence(evt):
if evt.type == 'DELETED':
self.wake_up.set()
if not self.client.exists(self.create_path, on_changed_presence):
node = self.client.create(self.create_path, ephemeral=True)
else:
self.wake_up.wait()
node = self.client.create(self.create_path, ephemeral=True)
self.wake_up.clear()
self.client.get(self.tx_path, self._on_new_tx)
print node
def two_pc(self):
self.wake_up.wait()
self.wake_up.clear()
self.client.get(self.tx_path, self._on_new_tx)
print '------------PREPARE------------'
print 'Begin transaction.', self.data
rv = eval(self.data['query'])
print 'Transaction calculated. answer is', rv
self.client.set(self.create_path, json.dumps({
'result': rv,
'status': TwoPCState.ABORT}))
self.wake_up.wait()
self.wake_up.clear()
print "------------COMMIT------------", self.data
if self.data['status'] == TwoPCState.COMMIT:
self.client.set(self.create_path, json.dumps({
'status': TwoPCState.ACK_COMMIT}))
print 'acknowledging'
print '------------TRANSACTION FINISHED------------'
elif self.data['status'] == TwoPCState.ABORT:
self.client.set(self.create_path, json.dumps({
'status': TwoPCState.ACK_ABORT}))
print 'acknowledging'
print '------------TRANSACTION FAILED--------------'
start = two_pc
def _on_new_tx(self, evt):
data = ''
if evt.type == 'CHANGED':
data = self.client.get(self.tx_path, self._on_tx_changed)
self.data = json.loads(data[0])
self.wake_up.set()
def _on_tx_changed(self, evt):
if evt.type == 'CHANGED':
data = self.client.get(self.create_path, self._on_tx_changed)
print data
if __name__ == '__main__':
main_cli = KazooClient(hosts='znode1.zcluster.com:2181,znode2.zcluster.com:2181,znode3.zcluster.com:2181')
main_cli.start()
node_names = [
'1', '2'
]
coord = Coordinator(main_cli, node_names, 'twopc', '1 + 1')
coord.begin_2pc()
|
import requests
import json
import yaml
def getAPIKey():
with open('config.yaml', 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
return cfg['MAPS_API_KEY']
def getTimes(origins, destinations, mode):
key = getAPIKey()
url = "https://maps.googleapis.com/maps/api/distancematrix/json?origins="+origins+"&destinations="+destinations+"&mode="+mode+"&key="+key
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
data = response.json()
results = []
durations = data['rows'][0]['elements']
for duration in durations:
results.append(duration['duration']['value'])
return results
def generateText(time, mode):
mins = time // 60
if mode == "transit":
return "Transit takes " + str(mins) + " minutes."
elif mode == "driving":
return "Driving time is " + str(mins) + " minutes."
elif mode == "walking":
return "Walking time is " + str(mins) + " minutes."
elif mode == "bicycling":
return "Bicycling time is " + str(mins) + " minutes."
def main(addresses, target, city, modes=['driving', 'walking', 'bicycling', 'transit']):
origins = requests.utils.quote(target.strip()+', '+city.strip())
destinationsList = []
for address in addresses:
stripped = address.strip()
withCity = stripped + ", " + city.strip()
destinationsList.append(withCity)
destinations = requests.utils.quote("|".join(destinationsList))
results = {}
for mode in modes:
results[mode] = getTimes(origins, destinations, mode)
output = []
for destNo in range(len(destinationsList)):
time = results[modes[0]][destNo]
mode = modes[0]
for modeNo in range(1,len(modes)):
if results[modes[modeNo]][destNo] < time:
time = results[modes[modeNo]][destNo]
mode = modes[modeNo]
output.append(generateText(time, mode))
return output
def lambda_handler(event, context):
if not 'add' in event or not 'tgt' in event or not 'city' in event or not 'modes' in event:
return {
'statusCode': 200,
'body': json.dumps("Error: Missing parameter for this API.")
}
result = main(event['add'], event['tgt'], event['city'], event['modes'])
#result = "Test"
return {
'statusCode': 200,
'body': {"messages": result}
}
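# Illustrative local test (hypothetical addresses; requires a valid MAPS_API_KEY
# in config.yaml and network access). It exercises lambda_handler the same way
# API Gateway would, then prints the fastest-mode summaries.
if __name__ == '__main__':
    example_event = {
        'add': ['221B Baker Street', '10 Downing Street'],
        'tgt': 'Trafalgar Square',
        'city': 'London',
        'modes': ['driving', 'walking', 'transit'],
    }
    print(lambda_handler(example_event, None))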
|
from google.protobuf import descriptor_pb2
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import reflection as _reflection
from google.protobuf import message as _message
from google.protobuf import descriptor as _descriptor
import sys
_b = sys.version_info[0] < 3 and (
lambda x: x) or (lambda x: x.encode('latin1'))
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='routeguide.proto',
package='fbcrawl',
syntax='proto2',
serialized_pb=_b('\n\x10routeguide.proto\x12\x07\x66\x62\x63rawl\"\x7f\n\x0b\x43ommentItem\x12\x0e\n\x06source\x18\x01 \x02(\t\x12\x10\n\x08reply_to\x18\x02 \x02(\t\x12\x0c\n\x04\x64\x61te\x18\x03 \x02(\x03\x12\x0c\n\x04text\x18\x04 \x02(\t\x12\x11\n\treactions\x18\x05 \x02(\x03\x12\x12\n\nsource_url\x18\x06 \x02(\t\x12\x0b\n\x03url\x18\x07 \x02(\t')
)
_COMMENTITEM = _descriptor.Descriptor(
name='CommentItem',
full_name='fbcrawl.CommentItem',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='fbcrawl.CommentItem.source', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reply_to', full_name='fbcrawl.CommentItem.reply_to', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='date', full_name='fbcrawl.CommentItem.date', index=2,
number=3, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text', full_name='fbcrawl.CommentItem.text', index=3,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reactions', full_name='fbcrawl.CommentItem.reactions', index=4,
number=5, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source_url', full_name='fbcrawl.CommentItem.source_url', index=5,
number=6, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='url', full_name='fbcrawl.CommentItem.url', index=6,
number=7, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=156,
)
DESCRIPTOR.message_types_by_name['CommentItem'] = _COMMENTITEM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CommentItem = _reflection.GeneratedProtocolMessageType('CommentItem', (_message.Message,), dict(
DESCRIPTOR=_COMMENTITEM,
__module__='routeguide_pb2'
# @@protoc_insertion_point(class_scope:fbcrawl.CommentItem)
))
_sym_db.RegisterMessage(CommentItem)
# @@protoc_insertion_point(module_scope)
|
from datetime import datetime
from enum import Enum
import pytest
from monty.json import MSONable
from pydantic import BaseModel, Field
from maggma.api.utils import api_sanitize, serialization_helper
from typing import Union
from bson import ObjectId
class SomeEnum(Enum):
A = 1
B = 2
C = 3
class Pet(MSONable):
def __init__(self, name, age):
self.name = name
self.age = age
class AnotherPet(MSONable):
def __init__(self, name, age):
self.name = name
self.age = age
class AnotherOwner(BaseModel):
    name: str = Field(..., description="Owner name")
    weight_or_pet: Union[float, AnotherPet] = Field(..., title="Owner's weight or Pet")
class Owner(BaseModel):
name: str = Field(..., title="Owner's name")
    age: int = Field(..., title="Owner's Age")
weight: float = Field(..., title="Owner's weight")
last_updated: datetime = Field(..., title="Last updated date for this record")
pet: Pet = Field(..., title="Owner's Pet")
    other: SomeEnum = Field(..., title="An enum?")
def test_api_sanitize():
# Ensure model validation fails
with pytest.raises(ValueError):
Owner()
# This should still fail validation
new_owner = api_sanitize(Owner, fields_to_leave=["Owner.name"])
with pytest.raises(ValueError):
new_owner()
new_owner(name="owner")
    # These constructor calls succeed only because api_sanitize turns the required fields optional
new_owner2 = api_sanitize(Owner)
new_owner() # api_sanitize is in-place
new_owner2()
Owner()
# This should fail type validation for pet
with pytest.raises(Exception):
Owner(pet="fido")
temp_pet_dict = Pet(name="fido", age=3).as_dict()
bad_pet_dict = dict(temp_pet_dict)
del bad_pet_dict["@module"]
del bad_pet_dict["@class"]
# This should fail because of bad data type
with pytest.raises(Exception):
Owner(pet=bad_pet_dict)
assert isinstance(Owner(pet=temp_pet_dict).pet, Pet)
api_sanitize(Owner, allow_dict_msonable=True)
# This should still fail because of bad data type
with pytest.raises(Exception):
Owner(pet=bad_pet_dict)
# This should work
assert isinstance(Owner(pet=temp_pet_dict).pet, dict)
    # This should work even though AnotherPet is inside the Union type
api_sanitize(AnotherOwner, allow_dict_msonable=True)
temp_pet_dict = AnotherPet(name="fido", age=3).as_dict()
assert isinstance(AnotherPet.validate_monty(temp_pet_dict), dict)
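# Illustrative summary (not part of the original tests): as exercised above,
# api_sanitize mutates the model class in place, turning required fields
# optional unless they are listed in fields_to_leave, and passing
# allow_dict_msonable=True lets MSONable-typed fields accept plain dicts
# instead of fully validated instances.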
def test_serialization_helper():
oid = ObjectId("60b7d47bb671aa7b01a2adf6")
assert serialization_helper(oid) == "60b7d47bb671aa7b01a2adf6"
@pytest.mark.xfail
def test_serialization_helper_xfail():
oid = "test"
serialization_helper(oid)
|
def get_mpi_environment():
try:
from mpi4py import MPI
except ModuleNotFoundError:
# mpi4py not installed so it can't be used
return
    if MPI.COMM_WORLD.Get_size() == 1:
        # Only one rank, so MPI is not being used for manager/worker parallelism
        # (a valid MPI run here needs at least two ranks: one manager plus one worker).
        return
nworkers = MPI.COMM_WORLD.Get_size() - 1
is_manager = MPI.COMM_WORLD.Get_rank() == 0
mpi_environment = {'mpi_comm': MPI.COMM_WORLD, 'comms': 'mpi', 'nworkers': nworkers, 'is_manager': is_manager}
return mpi_environment
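# Illustrative usage sketch (assumed caller, not part of the original module):
# branch on whether an MPI launch was actually detected.
if __name__ == '__main__':
    env = get_mpi_environment()
    if env is None:
        print('mpi4py missing or single rank: running without MPI')
    else:
        print('{} workers, this rank is manager: {}'.format(env['nworkers'], env['is_manager']))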
|
"""
This script precomputes statistics for training more efficiently with RL on the PARENT metric.
Be aware that some liberties have been taken with the implementation so that
everything runs smoothly and easily: it is not an exact replica of the original
PARENT metric. For evaluation, continue to use the original implementation!
"""
import itertools, collections, json
import argparse
import os
def nwise(iterable, n):
    """Yield overlapping n-grams (windows of size n) from iterable."""
    iterables = itertools.tee(iterable, n)
    # advance the i-th copy by i positions so that zipping yields sliding windows
    [next(iterables[i]) for i in range(n) for j in range(i)]
    return zip(*iterables)
def ngram_counts(sequence, order):
"""Returns count of all ngrams of given order in sequence."""
if len(sequence) < order:
return collections.Counter()
return collections.Counter(nwise(sequence, order))
def overlap_probability(ngram, table_values):
return len(table_values.intersection(ngram)) / len(ngram)
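# Quick worked example (illustrative): for the trigram ('john', 'was', 'born')
# with table_values == {'john', 'born', '1990'}, two of the three n-gram tokens
# appear in the table, so overlap_probability returns 2 / 3.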
def load_tables(dataset, setname):
tables_filename = os.path.join(dataset, f"{setname}_tables.jl")
with open(tables_filename, encoding="utf8", mode="r") as tables_file:
tables = [json.loads(line) for line in tables_file]
return tables
def load_refs(dataset, setname):
    refs_filename = os.path.join(dataset, f"{setname}_output.txt")
with open(refs_filename, encoding="utf8", mode="r") as refs_file:
refs = [line.strip().split(" ")
for line in refs_file if line.strip()]
return refs
def serialize_stats(tv, rnc, rnw):
    """Convert the stats (sets and Counters keyed by token tuples) into JSON-serialisable structures."""
    tv = [list(s) for s in tv]
    rnc = [{order: {' '.join(ngram): count for ngram, count in counter.items()}
            for order, counter in counts.items()}
           for counts in rnc]
    rnw = [{order: {' '.join(ngram): weight for ngram, weight in per_order.items()}
            for order, per_order in weights.items()}
           for weights in rnw]
    return tv, rnc, rnw
def main(dataset):
references = load_refs(dataset, setname='train')
tables = load_tables(dataset, setname='train')
if dataset == 'wikibio':
TABLE_VALUES = [{tok for _, value in table for tok in value} for table in tables]
else:
TABLE_VALUES = [{tok for head, _, tail in table for tok in head + tail} for table in tables]
REF_NGRAM_COUNTS = [{order: ngram_counts(ref, order)
for order in range(1, 5)}
for ref in references]
REF_NGRAM_WEIGHTS = [
{
order: {ngram: overlap_probability(ngram, table_values)
for ngram in counter}
for order, counter in ref_counts_at_k.items()
}
for ref_counts_at_k, table_values in zip(REF_NGRAM_COUNTS, TABLE_VALUES)
]
tv, rnc, rnw = serialize_stats(TABLE_VALUES,
REF_NGRAM_COUNTS,
REF_NGRAM_WEIGHTS)
path = f'{dataset}/TABLE_VALUES.json'
with open(path, encoding="utf8", mode="w") as f:
json.dump(tv, f)
path = f'{dataset}/REF_NGRAM_COUNTS.json'
with open(path, encoding="utf8", mode="w") as f:
json.dump(rnc, f)
path = f'{dataset}/REF_NGRAM_WEIGHTS.json'
with open(path, encoding="utf8", mode="w") as f:
json.dump(rnw, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', '-d', dest='dataset', default="webnlg",
choices=['wikibio', 'webnlg'])
args = parser.parse_args()
main(args.dataset)
|
N_K = input().split()
K = int(N_K[1])
words = input().split()
words_list = []  # avoid shadowing the built-in name `list`
for x in words:
    words_list.append(x)
current_line = ""
output = ""
current_line_length = 0
index = 0
for x in words_list:
    if index == 0:
        current_line = current_line + x
        current_line_length += len(x)
    elif current_line_length + len(x) <= K:
        # NOTE: the separating space is not counted towards the K-character limit
        current_line = current_line + " " + x
        current_line_length += len(x)
    else:
        output = output + current_line + "\n"
        current_line = x
        current_line_length = len(x)
    index += 1
output = output + current_line
print(output)
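# Illustrative alternative (standard library, assuming spaces should count
# towards the K-character limit): the same greedy wrapping can be done with
#     import textwrap
#     print("\n".join(textwrap.wrap(" ".join(words), width=K)))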
|
#
# See top-level LICENSE.rst file for Copyright information
#
# -*- coding: utf-8 -*-
"""
desispec.pipeline.run
=========================
Tools for running the pipeline.
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
from desiutil.log import get_logger
from .. import io
from ..parallel import (dist_uniform, dist_discrete, dist_discrete_all,
stdouterr_redirected, use_mpi)
from .prod import load_prod
from .db import check_tasks
def run_task(name, opts, comm=None, logfile=None, db=None):
"""Run a single task.
Based on the name of the task, call the appropriate run function for that
task. Log output to the specified file. Run using the specified MPI
communicator and optionally update state to the specified database.
Note: This function DOES NOT check the database or filesystem to see if
the task has been completed or if its dependencies exist. It assumes that
some higher-level code has done that if necessary.
Args:
name (str): the name of this task.
opts (dict): options to use for this task.
comm (mpi4py.MPI.Comm): optional MPI communicator.
logfile (str): output log file. If None, do not redirect output to a
file.
db (pipeline.db.DB): The optional database to update.
Returns:
int: the total number of processes that failed.
"""
from .tasks.base import task_classes, task_type
log = get_logger()
ttype = task_type(name)
nproc = 1
rank = 0
if comm is not None:
nproc = comm.size
rank = comm.rank
if rank == 0:
if (logfile is not None) and os.path.isfile(logfile):
os.remove(logfile)
# Mark task as in progress
if db is not None:
task_classes[ttype].state_set(db=db,name=name,state="running")
failcount = 0
if logfile is None:
# No redirection
if db is None:
failcount = task_classes[ttype].run(name, opts, comm=comm)
else:
failcount = task_classes[ttype].run_and_update(db, name, opts,
comm=comm)
else:
with stdouterr_redirected(to=logfile, comm=comm):
if db is None:
failcount = task_classes[ttype].run(name, opts, comm=comm)
else:
failcount = task_classes[ttype].run_and_update(db, name, opts,
comm=comm)
return failcount
def run_task_simple(name, opts, comm=None):
"""Run a single task with no DB or log redirection.
This a wrapper around run_task() for use without a database and with no
log redirection. See documentation for that function.
Args:
name (str): the name of this task.
opts (dict): options to use for this task.
comm (mpi4py.MPI.Comm): optional MPI communicator.
Returns:
int: the total number of processes that failed.
"""
return run_task(name, opts, comm=comm, logfile=None, db=None)
def run_task_list(tasktype, tasklist, opts, comm=None, db=None, force=False):
"""Run a collection of tasks of the same type.
This function requires that the DESI environment variables are set to
point to the current production directory.
This function first takes the communicator and uses the maximum processes
per task to split the communicator and form groups of processes of
the desired size. It then takes the list of tasks and uses their relative
run time estimates to assign tasks to the process groups. Each process
group loops over its assigned tasks.
If the database is not specified, no state tracking will be done and the
filesystem will be checked as needed to determine the current state.
Only tasks that are ready to run (based on the filesystem checks or the
database) will actually be attempted.
Args:
tasktype (str): the pipeline step to process.
tasklist (list): the list of tasks. All tasks should be of type
"tasktype" above.
opts (dict): the global options (for example, as read from the
production options.yaml file).
comm (mpi4py.Comm): the full communicator to use for whole set of tasks.
db (pipeline.db.DB): The optional database to update.
force (bool): If True, ignore database and filesystem state and just
run the tasks regardless.
Returns:
tuple: the number of ready tasks, and the number that failed.
"""
from .tasks.base import task_classes, task_type
log = get_logger()
nproc = 1
rank = 0
if comm is not None:
nproc = comm.size
rank = comm.rank
# Compute the number of processes that share a node.
procs_per_node = 1
if comm is not None:
import mpi4py.MPI as MPI
nodecomm = comm.Split_type(MPI.COMM_TYPE_SHARED, 0)
procs_per_node = nodecomm.size
# Get the options for this task type.
options = opts[tasktype]
# Get the tasks that still need to be done.
runtasks = None
ntask = None
ndone = None
if rank == 0:
if force:
# Run everything
runtasks = tasklist
ntask = len(runtasks)
ndone = 0
else:
# Actually check which things need to be run.
states = check_tasks(tasklist, db=db)
runtasks = [ x for x in tasklist if states[x] == "ready" ]
ntask = len(runtasks)
ndone = len([ x for x in tasklist if states[x] == "done" ])
log.debug("Number of {} tasks ready to run is {} (total is {})".format(tasktype,len(runtasks),len(tasklist)))
if comm is not None:
runtasks = comm.bcast(runtasks, root=0)
ntask = comm.bcast(ntask, root=0)
ndone = comm.bcast(ndone, root=0)
# Get the weights for each task. Since this might trigger DB lookups, only
# the root process does this.
weights = None
if rank == 0:
weights = [ task_classes[tasktype].run_time(x, procs_per_node, db=db) \
for x in runtasks ]
if comm is not None:
weights = comm.bcast(weights, root=0)
# Now every process has the full list of tasks. Get the max
# number of processes for this task type
taskproc = task_classes[tasktype].run_max_procs(procs_per_node)
if taskproc > nproc:
taskproc = nproc
# If we have multiple processes for each task, split the communicator.
comm_group = comm
comm_rank = None
group = rank
ngroup = nproc
group_rank = 0
if comm is not None:
if taskproc > 1:
ngroup = int(nproc / taskproc)
group = int(rank / taskproc)
group_rank = rank % taskproc
comm_group = comm.Split(color=group, key=group_rank)
comm_rank = comm.Split(color=group_rank, key=group)
else:
comm_group = None
comm_rank = comm
# Now we divide up the tasks among the groups of processes as
# equally as possible.
group_ntask = 0
group_firsttask = 0
if group < ngroup:
# only assign tasks to whole groups
if ntask < ngroup:
if group < ntask:
group_ntask = 1
group_firsttask = group
else:
group_ntask = 0
else:
if ntask <= ngroup:
# distribute uniform in this case
group_firsttask, group_ntask = dist_uniform(ntask, ngroup,
group)
else:
group_firsttask, group_ntask = dist_discrete(weights, ngroup,
group)
# every group goes and does its tasks...
rundir = io.get_pipe_rundir()
logdir = os.path.join(rundir, io.get_pipe_logdir())
failcount = 0
group_failcount = 0
if group_ntask > 0:
log.debug("rank #{} Group number of task {}, first task {}".format(rank,group_ntask,group_firsttask))
for t in range(group_firsttask, group_firsttask + group_ntask):
# For this task, determine the output log file. If the task has
# the "night" key in its name, then use that subdirectory.
# Otherwise, if it has the "pixel" key, use the appropriate
# subdirectory.
tt = task_type(runtasks[t])
fields = task_classes[tt].name_split(runtasks[t])
tasklog = None
if "night" in fields:
tasklogdir = os.path.join(logdir, io.get_pipe_nightdir(),
"{:08d}".format(fields["night"]))
# (this directory should have been made during the prod update)
tasklog = os.path.join(tasklogdir,
"{}.log".format(runtasks[t]))
elif "pixel" in fields:
tasklogdir = os.path.join(logdir, "healpix",
io.healpix_subdirectory(fields["nside"],fields["pixel"]))
# When creating this directory, there MIGHT be conflicts from
# multiple processes working on pixels in the same
# sub-directories...
try :
if not os.path.isdir(os.path.dirname(tasklogdir)):
os.makedirs(os.path.dirname(tasklogdir))
except FileExistsError:
pass
try :
if not os.path.isdir(tasklogdir):
os.makedirs(tasklogdir)
except FileExistsError:
pass
tasklog = os.path.join(tasklogdir,
"{}.log".format(runtasks[t]))
failedprocs = run_task(runtasks[t], options, comm=comm_group,
logfile=tasklog, db=db)
if failedprocs > 1:
group_failcount += 1
failcount = group_failcount
# Every process in each group has the fail count for the tasks assigned to
# its group. To get the total onto all processes, we just have to do an
# allreduce across the rank communicator.
if comm_rank is not None:
failcount = comm_rank.allreduce(failcount)
if db is not None and rank == 0 :
# postprocess the successful tasks
log.debug("postprocess the successful tasks")
states = db.get_states(runtasks)
log.debug("states={}".format(states))
log.debug("runtasks={}".format(runtasks))
with db.cursor() as cur :
for name in runtasks :
if states[name] == "done" :
log.debug("postprocessing {}".format(name))
task_classes[tasktype].postprocessing(db,name,cur)
log.debug("rank #{} done".format(rank))
return ntask, ndone, failcount
def run_task_list_db(tasktype, tasklist, comm=None):
"""Run a list of tasks using the pipeline DB and options.
This is a wrapper around run_task_list which uses the production database
and global options file.
Args:
tasktype (str): the pipeline step to process.
tasklist (list): the list of tasks. All tasks should be of type
"tasktype" above.
comm (mpi4py.Comm): the full communicator to use for whole set of tasks.
Returns:
tuple: the number of ready tasks, and the number that failed.
"""
(db, opts) = load_prod("w")
return run_task_list(tasktype, tasklist, opts, comm=comm, db=db)
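# Illustrative usage sketch (hypothetical task names, not part of desispec):
# inside a configured production, one would typically call something like
#
#     from mpi4py import MPI
#     ntask, ndone, failed = run_task_list_db("preproc", my_task_list,
#                                             comm=MPI.COMM_WORLD)
#
# where my_task_list contains task names of the single type being processed.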
def dry_run(tasktype, tasklist, opts, procs, procs_per_node, db=None,
launch="mpirun -np", force=False):
"""Compute the distribution of tasks and equivalent commands.
This function takes similar arguments as run_task_list() except simulates
the data distribution and commands that would be run if given the specified
number of processes and processes per node.
This can be used to debug issues with the runtime concurrency or the
    actual options that will be passed to the underlying main() entry points
for each task.
This function requires that the DESI environment variables are set to
point to the current production directory.
Only tasks that are ready to run (based on the filesystem checks or the
database) will actually be attempted.
NOTE: Since this function is just informative and for interactive use,
we print information directly to STDOUT rather than logging.
Args:
tasktype (str): the pipeline step to process.
tasklist (list): the list of tasks. All tasks should be of type
"tasktype" above.
opts (dict): the global options (for example, as read from the
production options.yaml file).
procs (int): the number of processes to simulate.
procs_per_node (int): the number of processes per node to simulate.
db (pipeline.db.DB): The optional database to update.
launch (str): The launching command for a job. This is just a
convenience and prepended to each command before the number of
processes.
force (bool): If True, ignore database and filesystem state and just
run the tasks regardless.
Returns:
Nothing.
"""
from .tasks.base import task_classes, task_type
prefix = "DRYRUN: "
# Get the options for this task type.
options = dict()
if tasktype in opts:
options = opts[tasktype]
# Get the tasks that still need to be done.
runtasks = None
if force:
# Run everything
runtasks = tasklist
else:
# Actually check which things need to be run.
states = check_tasks(tasklist, db=db)
runtasks = [ x for x in tasklist if (states[x] == "ready") ]
ntask = len(runtasks)
print("{}{} tasks out of {} are ready to run (or be re-run)".format(prefix,
ntask, len(tasklist)))
sys.stdout.flush()
# Get the weights for each task.
weights = [ task_classes[tasktype].run_time(x, procs_per_node, db=db) \
for x in runtasks ]
# Get the max number of processes for this task type
taskproc = task_classes[tasktype].run_max_procs(procs_per_node)
if taskproc > procs:
print("{}task type '{}' can use {} processes per task. Limiting "
" this to {} as requested".format(prefix, tasktype, taskproc,
procs))
sys.stdout.flush()
taskproc = procs
# If we have multiple processes for each task, create groups
ngroup = procs
if taskproc > 1:
ngroup = int(procs / taskproc)
print("{}using {} groups of {} processes each".format(prefix,
ngroup, taskproc))
sys.stdout.flush()
if ngroup * taskproc < procs:
print("{}{} processes remain and will sit idle".format(prefix,
(procs - ngroup * taskproc)))
sys.stdout.flush()
# Now we divide up the tasks among the groups of processes as
# equally as possible.
group_firsttask = None
group_ntask = None
if ntask <= ngroup:
group_firsttask = [ x for x in range(ntask) ]
group_ntask = [ 1 for x in range(ntask) ]
if ntask < ngroup:
group_firsttask.extend([ 0 for x in range(ngroup - ntask) ])
group_ntask.extend([ 0 for x in range(ngroup - ntask) ])
else:
dist = dist_discrete_all(weights, ngroup)
group_firsttask = [ x[0] for x in dist ]
group_ntask = [ x[1] for x in dist ]
rundir = io.get_pipe_rundir()
logdir = os.path.join(rundir, io.get_pipe_logdir())
maxruntime = 0
print("{}".format(prefix))
sys.stdout.flush()
for g in range(ngroup):
first = group_firsttask[g]
nt = group_ntask[g]
if nt == 0:
continue
gruntime = np.sum(weights[first:first+nt])
if gruntime > maxruntime:
maxruntime = gruntime
print("{}group {} estimated runtime is {} minutes".format(prefix,
g, gruntime))
sys.stdout.flush()
for t in range(first, first + nt):
# For this task, determine the output log file. If the task has
# the "night" key in its name, then use that subdirectory.
# Otherwise, if it has the "pixel" key, use the appropriate
# subdirectory.
tt = task_type(runtasks[t])
fields = task_classes[tt].name_split(runtasks[t])
tasklog = None
if "night" in fields:
tasklogdir = os.path.join(logdir, io.get_pipe_nightdir(),
"{:08d}".format(fields["night"]))
tasklog = os.path.join(tasklogdir,
"{}.log".format(runtasks[t]))
elif "pixel" in fields:
tasklogdir = os.path.join(logdir,
io.healpix_subdirectory(fields["nside"],fields["pixel"]))
tasklog = os.path.join(tasklogdir,
"{}.log".format(runtasks[t]))
com = task_classes[tt].run_cli(runtasks[t], options, taskproc,
launch=launch, log=tasklog, db=db) # need to db for some tasks
print("{} {}".format(prefix, com))
sys.stdout.flush()
print("{}".format(prefix))
sys.stdout.flush()
print("{}Total estimated runtime is {} minutes".format(prefix,
maxruntime))
sys.stdout.flush()
return
|
"""Base Store Abstract Class"""
from __future__ import annotations
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Iterable, List, Optional
import proxystore as ps
from proxystore.factory import Factory
logger = logging.getLogger(__name__)
class Store(metaclass=ABCMeta):
"""Abstraction of a key-value store"""
def __init__(self, name) -> None:
"""Init Store
Args:
name (str): name of the store instance.
"""
self.name = name
logger.debug(f"Initialized {self}")
    def __repr__(self) -> str:
        """String representation of Store instance"""
s = f"{ps.utils.fullname(self.__class__)}("
attributes = [
f"{key}={value}"
for key, value in self.__dict__.items()
if not key.startswith('_') and not callable(value)
]
attributes.sort()
s += ", ".join(attributes)
s += ")"
return s
def cleanup(self) -> None:
"""Cleanup any objects associated with the store
        Many :class:`Store <.Store>` types do not have any objects that
        require cleaning up, so this method is simply a no-op.
Warning:
This method should only be called at the end of the program
when the store will no longer be used, for example once all
proxies have been resolved.
"""
pass
def create_key(self, obj: Any) -> str:
"""Create key for the object
Args:
obj: object to be placed in store.
Returns:
key (str)
"""
return ps.utils.create_key(obj)
@abstractmethod
def evict(self, key: str) -> None:
"""Evict object associated with key
Args:
key (str): key corresponding to object in store to evict.
"""
raise NotImplementedError
@abstractmethod
def exists(self, key: str) -> bool:
"""Check if key exists
Args:
key (str): key to check.
Returns:
`bool`
"""
raise NotImplementedError
@abstractmethod
def get(
self,
key: str,
*,
strict: bool = False,
default: Any = None,
) -> Optional[object]:
"""Return object associated with key
Args:
key (str): key corresponding to object.
            strict (bool): guarantee the returned object is the most recent
                version (default: False).
default: optionally provide value to be returned if an object
associated with the key does not exist (default: None).
Returns:
object associated with key or `default` if key does not exist.
"""
raise NotImplementedError
@abstractmethod
def is_cached(self, key: str, *, strict: bool = False) -> bool:
"""Check if object is cached locally
Args:
key (str): key corresponding to object.
            strict (bool): guarantee the object in the cache is the most recent
                version (default: False).
Returns:
`bool`
"""
raise NotImplementedError
@abstractmethod
def proxy(
self,
obj: Optional[object] = None,
*,
key: Optional[str] = None,
factory: Factory = Factory,
**kwargs,
) -> 'ps.proxy.Proxy':
"""Create a proxy that will resolve to an object in the store
Warning:
If the factory requires reinstantiating the store to correctly
resolve the object, the factory should reinstantiate the store
with the same arguments used to instantiate the store that
created the proxy/factory. I.e. the :func:`proxy()` function
should pass any arguments given to :func:`Store.__init__()`
along to the factory so the factory can correctly recreate the
store if the factory is resolved in a different Python process.
Args:
obj (object): object to place in store and return proxy for.
If an object is not provided, a key must be provided that
corresponds to an object already in the store
(default: None).
key (str): optional key to associate with `obj` in the store.
If not provided, a key will be generated (default: None).
factory (Factory): factory class that will be instantiated
and passed to the proxy. The factory class should be able
to correctly resolve the object from this store
(default: :any:`Factory <proxystore.factory.Factory>`).
kwargs (dict): additional arguments to pass to the Factory.
Returns:
:any:`Proxy <proxystore.proxy.Proxy>`
Raises:
ValueError:
if `key` and `obj` are both `None`.
ValueError:
if `obj` is None and `key` does not exist in the store.
"""
raise NotImplementedError
@abstractmethod
def proxy_batch(
self,
objs: Optional[Iterable[Optional[object]]] = None,
*,
keys: Optional[Iterable[Optional[str]]] = None,
factory: Factory = Factory,
**kwargs,
) -> List['ps.proxy.Proxy']:
"""Create proxies for batch of objects in the store
See :any:`proxy() <proxystore.store.base.Store.proxy>` for more
details.
Args:
objs (Iterable[object]): objects to place in store and return
proxies for. If an iterable of objects is not provided, an
iterable of keys must be provided that correspond to objects
already in the store (default: None).
keys (Iterable[str]): optional keys to associate with `objs` in the
store. If not provided, keys will be generated (default: None).
factory (Factory): factory class that will be instantiated
and passed to the proxies. The factory class should be able
to correctly resolve an object from this store
(default: :any:`Factory <proxystore.factory.Factory>`).
kwargs (dict): additional arguments to pass to the Factory.
Returns:
List of :any:`Proxy <proxystore.proxy.Proxy>`
Raises:
ValueError:
if `keys` and `objs` are both `None`.
ValueError:
if `objs` is None and `keys` does not exist in the store.
"""
raise NotImplementedError
@abstractmethod
def set(self, obj: Any, *, key: Optional[str] = None) -> str:
"""Set key-object pair in store
Args:
obj (object): object to be placed in the store.
key (str, optional): key to use with the object. If the key is not
provided, one will be created.
Returns:
key (str). Note that some implementations of a store may return
a key different from the provided key.
"""
raise NotImplementedError
@abstractmethod
def set_batch(
self,
objs: Iterable[Any],
*,
keys: Optional[Iterable[Optional[str]]] = None,
) -> List[str]:
"""Set objects in store
Args:
objs (Iterable[object]): iterable of objects to be placed in the
store.
keys (Iterable[str], optional): keys to use with the objects.
If the keys are not provided, keys will be created.
Returns:
List of keys (str). Note that some implementations of a store may
return keys different from the provided keys.
Raises:
ValueError:
if :code:`keys is not None` and :code:`len(objs) != len(keys)`.
"""
raise NotImplementedError
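# --- Illustrative sketch, not part of proxystore ---
# A minimal in-memory subclass showing how the abstract interface above fits
# together. Proxy creation is stubbed out because it depends on the concrete
# Factory wiring of a real backend.
class DictStore(Store):
    """Toy Store that keeps objects in a plain dict (illustration only)."""

    def __init__(self, name: str) -> None:
        self._data: dict = {}
        super().__init__(name)

    def evict(self, key: str) -> None:
        self._data.pop(key, None)

    def exists(self, key: str) -> bool:
        return key in self._data

    def get(self, key: str, *, strict: bool = False, default: Any = None) -> Optional[object]:
        return self._data.get(key, default)

    def is_cached(self, key: str, *, strict: bool = False) -> bool:
        return key in self._data

    def proxy(self, obj=None, *, key=None, factory=Factory, **kwargs):
        raise NotImplementedError("proxy wiring is omitted in this sketch")

    def proxy_batch(self, objs=None, *, keys=None, factory=Factory, **kwargs):
        raise NotImplementedError("proxy wiring is omitted in this sketch")

    def set(self, obj: Any, *, key: Optional[str] = None) -> str:
        key = key if key is not None else self.create_key(obj)
        self._data[key] = obj
        return key

    def set_batch(self, objs: Iterable[Any], *, keys=None) -> List[str]:
        objs = list(objs)
        keys = list(keys) if keys is not None else [None] * len(objs)
        return [self.set(obj, key=k) for obj, k in zip(objs, keys)]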
|
from numpy import array, nan, atleast_1d, copy, vstack, hstack, isscalar, ndarray, prod, int32
import numpy as np
from setDefaultIterFuncs import USER_DEMAND_EXIT
from ooMisc import killThread, setNonLinFuncsNumber
from nonOptMisc import scipyInstalled, Vstack, isspmatrix, isPyPy
try:
from DerApproximator import get_d1
DerApproximatorIsInstalled = True
except:
DerApproximatorIsInstalled = False
class nonLinFuncs:
def __init__(self): pass
def wrapped_func(p, x, IND, userFunctionType, ignorePrev, getDerivative):#, _linePointDescriptor = None):
if isinstance(x, dict):
if not p.isFDmodel: p.err('calling the function with argument of type dict is allowed for FuncDesigner models only')
x = p._point2vector(x)
if not getattr(p.userProvided, userFunctionType): return array([])
if p.istop == USER_DEMAND_EXIT:
if p.solver.useStopByException:
raise killThread
else:
return nan
if getDerivative and not p.isFDmodel and not DerApproximatorIsInstalled:
p.err('For the problem you should have DerApproximator installed, see http://openopt.org/DerApproximator')
#userFunctionType should be 'f', 'c', 'h'
funcs = getattr(p.user, userFunctionType)
#funcs_num = getattr(p, 'n'+userFunctionType)
if IND is not None:
ind = p.getCorrectInd(IND)
else: ind = None
# this line had been added because some solvers pass tuple instead of
# x being vector p.n x 1 or matrix X=[x1 x2 x3...xk], size(X)=[p.n, k]
if not isspmatrix(x): # mb for future purposes
x = atleast_1d(x)
# if not str(x.dtype).startswith('float'):
# x = asfarray(x)
else:
if p.debug:
p.pWarn('[oo debug] sparse matrix x in nonlinfuncs.py has been encountered')
# if not ignorePrev:
# prevKey = p.prevVal[userFunctionType]['key']
# else:
# prevKey = None
#
# # TODO: move it into runprobsolver or baseproblem
# if p.prevVal[userFunctionType]['val'] is None:
# p.prevVal[userFunctionType]['val'] = zeros(getattr(p, 'n'+userFunctionType))
#
# if prevKey is not None and p.iter > 0 and array_equal(x, prevKey) and ind is None and not ignorePrev:
# #TODO: add counter of the situations
# if not getDerivative:
# r = copy(p.prevVal[userFunctionType]['val'])
# #if p.debug: assert array_equal(r, p.wrapped_func(x, IND, userFunctionType, True, getDerivative))
# if ind is not None: r = r[ind]
#
# if userFunctionType == 'f':
# if p.isObjFunValueASingleNumber: r = r.sum(0)
# if p.invertObjFunc: r = -r
# if p.solver.funcForIterFcnConnection=='f' and any(isnan(x)):
# p.nEvals['f'] += 1
#
# if p.nEvals['f']%p.f_iter == 0:
# p.iterfcn(x, fk = r)
# return r
args = getattr(p.args, userFunctionType)
# TODO: handle it in prob prepare
if not hasattr(p, 'n'+userFunctionType): setNonLinFuncsNumber(p, userFunctionType)
# if ind is None:
# nFuncsToObtain = getattr(p, 'n'+ userFunctionType)
# else:
# nFuncsToObtain = len(ind)
if x.shape[0] != p.n and (x.ndim<2 or x.shape[1] != p.n):
p.err('x with incorrect shape passed to non-linear function')
#TODO: code cleanup (below)
if getDerivative or x.ndim <= 1 or x.shape[0] == 1:
nXvectors = 1
x_0 = copy(x)
else:
nXvectors = x.shape[0]
# TODO: use certificate instead
if p.isFDmodel:
if getDerivative:
if p.freeVars is None or (p.fixedVars is not None and len(p.freeVars) < len(p.fixedVars)):
funcs2 = [(lambda x, i=i: \
p._pointDerivative2array(
funcs[i].D(x, Vars = p.freeVars, useSparse=p.useSparse, fixedVarsScheduleID=p._FDVarsID, exactShape=True),
useSparse=p.useSparse, func=funcs[i], point=x)) \
for i in range(len(funcs))]
else:
funcs2 = [(lambda x, i=i: \
p._pointDerivative2array(
funcs[i].D(x, fixedVars = p.fixedVars, useSparse=p.useSparse, fixedVarsScheduleID=p._FDVarsID, exactShape=True),
useSparse=p.useSparse, func=funcs[i], point=x)) \
for i in range(len(funcs))]
else: # function, not derivative
_kw = {'fixedVarsScheduleID': p._FDVarsID}
if p.freeVars is None or (p.fixedVars is not None and len(p.freeVars) < len(p.fixedVars)):
_kw['Vars'] = p.freeVars
else:
_kw['fixedVars'] = p.fixedVars
funcs2 = [(lambda x, i=i: \
funcs[i]._getFuncCalcEngine(x, **_kw))\
for i in range(len(funcs))]
else: # not FD model
funcs2 = funcs
if ind is None:
Funcs = funcs2
elif ind is not None and p.functype[userFunctionType] == 'some funcs R^nvars -> R':
Funcs = [funcs2[i] for i in ind]
else:
Funcs = getFuncsAndExtractIndexes(p, funcs2, ind, userFunctionType)
# agregate_counter = 0
Args = () if p.isFDmodel else args
if nXvectors == 1:
if p.isFDmodel:
X = p._vector2point(x)
X._p = p
#X._linePointDescriptor = _linePointDescriptor
else:
X = x
if nXvectors > 1: # and hence getDerivative isn't involved
#temporary, to be fixed
if userFunctionType == 'f':
assert p.isObjFunValueASingleNumber
if p.isFDmodel:
assert ind is None
if isPyPy or p.hasVectorizableFuncs: # TODO: get rid of box-bound constraints
from FuncDesigner.ooPoint import ooPoint as oopoint
from FuncDesigner.multiarray import multiarray
# TODO: new
xx = []
counter = 0
#xT = x.T
for i, oov in enumerate(p._freeVarsList):
s = p._optVarSizes[oov]
xx.append((oov, (x[:, counter: counter + s].flatten() if s == 1 else x[:, counter: counter + s]).view(multiarray)))
# xx.append((oov, multiarray(x[:, counter: counter + s].flatten() if s == 1 else x[:, counter: counter + s])))
counter += s
X = oopoint(xx)
X.dictOfFixedFuncs = p.dictOfFixedFuncs
X._dictOfRedirectedFuncs = p._dictOfRedirectedFuncs
X.maxDistributionSize = p.maxDistributionSize
X._dictOfStochVars = p._dictOfStochVars
X._p = p
if x.ndim > 1:
X.isMultiPoint = True
X.N = nXvectors
if len(p.unvectorizableFuncs) != 0:
XX = [p._vector2point(x[i]) for i in range(nXvectors)]
for _X in XX:
_X._p = p
_X.dictOfFixedFuncs = p.dictOfFixedFuncs
_X._dictOfRedirectedFuncs = p._dictOfRedirectedFuncs
_X._dictOfStochVars = p._dictOfStochVars
_X.maxDistributionSize = p.maxDistributionSize
r = vstack([[fun(_x) for _x in XX] if funcs[i] in p.unvectorizableFuncs else fun(X).T for i, fun in enumerate(Funcs)]).T
# X = [p._vector2point(x[i]) for i in range(nXvectors)]
# r = hstack([[fun(xx) for xx in X] for fun in Funcs]).reshape(1, -1)
#new
# if p.vectorizable:
# from FuncDesigner.ooPoint import ooPoint as oopoint, multiarray
#
# X = dict([(oovar, x[:, i].view(multiarray)) for i, oovar in enumerate(p._freeVarsList)])
# X = oopoint(X, skipArrayCast = True)
# X.N = nXvectors
# X.isMultiPoint = True
# X.update(p.dictOfFixedFuncs)
# r = hstack([fun(X) for fun in Funcs]).reshape(1, -1)
#
# #old
# else:
# X = [p._vector2point(x[i]) for i in range(nXvectors)]
# r = hstack([[fun(xx) for xx in X] for fun in Funcs]).reshape(1, -1)
else:
X = [(x[i],) + Args for i in range(nXvectors)]
#r = hstack([[fun(*xx) for xx in X] for fun in Funcs])
R = []
for xx in X:
tmp = [fun(*xx) for fun in Funcs]
r_ = hstack(tmp[0]) if len(tmp) == 1 and isinstance(tmp[0], (list, tuple)) else hstack(tmp) if len(tmp) > 1 else tmp[0]
R.append(r_)
r = hstack(R)#.T
#print(r.shape, userFunctionType)
elif not getDerivative:
tmp = [fun(*(X, )+Args) for fun in Funcs]
r = hstack(tmp[0]) if len(tmp) == 1 and isinstance(tmp[0], (list, tuple)) else hstack(tmp) if len(tmp) > 1 else tmp[0]
#print(x.shape, r.shape, x, r)
# if not ignorePrev and ind is None:
# p.prevVal[userFunctionType]['key'] = copy(x_0)
# p.prevVal[userFunctionType]['val'] = r.copy()
elif getDerivative and p.isFDmodel:
rr = [fun(X) for fun in Funcs]
r = Vstack(rr) if scipyInstalled and any([isspmatrix(elem) for elem in rr]) else vstack(rr)
else:#getDerivative
diffInt = p.diffInt
abs_x = abs(x)
finiteDiffNumbers = 1e-10 * abs_x
if p.diffInt.size == 1:
finiteDiffNumbers[finiteDiffNumbers < diffInt] = diffInt
else:
finiteDiffNumbers[finiteDiffNumbers < diffInt] = diffInt[finiteDiffNumbers < diffInt]
R = []
#r = zeros((nFuncsToObtain, p.n))
for index, fun in enumerate(Funcs):
""" getting derivatives """
def func(x):
_r = fun(*((x,) + Args))
return _r if type(_r) not in (list, tuple) or len(_r)!=1 else _r[0]
d1 = get_d1(func, x, pointVal = None, diffInt = finiteDiffNumbers,
stencil=p.JacobianApproximationStencil, exactShape=True)
#r[agregate_counter:agregate_counter+d1.size] = d1
R.append(d1)
# agregate_counter += atleast_1d(v).shape[0]
r = vstack(R)
#if type(r) == matrix: r = r.A
if type(r) != ndarray and not isscalar(r) and not isspmatrix(r): # multiarray
r = r.view(ndarray).flatten() if userFunctionType == 'f' else r.view(ndarray)
#elif userFunctionType == 'f' and p.isObjFunValueASingleNumber and prod(r.shape) > 1 and (type(r) == ndarray or min(r.shape) > 1):
#r = r.sum(0)
elif userFunctionType == 'f' and p.isObjFunValueASingleNumber and not isscalar(r):
if prod(r.shape) > 1 and not getDerivative and nXvectors == 1:
p.err('implicit summation in objective is no longer available to prevent possibly hidden bugs')
# if r.size == 1:
# r = r.item()
if userFunctionType == 'f' and p.isObjFunValueASingleNumber:
if getDerivative and r.ndim > 1:
if min(r.shape) > 1:
p.err('incorrect shape of objective func derivative')
# TODO: omit cast to dense array. Somewhere bug triggers?
if hasattr(r, 'toarray'):
r=r.toarray()
r = r.flatten()
if userFunctionType != 'f' and nXvectors != 1:
r = r.reshape(nXvectors, r.size // nXvectors)
# if type(r) == matrix:
# raise 0
# r = r.A # if _dense_numpy_matrix !
if nXvectors == 1 and (not getDerivative or prod(r.shape) == 1): # DO NOT REPLACE BY r.size - r may be sparse!
r = r.flatten() if type(r) == ndarray else r.toarray().flatten() if not isscalar(r) else atleast_1d(r)
if p.invertObjFunc and userFunctionType=='f':
r = -r
if not getDerivative:
if ind is None:
p.nEvals[userFunctionType] += nXvectors
else:
p.nEvals[userFunctionType] = p.nEvals[userFunctionType] + float(nXvectors * len(ind)) / getattr(p, 'n'+ userFunctionType)
if getDerivative:
assert x.size == p.n#TODO: add python list possibility here
x = x_0 # for to suppress numerical instability effects while x +/- delta_x
#if userFunctionType == 'f' and p.isObjFunValueASingleNumber and r.size == 1:
#r = r.item()
from FuncDesigner import _Stochastic
if isinstance(r, _Stochastic) or (isinstance(r, ndarray) and isinstance(r.flat[0], _Stochastic)):
p.err('''
error in evaluation of p.%s:
objective and constraints cannot be directly applied on stochastic variables,
you should use functions like mean, std, var, P.''' % userFunctionType
)
if userFunctionType == 'f' and hasattr(p, 'solver') and p.solver.funcForIterFcnConnection=='f' and hasattr(p, 'f_iter') and not getDerivative:
if p.nEvals['f']%p.f_iter == 0 or nXvectors > 1:
p.iterfcn(x, r)
return r
def wrapped_1st_derivatives(p, x, ind_, funcType, ignorePrev, useSparse):
if isinstance(x, dict):
if not p.isFDmodel: p.err('calling the function with argument of type dict is allowed for FuncDesigner models only')
if ind_ is not None:p.err('the operation is turned off for argument of type dict when ind!=None')
x = p._point2vector(x)
if ind_ is not None:
ind = p.getCorrectInd(ind_)
else: ind = None
if p.istop == USER_DEMAND_EXIT:
if p.solver.useStopByException:
raise killThread
else:
return nan
derivativesType = 'd'+ funcType
prevKey = p.prevVal[derivativesType]['key']
if prevKey is not None and p.iter > 0 and np.array_equal(x, prevKey) and ind is None and not ignorePrev:
#TODO: add counter of the situations
assert p.prevVal[derivativesType]['val'] is not None
return copy(p.prevVal[derivativesType]['val'])
if ind is None and not ignorePrev: p.prevVal[derivativesType]['ind'] = copy(x)
#TODO: patterns!
nFuncs = getattr(p, 'n'+funcType)
x = atleast_1d(x)
if hasattr(p.userProvided, derivativesType) and getattr(p.userProvided, derivativesType):
funcs = getattr(p.user, derivativesType)
if ind is None or (nFuncs == 1 and p.functype[funcType] == 'single func'):
Funcs = funcs
elif ind is not None and p.functype[funcType] == 'some funcs R^nvars -> R':
Funcs = [funcs[i] for i in ind]
else:
Funcs = getFuncsAndExtractIndexes(p, funcs, ind, funcType)
# if ind is None: derivativesNumber = nFuncs
# else: derivativesNumber = len(ind)
#derivatives = empty((derivativesNumber, p.n))
derivatives = []
#agregate_counter = 0
for fun in Funcs:#getattr(p.user, derivativesType):
tmp = atleast_1d(fun(*(x,)+getattr(p.args, funcType)))
# TODO: replace tmp.size here for sparse matrices
#assert tmp.size % p.n == mod(tmp.size, p.n)
if tmp.size % p.n != 0:
if funcType=='f':
p.err('incorrect user-supplied (sub)gradient size of objective function')
elif funcType=='c':
p.err('incorrect user-supplied (sub)gradient size of non-lin inequality constraints')
elif funcType=='h':
p.err('incorrect user-supplied (sub)gradient size of non-lin equality constraints')
if tmp.ndim == 1: m= 1
else: m = tmp.shape[0]
if p.functype[funcType] == 'some funcs R^nvars -> R' and m != 1:
# TODO: more exact check according to stored p.arr_of_indexes_* arrays
p.err('incorrect shape of user-supplied derivative, it should be in accordance with user-provided func size')
derivatives.append(tmp)
#derivatives[agregate_counter : agregate_counter + m] = tmp#.reshape(tmp.size/p.n,p.n)
#agregate_counter += m
#TODO: inline ind modification!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
derivatives = Vstack(derivatives)
if ind is None:
p.nEvals[derivativesType] += 1
else:
#derivatives = derivatives[ind]
p.nEvals[derivativesType] = p.nEvals[derivativesType] + float(len(ind)) / nFuncs
if funcType=='f':
if p.invertObjFunc: derivatives = -derivatives
if p.isObjFunValueASingleNumber:
if not isinstance(derivatives, ndarray): derivatives = derivatives.toarray()
derivatives = derivatives.flatten()
else:
#if not getattr(p.userProvided, derivativesType) or p.isFDmodel:
# x, IND, userFunctionType, ignorePrev, getDerivative
derivatives = p.wrapped_func(x, ind, funcType, True, True)
if ind is None:
p.nEvals[derivativesType] -= 1
else:
p.nEvals[derivativesType] = p.nEvals[derivativesType] - float(len(ind)) / nFuncs
#else:
if useSparse is False or not scipyInstalled or not hasattr(p, 'solver') or not p.solver._canHandleScipySparse:
# p can has no attr 'solver' if it is called from checkdf, checkdc, checkdh
if not isinstance(derivatives, ndarray):
derivatives = derivatives.toarray()
# if min(derivatives.shape) == 1:
# if isspmatrix(derivatives): derivatives = derivatives.A
# derivatives = derivatives.flatten()
if type(derivatives) != ndarray and isinstance(derivatives, ndarray): # dense numpy matrix
derivatives = derivatives.A
if ind is None and not ignorePrev: p.prevVal[derivativesType]['val'] = derivatives
if funcType=='f':
if hasattr(p, 'solver') and not p.solver.iterfcnConnected and p.solver.funcForIterFcnConnection=='df':
if p.df_iter is True: p.iterfcn(x)
elif p.nEvals[derivativesType]%p.df_iter == 0: p.iterfcn(x) # call iterfcn each {p.df_iter}-th df call
if p.isObjFunValueASingleNumber and type(derivatives) == ndarray and derivatives.ndim > 1:
derivatives = derivatives.flatten()
return derivatives
# the funcs below are not implemented properly yet
def user_d2f(p, x):
assert x.ndim == 1
p.nEvals['d2f'] += 1
assert(len(p.user.d2f)==1)
r = p.user.d2f[0](*(x, )+p.args.f)
if p.invertObjFunc:# and userFunctionType=='f':
r = -r
return r
def user_d2c(p, x):
return ()
def user_d2h(p, x):
return ()
def user_l(p, x):
return ()
def user_dl(p, x):
return ()
def user_d2l(p, x):
return ()
def getCorrectInd(p, ind):
if ind is None or type(ind) in [list, tuple]:
result = ind
else:
try:
result = atleast_1d(np.asarray(ind, dtype=int32)).tolist()
except:
raise ValueError('%s is an unknown func index type!'%type(ind))
return result
def getFuncsAndExtractIndexes(p, funcs, ind, userFunctionType):
if ind is None: return funcs
if len(funcs) == 1 :
def f (*args, **kwargs):
tmp = funcs[0](*args, **kwargs)
if isspmatrix(tmp):
tmp = tmp.tocsc()
elif not isinstance(tmp, ndarray) or tmp.ndim == 0:
tmp = atleast_1d(tmp)
if isPyPy:
return atleast_1d([tmp[i] for i in ind])
else:
# if p.debug:
# assert all(tmp[ind] == atleast_1d([tmp[i] for i in ind]))
return tmp[ind]
return [f]
# get the block number and the shift within that block
arr_of_indexes = getattr(p, 'arr_of_indexes_' + userFunctionType)
if isPyPy: # temporary workaround for the bug "int32 is unhashable"
Left_arr_indexes = np.searchsorted(arr_of_indexes, ind)
left_arr_indexes = [int(elem) for elem in atleast_1d(Left_arr_indexes)]
else:
left_arr_indexes = np.searchsorted(arr_of_indexes, ind)
indLength = len(ind)
Funcs2 = []
# TODO: try to get rid of cycles, use vectorization instead
IndDict = {}
for i in range(indLength):
if left_arr_indexes[i] != 0:
num_of_funcs_before_arr_left_border = arr_of_indexes[left_arr_indexes[i]-1]
inner_ind = ind[i] - num_of_funcs_before_arr_left_border - 1
else:
inner_ind = ind[i]
if left_arr_indexes[i] in IndDict.keys():
IndDict[left_arr_indexes[i]].append(inner_ind)
else:
IndDict[left_arr_indexes[i]] = [inner_ind]
Funcs2.append([funcs[left_arr_indexes[i]], IndDict[left_arr_indexes[i]]])
Funcs = []
for i in range(len(Funcs2)):
def f_aux(x, i=i):
r = Funcs2[i][0](x)
# TODO: are other formats better?
if not isscalar(r):
if isPyPy:
if isspmatrix(r):
r = r.tocsc()[Funcs2[i][1]]
else:
# Temporary workaround for PyPy's lack of integer array indexing
tmp = atleast_1d(r)
r = atleast_1d([tmp[i] for i in Funcs2[i][1]])
else:
r = r.tocsc()[Funcs2[i][1]] if isspmatrix(r) else atleast_1d(r)[Funcs2[i][1]]
return r
Funcs.append(f_aux)
#Funcs.append(lambda x, i=i: Funcs2[i][0](x)[Funcs2[i][1]])
return Funcs#, inner_ind
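# A standalone sketch (not part of the original module) illustrating the
# block/shift lookup above, assuming the arr_of_indexes_* convention of
# storing the last flat index of each user function's block; the block
# sizes below (3, 2 and 4 constraints per function) are hypothetical.
def _demo_block_lookup():
    import numpy as np
    arr_of_indexes = np.array([2, 4, 8])   # blocks of sizes 3, 2, 4
    ind = [0, 3, 4, 8]                     # requested flat constraint indices
    blocks = np.searchsorted(arr_of_indexes, ind)
    for flat, b in zip(ind, blocks):
        inner = flat if b == 0 else flat - arr_of_indexes[b - 1] - 1
        print('flat %d -> func #%d, local index %d' % (flat, b, inner))
    # flat 0 -> func #0, local index 0
    # flat 3 -> func #1, local index 0
    # flat 4 -> func #1, local index 1
    # flat 8 -> func #2, local index 3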
|
'''n=str(input('Digite seu nome completo: ')).strip()
print('Analisando seu nome......')
print('Seu nome em maiuscula é {}'.format(n.upper()))
print('Seu nome em minuscula é {}'.format(n.lower()))
print('Seu nome tem {} letras'.format(len(n) - n.count(' ')))
#print('Seu primeiro nome tem {} letras'.format(n.find(' ')))
separa=n.split()
print('Seu primeiro nome é {} e tem {} letras'.format(separa[0], len(separa[0])))'''
n=str(input('Digite seu nome completo: ')).strip()
print('Analisando seu nome....')
print('Seu nome em maiuscula é {}'.format(n.upper()))
print('Seu nome em minuscula é {}'.format(n.lower()))
print('Seu nome ao todo tem {} letras'.format(len(n) - n.count(' ')))
#print('Seu primeiro nome tem {} letras '.format(n.find(' ')))
separa= n.split()
print(separa) # added this print to see how the name gets split into parts
print('Seu primeiro nome é {} e tem {} letras'.format(separa[0], len(separa[0])))
|
from abc import ABCMeta, abstractmethod
import pytorch_lightning as pl
import torch
from lasaft.utils.functions import get_optimizer_by_name
class AbstractSeparator(pl.LightningModule, metaclass=ABCMeta):
def __init__(self, lr, optimizer, initializer):
super(AbstractSeparator, self).__init__()
self.lr = lr
self.optimizer = optimizer
self.target_names = ['vocals', 'drums', 'bass', 'other']
if initializer in ['kaiming', 'kaiming_normal']:
f = torch.nn.init.kaiming_normal_
elif initializer in ['kaiming_uniform']:
f = torch.nn.init.kaiming_uniform_
elif initializer in ['xavier', 'xavier_normal']:
f = torch.nn.init.xavier_normal_
elif initializer in ['xavier_uniform']:
f = torch.nn.init.xavier_uniform_
else:
raise ValueError(f'unknown initializer: {initializer}')
def init_weights():
with torch.no_grad():
for param in self.parameters():
if param.dim() > 1:
f(param)
self.initializer = init_weights
def configure_optimizers(self):
optimizer = get_optimizer_by_name(self.optimizer)
return optimizer(self.parameters(), lr=float(self.lr))
@abstractmethod
def training_step(self, batch, batch_idx):
pass
@abstractmethod
def forward(self, input_signal, input_condition) -> torch.Tensor:
pass
@abstractmethod
def separate(self, input_signal, input_condition) -> torch.Tensor:
pass
def init_weights(self):
self.initializer()
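# A minimal hedged sketch of a concrete subclass; EchoSeparator, the batch
# layout (signal, condition, target) and the 'adam' optimizer name are
# assumptions for illustration, not part of the original lasaft code.
class EchoSeparator(AbstractSeparator):
    def forward(self, input_signal, input_condition) -> torch.Tensor:
        # placeholder "model": just echo the input signal
        return input_signal

    def separate(self, input_signal, input_condition) -> torch.Tensor:
        return self.forward(input_signal, input_condition)

    def training_step(self, batch, batch_idx):
        input_signal, input_condition, target = batch  # assumed batch layout
        estimate = self(input_signal, input_condition)
        return torch.nn.functional.mse_loss(estimate, target)

# model = EchoSeparator(lr=1e-3, optimizer='adam', initializer='kaiming')  # 'adam' is an assumed optimizer name
# model.init_weights()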
|
import bs4 as bs
import urllib.request
import pandas as pd
kto=urllib.request.urlopen('http://www.bloomberght.com/doviz/dolar')
spider=bs.BeautifulSoup(kto,'lxml')
uzman_para= urllib.request.urlopen('http://uzmanpara.milliyet.com.tr/canli-borsa/').read()
soup=bs.BeautifulSoup(uzman_para,'lxml')
for spd in spider.find_all('table'): # exchange rates via the BeautifulSoup library
print(spd.text)
spdr = pd.read_html('http://www.bloomberght.com/doviz/dolar') # exchange rates via the pandas library
for fd in spdr:
print(fd)
for paragph in soup.find_all('table'): # stock market quotes via the BeautifulSoup library
print(paragph.text)
dfs = pd.read_html('http://uzmanpara.milliyet.com.tr/canli-borsa/') # stock market quotes via the pandas library
for df in dfs:
print(df)
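# A small hedged sketch (not in the original script): pd.read_html returns a
# list of DataFrames, so in practice you usually pick a single table by index
# and coerce the columns you need. The index 0 and the column name used below
# are assumptions, not values verified against the live pages above.
tables = pd.read_html('http://www.bloomberght.com/doviz/dolar')
first_table = tables[0]            # first table on the page (assumption)
print(first_table.head())          # quick look at the parsed exchange-rate table
# first_table['Satis'] = pd.to_numeric(first_table['Satis'], errors='coerce')  # hypothetical column name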
|
from aerosandbox import ImplicitAnalysis
from aerosandbox.geometry import *
import casadi as cas  # explicit imports for the cas/np symbols used throughout this class
import numpy as np
class VortexLatticeMethod(ImplicitAnalysis):
# Usage:
# # Set up a problem using the constructor syntax (e.g. "VortexLatticeMethod(airplane=a, op_point=op, opti=opti)" for some Airplane a and OperatingPoint op).
# # setup() is called automatically when run_setup=True; it builds the panel problem and solves for the vortex strengths.
# # Access results through properties of the VortexLatticeMethod object.
# # # In a future update, this will be done through a standardized AeroData class.
def __init__(self,
airplane, # type: Airplane
op_point, # type: op_point
opti, # type: cas.Opti
run_setup=True,
):
super().__init__(airplane, op_point)
self.opti = opti
if run_setup:
self.setup()
def setup(self, verbose=True):
# Runs a point analysis at the specified op-point.
self.verbose = verbose
if self.verbose:
print("Setting up casVLM1 calculation...")
self.make_panels()
self.setup_geometry()
self.setup_operating_point()
self.calculate_vortex_strengths()
self.calculate_forces()
if self.verbose:
print("casVLM1 setup complete! Ready to pass into the solver...")
def make_panels(self):
# Creates self.panel_coordinates_structured_list and self.wing_mcl_normals.
if self.verbose:
print("Meshing...")
front_left_vertices = []
front_right_vertices = []
back_left_vertices = []
back_right_vertices = []
is_trailing_edge = []
normal_directions = []
for wing_num in range(len(self.airplane.wings)):
# Things we want for each wing (where M is the number of chordwise panels, N is the number of spanwise panels)
# # panel_coordinates_structured_list: (M+1) x (N+1) x 3; corners of every panel.
# # normals_structured_list: M x N x 3; normal direction of each panel
# Get the wing
wing = self.airplane.wings[wing_num] # type: Wing
# Define number of chordwise points
n_chordwise_coordinates = wing.chordwise_panels + 1
# Get the chordwise coordinates
if wing.chordwise_spacing == 'uniform':
nondim_chordwise_coordinates = np.linspace(0, 1, n_chordwise_coordinates)
elif wing.chordwise_spacing == 'cosine':
nondim_chordwise_coordinates = cosspace(0, 1, n_chordwise_coordinates)
else:
raise Exception("Bad init_val of wing.chordwise_spacing!")
# -----------------------------------------------------
## Make the panels for each section.
for section_num in range(len(wing.xsecs) - 1):
# Define the relevant cross sections
inner_xsec = wing.xsecs[section_num] # type: WingXSec
outer_xsec = wing.xsecs[section_num + 1] # type: WingXSec
# Find the corners
inner_xsec_xyz_le = inner_xsec.xyz_le + wing.xyz_le
inner_xsec_xyz_te = inner_xsec.xyz_te() + wing.xyz_le
outer_xsec_xyz_le = outer_xsec.xyz_le + wing.xyz_le
outer_xsec_xyz_te = outer_xsec.xyz_te() + wing.xyz_le
# Define the airfoils at each cross section
inner_airfoil = inner_xsec.airfoil.add_control_surface(
deflection=inner_xsec.control_surface_deflection,
hinge_point_x=inner_xsec.control_surface_hinge_point
) # type: Airfoil
outer_airfoil = outer_xsec.airfoil.add_control_surface(
deflection=inner_xsec.control_surface_deflection,
hinge_point_x=inner_xsec.control_surface_hinge_point
) # type: Airfoil
# Make the mean camber lines for each.
inner_xsec_mcl_y_nondim = inner_airfoil.local_camber(nondim_chordwise_coordinates)
outer_xsec_mcl_y_nondim = outer_airfoil.local_camber(nondim_chordwise_coordinates)
# Find the tangent angles of the mean camber lines
inner_xsec_mcl_angle = (
cas.atan2(cas.diff(inner_xsec_mcl_y_nondim), cas.diff(nondim_chordwise_coordinates)) - (
inner_xsec.twist * np.pi / 180)
) # in radians
outer_xsec_mcl_angle = (
cas.atan2(cas.diff(outer_xsec_mcl_y_nondim), cas.diff(nondim_chordwise_coordinates)) - (
outer_xsec.twist * np.pi / 180)
) # in radians
# Find the effective twist axis
effective_twist_axis = outer_xsec_xyz_le - inner_xsec_xyz_le
effective_twist_axis[0] = 0.
# Define number of spanwise points
n_spanwise_coordinates = inner_xsec.spanwise_panels + 1
# Get the spanwise coordinates
if inner_xsec.spanwise_spacing == 'uniform':
nondim_spanwise_coordinates = np.linspace(0, 1, n_spanwise_coordinates)
elif inner_xsec.spanwise_spacing == 'cosine':
nondim_spanwise_coordinates = cosspace(0, 1, n_spanwise_coordinates)
else:
raise Exception("Bad init_val of section.spanwise_spacing!")
for chord_index in range(wing.chordwise_panels):
for span_index in range(inner_xsec.spanwise_panels):
nondim_chordwise_coordinate = nondim_chordwise_coordinates[chord_index]
nondim_spanwise_coordinate = nondim_spanwise_coordinates[span_index]
nondim_chordwise_coordinate_next = nondim_chordwise_coordinates[chord_index + 1]
nondim_spanwise_coordinate_next = nondim_spanwise_coordinates[span_index + 1]
# Calculate vertices
front_left_vertices.append(
(
inner_xsec_xyz_le * (
1 - nondim_chordwise_coordinate) + inner_xsec_xyz_te * nondim_chordwise_coordinate
) * (1 - nondim_spanwise_coordinate) +
(
outer_xsec_xyz_le * (
1 - nondim_chordwise_coordinate) + outer_xsec_xyz_te * nondim_chordwise_coordinate
) * nondim_spanwise_coordinate
)
front_right_vertices.append(
(
inner_xsec_xyz_le * (
1 - nondim_chordwise_coordinate) + inner_xsec_xyz_te * nondim_chordwise_coordinate
) * (1 - nondim_spanwise_coordinate_next) +
(
outer_xsec_xyz_le * (
1 - nondim_chordwise_coordinate) + outer_xsec_xyz_te * nondim_chordwise_coordinate
) * nondim_spanwise_coordinate_next
)
back_left_vertices.append(
(
inner_xsec_xyz_le * (
1 - nondim_chordwise_coordinate_next) + inner_xsec_xyz_te * nondim_chordwise_coordinate_next
) * (1 - nondim_spanwise_coordinate) +
(
outer_xsec_xyz_le * (
1 - nondim_chordwise_coordinate_next) + outer_xsec_xyz_te * nondim_chordwise_coordinate_next
) * nondim_spanwise_coordinate
)
back_right_vertices.append(
(
inner_xsec_xyz_le * (
1 - nondim_chordwise_coordinate_next) + inner_xsec_xyz_te * nondim_chordwise_coordinate_next
) * (1 - nondim_spanwise_coordinate_next) +
(
outer_xsec_xyz_le * (
1 - nondim_chordwise_coordinate_next) + outer_xsec_xyz_te * nondim_chordwise_coordinate_next
) * nondim_spanwise_coordinate_next
)
is_trailing_edge.append(chord_index == wing.chordwise_panels - 1)
# Calculate normal
angle = (
inner_xsec_mcl_angle[chord_index] * (1 - nondim_spanwise_coordinate) +
outer_xsec_mcl_angle[chord_index] * nondim_spanwise_coordinate
)
rot = rotation_matrix_angle_axis(-angle - np.pi / 2, effective_twist_axis)
normal_directions.append(rot @ cas.vertcat(1, 0, 0))
# Handle symmetry
if wing.symmetric:
# Define the relevant cross sections
inner_xsec = wing.xsecs[section_num] # type: WingXSec
outer_xsec = wing.xsecs[section_num + 1] # type: WingXSec
# Define the airfoils at each cross section
if inner_xsec.control_surface_type == "symmetric":
inner_airfoil = inner_xsec.airfoil.add_control_surface(
deflection=inner_xsec.control_surface_deflection,
hinge_point_x=inner_xsec.control_surface_hinge_point
) # type: Airfoil
outer_airfoil = outer_xsec.airfoil.add_control_surface(
deflection=inner_xsec.control_surface_deflection,
hinge_point_x=inner_xsec.control_surface_hinge_point
) # type: Airfoil
elif inner_xsec.control_surface_type == "asymmetric":
inner_airfoil = inner_xsec.airfoil.add_control_surface(
deflection=-inner_xsec.control_surface_deflection,
hinge_point_x=inner_xsec.control_surface_hinge_point
) # type: Airfoil
outer_airfoil = outer_xsec.airfoil.add_control_surface(
deflection=-inner_xsec.control_surface_deflection,
hinge_point_x=inner_xsec.control_surface_hinge_point
) # type: Airfoil
else:
raise ValueError("Invalid input for control_surface_type!")
# Make the mean camber lines for each.
inner_xsec_mcl_y_nondim = inner_airfoil.local_camber(nondim_chordwise_coordinates)
outer_xsec_mcl_y_nondim = outer_airfoil.local_camber(nondim_chordwise_coordinates)
# Find the tangent angles of the mean camber lines
inner_xsec_mcl_angle = (
cas.atan2(cas.diff(inner_xsec_mcl_y_nondim), cas.diff(nondim_chordwise_coordinates)) - (
inner_xsec.twist * np.pi / 180)
) # in radians
outer_xsec_mcl_angle = (
cas.atan2(cas.diff(outer_xsec_mcl_y_nondim), cas.diff(nondim_chordwise_coordinates)) - (
outer_xsec.twist * np.pi / 180)
) # in radians
# Find the effective twist axis
effective_twist_axis = outer_xsec_xyz_le - inner_xsec_xyz_le
effective_twist_axis[0] = 0.
# Define number of spanwise points
n_spanwise_coordinates = inner_xsec.spanwise_panels + 1
# Get the spanwise coordinates
if inner_xsec.spanwise_spacing == 'uniform':
nondim_spanwise_coordinates = np.linspace(0, 1, n_spanwise_coordinates)
elif inner_xsec.spanwise_spacing == 'cosine':
nondim_spanwise_coordinates = cosspace(0, 1, n_spanwise_coordinates)
else:
raise Exception("Bad init_val of section.spanwise_spacing!")
for chord_index in range(wing.chordwise_panels):
for span_index in range(inner_xsec.spanwise_panels):
nondim_chordwise_coordinate = nondim_chordwise_coordinates[chord_index]
nondim_spanwise_coordinate = nondim_spanwise_coordinates[span_index]
nondim_chordwise_coordinate_next = nondim_chordwise_coordinates[chord_index + 1]
nondim_spanwise_coordinate_next = nondim_spanwise_coordinates[span_index + 1]
# Calculate vertices
front_right_vertices.append(reflect_over_XZ_plane(
(
inner_xsec_xyz_le * (
1 - nondim_chordwise_coordinate) + inner_xsec_xyz_te * nondim_chordwise_coordinate
) * (1 - nondim_spanwise_coordinate) +
(
outer_xsec_xyz_le * (
1 - nondim_chordwise_coordinate) + outer_xsec_xyz_te * nondim_chordwise_coordinate
) * nondim_spanwise_coordinate
))
front_left_vertices.append(reflect_over_XZ_plane(
(
inner_xsec_xyz_le * (
1 - nondim_chordwise_coordinate) + inner_xsec_xyz_te * nondim_chordwise_coordinate
) * (1 - nondim_spanwise_coordinate_next) +
(
outer_xsec_xyz_le * (
1 - nondim_chordwise_coordinate) + outer_xsec_xyz_te * nondim_chordwise_coordinate
) * nondim_spanwise_coordinate_next
))
back_right_vertices.append(reflect_over_XZ_plane(
(
inner_xsec_xyz_le * (
1 - nondim_chordwise_coordinate_next) + inner_xsec_xyz_te * nondim_chordwise_coordinate_next
) * (1 - nondim_spanwise_coordinate) +
(
outer_xsec_xyz_le * (
1 - nondim_chordwise_coordinate_next) + outer_xsec_xyz_te * nondim_chordwise_coordinate_next
) * nondim_spanwise_coordinate
))
back_left_vertices.append(reflect_over_XZ_plane(
(
inner_xsec_xyz_le * (
1 - nondim_chordwise_coordinate_next) + inner_xsec_xyz_te * nondim_chordwise_coordinate_next
) * (1 - nondim_spanwise_coordinate_next) +
(
outer_xsec_xyz_le * (
1 - nondim_chordwise_coordinate_next) + outer_xsec_xyz_te * nondim_chordwise_coordinate_next
) * nondim_spanwise_coordinate_next
))
is_trailing_edge.append(chord_index == wing.chordwise_panels - 1)
# Calculate normal
angle = (
inner_xsec_mcl_angle[chord_index] * (1 - nondim_spanwise_coordinate) +
outer_xsec_mcl_angle[chord_index] * nondim_spanwise_coordinate
)
rot = rotation_matrix_angle_axis(-angle - np.pi / 2, effective_twist_axis)
normal_directions.append(reflect_over_XZ_plane(rot @ cas.vertcat(1, 0, 0)))
# Concatenate things (DM)
# self.front_left_vertices = cas.transpose(cas.horzcat(*front_left_vertices))
# self.front_right_vertices = cas.transpose(cas.horzcat(*front_right_vertices))
# self.back_left_vertices = cas.transpose(cas.horzcat(*back_left_vertices))
# self.back_right_vertices = cas.transpose(cas.horzcat(*back_right_vertices))
# self.normal_directions = cas.transpose(cas.horzcat(*normal_directions))
# self.is_trailing_edge = is_trailing_edge
# Concatenate things (MX)
self.front_left_vertices = cas.MX(cas.transpose(cas.horzcat(*front_left_vertices)))
self.front_right_vertices = cas.MX(cas.transpose(cas.horzcat(*front_right_vertices)))
self.back_left_vertices = cas.MX(cas.transpose(cas.horzcat(*back_left_vertices)))
self.back_right_vertices = cas.MX(cas.transpose(cas.horzcat(*back_right_vertices)))
self.normal_directions = cas.MX(cas.transpose(cas.horzcat(*normal_directions)))
self.is_trailing_edge = is_trailing_edge
# Calculate areas
diag1 = self.front_right_vertices - self.back_left_vertices
diag2 = self.front_left_vertices - self.back_right_vertices
cross = cas.cross(diag1, diag2)
cross_norm = cas.sqrt(cross[:, 0] ** 2 + cross[:, 1] ** 2 + cross[:, 2] ** 2)
self.areas = cross_norm / 2
# Do the vortex math
self.left_vortex_vertices = 0.75 * self.front_left_vertices + 0.25 * self.back_left_vertices
self.right_vortex_vertices = 0.75 * self.front_right_vertices + 0.25 * self.back_right_vertices
self.vortex_centers = (self.left_vortex_vertices + self.right_vortex_vertices) / 2
self.vortex_bound_leg = (self.right_vortex_vertices - self.left_vortex_vertices)
self.collocation_points = (
0.5 * (
0.25 * self.front_left_vertices + 0.75 * self.back_left_vertices
) +
0.5 * (
0.25 * self.front_right_vertices + 0.75 * self.back_right_vertices
)
)
# Do final processing for later use
self.n_panels = self.collocation_points.shape[0]
if self.verbose:
print("Meshing complete!")
def setup_geometry(self):
# # Calculate AIC matrix
# ----------------------
if self.verbose:
print("Calculating the collocation influence matrix...")
self.Vij_collocations_x, self.Vij_collocations_y, self.Vij_collocations_z = self.calculate_Vij(
self.collocation_points)
# AIC = (Vij * normal vectors)
self.AIC = (
self.Vij_collocations_x * self.normal_directions[:, 0] +
self.Vij_collocations_y * self.normal_directions[:, 1] +
self.Vij_collocations_z * self.normal_directions[:, 2]
)
# # Calculate Vij at vortex centers for force calculation
# -------------------------------------------------------
if self.verbose:
print("Calculating the vortex center influence matrix...")
self.Vij_centers_x, self.Vij_centers_y, self.Vij_centers_z = self.calculate_Vij(self.vortex_centers)
def setup_operating_point(self):
if self.verbose:
print("Calculating the freestream influence...")
self.steady_freestream_velocity = self.op_point.compute_freestream_velocity_geometry_axes() # Direction the wind is GOING TO, in geometry axes coordinates
self.rotation_freestream_velocities = self.op_point.compute_rotation_velocity_geometry_axes(
self.collocation_points)
self.freestream_velocities = cas.transpose(self.steady_freestream_velocity + cas.transpose(
self.rotation_freestream_velocities)) # Nx3, represents the freestream velocity at each panel collocation point (c)
self.freestream_influences = (
self.freestream_velocities[:, 0] * self.normal_directions[:, 0] +
self.freestream_velocities[:, 1] * self.normal_directions[:, 1] +
self.freestream_velocities[:, 2] * self.normal_directions[:, 2]
)
def calculate_vortex_strengths(self):
# # Calculate Vortex Strengths
# ----------------------------
# Governing Equation: AIC @ Gamma + freestream_influence = 0
if self.verbose:
print("Calculating vortex strengths...")
# Explicit solve
self.vortex_strengths = cas.solve(self.AIC, -self.freestream_influences)
# # Implicit solve
# self.vortex_strengths = self.opti.variable(self.n_panels)
# self.opti.set_initial(self.vortex_strengths, 1)
# self.opti.subject_to([
# self.AIC @ self.vortex_strengths == -self.freestream_influences
# ])
def calculate_forces(self):
# # Calculate Near-Field Forces and Moments
# -----------------------------------------
# Governing Equation: The force on a straight, small vortex filament is F = rho * V x l * gamma,
# where rho is density, V is the velocity vector, x is the cross product operator,
# l is the vector of the filament itself, and gamma is the circulation.
if self.verbose:
print("Calculating forces on each panel...")
# Calculate Vi (local velocity at the ith vortex center point)
Vi_x = self.Vij_centers_x @ self.vortex_strengths + self.freestream_velocities[:, 0]
Vi_y = self.Vij_centers_y @ self.vortex_strengths + self.freestream_velocities[:, 1]
Vi_z = self.Vij_centers_z @ self.vortex_strengths + self.freestream_velocities[:, 2]
Vi = cas.horzcat(Vi_x, Vi_y, Vi_z)
# Calculate forces_inviscid_geometry, the force on the ith panel. Note that this is in GEOMETRY AXES,
# not WIND AXES or BODY AXES.
density = self.op_point.density
# Vi_cross_li = np.cross(Vi, self.vortex_bound_leg, axis=1)
Vi_cross_li = cas.horzcat(
Vi_y * self.vortex_bound_leg[:, 2] - Vi_z * self.vortex_bound_leg[:, 1],
Vi_z * self.vortex_bound_leg[:, 0] - Vi_x * self.vortex_bound_leg[:, 2],
Vi_x * self.vortex_bound_leg[:, 1] - Vi_y * self.vortex_bound_leg[:, 0],
)
# vortex_strengths_expanded = np.expand_dims(self.vortex_strengths, axis=1)
self.forces_geometry = density * Vi_cross_li * self.vortex_strengths
# Calculate total forces and moments
if self.verbose:
print("Calculating total forces and moments...")
self.force_total_geometry = cas.vertcat(
cas.sum1(self.forces_geometry[:, 0]),
cas.sum1(self.forces_geometry[:, 1]),
cas.sum1(self.forces_geometry[:, 2]),
) # Remember, this is in GEOMETRY AXES, not WIND AXES or BODY AXES.
# if self.verbose: print("Total aerodynamic forces (geometry axes): ", self.force_total_inviscid_geometry)
self.force_total_wind = cas.transpose(
self.op_point.compute_rotation_matrix_wind_to_geometry()) @ self.force_total_geometry
# if self.verbose: print("Total aerodynamic forces (wind axes):", self.force_total_inviscid_wind)
self.moments_geometry = cas.cross(
cas.transpose(cas.transpose(self.vortex_centers) - self.airplane.xyz_ref),
self.forces_geometry
)
self.Mtotal_geometry = cas.vertcat(
cas.sum1(self.moments_geometry[:, 0]),
cas.sum1(self.moments_geometry[:, 1]),
cas.sum1(self.moments_geometry[:, 2]),
)
self.moment_total_wind = cas.transpose(
self.op_point.compute_rotation_matrix_wind_to_geometry()) @ self.Mtotal_geometry
# Calculate dimensional forces
self.lift_force = -self.force_total_wind[2]
self.drag_force_induced = -self.force_total_wind[0]
self.side_force = self.force_total_wind[1]
# Calculate nondimensional forces
q = self.op_point.dynamic_pressure()
s_ref = self.airplane.s_ref
b_ref = self.airplane.b_ref
c_ref = self.airplane.c_ref
self.CL = self.lift_force / q / s_ref
self.CDi = self.drag_force_induced / q / s_ref
self.CY = self.side_force / q / s_ref
self.Cl = self.moment_total_wind[0] / q / s_ref / b_ref
self.Cm = self.moment_total_wind[1] / q / s_ref / c_ref
self.Cn = self.moment_total_wind[2] / q / s_ref / b_ref
# Guard against division by zero when CDi == 0
self.CL_over_CDi = cas.if_else(self.CDi == 0, 0, self.CL / self.CDi)
def calculate_Vij(self,
points, # type: cas.MX
align_trailing_vortices_with_freestream=False, # Otherwise, aligns with x-axis
):
# Calculates Vij, the velocity influence matrix (First index is collocation point number, second index is vortex number).
# points: the list of points (Nx3) to calculate the velocity influence at.
n_points = points.shape[0]
# Make a and b vectors.
# a: Vector from all collocation points to all horseshoe vortex left vertices.
# # First index is collocation point #, second is vortex #.
# b: Vector from all collocation points to all horseshoe vortex right vertices.
# # First index is collocation point #, second is vortex #.
a_x = points[:, 0] - cas.repmat(cas.transpose(self.left_vortex_vertices[:, 0]), n_points, 1)
a_y = points[:, 1] - cas.repmat(cas.transpose(self.left_vortex_vertices[:, 1]), n_points, 1)
a_z = points[:, 2] - cas.repmat(cas.transpose(self.left_vortex_vertices[:, 2]), n_points, 1)
b_x = points[:, 0] - cas.repmat(cas.transpose(self.right_vortex_vertices[:, 0]), n_points, 1)
b_y = points[:, 1] - cas.repmat(cas.transpose(self.right_vortex_vertices[:, 1]), n_points, 1)
b_z = points[:, 2] - cas.repmat(cas.transpose(self.right_vortex_vertices[:, 2]), n_points, 1)
if align_trailing_vortices_with_freestream:
freestream_direction = self.op_point.compute_freestream_direction_geometry_axes()
u_x = freestream_direction[0]
u_y = freestream_direction[1]
u_z = freestream_direction[2]
else:
u_x = 1
u_y = 0
u_z = 0
# Do some useful arithmetic
a_cross_b_x = a_y * b_z - a_z * b_y
a_cross_b_y = a_z * b_x - a_x * b_z
a_cross_b_z = a_x * b_y - a_y * b_x
a_dot_b = a_x * b_x + a_y * b_y + a_z * b_z
a_cross_u_x = a_y * u_z - a_z * u_y
a_cross_u_y = a_z * u_x - a_x * u_z
a_cross_u_z = a_x * u_y - a_y * u_x
a_dot_u = a_x * u_x + a_y * u_y + a_z * u_z
b_cross_u_x = b_y * u_z - b_z * u_y
b_cross_u_y = b_z * u_x - b_x * u_z
b_cross_u_z = b_x * u_y - b_y * u_x
b_dot_u = b_x * u_x + b_y * u_y + b_z * u_z
norm_a = cas.sqrt(a_x ** 2 + a_y ** 2 + a_z ** 2)
norm_b = cas.sqrt(b_x ** 2 + b_y ** 2 + b_z ** 2)
norm_a_inv = 1 / norm_a
norm_b_inv = 1 / norm_b
# # Handle the special case where the collocation point is along a bound vortex leg
# a_cross_b_squared = (
# a_cross_b_x ** 2 +
# a_cross_b_y ** 2 +
# a_cross_b_z ** 2
# )
# a_dot_b = cas.if_else(a_cross_b_squared < 1e-8, a_dot_b + 1, a_dot_b)
# a_cross_u_squared = (
# a_cross_u_x ** 2 +
# a_cross_u_y ** 2 +
# a_cross_u_z ** 2
# )
# a_dot_u = cas.if_else(a_cross_u_squared < 1e-8, a_dot_u + 1, a_dot_u)
# b_cross_u_squared = (
# b_cross_u_x ** 2 +
# b_cross_u_y ** 2 +
# b_cross_u_z ** 2
# )
# b_dot_u = cas.if_else(b_cross_u_squared < 1e-8, b_dot_u + 1, b_dot_u)
# Handle the special case where the collocation point is along the bound vortex leg
a_dot_b -= 1e-8
# a_dot_xhat += 1e-8
# b_dot_xhat += 1e-8
# Calculate Vij
term1 = (norm_a_inv + norm_b_inv) / (norm_a * norm_b + a_dot_b)
term2 = norm_a_inv / (norm_a - a_dot_u)
term3 = norm_b_inv / (norm_b - b_dot_u)
Vij_x = 1 / (4 * np.pi) * (
a_cross_b_x * term1 +
a_cross_u_x * term2 -
b_cross_u_x * term3
)
Vij_y = 1 / (4 * np.pi) * (
a_cross_b_y * term1 +
a_cross_u_y * term2 -
b_cross_u_y * term3
)
Vij_z = 1 / (4 * np.pi) * (
a_cross_b_z * term1 +
a_cross_u_z * term2 -
b_cross_u_z * term3
)
return Vij_x, Vij_y, Vij_z
# def calculate_delta_cp(self):
# # Find the area of each panel ()
# diag1 = self.front_left_vertices - self.back_right_vertices
# diag2 = self.front_right_vertices - self.back_left_vertices
# self.areas = np.linalg.norm(np.cross(diag1, diag2, axis=1), axis=1) / 2
#
# # Calculate panel data
# self.Fi_normal = np.einsum('ij,ij->i', self.forces_inviscid_geometry, self.normal_directions)
# self.pressure_normal = self.Fi_normal / self.areas
# self.delta_cp = self.pressure_normal / self.op_point.dynamic_pressure()
def get_induced_velocity_at_point(self, point):
if not self.opti.return_status() == 'Solve_Succeeded':
print("WARNING: This method should only be used after a solution has been found!!!\n"
"Running anyway for debugging purposes - this is likely to not work.")
Vij_x, Vij_y, Vij_z = self.calculate_Vij(point)
# vortex_strengths = self.opti.debug.value(self.vortex_strengths)
Vi_x = Vij_x @ self.vortex_strengths
Vi_y = Vij_y @ self.vortex_strengths
Vi_z = Vij_z @ self.vortex_strengths
get = lambda x: self.opti.debug.value(x)
Vi_x = get(Vi_x)
Vi_y = get(Vi_y)
Vi_z = get(Vi_z)
Vi = np.vstack((Vi_x, Vi_y, Vi_z)).T
return Vi
def get_velocity_at_point(self, point):
# Input: a Nx3 numpy array of points that you would like to know the velocities at.
# Output: a Nx3 numpy array of the velocities at those points.
Vi = self.get_induced_velocity_at_point(point)
freestream = self.op_point.compute_freestream_velocity_geometry_axes()
V = cas.transpose(cas.transpose(Vi) + freestream)
return V
def calculate_streamlines(self,
seed_points=None, # will be auto-calculated if not specified
n_steps=100, # minimum of 2
length=None # will be auto-calculated if not specified
):
if length is None:
length = self.airplane.c_ref * 5
if seed_points is None:
seed_points = (self.back_left_vertices + self.back_right_vertices) / 2
# Resolution
length_per_step = length / n_steps
# Initialize
streamlines = [seed_points]
# Iterate
for step_num in range(1, n_steps):
update_amount = self.get_velocity_at_point(streamlines[-1])
norm_update_amount = cas.sqrt(
update_amount[:, 0] ** 2 + update_amount[:, 1] ** 2 + update_amount[:, 2] ** 2)
update_amount = length_per_step * update_amount / norm_update_amount
streamlines.append(streamlines[-1] + update_amount)
self.streamlines = streamlines
def draw(self,
data_to_plot=None,
data_name=None,
show=True,
draw_streamlines=True,
recalculate_streamlines=False
):
"""
Draws the solution. Note: Must be called on a SOLVED AeroProblem object.
To solve an AeroProblem, use opti.solve(). To substitute a solved solution, use ap = ap.substitute_solution(sol).
:return:
"""
if self.verbose:
print("Drawing...")
if self.verbose and not self.opti.return_status() == 'Solve_Succeeded':
print("WARNING: This method should only be used after a solution has been found!\n"
"Running anyway for debugging purposes - this is likely to not work...")
# Do substitutions
get = lambda x: self.opti.debug.value(x)
front_left_vertices = get(self.front_left_vertices)
front_right_vertices = get(self.front_right_vertices)
back_left_vertices = get(self.back_left_vertices)
back_right_vertices = get(self.back_right_vertices)
left_vortex_vertices = get(self.left_vortex_vertices)
right_vortex_vertices = get(self.right_vortex_vertices)
self.vortex_strengths = get(self.vortex_strengths)
try:
data_to_plot = get(data_to_plot)
except NotImplementedError:
pass
if data_to_plot is None:
data_name = "Vortex Strengths"
data_to_plot = get(self.vortex_strengths)
fig = Figure3D()
for index in range(len(front_left_vertices)):
fig.add_quad(
points=[
front_left_vertices[index, :],
front_right_vertices[index, :],
back_right_vertices[index, :],
back_left_vertices[index, :],
],
intensity=data_to_plot[index],
outline=True,
)
# fig.add_line( # Don't draw the quarter-chords
# points=[
# left_vortex_vertices[index],
# right_vortex_vertices[index]
# ],
# )
# Fuselages
for fuse_id in range(len(self.airplane.fuselages)):
fuse = self.airplane.fuselages[fuse_id] # type: Fuselage
for xsec_id in range(len(fuse.xsecs) - 1):
xsec_1 = fuse.xsecs[xsec_id] # type: FuselageXSec
xsec_2 = fuse.xsecs[xsec_id + 1] # type: FuselageXSec
r1 = xsec_1.radius
r2 = xsec_2.radius
points_1 = np.zeros((fuse.circumferential_panels, 3))
points_2 = np.zeros((fuse.circumferential_panels, 3))
for point_index in range(fuse.circumferential_panels):
rot = rotation_matrix_angle_axis(
2 * cas.pi * point_index / fuse.circumferential_panels,
[1, 0, 0],
True
).toarray()
points_1[point_index, :] = rot @ np.array([0, 0, r1])
points_2[point_index, :] = rot @ np.array([0, 0, r2])
points_1 = points_1 + np.array(fuse.xyz_le).reshape(-1) + np.array(xsec_1.xyz_c).reshape(-1)
points_2 = points_2 + np.array(fuse.xyz_le).reshape(-1) + np.array(xsec_2.xyz_c).reshape(-1)
for point_index in range(fuse.circumferential_panels):
fig.add_quad(points=[
points_1[(point_index) % fuse.circumferential_panels, :],
points_1[(point_index + 1) % fuse.circumferential_panels, :],
points_2[(point_index + 1) % fuse.circumferential_panels, :],
points_2[(point_index) % fuse.circumferential_panels, :],
],
intensity=0,
mirror=fuse.symmetric,
)
if draw_streamlines:
if (not hasattr(self, 'streamlines')) or recalculate_streamlines:
if self.verbose:
print("Calculating streamlines...")
is_trailing_edge = np.array(self.is_trailing_edge, dtype=bool)
seed_points = (back_left_vertices[is_trailing_edge] + back_right_vertices[is_trailing_edge]) / 2
self.calculate_streamlines(seed_points=seed_points)
if self.verbose:
print("Parsing streamline data...")
n_streamlines = self.streamlines[0].shape[0]
n_timesteps = len(self.streamlines)
for streamlines_num in range(n_streamlines):
streamline = [self.streamlines[ts][streamlines_num, :] for ts in range(n_timesteps)]
fig.add_streamline(
points=streamline,
)
return fig.draw(
show=show,
colorbar_title=data_name
)
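# A hedged usage sketch assembled from the comments and docstrings above;
# `airplane` and `op_point` are assumed to be an aerosandbox Airplane and
# OperatingPoint constructed elsewhere, and the solver choice is an assumption,
# so this block is illustrative rather than runnable on its own.
#
# opti = cas.Opti()
# opti.solver('ipopt')                                  # assumed solver choice
# vlm = VortexLatticeMethod(airplane=airplane, op_point=op_point, opti=opti)
# sol = opti.solve()                                    # "use opti.solve()" per draw()'s docstring
# print('CL  =', sol.value(vlm.CL))                     # lift coefficient
# print('CDi =', sol.value(vlm.CDi))                    # induced drag coefficient
# vlm.draw()                                            # visualize panels and vortex strengths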
|
from serif.model.document_model import DocumentModel
from serif.theory.enumerated_type import MentionType
import logging
# Unlike NameMentionModel, this won't throw out any names when it
# can't find an NP node that exactly matches the name extent.
# It will create name mentions from just start and end token if
# it can't find a matching syn node. We tried matching any node
# not just NP, but that screws up the mention.atomic_head
# function.
# The reason why we inherit from DocumentModel and not MentionModel
# is because MentionModel only makes mentions out of syn nodes,
# and we want to be able to make mentions out of start and
# end tokens.
logger = logging.getLogger(__name__)
class AggressiveNameMentionModel(DocumentModel):
"""Makes Mentions for existing Names, doesn't throw out any names"""
def __init__(self,**kwargs):
super(AggressiveNameMentionModel,self).__init__(**kwargs)
def name_mention_from_syn_node(self, sentence, entity_type, syn_node):
# check for existing mention with syn_node
found = False
for mention in sentence.mention_set:
if mention.syn_node == syn_node or mention.atomic_head == syn_node:
found = True
mention.mention_type = MentionType.name
mention.entity_type = entity_type
if found: return
# create new
sentence.mention_set.add_new_mention(syn_node, "NAME", entity_type)
def name_mention_from_tokens(self, sentence, entity_type, start_token, end_token):
# check for existing mention with syn_node
found = False
for mention in sentence.mention_set:
if mention.start_token == start_token and mention.end_token == end_token:
found = True
mention.mention_type = MentionType.name
mention.entity_type = entity_type
if found: return
# create new
sentence.mention_set.add_new_mention_from_tokens("NAME", entity_type, start_token, end_token)
def process_document(self, document):
for sentence in document.sentences:
if sentence.mention_set is None:
sentence.add_new_mention_set()
if sentence.name_theory is None:
logger.warning("No name theory for sentence {}, skipping AggressiveNameMentionModel".
format(sentence.id))
continue
elif sentence.parse is None:
logger.warning("No parse for sentence {}, skipping AggressiveNameMentionModel".
format(sentence.id))
continue
for name in sentence.name_theory:
# Exact match NP
syn_node = sentence.parse.get_covering_syn_node(
name.start_token, name.end_token, ["NP"])
if (syn_node and
syn_node.start_token == name.start_token and
syn_node.end_token == name.end_token):
self.name_mention_from_syn_node(sentence, name.entity_type, syn_node)
continue
# Just create mention from start and end token
self.name_mention_from_tokens(sentence, name.entity_type, name.start_token, name.end_token)
|
__all__ = [
'instantiate_coroutine'
]
import inspect, functools, threading
import collections.abc
from contextlib import contextmanager
_locals = threading.local()
@contextmanager
def running(kernel):
if getattr(_locals, 'running', False):
raise RuntimeError('only one xiaolongbaoloop kernel per thread is allowed')
_locals.running = True
_locals.kernel = kernel
try:
yield
finally:
_locals.running = False
_locals.kernel = None
def iscoroutinefunction(func):
if isinstance(func, functools.partial):
return iscoroutinefunction(func.func)
if hasattr(func, '__func__'):
return iscoroutinefunction(func.__func__)
return inspect.iscoroutinefunction(func) or hasattr(func, '_awaitable') or inspect.isasyncgenfunction(func)
def instantiate_coroutine(corofunc, *args, **kwargs):
# already a coroutine object; pass it through unchanged
if isinstance(corofunc, collections.abc.Coroutine) or inspect.isgenerator(corofunc):
assert not args and not kwargs, 'arguments cannot be passed to an already instantiated coroutine'
return corofunc
if not iscoroutinefunction(corofunc) and not getattr(corofunc, '_async_thread', False):
coro = corofunc(*args, **kwargs)
if not isinstance(coro, collections.abc.Coroutine):
raise TypeError(f'could not create coroutine from {corofunc}')
return coro
async def context():
return corofunc(*args, **kwargs)
try:
context().send(None)
except StopIteration as e:
return e.value
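# A minimal hedged usage sketch (not part of the original module); it relies
# only on instantiate_coroutine() as defined above.
if __name__ == '__main__':
    import asyncio

    async def add(a, b):
        return a + b

    coro = instantiate_coroutine(add, 1, 2)   # coroutine function + args -> coroutine object
    print(asyncio.run(coro))                  # 3

    ready = add(3, 4)                         # already-instantiated coroutine
    assert instantiate_coroutine(ready) is ready
    print(asyncio.run(ready))                 # 7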
|
import os
import tempfile
import pytest
import json
from tassaron_flask_template.main import create_app, init_app
from tassaron_flask_template.main.plugins import db
from tassaron_flask_template.main.models import User
from tassaron_flask_template.shop.inventory_models import *
from flask import session
@pytest.fixture
def client():
global app, db, bcrypt, login_manager
app = create_app()
db_fd, db_path = tempfile.mkstemp()
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite+pysqlite:///" + db_path
app.config["WTF_CSRF_ENABLED"] = False
app.config["TESTING"] = True
app = init_app(app)
client = app.test_client()
with app.app_context():
db.create_all()
db.session.add(
User(email="test@example.com", password="password", is_admin=False)
)
db.session.add(
ProductCategory(
name="Food",
)
)
db.session.add(
Product(
name="Potato",
price=1.0,
description="Tuber from the ground",
image="potato.jpg",
stock=1,
category_id=1,
)
)
db.session.commit()
yield client
os.close(db_fd)
os.unlink(db_path)
def test_session_is_restored(client):
with client:
client.get("/")
assert session["cart"] == {}
client.post(
"/cart/add",
data=json.dumps({"id": 1, "quantity": 1}),
content_type='application/json',
)
assert session["cart"] == {1: 1}
client.post(
"/account/login",
data={"email": "test@example.com", "password": "password"},
follow_redirects=True,
)
client.get("/account/logout")
assert session["cart"] == {}
client.post(
"/account/login",
data={"email": "test@example.com", "password": "password"},
follow_redirects=True,
)
assert session["cart"] == {1: 1}
def test_session_doesnt_overwrite(client):
with app.app_context():
db.session.add(
Product(
name="Potato",
price=1.0,
description="Tuber from the ground",
image="potato.jpg",
stock=1,
category_id=1,
)
)
db.session.commit()
with client:
client.post(
"/account/login",
data={"email": "test@example.com", "password": "password"},
follow_redirects=True,
)
client.post(
"/cart/add",
data=json.dumps({"id": 1, "quantity": 1}),
content_type='application/json',
)
assert session["cart"] == {1: 1}
client.get("/account/logout")
client.post(
"/cart/add",
data=json.dumps({"id": 2, "quantity": 1}),
content_type='application/json',
)
client.post(
"/account/login",
data={"email": "test@example.com", "password": "password"},
follow_redirects=True,
)
assert session["cart"] == {2: 1}
|
import asyncio
import collections
import typing
import weakref
from datetime import datetime
from functools import reduce
from inspect import isawaitable
from signal import SIGINT, SIGTERM
from types import AsyncGeneratorType
from aiohttp import ClientSession
from database import MongoDatabase
from config import Logger, Paper, SpiderCount, Document, Targets, Target
from .exceptions import (
InvalidCallbackResult,
NotImplementedParseError,
NothingMatchedError,
)
from .item import Item
from .request import Request
from .response import Response
from .tools import fetch_bank_cookies
from copy import copy
import os
import re
import time
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
class SpiderHook:
callback_result_map: dict = None
logger = Logger(level='warning').logger
async def _run_spider_hook(self, hook_func):
if callable(hook_func):
try:
aws_hook_func = hook_func(weakref.proxy(self))
if isawaitable(aws_hook_func):
await aws_hook_func
except Exception as e:
self.logger.error(f"<Hook {hook_func.__name__}: {e}")
async def process_failed_response(self, request, response):
pass
async def process_succeed_response(self, request, response):
pass
async def process_item(self, item):
pass
async def process_callback_result(self, callback_result):
callback_result_name = type(callback_result).__name__
process_func_name = self.callback_result_map.get(callback_result_name, "")
process_func = getattr(self, process_func_name, None)
if process_func is not None:
await process_func(callback_result)
else:
raise InvalidCallbackResult(f"<Invalid callback result type in process_callback_result(): {callback_result_name}>")
class Spider(SpiderHook):
name = None
request_config = None
# request_session = None
headers: dict = None
metadata: dict = None
kwargs: dict = None
failed_counts: int = 0
success_counts: int = 0
worker_numbers: int = 2
concurrency: int = 3
worker_tasks: list = []
targets: list = []
pattern_link = r'http[s]?://[0-9a-zA-Z/\?=&\.]+'
pattern_date = '20[0-9]{2}[-年/][01]?[0-9][-月/][0123]?[0-9]日?'
pattern_date_extra = r'20\.\d{2}\.\d{2}'
pattern_chinese = r'[\u4e00-\u9fa5]'
pattern_number = r'\d'
pattern_letter = r'[a-zA-Z]'
cookie_need_banks = ['浦发银行', '江苏银行']
cookies_available = {}
suffix_file = ['.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx', '.pdf', '.zip', '.rar', '.tar', '.bz2', '.7z','.gz']
def __init__(
self,
name=None,
start_urls: list = None,
loop=None,
is_async_start: bool = False,
cancel_tasks: bool = True,
**kwargs,
):
if name is not None:
self.name = name
elif not getattr(self, 'name', None):
raise ValueError("%s must have a name" % type(self).__name__)
if not isinstance(self.targets, typing.Iterable):
raise ValueError("In %s, targets must be type of list" % type(self).__name__)
self.start_urls = start_urls or []
if not isinstance(self.start_urls, typing.Iterable):
raise ValueError("start_urls must be collections.Iterable")
self.loop = loop
asyncio.set_event_loop(self.loop)
self.request_queue = asyncio.Queue()
self.sem = asyncio.Semaphore(self.concurrency)
# Init object-level properties (these are class attributes declared on SpiderHook)
self.callback_result_map = self.callback_result_map or {}
self.headers = self.headers or {}
self.metadata = self.metadata or {}
self.kwargs = self.kwargs or {}
self.request_config = self.request_config or {}
self.request_session = ClientSession()
self.cancel_tasks = cancel_tasks
self.is_async_start = is_async_start
# MongoDB database
self.mongo = MongoDatabase()
mongo_db = self.mongo.db()
self.collection = mongo_db['TEXT']
self.collection_manual = mongo_db['MANUAL']
self.collection_spider_count = mongo_db['spider_count']
self.collection_tweet = mongo_db['TWEET']
self.collection_tweet_photo = mongo_db['TWEET_PHOTO']
self.collection_tweet_comment = mongo_db['TWEET_COMMENT']
self.collection_tweet_userprofile = mongo_db['USER_PROFILE']
self.collection_tweet_useravatar = mongo_db['USER_AVATAR']
# Important: handles asynchronous callback results; launched from the start_worker() method.
# Iterates over each callback_result yielded by callback_results and dispatches it according to its type.
async def _process_async_callback(self, callback_results: AsyncGeneratorType, response: Response = None):
try:
async for callback_result in callback_results:
if isinstance(callback_result, AsyncGeneratorType):
await self._process_async_callback(callback_result)
elif isinstance(callback_result, Request):
self.request_queue.put_nowait(
self.handle_request(request=callback_result)
)
elif isinstance(callback_result, typing.Coroutine):
self.request_queue.put_nowait(
self.handle_callback(aws_callback=callback_result, response=response)
)
elif isinstance(callback_result, Item):
await self.process_item(callback_result)
else:
await self.process_callback_result(callback_result=callback_result)
except NothingMatchedError as e:
error_info = f"<Field: {str(e).lower()}" + f", error url: {response.url}>"
self.logger.error(error_info)
except Exception as e:
self.logger.error(e)
async def _process_response(self, request: Request, response: Response):
if response:
if response.ok:
self.success_counts += 1
await self.process_succeed_response(request, response)
else:
self.failed_counts += 1
await self.process_failed_response(request, response)
async def _start(self, after_start=None, before_stop=None):
print('【======================================= starting: %s =========================================】' % self.name)
start_time = datetime.now()
# Add signal handlers (only supported on Unix-like systems)
for signal in (SIGINT, SIGTERM):
try:
self.loop.add_signal_handler(signal, lambda: asyncio.ensure_future(self.stop(signal)))
except NotImplementedError:
pass
# Actually run the crawl
try:
await self._run_spider_hook(after_start)
await self.start_master()
await self._run_spider_hook(before_stop)
finally:
await self.request_session.close()
# Log stats about this crawl task: success count, failure count, and elapsed time
end_time = datetime.now()
spider_count = SpiderCount(name=self.name, time_start=start_time, time_end=end_time, success=self.success_counts, failure=self.failed_counts)
self.collection_spider_count.insert_one(spider_count.do_dump())
print(spider_count)
print('----------- elapsed: %s ------------' % (end_time - start_time))
@classmethod
async def async_start(
cls,
start_urls: list = None,
loop=None,
after_start=None,
before_stop=None,
cancel_tasks: bool = True,
**kwargs,
):
loop = loop or asyncio.get_event_loop()
spider_ins = cls(start_urls=start_urls, loop=loop, is_async_start=True, cancel_tasks=cancel_tasks, **kwargs)
await spider_ins._start(after_start=after_start, before_stop=before_stop)
return spider_ins
@classmethod
def start(
cls,
start_urls: list = None,
loop=None,
after_start=None,
before_stop=None,
close_event_loop=True,
**kwargs,
):
print('【======================================= starting: %s =========================================】' % cls.name)
loop = loop or asyncio.new_event_loop()
spider_ins = cls(start_urls=start_urls, loop=loop, **kwargs)
spider_ins.loop.run_until_complete(spider_ins._start(after_start=after_start, before_stop=before_stop))
spider_ins.loop.run_until_complete(spider_ins.loop.shutdown_asyncgens())
if close_event_loop:
spider_ins.loop.close()
return spider_ins
async def handle_callback(self, aws_callback: typing.Coroutine, response):
"""Process coroutine callback function"""
callback_result = None
try:
callback_result = await aws_callback
except NothingMatchedError as e:
self.logger.error(f"<Item: {str(e).lower()}>")
except Exception as e:
self.logger.error(f"<Callback[{aws_callback.__name__}]: {e}")
return callback_result, response
async def handle_request(self, request: Request) -> typing.Tuple[AsyncGeneratorType, Response]:
callback_result, response = None, None
try:
callback_result, response = await request.fetch_callback(self.sem)
await self._process_response(request=request, response=response)
except NotImplementedParseError as e:
self.logger.error(e)
except NothingMatchedError as e:
error_info = f"<Field: {str(e).lower()}" + f", error url: {request.url}>"
self.logger.error(error_info)
except Exception as e:
self.logger.error(f"<Callback[{request.callback.__name__}]: {e}")
return callback_result, response
# 6. Handles multiple handle_request calls; if form_datas is not empty, POST requests are issued.
# Web request helper that works around asyncio's "too many file descriptors in select()" problem and avoids firing too many requests at once and getting the IP banned;
# use multiple_request when the list contains more than roughly 500 requests.
async def multiple_request(self, urls: list, form_datas: list = None, is_gather: bool = False, **kwargs):
if is_gather:
if form_datas:
tasks = [self.handle_request(self.request(url=urls[0], form_data=one, **kwargs)) for one in form_datas]
else:
tasks = [self.handle_request(self.request(url=url, **kwargs)) for url in urls]
resp_results = await asyncio.gather(*tasks, return_exceptions=True)
for index, task_result in enumerate(resp_results):
if not isinstance(task_result, RuntimeError) and task_result:
_, response = task_result
response.index = index
yield response
else:
if form_datas:
for index, one in enumerate(form_datas):
_, response = await self.handle_request(self.request(url=urls[0], form_data=one, **kwargs))
response.index = index
yield response
else:
for index, one in enumerate(urls): # because this loop awaits inside the iteration, we cannot call back into the original method, or it would recurse forever
_, response = await self.handle_request(self.request(url=one, **kwargs))
response.index = index
yield response
async def parse(self, response):
raise NotImplementedParseError("<!!! parse function is expected !!!>")
# [Only process_start_urls is rewritten here: the spider's crawl targets use the Target class and can be customized freely]
# If an entry is a Targets instance, expand it into individual Target instances
async def process_start_urls(self):
list_target = []
for one in self.targets:
if type(one) == Targets:
for url in one.urls:
target_one = Target(
bank_name=one.bank_name,
url=url,
type_main=one.type_main,
type_next=one.type_next,
type_one=one.type_one,
type_two=one.type_two,
type_three=one.type_three,
method=one.method,
headers=one.headers,
formdata=one.formdata,
callback=one.callback,
metadata=one.metadata,
selectors=one.selectors
)
list_target.append(target_one)
else:
list_target.append(one)
for target in list_target:
bank_name = target.bank_name
if bank_name in self.cookie_need_banks:
if not self.cookies_available.get(bank_name):
cookie = await fetch_bank_cookies(bank_name)
self.cookies_available[bank_name] = cookie
self.headers.update({'Cookie': self.cookies_available[bank_name]})
headers = copy(self.headers)
headers.update(target.headers)
yield self.request(url=target.url, method=target.method, form_data=target.formdata, headers=headers, callback=self.parse, metadata={'target': target})
else:
if self.headers.get('Cookie'):
self.headers.pop('Cookie')
headers = copy(self.headers)
headers.update(target.headers)
yield self.request(url=target.url, method=target.method, form_data=target.formdata, headers=headers, callback=self.parse, metadata={'target': target})
# [custom start method]
async def manual_start_urls(self):
yield self.request()
def request(
self,
url: str = 'http://httpbin.org/get',
method: str = "GET",
*,
callback=None,
encoding: typing.Optional[str] = None,
headers: dict = None,
metadata: dict = None,
request_config: dict = None,
request_session=None,
form_data: dict = None,
**kwargs,
):
"""Init a Request class for crawling html"""
headers = headers or {}
metadata = metadata or {}
request_config = request_config or {}
request_session = request_session or self.request_session
form_data = form_data
headers.update(self.headers.copy())
request_config.update(self.request_config.copy())
kwargs.update(self.kwargs.copy())
# if form_data is present, the method is POST; otherwise it defaults to GET
method = 'POST' if form_data else method
return Request(
url=url,
method=method,
callback=callback,
encoding=encoding,
headers=headers,
metadata=metadata,
request_config=request_config,
request_session=request_session,
form_data=form_data,
**kwargs,
)
async def start_master(self):
if self.targets:
async for request_ins in self.process_start_urls():
self.request_queue.put_nowait(self.handle_request(request_ins))
else:
async for request_ins in self.manual_start_urls():
self.request_queue.put_nowait(self.handle_request(request_ins))
workers = [asyncio.ensure_future(self.start_worker()) for i in range(self.worker_numbers)]
for worker in workers:
self.logger.info(f"Worker started: {id(worker)}")
await self.request_queue.join() # block until every item in the queue has been received and processed; join() unblocks once the unfinished count drops to zero.
# Reaching this point means every task in request_queue has finished and request_queue.join() no longer blocks.
# What follows is cancelling the remaining tasks and closing the loop.
if not self.is_async_start: # if not started asynchronously, wait for the stop() method to run
await self.stop(SIGINT)
else:
if self.cancel_tasks: # if started asynchronously (async_start() instantiates the Spider with cancel_tasks=True), cancel the earlier tasks and run the currently started task
await self._cancel_tasks()
async def start_worker(self):
while True:
request_item = await self.request_queue.get()
self.worker_tasks.append(request_item)
if self.request_queue.empty():
results = await asyncio.gather(*self.worker_tasks, return_exceptions=True)
for task_result in results:
if not isinstance(task_result, RuntimeError) and task_result:
callback_results, response = task_result
if isinstance(callback_results, AsyncGeneratorType):
await self._process_async_callback(callback_results, response)
self.worker_tasks = []
self.request_queue.task_done() # each consumer call to task_done() marks one item as fully processed, so the unfinished count decreases.
async def stop(self, _signal):
self.logger.info(f"Stopping spider: {self.name}")
await self._cancel_tasks()
self.loop.stop()
async def _cancel_tasks(self):
tasks = []
for task in asyncio.Task.all_tasks():
if task is not asyncio.tasks.Task.current_task():
tasks.append(task)
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
async def download_save_document(self, bank_name, url, file_type):
file_suffix = os.path.splitext(url)[-1]
content = await self.download(bank_name, url)
ukey = bank_name + '=' + url
document = Document(ukey=ukey, file_type=file_type, file_suffix=file_suffix, content=content)
data = document.do_dump()
self.mongo.do_insert_one(self.collection_manual, {'_id': data['_id']}, data)
return data
# used to download a file
async def download(self, bank_name, url, timeout=25):
content = ''
headers = copy(self.headers)
if bank_name in self.cookie_need_banks:
if not self.cookies_available.get(bank_name):
cookie = await fetch_bank_cookies(bank_name)
self.cookies_available[bank_name] = cookie
headers.update({'Cookie': self.cookies_available[bank_name]})
try:
async with self.sem:
async with self.request_session.get(url, headers=headers, timeout=timeout) as response:
status_code = response.status
if status_code == 200:
content = await response.read()
except Exception as e:
print('download failed: {}, exception: {}, {}'.format(url, str(type(e)), str(e)))
return content
async def save_paper(self, paper: Paper):
data = paper.do_dump()
self.mongo.do_insert_one(self.collection, {'_id': data['_id']}, data)
# print('about to insert into the database:', data['_id'])
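# A minimal hedged sketch of a concrete spider (not from the source). It only
# uses the manual_start_urls()/parse() hooks and the request() helper shown
# above; the spider name and the httpbin URL are placeholders, and actually
# running it would also require the MongoDatabase/config setup that the base
# class expects.
class ExampleSpider(Spider):
    name = 'example_spider'

    async def manual_start_urls(self):
        # issue a single GET request against a placeholder endpoint
        yield self.request(url='http://httpbin.org/get', callback=self.parse)

    async def parse(self, response):
        # the worker routes the fetched Response here; just log its URL
        self.logger.info(f"parsed response from {response.url}")

# ExampleSpider.start()  # would create an event loop and run the crawl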
|
"""
Defines a generic abstract list with the usual methods, and implements
a list using arrays and linked nodes. It also includes a linked list iterator.
Also defines UnitTests for the class.
"""
__author__ = "Maria Garcia de la Banda, modified by Brendon Taylor, Graeme Gange, and Alexey Ignatiev"
__docformat__ = 'reStructuredText'
import unittest
from abc import ABC, abstractmethod
from enum import Enum
from typing import Generic
from referential_array import ArrayR, T
class List(ABC, Generic[T]):
""" Abstract class for a generic List. """
def __init__(self) -> None:
""" Initialises the length of an exmpty list to be 0. """
self.length = 0
@abstractmethod
def __setitem__(self, index: int, item: T) -> None:
""" Sets the value of the element at position index to be item
:pre: index is 0 <= index < len(self)
"""
pass
@abstractmethod
def __getitem__(self, index: int) -> T:
""" Returns the value of the element at position index
:pre: index is 0 <= index < len(self)
"""
pass
def __len__(self) -> int:
""" Returns the length of the list
:complexity: O(1)
"""
return self.length
@abstractmethod
def is_full(self) -> bool:
""" Returns True iff the list is full
"""
pass
def is_empty(self) -> bool:
""" Returns True iff the list is empty
:complexity: O(1)
"""
return len(self) == 0
def clear(self):
""" Sets the list back to empty
:complexity: O(1)
"""
self.length = 0
@abstractmethod
def insert(self, index: int, item: T) -> None:
""" Moves self[j] to self[j+1] if j>=index, sets self[index]=item
:pre: index is 0 <= index <= len(self)
"""
pass
def append(self, item: T) -> None:
""" Adds the item to the end of the list; the rest is unchanged.
:see: #insert(index: int, item: T)
"""
self.insert(len(self), item)
@abstractmethod
def delete_at_index(self, index: int) -> T:
"""Moved self[j+1] to self[j] if j>index & returns old self[index]
:pre: index is 0 <= index < len(self)
"""
pass
@abstractmethod
def index(self, item: T) -> int:
""" Returns the position of the first occurrence of item
:raises ValueError: if item not in the list
"""
pass
def remove(self, item: T) -> None:
""" Removes the first occurrence of the item from the list
:raises ValueError: if item not in the list
:see: #index(item: T) and #delete_at_index(index: int)
"""
index = self.index(item)
self.delete_at_index(index)
def __str__(self) -> str:
""" Converts the list into a string, first to last
:complexity: O(len(self) * M), M is the size of biggest item
"""
result = '['
for i in range(len(self)):
if i > 0:
result += ', '
result += str(self[i]) if type(self[i]) != str else "'{0}'".format(self[i])
result += ']'
return result
class ArrayList(List[T]):
""" Implementation of a generic list with arrays.
Attributes:
length (int): number of elements in the list (inherited)
array (ArrayR[T]): array storing the elements of the list
ArrayR cannot create empty arrays. So MIN_CAPACITY is used to avoid this.
"""
MIN_CAPACITY = 40
def __init__(self, max_capacity : int = 40) -> None:
""" Initialises self.length by calling its parent and
self.array as an ArrayList of appropriate capacity
:complexity: O(len(self)) always due to the ArrayR call
"""
List.__init__(self)
self.array = ArrayR(max(self.MIN_CAPACITY, max_capacity))
def __getitem__(self, index: int) -> T:
""" Returns the value of the element at position index
:pre: index is 0 <= index < len(self) checked by ArrayR's method
:complexity: O(1)
"""
if index < 0 or len(self) <= index:
raise IndexError("Out of bounds access in array.")
return self.array[index]
def __setitem__(self, index: int, value: T) -> None:
""" Sets the value of the element at position index to be item
:pre: index is 0 <= index < len(self) checked by ArrayR's method
:complexity: O(1)
"""
if index < 0 or len(self) <= index:
raise IndexError("Out of bounds access in array.")
self.array[index] = value
def __shuffle_right(self, index: int) -> None:
""" Shuffles all the items to the right from index
:complexity best: O(1) shuffle from the end of the list
:complexity worst: O(N) shuffle from the start of the list
where N is the number of items in the list
"""
for i in range(len(self), index, -1):
self.array[i] = self.array[i - 1]
def __shuffle_left(self, index: int) -> None:
""" Shuffles all the items to the left from index
:complexity best: O(1) shuffle from the start of the list
:complexity worst: O(N) shuffle from the end of the list
where N is the number of items in the list
"""
for i in range(index, len(self)):
self.array[i] = self.array[i+1]
def __resize(self) -> None:
"""
        If the list is full, grows the internal capacity of the list (by a factor of ~1.9),
copying all existing elements. Does nothing if the list is not full.
:post: Capacity is strictly greater than the list length.
:complexity: Worst case O(N), for list of length N.
"""
if len(self) == len(self.array):
new_cap = int(1.9 * len(self.array))
new_array = ArrayR(new_cap)
for i in range(len(self)):
new_array[i] = self.array[i]
self.array = new_array
assert len(self) < len(self.array), "Capacity not greater than length after __resize."
def is_full(self):
""" Returns true if the list is full
:complexity: O(1)
"""
return len(self) >= len(self.array)
def index(self, item: T) -> int:
""" Returns the position of the first occurrence of item
:raises ValueError: if item not in the list
:complexity: O(Comp==) if item is first; Comp== is the BigO of ==
O(len(self)*Comp==) if item is last
"""
for i in range(len(self)):
if item == self[i]:
return i
raise ValueError("Item not in list")
def delete_at_index(self, index: int) -> T:
""" Moves self[j+1] to self[j] if j>index, returns old self[index]
:pre: index is 0 <= index < len(self) checked by self.array[_]
:complexity: O(len(self) - index)
"""
if index < 0 or index >= len(self):
raise IndexError("Out of bounds")
item = self.array[index]
self.length -= 1
self.__shuffle_left(index)
return item
def insert(self, index: int, item: T) -> None:
""" Moves self[j] to self[j+1] if j>=index & sets self[index]=item
:pre: index is 0 <= index <= len(self) checked by self.array[_]
:complexity: O(len(self)-index) if no resizing needed, O(len(self)) otherwise
"""
if self.is_full():
self.__resize()
self.__shuffle_right(index)
self.array[index] = item
self.length += 1
class TestList(unittest.TestCase):
""" Tests for the above class."""
EMPTY = 0
ROOMY = 5
LARGE = 10
def setUp(self):
self.lengths = [self.EMPTY, self.ROOMY, self.LARGE, self.ROOMY, self.LARGE]
self.lists = [ArrayList(self.LARGE) for i in range(len(self.lengths))]
for list, length in zip(self.lists, self.lengths):
for i in range(length):
list.append(i)
self.empty_list = self.lists[0]
self.roomy_list = self.lists[1]
self.large_list = self.lists[2]
#we build empty lists from clear.
#this is an indirect way of testing if clear works!
#(perhaps not the best)
self.clear_list = self.lists[3]
self.clear_list.clear()
self.lengths[3] = 0
self.lists[4].clear()
self.lengths[4] = 0
def tearDown(self):
for s in self.lists:
s.clear()
def test_init(self) -> None:
self.assertTrue(self.empty_list.is_empty())
self.assertEqual(len(self.empty_list), 0)
def test_len(self):
""" Tests the length of all lists created during setup."""
for list, length in zip(self.lists, self.lengths):
self.assertEqual(len(list), length)
def test_is_empty_add(self):
""" Tests lists that have been created empty/non-empty."""
self.assertTrue(self.empty_list.is_empty())
self.assertFalse(self.roomy_list.is_empty())
self.assertFalse(self.large_list.is_empty())
def test_is_empty_clear(self):
""" Tests lists that have been cleared."""
for list in self.lists:
list.clear()
self.assertTrue(list.is_empty())
def test_is_empty_delete_at_index(self):
""" Tests lists that have been created and then deleted completely."""
for list in self.lists:
#we empty the list
for i in range(len(list)):
self.assertEqual(list.delete_at_index(0), i)
try:
list.delete_at_index(-1)
except:
self.assertTrue(list.is_empty())
def test_append_and_remove_item(self):
for list in self.lists:
nitems = self.ROOMY
list.clear()
for i in range(nitems):
list.append(i)
for i in range(nitems-1):
list.remove(i)
self.assertEqual(list[0],i+1)
list.remove(nitems-1)
self.assertTrue(list.is_empty())
for i in range(nitems):
list.append(i)
for i in range(nitems-1,0,-1):
list.remove(i)
self.assertEqual(list[len(list)-1],i-1)
list.remove(0)
self.assertTrue(list.is_empty())
def test_clear(self):
for list in self.lists:
list.clear()
self.assertTrue(list.is_empty())
if __name__ == '__main__':
testtorun = TestList()
suite = unittest.TestLoader().loadTestsFromModule(testtorun)
unittest.TextTestRunner().run(suite)
|
import numpy as np
from bokeh.plotting import figure, show, output_file
N = 500
x = np.linspace(0, 10, N)
y = np.linspace(0, 10, N)
xx, yy = np.meshgrid(x, y)
d = np.sin(xx)*np.cos(yy)
d[:125, :125] = np.nan # Set bottom left quadrant to NaNs
p = figure(x_range=(0, 10), y_range=(0, 10))
# Solid line to show effect of alpha
p.line([0, 10], [0, 10], color='red', line_width=2)
# Use global_alpha kwarg to set alpha value
img = p.image(image=[d], x=0, y=0, dw=10, dh=10, global_alpha=0.7)
# Alpha for color mapper attributes can be set explicitly and is applied prior to global alpha,
# e.g. NaN color:
img.glyph.color_mapper.nan_color = (128, 128, 128, 0.1)
output_file("image_alpha.html", title="image_alpha.py example")
show(p) # open a browser
|
import pandas as pd
from yahoo_fin import stock_info
from tkinter import *
from tkinter import ttk
from datetime import date
janela = Tk()
janela.title('Optionalities')
tree = ttk.Treeview(janela, selectmode='browse', column=("column1", "column2", "column3", "column4", "column5", "column6", "column7", "column8", "column9", "column10"), show='headings') # Create the treeview
xlsx = pd.ExcelFile('opcoesp.xlsx', engine='openpyxl')
df = pd.read_excel(xlsx) # Read an Excel file into a pandas DataFrame.
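# The spreadsheet is assumed to contain at least the columns 'Opção' (option ticker) and 'Strike';
# the remaining columns (amount, premium, covered, ...) are displayed as-is in the treeview below.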
x = 0
stocks = ['BBAS3', 'BBDC4', 'BBSE3', 'COGN3', 'CYRE3', 'MGLU3', 'PETR4', 'VIIA3', 'BOVA11', 'ABEV3', 'ELET3', 'ITUB4', 'CSNA3', 'B3SA3', 'EGIE3', 'SBSP3', 'WEGE3'] # Put here the stock ticker you sell
cotacoes = []
# Map each option ticker to its underlying stock by matching the 4-letter root
# (e.g. options starting with 'BBAS' map to BBAS3).
for opcao in df['Opção']:
    for ticker in stocks:
        if opcao.startswith(ticker[:4]):
            cotacoes.append(ticker)
            break
stocks1 = cotacoes
df['Ações'] = stocks1
e = 0
cotation = []
cotado = []
dist_strike = []
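# For each option row: build the Yahoo Finance ticker (e.g. 'BBAS3.SA'), fetch the live price,
# and compute the strike distance as (strike / price - 1) * 100, i.e. how far (in %) the strike
# sits above (+) or below (-) the current quote.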
while True:
    cotation.append(df['Ações'][e].strip()+str(".SA"))  # strip() removes stray whitespace from the ticker
cotado.append(round(stock_info.get_live_price(cotation[e]),2))
dist_strike.append(round(((float(df['Strike'][e])/float(cotado[e]))-1)*100,2)) # Estimates the strike distance
e=e+1
if e==df['Opção'].count():
break
preco = cotado
df['Cotação'] = preco
distancia = dist_strike
df['Distância (%)'] = distancia
# Identifies the option type, whether it is call or put
g = 0
series_call = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']
series_put = ['M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X']
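# On B3, the 5th character of an option ticker encodes the series: letters A-L denote calls
# (expiring January-December) and M-X denote puts (same month order).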
tipo = []
c = df['Opção'].count() # Check how many items are in the column
while g!=c:
if df['Opção'][g][4] in series_call:
tipo.append('call')
elif df['Opção'][g][4] in series_put:
tipo.append('put')
g+=1
series = tipo
df['Tipo'] = series
# Identifies the expiration month of the option from its series letter
h = 0
vencimento = []
fim = df['Opção'].count()
meses = ['January', 'February', 'March', 'April', 'May', 'June',
         'July', 'August', 'September', 'October', 'November', 'December']
while h != fim:
    letra = df['Opção'][h][4]
    if letra in series_call:
        vencimento.append(meses[series_call.index(letra)])
    elif letra in series_put:
        vencimento.append(meses[series_put.index(letra)])
    h += 1
encerramento = vencimento
df['Vencimento'] = encerramento
tree.column('column1', width=200, minwidth=50, stretch=NO)
tree.heading("#1", text='Option')
tree.column('column2', width=50, minwidth=50, stretch=NO)
tree.heading("#2", text='Amount')
tree.column('column3', width=150, minwidth=50, stretch=NO)
tree.heading("#3", text='Strike')
tree.column('column4', width=120, minwidth=50, stretch=NO)
tree.heading("#4", text='Premium')
tree.column('column5', width=150, minwidth=50, stretch=NO)
tree.heading("#5", text='Covered')
tree.column('column6', width=150, minwidth=50, stretch=NO)
tree.heading("#6", text='Stock')
tree.column('column7', width=100, minwidth=50, stretch=NO)
tree.heading("#7", text='Price')
tree.column('column8', width=120, minwidth=50, stretch=NO)
tree.heading("#8", text='Distance (%)')
tree.column('column9', width=100, minwidth=50, stretch=NO)
tree.heading("#9", text='Type')
tree.column('column10', width=150, minwidth=50, stretch=NO)
tree.heading("#10", text='Expiration')
row=1
df_rows = df.to_numpy().tolist()
for row in df_rows:
tree.insert("", END, values=row, tag='1') # Will insert content inside the treeview.
# New window - This will separate the options that must be cleared on the last trading day.
janela_pular = Tk()
janela_pular.title('Closing positions')
tree1 = ttk.Treeview(janela_pular, selectmode='browse', column=("column1", "column2", "column3", "column4", "column5", "column6", "column7", "column8", "column9", "column10"), show='headings')
# dimensions
largura = 1360
altura = 250
# screen position
posix = 200
posiy = 300
# defining the geometry
janela_pular.geometry("%dx%d+%d+%d" % (largura,altura,posix,posiy))
data_atual = date.today() # Results like: 2018-03-01
# Collecting only the month and mapping it to its English name
mes = data_atual.month
monat = ['January', 'February', 'March', 'April', 'May', 'June',
         'July', 'August', 'September', 'October', 'November', 'December'][mes - 1]
df_mask=df['Vencimento']==monat
filtered_df = df[df_mask]
df_month = pd.DataFrame(filtered_df) # Creates a new dataframe containing data for the current month only
call_mask = df_month['Tipo'] == 'call'  # Mask to keep only the calls of the current month
df_call = pd.DataFrame(df_month[call_mask])
put_mask = df_month['Tipo'] == 'put'  # Mask to keep only the puts of the current month
df_put = pd.DataFrame(df_month[put_mask])
df_call_mask=df_call['Distância (%)'] < 10
filtered_call_dist = df_call[df_call_mask]
df_put_mask=df_put['Distância (%)'] > -10
filtered_put_dist = df_put[df_put_mask]
m = pd.merge(filtered_call_dist, filtered_put_dist, how = 'outer')
tree1.column('column1', width=200, minwidth=50, stretch=NO)
tree1.heading("#1", text='Option')
tree1.column('column2', width=50, minwidth=50, stretch=NO)
tree1.heading("#2", text='Amount')
tree1.column('column3', width=150, minwidth=50, stretch=NO)
tree1.heading("#3", text='Strike')
tree1.column('column4', width=120, minwidth=50, stretch=NO)
tree1.heading("#4", text='Premium')
tree1.column('column5', width=150, minwidth=50, stretch=NO)
tree1.heading("#5", text='Covered')
tree1.column('column6', width=150, minwidth=50, stretch=NO)
tree1.heading("#6", text='Stock')
tree1.column('column7', width=100, minwidth=50, stretch=NO)
tree1.heading("#7", text='Price')
tree1.column('column8', width=120, minwidth=50, stretch=NO)
tree1.heading("#8", text='Distance (%)')
tree1.column('column9', width=100, minwidth=50, stretch=NO)
tree1.heading("#9", text='Type')
tree1.column('column10', width=150, minwidth=50, stretch=NO)
tree1.heading("#10", text='Expiration')
rowy=1
df_rowsy = m.to_numpy().tolist()
for rowy in df_rowsy:
tree1.insert("", END, values=rowy, tag='1')
# Adding a vertical scrollbar to Treeview widget
treeScroll = ttk.Scrollbar(janela)
treeScroll.configure(command=tree.yview)
tree.configure(yscrollcommand=treeScroll.set)
treeScroll.pack(side= RIGHT, fill= BOTH)
tree.pack()
# Adding a vertical scrollbar to Treeview widget
treeScroll = ttk.Scrollbar(janela_pular)
treeScroll.configure(command=tree1.yview)
tree1.configure(yscrollcommand=treeScroll.set)
treeScroll.pack(side= RIGHT, fill= BOTH)
tree1.pack()
janela.mainloop()
janela_pular.mainloop()
|
# Generated by Django 2.2.1 on 2019-06-10 19:07
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('medications', '0007_effect_category'),
]
operations = [
migrations.AddField(
model_name='effect',
name='description',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='effect',
name='category',
field=models.CharField(blank=True, choices=[('circle', 'Circle chart'), ('icon', 'Icon'), ('list', 'List of values'), ('timeline', 'Timeline of values')], max_length=250, null=True),
),
]
|
"""
:Author: Valerio Maggio
:Contact: valeriomaggio@gmail.com
"""
from __future__ import division, print_function
"""
This script extracts all the methods from the two versions of the JFreeChart system
(i.e., so far, the only system with two different versions in the DB) in order to
collect all the methods that are present in both versions.
To identify if two methods are **exactly** the same, the script considers
the tuple:
(Class Name, Method Name, Type Parameters)
Once all these methods have been collected, an Excel (.xlsx) report file
is written in order to proceed to the comparison of comments and source code
that may have changed between the two versions.
The Excel file will contain the following information:
Method1 Method2 CoherenceMethod1 CoherenceMethod2 ComparisonInfo Notes
"""
from django.conf import settings
from code_comments_coherence import settings as comments_classification_settings
settings.configure(**comments_classification_settings.__dict__)
# Import Model Classes
from source_code_analysis.models import SoftwareProject
from lxml import etree # imported to parse method XML parse trees
import xlsxwriter # To generate final report file
# Import Kernel Function to Detect Clones
from source_code_analysis.code_analysis.kernels import contiguous_tree_kernel
from source_code_analysis.code_analysis.xml_parsers import XMLMethodTreeParser, ParsedTreeNode
from xml.parsers.expat import ExpatError
from nltk.tokenize import wordpunct_tokenize
# from itertools import ifilter, izip_longest, izip
# from itertools import filter, zip_longest, zip
# Py3 compatibility hack
from itertools import zip_longest
ifilter = filter
izip_longest = zip_longest
izip = zip
from collections import OrderedDict
def generate_method_key(method):
"""
    Generate a hashable key for a given method.
    In more detail, for a given method, the key is
    represented by a tuple corresponding to:
(class name, method name, type names of all parameters)
"""
class_name = method.code_class.class_name
method_name = method.method_name
try:
# Get type names
parser = etree.XMLParser(recover=True) # recover from bad characters.
xml_tree = etree.fromstring(method.xml_tree, parser=parser)
# doc = etree.ElementTree(xml_tree)
# Extract Nodes corresponding to Types of FORMAL PARAMETER(s)
xpath_query = '//method_statement_node/node[@instruction_class="FORMAL_PARAMETERS"]/' \
'node[@instruction_class="FORMAL_PARAMETER"]/node[@instruction_class="TYPE"]'
parameter_type_nodes = xml_tree.xpath(xpath_query)
except etree.XMLSyntaxError as exc:
print('XML Syntax Error Found!!', exc.msg)
print('Method: ', method_name)
exit()
else:
type_names = list()
for node in parameter_type_nodes:
type_names.append(node.attrib['name'])
key = [class_name, method_name]
key.extend(type_names)
return tuple(key) # Finally convert the key list to a tuple (immutable)
def index_methods_of_projects(software_project):
"""
    Extract all the methods from a given software project.
Returned methods are indexed by the corresponding key
(see method `generate_method_key`)
Parameters:
-----------
software_project: SoftwareProject
The instance of the SoftwareProject where methods have to be gathered from.
Returns:
--------
methods: dict
        A dictionary mapping each method's key to the corresponding method instance.
"""
code_methods = software_project.code_methods.all() # Get all methods of the Sw Project
indexed_methods = dict()
for method in code_methods:
key = generate_method_key(method)
indexed_methods[key] = method
return indexed_methods
def similarity_of_comments(method1, method2):
"""
Compute the similarity of Head Comments of
given methods.
    Comment similarity is computed from the Hamming distance
    between the words of the comments (excluding
    punctuation and formatting), represented as one
    big joined string.
"""
def normalize_comment(comment):
tokens = wordpunct_tokenize(comment)
        tokens = ifilter(str.isalnum, tokens)  # keep alphanumeric word tokens (str.isalnum works under the Py3 shim above)
return ''.join(tokens).lower()
def hamming_distance(s1, s2):
"""Return the Hamming distance between any-length sequences of characters"""
return sum(ch1 != ch2 for ch1, ch2 in izip_longest(s1, s2, fillvalue=''))
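    # Similarity is 1 - (Hamming distance / length of the longer normalized comment);
    # the fillvalue='' in izip_longest makes any length difference count as mismatches.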
comment1 = normalize_comment(method1.comment)
comment2 = normalize_comment(method2.comment)
length = max(len(comment1), len(comment2))
if not length:
return 1
hd = hamming_distance(comment1, comment2)
return 1 - (hd/length)
def similarity_of_code(method1, method2):
"""
Compute the similarity of Code Fragments of the two given methods
using the (Tree) Kernel Functions among corresponding
Code Fragment (i.e., XML Parse Tree Fragments associated to each method
during the Code Parsing/Analysis phase)
"""
method1_xml_tree = '\n'.join([ParsedTreeNode.from_unicode_to_xml(line) for line in
method1.xml_tree.splitlines()])
method2_xml_tree = '\n'.join([ParsedTreeNode.from_unicode_to_xml(line) for line in
method2.xml_tree.splitlines()])
method1_xml_tree = method1_xml_tree.encode('utf-8')
method2_xml_tree = method2_xml_tree.encode('utf-8')
try:
parse_tree1 = XMLMethodTreeParser().parse(method1_xml_tree)
parse_tree2 = XMLMethodTreeParser().parse(method2_xml_tree)
except ExpatError:
return -1
except UnicodeEncodeError:
return -1
else:
return contiguous_tree_kernel(parse_tree1, parse_tree2)
def get_common_methods(methods_release, methods_next_release):
"""
Find and returns all the methods that are present in
both the lists of methods, corresponding to two different
releases of the *same* software project.
Parameters:
-----------
methods_release: dict
A dictionary containing all the methods of the first project
release, as returned by the `index_methods_of_projects`
function.
methods_next_release: dict
A dictionary containing all the methods of the second project
release, as returned by the `index_methods_of_projects`
function.
Return:
-------
methods_in_common: dict
A dictionary containing the references to only the
common methods between the two considered releases.
        Only methods that differ in at least one of comment or
        code are selected.
The resulting dictionary contains for each couple of methods, the
values of code and comment similarities (as computed by the
`similarity_of_code` and `similarity_of_comments` functions).
"""
methods_in_common = dict()
for mkey in methods_release:
if mkey in methods_next_release:
# Before setting the Common Method, check if something **actually** changed.
method_current_release = methods_release[mkey]
method_next_release = methods_next_release[mkey]
# Getting Comment Similarity
comment_sim = similarity_of_comments(method_current_release, method_next_release)
# Getting Code Similarity
code_sim = similarity_of_code(method_current_release, method_next_release)
if (0 <= comment_sim < 1) or (0 <= code_sim < 1):
methods_in_common.setdefault(mkey, dict())
methods_in_common[mkey].setdefault('methods', list())
methods_in_common[mkey]['methods'].append(method_current_release)
methods_in_common[mkey]['methods'].append(method_next_release)
methods_in_common[mkey]['code_similarity'] = code_sim
methods_in_common[mkey]['comment_similarity'] = comment_sim
return methods_in_common # return only common methods
def generate_excel_report_file(target_methods, system_names=(), filename='methods_report.xlsx'):
"""
Generate an XLS(X) - Excel - Report file containing all
the information to compare and evaluate target methods.
In particular, this report aims at producing an output
suitable to compare two versions of the **same** methods
within two different (consecutive) software releases.
The Excel report file is generated thanks to the
[`xlsxwriter`](https://xlsxwriter.readthedocs.org/)
Python library.
Arguments:
----------
target_methods: dict
A dictionary containing all the target methods to compare
(as returned by the `get_common_methods` function)
system_names: tuple (default: () - empty tuple)
A tuple containing the names of the two compared systems (and release numbers).
        If empty (default), the two systems will be referred to in the report as
"First Release" and "Second Release", respectively.
filename: str (default: "methods_report.xlsx")
The name of the Excel file to generate.
"""
# Creating The Excel Workbook
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
# Add a format for the header cells.
header_format = workbook.add_format({
'border': 1,
# 'bg_color': '#C6EFCE',
'bold': True,
'text_wrap': True,
'valign': 'vcenter',
'align': 'center',
'indent': 1,
})
centered_format = workbook.add_format({
'align': 'center',
'valign': 'vcenter',
'bold': True
})
# Set up some formats to use.
bold = workbook.add_format({'bold': True}) # Bold
center = workbook.add_format({'align': 'center'}) # Center
# Txt Wrap (for code and comments)
tx_wrap = workbook.add_format({'valign': 'top', 'align': 'left'})
tx_wrap.set_text_wrap()
# Valign Top for Notes
vtop = workbook.add_format({'valign': 'top', 'align': 'left', 'locked': False})
# Unlocked
unlocked = workbook.add_format({
'align': 'center',
'valign': 'vcenter',
'bold': True,
'locked': False
})
# Set up layout of the worksheet.
worksheet.set_column('A:B', 80)
worksheet.set_column('C:C', 2)
worksheet.set_column('D:E', 15)
worksheet.set_column('F:F', 2)
worksheet.set_column('G:H', 20)
worksheet.set_column('I:I', 2)
worksheet.set_column('J:K', 20)
worksheet.set_column('L:L', 2)
worksheet.set_column('M:M', 40)
# Check System Names
if not system_names or not len(system_names):
system_names = ('First Release', 'Second Release')
if len(system_names) < 2:
system_names = (system_names[0], 'Second Release')
# Write the header cells and some data that will be used in the examples.
heading1 = system_names[0] # A - 80
heading2 = system_names[1] # B - 80
heading3 = 'Comment Changed' # D - 15
heading4 = 'Code Changed' # E - 15
heading5 = 'Comment Lexical Similarity' # G - 20
heading6 = 'Code Textual Similarity' # H - 20
heading7 = 'Are the Comments Semantically Different?' # J
heading8 = 'Are the Code Fragments Semantically Different?' # K
heading9 = 'Notes' # M
# Methods
worksheet.write('A1', heading1, header_format)
worksheet.write('B1', heading2, header_format)
# Code and Comment Changed (selection)
worksheet.write('D1', heading3, header_format)
worksheet.write('E1', heading4, header_format)
# Code and Comments Similarity
worksheet.write('G1', heading5, header_format)
worksheet.write('H1', heading6, header_format)
# Code and Comments Semantic Similarity Evaluation
worksheet.write('J1', heading7, header_format)
worksheet.write('K1', heading8, header_format)
# Notes
worksheet.write('M1', heading9, header_format)
def write_method_code_fragment(method, location):
"""Utility Closure to write Code fragment of given method at a specified location"""
code_fragment = method.code_fragment
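        # Fragments may be missing closing braces; pad them so the snippet still
        # reads as balanced code inside the report cell.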
open_brackets_count = code_fragment.count('{')
closed_brackets_count = code_fragment.count('}')
if open_brackets_count != closed_brackets_count:
code_fragment += '}' * (open_brackets_count - closed_brackets_count)
lines = method.comment.splitlines()
lines.extend(code_fragment.splitlines())
lines = [l for l in lines if len(l.strip())]
fragment = '\n'.join(lines)
worksheet.write(location, fragment, tx_wrap)
return fragment
def write_validation_list_and_preselect(similarity, location, semantic_difference=False):
if 0 <= similarity <= 1:
if 0 <= similarity < 1:
if not semantic_difference: # default
# In this case, the column to write does not refer to the semantic
# equivalence, but to the lexical similarity
worksheet.write(location, 'YES', unlocked)
else:
worksheet.write(location, 'NO', unlocked)
else:
worksheet.write(location, '', unlocked)
worksheet.data_validation(location,
{'validate': 'list', 'source': ['YES', 'NO']})
# Start writing the Excel File
row_counter = 2
for mkey in sorted(target_methods.keys()):
# Get Methods
method1, method2 = target_methods[mkey]['methods']
code_similarity = target_methods[mkey]['code_similarity']
comment_similarity = target_methods[mkey]['comment_similarity']
# Write Method Information
worksheet.write_rich_string('A{r}'.format(r=row_counter),
bold, 'Method: ', method1.method_name,
' (Class: {0})'.format(method1.code_class.class_name), center)
worksheet.write_rich_string('B{r}'.format(r=row_counter),
bold, 'Method: ', method2.method_name,
' (Class: {0})'.format(method2.code_class.class_name), center)
# Write Method Comment and Code
row_counter += 1
fragment1 = write_method_code_fragment(method1, 'A{r}'.format(r=row_counter))
fragment2 = write_method_code_fragment(method2, 'B{r}'.format(r=row_counter))
# Set row Height to the Largest row
worksheet.set_row(row_counter, 0.15*(max(len(fragment1), len(fragment2))))
# Write Selection List for Code and Comment Lexical/Textual Changes
write_validation_list_and_preselect(comment_similarity, 'D{r}'.format(r=row_counter))
write_validation_list_and_preselect(code_similarity, 'E{r}'.format(r=row_counter))
# Write Code and Comment Similarity
worksheet.write('G{r}'.format(r=row_counter), '%.3f' % comment_similarity, centered_format)
worksheet.write('H{r}'.format(r=row_counter), '%.3f' % code_similarity, centered_format)
# Write Selection List for Code and Comment Semantic Changes
write_validation_list_and_preselect(comment_similarity, 'J{r}'.format(r=row_counter),
semantic_difference=True)
write_validation_list_and_preselect(code_similarity, 'K{r}'.format(r=row_counter),
semantic_difference=True)
# Set Notes Format
worksheet.write('M{r}'.format(r=row_counter), '', vtop)
# Increment Row Counter by two
row_counter += 2
# Turn worksheet protection on.
# worksheet.protect()
if __name__ == '__main__':
# Get JHotDraw Systems sorted by Versions
jhotDraw_systems = SoftwareProject.objects.filter(name__iexact='JHotDraw').order_by('version')
print('Indexing Methods of Target Systems')
jhd_methods = OrderedDict() # Using Ordered Dict to insert keys already sorted by version no.
for jhd_system in jhotDraw_systems:
methods = index_methods_of_projects(jhd_system)
print('Found ', len(methods), ' Methods in JHotDraw ', jhd_system.version)
jhd_methods[jhd_system.version] = methods
print('Extracting Common Methods from successive releases')
    release_versions = list(jhd_methods.keys())  # list() so the keys view can be sliced under Py3
    for current_release, next_release in izip(release_versions[:-1], release_versions[1:]):
print('Getting Common Methods From JHotDraw Releases: ',
current_release, ' - ', next_release)
current_release_methods = jhd_methods[current_release]
next_release_methods = jhd_methods[next_release]
# Extract Common Methods
common_methods = get_common_methods(current_release_methods, next_release_methods)
print('Found ', len(common_methods), ' Common Methods between the two releases!')
if len(common_methods):
print('Generating Report File')
# Generate Report File
report_filename = 'methods_report_JHotDraw_{0}_{1}.xlsx'.format(current_release,
next_release)
generate_excel_report_file(common_methods,
filename=report_filename,
system_names=('JHotDraw {0}'.format(current_release),
'JHotDraw {0}'.format(next_release)))
print('Done')
|
########################################################
# evaluator.py
# Author: Jamie Zhu <jimzhu@GitHub>
# Created: 2014/2/6
# Last updated: 2015/8/30
########################################################
import numpy as np
import time
from PPCF.commons import evaluator
from PPCF import P_PMF
import multiprocessing
import logging
#======================================================#
# Function to evaluate the approach at all settings
#======================================================#
def execute(matrix, para):
# loop over each density and each round
if para['parallelMode']: # run on multiple processes
pool = multiprocessing.Pool()
for den in para['density']:
for roundId in xrange(para['rounds']):
pool.apply_async(executeOneSetting, (matrix, den, roundId, para))
pool.close()
pool.join()
    else: # run on a single process
for den in para['density']:
for roundId in xrange(para['rounds']):
executeOneSetting(matrix, den, roundId, para)
# summarize the dumped results
evaluator.summarizeResult(para)
#======================================================#
# Function to run the prediction approach at one setting
#======================================================#
def executeOneSetting(matrix, density, roundId, para):
logging.info('density=%.2f, %2d-round starts.'%(density, roundId + 1))
startTime = time.clock()
# remove data matrix
logging.info('Removing entries from data matrix...')
(trainMatrix, testMatrix) = evaluator.removeEntries(matrix, density, roundId)
# data perturbation by adding noises
logging.info('Data perturbation...')
(perturbMatrix, uMean, uStd) = randomPerturb(trainMatrix, para)
# QoS prediction
logging.info('QoS prediction...')
predictedMatrix = P_PMF.predict(perturbMatrix, para)
predictedMatrix = reNormalize(predictedMatrix, uMean, uStd)
runningTime = float(time.clock() - startTime)
# evaluate the estimation error
evalResult = evaluator.evaluate(testMatrix, predictedMatrix, para)
result = (evalResult, runningTime)
# dump the result at each density
outFile = '%s%s_%s_result_%.2f_round%02d.tmp'%(para['outPath'], para['dataName'],
para['dataType'], density, roundId + 1)
evaluator.dumpresult(outFile, result)
logging.info('density=%.2f, %2d-round done.'%(density, roundId + 1))
logging.info('----------------------------------------------')
#======================================================#
# Function to perturb the entries of data matrix
#======================================================#
def randomPerturb(matrix, para):
perturbMatrix = matrix.copy()
(numUser, numService) = matrix.shape
uMean = np.zeros(numUser)
uStd = np.zeros(numUser)
noiseRange = para['noiseRange']
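    # Data obfuscation step of the privacy-preserving approach: each user's observed QoS values
    # are z-score normalized and then perturbed with random noise before being used for prediction.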
# z-score normalization
for i in xrange(numUser):
qos = matrix[i, :]
qos = qos[qos != 0]
mu = np.average(qos)
sigma = np.std(qos)
uMean[i] = mu
uStd[i] = sigma
perturbMatrix[i, :] = (perturbMatrix[i, :] - mu) / sigma
if para['noiseType'] == 'guassian':
noiseVec = np.random.normal(0, noiseRange, numService)
elif para['noiseType'] == 'uniform':
noiseVec = np.random.uniform(-noiseRange, noiseRange, numService)
perturbMatrix[i, :] += noiseVec
perturbMatrix[matrix == 0] = 0
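    # Entries that were unobserved (zero) in the original matrix stay unobserved after perturbation.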
return (perturbMatrix, uMean, uStd)
#======================================================#
# Function to revert the z-score normalization on the predicted matrix
#======================================================#
def reNormalize(matrix, uMean, uStd):
numUser = matrix.shape[0]
resultMatrix = matrix.copy()
for i in xrange(numUser):
resultMatrix[i, :] = resultMatrix[i, :] * uStd[i] + uMean[i]
resultMatrix[resultMatrix < 0] = 0
return resultMatrix
|
#!/usr/bin/python
# Import necessary libraries/modules
import os
import argparse
import numpy as np
import cv2
from sklearn import metrics
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
class LogRegMNIST():
"""This is a class for performing a Logistic Regression classification on the MNIST dataset.
"""
def __init__(self, digits, args):
self.args = args
self.X = digits.data.astype("float") #extracting data
self.y = digits.target #extracting labels
def split(self):
""" Function for splitting MNIST dataset into train and test sets.
"""
# Normalize (MinMax regularization)
self.X = (self.X - self.X.min())/(self.X.max() - self.X.min())
# Split into train and test set
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X,
self.y,
random_state=self.args['random_state'],
train_size=1-self.args['test_size'],
test_size=self.args['test_size'])
def train_model(self):
"""Function for training the Logistic Regression classifier.
"""
# Initialise model and fit that model to the training data and labels
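        # 'saga' is one of the sklearn solvers that supports multinomial targets without a penalty term;
        # the loose tol=0.1 trades some convergence precision for a much shorter training time.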
self.clf = LogisticRegression(penalty='none',
tol=0.1,
solver='saga',
multi_class='multinomial').fit(self.X_train, self.y_train)
def calc_eval_metrics(self):
"""Function for calculating evaluation metrics.
Input:
clf: trained Logistic Regression classifier
"""
# Take the trained model and use to predict test class
self.y_pred = self.clf.predict(self.X_test)
# Calculate evaluation metrics
cm = metrics.classification_report(self.y_test, self.y_pred)
return cm
def save_eval_metrics(self, cm):
"""Function for saving file with evaluation metrics.
Input:
cm: evaluation metrics
"""
# Specifying output path
outpath = os.path.join("out", f"{self.args['filename']}.txt")
# Writing file
with open(outpath, "w", encoding="utf-8") as file:
file.write(cm)
def run_classifier(self):
"""Function for running all functions within the class in the correct order.
"""
# Splitting data
self.split()
# Train model
self.train_model()
# Calculate evaluation metrics
cm = self.calc_eval_metrics()
# Print evaluation metrics
print(f"\n EVALUATION METRICS: \n {cm}")
# Save evaluation metrics
self.save_eval_metrics(cm)
# Creating a function that checks whether a given value is between 0 and 1 and raises an error if it is not. This is used to ensure that only a test_size argument within the correct range can be parsed from the command line.
def percentFloat(string):
value = float(string)
if value < 0 or value > 1:
raise argparse.ArgumentTypeError('Value has to be between 0 and 1')
return value
# Defining main function
def main():
ap = argparse.ArgumentParser(description="[INFO] This script uses the full MNIST data set, trains a Logistic Regression Classifier, and prints and saves the evaluation metrics to the terminal.")
# Argument for specifying a random-state value
ap.add_argument("-rs",
"--random_state",
required=False,
type=int,
default=9,
help="int, value for random state of model")
# Argument for specifying size of test set
ap.add_argument("-ts",
"--test_size",
required=False,
type=percentFloat, #here I use the function I created above
default=0.2,
help="float, proportional size of test set (must be number between 0 and 1)")
# Argument for specifying filename of evaluation metrics
ap.add_argument("-fn",
"--filename",
required=False,
type=str,
default="evaluation_metrics_LR",
help="str, filename for saving the evaluation metrics")
args = vars(ap.parse_args())
# Loading data
digits = datasets.load_digits()
# Turning into LogRegMNIST object (the class I created above)
logreg = LogRegMNIST(digits, args)
# Perform classification
logreg.run_classifier()
# Define behaviour when called from command line
if __name__=="__main__":
main()
print("[INFO] The evaluation metrics has been saved in 'out/'.")
|
"""Backends for similarity analysis. We strive for somewhat interfaces on numpy and
tensorflow."""
__all__ = ["npbased", "tfbased"]
|
## Set API key
api_key = 'abcdefghijklmnopqrstuvwxyz123456'
## Set default file type
response_type = 'xml'
## Set response data types
response_data = [
'categories',
'seriess',
'tags',
'releases',
'release_dates',
'sources',
'vintage_dates',
'observations'
]
## Set date columns
dates = [
'realtime_start',
'realtime_end',
'date',
'vintage_dates',
'last_updated',
'observation_start',
'observation_end',
'created'
]
## SSL Verify HTTPS
ssl_verify = True
|
import abc
import base64
import six
from cryptography import x509
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.x509 import load_pem_x509_certificate
from aliyun_encryption_sdk.kms.kms import AliyunKms
from aliyun_encryption_sdk.model import SignatureAlgorithm
from aliyun_encryption_sdk.provider import str_to_cmk
from aliyun_encryption_sdk.provider.signature import get_digest
@six.add_metaclass(abc.ABCMeta)
class Verifier(object):
def __init__(self, signature_algorithm, public_key=None):
if not isinstance(signature_algorithm, SignatureAlgorithm):
raise TypeError("'signature_algorithm' must be SignatureAlgorithm type")
self.signature_algorithm = signature_algorithm
self.public_key = public_key
def verify_data(self, verify_material):
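        # Note: the verifier()/update()/verify() flow below follows the older `cryptography`
        # signer/verifier API; recent releases of the library expose
        # public_key.verify(signature, data, padding, algorithm) directly.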
if self.signature_algorithm is SignatureAlgorithm.RSA_PSS_SHA_256:
verifier = self.public_key.verifier(
verify_material.signed_value,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
elif self.signature_algorithm is SignatureAlgorithm.RSA_PKCS1_SHA_256:
verifier = self.public_key.verifier(
verify_material.signed_value,
padding.PKCS1v15(),
hashes.SHA256()
)
else:
raise Exception("algorithm not support")
verifier.update(verify_material.message)
try:
verifier.verify()
except InvalidSignature:
verify_value = False
else:
verify_value = True
verify_material.verify_value = verify_value
return verify_material
class KmsVerifier(Verifier):
def __init__(self, key_arn, key_version_id, kms, signature_algorithm):
if not isinstance(key_arn, six.string_types):
raise TypeError("'key_arn' must be str type")
if not isinstance(key_version_id, six.string_types):
raise TypeError("'key_version_id' must be str type")
if kms is not None and not isinstance(kms, AliyunKms):
raise TypeError("'kms' must be AliyunKms type")
self.key = str_to_cmk(key_arn)
self.key_version_id = key_version_id
self.kms = kms
super(KmsVerifier, self).__init__(signature_algorithm)
def verify_data(self, verify_material):
digest = verify_material.digest
if digest is None or len(digest) == 0:
digest = get_digest(verify_material.message, self.signature_algorithm)
verify_value = self.kms.asymmetric_verify(
self.key, self.key_version_id, self.signature_algorithm,
base64.b64encode(digest), base64.b64encode(verify_material.signed_value)
)
verify_material.verify_value = verify_value
return verify_material
class PublicKeyVerifier(Verifier):
def __init__(self, signature_algorithm, pem_public_key):
if not isinstance(signature_algorithm, SignatureAlgorithm):
raise TypeError("'signature_algorithm' must be SignatureAlgorithm type")
if not isinstance(pem_public_key, (six.string_types, bytes)):
raise TypeError("'pem_public_key' must be str or bytes type")
public_key = load_pem_public_key(pem_public_key)
super(PublicKeyVerifier, self).__init__(signature_algorithm, public_key)
class CertificateVerifier(Verifier):
def __init__(self, pem_certificate):
if not isinstance(pem_certificate, (six.string_types, bytes)):
raise TypeError("'pem_certificate' must be str or bytes type")
cert = load_pem_x509_certificate(pem_certificate)
if cert.signature_algorithm_oid == x509.oid.SignatureAlgorithmOID.RSA_WITH_SHA256:
signature_algorithm = SignatureAlgorithm.RSA_PKCS1_SHA_256
elif cert.signature_algorithm_oid._name == "SM3WITHSM2":
signature_algorithm = None
else:
raise Exception("signature algorithm not support")
public_key = cert.public_key()
super(CertificateVerifier, self).__init__(signature_algorithm, public_key)
|
from StarTSPImage.StarTSPImage import imageFileToRaster, imageToRaster
|
# coding: utf8
from __future__ import unicode_literals
import plac
import requests
import os
import subprocess
import sys
from .link import link
from ..util import prints, get_package_path
from .. import about
@plac.annotations(
model=("model to download, shortcut or name)", "positional", None, str),
direct=("force direct download. Needs model name with version and won't "
"perform compatibility check", "flag", "d", bool))
def download(cmd, model, direct=False):
"""
Download compatible model from default download path using pip. Model
can be shortcut, model name or, if --direct flag is set, full model name
with version.
"""
if direct:
dl = download_model('{m}/{m}.tar.gz'.format(m=model))
else:
shortcuts = get_json(about.__shortcuts__, "available shortcuts")
model_name = shortcuts.get(model, model)
compatibility = get_compatibility()
version = get_version(model_name, compatibility)
dl = download_model('{m}-{v}/{m}-{v}.tar.gz'.format(m=model_name,
v=version))
if dl == 0:
try:
# Get package path here because link uses
# pip.get_installed_distributions() to check if model is a
# package, which fails if model was just installed via
# subprocess
package_path = get_package_path(model_name)
link(None, model_name, model, force=True,
model_path=package_path)
except:
# Dirty, but since spacy.download and the auto-linking is
# mostly a convenience wrapper, it's best to show a success
# message and loading instructions, even if linking fails.
prints(
"Creating a shortcut link for 'en' didn't work (maybe "
"you don't have admin permissions?), but you can still "
"load the model via its full package name:",
"nlp = spacy.load('%s')" % model_name,
title="Download successful")
def get_json(url, desc):
r = requests.get(url)
if r.status_code != 200:
msg = ("Couldn't fetch %s. Please find a model for your spaCy "
"installation (v%s), and download it manually.")
prints(msg % (desc, about.__version__), about.__docs_models__,
title="Server error (%d)" % r.status_code, exits=1)
return r.json()
def get_compatibility():
version = about.__version__
version = version.rsplit('.dev', 1)[0]
comp_table = get_json(about.__compatibility__, "compatibility table")
comp = comp_table['spacy']
if version not in comp:
prints("No compatible models found for v%s of spaCy." % version,
title="Compatibility error", exits=1)
return comp[version]
def get_version(model, comp):
model = model.rsplit('.dev', 1)[0]
if model not in comp:
version = about.__version__
msg = "No compatible model found for '%s' (spaCy v%s)."
prints(msg % (model, version), title="Compatibility error", exits=1)
return comp[model][0]
def download_model(filename):
download_url = about.__download_url__ + '/' + filename
return subprocess.call(
[sys.executable, '-m', 'pip', 'install', '--no-cache-dir',
download_url], env=os.environ.copy())
|
# WARNING: Please don't edit this file. It was generated by Python/WinRT v1.0.0-beta.4
import enum
import winsdk
_ns_module = winsdk._import_ns_module("Windows.Devices.I2c")
try:
import winsdk.windows.devices.i2c.provider
except Exception:
pass
try:
import winsdk.windows.foundation
except Exception:
pass
try:
import winsdk.windows.foundation.collections
except Exception:
pass
class I2cBusSpeed(enum.IntEnum):
STANDARD_MODE = 0
FAST_MODE = 1
class I2cSharingMode(enum.IntEnum):
EXCLUSIVE = 0
SHARED = 1
class I2cTransferStatus(enum.IntEnum):
FULL_TRANSFER = 0
PARTIAL_TRANSFER = 1
SLAVE_ADDRESS_NOT_ACKNOWLEDGED = 2
CLOCK_STRETCH_TIMEOUT = 3
UNKNOWN_ERROR = 4
I2cTransferResult = _ns_module.I2cTransferResult
I2cConnectionSettings = _ns_module.I2cConnectionSettings
I2cController = _ns_module.I2cController
I2cDevice = _ns_module.I2cDevice
II2cDeviceStatics = _ns_module.II2cDeviceStatics
|
# ======================================================================================
# Copyright and other protections apply. Please see the accompanying LICENSE file for
# rights and restrictions governing use of this software. All rights not expressly
# waived or licensed are reserved. If that file is missing or appears to be modified
# from its original, then please contact the author before viewing or using this
# software in any capacity.
# ======================================================================================
from __future__ import annotations
from dyce import H, P
def do_it(_: str) -> None:
import matplotlib.pyplot
for depth in range(6):
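        # Pool of ten exploding d10s (each die exploding up to `depth` times); h(slice(-3, None))
        # keeps the sum of the three highest, matching the plot title below.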
res = (10 @ P(H(10).explode(max_depth=depth))).h(slice(-3, None))
matplotlib.pyplot.plot(
*res.distribution_xy(),
marker=".",
label=f"{depth} rerolls",
)
matplotlib.pyplot.legend()
# Should match the corresponding img[alt] text
matplotlib.pyplot.title("Modeling taking the three highest of ten exploding d10s")
|
# Generated by Django 2.0 on 2018-01-05 12:41
from django.db import migrations, models
def delete_attributes(apps, schema_editor):
attribute_model = apps.get_model('projects', 'Attribute')
attribute_model.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('projects', '0008_add_phase_to_project'),
]
operations = [
migrations.AlterField(
model_name='attribute',
name='value_type',
field=models.CharField(choices=[('integer', 'integer'), ('short_string', 'short string'), ('long_string', 'long string'), ('boolean', 'boolean'), ('date', 'date')], max_length=64, verbose_name='value type'),
),
migrations.RunPython(delete_attributes, migrations.RunPython.noop),
]
|
import docker
import progressbar
import json
import os
import requests
import subprocess
import time
import zipfile
import tarfile
import re
from io import BytesIO
import generate_transfers_table
from logger import _log
import transitanalystisrael_config as cfg
import process_date
from pathlib import Path
from datetime import datetime as dt
import glob
def get_config_params():
"""
Reads monthly_update_config_params.conf file and returns the configuration parameters
:return: configuration parameters
"""
# Get parameters
default_coverage_name = cfg.default_coverage_name
secondary_custom_coverage_name = cfg.secondary_custom_coverage_name
    # navitia_docker_compose_file_name = "docker-israel-custom-instances.yml" - needed only when 2 coverages should be up
# Currently we only bring up one coverage and it's always the previous month to be aligned with past and current
# websites. The regular coverage (most current) file is: "docker-compose.yml"
coverage_name = secondary_custom_coverage_name
navitia_docker_compose_file_name = "docker-compose-secondary-cov.yml"
navitia_docker_compose_default_file_name = "docker-compose.yml"
navitia_docker_compose_file_path = Path(os.getcwd()).parent.parent / "navitia-docker-compose" / "compose_files"
if cfg.get_service_date == "on_demand":
navitia_docker_compose_file_name = "navitia-docker-ondemand-" + cfg.gtfsdate + ".yml"
coverage_name = "ondemand-" + cfg.gtfsdate
gtfs_file_path = Path(os.getcwd()).parent / cfg.gtfspath
processdate = process_date.get_date_now()
gtfs_zip_file_name = cfg.gtfsdirbase + processdate + ".zip"
return default_coverage_name, coverage_name, navitia_docker_compose_file_path, navitia_docker_compose_file_name, \
navitia_docker_compose_default_file_name, gtfs_file_path, gtfs_zip_file_name
def copy_file_into_docker(container, dest_path, file_path, file_name):
"""
Copy a given file to a destination folder in a Docker container
:param container: container object
:param dest_path: destination folder path inside the container
:param file_path: source path of the file on the host
:param file_name: the file name to be copied
"""
_log.info("Going to copy %s to %s at %s", file_name, container.name, dest_path)
    # Read the file contents and close the handle promptly
    with open(Path(os.getcwd()).parent / file_path / file_name, 'rb') as file_obj:
        file = file_obj.read()
try:
# Convert to tar file
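        # Docker's put_archive API only accepts tar archives, so the single file is wrapped
        # in an in-memory tar stream before being copied into the container.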
tar_stream = BytesIO()
file_tar = tarfile.TarFile(fileobj=tar_stream, mode='w')
tarinfo = tarfile.TarInfo(name=file_name)
tarinfo.size = len(file)
file_tar.addfile(tarinfo, BytesIO(file))
file_tar.close()
# Put in the container
tar_stream.seek(0)
success = container.put_archive(
path=dest_path,
data=tar_stream
)
if success:
_log.info("Finished copying %s to %s at %s", file_name, container.name, dest_path)
else:
raise FileNotFoundError
except FileNotFoundError as err:
_log.error("Couldn't copy %s to %s at %s", file_name, container.name, dest_path)
raise err
def get_docker_service_client():
"""
Checks that the docker daemon service is running and returns the service client
:return: the docker service client
"""
# Check that the docker daemon service is up, and timeout after five minutes
docker_check_alive_cmd = "docker info"
docker_is_up = False
timeout = time.time() + 60 * 5
try:
while not docker_is_up:
if time.time() > timeout:
raise TimeoutError
# Check that the daemon is up and running
docker_check_alive_process = subprocess.Popen(docker_check_alive_cmd, stdout=subprocess.PIPE, shell=True)
output, error = docker_check_alive_process.communicate()
docker_is_up = "Containers" in output.decode('utf-8')
# Get the docker client
client = docker.from_env()
return client
except BaseException as error:
_log.error("Docker daemon service is not up")
raise error
def get_navitia_url_for_cov_status(cov_name):
"""
Get the url of Navitia coverage status page
:param cov_name: the name of the coverage to return, e.g. "default" or "secondary-cov"
:return: url of Navitia coverage status page
"""
return "http://localhost:9191/v1/coverage/" + cov_name #+ "/status/"
def check_coverage_running(url, coverage_name):
"""
Check if Navitia coverage is up and running
:param url: Navitia server coverage url
:param coverage_name: the name of the coverage to check
:return: Whether a Navitia coverage is up and running
"""
_log.info("checking if %s is up", coverage_name)
response = requests.get(url)
# Get the status of the coverage as Json
json_data = json.loads(response.text)
if "regions" not in json_data or "running" not in json_data["regions"][0]['status']:
_log.info("%s coverage is down", coverage_name)
return False
else:
_log.info("%s coverage is up", coverage_name)
return True
def get_coverage_start_production_date(coverage_name):
"""
Get the start production date of the current GTFS in the given coverage
:param coverage_name: the name of the coverage
    :return: start of current production date
"""
url = get_navitia_url_for_cov_status(coverage_name)
response = requests.get(url)
# Get the status of the coverage as Json
json_data = json.loads(response.text)
if "regions" not in json_data or "running" not in json_data["regions"][0]['status']:
_log.debug("%s coverage is down so the start of production date can't be established", coverage_name)
return ""
else:
start_production_date = json_data["regions"][0]["start_production_date"]
return start_production_date
def check_prod_date_is_valid_using_heat_map(time_map_server_url, coverage_name, expected_valid_date):
"""
Simulating UI action for heat map query with expected_valid_date
:return: whether expected_valid_date is in production date
"""
heat_map_url = time_map_server_url + coverage_name + \
"/heat_maps?max_duration=3600&from=34.79041%3B32.073443&datetime=" + expected_valid_date + \
"T080000+02:00&resolution=200"
response = requests.get(heat_map_url)
# Get the status of the coverage as Json
json_data = json.loads(response.text)
if "error" not in json_data:
return True
elif json_data['error']['message'] == "date is not in data production period":
return False
return True
def validate_auto_graph_changes_applied(coverage_name, default_coverage_name, default_cov_prev_sop_date, docker_client,
navitia_docker_compose_file_path, navitia_docker_compose_file_name,
navitia_docker_compose_default_file_name):
"""
Validate that the new default coverage returns results for heat map query for current_start_service_date (as in dates file
or gtfs date) and that secondary-cov has results for the previous production date of the default.
    :param coverage_name: the coverage that gets the original default_coverage start of production date
    :param default_coverage_name: the coverage that gets a new (usually more recent) start of production date
    :param default_cov_prev_sop_date: start of production date of the original default coverage (before changes applied)
:return: whether the graph changes were applied
"""
current_start_service_date = dt.strptime(process_date.get_date_now(), "%Y%m%d")
if cfg.ttm_server_on == "aws_ec2":
time_map_server_url = cfg.time_map_server_aws_url
else:
time_map_server_url = cfg.time_map_server_local_url
# Check that the current default coverage is up-to-date by comparing sop dates
stop_all_containers(docker_client)
start_navitia_with_single_coverage(navitia_docker_compose_file_path, navitia_docker_compose_default_file_name,
default_coverage_name, False)
if not check_prod_date_is_valid_using_heat_map(time_map_server_url, default_coverage_name,
current_start_service_date.strftime("%Y%m%d")):
_log.error("The %s coverage seems not to be up-to-date following update attempts.", default_coverage_name)
return False
else:
_log.info("%s coverage is up-to-date with production date %s", default_coverage_name,
current_start_service_date.strftime("%Y%m%d"))
# Check that the coverage_name (the previous one) is up-to-date by comparing sop dates
stop_all_containers(docker_client)
is_up = start_navitia_with_single_coverage(navitia_docker_compose_file_path, navitia_docker_compose_file_name,
coverage_name, False)
if not is_up:
_log.error("The %s coverage seems not to be up", coverage_name)
cov_sop_date = get_coverage_start_production_date(coverage_name)
if cov_sop_date == "":
_log.info("If this is the first time you're running Transit Analyst Israel data processing, you need to "
"copy the generated default.nav.lz4 graph to secondary-cov.nav.lz4 - See docs.")
return True
if not check_prod_date_is_valid_using_heat_map(time_map_server_url, coverage_name,
current_start_service_date.strftime("%Y%m%d")):
_log.error("The %s coverage seems not to be up-to-date following update attempts.\nA call for heat map data with"
" %s date returned no results", coverage_name, current_start_service_date.strftime("%Y%m%d"))
return False
_log.info("%s coverage is now updated with new start-of-production date %s. "
"Can be accessed via %s%s", coverage_name, current_start_service_date.strftime("%Y%m%d"), time_map_server_url,
coverage_name)
return True
def validate_graph_changes_applied(coverage_name):
"""
Validate that the coverage has a different start of production date different from before
"""
current_start_service_date = process_date.get_date_now()
if cfg.ttm_server_on == "aws_ec2":
time_map_server_url = cfg.time_map_server_aws_url
else:
time_map_server_url = cfg.time_map_server_local_url
cov_sop_date = get_coverage_start_production_date(coverage_name)
if cov_sop_date == "" or not check_prod_date_is_valid_using_heat_map(time_map_server_url, coverage_name,
current_start_service_date):
_log.error("The %s coverage seems not to be up-to-date following update attempts."
"\n A call for heat map data with %s date returned no results",
coverage_name, current_start_service_date)
return False
_log.info("%s coverage is now updated with new start-of-production date %s\n."
"Can be accessed via %s%s", coverage_name, current_start_service_date, time_map_server_url,
coverage_name)
return True
def start_navitia_with_single_coverage(navitia_docker_compose_file_path, navitia_docker_compose_file_name,
coverage_name, extend_wait_time=False):
"""
Start Navitia server with a single coverage (using docker-compose)
:param navitia_docker_compose_file_path: path where the docker-compose file exists
:param navitia_docker_compose_file_name: name of the docker-compose file
:param coverage_name: name of the coverage to start
:param extend_wait_time: whether an extended wait time should be applied. Should be set to True when the Navitia
docker-compose is started up for the first time (images are being downloaded from the web)
:return: Whether Navitia was started successfully with the given coverage
"""
_log.info("Attempting to start Navitia with %s coverage", coverage_name)
# run docker-compose and redirect logs to prevent them from printing in the output
navitia_docker_start_command = "docker-compose -f " + navitia_docker_compose_file_name + " -p navitia-docker-compose up --remove-orphans"
subprocess.Popen(navitia_docker_start_command, shell=True, cwd=navitia_docker_compose_file_path, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
# Longer wait time is required because images are being re-downloaded
if extend_wait_time:
t_wait = 60 * 5
else:
t_wait = 60 * 3
_log.info("Waiting %s seconds to validate Navitia docker is up and running", t_wait)
time.sleep(t_wait)
# Check if coverage is up and running
is_default_up = check_coverage_running(get_navitia_url_for_cov_status(coverage_name), coverage_name)
if not is_default_up:
return False
return True
def start_navitia_w_default_and_custom_cov(secondary_custom_coverage_name, navitia_docker_compose_file_path,
navitia_docker_compose_custom_file_path, navitia_docker_compose_file_name,
extend_wait_time=False):
"""
Start Navitia server with default and custom coverages (using custom docker-compose file)
:param secondary_custom_coverage_name: name of the secondary (custom) coverage to start alongside the default
:param navitia_docker_compose_file_path: path where docker-compose file exists
:param navitia_docker_compose_file_name: name of the custom docker-compose file
:param extend_wait_time: whether an extended time of wait should be applied. Should be set to True when Navitia
docker compose is started up the first time (images are being downloaded from the web)
:return: Whether Navitia was started successfully with default and secondary coverages
"""
_log.error("This method isn't currently used because 2 corages require server with at least 10GB RAM available for "
"docker.\nEach coverage requires about 3.5 RAM when running")
raise Exception
# Verifying the custom file has another coverage named secondary_custom_coverage_name which isn't "default"
# _log.info("Attempting to start Navitia with default coverage and %s coverage", secondary_custom_coverage_name)
# navitia_docker_compose_custom_file = open(os.path.join(navitia_docker_compose_custom_file_path,
# navitia_docker_compose_file_name), mode='r')
# navitia_docker_compose_custom_file_contents = navitia_docker_compose_custom_file.read()
# navitia_docker_compose_custom_file.close()
#
# if secondary_custom_coverage_name != "default" \
# and not secondary_custom_coverage_name in navitia_docker_compose_custom_file_contents:
# _log.error("The custom configuration does not include a coverage area named: %s. Fix config, restart docker "
# "and start again", secondary_custom_coverage_name)
# return False
#
# # run the docker- compose and redirect logs to prevent from printing in the output
# regular_docker_compose_file_full_path = str(Path(navitia_docker_compose_file_path) / "docker-compose.yml")
# navitia_docker_start_command = "docker-compose -f " + regular_docker_compose_file_full_path + " -f " + \
# navitia_docker_compose_file_name + " -p navitia-docker-compose up --remove-orphans"
#
# subprocess.Popen(navitia_docker_start_command, shell=True, cwd=navitia_docker_compose_custom_file_path, stderr=subprocess.DEVNULL,
# stdout=subprocess.DEVNULL)
# # Longer wait time is required because images are being re-downloaded
# if extend_wait_time:
# t_wait = 60 * 5
# else:
# t_wait = 240
# _log.info("Waiting %s seconds to validate Navitia docker is up and running", t_wait)
# time.sleep(t_wait)
#
# # Check if default and secondary_custom_coverage_name regions are up and running
# if cfg.get_service_date == 'auto':
# default_coverage_name = "default"
# is_default_up = check_coverage_running(get_navitia_url_for_cov_status(default_coverage_name), default_coverage_name)
# if not is_default_up:
# return False
# is_secondary_up = check_coverage_running(get_navitia_url_for_cov_status(secondary_custom_coverage_name),
# secondary_custom_coverage_name)
# if not is_secondary_up:
# return False
# return True
def generate_ondemand_docker_config_file(navitia_docker_compose_file_path, navitia_docker_compose_file_name):
'''
Creates a custom docker-compose file for the on-demand environment, based on the default docker-compose.yml file
'''
navitia_docker_compose_file = open(os.path.join(navitia_docker_compose_file_path, 'docker-compose.yml'),
mode='r')
navitia_docker_compose_file_contents = navitia_docker_compose_file.read()
navitia_docker_compose_file.close()
custom_custom_docker_file_contents = navitia_docker_compose_file_contents.replace("default",
"ondemand-" + cfg.gtfsdate)
with open(os.path.join(navitia_docker_compose_file_path, navitia_docker_compose_file_name),
mode='w+') as custom_custom_docker_file:
custom_custom_docker_file.write(custom_custom_docker_file_contents)
custom_custom_docker_file.close()
_log.info("Created custom docker-compose file: %s", navitia_docker_compose_file_name)
def move_current_to_past(container, source_cov_name, dest_cov_name):
"""
Move the Navitia graph of the source coverage to the destination coverage so that changes are applied on the next re-start
:param container: the worker container of Navitia
:param source_cov_name: the name of the coverage to take the graph from (usually "default")
:param dest_cov_name: the name of the coverage to move the graph to (e.g. "secondary-cov")
:return: whether the move was successful; a RuntimeError is thrown if not
"""
command_list = "/bin/sh -c \"mv " + source_cov_name + ".nav.lz4 "+ dest_cov_name + ".nav.lz4\""
exit_code, output = container.exec_run(cmd=command_list, stdout=True, workdir="/srv/ed/output/")
if exit_code != 0:
_log.error("Couldn't change %s to %s", source_cov_name, dest_cov_name)
raise RuntimeError
_log.info("Changed the name of %s.nav.lz4 to %s.nav.lz4", source_cov_name, dest_cov_name)
return True
def is_cov_exists(container, coverage_name):
_log.info("Checking if %s exists in /srv/ed/output of %s", coverage_name, container.name)
file_list_command = "/bin/sh -c \"ls\""
exit_code, output = container.exec_run(cmd=file_list_command, stdout=True, workdir="/srv/ed/output/")
exists = coverage_name in str(output)
if exists:
_log.info("%s exists in /srv/ed/output of %s", coverage_name, container.name)
else:
_log.info("%s doesn't exists in /srv/ed/output of %s", coverage_name, container.name)
return exists
def backup_past_coverage(container, coverage_name):
"""
Copy a given coverage graph to the local host running this script
:param container: Navitia worker container
:param coverage_name: the coverage graph name to copy
"""
# Create a local file for writing the incoming graph
local_processed_folder = Path(os.getcwd()).parent / "processed"
_log.info("Going to copy %s.nav.lz4 to %s on local host", coverage_name, local_processed_folder)
local_graph_file = open(os.path.join(local_processed_folder, coverage_name + '.nav.lz4'), 'wb')
# Fetch the graph file
bits, stat = container.get_archive('/srv/ed/output/' + coverage_name + '.nav.lz4')
size = stat["size"]
# Generate a progress bar
pbar = createProgressBar(size, action="Transferring")
# Fetch
size_iterator = 0
for chunk in bits:
if chunk:
file_write_update_progress_bar(chunk, local_graph_file, pbar, size_iterator)
size_iterator += len(chunk)
local_graph_file.close()
pbar.finish()
_log.info("Finished copying %s.nav.lz4 to %s on local host", coverage_name, os.getcwd())
def delete_grpah_from_container(container, coverage_name):
"""
Delete a graph from Navitia worker container
:param container: Navitia worker container
:param coverage_name: the name of the coverage that its graph should be removed
"""
return delete_file_from_container(container, coverage_name + ".nav.lz4")
def delete_file_from_container(container, file_name):
"""
Delete a file from Navitia worker container
:param container: Navitia worker container
:param file_name: the name of the file to be removed
"""
delete_command = "/bin/sh -c \"rm " + file_name + "\""
exit_code, output = container.exec_run(cmd=delete_command, stdout=True, workdir="/srv/ed/output/")
if exit_code != 0:
_log.error("Couldn't delete %s graph", file_name)
return False
_log.info("Finished deleting %s from container %s", file_name, container.name)
def delete_file_from_host(file_name):
"""
Delete a file from the host running this script
:param file_name: the file name to be deleted
"""
if os.path.isfile(file_name):
os.remove(file_name)
_log.info("Finished deleting %s from host", file_name)
def stop_all_containers(docker_client):
"""
Stop all the running docker containers
:param docker_client: docker client
"""
_log.info("Going to stop all Docker containers")
for container in docker_client.containers.list():
container.stop()
_log.info("Stopped all Docker containers")
def generate_transfers_file(gtfs_file_path):
"""
Generate a transfers table compatible with Navitia's server requirements for extending transfers between stops in
graph calculation. Default values are used:
maximum crow-fly walking distance of 500 meters, 0 minimum transfer time and 0.875 meters/second walking speed
:param gtfs_file_path: path of the extracted GTFS folder containing a stops.txt file with the list of stops and their coordinates
:return: the full path of the generated transfers.txt file
"""
output_path = os.path.join(gtfs_file_path,"transfers.txt")
generate_transfers_table.generate_transfers(input=os.path.join(gtfs_file_path,"stops.txt"), output=output_path)
return output_path
def generate_gtfs_with_transfers(gtfs_file_name, gtfs_file_path):
"""
Generate a GTFS ZIP file with a processed transfers.txt file compatible with Navitia's server requirements for
extending transfers between stops in graph calculation
:param gtfs_file_name: GTFS zip file name
:param gtfs_file_path: GTFS zip file path
:return: the name of the GTFS file
"""
gtfs_file_path_name = os.path.join(gtfs_file_path, gtfs_file_name)
_log.info("Extracting stops.txt and computing transfers.txt")
output_path = generate_transfers_file(os.path.join(gtfs_file_path,gtfs_file_name[:-4]))
with zipfile.ZipFile(gtfs_file_path_name, 'a') as zip_ref:
zip_ref.write(output_path, arcname="transfers.txt")
_log.info("Added transfers.txt to %s", gtfs_file_path_name)
def copy_osm_and_gtfs_to_cov(worker_con, osm_file_path, osm_file_name, gtfs_file_path, gtfs_file_name, cov_name):
"""
Copy GTFS and OSM files into the input folder of the given coverage for creating a new Navitia graph
:param worker_con: docker worker container
:param osm_file_path: osm file path
:param osm_file_name: osm file name
:param gtfs_file_path: gtfs file path
:param gtfs_file_name: gtfs file name
:param cov_name: coverage name
:return:
"""
copy_file_into_docker(worker_con, 'srv/ed/input/' + cov_name, osm_file_path, osm_file_name)
copy_file_into_docker(worker_con, 'srv/ed/input/' + cov_name, gtfs_file_path, gtfs_file_name)
def validate_osm_gtfs_convertion_to_graph_is_completed(worker_con, time_to_wait, start_processing_time):
"""
Validates that the following Navitia worker tasks were successfully completed:
osm2ed, gtfs2ed and ed2nav
:param worker_con: the Navitia worker container
:param time_to_wait: time to wait for the validation to take place, in minutes. Default is 20 minutes
:param start_processing_time: the time processing started; success messages older than this are ignored
:return: Whether conversion is completed or not
"""
# Wait if needed
_log.info("Waiting %s minutes to let OSM & GTFS conversions to lz4 graph takes place", time_to_wait)
time.sleep(time_to_wait * 60)
_log.info("I'm back! Verifying that the conversions took place")
# Success status look like Task tyr.binarisation.ed2nav[feac06ca-51f7-4e39-bf1d-9541eaac0988] succeeded
# and tyr.binarisation.gtfs2ed[feac06ca-51f7-4e39-bf1d-9541eaac0988] succeeded
tyr_worker_outputname = "tyr_worker_output.txt"
with open(tyr_worker_outputname, "w", encoding="UTF-8") as tyr_worker_output:
tyr_worker_output.write(worker_con.logs().decode('utf-8'))
tyr_worker_output.close()
ed2nav_completed = False
with open(tyr_worker_outputname, "r", encoding="UTF-8") as tyr_worker_output:
lines = tyr_worker_output.readlines()
for line in reversed(lines):
if re.compile(r'tyr\.binarisation\.ed2nav\[\S*\] succeeded').search(line):
time_of_line = re.findall(r'\d{1,4}-\d{1,2}-\d{1,2}\b \d{1,2}:\d{1,2}:\d{1,2}', line)
time_of_line = dt.strptime(time_of_line[0], '%Y-%m-%d %H:%M:%S')
if start_processing_time < time_of_line:
ed2nav_completed = True
break
os.remove(tyr_worker_outputname)
if ed2nav_completed:
_log.info("OSM conversion task ed2nav, GTFS conversion task gtfs2ed and ed2nav are successful")
return True
else:
_log.error("After %s minutes - tasks aren't completed", time_to_wait)
return False
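# ---------------------------------------------------------------------------
# Minimal sketch (illustration only, not called by the pipeline): how the
# success-line parsing above behaves on a sample tyr worker log line. The exact
# log layout (timestamp prefix and task id) is an assumption for illustration.
def _demo_parse_ed2nav_success_line():
    sample_line = ("2021-01-01 12:30:45 Task tyr.binarisation.ed2nav"
                   "[feac06ca-51f7-4e39-bf1d-9541eaac0988] succeeded")
    # Same pattern and timestamp extraction as used in
    # validate_osm_gtfs_convertion_to_graph_is_completed above
    if re.compile(r'tyr\.binarisation\.ed2nav\[\S*\] succeeded').search(sample_line):
        time_of_line = re.findall(r'\d{1,4}-\d{1,2}-\d{1,2}\b \d{1,2}:\d{1,2}:\d{1,2}', sample_line)
        return dt.strptime(time_of_line[0], '%Y-%m-%d %H:%M:%S')
    return None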
def validate_osm_gtfs_convertion_to_graph_is_running(docker_client, coverage_name, navitia_docker_compose_file_path,
navitia_docker_compose_file_name):
"""
Validates that the conversion of gtfs & OSM to Navitia graph is undergoing (continious process).
Container tyr_beat is the service that triggers the conversion in the worker container and it does this after
new files are copied into /srv/ed/input/<coverage-name> folder in the worker container.
If tyr_beat is down and can't be re-started, the container an its image are removed and re-downloaded from the web
:param docker_client: the docker client
:param coverage_name: the coverage to restart Navitia with if tyr_beat has to be re-downloaded
:param navitia_docker_compose_file_path:
:param navitia_docker_compose_file_name:
:return:
"""
# tyr_beat must be running as it manages the tasks for the worker, the latter generates the graph
_log.info("Validating that tyr_beat is up and running")
beat_con = docker_client.containers.list(filters={"name": "beat"})
time_beat_restarted=""
if not beat_con:
# restarting tyr_beat
_log.info("tyr_beat is down, attempting to re-run")
tyr_beat_start_command = "docker-compose -f " + navitia_docker_compose_file_name + " -p navitia-docker-compose up tyr_beat"
time_beat_restarted = dt.utcnow()
with open("tyr_beat_output.txt", "w", encoding="UTF-8") as tyr_beat_output:
subprocess.Popen(tyr_beat_start_command, cwd=navitia_docker_compose_file_path,
shell=True, stdout=tyr_beat_output, stderr=tyr_beat_output)
# Wait 15 seconds for it to come up
_log.info("Waiting 15 seconds to see if tyr_beat is up")
time.sleep(15)
tyr_beat_output.close()
# Check that tyr_beat is working using its log, comparing the restart time with the time in the log
new_time_is_found = False
with open("tyr_beat_output.txt", "r", encoding="UTF-8") as tyr_beat_output:
lines = tyr_beat_output.readlines()
for line in reversed(lines):
if "Sending due task udpate-data-every-30-seconds" in line:
time_of_line = re.findall(r'\d{1,4}-\d{1,2}-\d{1,2}\b \d{1,2}:\d{1,2}:\d{1,2}', line)
time_of_line = dt.strptime(time_of_line[0], '%Y-%m-%d %H:%M:%S')
if time_beat_restarted < time_of_line:
_log.info("tyr_beat is up and running")
new_time_is_found = True
break
# tyr_beat is malfunctioned, need to delete and re-download
if not new_time_is_found:
# stop all containers
_log.info("Stopping and removing all containers to pull fresh copy of tyr_beat container")
stop_all_containers(docker_client)
# delete container and image
beat_con = docker_client.containers.list(all=True, filters={"name": "beat"})[0]
beat_image = docker_client.images.list(name="navitia/tyr-beat")[0]
beat_con_name = beat_con.name
beat_image_id = beat_image.id
beat_con.remove()
_log.info("%s container is removed", beat_con_name)
docker_client.images.remove(beat_image.id)
_log.info("%s image is removed", beat_image_id)
# re-run navitia docker-compose which re-downloads the tyr_beat container
_log.info("Restarting docker with %s coverage",
coverage_name)
start_navitia_with_single_coverage(navitia_docker_compose_file_path, navitia_docker_compose_file_name,
coverage_name, True)
# removing the log file
os.remove("tyr_beat_output.txt")
else:
_log.info("Validated tyr_beat is up and running")
def is_aws_machine():
"""
Checks whether the machine is AWS EC2 instance or not
:return:
"""
try:
r = requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document")
if r.json() is not None:
return True
else:
return False
except requests.exceptions.ConnectionError:
return False
if is_aws_machine():
import send_email
def send_log_to_email(subject, message):
"""
Send an e-mail with a user-defined subject and message. The most recent log file of this script is attached
:param subject:
:param message:
:return: Whether the e-mail was sent successfully
"""
# Change to root before trying to send logs
root_path = Path.home() / "TransitAnalystIsrael" / "root"
os.chdir(root_path.as_posix())
logs_path = root_path / "logs"
if not os.path.isdir(logs_path):
_log.error("%s isn't the logs directory. Please fix the logs directory as in code", logs_path)
path = logs_path / '*'
list_of_files = glob.glob(str(path)) # * means all if need specific format then *.csv
attached_file = max(list_of_files, key=os.path.getctime)
return send_email.create_msg_and_send_email(subject, message, attached_file)
def createProgressBar(file_size, action='Downloading: '):
"""
Creating a progress bar for continuous tasks like downloading a file or processing data
:param file_size: the total size of the file to set the 100% of the bar
:param action: type of action for the progress bar description, default is "Downloading: "
:return: a progress bar object
"""
widgets = [action, progressbar.Percentage(), ' ',
progressbar.Bar(marker='#', left='[', right=']'),
' ', progressbar.ETA(), ' ', progressbar.FileTransferSpeed()]
# We're increasing the file size by 10% because sometimes the file split causes changes in the total size
pbar = progressbar.ProgressBar(widgets=widgets, maxval=(file_size*1.1))
pbar.start()
return pbar
def file_write_update_progress_bar(data, dest_file, pbar, size_iterator):
"""
Callback for writing fetched or processed data from FTP while updating the progress bar
"""
dest_file.write(data)
pbar.update(size_iterator)
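# ---------------------------------------------------------------------------
# Minimal usage sketch for the two helpers above, mirroring the download loop in
# backup_past_coverage. The chunk iterable and destination path are hypothetical
# placeholders (illustration only, not called by the pipeline).
def _demo_copy_with_progress(chunks, total_size, dest_path):
    pbar = createProgressBar(total_size, action="Copying")
    size_iterator = 0
    with open(dest_path, 'wb') as dest_file:
        for chunk in chunks:
            if chunk:
                file_write_update_progress_bar(chunk, dest_file, pbar, size_iterator)
                size_iterator += len(chunk)
    pbar.finish()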
def get_gtfs_list_from_omd():
"""
:return: List of dates indicating different versions of GTFS by starting date
"""
# _log.info("Retrieving list of available GTFS versions from OpenMobilityData")
url="https://api.transitfeeds.com/v1/getFeedVersions?key=5bbfcb92-9c9f-4569-9359-0edc6e765e9f&feed=ministry-of-transport-and-road-safety%2F820&page=1&limit=500&err=1&warn=1"
r = requests.get(url, stream=True)
response = r.json()
print(response.get('status'))
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import os
import unittest
import numpy as np
from systemds.context import SystemDSContext
from systemds.examples.tutorials.adult import DataManager
from systemds.operator import Frame, Matrix, OperationNode
from systemds.operator.algorithm import (confusionMatrix, kmeans, l2svm,
multiLogReg, multiLogRegPredict,
scale, scaleApply, split, winsorize)
from systemds.script_building import DMLScript
class Test_DMLScript(unittest.TestCase):
"""
Test class for adult dml script tutorial code.
"""
sds: SystemDSContext = None
d: DataManager = None
neural_net_src_path: str = "tests/examples/tutorials/neural_net_source.dml"
preprocess_src_path: str = "tests/examples/tutorials/preprocess.dml"
dataset_path_train: str = "../../test/resources/datasets/adult/train_data.csv"
dataset_path_train_mtd: str = "../../test/resources/datasets/adult/train_data.csv.mtd"
dataset_path_test: str = "../../test/resources/datasets/adult/test_data.csv"
dataset_path_test_mtd: str = "../../test/resources/datasets/adult/test_data.csv.mtd"
dataset_jspec: str = "../../test/resources/datasets/adult/jspec.json"
@classmethod
def setUpClass(cls):
cls.sds = SystemDSContext()
cls.d = DataManager()
@classmethod
def tearDownClass(cls):
cls.sds.close()
def test_train_data(self):
x = self.d.get_train_data_pandas()
self.assertEqual((32561, 14), x.shape)
def test_train_labels(self):
y = self.d.get_train_labels_pandas()
self.assertEqual((32561,), y.shape)
def test_test_data(self):
x_l = self.d.get_test_data_pandas()
self.assertEqual((16281, 14), x_l.shape)
def test_test_labels(self):
y_l = self.d.get_test_labels_pandas()
self.assertEqual((16281,), y_l.shape)
def test_train_data_pandas_vs_systemds(self):
pandas = self.d.get_train_data_pandas()
systemds = self.d.get_train_data(self.sds).compute()
self.assertTrue(len(pandas.columns.difference(systemds.columns)) == 0)
self.assertEqual(pandas.shape, systemds.shape)
def test_train_labels_pandas_vs_systemds(self):
# Pandas does not strip the parsed values, so I have to do it here.
pandas = np.array(
[x.strip() for x in self.d.get_train_labels_pandas().to_numpy().flatten()])
systemds = self.d.get_train_labels(
self.sds).compute().to_numpy().flatten()
comp = pandas == systemds
self.assertTrue(comp.all())
def test_test_labels_pandas_vs_systemds(self):
# Pandas does not strip the parsed values, so I have to do it here.
pandas = np.array(
[x.strip() for x in self.d.get_test_labels_pandas().to_numpy().flatten()])
systemds = self.d.get_test_labels(
self.sds).compute().to_numpy().flatten()
comp = pandas == systemds
self.assertTrue(comp.all())
def test_transform_encode_train_data(self):
jspec = self.d.get_jspec(self.sds)
train_x, M1 = self.d.get_train_data(self.sds).transform_encode(spec=jspec)
train_x_numpy = train_x.compute()
self.assertEqual((32561, 107), train_x_numpy.shape)
def test_transform_encode_apply_test_data(self):
jspec = self.d.get_jspec(self.sds)
train_x, M1 = self.d.get_train_data(self.sds).transform_encode(spec=jspec)
test_x = self.d.get_test_data(self.sds).transform_apply(spec=jspec, meta=M1)
test_x_numpy = test_x.compute()
self.assertEqual((16281, 107), test_x_numpy.shape)
def test_transform_encode_train_labels(self):
jspec_dict = {"recode":["income"]}
jspec = self.sds.scalar(f'"{jspec_dict}"')
train_y, M1 = self.d.get_train_labels(self.sds).transform_encode(spec=jspec)
train_y_numpy = train_y.compute()
self.assertEqual((32561, 1), train_y_numpy.shape)
def test_transform_encode_test_labels(self):
jspec_dict = {"recode":["income"]}
jspec = self.sds.scalar(f'"{jspec_dict}"')
train_y, M1 = self.d.get_train_labels(self.sds).transform_encode(spec=jspec)
test_y = self.d.get_test_labels(self.sds).transform_apply(spec=jspec, meta=M1)
test_y_numpy = test_y.compute()
self.assertEqual((16281, 1), test_y_numpy.shape)
def test_multi_log_reg(self):
# Reduced because we want the tests to finish a bit faster.
train_count = 10000
test_count = 500
jspec_data = self.d.get_jspec(self.sds)
train_x_frame = self.d.get_train_data(self.sds)[0:train_count]
train_x, M1 = train_x_frame.transform_encode(spec=jspec_data)
test_x_frame = self.d.get_test_data(self.sds)[0:test_count]
test_x = test_x_frame.transform_apply(spec=jspec_data, meta=M1)
jspec_dict = {"recode": ["income"]}
jspec_labels = self.sds.scalar(f'"{jspec_dict}"')
train_y_frame = self.d.get_train_labels(self.sds)[0:train_count]
train_y, M2 = train_y_frame.transform_encode(spec=jspec_labels)
test_y_frame = self.d.get_test_labels(self.sds)[0:test_count]
test_y = test_y_frame.transform_apply(spec=jspec_labels, meta=M2)
betas = multiLogReg(train_x, train_y)
[_, y_pred, acc] = multiLogRegPredict(test_x, betas, test_y)
[_, conf_avg] = confusionMatrix(y_pred, test_y)
confusion_numpy = conf_avg.compute()
self.assertTrue(confusion_numpy[0][0] > 0.8)
self.assertTrue(confusion_numpy[0][1] < 0.5)
self.assertTrue(confusion_numpy[1][1] > 0.5)
self.assertTrue(confusion_numpy[1][0] < 0.2)
# def test_neural_net(self):
# # Reduced because we want the tests to finish a bit faster.
# train_count = 15000
# test_count = 5000
# train_data, train_labels, test_data, test_labels = self.d.get_preprocessed_dataset(interpolate=True, standardize=True, dimred=0.1)
# # Train data
# X = self.sds.from_numpy( train_data[:train_count])
# Y = self.sds.from_numpy( train_labels[:train_count])
# # Test data
# Xt = self.sds.from_numpy(test_data[:test_count])
# Yt = self.sds.from_numpy(test_labels[:test_count])
# FFN_package = self.sds.source(self.neural_net_src_path, "fnn", print_imported_methods=True)
# network = FFN_package.train(X, Y, 1, 16, 0.01, 1)
# self.assertTrue(type(network) is not None) # sourcing and training seem to work
# FFN_package.save_model(network, '"model/python_FFN/"').compute(verbose=True)
# # TODO This does not work yet, not sure what the problem is
# #probs = FFN_package.predict(Xt, network).compute(True)
# # FFN_package.eval(Yt, Yt).compute()
# def test_level1(self):
# # Reduced because we want the tests to finish a bit faster.
# train_count = 15000
# test_count = 5000
# train_data, train_labels, test_data, test_labels = self.d.get_preprocessed_dataset(interpolate=True,
# standardize=True, dimred=0.1)
# # Train data
# X = self.sds.from_numpy(train_data[:train_count])
# Y = self.sds.from_numpy(train_labels[:train_count])
# Y = Y + 1.0
# # Test data
# Xt = self.sds.from_numpy(test_data[:test_count])
# Yt = self.sds.from_numpy(test_labels[:test_count])
# Yt = Yt + 1.0
# betas = multiLogReg(X, Y)
# [_, y_pred, acc] = multiLogRegPredict(Xt, betas, Yt).compute()
# self.assertGreater(acc, 80) #Todo remove?
# # todo add text how high acc should be with this config
# confusion_matrix_abs, _ = confusionMatrix(self.sds.from_numpy(y_pred), Yt).compute()
# # todo print confusion matrix? Explain cm?
# self.assertTrue(
# np.allclose(
# confusion_matrix_abs,
# np.array([[3583, 502],
# [245, 670]])
# )
# )
# def test_level2(self):
# train_count = 32561
# test_count = 16281
# SCHEMA = '"DOUBLE,STRING,DOUBLE,STRING,DOUBLE,STRING,STRING,STRING,STRING,STRING,DOUBLE,DOUBLE,DOUBLE,STRING,STRING"'
# F1 = self.sds.read(
# self.dataset_path_train,
# schema=SCHEMA
# )
# F2 = self.sds.read(
# self.dataset_path_test,
# schema=SCHEMA
# )
# jspec = self.sds.read(self.dataset_jspec, data_type="scalar", value_type="string")
# PREPROCESS_package = self.sds.source(self.preprocess_src_path, "preprocess", print_imported_methods=True)
# X1 = F1.rbind(F2)
# X1, M1 = X1.transform_encode(spec=jspec)
# X = PREPROCESS_package.get_X(X1, 1, train_count)
# Y = PREPROCESS_package.get_Y(X1, 1, train_count)
# Xt = PREPROCESS_package.get_X(X1, train_count, train_count+test_count)
# Yt = PREPROCESS_package.get_Y(X1, train_count, train_count+test_count)
# Yt = PREPROCESS_package.replace_value(Yt, 3.0, 1.0)
# Yt = PREPROCESS_package.replace_value(Yt, 4.0, 2.0)
# # better alternative for encoding. This was intended, but it does not work
# #F2 = F2.replace("<=50K.", "<=50K")
# #F2 = F2.replace(">50K.", ">50K")
# #X1, M = F1.transform_encode(spec=jspec)
# #X2 = F2.transform_apply(spec=jspec, meta=M)
# #X = PREPROCESS_package.get_X(X1, 1, train_count)
# #Y = PREPROCESS_package.get_Y(X1, 1, train_count)
# #Xt = PREPROCESS_package.get_X(X2, 1, test_count)
# #Yt = PREPROCESS_package.get_Y(X2, 1, test_count)
# # TODO somehow throws error at predict with this included
# #X, mean, sigma = scale(X, True, True)
# #Xt = scaleApply(Xt, mean, sigma)
# betas = multiLogReg(X, Y)
# [_, y_pred, acc] = multiLogRegPredict(Xt, betas, Yt)
# confusion_matrix_abs, _ = confusionMatrix(y_pred, Yt).compute()
# print(confusion_matrix_abs)
# self.assertTrue(
# np.allclose(
# confusion_matrix_abs,
# np.array([[11593., 1545.],
# [842., 2302.]])
# )
# )
# def test_level3(self):
# train_count = 32561
# test_count = 16281
# SCHEMA = '"DOUBLE,STRING,DOUBLE,STRING,DOUBLE,STRING,STRING,STRING,STRING,STRING,DOUBLE,DOUBLE,DOUBLE,STRING,STRING"'
# F1 = self.sds.read(
# self.dataset_path_train,
# schema=SCHEMA
# )
# F2 = self.sds.read(
# self.dataset_path_test,
# schema=SCHEMA
# )
# jspec = self.sds.read(self.dataset_jspec, data_type="scalar", value_type="string")
# PREPROCESS_package = self.sds.source(self.preprocess_src_path, "preprocess", print_imported_methods=True)
# X1 = F1.rbind(F2)
# X1, M1 = X1.transform_encode(spec=jspec)
# X = PREPROCESS_package.get_X(X1, 1, train_count)
# Y = PREPROCESS_package.get_Y(X1, 1, train_count)
# Xt = PREPROCESS_package.get_X(X1, train_count, train_count + test_count)
# Yt = PREPROCESS_package.get_Y(X1, train_count, train_count + test_count)
# Yt = PREPROCESS_package.replace_value(Yt, 3.0, 1.0)
# Yt = PREPROCESS_package.replace_value(Yt, 4.0, 2.0)
# # better alternative for encoding
# # F2 = F2.replace("<=50K.", "<=50K")
# # F2 = F2.replace(">50K.", ">50K")
# # X1, M = F1.transform_encode(spec=jspec)
# # X2 = F2.transform_apply(spec=jspec, meta=M)
# # X = PREPROCESS_package.get_X(X1, 1, train_count)
# # Y = PREPROCESS_package.get_Y(X1, 1, train_count)
# # Xt = PREPROCESS_package.get_X(X2, 1, test_count)
# # Yt = PREPROCESS_package.get_Y(X2, 1, test_count)
# # TODO somehow throws error at predict with this included
# # X, mean, sigma = scale(X, True, True)
# # Xt = scaleApply(Xt, mean, sigma)
# FFN_package = self.sds.source(self.neural_net_src_path, "fnn", print_imported_methods=True)
# epochs = 1
# batch_size = 16
# learning_rate = 0.01
# seed = 42
# network = FFN_package.train(X, Y, epochs, batch_size, learning_rate, seed)
# """
# If more resources are available, one can also choose to train the model using a parameter server.
# Here we use the same parameters as before; however, we need to specify a few more.
# """
# ################################################################################################################
# # workers = 1
# # utype = '"BSP"'
# # freq = '"EPOCH"'
# # mode = '"LOCAL"'
# # network = FFN_package.train_paramserv(X, Y, epochs,
# # batch_size, learning_rate, workers, utype, freq, mode,
# # seed)
# ################################################################################################################
# FFN_package.save_model(network, '"model/python_FFN/"').compute(verbose=True)
# """
# Next we evaluate our network on the test set which was not used for training.
# The predict function with the test features and our trained network returns a matrix of class probabilities.
# This matrix contains for each test sample the probabilities for each class.
# For predicting the most likely class of a sample, we choose the class with the highest probability.
# """
# ################################################################################################################
# #probs = FFN_package.predict(Xt, network)
# ################################################################################################################
# """
# To evaluate how well our model performed on the test set, we can use the probability matrix from the predict call and the real test labels
# and compute the log-cosh loss.
# """
# ################################################################################################################
# #FFN_package.eval(Xt, Yt).compute(True)
# ################################################################################################################
if __name__ == "__main__":
unittest.main(exit=False)
|
import asyncio
from typing import Callable, Tuple, Type
from functools import wraps
def retry(tries: int, exceptions: Tuple[Type[BaseException], ...] = (Exception,)) -> Callable:
"""
Decorator that re-runs the given function up to `tries` times if an error occurs.
The number of tries will either be the value given to the decorator,
or, if `tries` is present in the keyword arguments of the function call, that
specified value takes precedence.
If the function fails even after all of the retries, the last exception that
the function raised is re-raised (even if the previous failures raised
a different exception, only the last one is raised!).
"""
def decorate(func: Callable) -> Callable:
@wraps(func)
async def async_wrapper(*args, tries: int = tries, **kwargs):
last_exc: BaseException
for _ in range(tries):
try:
return await func(*args, **kwargs)
except exceptions as exc:
last_exc = exc
else:
raise last_exc # type: ignore # (This won't actually be unbound)
@wraps(func)
def sync_wrapper(*args, tries: int = tries, **kwargs):
last_exc: BaseException
for _ in range(tries):
try:
return func(*args, **kwargs)
except exceptions as exc:
last_exc = exc
else:
raise last_exc # type: ignore # (This won't actually be unbound)
if asyncio.iscoroutinefunction(func):
return async_wrapper
return sync_wrapper
return decorate
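# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the module's public API).
# `_flaky_lookup` is a hypothetical example target; as described in the docstring,
# the per-call `tries` keyword overrides the value given to the decorator.
@retry(tries=3, exceptions=(ValueError,))
def _flaky_lookup(data: dict, key: str):
    # ValueError is in `exceptions`, so it triggers a retry; other errors propagate.
    if key not in data:
        raise ValueError(f"{key!r} not present yet")
    return data[key]
# Example call overriding the decorator's tries:
# _flaky_lookup({"a": 1}, "a", tries=5)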
|
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from quark.drivers import base
from quark.tests import test_base
class TestBaseDriver(test_base.TestBase):
def setUp(self):
super(TestBaseDriver, self).setUp()
self.driver = base.BaseDriver()
def test_load_config(self):
self.driver.load_config()
def test_get_connection(self):
self.driver.get_connection()
def test_create_network(self):
self.driver.create_network(context=self.context, network_name="public")
def test_delete_network(self):
self.driver.delete_network(context=self.context, network_id=1)
def test_create_port(self):
self.driver.create_port(context=self.context, network_id=1, port_id=2)
def test_update_port(self):
self.driver.update_port(context=self.context, network_id=1, port_id=2)
def test_delete_port(self):
self.driver.delete_port(context=self.context, port_id=2)
def test_diag_network(self):
diag = self.driver.diag_network(self.context, network_id=1)
self.assertEqual(diag, {})
def test_diag_port(self):
diag = self.driver.diag_port(self.context, network_id=1)
self.assertEqual(diag, {})
def test_create_security_group(self):
self.driver.create_security_group(context=self.context,
group_name="mygroup")
def test_delete_security_group(self):
self.driver.delete_security_group(context=self.context,
group_id=3)
def test_update_security_group(self):
self.driver.update_security_group(context=self.context,
group_id=3)
def test_create_security_group_rule(self):
rule = {'ethertype': 'IPv4', 'direction': 'ingress'}
self.driver.create_security_group_rule(context=self.context,
group_id=3,
rule=rule)
def test_delete_security_group_rule(self):
rule = {'ethertype': 'IPv4', 'direction': 'ingress'}
self.driver.delete_security_group_rule(context=self.context,
group_id=3,
rule=rule)
|
# Generated by Django 3.1.3 on 2020-11-26 19:20
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(max_length=30, unique=True, validators=[django.core.validators.RegexValidator(re.compile('^[\\w.@+-]+$'), 'The name can contain only letters and numbers, or the characters @/./+/-/_', 'invalid')], verbose_name='UserName')),
('email', models.EmailField(max_length=254, unique=True, verbose_name='Email')),
('name', models.CharField(blank=True, max_length=100, verbose_name='Name')),
('is_active', models.BooleanField(blank=True, default=True, verbose_name='Is active?')),
('is_staff', models.BooleanField(blank=True, default=False, verbose_name='Is from group?')),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='Admittanceate')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'User',
'verbose_name_plural': 'Users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
__author__ = "Christian Kongsgaard"
__license__ = "MIT"
__version__ = "0.0.1"
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
from datetime import datetime
import xml.etree.ElementTree as ET
import os
# RiBuild Modules:
# -------------------------------------------------------------------------------------------------------------------- #
# DELPHIN FUNCTIONS AND CLASSES
class Delphin6File:
def __init__(self):
self.xml_tree = ET.Element('DelphinProject')
self.material_database = None
def create_attributes(self, file_attributes):
self.xml_tree.set('xmlns', file_attributes['xmlns'])
self.xml_tree.set('xmlns:xsi', file_attributes['xmlns:xsi'])
self.xml_tree.set('xmlns:IBK', file_attributes['xmlns:IBK'])
self.xml_tree.set('xsi:schemaLocation', file_attributes['xsi:schemaLocation'])
self.xml_tree.set('fileVersion', file_attributes['file_version'])
def create_project_info(self, comment, created=None):
last_edited = datetime.strftime(datetime.now(), '%c')
if not created:
created = last_edited
info_tree = ET.SubElement(self.xml_tree, 'ProjectInfo')
info_tree.set('created', created)
info_tree.set('lastEdited', last_edited)
comment_tree = ET.SubElement(info_tree, 'Comment')
comment_tree.text = str(comment)
def create_directory_placeholders(self, climate_database=None, material_database=None):
if not climate_database:
climate_database = r'C:/Program Files/IBK/Delphin 6.0/resources/DB_climate'
if not material_database:
material_database = r'C:/Program Files/IBK/Delphin 6.0/resources/DB_materials'
self.material_database = material_database
directory_placeholders_tree = ET.SubElement(self.xml_tree, 'DirectoryPlaceholders')
climate_param = ET.SubElement(directory_placeholders_tree, 'Placeholder')
climate_param.set('name', 'Climate Database')
climate_param.text = climate_database
material_param = ET.SubElement(directory_placeholders_tree, 'Placeholder')
material_param.set('name', 'Material Database')
material_param.text = material_database
def create_init(self, duration_unit, duration, longitude, latitude, climate_data_path, albedo, balance_equation_module='BEHeatMoisture'):
init_tree = ET.SubElement(self.xml_tree, 'Init')
sim_param_tree = ET.SubElement(init_tree, 'SimulationParameter')
balance_equation_module_tree = ET.SubElement(sim_param_tree, 'BalanceEquationModule')
balance_equation_module_tree.text = balance_equation_module
interval_tree = ET.SubElement(sim_param_tree, 'Interval')
interval_param = ET.SubElement(interval_tree, 'IBK:Parameter')
interval_param.set('name', 'Duration')
interval_param.set('unit', str(duration_unit))
interval_param.text = str(int(duration))
climate_path_tree = ET.SubElement(sim_param_tree, 'ClimateDataFilePath')
climate_path_tree.text = str(climate_data_path)
longitude_param = ET.SubElement(sim_param_tree, 'IBK:Parameter')
longitude_param.set('name', 'Longitude')
longitude_param.set('unit', 'Deg')
longitude_param.text = str(longitude)
latitude_param = ET.SubElement(sim_param_tree, 'IBK:Parameter')
latitude_param.set('name', 'Latitude')
latitude_param.set('unit', 'Deg')
latitude_param.text = str(latitude)
albedo_param = ET.SubElement(sim_param_tree, 'IBK:Parameter')
albedo_param.set('name', 'Albedo')
albedo_param.set('unit', '---')
albedo_param.text = str(albedo)
def create_materials(self, material_numbers):
def read_material_file(file_path):
"""Reads a material file and return the information required by the material parameter"""
if not file_path:
return None
# Initialize variables
name = None
color = None
hatch_code = None
# Open file and loop through the lines
file_obj = open(file_path, 'r')
file_lines = file_obj.readlines()
for line in file_lines:
if line.startswith(' NAME'):
name_line = line.split('=')
names = name_line[1].split('|')
for n in names:
if n.startswith('EN'):
name = n[3:-1]
else:
pass
elif line.startswith(' COLOUR') or line.startswith(' COLOR'):
color_line = line.split('=')
color = color_line[1][1:-1]
elif line.startswith(' HATCHING'):
hatch_line = line.split('=')
hatch_code = hatch_line[1][1:-1]
else:
pass
return name, color, hatch_code, file_path
def lookup_material(material_number):
file_end = str(material_number) + '.m6'
material_files = os.listdir(self.material_database)
material_path = None
for f in material_files:
if f.endswith(file_end):
material_path = self.material_database + '/' + f
else:
pass
return read_material_file(material_path)
material_tree = ET.SubElement(self.xml_tree, 'Materials')
for m in material_numbers:
m = int(m)
material_name, material_color, material_hatch, material_file = lookup_material(m)
material_param = ET.SubElement(material_tree, 'MaterialReference')
material_param.set('name', material_name + ' [' + str(m) + ']')
material_param.set('color', material_color)
material_param.set('hatchCode', material_hatch)
material_param.text = material_file
def create_discretization(self, discretization):
discretization_tree = ET.SubElement(self.xml_tree, 'Discretization')
xstep_tree = ET.SubElement(discretization_tree, 'XStep')
xstep_tree.set('unit', 'm')
xstep_tree.text = ' '.join(discretization)
def create_conditions(self, interfaces, climates, boundary):
condition_tree = ET.SubElement(self.xml_tree, 'Conditions')
# Set interfaces
# Interfaces data structure: dict - {'indoor': {'type': string, 'bc': list with strings, 'orientation': int},
# 'outdoor': {'type': string, 'bc': list with strings}
# }
interfaces_tree = ET.SubElement(condition_tree, 'Interfaces')
indoor_tree = ET.SubElement(interfaces_tree, 'Interface')
indoor_tree.set('name', 'Indoor surface')
indoor_tree.set('type', interfaces['indoor']['type'])
interface_param = ET.SubElement(indoor_tree, 'IBK:Parameter')
interface_param.set('name', 'Orientation')
interface_param.set('unit', 'Deg')
interface_param.text = str(interfaces['indoor']['orientation'])
for bc in interfaces['indoor']['bc']:
bc_param = ET.SubElement(indoor_tree, 'BCReference')
bc_param.text = bc
# Set climate conditions
# climate conditions data structure: list with dicts.
# dicts has structure: {'name': 'string',
# 'type': string,
# 'kind': string,
# 'param': dict,
# 'file': string or None,
# 'flag': dict or None
# }
#
# param has structure: {'name': string, 'unit': string, 'value': float or int}
# flag has structure: {'name': string, 'value': string}
climate_conditions_tree = ET.SubElement(condition_tree, 'ClimateConditions')
for climate_dict in climates:
climate_tree = ET.SubElement(climate_conditions_tree, 'ClimateCondition')
climate_tree.set('name', climate_dict['name'])
climate_tree.set('type', climate_dict['type'])
climate_tree.set('kind', climate_dict['kind'])
if climate_dict['kind'] == 'TabulatedData':
file_tree = ET.SubElement(climate_tree, 'Filename')
file_tree.text = climate_dict['file']
climate_param = ET.SubElement(climate_tree, 'IBK:Parameter')
climate_param.set('name', climate_dict['param']['name'])
climate_param.set('unit', climate_dict['param']['unit'])
climate_param.text = str(climate_dict['param']['value'])
flag_param = ET.SubElement(climate_tree, 'IBK:Flag')
flag_param.set('name', climate_dict['flag']['name'])
flag_param.text = climate_dict['flag']['value']
elif climate_dict['kind'] == 'Constant':
climate_param = ET.SubElement(climate_tree, 'IBK:Parameter')
climate_param.set('name', climate_dict['param']['name'])
climate_param.set('unit', climate_dict['param']['unit'])
climate_param.text = str(climate_dict['param']['value'])
else:
pass
# Set boundary conditions
# boundary conditions data structure: list with dicts.
# dicts has structure: {'name': 'string',
# 'type': string,
# 'kind': string,
# 'param': list of dicts,
# 'cc_ref': list of dicts
# }
#
# params has dict structure: {'name': string, 'unit': string, 'value': float or int}
# cc_ref has dict structure: {'type': string, 'value': string}
boundaries_tree = ET.SubElement(condition_tree, 'BoundaryConditions')
for boundary_dict in boundary:
boundary_tree = ET.SubElement(boundaries_tree, 'BoundaryCondition')
boundary_tree.set('name', boundary_dict['name'])
boundary_tree.set('type', boundary_dict['type'])
boundary_tree.set('kind', boundary_dict['kind'])
for boundary_parameter in boundary_dict['param']:
boundary_param = ET.SubElement(boundary_tree, 'IBK:Parameter')
boundary_param.set('name', boundary_parameter['name'])
boundary_param.set('unit', boundary_parameter['unit'])
boundary_param.text = str(boundary_parameter['value'])
for cc_parameter in boundary_dict['cc_ref']:
cc_param = ET.SubElement(boundary_tree, 'CCReference')
cc_param.set('type', cc_parameter['type'])
cc_param.text = cc_parameter['value']
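# Illustration only: example argument shapes for create_conditions, following the
# structure comments above (all names, units and values are hypothetical):
#   interfaces = {'indoor': {'type': 'Detailed', 'orientation': 0,
#                            'bc': ['IndoorHeatConduction']},
#                 'outdoor': {'type': 'Detailed', 'bc': ['OutdoorHeatConduction']}}
#   climates = [{'name': 'IndoorTemperature', 'type': 'Temperature', 'kind': 'Constant',
#                'param': {'name': 'Temperature', 'unit': 'C', 'value': 20},
#                'file': None, 'flag': None}]
#   boundary = [{'name': 'IndoorHeatConduction', 'type': 'HeatConduction', 'kind': 'Exchange',
#                'param': [{'name': 'ExchangeCoefficient', 'unit': 'W/m2K', 'value': 8}],
#                'cc_ref': [{'type': 'Temperature', 'value': 'IndoorTemperature'}]}]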
def create_outputs(self, output_unit, output_grids, output_files):
outputs_tree = ET.SubElement(self.xml_tree, 'Outputs')
# Set general unit parameter
unit_tree = ET.SubElement(outputs_tree, 'IBK:Unit')
unit_tree.set('name', output_unit['name'])
unit_tree.text = output_unit['value']
# Set output grids
# output grids data structure: list with dicts.
# dicts has structure: {'name': 'string',
# 'intervals': list of dicts
# }
#
# interval has structure: {'name': string, 'unit': string, 'value': float or int}
output_grids_tree = ET.SubElement(outputs_tree, 'OutputGrids')
for output_grid in output_grids:
output_grid_tree = ET.SubElement(output_grids_tree, 'OutputGrid')
output_grid_tree.set('name', output_grid['name'])
for interval in output_grid['intervals']:
interval_tree = ET.SubElement(output_grid_tree, 'Interval')
interval_tree.set('name', interval['name'])
interval_tree.set('unit', interval['unit'])
interval_tree.text = str(interval['value'])
# Set output files
# output files data structure: list with dicts.
# dicts has structure: {'name': 'string',
# 'quantity': {'unit': string, 'value': string},
# 'time_type': string,
# 'space_type': string,
# 'output_grid': string
# }
output_files_tree = ET.SubElement(outputs_tree, 'OutputFiles')
for output_file in output_files:
output_file_tree = ET.SubElement(output_files_tree, 'OutputFile')
output_file_tree.set('name', output_file['name'])
quantity_param = ET.SubElement(output_file_tree, 'Quantity')
quantity_param.set('unit', output_file['quantity']['unit'])
quantity_param.text = output_file['quantity']['value']
def create_assignments(self, assignment_list):
# Set assignments
# assignments data structure: list with dicts.
# dicts has structure: {'type': 'string',
# 'location': string,
# 'reference': string,
# 'range': list of ints or None,
# 'point': list of floats or None
# }
assignments_tree = ET.SubElement(self.xml_tree, 'Assignments')
for assignment in assignment_list:
assignment_tree = ET.SubElement(assignments_tree, 'Assignment')
assignment_tree.set('type', assignment['type'])
assignment_tree.set('location', assignment['location'])
reference_param = ET.SubElement(assignment_tree, 'Reference')
reference_param.text = assignment['reference']
if assignment['type'] == 'Material' or assignment['type'] == 'Interface':
range_param = ET.SubElement(assignment_tree, 'Range')
range_param.text = ' '.join(map(str, assignment['range']))
elif assignment['type'] == 'Output':
if assignment['location'] == 'Coordinate':
point_param = ET.SubElement(assignment_tree, 'IBK:Point3D')
point_param.text = ' '.join(map(str, assignment['point']))
else:
pass
else:
pass
def create_xml(self):
delphin_tree = ET.ElementTree(self.xml_tree)
return delphin_tree
def write_xml(self, path):
xml_file = self.create_xml()
xml_file.write(path, xml_declaration=True)
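# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only): building a small Delphin 6 project file
# with the class above. The attribute values, comment text and output path are
# hypothetical placeholders, not values taken from a real RiBuild project.
def _demo_build_delphin_project(output_path='demo_project.d6p'):
    project = Delphin6File()
    project.create_attributes({'xmlns': 'http://www.example.org/DelphinProject',
                               'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
                               'xmlns:IBK': 'http://www.example.org/IBK',
                               'xsi:schemaLocation': 'http://www.example.org/DelphinProject.xsd',
                               'file_version': '6.0'})
    project.create_project_info(comment='Demo project built with Delphin6File')
    project.create_directory_placeholders()  # falls back to the default Delphin 6 install paths
    project.create_discretization(['0.001', '0.01', '0.1'])
    project.write_xml(output_path)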
|
from .messenger import BaseMessenger
__all__ = [
"BaseMessenger",
]
|
# Generated by Django 3.0 on 2021-07-22 10:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('faq', '0010_auto_20210722_1806'),
]
operations = [
migrations.RenameField(
model_name='category',
old_name='title_category',
new_name='tittle_category',
),
migrations.RemoveField(
model_name='question',
name='category',
),
]
|
# python standard lib
import os
import sys
import signal
import logging
import math
import datetime as dt
import time
import json
from json import dumps
from pathlib import Path
import traceback
from dataclasses import asdict
import asyncio
# external libs
import socketio
import tornado
import tornado.web
#import tornado.websocket
# internals
from .. import settings
from .. import controller
from .. import utils
from .. import loggers
from .. import gpio
here = Path(os.path.dirname(__file__)).resolve()
main_logger = logging.getLogger("main")
class BaseHandler(tornado.web.RequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def body_to_json(self):
body = self.request.body
if not body: body = b'{}'
return json.loads(body)
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS, HEAD, PUT')
self.set_header("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type: application/json, Accept, Authorization")
def get_current_user(self):
""" Overrides method, gets called ones when accessing 'self.current_user' """
r = self.get_secure_cookie("audio_controller_user")
if r is not None:
return r.decode("utf-8")
return r
def set_cookie_username(self, username: str = ""):
self.set_secure_cookie("audio_controller_user", username.encode("utf-8"))
def logged_in(self):
""" Return True if user is logged in, False otherwise. """
return bool(self.current_user)
def login_required(self):
""" Check if login is required, which is always except when request comes from localhost """
return not self.is_localhost()
def is_localhost(self):
""" Return True if request comes from localhost (when port is 5000 this is True). False otherwise """
return self.request.host.endswith(":5000")
def write_login_exception(self):
self.write(dumps({'LoginException': 'Please login first'}))
def get_action(path: str):
""" get action from path, where path is assumed to be '/{controller}/{action}.*' """
items = path.lstrip("/").split("/")
if len(items) > 1:
return items[1]
return None
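# Quick illustration of get_action with hypothetical request paths:
#   get_action("/general/getSettings") -> "getSettings"
#   get_action("/login/logout")        -> "logout"
#   get_action("/general")             -> None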
def get_js_filename():
""" Rename main.js to an unique file name, to force reload. Return latest file renamed from main.js. """
js_dir = here.parent / 'static' / 'js'
names = os.listdir(str(js_dir))
names = [n for n in names if n.startswith("main") and n.endswith(".js")]
if "main.js" in names:
for n in names:
if n != "main.js":
os.remove(js_dir / n)
new_name = f"main-{int(time.time())}.js"
os.rename(js_dir / "main.js", js_dir / new_name)
return new_name
else:
return sorted(names)[-1]
class Main(BaseHandler):
def get(self):
self.render("index.html", title="Title", page="home", js_filename=get_js_filename())
def post(self):
self.write("")
class Login(BaseHandler):
def check_user(self, username, password):
""" Check if user has provided correct password to login """
if username is None or password is None:
return False
return utils.check_user(username, password)
def post(self):
action = get_action(self.request.path)
if action == 'login_required':
self.write(dumps({'login_required': self.login_required()}))
return
if action == 'login':
# check if already logged in (reading cookie)
if self.current_user: # not None and not empty string
return self.write(dumps({'success': True}))
else:
# try login if arguments are provided
args = self.body_to_json()
# self.get_arguments
# if 'username' in args and 'password' in args:
username = str(args.get('username'))
password = str(args.get('password'))
if self.check_user(username, password):
msg = f"Login user {username}"
print(msg)
main_logger.info(msg)
self.set_cookie_username(username) # assumes unique usernames
self.write(dumps({'success': True}))
else:
msg = f"Login failed for user {username}"
print(msg)
main_logger.info(msg)
self.write(dumps({'success': False}))
elif action == 'logout':
# remove cookie user
self.set_cookie_username("")
self.write(dumps({'success': True}))
# self.redirect_relative("/") # not used, implemented client side
# elif action == 'register': ??
class General(BaseHandler):
async def post(self):
action = get_action(self.request.path)
if action == 'connected':
if controller.itec.serial is None:
raise tornado.web.HTTPError(503)  # 503 = Service Unavailable
self.write(dumps({'success': True}))
return
if self.login_required() and not self.logged_in():
self.write(dumps({'success': False}))
return
def write_settings():
self.write(dumps(asdict(settings.settings)))
def write_sources():
self.write(dumps([asdict(obj) for obj in settings.sources]))
def write_destinations():
self.write(dumps([asdict(obj) for obj in settings.destinations]))
if action == 'restoreSettings':
settings.restore()
write_settings()
return
elif action == 'getSettings':
write_settings()
return
elif action == 'setSettings':
args = self.body_to_json()
settings.update_settings(args)
controller.set_routes()
loggers.enable(settings.settings.enable_logging)
write_settings()
await notify_change()
return
elif action == 'getSources':
write_sources()
return
elif action == 'setSources':
args = self.body_to_json()
sources = args['sources']
settings.update_sources(sources)
controller.set_routes()
write_sources()
await notify_change()
return
elif action == 'getDestinations':
write_destinations()
return
elif action == 'setDestinations':
args = self.body_to_json()
destinations = args['destinations']
settings.update_destinations(destinations)
controller.set_routes()
write_destinations()
await notify_change()
return
elif action == 'getInputLevels':
levels = controller.config.current_levels
self.write(dumps(levels))
return
elif action == 'downloadLog':
self.write(loggers.get_logs_as_binary())
return
elif action == 'ifconfig':
self.write(os.popen('ifconfig').read())
return
elif action == 'reboot':
os.system("shutdown -r now")
return
elif action == 'shutdown':
os.system("shutdown now")
return
elif action == 'getRoutes':
self.write(controller.get_routes())
return
elif action == 'downloadSettings':
self.write(settings.get_binary())
return
elif action == 'uploadSettings':
file_content = self.request.files['file'][0]['body']
settings.set_binary(file_content)
self.write(dumps({'success': True}))
return
elif action == 'test_gpio':
if gpio.is_enabled:
await gpio.test_async()
return
async def notify_change():
""" Notify clients that there has been changed something, like a setting """
for server in websocket_servers:
await server.emit("event", "change")
websocket_servers: list = []  # one socketio server per tornado application/port
def websocket_handlers(sio: socketio.Server):
if sio in websocket_servers:
return
# multiple (tornado) applications run on different ports, so for each a server exists
websocket_servers.append(sio)
@sio.event
async def connect(sid, environ):
handler = environ['tornado.handler']
handler = BaseHandler(handler.application, handler.request)
if handler.login_required() and not handler.logged_in():
print("Unauthorized websocket usage, websocket closed.")
return False
# print('connect ', sid)
@sio.event
def disconnect(sid):
pass # print('disconnect ', sid)
@sio.event
def event(sid, data):
pass # print("event catched")
# print(data)
# @sio.on('my custom event')
# def another_event(sid, data):
# print("custom event catched")
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pandas as pd
import sys
from collections.abc import Iterable
import numpy as np
import pkg_resources
import bz2
import pickle
import os
from clonesig.estimator import Estimator
from pandas.errors import EmptyDataError
from clonesig.evaluate import score_sig_1D_base
MIXTURE_THRESHOLD = 0.05
"""
folder_path = 'PhylogicNDT500/Sim_500_19_cst'
"""
folder_path = sys.argv[1]
signature_filename = 'data/sigProfiler_SBS_signatures_2018_03_28.csv'
sig = pd.read_csv(
pkg_resources.resource_stream(
'clonesig', signature_filename),
sep=',')
all_sigs = sig.columns[2:].to_list()
input_filename = '{}/input_t.tsv'.format(folder_path)
input_df = pd.read_csv(input_filename, sep='\t')
input_df = input_df.assign(signature=input_df.signature.astype(int))
true_signatures_1D_df = input_df[['mutation_id', 'signature']]
# tracksig
try:
mixture_file = pd.read_csv('{}/tracksig/tracksig_mixtures_cancertype.csv'.
format(folder_path), sep=',')
except FileNotFoundError:
    print('tracksig mixture file not found for {}'.format(folder_path))
try:
changepoint_file = pd.read_csv(
'{}/tracksig/tracksig_changepoints_cancertype.txt'.
format(folder_path), header=None, sep=' ')
changepoints_tracksig_list = changepoint_file.values[0]
except EmptyDataError:
changepoints_tracksig_list = np.array(list())
input_df = pd.read_csv('{}/input_t.tsv'.format(folder_path), sep='\t')
with open('{}/purity.txt'.format(folder_path), 'r') as f:
purity = float(f.read())
input_df = input_df.assign(mut_cn=1)
input_df = input_df.assign(vaf=input_df.var_counts /
(input_df.ref_counts + input_df.var_counts))
input_df = input_df.assign(
total_cn=lambda x: x['minor_cn'] + x['major_cn'])
input_df = input_df.assign(
vaf_cn=input_df.vaf * input_df['total_cn'] / input_df['mut_cn'])
input_df = input_df.assign(
vaf_purity=input_df.apply(
lambda x: x['vaf']/purity *
((1 - purity) * 2 + purity * x['total_cn']) /
x['mut_cn'], axis=1))
input_df.sort_values(by='vaf_purity', inplace=True)
input_df.reset_index(inplace=True, drop=True)
input_df = input_df.assign(mutation_group=lambda x: x.index//100)
nbin = len(input_df)//100
input_df_filter = input_df[input_df.mutation_group <= nbin - 1]
cluster_id_list = np.zeros(input_df_filter.mutation_group.nunique())
i = 1
for chg_point in changepoints_tracksig_list:
cluster_id_list[(chg_point - 1):] = i
i += 1
input_df_filter = input_df_filter.assign(
pred_cluster_id=input_df_filter.apply(
lambda x: int(cluster_id_list[x['mutation_group']]), axis=1))
pred_signatures = np.zeros(len(all_sigs))
filename = '{}/subMU.csv'.format(folder_path)
sub_matrix = pd.read_csv(filename, sep='\t')
mu_mat_setting = sub_matrix[sub_matrix.columns[1:]].values.T
sub_sigs = sub_matrix.columns[1:]
idx = [list(all_sigs).index(s) for s in sub_sigs]
est_sigs = mixture_file[mixture_file.columns[1:]].mean(axis=1).values
pred_signatures[idx] = est_sigs
pred_profile = est_sigs.dot(mu_mat_setting)
mut_sig = np.moveaxis(
np.repeat([mixture_file.values[:, 1:].T[input_df_filter.mutation_group.astype(int)]],
96, axis=0), [0, 1, 2], [2, 0, 1])
big_mu = np.repeat([mu_mat_setting], len(input_df_filter), axis=0)
big_everything = (mut_sig * big_mu / len(est_sigs))[np.arange(len(input_df_filter)), :, input_df_filter.trinucleotide]
signature_mut = np.argmax(big_everything, axis=1)
input_df_filter = input_df_filter.assign(signature=signature_mut)
tracksig_signatures_1D_df = input_df_filter[['mutation_id', 'signature']]
final_1D_df = pd.merge(true_signatures_1D_df, tracksig_signatures_1D_df, on='mutation_id', how='inner', suffixes=['_true', '_tracksig'])
score_1D_tracksig = score_sig_1D_base(final_1D_df.signature_true.values, final_1D_df.signature_tracksig.values.astype(int))
# tracksigfreq
try:
mixture_file = pd.read_csv('{}/tracksigfreq/tracksigfreq_mixtures_cancertype.csv'.
format(folder_path), sep=',')
except FileNotFoundError:
    print('tracksigfreq mixture file not found for {}'.format(folder_path))
try:
changepoint_file = pd.read_csv(
'{}/tracksigfreq/tracksigfreq_changepoints_cancertype.txt'.
format(folder_path), header=None, sep=' ')
changepoints_tracksig_list = changepoint_file.values[0]
except EmptyDataError:
changepoints_tracksig_list = np.array(list())
data_df = pd.read_csv('{}/tracksigfreq/vcaf.csv'.
format(folder_path), sep='\t')
cluster_id_list = np.zeros(data_df.bin.nunique())
i = 1
for chg_point in changepoints_tracksig_list:
cluster_id_list[(chg_point - 1):] = i
i += 1
data_df = data_df.assign(
pred_cluster_id=data_df.apply(lambda x: int(cluster_id_list[x['bin']-1]),
axis=1))
J_pred = len(changepoints_tracksig_list) + 1
weights_pred = data_df.groupby('pred_cluster_id').phi.count().values/len(data_df)
phi_pred_values = data_df.groupby('pred_cluster_id').phi.mean().values
est_clonal_idx = data_df.groupby('pred_cluster_id').phi.mean().idxmax()
data_df = data_df.assign(
pred_subclonal=(data_df.pred_cluster_id != est_clonal_idx).astype(int))
pred_signatures = np.zeros(len(all_sigs))
filename = '{}/subMU.csv'.format(folder_path)
sub_matrix = pd.read_csv(filename, sep='\t')
mu_mat_setting = sub_matrix[sub_matrix.columns[1:]].values.T
sub_sigs = sub_matrix.columns[1:]
idx = [list(all_sigs).index(s) for s in sub_sigs]
est_sigs = mixture_file[mixture_file.columns[1:]].mean(axis=1).values
pred_signatures[idx] = est_sigs
pred_profile = est_sigs.dot(mu_mat_setting)
mut_sig = np.moveaxis(
np.repeat([mixture_file.values[:, 1:].T[data_df.bin.astype(int)-1]],
96, axis=0), [0, 1, 2], [2, 0, 1])
big_mu = np.repeat([mu_mat_setting], len(data_df), axis=0)
big_everything = (mut_sig * big_mu / len(est_sigs))[np.arange(len(data_df)), :, data_df.trinucleotide]
signature_mut = np.argmax(big_everything, axis=1)
data_df = data_df.assign(signature=signature_mut)
tracksigfreq_signatures_1D_df = data_df[['mutation_id', 'signature']]
final_1D_df = pd.merge(true_signatures_1D_df, tracksigfreq_signatures_1D_df, on='mutation_id', how='inner', suffixes=['_true', '_tracksigfreq'])
score_1D_tracksigfreq = score_sig_1D_base(final_1D_df.signature_true.values, final_1D_df.signature_tracksigfreq.values.astype(int))
old_score_filename = [i for i in os.listdir(folder_path) if 'result_evaluation' in i][0]
old_score = pd.read_csv('{}/{}'.format(folder_path, old_score_filename), sep='\t')
old_score.loc[old_score.method=='tracksig', 'score_sig_1D'] = score_1D_tracksig
old_score.loc[old_score.method=='tracksigfreq', 'score_sig_1D'] = score_1D_tracksigfreq
new_name = old_score_filename.split('.')[0] + '_new.csv'
old_score.to_csv('{}/{}'.format(folder_path, new_name),
sep='\t', index=False)
|
"""empty message
Revision ID: 79aa6da15aab
Revises: b54b75e1c56c
Create Date: 2020-09-20 15:34:30.232062
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '79aa6da15aab'
down_revision = 'b54b75e1c56c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('artists', 'facebook_link',
existing_type=sa.VARCHAR(length=120),
nullable=True)
op.alter_column('artists', 'genres',
existing_type=sa.VARCHAR(length=120),
nullable=False)
op.alter_column('artists', 'image_link',
existing_type=sa.VARCHAR(length=500),
nullable=True)
op.add_column('shows', sa.Column('artist_id', sa.Integer(), nullable=False))
op.add_column('shows', sa.Column('venue_id', sa.Integer(), nullable=False))
op.create_foreign_key(None, 'shows', 'artists', ['artist_id'], ['id'])
op.create_foreign_key(None, 'shows', 'venues', ['venue_id'], ['id'])
op.drop_column('shows', 'category')
op.add_column('venues', sa.Column('seeking_talent', sa.Boolean(), nullable=False))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('venues', 'seeking_talent')
op.add_column('shows', sa.Column('category', sa.VARCHAR(), autoincrement=False, nullable=False))
op.drop_constraint(None, 'shows', type_='foreignkey')
op.drop_constraint(None, 'shows', type_='foreignkey')
op.drop_column('shows', 'venue_id')
op.drop_column('shows', 'artist_id')
op.alter_column('artists', 'image_link',
existing_type=sa.VARCHAR(length=500),
nullable=False)
op.alter_column('artists', 'genres',
existing_type=sa.VARCHAR(length=120),
nullable=True)
op.alter_column('artists', 'facebook_link',
existing_type=sa.VARCHAR(length=120),
nullable=False)
# ### end Alembic commands ###
|
# ABC119b
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
n = int(input())
x = [list(input().split()) for _ in range(n)]
ans = 0
for i in x:
if (i[1] == 'BTC'):
ans += float(i[0]) * 380000
else:
ans += int(i[0])
print(ans)
|
from django import forms
from django.core.validators import MaxValueValidator, MinValueValidator
class user_id_form(forms.Form):
userId = forms.IntegerField()
class text_form(forms.Form):
    query = forms.CharField(max_length=100)
|
from setuptools import setup
setup(
name='representjs',
version='1.0',
packages=["representjs"],
python_requires=">=3.7",
install_requires=[
"fire",
"graphviz",
"jsbeautifier",
"jsonlines",
"pyjsparser",
"tqdm",
"requests",
"regex",
"loguru",
"pyarrow",
# Data
"matplotlib",
"numpy",
"pandas",
"seaborn",
# PyTorch
"pytorch-lightning",
"torch",
"torchtext",
"wandb",
# NLP dependencies
"sentencepiece",
"sacremoses",
"transformers>=3.2.0",
"tokenizers",
"datasets",
"allennlp",
],
extras_require={"test": ["pytest"]}
)
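# The "test" extra defined above installs the test-only dependencies on demand,
# e.g. from a checkout of this repository (path assumed):
#
#   pip install -e ".[test]"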
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class INVItem(scrapy.Item):
date_str = scrapy.Field()
company = scrapy.Field()
company_url = scrapy.Field()
industry = scrapy.Field()
industry_url = scrapy.Field()
money_stage = scrapy.Field()
money_spans = scrapy.Field()
group_nolink = scrapy.Field() # list
group_text = scrapy.Field() # list
group_href = scrapy.Field() # list
view_url = scrapy.Field()
class IPOItem(scrapy.Item):
date_str = scrapy.Field()
company = scrapy.Field()
company_url = scrapy.Field()
industry = scrapy.Field()
industry_url = scrapy.Field()
money_spans = scrapy.Field()
place = scrapy.Field()
place_url = scrapy.Field()
view_url = scrapy.Field()
class MAItem(scrapy.Item):
date_str = scrapy.Field()
company = scrapy.Field()
company_url = scrapy.Field()
industry = scrapy.Field()
industry_url = scrapy.Field()
money_spans = scrapy.Field()
company2 = scrapy.Field()
company2_url = scrapy.Field()
view_url = scrapy.Field()
class PEItem(scrapy.Item):
date_str = scrapy.Field()
fund = scrapy.Field()
fund_url = scrapy.Field()
group = scrapy.Field()
group_url = scrapy.Field()
money_spans = scrapy.Field()
view_url = scrapy.Field()
|
#!/usr/bin/env python3.6
# coding=utf-8
"""
Module to import data from loo02.pl, parse the JSON and send it back to the AirMonitor API interface.
"""
import json
import os
import time
import urllib3
from lib.airmonitor_common_libs import _send_data_to_api, get_content, logger_initialization
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
LOOK_API = os.environ["LOOK_API"]
LOOK_TOKEN = int(os.environ["LOOK_TOKEN"])
LOG_LEVEL = os.environ["LOG_LEVEL"]
LOGGER = logger_initialization()
def all_data():
api_content = get_content(url=f"{LOOK_API}{LOOK_TOKEN}")
looko2_all_stations = json.loads(api_content)
LOGGER.debug("LOOKO2 all stations %s", looko2_all_stations)
return looko2_all_stations
def looko2(event, context):
epoch_time = time.time()
result_ids = []
result_latitude = []
result_longitude = []
result_epoch = []
final_list_of_measurements_in_dictionary = []
looko2_all_stations = all_data()
try:
result_ids = [ids["Device"] for ids in looko2_all_stations]
result_latitude = [latitude["Lat"] for latitude in looko2_all_stations]
result_longitude = [longitude["Lon"] for longitude in looko2_all_stations]
result_epoch = [epoch["Epoch"] for epoch in looko2_all_stations]
except KeyError:
pass
LOGGER.debug("result_ids %s", result_ids)
LOGGER.debug("result_latitude %s", result_latitude)
LOGGER.debug("result_longitude %s", result_longitude)
LOGGER.debug("result_epoch %s", result_epoch)
merged_ids_lat_long = list(zip(result_ids, result_latitude, result_longitude, result_epoch))
LOGGER.debug("merged_ids_lat_long %s", merged_ids_lat_long)
for values in merged_ids_lat_long:
LOGGER.debug("values %s", values)
looko2_sensor_data = get_content(
url=f"http://api.looko2.com/?method=GetLOOKO&id={values[0]}&token={LOOK_TOKEN}"
)
try:
looko2_sensor_data = json.loads(looko2_sensor_data)
        except (ValueError, TypeError):
            looko2_sensor_data = 0
LOGGER.debug("looko2_sensor_data %s", looko2_sensor_data)
if looko2_sensor_data != 0 and (55 > float(values[1]) > 47) and (epoch_time - 7200) < float(values[3]):
try:
looko2_sensor_data_pm1 = looko2_sensor_data["PM1"]
LOGGER.debug("PM1: %s", looko2_sensor_data_pm1)
except (ValueError, KeyError, IndexError):
looko2_sensor_data_pm1 = 0
try:
looko2_sensor_data_pm25 = looko2_sensor_data["PM25"]
LOGGER.debug("PM2,5: %s", looko2_sensor_data_pm25)
except (ValueError, KeyError, IndexError):
looko2_sensor_data_pm25 = 0
try:
looko2_sensor_data_pm10 = looko2_sensor_data["PM10"]
LOGGER.debug("PM10: %s", looko2_sensor_data_pm10)
except (ValueError, KeyError, IndexError):
looko2_sensor_data_pm10 = 0
data = {
"lat": str(values[1]),
"long": str(values[2]),
"pm1": looko2_sensor_data_pm1,
"pm25": looko2_sensor_data_pm25,
"pm10": looko2_sensor_data_pm10,
"sensor": "looko2",
}
final_list_of_measurements_in_dictionary.append(data)
LOGGER.debug("final_list_of_measurements_in_dictionary %s", final_list_of_measurements_in_dictionary)
_send_data_to_api(final_list_of_measurements_in_dictionary)
return final_list_of_measurements_in_dictionary
if __name__ == "__main__":
looko2("", "")
|
from django.contrib.auth.models import User
from django.db import models
from pupils.models import Class,Pupils
class Teacher(models.Model):
    user = models.OneToOneField(User, on_delete=models.SET_NULL, null=True)
    first_name = models.CharField(max_length=50, null=True)
    last_name = models.CharField(max_length=50, null=True)
    otchestvo = models.CharField(max_length=50, null=True)
    subject = models.ForeignKey('Subject', models.SET_NULL, null=True)
    classes = models.ForeignKey(Class, models.SET_NULL, null=True)
def __str__(self):
return self.first_name
class Subject(models.Model):
subject = models.CharField(max_length=50, null=True)
def __str__(self):
return self.subject
class PupilstoObjects(models.Model):
    subjects = models.ForeignKey(Subject, on_delete=models.SET_NULL, null=True, related_name='to_subjects')
    classes = models.ManyToManyField(Class)  # null=True has no effect on ManyToManyField
class Grade(models.Model):
    pupil = models.ForeignKey(Pupils, on_delete=models.SET_NULL, null=True, related_name='grades')
    subject = models.ForeignKey(Subject, on_delete=models.SET_NULL, null=True)
grade = models.IntegerField(default=1)
date_created = models.DateTimeField(auto_now_add=True)
|
import subprocess
import os
import argparse
import sys
import time
import platform
parser = argparse.ArgumentParser(description="Command Watcher Tool", add_help=True,
usage="cmdwatch -d DELAY [-o OUTPUT_FILE] [-t TIMEOUT] [-s] <cmd>")
parser.add_argument("-d", "--delay", help="How long to wait until next check. Checks every 2 seconds by default",
dest='delay', type=int)
parser.add_argument("-o", "--output", help="File where the output should be stored", dest='output_file')
parser.add_argument("-t", "--timeout", help="For how many seconds should I watch the output", dest='timeout', type=int)
parser.add_argument("-s", "--stop",
help="Pass this option if you want to stop checking whenever there is a change in output",
dest='stop', action='store_true')
try:
args, unknown_args = parser.parse_known_args()
except Exception:
parser.print_help()
sys.exit(0)
delay = args.delay or 2
output_file = args.output_file or None
if len(unknown_args) == 0:
sys.stderr.write("Pass a command to execute. Run cmdwatch --help for instructions")
sys.exit(0)
command = " ".join(unknown_args)
timeout = args.timeout or None
previous_output = ""
try:
while True:
if timeout:
timeout = timeout - delay
command_output = None
try:
command_output = subprocess.check_output(command, shell=True).decode("utf-8")
except subprocess.CalledProcessError as e:
sys.stdout.write(str(e))
break
        if args.stop:
            # User wants to stop watching once the output changes between runs
            if previous_output and command_output != previous_output:
                sys.stdout.write(f"Watching output for \"{command}\" for every {delay} seconds\n\r\n\r")
                sys.stdout.write(command_output + "\r")
                if output_file:
                    with open(output_file, "a+") as fo:
                        fo.write(command_output + "\n" + "---" * 30 + "\n")
                sys.stdout.flush()
                break
            previous_output = command_output
            time.sleep(delay)
            if timeout:
                if timeout <= 0:
                    break
else:
            # User wants to run every time
            sys.stdout.write(f"Watching output for \"{command}\" for every {delay} seconds\n\r\n\r")
            sys.stdout.write(command_output + "\r")
            if output_file:
                with open(output_file, "a+") as fo:
                    fo.write(command_output + "\n" + "---" * 30 + "\n")
sys.stdout.flush()
time.sleep(delay)
if platform.system() == 'Windows':
os.system("cls")
else:
os.system("clear")
if timeout:
if timeout <= 0:
break
except KeyboardInterrupt:
sys.stdout.write("Stopping execution")
def main():
parser.print_help()
if __name__ == '__main__':
args = sys.argv
main()
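# Example invocations (illustrative commands, not part of the tool itself):
#
#   cmdwatch -d 5 "ls -la /tmp"            # re-run every 5 seconds
#   cmdwatch -d 2 -t 60 -o out.log date    # watch for 60 seconds, append output to out.log
#   cmdwatch -d 2 -s "cat counter.txt"     # stop once the output changes between runs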
|
"""
SPDX-License-Identifier: BSD-3
"""
from ._libtpm2_pytss import lib
from .types import *
from .utils import _chkrc, TPM2B_pack, TPM2B_unpack
class ESAPI:
def __init__(self, tcti=None):
tctx = ffi.NULL if tcti is None else tcti.ctx
self.ctx_pp = ffi.new("ESYS_CONTEXT **")
_chkrc(lib.Esys_Initialize(self.ctx_pp, tctx, ffi.NULL))
self.ctx = self.ctx_pp[0]
def __enter__(self):
return self
def __exit__(self, _type, value, traceback):
self.close()
def close(self):
lib.Esys_Finalize(self.ctx_pp)
self.ctx = ffi.NULL
def setAuth(self, esys_tr, auth):
auth_p = TPM2B_pack(auth, "TPM2B_AUTH")
_chkrc(lib.Esys_TR_SetAuth(self.ctx, esys_tr, auth_p))
def Startup(self, startupType):
_chkrc(lib.Esys_Startup(self.ctx, startupType))
def Shutdown(
self,
shutdownType,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(lib.Esys_Shutdown(self.ctx, session1, session2, session3, shutdownType))
def SelfTest(
self,
fullTest,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(lib.Esys_SelfTest(self.ctx, session1, session2, session3, fullTest))
def IncrementalSelfTest(
self,
toTest,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
toDoList = ffi.new("TPML_ALG **")
_chkrc(
lib.Esys_IncrementalSelfTest(
self.ctx, session1, session2, session3, toTest._cdata, toDoList
)
)
return TPML_ALG(toDoList[0])
def GetTestResult(
self, session1=ESYS_TR.NONE, session2=ESYS_TR.NONE, session3=ESYS_TR.NONE
):
outData = ffi.new("TPM2B_MAX_BUFFER **")
testResult = ffi.new("TPM2_RC *")
_chkrc(
lib.Esys_GetTestResult(
self.ctx, session1, session2, session3, outData, testResult
)
)
return (TPM2B_MAX_BUFFER(outData[0]), TPM2_RC(testResult[0]))
def StartAuthSession(
self,
tpmKey,
bind,
nonceCaller,
sessionType,
symmetric,
authHash,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
if nonceCaller is None:
nonceCaller = ffi.NULL
elif isinstance(nonceCaller, TPM_OBJECT):
nonceCaller = nonceCaller._cdata
else:
raise TypeError("Expected nonceCaller to be None or TPM_OBJECT")
sessionHandle = ffi.new("ESYS_TR *")
_chkrc(
lib.Esys_StartAuthSession(
self.ctx,
tpmKey,
bind,
session1,
session2,
session3,
nonceCaller,
sessionType,
symmetric._cdata,
authHash,
sessionHandle,
)
)
sessionHandleObject = sessionHandle[0]
return sessionHandleObject
def TRSess_SetAttributes(self, session, attributes, mask=0xFF):
_chkrc(lib.Esys_TRSess_SetAttributes(self.ctx, session, attributes, mask))
def PolicyRestart(
self,
sessionHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyRestart(
self.ctx, sessionHandle, session1, session2, session3
)
)
def Create(
self,
parentHandle,
inSensitive,
inPublic,
outsideInfo,
creationPCR,
session1=ESYS_TR.PASSWORD,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outPrivate = ffi.new("TPM2B_PRIVATE **")
outPublic = ffi.new("TPM2B_PUBLIC **")
creationData = ffi.new("TPM2B_CREATION_DATA **")
creationHash = ffi.new("TPM2B_DIGEST **")
creationTicket = ffi.new("TPMT_TK_CREATION **")
_chkrc(
lib.Esys_Create(
self.ctx,
parentHandle,
session1,
session2,
session3,
inSensitive._cdata,
inPublic._cdata,
outsideInfo._cdata,
creationPCR._cdata,
outPrivate,
outPublic,
creationData,
creationHash,
creationTicket,
)
)
return (
TPM2B_PRIVATE(outPrivate[0]),
TPM2B_PUBLIC(outPublic[0]),
TPM2B_CREATION_DATA(creationData[0]),
TPM2B_DIGEST(creationHash[0]),
TPMT_TK_CREATION(creationTicket[0]),
)
def Load(
self,
parentHandle,
inPrivate,
inPublic,
session1=ESYS_TR.PASSWORD,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
objectHandle = ffi.new("ESYS_TR *")
_chkrc(
lib.Esys_Load(
self.ctx,
parentHandle,
session1,
session2,
session3,
inPrivate._cdata,
inPublic._cdata,
objectHandle,
)
)
objectHandleObject = objectHandle[0]
return objectHandleObject
def LoadExternal(
self,
inPrivate,
inPublic,
hierarchy,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
objectHandle = ffi.new("ESYS_TR *")
_chkrc(
lib.Esys_LoadExternal(
self.ctx,
session1,
session2,
session3,
inPrivate,
inPublic,
hierarchy,
objectHandle,
)
)
objectHandleObject = objectHandle[0]
return objectHandleObject
def ReadPublic(
self,
objectHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outPublic = ffi.new("TPM2B_PUBLIC **")
name = ffi.new("TPM2B_NAME **")
qualifiedName = ffi.new("TPM2B_NAME **")
_chkrc(
lib.Esys_ReadPublic(
self.ctx,
objectHandle,
session1,
session2,
session3,
outPublic,
name,
qualifiedName,
)
)
return (
TPM2B_PUBLIC(outPublic[0]),
TPM2B_NAME(name[0]),
TPM2B_NAME(qualifiedName[0]),
)
def ActivateCredential(
self,
activateHandle,
keyHandle,
credentialBlob,
secret,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
certInfo = ffi.new("TPM2B_DIGEST **")
_chkrc(
lib.Esys_ActivateCredential(
self.ctx,
activateHandle,
keyHandle,
session1,
session2,
session3,
credentialBlob,
secret,
certInfo,
)
)
return certInfo[0]
def MakeCredential(
self,
handle,
credential,
objectName,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
credentialBlob = ffi.new("TPM2B_ID_OBJECT **")
secret = ffi.new("TPM2B_ENCRYPTED_SECRET **")
_chkrc(
lib.Esys_MakeCredential(
self.ctx,
handle,
session1,
session2,
session3,
credential,
objectName,
credentialBlob,
secret,
)
)
return (credentialBlob[0], secret[0])
def Unseal(
self,
itemHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outData = ffi.new("TPM2B_SENSITIVE_DATA **")
_chkrc(
lib.Esys_Unseal(self.ctx, itemHandle, session1, session2, session3, outData)
)
return outData[0]
def ObjectChangeAuth(
self,
objectHandle,
parentHandle,
newAuth,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outPrivate = ffi.new("TPM2B_PRIVATE **")
_chkrc(
lib.Esys_ObjectChangeAuth(
self.ctx,
objectHandle,
parentHandle,
session1,
session2,
session3,
newAuth,
outPrivate,
)
)
return outPrivate[0]
def CreateLoaded(
self,
parentHandle,
inSensitive,
inPublic,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
objectHandle = ffi.new("ESYS_TR *")
outPrivate = ffi.new("TPM2B_PRIVATE **")
outPublic = ffi.new("TPM2B_PUBLIC **")
_chkrc(
lib.Esys_CreateLoaded(
self.ctx,
parentHandle,
session1,
session2,
session3,
inSensitive,
inPublic,
objectHandle,
outPrivate,
outPublic,
)
)
objectHandleObject = objectHandle[0]
return (objectHandleObject, outPrivate[0], outPublic[0])
def Duplicate(
self,
objectHandle,
newParentHandle,
encryptionKeyIn,
symmetricAlg,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
encryptionKeyOut = ffi.new("TPM2B_DATA **")
duplicate = ffi.new("TPM2B_PRIVATE **")
outSymSeed = ffi.new("TPM2B_ENCRYPTED_SECRET **")
_chkrc(
lib.Esys_Duplicate(
self.ctx,
objectHandle,
newParentHandle,
session1,
session2,
session3,
encryptionKeyIn,
symmetricAlg,
encryptionKeyOut,
duplicate,
outSymSeed,
)
)
return (encryptionKeyOut[0], duplicate[0], outSymSeed[0])
def Rewrap(
self,
oldParent,
newParent,
inDuplicate,
name,
inSymSeed,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outDuplicate = ffi.new("TPM2B_PRIVATE **")
outSymSeed = ffi.new("TPM2B_ENCRYPTED_SECRET **")
_chkrc(
lib.Esys_Rewrap(
self.ctx,
oldParent,
newParent,
session1,
session2,
session3,
inDuplicate,
name,
inSymSeed,
outDuplicate,
outSymSeed,
)
)
return (outDuplicate[0], outSymSeed[0])
def Import(
self,
parentHandle,
encryptionKey,
objectPublic,
duplicate,
inSymSeed,
symmetricAlg,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outPrivate = ffi.new("TPM2B_PRIVATE **")
_chkrc(
lib.Esys_Import(
self.ctx,
parentHandle,
session1,
session2,
session3,
encryptionKey,
objectPublic,
duplicate,
inSymSeed,
symmetricAlg,
outPrivate,
)
)
return outPrivate[0]
def RSA_Encrypt(
self,
keyHandle,
message,
inScheme,
label,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outData = ffi.new("TPM2B_PUBLIC_KEY_RSA **")
_chkrc(
lib.Esys_RSA_Encrypt(
self.ctx,
keyHandle,
session1,
session2,
session3,
message,
inScheme,
label,
outData,
)
)
return outData[0]
def RSA_Decrypt(
self,
keyHandle,
cipherText,
inScheme,
label,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
message = ffi.new("TPM2B_PUBLIC_KEY_RSA **")
_chkrc(
lib.Esys_RSA_Decrypt(
self.ctx,
keyHandle,
session1,
session2,
session3,
cipherText,
inScheme,
label,
message,
)
)
return message[0]
def ECDH_KeyGen(
self,
keyHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
zPoint = ffi.new("TPM2B_ECC_POINT **")
pubPoint = ffi.new("TPM2B_ECC_POINT **")
_chkrc(
lib.Esys_ECDH_KeyGen(
self.ctx, keyHandle, session1, session2, session3, zPoint, pubPoint
)
)
return (zPoint[0], pubPoint[0])
def ECDH_ZGen(
self,
keyHandle,
inPoint,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outPoint = ffi.new("TPM2B_ECC_POINT **")
_chkrc(
lib.Esys_ECDH_ZGen(
self.ctx, keyHandle, session1, session2, session3, inPoint, outPoint
)
)
return outPoint[0]
def ECC_Parameters(
self,
curveID,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
parameters = ffi.new("TPMS_ALGORITHM_DETAIL_ECC **")
_chkrc(
lib.Esys_ECC_Parameters(
self.ctx, session1, session2, session3, curveID, parameters
)
)
return parameters[0]
def ZGen_2Phase(
self,
keyA,
inQsB,
inQeB,
inScheme,
counter,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outZ1 = ffi.new("TPM2B_ECC_POINT **")
outZ2 = ffi.new("TPM2B_ECC_POINT **")
_chkrc(
lib.Esys_ZGen_2Phase(
self.ctx,
keyA,
session1,
session2,
session3,
inQsB,
inQeB,
inScheme,
counter,
outZ1,
outZ2,
)
)
return (outZ1[0], outZ2[0])
def EncryptDecrypt(
self,
keyHandle,
decrypt,
mode,
ivIn,
inData,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outData = ffi.new("TPM2B_MAX_BUFFER **")
ivOut = ffi.new("TPM2B_IV **")
_chkrc(
lib.Esys_EncryptDecrypt(
self.ctx,
keyHandle,
session1,
session2,
session3,
decrypt,
mode,
ivIn,
inData,
outData,
ivOut,
)
)
return (outData[0], ivOut[0])
def EncryptDecrypt2(
self,
keyHandle,
inData,
decrypt,
mode,
ivIn,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outData = ffi.new("TPM2B_MAX_BUFFER **")
ivOut = ffi.new("TPM2B_IV **")
_chkrc(
lib.Esys_EncryptDecrypt2(
self.ctx,
keyHandle,
session1,
session2,
session3,
inData,
decrypt,
mode,
ivIn,
outData,
ivOut,
)
)
return (outData[0], ivOut[0])
def Hash(
self,
data,
hashAlg,
hierarchy,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outHash = ffi.new("TPM2B_DIGEST **")
validation = ffi.new("TPMT_TK_HASHCHECK **")
_chkrc(
lib.Esys_Hash(
self.ctx,
session1,
session2,
session3,
data,
hashAlg,
hierarchy,
outHash,
validation,
)
)
return (outHash[0], validation[0])
def HMAC(
self,
handle,
buffer,
hashAlg,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outHMAC = ffi.new("TPM2B_DIGEST **")
_chkrc(
lib.Esys_HMAC(
self.ctx, handle, session1, session2, session3, buffer, hashAlg, outHMAC
)
)
return outHMAC[0]
def GetRandom(
self,
bytesRequested,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
randomBytes = ffi.new("TPM2B_DIGEST **")
_chkrc(
lib.Esys_GetRandom(
self.ctx, session1, session2, session3, bytesRequested, randomBytes
)
)
return TPM2B_unpack(randomBytes[0])
def StirRandom(
self,
inData,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(lib.Esys_StirRandom(self.ctx, session1, session2, session3, inData))
def HMAC_Start(
self,
handle,
auth,
hashAlg,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
sequenceHandle = ffi.new("ESYS_TR *")
_chkrc(
lib.Esys_HMAC_Start(
self.ctx,
handle,
session1,
session2,
session3,
auth,
hashAlg,
sequenceHandle,
)
)
sequenceHandleObject = sequenceHandle[0]
return sequenceHandleObject
def HashSequenceStart(
self,
auth,
hashAlg,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
sequenceHandle = ffi.new("ESYS_TR *")
_chkrc(
lib.Esys_HashSequenceStart(
self.ctx, session1, session2, session3, auth, hashAlg, sequenceHandle
)
)
sequenceHandleObject = sequenceHandle[0]
return sequenceHandleObject
def SequenceUpdate(
self,
sequenceHandle,
buffer,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_SequenceUpdate(
self.ctx, sequenceHandle, session1, session2, session3, buffer
)
)
def SequenceComplete(
self,
sequenceHandle,
buffer,
hierarchy,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
result = ffi.new("TPM2B_DIGEST **")
validation = ffi.new("TPMT_TK_HASHCHECK **")
_chkrc(
lib.Esys_SequenceComplete(
self.ctx,
sequenceHandle,
session1,
session2,
session3,
buffer,
hierarchy,
result,
validation,
)
)
return (result[0], validation[0])
def EventSequenceComplete(
self,
pcrHandle,
sequenceHandle,
buffer,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
results = ffi.new("TPML_DIGEST_VALUES **")
_chkrc(
lib.Esys_EventSequenceComplete(
self.ctx,
pcrHandle,
sequenceHandle,
session1,
session2,
session3,
buffer,
results,
)
)
return results[0]
def Certify(
self,
objectHandle,
signHandle,
qualifyingData,
inScheme,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
certifyInfo = ffi.new("TPM2B_ATTEST **")
signature = ffi.new("TPMT_SIGNATURE **")
_chkrc(
lib.Esys_Certify(
self.ctx,
objectHandle,
signHandle,
session1,
session2,
session3,
qualifyingData,
inScheme,
certifyInfo,
signature,
)
)
return (certifyInfo[0], signature[0])
def CertifyCreation(
self,
signHandle,
objectHandle,
qualifyingData,
creationHash,
inScheme,
creationTicket,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
certifyInfo = ffi.new("TPM2B_ATTEST **")
signature = ffi.new("TPMT_SIGNATURE **")
_chkrc(
lib.Esys_CertifyCreation(
self.ctx,
signHandle,
objectHandle,
session1,
session2,
session3,
qualifyingData,
creationHash,
inScheme,
creationTicket,
certifyInfo,
signature,
)
)
return (certifyInfo[0], signature[0])
def Quote(
self,
signHandle,
qualifyingData,
inScheme,
PCRselect,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
quoted = ffi.new("TPM2B_ATTEST **")
signature = ffi.new("TPMT_SIGNATURE **")
_chkrc(
lib.Esys_Quote(
self.ctx,
signHandle,
session1,
session2,
session3,
qualifyingData,
inScheme,
PCRselect,
quoted,
signature,
)
)
return (quoted[0], signature[0])
def GetSessionAuditDigest(
self,
privacyAdminHandle,
signHandle,
sessionHandle,
qualifyingData,
inScheme,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
auditInfo = ffi.new("TPM2B_ATTEST **")
signature = ffi.new("TPMT_SIGNATURE **")
_chkrc(
lib.Esys_GetSessionAuditDigest(
self.ctx,
privacyAdminHandle,
signHandle,
sessionHandle,
session1,
session2,
session3,
qualifyingData,
inScheme,
auditInfo,
signature,
)
)
return (auditInfo[0], signature[0])
def GetCommandAuditDigest(
self,
privacyHandle,
signHandle,
qualifyingData,
inScheme,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
auditInfo = ffi.new("TPM2B_ATTEST **")
signature = ffi.new("TPMT_SIGNATURE **")
_chkrc(
lib.Esys_GetCommandAuditDigest(
self.ctx,
privacyHandle,
signHandle,
session1,
session2,
session3,
qualifyingData,
inScheme,
auditInfo,
signature,
)
)
return (auditInfo[0], signature[0])
def GetTime(
self,
privacyAdminHandle,
signHandle,
qualifyingData,
inScheme,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
timeInfo = ffi.new("TPM2B_ATTEST **")
signature = ffi.new("TPMT_SIGNATURE **")
_chkrc(
lib.Esys_GetTime(
self.ctx,
privacyAdminHandle,
signHandle,
session1,
session2,
session3,
qualifyingData,
inScheme,
timeInfo,
signature,
)
)
return (timeInfo[0], signature[0])
def Commit(
self,
signHandle,
P1,
s2,
y2,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
K = ffi.new("TPM2B_ECC_POINT **")
L = ffi.new("TPM2B_ECC_POINT **")
E = ffi.new("TPM2B_ECC_POINT **")
counter = ffi.new("UINT16 *")
_chkrc(
lib.Esys_Commit(
self.ctx,
signHandle,
session1,
session2,
session3,
P1,
s2,
y2,
K,
L,
E,
counter,
)
)
return (K[0], L[0], E[0], counter[0])
def EC_Ephemeral(
self,
curveID,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
Q = ffi.new("TPM2B_ECC_POINT **")
counter = ffi.new("UINT16 *")
_chkrc(
lib.Esys_EC_Ephemeral(
self.ctx, session1, session2, session3, curveID, Q, counter
)
)
return (Q[0], counter[0])
def VerifySignature(
self,
keyHandle,
digest,
signature,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
validation = ffi.new("TPMT_TK_VERIFIED **")
_chkrc(
lib.Esys_VerifySignature(
self.ctx,
keyHandle,
session1,
session2,
session3,
digest,
signature,
validation,
)
)
return validation[0]
def Sign(
self,
keyHandle,
digest,
inScheme,
validation,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
signature = ffi.new("TPMT_SIGNATURE **")
_chkrc(
lib.Esys_Sign(
self.ctx,
keyHandle,
session1,
session2,
session3,
digest,
inScheme,
validation,
signature,
)
)
return signature[0]
def SetCommandCodeAuditStatus(
self,
auth,
auditAlg,
setList,
clearList,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_SetCommandCodeAuditStatus(
self.ctx,
auth,
session1,
session2,
session3,
auditAlg,
setList,
clearList,
)
)
def PCR_Extend(
self,
pcrHandle,
digests,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PCR_Extend(
self.ctx, pcrHandle, session1, session2, session3, digests
)
)
def PCR_Event(
self,
pcrHandle,
eventData,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
digests = ffi.new("TPML_DIGEST_VALUES **")
_chkrc(
lib.Esys_PCR_Event(
self.ctx, pcrHandle, session1, session2, session3, eventData, digests
)
)
return digests[0]
def PCR_Read(
self,
pcrSelectionIn,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
pcrUpdateCounter = ffi.new("UINT32 *")
pcrSelectionOut = ffi.new("TPML_PCR_SELECTION **")
pcrValues = ffi.new("TPML_DIGEST **")
_chkrc(
lib.Esys_PCR_Read(
self.ctx,
session1,
session2,
session3,
pcrSelectionIn._cdata,
pcrUpdateCounter,
pcrSelectionOut,
pcrValues,
)
)
return (
pcrUpdateCounter[0],
TPML_PCR_SELECTION(_cdata=pcrSelectionOut[0]),
TPML_DIGEST(_cdata=pcrValues[0]),
)
def PCR_Allocate(
self,
authHandle,
pcrAllocation,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
allocationSuccess = ffi.new("TPMI_YES_NO *")
maxPCR = ffi.new("UINT32 *")
sizeNeeded = ffi.new("UINT32 *")
sizeAvailable = ffi.new("UINT32 *")
_chkrc(
lib.Esys_PCR_Allocate(
self.ctx,
authHandle,
session1,
session2,
session3,
pcrAllocation,
allocationSuccess,
maxPCR,
sizeNeeded,
sizeAvailable,
)
)
return (allocationSuccess[0], maxPCR[0], sizeNeeded[0], sizeAvailable[0])
def PCR_SetAuthPolicy(
self,
authHandle,
authPolicy,
hashAlg,
pcrNum,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PCR_SetAuthPolicy(
self.ctx,
authHandle,
session1,
session2,
session3,
authPolicy,
hashAlg,
pcrNum,
)
)
def PCR_SetAuthValue(
self,
pcrHandle,
auth,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PCR_SetAuthValue(
self.ctx, pcrHandle, session1, session2, session3, auth
)
)
def PCR_Reset(
self,
pcrHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(lib.Esys_PCR_Reset(self.ctx, pcrHandle, session1, session2, session3))
def PolicySigned(
self,
authObject,
policySession,
nonceTPM,
cpHashA,
policyRef,
expiration,
auth,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
timeout = ffi.new("TPM2B_TIMEOUT **")
policyTicket = ffi.new("TPMT_TK_AUTH **")
_chkrc(
lib.Esys_PolicySigned(
self.ctx,
authObject,
policySession,
session1,
session2,
session3,
nonceTPM,
cpHashA,
policyRef,
expiration,
auth,
timeout,
policyTicket,
)
)
return (timeout[0], policyTicket[0])
def PolicySecret(
self,
authHandle,
policySession,
nonceTPM,
cpHashA,
policyRef,
expiration,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
timeout = ffi.new("TPM2B_TIMEOUT **")
policyTicket = ffi.new("TPMT_TK_AUTH **")
_chkrc(
lib.Esys_PolicySecret(
self.ctx,
authHandle,
policySession,
session1,
session2,
session3,
nonceTPM,
cpHashA,
policyRef,
expiration,
timeout,
policyTicket,
)
)
return (timeout[0], policyTicket[0])
def PolicyTicket(
self,
policySession,
timeout,
cpHashA,
policyRef,
authName,
ticket,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyTicket(
self.ctx,
policySession,
session1,
session2,
session3,
timeout,
cpHashA,
policyRef,
authName,
ticket,
)
)
def PolicyOR(
self,
policySession,
pHashList,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyOR(
self.ctx, policySession, session1, session2, session3, pHashList
)
)
def PolicyPCR(
self,
policySession,
pcrDigest,
pcrs,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyPCR(
self.ctx, policySession, session1, session2, session3, pcrDigest, pcrs
)
)
def PolicyLocality(
self,
policySession,
locality,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyLocality(
self.ctx, policySession, session1, session2, session3, locality
)
)
def PolicyNV(
self,
authHandle,
nvIndex,
policySession,
operandB,
offset,
operation,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyNV(
self.ctx,
authHandle,
nvIndex,
policySession,
session1,
session2,
session3,
operandB,
offset,
operation,
)
)
def PolicyCounterTimer(
self,
policySession,
operandB,
offset,
operation,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyCounterTimer(
self.ctx,
policySession,
session1,
session2,
session3,
operandB,
offset,
operation,
)
)
def PolicyCommandCode(
self,
policySession,
code,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyCommandCode(
self.ctx, policySession, session1, session2, session3, code
)
)
def PolicyPhysicalPresence(
self,
policySession,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyPhysicalPresence(
self.ctx, policySession, session1, session2, session3
)
)
def PolicyCpHash(
self,
policySession,
cpHashA,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyCpHash(
self.ctx, policySession, session1, session2, session3, cpHashA
)
)
def PolicyNameHash(
self,
policySession,
nameHash,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyNameHash(
self.ctx, policySession, session1, session2, session3, nameHash
)
)
def PolicyDuplicationSelect(
self,
policySession,
objectName,
newParentName,
includeObject,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyDuplicationSelect(
self.ctx,
policySession,
session1,
session2,
session3,
objectName,
newParentName,
includeObject,
)
)
def PolicyAuthorize(
self,
policySession,
approvedPolicy,
policyRef,
keySign,
checkTicket,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyAuthorize(
self.ctx,
policySession,
session1,
session2,
session3,
approvedPolicy,
policyRef,
keySign,
checkTicket,
)
)
def PolicyAuthValue(
self,
policySession,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyAuthValue(
self.ctx, policySession, session1, session2, session3
)
)
def PolicyPassword(
self,
policySession,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyPassword(
self.ctx, policySession, session1, session2, session3
)
)
def PolicyGetDigest(
self,
policySession,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
policyDigest = ffi.new("TPM2B_DIGEST **")
_chkrc(
lib.Esys_PolicyGetDigest(
self.ctx, policySession, session1, session2, session3, policyDigest
)
)
return policyDigest[0]
def PolicyNvWritten(
self,
policySession,
writtenSet,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyNvWritten(
self.ctx, policySession, session1, session2, session3, writtenSet
)
)
def PolicyTemplate(
self,
policySession,
templateHash,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyTemplate(
self.ctx, policySession, session1, session2, session3, templateHash
)
)
def PolicyAuthorizeNV(
self,
authHandle,
nvIndex,
policySession,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PolicyAuthorizeNV(
self.ctx,
authHandle,
nvIndex,
policySession,
session1,
session2,
session3,
)
)
def CreatePrimary(
self,
primaryHandle,
inSensitive,
inPublic,
outsideInfo,
creationPCR,
session1=ESYS_TR.PASSWORD,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
objectHandle = ffi.new("ESYS_TR *")
outPublic = ffi.new("TPM2B_PUBLIC **")
creationData = ffi.new("TPM2B_CREATION_DATA **")
creationHash = ffi.new("TPM2B_DIGEST **")
creationTicket = ffi.new("TPMT_TK_CREATION **")
_chkrc(
lib.Esys_CreatePrimary(
self.ctx,
primaryHandle,
session1,
session2,
session3,
inSensitive._cdata,
inPublic._cdata,
outsideInfo._cdata,
creationPCR._cdata,
objectHandle,
outPublic,
creationData,
creationHash,
creationTicket,
)
)
return (
ESYS_TR(objectHandle[0]),
TPM2B_PUBLIC(_cdata=outPublic[0]),
TPM2B_CREATION_DATA(_cdata=creationData[0]),
TPM2B_DIGEST(_cdata=creationHash[0]),
TPMT_TK_CREATION(_cdata=creationTicket[0]),
)
def HierarchyControl(
self,
authHandle,
enable,
state,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_HierarchyControl(
self.ctx, authHandle, session1, session2, session3, enable, state
)
)
def SetPrimaryPolicy(
self,
authHandle,
authPolicy,
hashAlg,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_SetPrimaryPolicy(
self.ctx, authHandle, session1, session2, session3, authPolicy, hashAlg
)
)
def ChangePPS(
self,
authHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(lib.Esys_ChangePPS(self.ctx, authHandle, session1, session2, session3))
def ChangeEPS(
self,
authHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(lib.Esys_ChangeEPS(self.ctx, authHandle, session1, session2, session3))
def Clear(
self,
authHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(lib.Esys_Clear(self.ctx, authHandle, session1, session2, session3))
def ClearControl(
self,
auth,
disable,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_ClearControl(self.ctx, auth, session1, session2, session3, disable)
)
def HierarchyChangeAuth(
self,
authHandle,
newAuth,
session1=ESYS_TR.PASSWORD,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_HierarchyChangeAuth(
self.ctx,
authHandle,
session1,
session2,
session3,
TPM2B_pack(newAuth, t="TPM2B_AUTH"),
)
)
def DictionaryAttackLockReset(
self,
lockHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_DictionaryAttackLockReset(
self.ctx, lockHandle, session1, session2, session3
)
)
def DictionaryAttackParameters(
self,
lockHandle,
newMaxTries,
newRecoveryTime,
lockoutRecovery,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_DictionaryAttackParameters(
self.ctx,
lockHandle,
session1,
session2,
session3,
newMaxTries,
newRecoveryTime,
lockoutRecovery,
)
)
def PP_Commands(
self,
auth,
setList,
clearList,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_PP_Commands(
self.ctx, auth, session1, session2, session3, setList, clearList
)
)
def SetAlgorithmSet(
self,
authHandle,
algorithmSet,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_SetAlgorithmSet(
self.ctx, authHandle, session1, session2, session3, algorithmSet
)
)
def FieldUpgradeStart(
self,
authorization,
keyHandle,
fuDigest,
manifestSignature,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_FieldUpgradeStart(
self.ctx,
authorization,
keyHandle,
session1,
session2,
session3,
fuDigest,
manifestSignature,
)
)
def FieldUpgradeData(
self,
fuData,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
nextDigest = ffi.new("TPMT_HA **")
firstDigest = ffi.new("TPMT_HA **")
_chkrc(
lib.Esys_FieldUpgradeData(
self.ctx, session1, session2, session3, fuData, nextDigest, firstDigest
)
)
return (nextDigest[0], firstDigest[0])
def FirmwareRead(
self,
sequenceNumber,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
fuData = ffi.new("TPM2B_MAX_BUFFER **")
_chkrc(
lib.Esys_FirmwareRead(
self.ctx, session1, session2, session3, sequenceNumber, fuData
)
)
return fuData[0]
def ContextSave(self, saveHandle):
context = ffi.new("TPMS_CONTEXT **")
_chkrc(lib.Esys_ContextSave(self.ctx, saveHandle, context))
return context[0]
def ContextLoad(self, context):
loadedHandle = ffi.new("ESYS_TR *")
_chkrc(lib.Esys_ContextLoad(self.ctx, context, loadedHandle))
loadedHandleObject = loadedHandle[0]
return loadedHandleObject
def FlushContext(self, flushHandle):
_chkrc(lib.Esys_FlushContext(self.ctx, flushHandle))
def EvictControl(
self,
auth,
objectHandle,
persistentHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
newObjectHandle = ffi.new("ESYS_TR *")
_chkrc(
lib.Esys_EvictControl(
self.ctx,
auth,
objectHandle,
session1,
session2,
session3,
persistentHandle,
newObjectHandle,
)
)
newObjectHandleObject = newObjectHandle[0]
return newObjectHandleObject
def ReadClock(
self, session1=ESYS_TR.NONE, session2=ESYS_TR.NONE, session3=ESYS_TR.NONE
):
currentTime = ffi.new("TPMS_TIME_INFO **")
_chkrc(lib.Esys_ReadClock(self.ctx, session1, session2, session3, currentTime))
return currentTime[0]
def ClockSet(
self,
auth,
newTime,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(lib.Esys_ClockSet(self.ctx, auth, session1, session2, session3, newTime))
def ClockRateAdjust(
self,
auth,
rateAdjust,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_ClockRateAdjust(
self.ctx, auth, session1, session2, session3, rateAdjust
)
)
def GetCapability(
self,
capability,
prop,
propertyCount,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
moreData = ffi.new("TPMI_YES_NO *")
capabilityData = ffi.new("TPMS_CAPABILITY_DATA **")
_chkrc(
lib.Esys_GetCapability(
self.ctx,
session1,
session2,
session3,
capability,
prop,
propertyCount,
moreData,
capabilityData,
)
)
return (moreData[0], capabilityData[0])
def TestParms(
self,
parameters,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(lib.Esys_TestParms(self.ctx, session1, session2, session3, parameters))
def NV_DefineSpace(
self,
authHandle,
auth,
publicInfo,
session1=ESYS_TR.PASSWORD,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
nvHandle = ffi.new("ESYS_TR *")
_chkrc(
lib.Esys_NV_DefineSpace(
self.ctx,
authHandle,
session1,
session2,
session3,
TPM2B_pack(auth, t="TPM2B_AUTH"),
publicInfo._cdata,
nvHandle,
)
)
nvHandleObject = nvHandle[0]
return nvHandleObject
def NV_UndefineSpace(
self,
authHandle,
nvIndex,
session1=ESYS_TR.PASSWORD,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_NV_UndefineSpace(
self.ctx, authHandle, nvIndex, session1, session2, session3
)
)
def NV_UndefineSpaceSpecial(
self,
nvIndex,
platform,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_NV_UndefineSpaceSpecial(
self.ctx, nvIndex, platform, session1, session2, session3
)
)
def NV_ReadPublic(
self,
nvIndex,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
nvPublic = ffi.new("TPM2B_NV_PUBLIC **")
nvName = ffi.new("TPM2B_NAME **")
_chkrc(
lib.Esys_NV_ReadPublic(
self.ctx, nvIndex, session1, session2, session3, nvPublic, nvName
)
)
return (TPM2B_NV_PUBLIC(_cdata=nvPublic[0]), TPM2B_NAME(_cdata=nvName[0]))
def NV_Write(
self,
nvIndex,
data,
offset=0,
authHandle=0,
session1=ESYS_TR.PASSWORD,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
if authHandle == 0:
authHandle = nvIndex
_chkrc(
lib.Esys_NV_Write(
self.ctx,
authHandle,
nvIndex,
session1,
session2,
session3,
TPM2B_pack(data, t="TPM2B_MAX_NV_BUFFER"),
offset,
)
)
def NV_Increment(
self,
authHandle,
nvIndex,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_NV_Increment(
self.ctx, authHandle, nvIndex, session1, session2, session3
)
)
def NV_Extend(
self,
authHandle,
nvIndex,
data,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_NV_Extend(
self.ctx, authHandle, nvIndex, session1, session2, session3, data
)
)
def NV_SetBits(
self,
authHandle,
nvIndex,
bits,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_NV_SetBits(
self.ctx, authHandle, nvIndex, session1, session2, session3, bits
)
)
def NV_WriteLock(
self,
authHandle,
nvIndex,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_NV_WriteLock(
self.ctx, authHandle, nvIndex, session1, session2, session3
)
)
def NV_GlobalWriteLock(
self,
authHandle,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_NV_GlobalWriteLock(
self.ctx, authHandle, session1, session2, session3
)
)
def NV_Read(
self,
nvIndex,
size,
offset=0,
authHandle=0,
session1=ESYS_TR.PASSWORD,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
if authHandle == 0:
authHandle = nvIndex
data = ffi.new("TPM2B_MAX_NV_BUFFER **")
_chkrc(
lib.Esys_NV_Read(
self.ctx,
authHandle,
nvIndex,
session1,
session2,
session3,
size,
offset,
data,
)
)
return TPM2B_unpack(data[0])
def NV_ReadLock(
self,
authHandle,
nvIndex,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_NV_ReadLock(
self.ctx, authHandle, nvIndex, session1, session2, session3
)
)
def NV_ChangeAuth(
self,
nvIndex,
newAuth,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
_chkrc(
lib.Esys_NV_ChangeAuth(
self.ctx, nvIndex, session1, session2, session3, newAuth
)
)
def NV_Certify(
self,
signHandle,
authHandle,
nvIndex,
qualifyingData,
inScheme,
size,
offset,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
certifyInfo = ffi.new("TPM2B_ATTEST **")
signature = ffi.new("TPMT_SIGNATURE **")
_chkrc(
lib.Esys_NV_Certify(
self.ctx,
signHandle,
authHandle,
nvIndex,
session1,
session2,
session3,
qualifyingData,
inScheme,
size,
offset,
certifyInfo,
signature,
)
)
return (certifyInfo[0], signature[0])
def Vendor_TCG_Test(
self,
inputData,
session1=ESYS_TR.NONE,
session2=ESYS_TR.NONE,
session3=ESYS_TR.NONE,
):
outputData = ffi.new("TPM2B_DATA **")
_chkrc(
lib.Esys_Vendor_TCG_Test(
self.ctx, session1, session2, session3, inputData, outputData
)
)
return outputData[0]
|
import torch
import torch.nn as nn
import numpy as np
import numpy.random as rand
from dset import idx2char
# We use cross entropy loss
loss_func = nn.CrossEntropyLoss(reduction='mean')
def compute_loss(rnn, xNy, h_list, device):
"""
compute_loss for a given RNN model using loss_func
Args:
RNN: model to be trained
xNy (tuple): the input and target pair of the form (input, target)
h_list (list): list of hidden states. Each hidden state is a torch.tensor
device(str): 'cpu' or 'cuda'
Returns:
torch.tensor: value of the loss
"""
x_t, y_t = xNy
x_t=x_t.to(device, non_blocking=True)
y_t=y_t.to(device, non_blocking=True)
loss = 0.
for i in range(x_t.shape[1]):
out, h_list = rnn(x_t[:, i, :], h_list)
loss += loss_func(out, y_t[:, i])
return loss
def print_function(max_len, rnn, char_size, h_size, depth, mode):
""" Generate text and print it using rnn.
Args:
max_len (int): maximum length of generated text
rnn: RNN model
char_size: number of characters in the vocabulary.
h_size: size of hidden layer.
mode (str): one of "RNN", "LSTM", "GRU"
"""
rnn.eval()
seed = torch.zeros((1, char_size))
seed[0, rand.randint(0, char_size)] = 1
if mode == "RNN" or mode == "GRU":
h = [torch.zeros((1, h_size)) for i in range(depth)]
elif mode == "LSTM":
h = ([torch.zeros((1, h_size)) for i in range(depth)], [torch.zeros((1, h_size)) for i in range(depth)])
generated = []
out_text = ''
with torch.no_grad():
for i in range(max_len):
out, h = rnn(seed, h)
p = torch.nn.functional.softmax(out, dim=1)
p = np.array(p)
max_idx = np.random.choice(range(char_size), p=p.ravel())
char = idx2char[max_idx.item()]
out_text += char
seed = torch.zeros((1, char_size))
seed[0, max_idx] = 1
print(out_text)
|
"""Render SBOL Visual elements to SVG using Python and wkHTMLtoImage.
This requires a recent version wkhtmltopdf/wkhtmltoimage installed.
Thids also requires PIL/Pillow installed.
"""
import os
import subprocess as sp
import tempfile
from PIL import Image
import PIL.ImageOps
def autocrop(filename, border_width=5):
"""Remove all white borders from a picture, then add a white border of
size given by `border_width` (in pixels)"""
image = Image.open(filename, mode="r").convert('L')
inverted_image = PIL.ImageOps.invert(image)
cropped = image.crop(inverted_image.getbbox())
cropped_margin = PIL.ImageOps.expand(cropped, border_width, fill=255)
cropped_margin.save(filename)
def render_sbolv(sbolv_elements, outfile, elements_zoom=1, width=600,
css_stylesheets=("sbol-visual-standalone.css",),
border_width=5):
"""Render a series of sbolv elements into an image file.
Parameters
----------
sbolv_elements
A list of elements of the form (element_type, element_label)
where element_type is "promoter", "cds", etc. and element_label is
any text or HTML
outfile
Name or path of the output file. Valid extensions are .png, .jpeg, etc.
elements_zoom
Makes the elements and label appear bigger (zoom > 1) or smaller
(zoom < 1)
width
Width in pixels of the final picture (not counting border)
border_width
Size in pixels of the white pixels border around the picture.
"""
def get_content(fname):
with open(fname) as f:
content = f.read()
return content
css_content = "\n".join([
'<style type="text/css"> %s </style>' % get_content(fname)
for fname in css_stylesheets
])
sbolv_elements_html = "\n".join([
'<div class="sbolv %s">%s</div>' % (sbol_type, html_content)
for sbol_type, html_content in sbolv_elements
])
html = """
<html>
<head> %s </head>
<body> <div class="sbol-visual"> %s </div> </body>
</html>
""" % (css_content, sbolv_elements_html)
temp_html = tempfile.mkstemp(suffix=".html")[1]
with open(temp_html, "w") as f:
f.write(html)
extension = os.path.splitext(outfile)[1][1:]
process = sp.Popen(["wkhtmltoimage",
"--format", extension,
"--zoom", "%.1f" % elements_zoom,
"--width", "%d" % width,
"--disable-smart-width",
temp_html, outfile,
],
stderr=sp.PIPE, stdout=sp.PIPE)
out, err = process.communicate()
print(err)
os.remove(temp_html)
autocrop(outfile, border_width=border_width)
# LET'S TRY IT !
if __name__ == "__main__":
render_sbolv(
sbolv_elements=[
("promoter", "P1"),
("cds", "my favourite gene with a super long name"),
("terminator", "Tr. 1"),
("promoter", "P2"),
("cds", "<em>acs</em>"),
("terminator", "Tr. 2")
],
outfile="rendered_sbolv.png",
css_stylesheets=["../dist/sbol-visual-standalone.css"]
)
|
class CommandError(Exception):
"""Exception raised by CLI"""
|
import re
import os
import json
import types
class JSONtoSQL(object):
def __init__(self, target_db):
self.target_db = target_db
def reset(self):
self.target_db.reset()
return self
def drop_all(self):
self.target_db.drop_all()
return self
def to_sql(self, json_db_path, schema_path, transformer=None):
print('IMPORTING {}'.format(json_db_path))
self.target_db.foreign_keys_freeze()
for dirName, subdirList, fileList in os.walk(json_db_path):
print('TOTAL TABLES %d' % len(fileList))
for file in fileList:
table_name = file.split('.')[0]
if not self.target_db.table_exists(table_name):
print(f' Table {table_name} does not exists... skipping')
continue
with open(dirName + '/' + file) as json_file:
data = json.load(json_file)
total_records = len(data)
for row in data:
try:
if isinstance(transformer, types.FunctionType):
row = transformer(row,
table_name=table_name,
target_db=self.target_db,
json_db_path=json_db_path, schema_path=schema_path)
self.target_db.insert(table_name, row)
except (Exception,) as e:
if not re.search(r'duplicate\skey', str(e)):
print(e)
total_records -= 1
print(' {} \n {} of {} records {}'.format(table_name.upper(), total_records, len(data),
'dumped' if total_records > 0 else ''))
self.target_db.foreign_keys_unfreeze()
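# Usage sketch (editor's addition, not part of the original module): the target
# database adapter and the paths below are hypothetical; the adapter must expose
# reset/drop_all/foreign_keys_freeze/table_exists/insert/foreign_keys_unfreeze.
def lowercase_keys(row, **kwargs):
    # example transformer: normalise column names before insertion
    return {key.lower(): value for key, value in row.items()}
# importer = JSONtoSQL(MyTargetDB())
# importer.reset().to_sql('json_db/', 'schema.sql', transformer=lowercase_keys)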
|
'''
Created on Dec 12, 2017
@author: candle
'''
from datetime import datetime
import string
import logging
log = logging.getLogger('sdsapp')
def to_timestamp (dt):
return dt.strftime ('%d.%m.%Y %H:%M:%S')
def to_datetime (st):
return datetime.strptime (st, "%d.%m.%Y %H:%M:%S")
def strip_whitespace (src):
ret = src
for ch in string.whitespace:
ret = ret.replace (ch, '')
return ret
def strip_formatting (src):
ret = src
ret = ret.replace ('\n', '')
ret = ret.replace ('\t', '')
ret = ret.replace ('\\t', '')
ret = ret.replace ('\\n', '')
return ret
from urllib.request import urlopen
from html.parser import HTMLParser
from lxml import etree
class SDSHTMLParser (HTMLParser):
def __init__ (self):
super().__init__()
def parse (self, data):
self.is_html = False
self.xml = etree.Element ('html')
self.cur_node = self.xml
self.feed (data)
return self.xml
def handle_starttag(self, tag, attrs):
tag = tag.lower ()
self.cur_node = etree.SubElement (self.cur_node, tag)
for attr, val in attrs:
try:
self.cur_node.set (attr, val)
            except Exception:
                # ignore attributes that lxml refuses to store (invalid names or values)
                pass
def handle_endtag(self, tag):
tag = tag.lower ()
if tag == self.cur_node.tag:
self.cur_node = self.cur_node.getparent ()
if self.cur_node is None:
self.cur_node = self.xml
def handle_data(self, data):
if data is None:
return
# data = data.strip (string.whitespace)
if data == '':
return
clist = self.cur_node.getchildren ()
if len (clist) == 0:
if self.cur_node.text is None:
self.cur_node.text = data
else:
self.cur_node.text += data
else:
if clist [len(clist)-1].tail is None:
clist [len(clist)-1].tail = data
else:
clist [len(clist)-1].tail += data
def parse_html (data):
parser = SDSHTMLParser ()
return parser.parse(data)
def load_url (url):
ret = urlopen (url).read ()
return ret
def test_func_q4 (url):
html = load_url (url)
sample_html="""
<html>
<script>alert('qq');</script>
<div>text-only div</div>
<div>have subnodes<p>and more content</p></div>
<div>have subnodes<p>and more content</p>and even more content</div>
<div>
have div childrens
<div>child div content<p>and more content</p></div>
</div>
</html>
"""
# html = sample_html
    # urlopen().read() returns bytes, so decode it instead of calling str() on it
    xml = parse_html (html.decode ('utf-8', errors='ignore'))
# Remove all nodes which definitely can't contain any useful text information
ignored_tags = frozenset (['script', 'style', 'meta', 'head', 'link', 'svg'])
from functools import reduce
def add_tag_to_xquery (left, right):
return left + (" or local-name()='%s'" % right)
xpq = reduce (add_tag_to_xquery, ignored_tags, 'false()')
xpq = '//*[%s]' % xpq
nodes = xml.xpath (xpq)
for node in nodes:
node.getparent ().remove (node)
# Get all <div> elements not contained inside other <div>
xpq = '//div[not(ancestor::div)]'
nodes = xml.xpath (xpq)
#dump inner HTML of nodeset to HTML_output
html_output = ''
for node in nodes:
inner_html = node.xpath ('*|text()')
for inode in inner_html:
if isinstance (inode, str):
html_output += inode
else:
html_output += etree.tostring (inode, encoding='UTF-8', pretty_print=True).decode('UTF-8')
return html_output
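# Quick self-check (editor's addition): exercises SDSHTMLParser / parse_html on
# a small inline document, with no network access required.
if __name__ == '__main__':
    demo_tree = parse_html('<html><div>hello <b>world</b></div></html>')
    print(etree.tostring(demo_tree, pretty_print=True).decode('UTF-8'))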
|
"""Adds blockhash linking to BonusPayouts
Revision ID: 52536bfc833b
Revises: 160f63e90b91
Create Date: 2014-04-12 13:18:24.408548
"""
# revision identifiers, used by Alembic.
revision = '52536bfc833b'
down_revision = '160f63e90b91'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('bonus_payout', sa.Column('blockhash', sa.String(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('bonus_payout', 'blockhash')
### end Alembic commands ###
|
"""This module contains the general information for VnicIScsi ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class VnicIScsiConsts():
ADDR_DERIVED = "derived"
ADMIN_HOST_PORT_1 = "1"
ADMIN_HOST_PORT_2 = "2"
ADMIN_HOST_PORT_ANY = "ANY"
ADMIN_HOST_PORT_NONE = "NONE"
ADMIN_VCON_1 = "1"
ADMIN_VCON_2 = "2"
ADMIN_VCON_3 = "3"
ADMIN_VCON_4 = "4"
ADMIN_VCON_ANY = "any"
CDN_SOURCE_USER_DEFINED = "user-defined"
CDN_SOURCE_VNIC_NAME = "vnic-name"
CONFIG_STATE_APPLIED = "applied"
CONFIG_STATE_APPLYING = "applying"
CONFIG_STATE_FAILED_TO_APPLY = "failed-to-apply"
CONFIG_STATE_NOT_APPLIED = "not-applied"
EXT_IPSTATE_NONE = "none"
EXT_IPSTATE_POOLED = "pooled"
EXT_IPSTATE_STATIC = "static"
INST_TYPE_DEFAULT = "default"
INST_TYPE_DYNAMIC = "dynamic"
INST_TYPE_DYNAMIC_VF = "dynamic-vf"
INST_TYPE_MANUAL = "manual"
OPER_HOST_PORT_1 = "1"
OPER_HOST_PORT_2 = "2"
OPER_HOST_PORT_ANY = "ANY"
OPER_HOST_PORT_NONE = "NONE"
OPER_ORDER_UNSPECIFIED = "unspecified"
OPER_SPEED_LINE_RATE = "line-rate"
OPER_VCON_1 = "1"
OPER_VCON_2 = "2"
OPER_VCON_3 = "3"
OPER_VCON_4 = "4"
OPER_VCON_ANY = "any"
ORDER_UNSPECIFIED = "unspecified"
OWNER_CONN_POLICY = "conn_policy"
OWNER_INITIATOR_POLICY = "initiator_policy"
OWNER_LOGICAL = "logical"
OWNER_PHYSICAL = "physical"
OWNER_POLICY = "policy"
OWNER_UNKNOWN = "unknown"
SWITCH_ID_A = "A"
SWITCH_ID_B = "B"
SWITCH_ID_NONE = "NONE"
SWITCH_ID_MGMT = "mgmt"
TYPE_ETHER = "ether"
TYPE_FC = "fc"
TYPE_IPC = "ipc"
TYPE_SCSI = "scsi"
TYPE_UNKNOWN = "unknown"
VNIC_DEF_TYPE_DYNAMIC_NW = "dynamic-nw"
VNIC_DEF_TYPE_OPTION17 = "option17"
VNIC_DEF_TYPE_OPTION43 = "option43"
VNIC_DEF_TYPE_STATIC = "static"
class VnicIScsi(ManagedObject):
"""This is VnicIScsi class."""
consts = VnicIScsiConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("VnicIScsi", "vnicIScsi", "iscsi-[name]", VersionMeta.Version111a, "InputOutput", 0x7fffff, [], ["admin", "ls-config", "ls-network", "ls-server", "ls-storage"], [u'computeInstance', u'computeLocalTemplDef', u'lsServer'], [u'faultInst', u'vnicIPv4IscsiAddr', u'vnicIScsiAutoTargetIf', u'vnicIScsiInitiatorParams', u'vnicIScsiStaticTargetIf', u'vnicVlan'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"adaptor_profile_name": MoPropertyMeta("adaptor_profile_name", "adaptorProfileName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"addr": MoPropertyMeta("addr", "addr", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""(([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]))|0""", ["derived"], []),
"admin_cdn_name": MoPropertyMeta("admin_cdn_name", "adminCdnName", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"admin_host_port": MoPropertyMeta("admin_host_port", "adminHostPort", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["1", "2", "ANY", "NONE"], []),
"admin_vcon": MoPropertyMeta("admin_vcon", "adminVcon", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["1", "2", "3", "4", "any"], []),
"auth_profile_name": MoPropertyMeta("auth_profile_name", "authProfileName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], []),
"boot_dev": MoPropertyMeta("boot_dev", "bootDev", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"cdn_source": MoPropertyMeta("cdn_source", "cdnSource", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["user-defined", "vnic-name"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"config_qualifier": MoPropertyMeta("config_qualifier", "configQualifier", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|not-applicable|adaptor-protected-eth-capability|vif-resources-overprovisioned|ungrouped-domain|unsupported-nvgre|unsupported-adaptor-for-vnic-cdn|unresolved-remote-vlan-name|invalid-wwn|service-profile-virtualization-conflict|unsupported-roce-netflow|unsupported-vxlan-netflow|fcoe-capacity|wwpn-derivation-virtualized-port|unresolved-vlan-name|vnic-virtualization-netflow-conflict|unsupported-vxlan-usnic|unsupported-roce-properties|pinning-vlan-mismatch|adaptor-requirement|vnic-not-ha-ready|missing-ipv4-inband-mgmt-addr|unsupported-nvgre-dynamic-vnic|duplicate-vnic-cdn-name|unresolved-remote-vsan-name|mac-derivation-virtualized-port|vnic-virtualization-conflict|unsupported-roce|unsupported-nvgre-netflow|vnic-vlan-assignment-error|insufficient-vhba-capacity|inaccessible-vlan|unable-to-update-ucsm|soft-pinning-vlan-mismatch|unsupported-roce-usnic|unsupported-nvgre-vmq|connection-placement|vnic-vcon-provisioning-change|missing-ipv6-inband-mgmt-addr|unsupported-nvgre-usnic|insufficient-roce-resources|missing-primary-vlan|adaptor-fcoe-capability|vfc-vnic-pvlan-conflict|virtualization-not-supported|unsupported-vxlan|unsupported-roce-nvgre|unresolved-vsan-name|insufficient-vnic-capacity|unassociated-vlan|unsupported-roce-vmq|unsupported-roce-vxlan|unsupported-vxlan-vmq|dynamic-vf-vnic|wwpn-assignment|missing-ipv4-addr|unsupported-vxlan-dynamic-vnic|pinned-target-misconfig|unsupported-vmq-resources),){0,58}(defaultValue|not-applicable|adaptor-protected-eth-capability|vif-resources-overprovisioned|ungrouped-domain|unsupported-nvgre|unsupported-adaptor-for-vnic-cdn|unresolved-remote-vlan-name|invalid-wwn|service-profile-virtualization-conflict|unsupported-roce-netflow|unsupported-vxlan-netflow|fcoe-capacity|wwpn-derivation-virtualized-port|unresolved-vlan-name|vnic-virtualization-netflow-conflict|unsupported-vxlan-usnic|unsupported-roce-properties|pinning-vlan-mismatch|adaptor-requirement|vnic-not-ha-ready|missing-ipv4-inband-mgmt-addr|unsupported-nvgre-dynamic-vnic|duplicate-vnic-cdn-name|unresolved-remote-vsan-name|mac-derivation-virtualized-port|vnic-virtualization-conflict|unsupported-roce|unsupported-nvgre-netflow|vnic-vlan-assignment-error|insufficient-vhba-capacity|inaccessible-vlan|unable-to-update-ucsm|soft-pinning-vlan-mismatch|unsupported-roce-usnic|unsupported-nvgre-vmq|connection-placement|vnic-vcon-provisioning-change|missing-ipv6-inband-mgmt-addr|unsupported-nvgre-usnic|insufficient-roce-resources|missing-primary-vlan|adaptor-fcoe-capability|vfc-vnic-pvlan-conflict|virtualization-not-supported|unsupported-vxlan|unsupported-roce-nvgre|unresolved-vsan-name|insufficient-vnic-capacity|unassociated-vlan|unsupported-roce-vmq|unsupported-roce-vxlan|unsupported-vxlan-vmq|dynamic-vf-vnic|wwpn-assignment|missing-ipv4-addr|unsupported-vxlan-dynamic-vnic|pinned-target-misconfig|unsupported-vmq-resources){0,1}""", [], []),
"config_state": MoPropertyMeta("config_state", "configState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["applied", "applying", "failed-to-apply", "not-applied"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x100, 0, 256, None, [], []),
"equipment_dn": MoPropertyMeta("equipment_dn", "equipmentDn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"eth_ep_dn": MoPropertyMeta("eth_ep_dn", "ethEpDn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"ext_ip_state": MoPropertyMeta("ext_ip_state", "extIPState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["none", "pooled", "static"], []),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"ident_pool_name": MoPropertyMeta("ident_pool_name", "identPoolName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x400, None, None, None, [], []),
"init_name_suffix": MoPropertyMeta("init_name_suffix", "initNameSuffix", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"initiator_name": MoPropertyMeta("initiator_name", "initiatorName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""[0-9a-zA-Z\.:-]{0,223}""", [], []),
"inst_type": MoPropertyMeta("inst_type", "instType", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["default", "dynamic", "dynamic-vf", "manual"], []),
"iqn_ident_pool_name": MoPropertyMeta("iqn_ident_pool_name", "iqnIdentPoolName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x1000, None, None, None, [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x2000, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
"nw_templ_name": MoPropertyMeta("nw_templ_name", "nwTemplName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x4000, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"oper_adaptor_profile_name": MoPropertyMeta("oper_adaptor_profile_name", "operAdaptorProfileName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"oper_auth_profile_name": MoPropertyMeta("oper_auth_profile_name", "operAuthProfileName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"oper_cdn_name": MoPropertyMeta("oper_cdn_name", "operCdnName", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"oper_host_port": MoPropertyMeta("oper_host_port", "operHostPort", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["1", "2", "ANY", "NONE"], []),
"oper_ident_pool_name": MoPropertyMeta("oper_ident_pool_name", "operIdentPoolName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"oper_iqn_ident_pool_name": MoPropertyMeta("oper_iqn_ident_pool_name", "operIqnIdentPoolName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"oper_order": MoPropertyMeta("oper_order", "operOrder", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["unspecified"], ["0-4294967295"]),
"oper_speed": MoPropertyMeta("oper_speed", "operSpeed", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["line-rate"], ["8-40000000"]),
"oper_stats_policy_name": MoPropertyMeta("oper_stats_policy_name", "operStatsPolicyName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"oper_vcon": MoPropertyMeta("oper_vcon", "operVcon", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["1", "2", "3", "4", "any"], []),
"order": MoPropertyMeta("order", "order", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x8000, None, None, None, ["unspecified"], ["0-256"]),
"owner": MoPropertyMeta("owner", "owner", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["conn_policy", "initiator_policy", "logical", "physical", "policy", "unknown"], []),
"pin_to_group_name": MoPropertyMeta("pin_to_group_name", "pinToGroupName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10000, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"qos_policy_name": MoPropertyMeta("qos_policy_name", "qosPolicyName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20000, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x40000, 0, 256, None, [], []),
"stats_policy_name": MoPropertyMeta("stats_policy_name", "statsPolicyName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x80000, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x100000, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x200000, None, None, None, ["A", "B", "NONE", "mgmt"], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ether", "fc", "ipc", "scsi", "unknown"], []),
"vnic_def_type": MoPropertyMeta("vnic_def_type", "vnicDefType", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["dynamic-nw", "option17", "option43", "static"], []),
"vnic_name": MoPropertyMeta("vnic_name", "vnicName", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x400000, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
}
prop_map = {
"adaptorProfileName": "adaptor_profile_name",
"addr": "addr",
"adminCdnName": "admin_cdn_name",
"adminHostPort": "admin_host_port",
"adminVcon": "admin_vcon",
"authProfileName": "auth_profile_name",
"bootDev": "boot_dev",
"cdnSource": "cdn_source",
"childAction": "child_action",
"configQualifier": "config_qualifier",
"configState": "config_state",
"dn": "dn",
"equipmentDn": "equipment_dn",
"ethEpDn": "eth_ep_dn",
"extIPState": "ext_ip_state",
"fltAggr": "flt_aggr",
"identPoolName": "ident_pool_name",
"initNameSuffix": "init_name_suffix",
"initiatorName": "initiator_name",
"instType": "inst_type",
"iqnIdentPoolName": "iqn_ident_pool_name",
"name": "name",
"nwTemplName": "nw_templ_name",
"operAdaptorProfileName": "oper_adaptor_profile_name",
"operAuthProfileName": "oper_auth_profile_name",
"operCdnName": "oper_cdn_name",
"operHostPort": "oper_host_port",
"operIdentPoolName": "oper_ident_pool_name",
"operIqnIdentPoolName": "oper_iqn_ident_pool_name",
"operOrder": "oper_order",
"operSpeed": "oper_speed",
"operStatsPolicyName": "oper_stats_policy_name",
"operVcon": "oper_vcon",
"order": "order",
"owner": "owner",
"pinToGroupName": "pin_to_group_name",
"qosPolicyName": "qos_policy_name",
"rn": "rn",
"statsPolicyName": "stats_policy_name",
"status": "status",
"switchId": "switch_id",
"type": "type",
"vnicDefType": "vnic_def_type",
"vnicName": "vnic_name",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.adaptor_profile_name = None
self.addr = None
self.admin_cdn_name = None
self.admin_host_port = None
self.admin_vcon = None
self.auth_profile_name = None
self.boot_dev = None
self.cdn_source = None
self.child_action = None
self.config_qualifier = None
self.config_state = None
self.equipment_dn = None
self.eth_ep_dn = None
self.ext_ip_state = None
self.flt_aggr = None
self.ident_pool_name = None
self.init_name_suffix = None
self.initiator_name = None
self.inst_type = None
self.iqn_ident_pool_name = None
self.nw_templ_name = None
self.oper_adaptor_profile_name = None
self.oper_auth_profile_name = None
self.oper_cdn_name = None
self.oper_host_port = None
self.oper_ident_pool_name = None
self.oper_iqn_ident_pool_name = None
self.oper_order = None
self.oper_speed = None
self.oper_stats_policy_name = None
self.oper_vcon = None
self.order = None
self.owner = None
self.pin_to_group_name = None
self.qos_policy_name = None
self.stats_policy_name = None
self.status = None
self.switch_id = None
self.type = None
self.vnic_def_type = None
self.vnic_name = None
ManagedObject.__init__(self, "VnicIScsi", parent_mo_or_dn, **kwargs)
|
from django.shortcuts import render, redirect
from .forms import IcecreamForm
from .models import Icecream
def icecream_detail_pk(request, pk):
    # icecream_db is not defined anywhere in this module; look the record up via
    # the Icecream model instead (assuming it exposes name/description fields).
    icecream = Icecream.objects.get(pk=pk)
    context = {
        'name': icecream.name,
        'description': icecream.description,
    }
return render(request, 'icecream/icecream-detail.html', context)
def icecream_detail(request):
icecreams = Icecream.objects.all()
return render(request, 'icecream/icecream-detail.html', {'icecreams': icecreams})
def icecream_new(request):
if request.method == "POST":
form = IcecreamForm(request.POST)
        if form.is_valid():
form.save()
return redirect('index')
else:
form = IcecreamForm()
return render(request, 'icecream/icecream-new.html', {'form':form})
|
from django.urls import path, include
from . import views
from cart import views as cart_views
from django.conf.urls import url
from django.conf.urls.static import static
from django.conf import settings
app_name = 'buyer'
urlpatterns = [
path('about/', views.about, name='about'),
path('cart/', include('cart.urls')),
path('payment/', include('paytm.urls')),
path('shop/<int:rec>', views.shop_list, name='shop'),
path('shop/<int:rec>/<int:cid>/<int:sid>', views.shop_filters, name='shop-filters'),
path('shop/<int:cid>/<int:rec>', views.shop_category, name='shop-category'),
path('shop/item/<int:pid>/<int:rec>', views.shop_item, name='shop-item'),
path('', views.home, name='home'),
path('checkout/', views.checkout, name="checkout"),
path('search/', views.search, name='search'),
path('order/', views.order_create, name='order'),
path('order/cancel/<pid>', views.order_cancellation, name='order-cancellation'),
path('shop/sort_product', views.sort_products, name="sort_product")
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from apps.accounts.util import check_is_admin
from apps.finder.models import Business
from apps.finder.models import Store, UserStoreSuggestion
from django.shortcuts import render
import settings
def maintenance(request):
check_is_admin(request.user)
return render(request, 'maintenance.html', locals())
def view_user_suggestions(request):
check_is_admin(request.user)
if request.method == "POST":
post_values = request.POST
name = post_values['name']
address1 = post_values['address']
address2 = post_values['address-line-2']
city = post_values['city']
state_code = post_values['state-code']
zip_code = post_values['zip-code']
latitude = post_values['latitude']
longitude = post_values['longitude']
phone = post_values['phone']
website = post_values['website']
facebook = post_values['facebook']
email = post_values['email']
if address2 is None:
address2 = ''
else:
suggestion = UserStoreSuggestion.objects.get(id=post_values['id'])
business = Business()
business.name = name
business.website = website
business.facebook = facebook
business.save()
store = Store()
store.user = suggestion.user
store.business = business
store.phone = phone
store.address1 = address1
store.address2 = address2
store.city = city
store.state_code = state_code
store.zip_code = zip_code
store.latitude = latitude
store.longitude = longitude
store.save()
suggestion.is_verified = True
suggestion.save()
new_suggestions = UserStoreSuggestion.objects.filter(is_verified=False)
return render(request, 'view_user_suggestions.html', locals())
def get_no_phone_stores(request):
check_is_admin(request.user)
version = settings.__version__
businesses_with_no_website = Business.objects.filter(website='')
num_businesses_with_no_website = businesses_with_no_website.count()
stores_with_no_phone = Store.objects.filter(phone='')
num_stores_with_no_phone = stores_with_no_phone.count()
return render(request, 'view_incomplete.html', locals())
|
import numpy as np
import os
import sys
import datetime
import cv2
from PIL import Image
from PIL.ImageQt import ImageQt
from libs.ustr import *
class NamedImage():
def __init__(self, image_name):
self._image_name = image_name
self._image = None
self._qt_image = None
self._np_image = None
self._path_name = ''
self._save_path = None
@property
def name(self):
return self._image_name
@property
def path(self):
return self._path_name
@property
def savepath(self):
if self._save_path is None:
return self._path_name
return self._save_path
@property
def image(self):
return self._image
@property
def qtimage(self):
if self._qt_image is None:
self._qt_image = ImageQt(self._image)
return self._qt_image
@property
def npimage(self):
if self._np_image is None:
(im_width, im_height) = self._image.size
self._np_image = np.array(self._image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
return self._np_image
@property
def size(self):
if self._image is None:
return 0, 0
return self._image.size
def isNull(self) -> bool:
if self._image is None:
return True
return False
def FromFile(self, path: str):
try:
self._path_name = path
self._image = Image.open(path)
except Exception as e:
self._image = None
return False
return True
def FromArray(self, np_array: object, imgname: str, savepath: str):
try:
self._path_name = ustr(imgname)
self._save_path = ustr(savepath)
self._image = Image.fromarray(cv2.cvtColor(np_array, cv2.COLOR_BGR2RGB), 'RGB')
self._np_image = np_array
except Exception as e:
self._image = None
return False
return True
def Save(self) -> str:
if self._save_path is None:
return self._path_name
fpath, fname = os.path.split(self._save_path)
try:
if not os.path.exists(fpath):
os.makedirs(fpath)
#cv2.imwrite(self._save_path, self._np_image)
self._image.save(self._save_path, "JPEG", quality=98)
except Exception as e:
print(e)
return self._save_path
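# Usage sketch (editor's addition): the file name below is a placeholder, not
# part of the original project.
if __name__ == '__main__':
    img = NamedImage('example')
    if img.FromFile('example.jpg'):
        print('loaded', img.name, 'size:', img.size)
        print('save path:', img.Save())
    else:
        print('could not open example.jpg')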
|
"""Support for Pellet stoves using Duepi wifi modules using ESPLink serial interface"""
|
import json
import os
def load_json(path, default_dict=True):
"""
Loads a json file and returns it as a dictionary or list
"""
if os.path.exists(path) and os.path.isfile(path):
        with open(path, encoding="utf8") as file:
            data = json.load(file)
        return data
else:
if default_dict:
return {}
else:
return []
def save_json(data, path, indent=None):
"""
Saves dictionary or list as json file
"""
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
    with open(path, 'w', encoding="utf-8") as file:
        file.write(json.dumps(data, ensure_ascii=False, indent=indent))
def save(data, path):
"""
Saves text to a file
"""
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
    with open(path, 'w', encoding="utf-8") as file:
        file.write(data)
def save_binary(data, path):
"""
Saves data as a binary file
"""
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
    with open(path, 'wb') as file:
        file.write(data)
def load_binary(path):
"""
Loads data as a binary file
"""
if os.path.exists(path) and os.path.isfile(path):
        with open(path, 'rb') as file:
            data = file.read()
        return data
else:
return None
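# Round-trip sketch (editor's addition): the path below is a placeholder.
if __name__ == "__main__":
    settings = {"name": "demo", "enabled": True}
    save_json(settings, "/tmp/demo/settings.json", indent=2)
    print(load_json("/tmp/demo/settings.json"))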
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 18 10:34:30 2016
@author: dahoiv
"""
import os
import sqlite3
import ConvertDataToDB
from img_data import img_data
import image_registration
import util
def save_to_db(image_ids, ny_image_ids):
print("----here")
data_transforms = []
for (img_id, ny_img_id) in zip(image_ids, ny_image_ids):
print(img_id)
_img = img_data(img_id, db_path, util.TEMP_FOLDER_PATH)
_img.load_db_transforms()
print(_img.transform)
if _img.transform is None:
continue
_img.processed_filepath = image_registration.move_vol(_img.img_filepath, _img.get_transforms())
_img.image_id = ny_img_id
data_transforms.append(_img)
image_registration.save_transform_to_database(data_transforms)
if __name__ == "__main__":
os.nice(19)
util.setup("temp_convert/", "LGG")
util.mkdir_p(util.TEMP_FOLDER_PATH)
util.DATA_FOLDER = "/mnt/dokumneter/data/database/"
if True:
db_path = "/home/dahoiv/disk/data/database3/LGG/"
util.DATA_FOLDER = util.DATA_FOLDER + "LGG" + "/"
util.DB_PATH = util.DATA_FOLDER + "brainSegmentation.db"
convert_table_inv = ConvertDataToDB.get_convert_table('/home/dahoiv/disk/data/Segmentations/NY_PID_LGG segmentert.xlsx')
convert_table = {v: k for k, v in convert_table_inv.items()}
print(convert_table)
print(util.DB_PATH)
conn = sqlite3.connect(util.DB_PATH)
conn.text_factory = str
cursor = conn.execute('''SELECT pid from Patient''')
conn2 = sqlite3.connect(db_path + "brainSegmentation.db")
conn2.text_factory = str
image_ids = []
ny_image_ids = []
for row in cursor:
# print(row)
ny_pid = row[0]
try:
old_pid = int(convert_table_inv[str(ny_pid)])
except Exception:
continue
cursor2 = conn2.execute('''SELECT id from Images where pid = ? AND diag_pre_post = ?''', (old_pid, "pre"))
for _id in cursor2:
image_ids.append(_id[0])
cursor2.close()
cursor2 = conn.execute('''SELECT id from Images where pid = ? AND diag_pre_post = ?''', (ny_pid, "pre"))
for _id in cursor2:
ny_image_ids.append(_id[0])
cursor2.close()
cursor.close()
conn.close()
print(ny_image_ids, image_ids)
save_to_db(image_ids, ny_image_ids)
if False:
util.setup("temp_convert/", "GBM")
db_path = "/home/dahoiv/disk/data/database/GBM/"
util.DATA_FOLDER = util.DATA_FOLDER + "GBM" + "/"
util.DB_PATH = util.DATA_FOLDER + "brainSegmentation.db"
import do_img_registration_GBM
image_ids = do_img_registration_GBM.find_images()
ny_image_ids = image_ids
save_to_db(image_ids, ny_image_ids)
|
import numpy as np
from magnn.tensors import Tensor
class Loss:
def loss(self, predicted: Tensor, actual: Tensor) -> float:
raise NotImplementedError
def grad(self, predicted: Tensor, actual: Tensor) -> Tensor:
raise NotImplementedError
class MSE(Loss):
"""
Mean Squared Error
"""
def loss(self, predicted: Tensor, actual: Tensor) -> float:
return np.mean((predicted - actual) ** 2)
def grad(self, predicted: Tensor, actual: Tensor) -> Tensor:
return 2 * (predicted - actual)
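# Minimal check (editor's addition): plain numpy arrays are assumed to be valid
# Tensor values here, since magnn's Tensor is expected to wrap/alias ndarrays.
if __name__ == "__main__":
    predicted = np.array([1.0, 2.0, 3.0])
    actual = np.array([1.0, 2.5, 2.0])
    mse = MSE()
    print("loss:", mse.loss(predicted, actual))
    print("grad:", mse.grad(predicted, actual))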
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tumblr_api.py
Script to test using pytumblr api.
https://pypi.python.org/pypi/PyTumblr
"""
import sys
import traceback
from optparse import OptionParser
import pytumblr
from gluon import *
from applications.zcomx.modules.logger import set_cli_logging
import logging
VERSION = 'Version 0.1'
# NOTE: the original snippet calls LOG.* without defining LOG; a module-level
# logger is assumed here so those calls resolve.
LOG = logging.getLogger('tumblr_api')
# C0301 (line-too-long): *Line too long (%%s/%%s)*
# pylint: disable=C0301
def clear(client):
"""Delete existing posts.
Args:
client: pytumblr TumblrRestClient instance
"""
posts_response = client.posts('zcomx')
for post in posts_response['posts']:
LOG.debug('Deleting: %s', post['id'])
client.delete_post('zcomx', post['id'])
def create_photo(client):
"""tumblr API create_photo."""
photo_data = dict(
state="draft",
tags=['tag1', 'tag2', 'zco.mx'],
format='html',
slug='unique-slug-002',
source='https://zco.mx/images/download/book_page.image.b224f4ba0b8dff48.757074696768742d3030312d30312e706e67.png?size=web',
link='https://zco.mx',
caption='This is a test',
tweet=None,
)
# photo['source'] = 'https://zco.mx/zcomx/static/images/zco.mx-logo-small.png'
photo_data['caption'] = """
<h3><a href="https://zco.mx/JordanCrane/Uptight-001">Uptight 001 (2006)</a></h3><p>Test 001</p><p>by <a class="orange" id="test" href="https://zco.mx/JordanCrane">https://zco.mx/JordanCrane</a> |<a href="http://whatthingsdo.com">website</a> | <a href="https://twitter.com/Jordan_Crane">twitter</a> | <a href="https://whatthingsdo.tumblr.com">tumblr</a></p>
"""
# """ # fixes vim syntax highlighting.
result = client.create_photo('zcomx', **photo_data)
print('create_photo: {id}'.format(id=result))
def create_quote(client):
"""tumblr API create_quote."""
quote_data = dict(
state="private",
tags=['Uptight', 'JordanCrane', 'zco.mx'],
format='html',
slug='Jordan Crane Uptight-001',
quote='This is the quote of the day',
source='Joe Doe',
)
result = client.create_quote('zcomx', **quote_data)
print('create_quote: {q}'.format(q=result))
def create_text(client):
"""tumblr API create_text."""
text_data = dict(
state="private",
tags=['Uptight', 'JordanCrane', 'zco.mx'],
format='html',
slug='Jordan Crane Uptight-001',
title='<span style="font-size: 18px;">List of Updated Ongoing Books for Thu, May 28, 2015</span>',
body="""
<ul>
<li> Name of Book by <tumblr_nick> - page <15>, <16>, <17></li>
<li> Book Title by <tumblr_nick> - page <57></li>
<li> Eavesdropper 001 by <andreatsurumi> - page <14></li>
</ul>
""",
)
result = client.create_text('zcomx', **text_data)
print('create_text: {r}'.format(r=result))
def delete_post(client, post_id):
"""tumblr API delete_post."""
result = client.delete_post('zcomx', post_id)
print('client.delete_post: {r}'.format(r=result))
def info(client):
"""Get client info results."""
print('client.info: {i}'.format(i=client.info()))
def posts(client, hostname='zcomx'):
"""Get client posts results."""
print('client.posts: {p}'.format(p=client.posts(hostname)))
def posts_summary(client, hostname='zcomx'):
"""Get client posts results."""
results = client.posts(hostname)
if 'posts' not in results:
LOG.error('posts not found in results')
return
for post in results['posts']:
print('{id} {slug}'.format(id=post['id'], slug=post['slug']))
def man_page():
"""Print manual page-like help"""
print("""
USAGE
tumblr_api.py
OPTIONS
-c, --clear
Delete all existing posts.
-h, --help
Print a brief help.
--man
Print man page-like help.
-v, --verbose
Print information messages to stdout.
--vv,
More verbose. Print debug messages to stdout.
""")
def main():
"""Main processing."""
usage = '%prog [options] [post_id]'
parser = OptionParser(usage=usage, version=VERSION)
parser.add_option(
'-c', '--clear',
action='store_true', dest='clear', default=False,
help='Delete existing posts.',
)
parser.add_option(
'--man',
action='store_true', dest='man', default=False,
help='Display manual page-like help and exit.',
)
parser.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print messages to stdout.',
)
parser.add_option(
'--vv',
action='store_true', dest='vv', default=False,
help='More verbose.',
)
(options, args) = parser.parse_args()
if options.man:
man_page()
quit(0)
set_cli_logging(LOG, options.verbose, options.vv)
post_id = None
if args:
post_id = args[0]
LOG.info('Started.')
LOG.debug('post_id: %s', post_id)
# Authenticate via OAuth
settings = current.app.local_settings
client = pytumblr.TumblrRestClient(
settings.tumblr_consumer_key,
settings.tumblr_consumer_secret,
settings.tumblr_oauth_token,
settings.tumblr_oauth_secret
)
if options.clear:
clear(client)
return
info(client)
# posts(client)
# posts(client, hostname='charlesforsman')
# posts_summary(client)
# delete_post(client, post_id)
create_photo(client)
# create_quote(client)
# create_text(client)
LOG.info('Done.')
if __name__ == '__main__':
# pylint: disable=broad-except
try:
main()
except SystemExit:
pass
except Exception:
traceback.print_exc(file=sys.stderr)
exit(1)
|
"""
Test case module.
"""
from time import time
import sys
import logging
import pdb
import functools
import traceback
import copy
CURRENT_TIMESTAMP = int(time())
SHITTY_NONCE = ""
DEFAULT_ENCODING = sys.getdefaultencoding()
def debug_on(*exceptions):
if not exceptions:
exceptions = (AssertionError, )
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
            prev_root = copy.copy(logging.root)
try:
logging.basicConfig(level=logging.DEBUG)
return f(*args, **kwargs)
except exceptions:
info = sys.exc_info()
traceback.print_exception(*info)
pdb.post_mortem(info[2])
finally:
logging.root = prev_root
return wrapper
return decorator
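# Usage sketch (editor's addition): decorating a test-style function so that an
# AssertionError enables DEBUG logging and drops into pdb post-mortem.
#
#     @debug_on(AssertionError)
#     def test_balance():
#         assert compute_balance() == 0   # a failure opens the debugger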
|
from device import device
from datetime import datetime
import json
if __name__ == "__main__":
data = device.fetch_data()
print(json.dumps(data, indent=True))
image = device.fetch_image()
if image:
file_name = datetime.now().strftime("%Y%m%d%H%M%S") + ".jpg"
with open('/tmp/' + file_name, 'wb') as image_file:
image_file.write(image)
print('image saved at /tmp/' + file_name)
else:
print("image fetch fail.")
print('flash pump')
device.pump_controller.turn_on()
print("flash led")
device.led_controller.flash()
|
# coding: utf-8
"""
Client query.
"""
from typing import TYPE_CHECKING
from irbis._common import ANSI, UTF, prepare_format, throw_value_error
if TYPE_CHECKING:
from typing import Optional, Union
class ClientQuery:
"""
    Base client query.
"""
def __init__(self, connection, command: str) -> None:
self._memory: bytearray = bytearray()
self.ansi(command)
self.ansi(connection.workstation)
self.ansi(command)
self.add(connection.client_id)
self.add(connection.query_id)
connection.query_id += 1
self.ansi(connection.password)
self.ansi(connection.username)
self.new_line()
self.new_line()
self.new_line()
def add(self, number: int) -> 'ClientQuery':
"""
        Append an integer.
        :param number: The number to append
:return: Self
"""
return self.ansi(str(number))
def ansi(self, text: 'Optional[str]') -> 'ClientQuery':
"""
        Append a string in the ANSI encoding.
        :param text: The string to append
:return: Self
"""
return self.append(text, ANSI)
def append(self, text: 'Optional[str]', encoding: str) -> 'ClientQuery':
"""
        Append a string in the specified encoding.
        :param text: The string to append
        :param encoding: The encoding to use
:return: Self
"""
if text is not None:
self._memory.extend(text.encode(encoding))
self.new_line()
return self
def format(self, format_specification: 'Optional[str]') \
-> 'Union[ClientQuery, bool]':
"""
        Append a format specification, preparing it first.
        A line break is also appended.
        :param format_specification: The format specification to append.
        May be empty.
:return: Self
"""
if format_specification is None:
self.new_line()
return False
prepared = prepare_format(format_specification)
if format_specification[0] == '@':
self.ansi(prepared)
elif format_specification[0] == '!':
self.utf(prepared)
else:
self.utf('!' + prepared)
return self
def new_line(self) -> 'ClientQuery':
"""
        Append a line break.
:return: Self
"""
self._memory.append(0x0A)
return self
def utf(self, text: 'Optional[str]') -> 'ClientQuery':
"""
        Append a string in the UTF-8 encoding.
        :param text: The string to append
:return: Self
"""
return self.append(text, UTF)
def encode(self) -> bytes:
"""
        Return the assembled request.
        :return: The encoded query
"""
prefix = (str(len(self._memory)) + '\n').encode(ANSI)
return prefix + self._memory
class DatabaseQuery(ClientQuery):
"""
    Client query that specifies a database.
"""
def __init__(
self,
connection,
command: str,
database: 'Optional[str]' = None,
) -> None:
super().__init__(connection, command)
self.ansi(database or connection.database or throw_value_error())
__all__ = ['ClientQuery', 'DatabaseQuery']
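# Encoding sketch (editor's addition): _StubConnection mimics only the
# attributes ClientQuery/DatabaseQuery read; it is not part of the irbis
# package and is intended only for a quick look at the wire format.
if __name__ == '__main__':
    class _StubConnection:
        workstation = 'C'
        client_id = 123456
        query_id = 1
        password = 'password'
        username = 'librarian'
        database = 'IBIS'

    demo_query = DatabaseQuery(_StubConnection(), 'A')
    demo_query.add(1).utf('sample text')
    print(demo_query.encode())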
|
from django.contrib.postgres.operations import BtreeGinExtension, TrigramExtension
from django.db import migrations
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
BtreeGinExtension(),
TrigramExtension(),
]
|
import os
import logging
from camellia.util import define
from camellia.util import system as s
INFO_COLOR = '\033[94m'
DEBUG_COLOR = '\033[92m'
WARNING_COLOR = '\033[93m'
ERROR_COLOR = '\033[91m'
CRITICAL_COLOR = '\033[95m'
END_COLOR = '\033[0m'
class Log(object):
def __init__(self, name,
console=True,
format=define.BASE_FORMAT,
level=define.BASE_LEVEL,
shell=True):
self.logger = logging.getLogger(name)
self.logger.setLevel(level)
self.shell = shell
if console:
self.__addHandler(self.consoleHandler(format, level))
def __addHandler(self, handler):
self.logger.addHandler(handler)
def consoleHandler(self, format, level):
f = logging.Formatter(format)
h = logging.StreamHandler()
h.setLevel(level)
h.setFormatter(f)
return h
    def debug(self, message):
        if message is not None:
            if self.shell: self.logger.debug(DEBUG_COLOR + message + END_COLOR)
            else: self.logger.debug(message)
    def warning(self, message):
        if message is not None:
            if self.shell: self.logger.warning(WARNING_COLOR + message + END_COLOR)
            else: self.logger.warning(message)
    def info(self, message):
        if message is not None:
            if self.shell: self.logger.info(INFO_COLOR + message + END_COLOR)
            else: self.logger.info(message)
    def critical(self, message):
        if message is not None:
            if self.shell: self.logger.critical(CRITICAL_COLOR + message + END_COLOR)
            else: self.logger.critical(message)
    def error(self, message):
        if message is not None:
            if self.shell: self.logger.error(ERROR_COLOR + message + END_COLOR)
            else: self.logger.error(message)
def __del__(self):
del self
LOG = Log(define.BASE_NAME)
if __name__ == "__main__":
logger = LOG
logger.debug("debug")
logger.warning("warning")
logger.info("info")
logger.critical("critical")
logger.error("error")
|
# Generated by Django 2.2.3 on 2019-08-14 01:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profile', '0025_auto_20190715_2206'),
]
operations = [
migrations.AddField(
model_name='product',
name='removed',
field=models.BooleanField(default=False),
preserve_default=False,
),
]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 23:07:51 2020
@author: elewah
"""
|
#!/usr/bin/env python
""" generated source for module GdlUtils """
# package: org.ggp.base.util.gdl
import java.util.ArrayList
import java.util.Collection
import java.util.Collections
import java.util.HashMap
import java.util.HashSet
import java.util.List
import java.util.Map
import java.util.Set
import org.ggp.base.util.gdl.grammar.Gdl
import org.ggp.base.util.gdl.grammar.GdlConstant
import org.ggp.base.util.gdl.grammar.GdlDistinct
import org.ggp.base.util.gdl.grammar.GdlFunction
import org.ggp.base.util.gdl.grammar.GdlLiteral
import org.ggp.base.util.gdl.grammar.GdlNot
import org.ggp.base.util.gdl.grammar.GdlOr
import org.ggp.base.util.gdl.grammar.GdlProposition
import org.ggp.base.util.gdl.grammar.GdlRule
import org.ggp.base.util.gdl.grammar.GdlSentence
import org.ggp.base.util.gdl.grammar.GdlTerm
import org.ggp.base.util.gdl.grammar.GdlVariable
class GdlUtils(object):
""" generated source for class GdlUtils """
# TODO (AL): Check if we can switch over to just having this return a set.
@classmethod
def getVariables(cls, gdl):
""" generated source for method getVariables """
variablesList = ArrayList()
variables = HashSet()
GdlVisitors.visitAll(gdl, GdlVisitor())
return variablesList
@classmethod
def getVariableNames(cls, gdl):
""" generated source for method getVariableNames """
variables = cls.getVariables(gdl)
variableNames = ArrayList()
for variable in variables:
variableNames.add(variable.__name__)
return variableNames
@classmethod
def getSentencesInRuleBody(cls, rule):
""" generated source for method getSentencesInRuleBody """
result = ArrayList()
for literal in rule.getBody():
            cls.addSentencesInLiteral(literal, result)
return result
@classmethod
def addSentencesInLiteral(cls, literal, sentences):
""" generated source for method addSentencesInLiteral """
if isinstance(literal, (GdlSentence, )):
sentences.add(literal)
        elif isinstance(literal, (GdlNot, )):
            cls.addSentencesInLiteral(literal.getBody(), sentences)
        elif isinstance(literal, (GdlOr, )):
            # reconstructed: the generated loop lost its body; recurse into each disjunct
            i = 0
            while i < literal.arity():
                cls.addSentencesInLiteral(literal.get(i), sentences)
                i += 1
elif not (isinstance(literal, (GdlDistinct, ))):
raise RuntimeException("Unexpected GdlLiteral type encountered: " + literal.__class__.getSimpleName())
@classmethod
def getTupleFromSentence(cls, sentence):
""" generated source for method getTupleFromSentence """
if isinstance(sentence, (GdlProposition, )):
return Collections.emptyList()
# A simple crawl through the sentence.
tuple_ = ArrayList()
try:
            cls.addBodyToTuple(sentence.getBody(), tuple_)
except RuntimeException as e:
raise RuntimeException(e.getMessage() + "\nSentence was " + sentence)
return tuple_
@classmethod
def addBodyToTuple(cls, body, tuple_):
""" generated source for method addBodyToTuple """
for term in body:
if isinstance(term, (GdlConstant, )):
tuple_.add(term)
elif isinstance(term, (GdlVariable, )):
tuple_.add(term)
elif isinstance(term, (GdlFunction, )):
                cls.addBodyToTuple(term.getBody(), tuple_)
else:
raise RuntimeException("Unforeseen Gdl tupe in SentenceModel.addBodyToTuple()")
@classmethod
def getTupleFromGroundSentence(cls, sentence):
""" generated source for method getTupleFromGroundSentence """
if isinstance(sentence, (GdlProposition, )):
return Collections.emptyList()
# A simple crawl through the sentence.
tuple_ = ArrayList()
try:
            cls.addBodyToGroundTuple(sentence.getBody(), tuple_)
except RuntimeException as e:
raise RuntimeException(e.getMessage() + "\nSentence was " + sentence)
return tuple_
@classmethod
def addBodyToGroundTuple(cls, body, tuple_):
""" generated source for method addBodyToGroundTuple """
for term in body:
if isinstance(term, (GdlConstant, )):
tuple_.add(term)
elif isinstance(term, (GdlVariable, )):
raise RuntimeException("Asking for a ground tuple of a non-ground sentence")
elif isinstance(term, (GdlFunction, )):
                cls.addBodyToGroundTuple(term.getBody(), tuple_)
else:
raise RuntimeException("Unforeseen Gdl tupe in SentenceModel.addBodyToTuple()")
@classmethod
def getAssignmentMakingLeftIntoRight(cls, left, right):
""" generated source for method getAssignmentMakingLeftIntoRight """
assignment = HashMap()
if not left.__name__ == right.__name__:
return None
if left.arity() != right.arity():
return None
if left.arity() == 0:
return Collections.emptyMap()
        if not cls.fillAssignmentBody(assignment, left.getBody(), right.getBody()):
return None
return assignment
@classmethod
def fillAssignmentBody(cls, assignment, leftBody, rightBody):
""" generated source for method fillAssignmentBody """
# left body contains variables; right body shouldn't
if len(leftBody) != len(rightBody):
return False
        i = 0
        while i < len(leftBody):
            # reconstructed: the generated loop never bound leftTerm/rightTerm
            leftTerm = leftBody[i]
            rightTerm = rightBody[i]
            if isinstance(leftTerm, (GdlConstant, )):
                if not leftTerm == rightTerm:
                    return False
            elif isinstance(leftTerm, (GdlVariable, )):
                if assignment.containsKey(leftTerm):
                    if not assignment.get(leftTerm) == rightTerm:
                        return False
                else:
                    if not (isinstance(rightTerm, (GdlConstant, ))):
                        return False
                    assignment.put(leftTerm, rightTerm)
            elif isinstance(leftTerm, (GdlFunction, )):
                if not (isinstance(rightTerm, (GdlFunction, ))):
                    return False
                leftFunction = leftTerm
                rightFunction = rightTerm
                if not leftFunction.__name__ == rightFunction.__name__:
                    return False
                if not cls.fillAssignmentBody(assignment, leftFunction.getBody(), rightFunction.getBody()):
                    return False
            i += 1
return True
@classmethod
@overloaded
def containsTerm(cls, sentence, term):
""" generated source for method containsTerm """
if isinstance(sentence, (GdlProposition, )):
return False
return cls.containsTerm(sentence.getBody(), term)
@classmethod
@containsTerm.register(object, List, GdlTerm)
def containsTerm_0(cls, body, term):
""" generated source for method containsTerm_0 """
for curTerm in body:
if curTerm == term:
return True
if isinstance(curTerm, (GdlFunction, )):
if cls.containsTerm((curTerm).getBody(), term):
return True
return False
|
import os
import re
import sys
import subprocess
import pytest
from testplan.common.utils.path import change_directory
import platform
ON_WINDOWS = platform.system() == 'Windows'
KNOWN_EXCEPTIONS = [
"TclError: Can't find a usable init\.tcl in the following directories:", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: lib.*\.so\..+: cannot open shared object file: No such file or directory", # Matplotlib module improperly installed. Will skip Data Science example.
"ImportError: No module named sklearn.*", # Missing module sklearn. Will skip Data Science example.
"ImportError: No module named Tkinter", # Missing module Tkinter. Will skip Data Science example.
"ImportError: No module named _tkinter.*", # Missing module Tkinter. Will skip Data Science example.
"RuntimeError: Download pyfixmsg library .*", # Missing module pyfixmsg. Will skip FIX example.
"No spec file set\. You should download .*", # Missing FIX spec file. Will skip FIX example.
"AttributeError: 'module' object has no attribute 'poll'",
"RuntimeError: You need to compile test binary first." # Need to compile cpp binary first. Will skip GTest example.
]
SKIP_ON_WINDOWS = [
os.path.join('Cpp', 'GTest', 'test_plan.py'),
]
ROOT_DIR_CONTENTS = [
"setup.py",
"requirements.txt",
"README.rst",
"LICENSE.md"
]
def _depth_from_repo_root():
cwd = os.getcwd()
depth = []
while True:
contents = os.listdir(cwd)
if all([entry in contents for entry in ROOT_DIR_CONTENTS]):
return depth
parent_dir = os.path.dirname(cwd)
if os.path.realpath(cwd) == os.path.realpath(parent_dir):
raise RuntimeError('Could not find repo directory')
depth.append(os.pardir)
cwd = parent_dir
def _relative_dir(directory):
path_args = _depth_from_repo_root() + [directory]
return os.path.join(*path_args)
def _param_formatter(param):
if 'examples' in param:
return repr(param.rsplit('examples')[1])
return repr(param)
@pytest.mark.parametrize(
'root,filename',
[
(os.path.abspath(root), filename)
for root, _, files in os.walk(
_relative_dir(os.path.join('testplan', 'examples')))
for filename in files
if 'test_plan' in filename
],
ids=_param_formatter,
)
def test_example(root, filename):
file_path = os.path.join(root, filename)
if ON_WINDOWS and any(
[file_path.endswith(skip_name) for skip_name in SKIP_ON_WINDOWS]
):
pytest.skip()
with change_directory(root), open(filename) as file_obj:
file_obj.readline()
second_line = file_obj.readline()
try:
subprocess.check_output(
[sys.executable, filename],
stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
out = e.output.decode()
for exception in KNOWN_EXCEPTIONS:
if re.search(exception, out):
pytest.xfail()
assert 'Exception in test_plan definition' not in out, \
'Exception raised in test_plan definition.'
assert 'Traceback (most recent call last):' not in out, \
'Exception raised during test:\n{}'.format(out)
assert \
('# This plan contains tests that demonstrate failures '
'as well.') == second_line.strip(), \
"Expected \'{}\' example to pass, it failed.\n{}".format(
file_path,
out
)
|
"""
Event called when someone parts a channel
"""
import consoleHelper
import bcolors
import glob
import clientPackets
import serverPackets
def handle(userToken, packetData):
# Channel part packet
packetData = clientPackets.channelPart(packetData)
partChannel(userToken, packetData["channel"])
def partChannel(userToken, channelName, kick = False):
# Get usertoken data
username = userToken.username
userID = userToken.userID
# Remove us from joined users and joined channels
if channelName in glob.channels.channels:
# Check that user is in channel
if channelName in userToken.joinedChannels:
userToken.partChannel(channelName)
# Check if user is in channel
if userID in glob.channels.channels[channelName].connectedUsers:
glob.channels.channels[channelName].userPart(userID)
# Force close tab if needed
if kick == True:
userToken.enqueue(serverPackets.channelKicked(channelName))
# Console output
consoleHelper.printColored("> {} parted channel {}".format(username, channelName), bcolors.YELLOW)
|
from paypalrestsdk import Payout, ResourceNotFound
import logging
logging.basicConfig(level=logging.INFO)
try:
payout = Payout.find("R3LFR867ESVQY")
print("Got Details for Payout[%s]" % (payout.batch_header.payout_batch_id))
except ResourceNotFound as error:
print("Web Profile Not Found")
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from jrjdataspiderProject.items import JrjdataspiderprojectItem,JrjdataspiderprojectItems
import re
import json
class BankproSpider(scrapy.Spider):
pipeline = ['JrjdataspiderprojectPipeline']
name = "bankpro"
allowed_domains = ["jrj.com.cn"]
start_urls = ['http://bankpro.jrj.com.cn/json/f.jspa?size=50&pn=1&t={"st":"0","xsdq":"-1,-1","sort":"sell_org_date","order":"desc","wd":""}']
def parse(self, response):
body = response.body
pat = r'var bps=(.*)'
regex = re.compile(pat)
rtn = regex.findall(body)[0]
ob = json.loads(rtn)
status = ob.get(u'success', False)
if status == True:
page_count = ob.get(u'page').get(u'pc')
self.page_count = int(page_count)
if self.page_count > 0:
for i in range(1, self.page_count + 1):
url = u'http://bankpro.jrj.com.cn/json/f.jspa?size=50&pn='+ str(i) +'&t={"st":"0","xsdq":"-1,-1","sort":"sell_org_date","order":"desc","wd":""}';
yield Request(url=url, callback=self.nextParse)
def nextParse(self, response):
body = response.body
pat = r'var bps=(.*)'
regex = re.compile(pat)
rtn = regex.findall(body)[0]
ob = json.loads(rtn)
status = ob.get(u'success', False)
if status == True:
bankProductList = ob.get(u'bankProductList', [])
items = JrjdataspiderprojectItems()
items['items'] = []
if len(bankProductList) > 0:
for im in bankProductList:
item = JrjdataspiderprojectItem()
item['bank_Id'] = im.get(u'bank_Id', None)
item['bank_Name'] = im.get(u'bank_Name', None)
item['days'] = im.get(u'days', None)
item['end_Date'] = im.get(u'end_Date', None)
item['entr_Curncy_Name'] = im.get(u'entr_Curncy_Name', None)
item['entr_Curncy_Type'] = im.get(u'entr_Curncy_Type', None)
item['entr_Min_Curncy'] = im.get(u'entr_Min_Curncy', None)
item['inc_Score'] = im.get(u'inc_Score', None)
item['inner_Code'] = im.get(u'inner_Code', None)
item['liq_Score'] = im.get(u'liq_Score', None)
item['mat_Actu_Yld'] = im.get(u'mat_Actu_Yld', None)
item['months'] = im.get(u'months', None)
item['multiple'] = im.get(u'multiple', None)
item['prd_Max_Yld'] = im.get(u'prd_Max_Yld', None)
item['prd_Max_Yld_De'] = im.get(u'prd_Max_Yld_De', None)
item['prd_Sname'] = im.get(u'prd_Sname', None)
item['prd_Type'] = im.get(u'prd_Type', None)
item['rist_Score'] = im.get(u'rist_Score', None)
item['sell_End_Date'] = im.get(u'sell_End_Date', None)
item['sell_Org_Date'] = im.get(u'sell_Org_Date', None)
item['star'] = im.get(u'star', None)
item['state'] = im.get(u'state', None)
item['stk_Score'] = im.get(u'stk_Score', None)
item['row'] = im.get(u'row', None)
items['items'].append(item)
yield items
|
import sys
from . import git
from typing import *
MAX_SUBVERSION = 1000000
class Version:
def __init__(self, tag: str, prefix: str, major: int, minor: int, patch: int, suffix: str):
self.tag = tag
self.prefix = prefix
self.major = major
self.minor = minor
self.patch = patch
self.suffix = suffix
if major >= MAX_SUBVERSION:
raise Exception(f"Invalid major: {major}")
if minor >= MAX_SUBVERSION:
raise Exception(f"Invalid minor: {minor}")
if patch >= MAX_SUBVERSION:
raise Exception(f"Invalid patch: {patch}")
def __str__(self) -> str:
return f"{self.major}.{self.minor}.{self.patch}{'-' + self.suffix if self.suffix else ''}"
def __lt__(self, other: "Version") -> bool:
return self.to_n() < other.to_n()
def to_n(self) -> int:
return self.major * MAX_SUBVERSION**2 + self.minor * MAX_SUBVERSION + self.patch
def parse_tag_as_semver(tag: str) -> Optional[Version]:
if not tag:
return None
prefix = ""
if tag[0] == "v" or tag[0] == "V":
prefix = tag[0]
parts = tag[1:].split(".")
else:
parts = tag.split(".")
if len(parts) != 3:
return None
try:
patch = parts[2]
suffix = ""
if "-" in patch:
                patch, suffix = patch.split("-", 1)
            return Version(tag, prefix, int(parts[0]), int(parts[1]), int(patch), suffix)
        except Exception:
return None
def get_all_versions_ordered(output_non_versions: bool=False) -> List[Version]:
success, tags = git.execute_git("tag", output=False)
if not success:
print('Error getting tags')
sys.exit(1)
versions = []
for line in tags.split("\n"):
line = line.strip()
if not line:
continue
version = parse_tag_as_semver(line)
if not version:
if output_non_versions:
print(f"{line} not a tag")
else:
versions.append(version)
versions.sort()
return versions
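# Parsing sketch (editor's addition): exercises parse_tag_as_semver without any
# git calls. Because this module uses a relative import, run it with
# `python -m <package>.<module>` rather than directly.
if __name__ == "__main__":
    demo_tags = ["v1.2.3", "2.0.0-rc1", "not-a-version"]
    parsed = [parse_tag_as_semver(tag) for tag in demo_tags]
    for tag, version in zip(demo_tags, parsed):
        print(tag, "->", version)
    print("newest:", max(v for v in parsed if v is not None))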
|
from django.shortcuts import render
from account.views import account_login
# Create your views here.
def index(request):
if not request.user.is_authenticated:
return account_login(request)
context = {}
# return render(request, "voting/login.html", context)
|
description = 'Chopper 2'
group = 'optional'
devices = dict(
ch2_speed=device('nicos.devices.generic.virtual.VirtualMotor',
description='Rotation speed',
abslimits=(0, 300),
userlimits=(0, 300),
fmtstr='%.f',
unit='Hz',
speed=5,
),
ch2_phase=device('nicos.devices.generic.virtual.VirtualMotor',
description='Phase angle',
abslimits=(-180, 180),
userlimits=(-180, 180),
fmtstr='%.f',
        unit='deg',
jitter=2,
speed=5,
),
)
|
names = ["Adarsh", "Advika", "Kunika"]
#--------------------------------------------------------------
# For Loop
#--------------------------------------------------------------
# Looping through the list
for name in names:
print (name)
# Looping through range
x = 0
for index in range(10):
x += 10
if index == 5:
print("Index is 5 so continuing...")
continue
if index == 7:
print("Breaking the loop")
break
print("10 * {0} = {1}".format(index + 1, x))
# Range with start and end index (wrap in list() to compare with a list in Python 3)
list(range(5, 10)) == [5, 6, 7, 8, 9]
# Range with an increment (step) value
list(range(5, 10, 2)) == [5, 7, 9]
#--------------------------------------------------------------
# While Loop
#--------------------------------------------------------------
x = 0
while True:
x += 1
if x > 5:
break
print(x)
|
import os, sys, re
|
# @author: https://github.com/luis2ra from https://www.w3schools.com/python/default.asp
'''
What is Python?
Python is a popular programming language. It was created by Guido van Rossum, and released in 1991.
It is used for:
* web development (server-side),
* software development,
* mathematics,
* system scripting.
What can Python do?
* Python can be used on a server to create web applications.
* Python can be used alongside software to create workflows.
* Python can connect to database systems. It can also read and modify files.
* Python can be used to handle big data and perform complex mathematics.
* Python can be used for rapid prototyping, or for production-ready software development.
Why Python?
* Python works on different platforms (Windows, Mac, Linux, Raspberry Pi, etc).
* Python has a simple syntax similar to the English language.
* Python has syntax that allows developers to write programs with fewer lines than some other programming languages.
* Python runs on an interpreter system, meaning that code can be executed as soon as it is written. This means that prototyping can be very quick.
* Python can be treated in a procedural way, an object-oriented way or a functional way.
Good to know
* The most recent major version of Python is Python 3, which we shall be using in this tutorial. However, Python 2, although not being updated with anything other than security updates, is still quite popular.
* In this tutorial Python will be written in a text editor. It is possible to write Python in an Integrated Development Environment, such as Thonny, Pycharm, Netbeans or Eclipse which are particularly useful when managing larger collections of Python files.
Python Syntax compared to other programming languages
* Python was designed for readability, and has some similarities to the English language with influence from mathematics.
* Python uses new lines to complete a command, as opposed to other programming languages which often use semicolons or parentheses.
* Python relies on indentation, using whitespace, to define scope; such as the scope of loops, functions and classes. Other programming languages often use curly-brackets for this purpose.
'''
print("Hello, World!!!")
|
from argparse import ArgumentParser
import numpy as np
import requests
from mmdet3d.apis import inference_detector, init_model
def parse_args():
parser = ArgumentParser()
parser.add_argument('pcd', help='Point cloud file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='3d bbox score threshold')
args = parser.parse_args()
return args
def parse_result(response_json):
    # the torchserve response is a list of dicts whose first entry carries a '3dbbox' field
    bbox = response_json[0]['3dbbox']
result = np.array(bbox)
return result
def main(args):
# build the model from a config file and a checkpoint file
model = init_model(args.config, args.checkpoint, device=args.device)
# test a single point cloud file
model_result, _ = inference_detector(model, args.pcd)
    # filter the 3d bboxes whose scores exceed the score threshold
if 'pts_bbox' in model_result[0].keys():
pred_bboxes = model_result[0]['pts_bbox']['boxes_3d'].tensor.numpy()
pred_scores = model_result[0]['pts_bbox']['scores_3d'].numpy()
else:
pred_bboxes = model_result[0]['boxes_3d'].tensor.numpy()
pred_scores = model_result[0]['scores_3d'].numpy()
    model_result = pred_bboxes[pred_scores > args.score_thr]
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.pcd, 'rb') as points:
response = requests.post(url, points)
server_result = parse_result(response.json())
assert np.allclose(model_result, server_result)
if __name__ == '__main__':
args = parse_args()
main(args)
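# Example invocation (hypothetical file and model names; adjust to your own config, checkpoint and the
# name the model was registered under in torchserve):
#   python this_script.py demo.bin my_config.py my_checkpoint.pth my_model \
#       --inference-addr 127.0.0.1:8080 --device cuda:0 --score-thr 0.5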
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
from goto import with_goto
"""
基于连续自适应均值迁移(CAM)的对象移动分析
CAM是连续自适应的均值迁移跟踪算法,相对于均值迁移相比较他的主要改进点有两处,一是会根据跟踪对象大小变化自动
调整搜索窗口大小;二是会返回更为完整的位置信息,其中包括了位置坐标及角度
cv.CamShift(probImage, window, criteria)
- probImage: 输入图像,直方图方向投影结果
- window: 搜索开窗大小,ROI对象区域
- criteria: 均值迁移停止条件
注:返回信息中需要手动更新开窗信息
"""
video_param = "../../../raspberry-auto/pic/balltest.mp4"
@with_goto
def main():
capture = cv.VideoCapture(video_param)
ret, frame = capture.read()
    if not ret:
print("can't read any video!")
goto .end
cv.namedWindow("live", cv.WINDOW_AUTOSIZE)
x, y, w, h = cv.selectROI("live", frame, True, False)
track_window = (x, y, w, h)
roi = frame[y: y + h, x: x + w]
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, (26, 43, 46), (34, 255, 255))
roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX)
term_criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
while True:
ret, frame = capture.read()
        if not ret:
print("video is end.")
break
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
dst = cv.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
track_box = cv.CamShift(dst, track_window, term_criteria)
track_window = track_box[1]
print(track_box)
cv.ellipse(frame, track_box[0], (0, 0, 255), 3, cv.LINE_8)
cv.imshow("live", frame)
key = cv.waitKey(10)
        if key == 27:  # Esc
break
label .end
capture.release()
cv.destroyAllWindows()
if "__main__" == __name__:
main()
|
import apscheduler.executors.asyncio
import apscheduler.jobstores.mongodb
import apscheduler.schedulers.asyncio
from common.application_modules.module import ApplicationModule
scheduler_job_store = {"default": apscheduler.jobstores.mongodb.MongoDBJobStore()}
scheduler_executors = {"default": apscheduler.executors.asyncio.AsyncIOExecutor()}
scheduler = apscheduler.schedulers.asyncio.AsyncIOScheduler(
jobstores=scheduler_job_store, executors=scheduler_executors
)
class ScheduleModule(ApplicationModule):
def __init__(self, name):
super().__init__(name)
self.scheduler = scheduler
def add_jobs(self):
from game.models.world import CLOCK
self.scheduler.add_job(
CLOCK.tick,
"interval",
minutes=1,
)
def start(self, config, loop):
self.scheduler.configure(event_loop=loop)
self.scheduler.start()
self.add_jobs()
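# Illustrative wiring (hypothetical names; the surrounding application framework is not shown here):
#   module = ScheduleModule("scheduler")
#   module.start(config, loop)  # attaches the AsyncIOScheduler to the given event loop and registers the clock job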
|
from rest_framework import permissions
class IsMember(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
profile = request.user.profile
return profile in obj.members.all()
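# Illustrative usage (not part of this module): reference the permission from a DRF view, e.g.
#   permission_classes = [IsMember]
# so that has_object_permission() runs for object-level (detail) access checks.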
|
# -*- coding: utf-8 -*-
from .project import * # NOQA
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.7/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Comment if you are not running behind proxy
USE_X_FORWARDED_HOST = True
# Set debug to false for production
DEBUG = TEMPLATE_DEBUG = False
INSTALLED_APPS += (
# 'raven.contrib.django.raven_compat', # enable Raven plugin
)
DATABASES = {
'default': {
# Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
'ENGINE': 'django.contrib.gis.db.backends.postgis',
# Or path to database file if using sqlite3.
'NAME': '',
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
# Empty for localhost through domain sockets or '127.0.0.1' for
# localhost through TCP.
'HOST': '',
# Set to empty string for default.
'PORT': '',
}
}
if 'raven.contrib.django.raven_compat' in INSTALLED_APPS:
SENTRY_DSN = ('#REPLACE ME#')
MIDDLEWARE_CLASSES = (
'raven.contrib.django.middleware.SentryResponseErrorIdMiddleware',
'raven.contrib.django.middleware.SentryLogMiddleware',
) + MIDDLEWARE_CLASSES
#
# Sentry settings - logs exceptions to a database
LOGGING = {
# internal dictConfig version - DON'T CHANGE
'version': 1,
'disable_existing_loggers': True,
# default root logger - handle with sentry
'root': {
'level': 'ERROR',
'handlers': ['sentry'],
},
'handlers': {
# send email to mail_admins, if DEBUG=False
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
# sentry logger
'sentry': {
'level': 'WARNING',
'class': 'raven.contrib.django.handlers.SentryHandler',
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['sentry'],
'propagate': False
},
'raven': {
'level': 'ERROR',
'handlers': ['mail_admins'],
'propagate': False
},
'sentry.errors': {
'level': 'ERROR',
'handlers': ['mail_admins'],
'propagate': False
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
}
}
}
|
"""
Evaluate the value of an arithmetic expression in Reverse Polish Notation.
Valid operators are +, -, *, and /. Each operand may be an integer or another expression.
Note that division between two integers should truncate toward zero.
It is guaranteed that the given RPN expression is always valid. That means the expression would always evaluate to a
result, and there will not be any division by zero operation.
Example 1:
Input: tokens = ["2","1","+","3","*"]
Output: 9
Explanation: ((2 + 1) * 3) = 9
Constraints:
1 <= tokens.length <= 10^4
tokens[i] is either an operator: "+", "-", "*", or "/", or an integer in the range [-200, 200].
"""
import operator
from typing import List
class Solution:
"""
Stack is the best structure for this kind of problems.
Runtime: 56 ms, faster than 98.03% of Python3
Memory Usage: 14.5 MB, less than 88.90% of Python3
Time / Space complexity: O(n)
"""
def evalRPN(self, tokens: List[str]) -> int:
stack = []
ops = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
            # division truncating the result toward zero: int() applied to true division (not floor division)
"/": lambda dividend, divider: int(operator.truediv(dividend, divider)),
}
for token in tokens:
if (token.isdigit() or
(token.startswith("-") and token[1:].isdigit())):
stack.append(int(token))
elif token in ops:
b = stack.pop()
a = stack.pop()
result = ops[token](a, b)
stack.append(result)
return stack.pop()
if __name__ == '__main__':
solutions = [Solution()]
tc = (
(["2", "1", "+", "3", "*"], 9), # ((2 + 1) * 3) = 9
(["4", "13", "5", "/", "+"], 6), # (4 + (13 / 5)) = 6
(["10", "6", "9", "3", "+", "-11", "*", "/", "*", "17", "+", "5", "+"], 22),
)
for sol in solutions:
for inp_tokens, exp_res in tc:
assert sol.evalRPN(inp_tokens) == exp_res
|
import random
import pygame
from constants import *
class Pillar:
"""Pillar class for the flappy game. Y-direction is so that pillar starts from 0 and extends until gap, gap is between self.gap+gap, lower ifrom that to height-ground-self.gap-gap
"""
# Pillar on siis korkeussuunnassa 0->gap ja self.gap+gap->height-ground-self.gap-gap
# Pilarin alareuna siis matalimmillaan. 300
# Pilarin alareuna siis korkeimmillaan 140
# Linnun yläosa matalimmillaan 420
# Linnun yläosa korkeimmillaan 0
    def __init__(self, position, randomize=True):
        """Initialize a pillar.
        Args:
            position (int): x-position of the pillar
            randomize (bool): random gap if True, otherwise a predetermined gap
        """
        if randomize:
            self.gap = 100 + random.randint(pillargapL, pillargapU)
        else:
            self.gap = 150
self.pos = position
self.velocity = pillarVelocity
self.upperPillar = pygame.Rect(self.pos, 0, 50, self.gap)
self.lowerPillar = pygame.Rect(
self.pos, self.gap + gap, 50, height - ground - self.gap - gap
)
self.rectangles = [self.upperPillar, self.lowerPillar]
def move_pillar(self):
"""Move pillar to a direction of the velocity
"""
self.pos = self.pos - pillarVelocity
def draw_pillar(self):
"""Draw a pillar
"""
self.upperPillar = pygame.Rect(self.pos, 0, 50, self.gap)
self.lowerPillar = pygame.Rect(
self.pos, self.gap + gap, 50, height - ground - self.gap - gap
)
self.rectangles = [self.upperPillar, self.lowerPillar]
pygame.draw.rect(screen, BLACK, pygame.Rect(self.pos, 0, 50, self.gap))
pygame.draw.rect(
screen,
BLACK,
pygame.Rect(self.pos, self.gap + gap, 50, height - ground - self.gap - gap),
)
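# Illustrative usage (hypothetical spawn position; `screen`, `BLACK` and the limits are assumed to come
# from the constants module imported above):
#   pillar = Pillar(400)
#   pillar.move_pillar()  # shift left by pillarVelocity
#   pillar.draw_pillar()  # recompute both rects and draw the two pillar halves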
|
# -*- coding: utf-8 -*-
from .__version__ import __title__, __description__, __url__, __version__
from .__version__ import __author__, __author_email__, __license__
from .__version__ import __copyright__
|
"""
Computational engines used by GP backends.
"""
import numpy as np
class Engine(object):
"""The base class for computational engines.
"""
def addpath(self, path):
self._eng.addpath(path)
def eval(self, expr, verbose):
"""Evaluate an expression.
"""
raise NotImplementedError
def push(self, name, var):
"""Push a variable into the engine session under the given name.
"""
raise NotImplementedError
def pull(self, name):
"""Pull a variable from the engine session.
"""
raise NotImplementedError
class MATLABEngine(Engine):
def __init__(self):
import matlab.engine
from matlab import double as matdouble
        from io import StringIO
self._matarray = matdouble
self._eng = matlab.engine.start_matlab()
self._devnull = StringIO()
def push(self, name, var):
# Convert np.ndarrays into matlab.doubles and push into the workspace
if type(var) is np.ndarray:
self._eng.workspace[name] = self._matarray(var.tolist())
elif type(var) is dict:
var_copy = var.copy()
            for k, v in var_copy.items():
if type(v) is np.ndarray:
var_copy[k] = self._matarray(v.tolist())
self._eng.workspace[name] = var_copy
elif type(var) in {list, int, float}:
self._eng.workspace[name] = var
else:
raise ValueError("Unknown type (%s) variable being pushed "
"into the MATLAB session." % type(var))
def pull(self, name):
var = self._eng.workspace[name]
if type(var) is self._matarray:
var = np.asarray(var)
elif type(var) is dict:
            for k, v in var.items():
if type(v) is self._matarray:
var[k] = np.asarray(v)
return var
def eval(self, expr, verbose=0):
assert type(expr) is str
stdout = None if verbose else self._devnull
self._eng.eval(expr, nargout=0, stdout=stdout)
class OctaveEngine(Engine):
def __init__(self, jit_enable=True):
from oct2py import Oct2Py
from oct2py import Struct
self._struct = Struct
self._eng = Oct2Py()
if jit_enable:
self._eng.eval('jit_enable(1)', verbose=0)
self._eng.eval('pkg load statistics', verbose=0)
def push(self, name, var):
if type(var) is np.ndarray and var.dtype == 'float32':
# Octave does not support `sparse matrix * dense matrix` operations
# for float32 type, hence we cast `var` to float64 before pushing
# into the Octave session
var = var.astype('float64')
self._eng.push(name, var)
def pull(self, name):
var = self._eng.pull(name)
if type(var) is self._struct:
var = dict(var)
return var
def eval(self, expr, verbose=0):
assert type(expr) is str
self._eng.eval(expr, verbose=verbose)
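# A minimal illustrative engine (a sketch added here, not one of the original GP backends): it keeps
# pushed variables in a plain dict and evaluates Python expressions with eval(), just to make the
# push/pull/eval contract of Engine concrete.
class DictEngine(Engine):
    def __init__(self):
        self._vars = {}
    def push(self, name, var):
        self._vars[name] = var
    def pull(self, name):
        return self._vars[name]
    def eval(self, expr, verbose=0):
        assert type(expr) is str
        if verbose:
            print(expr)
        return eval(expr, {"np": np}, self._vars)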
|
# coding: utf-8
from .target_utils import ItemList
from .header_file import HeaderFileReader
class TargetContext(dict):
def __init__(self, *args, **kwargs):
        super().__init__()
self['options'] = {}
vars_normal = ['defines', 'public_defines',
'include_dirs', 'public_include_dirs',
'lib_dirs', 'public_lib_dirs',
'libs', 'public_libs',
'headers', 'sysroots',
'ccflags', 'cxxflags', 'arflags', 'ldflags',
'deps', 'configs',
]
vars_pattern = ['public_headers', 'sources'] # support pattern add/sub
for v in vars_normal:
self[v] = ItemList(name=v)
for v in vars_pattern:
self[v] = ItemList(name=v, base=kwargs['source_base_dir'])
for k, v in kwargs.items():
if k in self:
self[k] += v
else:
self[k] = v
self.group_vars = [{}]
self.group_vars.extend(args)
def __getattr__(self, name):
if name in self:
return self[name]
return None
def getVar(self, key):
for v in self.group_vars:
if key in v:
return v[key]
return None
def setVar(self, key, value):
self.group_vars[0][key] = value
def enabled(self, key):
return self['options'].get(key, False)
def enable(self, key, value=True):
self['options'][key] = value
def parseFile(self, filepath, pattern):
reader = HeaderFileReader(filepath)
return reader.findall(pattern)
def parse_target_file(filepath):
global_scope = {}
local_scope = {}
with open(filepath, 'r', encoding='utf-8') as f:
content = f.read()
try:
exec(content, global_scope, local_scope)
except SyntaxError as e:
        print('* Exception occurred in', filepath, e)
return []
return local_scope.get('export_libs', [])
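# Note: parse_target_file exec()s the given file and returns whatever it assigns to `export_libs`
# (an empty list if the name is absent or the file raises a SyntaxError).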
|
from abc import ABC, abstractmethod
import csv
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import NullFormatter
class Logger(ABC):
@abstractmethod
def log_cost_accuracy(self, n_epoch, cost, accuracy):
pass
class SilentLogger(Logger):
def log_cost_accuracy(self, n_epoch, cost, accuracy):
pass
class StdOutLogger(Logger):
def log_cost_accuracy(self, n_epoch, cost, accuracy):
print(" - Epoch :", n_epoch)
print(" - Cost : ", cost)
print(" - Accuracy", accuracy)
print()
class CSVLogger(Logger):
def __init__(self, csv_file):
self.csv_file = csv_file
fd = open(self.csv_file, "a+")
writer = csv.writer(fd)
writer.writerow(["n_epoch", "cost", "accuracy"])
fd.close()
def log_cost_accuracy(self, n_epoch, cost, accuracy):
with open(self.csv_file, 'a') as fd:
writer = csv.writer(fd)
writer.writerow([n_epoch, cost, accuracy])
class MatplotlibLogger(Logger):
def __init__(self):
self.cost_vals = []
self.accuracy_vals = []
@staticmethod
def _plot_metric(metric_vals, metric_name, export_file=None):
t = np.arange(len(metric_vals))
fig, ax = plt.subplots()
ax.plot(t, metric_vals)
        ax.set(xlabel="Epochs", ylabel=metric_name, title='Training progress')
ax.grid()
if export_file is not None:
fig.savefig(export_file)
plt.show()
def log_cost_accuracy(self, n_epoch, cost, accuracy):
self.cost_vals.append(cost)
self.accuracy_vals.append(accuracy)
    def plot_cost(self): self._plot_metric(self.cost_vals, "Cost")
    def plot_accuracy(self): self._plot_metric(self.accuracy_vals, "Accuracy")
def reset(self):
self.cost_vals = []
self.accuracy_vals = []
class BenchmarkLogger:
def __init__(self, csv_file_name: str):
self.csv_file_name = csv_file_name
self.train_cost_vals = []
self.test_cost_vals = []
self.train_acc_vals = []
self.test_acc_vals = []
    def reset(self):
        self.train_cost_vals = []
        self.test_cost_vals = []
        self.train_acc_vals = []
        self.test_acc_vals = []
def dump_results(self):
if self.csv_file_name is None:
            return
fd = open(self.csv_file_name, "a+")
writer = csv.writer(fd)
writer.writerow(["n_iter", "train_cost", "test_cost", "train_acc", "test_acc"])
for i, res in enumerate(
zip(self.train_cost_vals, self.test_cost_vals, self.train_acc_vals, self.test_acc_vals)):
writer.writerow([i] + list(res))
fd.close()
def benchmark_log(self, train_cost, train_acc, test_cost, test_acc):
self.train_cost_vals.append(train_cost)
self.train_acc_vals.append(train_acc)
self.test_cost_vals.append(test_cost)
self.test_acc_vals.append(test_acc)
def plot_benchmark(self):
x = np.arange(len(self.train_cost_vals))
# Creating 2 subplots
plt.figure(1)
# Plotting Costs
plt.subplot(211)
plt.plot(x, self.train_cost_vals)
plt.plot(x, self.test_cost_vals)
plt.yscale('linear')
plt.title('Cost')
plt.legend(['Training Cost', 'Testing Cost'], loc='upper left')
plt.grid(True)
# Plotting Accuracies
plt.subplot(212)
plt.plot(x, self.train_acc_vals)
plt.plot(x, self.test_acc_vals)
plt.yscale('linear')
plt.title('Accuracy')
plt.legend(['Training Accuracy', 'Testing Accuracy'], loc='upper left')
plt.grid(True)
# Format the minor tick labels of the y-axis into empty strings with
# `NullFormatter`, to avoid cumbering the axis with too many labels.
plt.gca().yaxis.set_minor_formatter(NullFormatter())
plt.show()
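# Illustrative usage of BenchmarkLogger (hypothetical values):
#   logger = BenchmarkLogger("benchmark.csv")
#   logger.benchmark_log(train_cost=0.42, train_acc=0.88, test_cost=0.47, test_acc=0.85)
#   logger.dump_results()    # appends the accumulated rows to benchmark.csv
#   logger.plot_benchmark()  # draws the cost and accuracy subplots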
|
# _*_ coding: utf-8 _*_
#
# Package: bookstore.src.core
__all__ = ["controller", "enum", "property", "repository", "service", "util", "validator"]
|