"""CrowdStrike Falcon Detections API interface class.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
from ._util import force_default, process_service_request
from ._payload import generic_payload_list, update_detects_payload
from ._payload import aggregate_payload
from ._service_class import ServiceClass
from ._endpoint._detects import _detects_endpoints as Endpoints
class Detects(ServiceClass):
"""The only requirement to instantiate an instance of this class is one of the following.
- a valid client_id and client_secret provided as keywords.
- a credential dictionary with client_id and client_secret containing valid API credentials
{
"client_id": "CLIENT_ID_HERE",
"client_secret": "CLIENT_SECRET_HERE"
}
- a previously-authenticated instance of the authentication service class (oauth2.py)
- a valid token provided by the authentication service class (OAuth2.token())
"""
@force_default(defaults=["body"], default_types=["dict"])
def get_aggregate_detects(self: object, body: dict = None, **kwargs) -> dict:
"""Get detect aggregates as specified via json in request body.
Keyword arguments:
body -- full body payload, not required when using other keywords.
[
{
"date_ranges": [
{
"from": "string",
"to": "string"
}
],
"field": "string",
"filter": "string",
"interval": "string",
"min_doc_count": 0,
"missing": "string",
"name": "string",
"q": "string",
"ranges": [
{
"From": 0,
"To": 0
}
],
"size": 0,
"sort": "string",
"sub_aggregates": [
null
],
"time_zone": "string",
"type": "string"
}
]
date_ranges -- List of dictionaries.
field -- String.
filter -- FQL syntax. String.
interval -- String.
min_doc_count -- Minimum number of documents required to match. Integer.
missing -- String.
name -- Scan name. String.
q -- FQL syntax. String.
ranges -- List of dictionaries.
size -- Integer.
sort -- FQL syntax. String.
sub_aggregates -- List of strings.
time_zone -- String.
type -- String.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/detects/GetAggregateDetects
"""
if not body:
body = aggregate_payload(submitted_keywords=kwargs)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetAggregateDetects",
body=body
)
@force_default(defaults=["body"], default_types=["dict"])
def update_detects_by_ids(self: object, *args, body: dict = None, **kwargs) -> dict:
"""Modify the state, assignee, and visibility of detections.
Keyword arguments:
assigned_to_uuid -- A user ID to assign the detection to.
body -- full body payload, not required when using other keywords.
{
"assigned_to_uuid": "string",
"comment": "string",
"ids": [
"string"
],
"show_in_ui": true,
"status": "string"
}
comment -- Optional comment to add to the detection. Comments are displayed with
the detection in Falcon and are usually used to provide context or
notes for other Falcon users. A detection can have multiple comments
over time.
ids -- ID(s) of the detection to update. String or list of strings.
show_in_ui -- Boolean determining if this detection is displayed in the Falcon
console.
status -- Current status of the detection. Allowed values:
ignored, new, in_progress, true_positive, false_positive
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: PATCH
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/detects/UpdateDetectsByIdsV2
"""
if not body:
body = update_detects_payload(current_payload=generic_payload_list(
submitted_arguments=args,
submitted_keywords=kwargs,
payload_value="ids"
),
passed_keywords=kwargs
)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="UpdateDetectsByIdsV2",
body=body,
body_validator={
"assigned_to_uuid": str,
"comment": str,
"ids": list,
"show_in_ui": bool,
"status": str
} if self.validate_payloads else None,
body_required=["ids"] if self.validate_payloads else None
)
@force_default(defaults=["body"], default_types=["dict"])
def get_detect_summaries(self: object, *args, body: dict = None, **kwargs) -> dict:
"""View information about detections.
Keyword arguments:
body -- full body payload, not required when ids keyword is provided.
{
"ids": [
"string"
]
}
ids -- ID(s) of the detections to retrieve. String or list of strings.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/detects/GetDetectSummaries
"""
if not body:
body = generic_payload_list(submitted_arguments=args,
submitted_keywords=kwargs,
payload_value="ids"
)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetDetectSummaries",
body=body,
body_validator={"ids": list} if self.validate_payloads else None,
body_required=["ids"] if self.validate_payloads else None
)
@force_default(defaults=["parameters"], default_types=["dict"])
def query_detects(self: object, parameters: dict = None, **kwargs) -> dict:
"""Search for detection IDs that match a given query.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
AVAILABLE FILTERS
General
----------------------------------------------------
adversary_ids           max_confidence
assigned_to_name        max_severity
cid                     max_severity_displayname
date_updated            seconds_to_triaged
detection_id            seconds_to_resolved
first_behavior          status
last_behavior
Behavioral (behaviors.filter) Ex: behaviors.md5
----------------------------------------------------
alleged_filetype        pattern_disposition
behavior_id             scenario
cmdline                 severity
confidence              sha256
control_graph_id        tactic
device_id               technique
filename                timestamp
ioc_source              triggering_process_id
ioc_type                triggering_process_graph_id
ioc_value               user_id
md5                     user_name
objective
parent_details.parent_cmdline
parent_details.parent_md5
parent_details.parent_process_id
parent_details.parent_process_graph_id
parent_details.parent_sha256
Devices (device.filter) Ex: device.platform_name
----------------------------------------------------
agent_load_flags        machine_domain
agent_local_time        major_version
agent_version           minor_version
bios_manufacturer       modified_timestamp
bios_version            os_version
cid                     ou
config_id_base          platform_id
config_id_build         platform_name
config_id_platform      product_type
cpu_signature           product_type_desc
device_id               release_group
external_ip             reduced_functionality_mode
first_seen              serial_number
hostname                site_name
last_seen               status
local_ip                system_product_name
mac_address             system_manufacturer
Misc
----------------------------------------------------
hostinfo.domain
hostinfo.active_directory_dn_display
quarantined_files.id        quarantined_files.state
quarantined_files.paths     quarantined_files.sha256
For more detail regarding filtering options, please review:
https://falcon.crowdstrike.com/documentation/86/detections-monitoring-apis#find-detections
limit -- The maximum number of detections to return in this response.
[Integer, default: 9999; max: 9999]
Use with the offset parameter to manage pagination of results.
offset -- The first detection to return, where 0 is the latest detection.
Use with the limit parameter to manage pagination of results.
parameters - full parameters payload, not required if using other keywords.
q -- Search all detection metadata for the provided string.
sort -- The property to sort by. FQL syntax (e.g. last_behavior|asc).
Available sort fields:
adversary_id            last_behavior
devices.hostname        max_confidence
first_behavior          max_severity
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/detects/QueryDetects
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="QueryDetects",
keywords=kwargs,
params=parameters
)
# These method names align to the operation IDs in the API but
# do not conform to snake_case / PEP8 and are defined here for
# backwards compatibility / ease of use purposes
GetAggregateDetects = get_aggregate_detects
UpdateDetectsByIdsV2 = update_detects_by_ids
GetDetectSummaries = get_detect_summaries
QueryDetects = query_detects
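# Minimal usage sketch (illustrative only; the credentials below are placeholders and the
# response structure follows the standard FalconPy service-class result dictionary):
#
#     from falconpy import Detects
#     falcon = Detects(client_id="CLIENT_ID_HERE", client_secret="CLIENT_SECRET_HERE")
#     found = falcon.query_detects(filter="status:'new'", sort="last_behavior|desc", limit=100)
#     detection_ids = found["body"]["resources"]
#     if detection_ids:
#         details = falcon.get_detect_summaries(ids=detection_ids)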
|
items = [997430015, 'Удалённый профиль']  # second element means "Deleted profile"
text = ','.join(str(item) for item in items)
print(text)
|
"""Implementation of an Experience Replay Buffer."""
import numpy as np
import torch
from torch.utils import data
from torch.utils.data._utils.collate import default_collate
class StateExperienceReplay(data.Dataset):
"""A State distribution Experience Replay buffer.
The Experience Replay algorithm stores states and samples them i.i.d. The buffer
has a fixed size and, once full, overwrites the oldest samples, like a queue.
Parameters
----------
max_len: int.
Buffer size of the experience replay algorithm.
dim_state: tuple.
Shape of a single state.
Methods
-------
append(state) -> None:
Append a batch of states to the dataset.
is_full: bool
Flag indicating whether the buffer is full.
sample_batch(batch_size):
Get a batch of data.
reset():
Reset the memory to empty.
__getitem__(idx):
Get the state stored at a given index.
References
----------
Lin, L. J. (1992).
Self-improving reactive agents based on reinforcement learning, planning and
teaching. Machine learning.
"""
def __init__(self, max_len, dim_state):
super().__init__()
self.max_len = max_len
self.dim_state = dim_state
self.memory = torch.empty(
(self.max_len,) + self.dim_state, dtype=torch.get_default_dtype()
)
self._ptr = 0
self.is_full = False
def __len__(self):
"""Return the current size of the buffer."""
if self.is_full:
return self.max_len
else:
return self._ptr
def __getitem__(self, idx):
"""Return any desired observation.
Parameters
----------
idx: int
Returns
-------
observation: Observation
idx: int
weight: torch.tensor.
"""
return self.memory[idx]
def reset(self):
"""Reset memory to empty."""
self.memory = torch.empty(
(self.max_len,) + self.dim_state, dtype=torch.get_default_dtype()
)
self._ptr = 0
self.is_full = False
def append(self, state):
"""Append new observation to the dataset.
Parameters
----------
state: Tensor
Raises
------
TypeError
If the new observation is not of type Observation.
"""
assert state.dim() == 2
num_states, dim_state = state.shape
assert (dim_state,) == self.dim_state
if num_states + self._ptr < self.max_len:
self.memory[self._ptr : self._ptr + num_states] = state
self._ptr += num_states
else:
self.is_full = True
delta = num_states + self._ptr - self.max_len
self.memory[self._ptr :] = state[delta:]
self.memory[:delta] = state[:delta]
self._ptr = delta
def sample_batch(self, batch_size):
"""Get a batch of data."""
indices = np.random.choice(len(self), batch_size)
return default_collate([self[i] for i in indices])
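# Minimal usage sketch (illustrative only; assumes PyTorch is installed and dim_state is a tuple):
#
#     buffer = StateExperienceReplay(max_len=1000, dim_state=(4,))
#     buffer.append(torch.randn(32, 4))          # store a batch of 32 four-dimensional states
#     batch = buffer.sample_batch(batch_size=8)  # tensor of shape (8, 4), sampled uniformly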
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
import align.detect_face
import cv2
import random
from time import sleep
from tensorflow.python.platform import gfile
import pickle
def prewhiten_and_expand(x):
mean = np.mean(x)
std = np.std(x)
std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
y = np.multiply(np.subtract(x, mean), 1/std_adj)
y = np.expand_dims(y, 0)
return y
def main(args):
sleep(random.random())
cam = cv2.VideoCapture(args.cam_device)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
facenet.load_model(args.facenet_model_file)
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
classifier_filename_exp = os.path.expanduser(args.classifier_filename)
with open(classifier_filename_exp, 'rb') as infile:
(model, class_names) = pickle.load(infile)
print('Loaded classifier model from file "%s"' % classifier_filename_exp)
minsize = 20 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ] # three steps' threshold
factor = 0.709 # scale factor
file_index = 0
while cv2.waitKey(10) & 0xFF != ord('q'):
retval, img = cam.read()
if(retval):
if img.ndim<2:
print('Unable to align')
continue
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:,:,0:3]
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
if nrof_faces>0:
det = bounding_boxes[:,0:4]
det_arr = []
img_size = np.asarray(img.shape)[0:2]
if nrof_faces>1:
if args.detect_multiple_faces:
for i in range(nrof_faces):
det_arr.append(np.squeeze(det[i]))
else:
bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
img_center = img_size / 2
offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
det_arr.append(det[index,:])
else:
det_arr.append(np.squeeze(det))
for i, det in enumerate(det_arr):
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0]-args.margin/2, 0)
bb[1] = np.maximum(det[1]-args.margin/2, 0)
bb[2] = np.minimum(det[2]+args.margin/2, img_size[1])
bb[3] = np.minimum(det[3]+args.margin/2, img_size[0])
cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
scaled = prewhiten_and_expand(scaled)
emb = sess.run(embeddings, feed_dict={images_placeholder:scaled, phase_train_placeholder:False})
predictions = model.predict_proba(emb)
best_class_indices = np.argmax(predictions)
best_class_probabilities = predictions[0, best_class_indices]
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), (0,255,0), 5)
cv2.putText(img,class_names[best_class_indices] ,(bb[0], bb[1] - 10), font, 0.5, (255,0,0),2 ,cv2.LINE_AA)
else:
print('No face detected')
cv2.imshow('Detection',img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=160)
parser.add_argument('--margin', type=int,
help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--detect_multiple_faces', type=bool,
help='Detect and align multiple faces per image.', default=True)
parser.add_argument('--cam_device', type=int,
help='Camera device ID.', default=0)
parser.add_argument('--facenet_model_file', type=str,
help='Facenet model file path', default='/home/shivang/DevPro/FaceRecognition/20170512-11054/20170512-110547.pb')
parser.add_argument('--classifier_filename', type=str,
help='Classifier model file path (pickle).', default='/home/shivang/DevPro/facenet/lfw_classifier.pkl')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
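# Example invocation (a sketch; the script name and file paths are placeholders for your own setup):
#
#     python realtime_face_classify.py \
#         --facenet_model_file ~/models/20170512-110547.pb \
#         --classifier_filename ~/models/lfw_classifier.pkl \
#         --cam_device 0 --image_size 160 --margin 44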
|
#!/usr/bin/env python2.7
import sys
import os
import operator
from StringIO import StringIO
import unittest
import snapconf
import snapconfshared
import snaputil
import snaptron
import snannotation
import snquery
import sqlite3
snaptron.sconn = sqlite3.connect(snapconfshared.SNAPTRON_SQLITE_DB)
snaptron.snc = snaptron.sconn.cursor()
#set of test interval queries
IQs=['chr1:10160-10161','CD99','chr11:82970135-82997450','chr11:82985784-82989768','chr11:82571908-82571909','chr11:82356629-82403678']
#RQs=['1:100000-100000','1:5-5']
#IRQs are a set of combination of indexes from IQs and RQs
#RQs=[{'length':[snapconfshared.operators['='],54]},{'samples_count':[snapconfshared.operators['='],10]}]
RQs=[{'length':[snapconfshared.operators[':'],54]},{'samples_count':[snapconfshared.operators[':'],10]}]
RQs_flat=['length:54','samples_count:10','coverage_avg>2.0','samples_count>:100','coverage_sum>:1000','samples_count:10000','coverage_avg>20']
IDs=[set(['33401689','33401829']),set(['6','9'])]
#holds the set of intropolis ids for each specific query for the original SRA set of intropolis junctions
EXPECTED_IIDS={
IQs[0]:set(['0','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','17','18','19','20','23','24','25','26','27','28','30','31','32','33','34','35','36','37','38','40','41','42','43','44','45','46','47','49','50','51','52','53','54','55','56','57','58','59','60','61','62']),
IQs[0]+str(RQs[0]):set(['2','17','25']),
IQs[0]+str(RQs[0])+str(IDs[1]):set([]),
str(IDs[0]):set(['33401689','33401829']),
IQs[1]+str(RQs[1]):set(['78227192','78227202','78227987','78227989','78229111','78231526','78231699','78232393','78235012','78238555','78239061','78247688','78248550','78249622','78249624','78249890','78250373','78250933','78251946','78252458','78252790','78256239','78257352','78258005','78258153','78258164','78258305','78258522','78258883','78258923','78259017','78259240','78259358','78259371','78259397','78259704','78259711','78259761','78259763','78259873','78259986','78260267','78260461','78260470','78260487','78260498','78260515','78260523','78260793','78260903','78261109','78261125','78261130','78261266','78261298','78261418','78261508','78261798','78262001','78262049','78262090','78262191','78262200','78262351','78262405','78262516','78262672','78262945','78263190','78263302','78263553','78263837','78263974','78264156','78264181','78264275','78264413']),
#IQs[2]+str(RQs_flat[1])+str(RQs_flat[2]):set(['7474725','7474726','7475267']),
IQs[2]+str(RQs_flat[1])+str(RQs_flat[2]):set([]),
IQs[3]+str(RQs_flat[3])+str(RQs_flat[4]):set(['14075114','14075109']),
#str(RQs_flat[5])+str(RQs_flat[6]):set(['1900915','17229066','14511883','18158500','19434757'])
#str(RQs_flat[5])+str(RQs_flat[6]):set(['21266715','59043106']),
str(RQs_flat[5])+str(RQs_flat[6]):set(['41278365']),
IQs[4]+str(RQs_flat[3])+str(RQs_flat[4]):set(['14065307']),
IQs[5]+str(RQs_flat[3])+str(RQs_flat[4]):set(['14065307','14065333'])
}
def setUpModule():
pass
def tearDownModule():
pass
gc = snannotation.GeneCoords()
#shortcuts for snaptron methods used in tests
def run_tabix(intervalq,region_args=snaptron.default_region_args):
runner = snquery.RunExternalQueryEngine(snapconfshared.TABIX,intervalq,None,set(),region_args=region_args)
return runner.run_query()
tc = run_tabix
rqp = snaptron.range_query_parser
def run_sqlite3(intervalq,rangeq,snaptron_ids,region_args=snaptron.default_region_args):
runner = snquery.RunExternalQueryEngine(snapconfshared.SQLITE,intervalq,rangeq,snaptron_ids,region_args=region_args)
return runner.run_query()
srl = run_sqlite3
sbi = snaptron.search_introns_by_ids
sbg = snaptron.search_by_gene_name
#pjq = snaptron.parse_json_query
pjq = snaptron.process_post_params
pp = snaptron.process_params
qr = snaptron.query_regions
qi = snaptron.query_ids
tdbs = snapconfshared.TABIX_DBS
class TestTabixCalls(unittest.TestCase):
'''
check tabix for basic queries (both interval and range including ids)
def run_tabix(qargs,rquerys,tabix_db,filter_set=None,sample_set=None,filtering=False,print_header=True,debug=True):
returns an id set if filtering==True
can also populate sample_set if defined.
These are true unit tests.
'''
def setUp(self):
pass
def tearDown(self):
snaptron.REQ_FIELDS=[]
def itc(self,interval_query,rq=None,range_query=None,filter_set=None,sample_set=None,filtering=False):
'''wrap the normal run_tabix/search_by_gene_name call to hardcode defaults for interval/gene name querying'''
if snapconfshared.INTERVAL_PATTERN.search(interval_query):
ra = snaptron.default_region_args._replace(range_filters=range_query,tabix_db_file=tdbs['chromosome'],intron_filter=filter_set,sample_filter=sample_set,save_introns=filtering)
return tc(interval_query,region_args=ra)
return sbg(gc,interval_query,{'rfilter':[rq]},range_query,intron_filters=filter_set,save_introns=filtering)
def idc(self,ids,filtering=False):
'''wrap the normal run_tabix call to hardcode defaults for interval querying AND range filtering'''
return sbi(ids,None,filtering=filtering)
def idcr(self,ids,range_query):
'''wrap the normal run_tabix call to hardcode defaults for interval querying AND range filtering'''
return sbi(ids,range_query)
#actual tests
def test_basic_json_parsing(self):
'''tests to see if our json parsing for the original hacky query language works'''
query = "[{\"intervals\":[\"chr6:1-10000000\"],\"samples_count\":[{\"op\":\":\",\"val\":5}],\"ids\":[1,4]}]"
(iq,rq,sq,idq,ra) = pjq(query)
self.assertEqual(iq[0][0],'chr6:1-10000000')
self.assertEqual(rq[0]['rfilter'][0],"samples_count:5")
self.assertEqual(sq[0],[])
self.assertEqual(idq[0],[1,4])
def test_range_query_parsing(self):
'''tests the parsing of the string of ranges-as-filters constraints'''
rfilters={}
tests_ = [['samples_count',':',5],['coverage_sum','>:',3],['coverage_avg','<',5.5]]
rfilters['rfilter']=["".join(map(lambda y: str(y),x)) for x in tests_]
snaptron_ids = set()
ranges_parsed = rqp(rfilters,snaptron_ids)
#for col in ['samples_count','coverage_sum','coverage_avg']:
for (col,op,val) in tests_:
self.assertEqual(col in ranges_parsed,True)
(op_,val_) = ranges_parsed[col]
self.assertEqual(snapconfshared.operators[op],op_)
self.assertEqual(val,val_)
def test_basic_interval(self):
'''make sure we're getting back an expected set of intropolis ids'''
i = 0
#get intropolis ids
(iids,sids) = self.itc(IQs[i], filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]])
def test_stream_output(self):
'''make sure we're getting a correct header'''
sout = StringIO()
snaputil.REQ_FIELDS=[]
snaputil.stream_header(sout,region_args=snaptron.default_region_args)
hfields = sout.getvalue().split("\n")[0].rstrip().split("\t")
self.assertEqual(len(hfields),len(snapconfshared.INTRON_HEADER_FIELDS)+1)
req_field = 'snaptron_id'
snaputil.REQ_FIELDS=[snapconfshared.INTRON_HEADER_FIELDS_MAP[req_field]]
sout = StringIO()
snaputil.stream_header(sout,region_args=snaptron.default_region_args)
header = sout.getvalue().split("\n")[0].rstrip()
self.assertEqual(header,'DataSource:Type\t%s' % (req_field))
def test_basic_interval_and_range(self):
'''make sure we're getting back an expected set of intropolis ids'''
i = 0
r = 0
#get intropolis ids
(iids,sids) = self.itc(IQs[i], range_query=RQs[r], filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]+str(RQs[r])])
def test_basic_interval_and_range_and_ids(self):
'''make sure we're getting back an expected set of intropolis ids'''
i = 0
r = 0
d = 1
#get intropolis ids
(iids,sids) = self.itc(IQs[i], range_query=RQs[r], filter_set=IDs[d], filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]+str(RQs[r])+str(IDs[d])])
def test_basic_gene_name_and_range(self):
i = 1
r = 1
#get intropolis ids
(iids,sids) = self.itc(IQs[i], range_query=RQs[r], rq=RQs_flat[r], filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]+str(RQs[r])])
def test_basic_ids(self):
'''make sure we're getting back an expected set of intropolis ids'''
d = 0
#get intropolis ids
(iids,sids) = self.idc(IDs[d], filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[str(IDs[d])])
class TestQueryCalls(unittest.TestCase):
'''
Test the main top level methods in snaptron for querying with various predicates (regions, ranges, ids)
These are full round trip tests (so not really unittests as such, more system/integration tests)
'''
def setUp(self):
pass
def process_query(self,input_):
(iq,idq,rq,sq,ra) = pp(input_)
return {'iq':iq,'rq':rq,'sq':sq,'idq':idq,'ra':ra}
def test_interval_query(self):
q = 0
i = 0
queries = self.process_query('regions=%s' % (IQs[i]))
iq = queries['iq'][q]
rq = ''
(iids,sids) = qr([iq],rq,set(),filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]])
def test_interval_query_for_ids(self):
q = 0
i = 0
queries = self.process_query('regions=%s&fields=snaptron_id' % (IQs[i]))
iq = queries['iq'][q]
rq = ''
(iids,sids) = qr([iq],rq,set(),filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]])
def test_interval_with_range_query(self):
q = 0
i = 0
r = 0
queries = self.process_query('regions=%s&rfilter=%s' % (IQs[i],RQs_flat[r]))
iq = queries['iq'][q]
rq = queries['rq']
(iids,sids) = qr([iq],rq,set(),filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]+str(RQs[r])])
def test_interval_with_range_with_ids_query(self):
q = 0
i = 0
r = 0
d = 1
queries = self.process_query('regions=%s&rfilter=%s&ids=%s' % (IQs[i],RQs_flat[r],",".join(IDs[d])))
iq = queries['iq'][q]
rq = queries['rq']
snaptron_ids = set()
qi(queries['idq'],snaptron_ids)
(iids,sids) = qr([iq],rq,snaptron_ids,filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]+str(RQs[r])+str(IDs[d])])
def test_interval_with_fp_ranges(self):
q = 0
i = 2
r = 1
queries = self.process_query('regions=%s&rfilter=%s&rfilter=%s' % (IQs[i],RQs_flat[r],RQs_flat[r+1]))
iq = queries['iq'][q]
rq = queries['rq']
snaptron_ids = set()
(iids,sids) = qr([iq],rq,snaptron_ids,filtering=True)
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]+str(RQs_flat[r])+str(RQs_flat[r+1])])
def test_fp_ranges(self):
q = 0
i = 2
r = 5
queries = self.process_query('rfilter=%s,%s' % (RQs_flat[r],RQs_flat[r+1]))
#queries = self.process_query('rfilter=samples_count:10000,coverage_avg>20')
rq = queries['rq']
snaptron_ids = set()
ra = snaptron.default_region_args._replace(save_introns=True)
#(iids,sids) = srl(rq,snaptron_ids,stream_back=False)
(iids,sids) = srl(None,rq,snaptron_ids,region_args=ra)
self.assertEqual(iids, EXPECTED_IIDS[str(RQs_flat[r])+str(RQs_flat[r+1])])
#self.assertEqual(iids, set([1900915,17229066,14511883,18158500,19434757]))
def test_interval_with_range_query_contains(self):
q = 0
i = 3
r = 3
queries = self.process_query('regions=%s&rfilter=%s&rfilter=%s&contains=1' % (IQs[i],RQs_flat[r],RQs_flat[r+1]))
iq = queries['iq'][q]
rq = queries['rq']
(iids,sids) = qr([iq],rq,set(),filtering=True,region_args=queries['ra'])
#snaptron.RETURN_ONLY_CONTAINED = False
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]+str(RQs_flat[r])+str(RQs_flat[r+1])])
def test_interval_with_range_query_either_start(self):
q = 0
i = 5
r = 3
queries = self.process_query('regions=%s&rfilter=%s&rfilter=%s&either=1' % (IQs[i],RQs_flat[r],RQs_flat[r+1]))
iq = queries['iq'][q]
rq = queries['rq']
(iids,sids) = qr([iq],rq,set(),filtering=True,region_args=queries['ra'])
self.assertEqual(iids, EXPECTED_IIDS[IQs[i]+str(RQs_flat[r])+str(RQs_flat[r+1])])
def test_interval_with_range_query_not_either_end(self):
q = 0
i = 4
r = 3
queries = self.process_query('regions=%s&rfilter=%s&rfilter=%s&either=2' % (IQs[i],RQs_flat[r],RQs_flat[r+1]))
iq = queries['iq'][q]
rq = queries['rq']
(iids,sids) = qr([iq],rq,set(),filtering=True,region_args=queries['ra'])
self.assertEqual(iids, set([]))
def test_interval_with_range_query_exact(self):
q = 0
i = 5
r = 3
queries = self.process_query('regions=%s&rfilter=%s&rfilter=%s&exact=1' % (IQs[i],RQs_flat[r],RQs_flat[r+1]))
iq = queries['iq'][q]
rq = queries['rq']
(iids,sids) = qr([iq],rq,set(),filtering=True,region_args=queries['ra'])
self.assertEqual(iids, EXPECTED_IIDS[IQs[i-1]+str(RQs_flat[r])+str(RQs_flat[r+1])])
def test_interval_with_range_query_not_exact(self):
q = 0
i = 4
r = 3
queries = self.process_query('regions=%s&rfilter=%s&rfilter=%s&exact=1' % (IQs[i],RQs_flat[r],RQs_flat[r+1]))
iq = queries['iq'][q]
rq = queries['rq']
(iids,sids) = qr([iq],rq,set(),filtering=True,region_args=queries['ra'])
self.assertEqual(iids, set([]))
class TestSampleSearch(unittest.TestCase):
'''
Test the search and recovery of sample IDs and coverages in a junction's sample/coverage list
'''
def setUp(self):
pass
def test_basic_extraction(self):
sample_ids = set(["1","5","53","276","4987","18000"])
sid_search_obj = snquery.build_sid_ahoc_queries(sample_ids)
samples_str = ",5:5,4:32,276:7,18000:276"
fields = range(0,snapconfshared.SAMPLE_IDS_COL+5)
fields[snapconfshared.SAMPLE_IDS_COL] = samples_str
sid_search_iter = sid_search_obj.iter(samples_str)
(found,fields) = snaputil.extract_sids_and_covs_from_search_iter(sid_search_iter, fields)
samples = set([5,276,18000])
covs = set([5,7,276])
samples_found = set(found[0:len(found),0])
covs_found = set(found[0:len(found),1])
self.assertEqual(len(samples.intersection(samples_found)), len(samples))
self.assertEqual(len(covs.intersection(covs_found)), len(covs))
def test_summary_recalculation(self):
sample_ids = set(["1","5","53","276","4987","18000"])
sid_search_obj = snquery.build_sid_ahoc_queries(sample_ids)
samples_str = ",5:5,4:32,276:7,18000:276"
sids_covs_should_be_found = set(["5:5","276:7","18000:276"])
fields = range(0,snapconfshared.SAMPLE_IDS_COL+5)
fields[snapconfshared.SAMPLE_IDS_COL+1] = 4
fields[snapconfshared.SAMPLE_IDS_COL+2] = 320
fields[snapconfshared.SAMPLE_IDS_COL+3] = 80
fields[snapconfshared.SAMPLE_IDS_COL+4] = 19.5
fields[snapconfshared.SAMPLE_IDS_COL] = samples_str
sid_search_iter = sid_search_obj.iter(samples_str)
(found,fields) = snaputil.extract_sids_and_covs_from_search_iter(sid_search_iter, fields)
sids_covs_from_search = set([x for x in fields[snapconfshared.SAMPLE_IDS_COL].split(",")])
sids_covs_from_search.remove('')
self.assertEqual(sids_covs_from_search, sids_covs_should_be_found)
self.assertEqual(fields[snapconfshared.SAMPLE_IDS_COL+1], 3)
self.assertEqual(fields[snapconfshared.SAMPLE_IDS_COL+2], 288)
self.assertEqual(fields[snapconfshared.SAMPLE_IDS_COL+3], 96)
self.assertEqual(fields[snapconfshared.SAMPLE_IDS_COL+4], 7)
def test_range_filter(self):
sample_ids = set(["1","5","53","276","4987","18000"])
sid_search_obj = snquery.build_sid_ahoc_queries(sample_ids)
samples_str = ",5:5,4:32,276:7,18000:276"
fields = range(0,snapconfshared.SAMPLE_IDS_COL+5)
fields[snapconfshared.SAMPLE_IDS_COL+1] = 4
fields[snapconfshared.SAMPLE_IDS_COL+2] = 320
fields[snapconfshared.SAMPLE_IDS_COL+3] = 80
fields[snapconfshared.SAMPLE_IDS_COL+4] = 19.5
fields[snapconfshared.SAMPLE_IDS_COL] = samples_str
sid_search_iter = sid_search_obj.iter(samples_str)
(found,fields) = snaputil.extract_sids_and_covs_from_search_iter(sid_search_iter, fields)
range_filters = {'samples_count':[operator.gt,2],'coverage_sum':[operator.eq,288],'coverage_avg':[operator.ge,3.0],'coverage_median':[operator.lt,20]}
self.assertEqual(snaputil.filter_by_ranges(fields, range_filters), False)
#change range_filters
range_filters = {'samples_count':[operator.gt,2],'coverage_sum':[operator.eq,288],'coverage_avg':[operator.ge,3.0],'coverage_median':[operator.ne,7.0]}
self.assertEqual(snaputil.filter_by_ranges(fields, range_filters), True)
if __name__ == '__main__':
unittest.main()
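# Example run (a sketch; the test module name and the configured SQLite/Tabix data paths are
# assumptions about the local Snaptron deployment, not taken from this file):
#
#     python2.7 tests.py                  # run the full suite
#     python2.7 tests.py TestTabixCalls   # run only the Tabix unit tests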
|
"""
Author: George Azzari (gazzari@stanford.edu)
Center on Food Security and the Environment
Department of Earth System Science
Stanford University
"""
import ee
class Daymet:
def __init__(self):
# NOTE: no filterBounds needed; DAYMET is composed of whole-CONUS images
self.wholecoll = ee.ImageCollection('NASA/ORNL/DAYMET_V3')
@staticmethod
def addsradvp(img):
"""
Calculate vapor pressure deficit (vpd, kPa) and daily radiation (radn) in units of MJ/m2
:param img: daymet image
:return: original daymet image enriched with radn and vpd bands
"""
sr = img.select('srad')
dl = img.select('dayl')
radn = sr.multiply(dl).divide(1000000)
vpx = img.expression("0.6107 * exp( 17.269*t / (237.3 + t))", {'t': img.select('tmax')})
vpn = img.expression("0.6107 * exp( 17.269*t / (237.3 + t))", {'t': img.select('tmin')})
vpd = vpx.subtract(vpn).multiply(0.75)
img = img.addBands(radn.select([0], ['radn']))
img = img.addBands(vpd.select([0], ['vpd']))
return img
@staticmethod
def _compute_radn(img):
sr = img.select('srad')
dl = img.select('dayl')
radn = sr.multiply(dl).divide(1000000)
return radn.select([0], ['radn'])
@staticmethod
def _compute_vpd(img):
vpx = img.expression("0.6107 * exp( 17.269*t / (237.3 + t))", {'t': img.select('tmax')})
vpn = img.expression("0.6107 * exp( 17.269*t / (237.3 + t))", {'t': img.select('tmin')})
vpd = vpx.subtract(vpn).multiply(0.75)
return vpd.select([0], ['vpd'])
@staticmethod
def _compute_gdd(img):
# NOTE: this has a hard-coded base temperature for corn in US.
gdd_c = img.expression(
'((30 - (30-Tmx)*(Tmx<30)) + (10 + (Tmn-10)*(Tmn>10)))/2.0 - 10.0',
{'Tmx': img.select('tmax'), 'Tmn': img.select('tmin')})
return gdd_c.select([0], ['gddC'])
def get_mean_radn(self, startdate, enddate):
c = self.wholecoll.filterDate(ee.Date(startdate), ee.Date(enddate)).map(self._compute_radn)
return c.mean()
def get_mean_precip(self, startdate, enddate):
c = self.wholecoll.filterDate(ee.Date(startdate), ee.Date(enddate))
return c.select('prcp').mean()
def get_mean_tmax(self, startdate, enddate):
c = self.wholecoll.filterDate(ee.Date(startdate), ee.Date(enddate))
return c.select('tmax').mean()
def get_mean_vpd(self, startdate, enddate):
c = self.wholecoll.filterDate(ee.Date(startdate), ee.Date(enddate)).map(self._compute_vpd)
return c.mean()
def get_mean_vhinge(self, startdate, enddate):
vpd = self.get_mean_vpd(startdate, enddate)
vhinge = vpd.expression("(x-1.6) * (x > 1.6)", {'x': vpd}).select([0], ['vhinge'])
return vhinge
def get_mean_phinge(self, startdate, enddate):
precip = self.get_mean_precip(startdate, enddate)
phinge = precip.expression("(3-x) * (x < 3)", {'x': precip}).select([0], ['phinge'])
return phinge
def get_cumul_gdd(self, startdate, enddate):
gdd_c = self.wholecoll.filterDate(ee.Date(startdate), ee.Date(enddate)).map(self._compute_gdd)
gdd_sum_c = gdd_c.sum().select([0], ['gddC'])
gdd_sum_f = gdd_sum_c.expression('1.8 * x', {'x': gdd_sum_c.select(0)}).select([0], ['gddF'])
return gdd_sum_c.addBands(gdd_sum_f)
def get_met_metrics(self, datesdict):
vpd = self.get_mean_vpd(datesdict['vpd_start'], datesdict['vpd_end'])
prec = self.get_mean_precip(datesdict['prec_start'], datesdict['prec_end'])
vhinge = self.get_mean_vhinge(datesdict['vpd_start'], datesdict['vpd_end'])
phinge = self.get_mean_phinge(datesdict['prec_start'], datesdict['prec_end'])
radn = self.get_mean_radn(datesdict['radn_start'], datesdict['radn_end'])
maxt = self.get_mean_tmax(datesdict['tmax_start'], datesdict['tmax_end'])
gdd_sum = self.get_cumul_gdd(datesdict['gdd_start'], datesdict['gdd_end'])
return ee.Image.cat(vpd, prec, vhinge, phinge, radn, maxt, gdd_sum)
def metmetrics_usa(self, yr):
"""
Calculate monthly and seasonal averages of weather variables (weather metrics).
These metrics depend on the location and the shape of the yield model trained
in APSIM.
:param yr: the year for which weather metrics are computed (int)
:return: ee.Image() with one band per metric
"""
# TODO: change this to be mappable (i.e. yr must be a ee.String)
# yr = ee.String(yr)
datesdict = dict(vpd_start=ee.Date.fromYMD(yr, 7, 1), vpd_end=ee.Date.fromYMD(yr, 7, 31),
prec_start=ee.Date.fromYMD(yr, 6, 1), prec_end=ee.Date.fromYMD(yr, 8, 31),
radn_start=ee.Date.fromYMD(yr, 6, 1), radn_end=ee.Date.fromYMD(yr, 8, 31),
tmax_start=ee.Date.fromYMD(yr, 8, 1), tmax_end=ee.Date.fromYMD(yr, 8, 31),
gdd_start=ee.Date.fromYMD(yr, 4, 1), gdd_end=ee.Date.fromYMD(yr, 10, 15))
met = self.get_met_metrics(datesdict)
return met.set({'year': yr})
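# Minimal usage sketch (illustrative only; assumes the Earth Engine client is authenticated):
#
#     import ee
#     ee.Initialize()
#     daymet = Daymet()
#     met2016 = daymet.metmetrics_usa(2016)   # ee.Image with vpd, prcp, hinge, radn, tmax and gdd bands
#     print(met2016.bandNames().getInfo())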
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Bout (read bank-out) extracts transactions from pdf bank statements.
_ _
(_) (_)
(_) _ _ _ _ _ _ _ _ _ (_) _ _
(_)(_)(_)(_)_ _ (_)(_)(_) _ (_) (_)(_)(_)(_)(_)
(_) (_)(_) (_)(_) (_) (_)
(_) (_)(_) (_)(_) (_) (_) _
(_) _ _ _(_)(_) _ _ _ (_)(_)_ _ _(_)_ (_)_ _(_)
(_)(_)(_)(_) (_)(_)(_) (_)(_)(_) (_) (_)(_)
"""
import io
import logging
import click
import csv
from collections import namedtuple
from datetime import datetime
logger = logging.getLogger("bout")
profiles = {}
Transaction = namedtuple("Transaction",
["id", "date", "payee", "memo", "amount"])
InvalidTransaction = namedtuple("InvalidTransaction", [])
def get_icici_csv(data_row):
"""Convert a transaction row to tuple.
Details of fields
0: 'D', # Transaction date
2: 'M', # Transaction details
3: 'T', # Deposit
4: 'T-', # Withdrawal
"""
logger.debug("get_icicicsv: Data row = {}".format(data_row))
date = data_row[0].replace('-', '/')
if _valid_date(date):
amt = "-{}".format(data_row[4])
if data_row[3] != "0":
amt = data_row[3]
return Transaction(id=0,
date=date,
payee="", # Empty for ICICI bank account
memo=data_row[2],
amount=amt)
return InvalidTransaction()
def get_icicicc_csv(data_row):
"""Convert a transaction row to tuple.
Details of fields
0: 'D', # Transaction date
2: 'M', # Transaction details
5: 'T', # Amount
"""
logger.debug("get_icicicsv: Data row = {}".format(data_row))
date = data_row[0]
if _valid_date(date, date_format="%d/%m/%Y"):
amt = "-{}".format(data_row[5])
if data_row[6] == "CR":
amt = data_row[5]
return Transaction(id=0,
date=date,
payee="", # Empty for ICICI bank account
memo=data_row[2],
amount=amt)
return InvalidTransaction()
def qif_header():
"""Print qif header."""
click.echo("!Account\nNMyAccount\nTMyBank\n^\n!Type:Bank")
def to_qif(transaction):
"""Transform a cleaned up row to qif format.
Returns:
string of a particular transaction in qif format
See wikipedia for more details of QIF format.
https://en.wikipedia.org/wiki/Quicken_Interchange_Format#Detail_items
"""
logger.debug("to_qif: Input = {}".format(transaction))
return "D{0}\nM{1}\nT{2}\n^\n\n"\
.format(transaction.date, transaction.memo, transaction.amount)
def _valid_date(date_value, date_format="%d/%m/%Y"):
"""Validate a transaction date."""
try:
transaction_date = datetime.strptime(date_value, date_format)
return transaction_date is not None
except ValueError:
return False
def _filter_csv_header(doc, header):
head_skip = False
mem = io.StringIO()
with open(doc, encoding='utf-8', mode='r') as f:
for line in f:
if line.startswith(header):
head_skip = True
continue
if head_skip and (not line or line.isspace()):
break
if head_skip and ',' in line:
mem.write(line)
mem.seek(0)
return csv.reader(mem)
@click.command()
@click.argument("doc", type=click.Path(exists=True))
@click.option("--profile", prompt="Choose a profile", default="icici",
show_default=True,
type=click.Choice(["icici", "icicicc"]),
help="Document type profile.")
@click.option("--debug", is_flag=True, show_default=True,
help="Show diagnostic messages.")
def start(doc, profile, debug):
"""Bout (read bank-out) extracts transactions from csv bank statements."""
if debug:
logging.basicConfig(level=logging.DEBUG)
logger.info("Verbose messages are enabled.")
profiles.update({"icici": get_icici_csv,
"icicicc": get_icicicc_csv})
rows = []
if profile == "icici":
header = "DATE,MODE,PARTICULARS,DEPOSITS,WITHDRAWALS,BALANCE"
rows = _filter_csv_header(doc, header)
elif profile == "icicicc":
header = "Date,Sr.No.,Transaction Details,Reward Point Header,Intl.Amount,Amount(in Rs),BillingAmountSign"
rows = _filter_csv_header(doc, header)
# row -> clean_row
# clean_row, profile -> transaction
# transaction -> qif
create_transaction = profiles[profile]
print_header = False
for r in rows:
transaction = create_transaction(r)
if type(transaction) is not InvalidTransaction:
if not print_header:
qif_header()
print_header = True
click.echo(to_qif(transaction))
if __name__ == '__main__':
start()
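# Example invocation (a sketch; the script name and statement.csv are placeholders for an
# exported ICICI statement on your machine):
#
#     python bout.py statement.csv --profile icici > statement.qif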
|
import random
def select(array, i):
if len(array) <= 5:
copy = array[:]
copy.sort()
return copy[i - 1]
else:
medians = []
j = 0
while j < len(array):
subarray = array[j:j+5]
medians.append(select(subarray, len(subarray) // 2))
j += 5
mom = medians[len(medians) // 2]
l = []
r = []
for elm in array:
if elm > mom:
r.append(elm)
elif elm < mom:
l.append(elm)
p = len(l) + 1
if i == p:
return mom
elif i < p:
return select(l, i)
return select(r, i - p)
if __name__=='__main__':
array = [random.randint(0, 10000) for _ in range(100)]
array = list(set(array))
median = select(array, len(array) // 2)
print(median)
array.sort()
print(array[(len(array) // 2) - 1])
|
import numpy as np
import pandas as pd
import xarray as xr
def new_test_dataset(time, height=180, **indexers):
"""
Create a test dataset with dimensions ("time", "lat", "lon") and data variables given by *indexers*.
:param time: Single date/time string or sequence of date-time strings.
:param height: Size of the latitude dimension.
:param indexers: Variable name to value mapping.
Value may be a scalar or a vector of same length as *time*.
:return: test dataset
"""
# TODO (forman): get rid of this code here, utilise xcube.api.new_cube() instead
time = [time] if isinstance(time, str) else time
width = height * 2
num_times = len(time)
res = 180 / height
shape = (1, height, width)
data_vars = dict()
for name, value in indexers.items():
try:
values = list(value)
except TypeError:
values = [value] * num_times
if len(values) != num_times:
raise ValueError(f"Length of {name!r} must be 1 or match the number of time steps ({num_times})")
data_vars[name] = (['time', 'lat', 'lon'],
np.concatenate(tuple(np.full(shape, values[i]) for i in range(num_times))))
return xr.Dataset(data_vars,
coords=dict(time=(['time'], pd.to_datetime(time)),
lat=(['lat'], np.linspace(-90 + res, +90 - res, height)),
lon=(['lon'], np.linspace(-180 + res, +180 - res, width))))
def create_s2plus_dataset():
x = xr.DataArray([310005., 310015., 310025., 310035., 310045.], dims=["x"],
attrs=dict(units="m", standard_name="projection_x_coordinate"))
y = xr.DataArray([5689995., 5689985., 5689975., 5689965., 5689955.], dims=["y"],
attrs=dict(units="m", standard_name="projection_y_coordinate"))
lon = xr.DataArray([[0.272763, 0.272906, 0.27305, 0.273193, 0.273336],
[0.272768, 0.272911, 0.273055, 0.273198, 0.273342],
[0.272773, 0.272917, 0.27306, 0.273204, 0.273347],
[0.272779, 0.272922, 0.273066, 0.273209, 0.273352],
[0.272784, 0.272927, 0.273071, 0.273214, 0.273358]],
dims=["y", "x"], attrs=dict(units="degrees_east", standard_name="longitude"))
lat = xr.DataArray([[51.329464, 51.329464, 51.329468, 51.32947, 51.329475],
[51.329372, 51.329376, 51.32938, 51.329384, 51.329388],
[51.329285, 51.329285, 51.32929, 51.329292, 51.329296],
[51.329193, 51.329197, 51.3292, 51.329205, 51.329205],
[51.3291, 51.329105, 51.32911, 51.329113, 51.329117]],
dims=["y", "x"], attrs=dict(units="degrees_north", standard_name="latitude"))
rrs_443 = xr.DataArray([[0.014, 0.014, 0.016998, 0.016998, 0.016998],
[0.014, 0.014, 0.016998, 0.016998, 0.016998],
[0.019001, 0.019001, 0.016998, 0.016998, 0.016998],
[0.019001, 0.019001, 0.016998, 0.016998, 0.016998],
[0.019001, 0.019001, 0.016998, 0.016998, 0.016998]],
dims=["y", "x"], attrs=dict(units="sr-1", grid_mapping="transverse_mercator"))
rrs_665 = xr.DataArray([[0.025002, 0.019001, 0.008999, 0.012001, 0.022999],
[0.028, 0.021, 0.009998, 0.008999, 0.022999],
[0.036999, 0.022999, 0.007999, 0.008999, 0.023998],
[0.041, 0.022999, 0.007, 0.009998, 0.021],
[0.033001, 0.018002, 0.007999, 0.008999, 0.021]],
dims=["y", "x"], attrs=dict(units="sr-1", grid_mapping="transverse_mercator"))
transverse_mercator = xr.DataArray(np.array([0xffffffff], dtype=np.uint32),
attrs=dict(grid_mapping_name="transverse_mercator",
scale_factor_at_central_meridian=0.9996,
longitude_of_central_meridian=3.0,
latitude_of_projection_origin=0.0,
false_easting=500000.0,
false_northing=0.0,
semi_major_axis=6378137.0,
inverse_flattening=298.257223563))
return xr.Dataset(dict(rrs_443=rrs_443, rrs_665=rrs_665, transverse_mercator=transverse_mercator),
coords=dict(x=x, y=y, lon=lon, lat=lat),
attrs={
"title": "T31UCS_20180802T105621",
"conventions": "CF-1.6",
"institution": "VITO",
"product_type": "DCS4COP Sentinel2 Product",
"origin": "Copernicus Sentinel Data",
"project": "DCS4COP",
"time_coverage_start": "2018-08-02T10:59:38.888000Z",
"time_coverage_end": "2018-08-02T10:59:38.888000Z"
})
def create_highroc_dataset(no_spectra=False):
"""
Simulates a HIGHROC OLCI L2 product in NetCDF 4 format
"""
lon = np.array([[8, 9.3, 10.6, 11.9],
[8, 9.2, 10.4, 11.6],
[8, 9.1, 10.2, 11.3]], dtype=np.float32)
lat = np.array([[56, 56.1, 56.2, 56.3],
[55, 55.2, 55.4, 55.6],
[54, 54.3, 54.6, 54.9]], dtype=np.float32)
if not no_spectra:
wavelengths = [(1, 400.0), (2, 412.5), (3, 442.5), (4, 490.0), (5, 510.0),
(6, 560.0), (7, 620.0), (8, 665.0), (9, 673.75), (10, 681.25),
(11, 708.75), (12, 753.75), (16, 778.75), (17, 865.0), (18, 885.0), (21, 940.0)]
rtoa_desc = "Top-of-atmosphere reflectance"
rrs_desc = "Atmospherically corrected angular dependent remote sensing reflectances"
rtoa_vars = {f'rtoa_{i}': create_waveband(i, wl, '1', rtoa_desc) for i, wl in wavelengths}
rrs_vars = {f'rrs_{i}': create_waveband(i, wl, 'sr^-1', rrs_desc) for i, wl in wavelengths}
else:
rtoa_vars = {}
rrs_vars = {}
return xr.Dataset(
data_vars=dict(
conc_chl=create_conc_chl(),
c2rcc_flags=create_c2rcc_flag_var(),
lon=(('y', 'x'), lon, dict(
long_name="longitude",
units="degrees_east",
)),
lat=(('y', 'x'), lat, dict(
long_name="latitude",
units="degrees_north",
)),
**rtoa_vars,
**rrs_vars,
),
attrs=dict(start_date='14-APR-2017 10:27:50.183264',
stop_date='14-APR-2017 10:31:42.736226')
)
def create_waveband(index, wavelength, units, long_name=None):
data = np.array([[7, 11, np.nan, 5],
[5, 10, 2, 21],
[16, 6, 20, 17]], dtype=np.float32)
return (('y', 'x'), data, dict(
long_name=long_name,
units=units,
spectral_band_index=index,
wavelength=wavelength,
bandwidth=15.0,
valid_pixel_expression="c2rcc_flags.F1",
_FillValue=np.nan,
))
def create_conc_chl():
data = np.array([[7, 11, np.nan, 5],
[5, 10, 2, 21],
[16, 6, 20, 17]], dtype=np.float32)
return (('y', 'x'), data, dict(
long_name="Chlorophylll concentration",
units="mg m^-3",
_FillValue=np.nan,
valid_pixel_expression="c2rcc_flags.F1",
))
def create_c2rcc_flag_var():
data = np.array([[1, 1, 1, 1],
[1, 4, 1, 2],
[8, 1, 1, 1]], dtype=np.uint32)
return xr.DataArray(data, dims=('y', 'x'), name='c2rcc_flags', attrs=dict(
long_name="C2RCC quality flags",
_Unsigned="true",
flag_meanings="F1 F2 F3 F4",
flag_masks=np.array([1, 2, 4, 8], np.int32),
flag_coding_name="c2rcc_flags",
flag_descriptions="D1 D2 D3 D4",
))
def create_cmems_sst_flag_var():
sea = 1
land = 2
lake = 4
ice = 8
data = np.array([[[sea + ice, land + ice, lake + ice, lake],
[sea + ice, sea, land, land],
[sea, sea, sea, land]]], dtype=np.float32)
return xr.DataArray(data, dims=('time', 'lat', 'lon'), name='mask', attrs=dict(
long_name="land sea ice lake bit mask",
flag_masks="0b, 1b, 2b, 3b",
flag_meanings="sea land lake ice",
valid_min=0,
valid_max=12,
))
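# Minimal usage sketch (illustrative only):
#
#     ds = new_test_dataset(["2010-01-01", "2010-01-02"], height=90, precipitation=0.4, temperature=275.2)
#     assert ds.precipitation.shape == (2, 90, 180)
#     s2plus = create_s2plus_dataset()     # tiny 5x5 Sentinel-2-like product with a CRS variable
#     highroc = create_highroc_dataset()   # simulated HIGHROC OLCI L2 product, 3x4 pixels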
|
from .util import *
from .web import *
from .setup import *
|
import coreapi
import coreschema
from rest_framework.schemas import AutoSchema
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from core.models import (Yearref,
Govrank,
Govindicatorrank)
from . import serializers  # assumed: the app's serializers module, used below but missing from the original imports
class BenchmarkMandateView(APIView):
"""
Return mandate rankings for a particular government category
"""
schema = AutoSchema(manual_fields=[
coreapi.Field(
'category',
required=True,
location='query',
schema=coreschema.String(
description='Unique government category ID'
)
),
coreapi.Field(
'year',
required=False,
location='query',
schema=coreschema.String(
description='full year eg: 2016'
)
),
])
def get(self, request, format=None):
year = self.request\
.query_params.get('year',
Yearref.objects.latest('yearid').yr)
category = self.request.query_params.get('category', None)
if category is None:
return Response(
status=status.HTTP_400_BAD_REQUEST
)
query = Govrank\
.objects\
.filter(govid__gcid=category,
yearid__yr=year)
serialize = serializers.BenchmarkMandateSerializer(
query,
context={'request': request},
many=True,
)
return Response(
{'results': serialize.data}
)
class BenchmarkIndicatorView(APIView):
"""
Return a particular mandate indicator ranking for all governments
within a particular government category
"""
schema = AutoSchema(manual_fields=[
coreapi.Field(
'indicator',
required=True,
location='path',
schema=coreschema.String(
description='mandate indicator id'
)
),
coreapi.Field(
'year',
required=False,
location='query',
schema=coreschema.String(
description='year eg: 2016'
)
),
coreapi.Field(
'category',
required=False,
location='query',
schema=coreschema.String(
description='government category id'
)
),
])
def get(self, request, indicator, format=None):
year = self.request.query_params.get('year',
Yearref.objects.latest('yearid').yr)
category = self.request.query_params.get('category', None)
if category is None:
return Response(
status=status.HTTP_400_BAD_REQUEST
)
query = Govindicatorrank.objects.filter(
yearid__yr=year,
iid=indicator,
govid__gcid=category
)
serialize = serializers.IndicatorRankSerializer(
query,
context={'request': request},
many=True,
)
return Response(
{'results': serialize.data}
)
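# Example requests (a sketch; the URL paths are assumptions about the project's routing,
# not taken from this module):
#
#     GET /benchmark/mandate/?category=3&year=2016
#     -> 200 {"results": [...serialized Govrank rows...]}
#
#     GET /benchmark/indicator/42/?category=3        (year defaults to the latest Yearref)
#     -> 200 {"results": [...serialized Govindicatorrank rows...]}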
|
import types
mpt = types.MappingProxyType({'k1': 'v1'})
print(mpt)
print('k1' in mpt)
print(mpt['k1'])
print(iter(mpt))
print(len(mpt))
print(mpt.copy())
print(mpt.get('k1'))
print(mpt.items())
print(mpt.keys())
print(mpt.values())
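# The proxy is read-only, but it tracks changes made to the underlying dict
# (a small additional sketch of that behaviour):
d = {'k1': 'v1'}
proxy = types.MappingProxyType(d)
try:
    proxy['k2'] = 'v2'
except TypeError as err:
    print('write rejected:', err)
d['k2'] = 'v2'
print(proxy['k2'])  # changes to the wrapped dict show through the proxy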
|
# Copyright (c) OpenMMLab. All rights reserved.
"""
python demo/bottom_up_img_demo.py \
configs/body/2d_kpt_sview_rgb_img/associative_embedding/crowdpose/higherhrnet_w32_anim_512x512_udp.py \
work_dirs/higherhrnet_w32_anim_512x512_udp/best_AP_epoch_20.pth \
--img-path data/anim/train \
--show
"""
import os
import os.path as osp
import warnings
from argparse import ArgumentParser
import mmcv
from mmpose.apis import (inference_bottom_up_pose_model, init_pose_model,
vis_pose_result)
from mmpose.datasets import DatasetInfo
def main():
"""Visualize the demo images."""
parser = ArgumentParser()
parser.add_argument('pose_config', help='Config file for the pose model')
parser.add_argument('pose_checkpoint', help='Checkpoint file')
parser.add_argument(
'--img-path',
type=str,
help='Path to an image file or an image folder.')
parser.add_argument(
'--show',
action='store_true',
default=False,
help='whether to show img')
parser.add_argument(
'--out-img-root',
type=str,
default='',
help='Root of the output img file. '
'Default not saving the visualization images.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--kpt-thr', type=float, default=0.3, help='Keypoint score threshold')
parser.add_argument(
'--pose-nms-thr',
type=float,
default=0.9,
help='OKS threshold for pose NMS')
parser.add_argument(
'--radius',
type=int,
default=4,
help='Keypoint radius for visualization')
parser.add_argument(
'--thickness',
type=int,
default=1,
help='Link thickness for visualization')
args = parser.parse_args()
assert args.show or (args.out_img_root != '')
# prepare image list
if osp.isfile(args.img_path):
image_list = [args.img_path]
elif osp.isdir(args.img_path):
image_list = [
osp.join(args.img_path, fn) for fn in os.listdir(args.img_path)
if fn.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp'))
]
else:
raise ValueError('Image path should be an image or image folder.'
f'Got invalid image path: {args.img_path}')
# build the pose model from a config file and a checkpoint file
pose_model = init_pose_model(
args.pose_config, args.pose_checkpoint, device=args.device.lower())
dataset = pose_model.cfg.data['test']['type']
dataset_info = pose_model.cfg.data['test'].get('dataset_info', None)
if dataset_info is None:
warnings.warn(
'Please set `dataset_info` in the config.'
'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
DeprecationWarning)
assert (dataset == 'BottomUpCocoDataset')
else:
dataset_info = DatasetInfo(dataset_info)
# optional
return_heatmap = False
# e.g. use ('backbone', ) to return backbone feature
output_layer_names = None
# process each image
for image_name in mmcv.track_iter_progress(image_list):
# test a single image, with a list of bboxes.
pose_results, returned_outputs = inference_bottom_up_pose_model(
pose_model,
image_name,
dataset=dataset,
dataset_info=dataset_info,
pose_nms_thr=args.pose_nms_thr,
return_heatmap=return_heatmap,
outputs=output_layer_names)
if args.out_img_root == '':
out_file = None
else:
os.makedirs(args.out_img_root, exist_ok=True)
out_file = os.path.join(args.out_img_root,
f'vis_{osp.basename(image_name)}.jpg')
# show the results
vis_pose_result(
pose_model,
image_name,
pose_results,
radius=args.radius,
thickness=args.thickness,
dataset=dataset,
dataset_info=dataset_info,
kpt_score_thr=args.kpt_thr,
show=args.show,
out_file=out_file)
if __name__ == '__main__':
main()
|
"""empty message
Revision ID: a5879be2fd07
Revises: d5f87144251f
Create Date: 2019-06-03 15:29:01.144838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a5879be2fd07'
down_revision = 'd5f87144251f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('cloud_connection',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('type', sa.String(), nullable=True),
sa.Column('name', sa.String(), nullable=True),
sa.Column('bucket', sa.String(), nullable=True),
sa.Column('region', sa.String(), nullable=True),
sa.Column('access_key_id', sa.String(), nullable=True),
sa.Column('access_key_secret', sa.String(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('cloud_connection')
# ### end Alembic commands ###
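# To apply or roll back this revision with the standard Alembic CLI (run from the directory
# containing alembic.ini):
#
#     alembic upgrade a5879be2fd07
#     alembic downgrade d5f87144251f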
|
# -*- coding: utf-8 -*-
import re
def strPreProcess(question):
value = question
try:
if re.search(r'为负值|为负|是负', value):
value = re.sub(r'为负值|为负|是负', '小于0', value)
if re.search(r'为正值|为正|是正', value):
value = re.sub(r'为正值|为正|是正', '大于0', value)
# Handle "X.x 块钱" (kuai, yuan) and "X 毛钱" (mao, tenths of a yuan) money phrases
value = value.replace('块钱', '块')
value = value.replace('千瓦', 'kw')
value = value.replace('个', '')
value = value.replace(' ', '')
patten_money = re.compile(r'[零|一|幺|二|两|三|四|五|六|七|八|九|十|百]{1,}点[零|一|幺|二|两|三|四|五|六|七|八|九|十|百]{1,}')
k = patten_money.findall(value)
if k:
for item in k:
listm = item.split('点')
front, rf = chinese_to_digits(listm[0])
end, rn = chinese_to_digits(listm[1])
val = str(front) + '.' + str(end)
value = value.replace(item, val, 1)
patten_kuai = re.compile(r'[零|一|幺|二|两|三|四|五|六|七|八|九|十|百]{1,}块[零|一|幺|二|两|三|四|五|六|七|八|九|十|百]{,1}')
km = patten_kuai.findall(value)
if km:
for item in km:
listm = item.split('块')
front, rf = chinese_to_digits(listm[0])
end, rn = chinese_to_digits(listm[1])
if end:
val = str(front) + '.' + str(end) + '元'
else:
val = str(front) + '元'
value = value.replace(item, val, 1)
# value = value.replace('毛钱', '元',)
# value = value.replace('毛', '元')
patten_mao = re.compile(r'[零|一|幺|二|两|三|四|五|六|七|八|九|十|百]{1}毛|[0-9]毛')
kmao = patten_mao.findall(value)
if kmao:
for item in kmao:
strmao = item.replace('毛', '')
valmao, rm = chinese_to_digits(strmao)
maoflo = str(float(valmao)/10) + '元'
value = value.replace(item, maoflo, 1)
value = value.replace('元毛', '元')
patten_jao = re.compile(r'[一二两三四五六七八九123456789]角')
kjao = patten_jao.findall(value)
if kjao:
for item in kjao:
strjao = item.replace('角', '')
valjao, rm = chinese_to_digits(strjao)
jaoflo = str(float(valjao) / 10) + '元'
value = value.replace(item, jaoflo, 1)
value = value.replace('元毛', '元')
# patten_datec = re.compile(r'[零|一|幺|二|两|三|四|五|六|七|八|九|十|百|0|1|2|3|4|5|6|7|8|9]{1,}月[零|一|幺|二|两|三|四|五|六|七|八|九|十|百|0|1|2|3|4|5|6|7|8|9]{,2}')
# kmonthday = patten_datec.findall(value)
# if kmonthday:
# print('kmonthday',kmonthday)
        # convert Chinese numerals to Arabic digits
mm = re.findall(r'[〇零一幺二两三四五六七八九十百千]{2,}',value)
if mm:
for item in mm:
v, r = chinese_to_digits(item)
if r ==1 and v//10 + 1 !=len(item):
v = str(v).zfill(len(item) - v//10)
value = value.replace(item, str(v),1)
        mmd = re.findall(r'[123456789千]{2,}', value)
if mmd:
for item in mmd:
v, r = chinese_to_digits(item)
value = value.replace(item, str(v), 1)
mmm = re.findall(r'[一二两三四五六七八九十123456789]{1,}万[二两三四五六七八九23456789]',value)
if mmm:
for item in mmm:
sv = item.replace('万', '')
v,r = chinese_to_digits(sv)
value = value.replace(item, str(v*1000), 1)
#print('--mmm--',mmm,value)
        # e.g. 2万2 -> 22000
mmw = re.findall(r'[一二两三四五六七八九十]万', value)
if mmw:
for item in mmw:
iv = item.replace('万','')
v, r = chinese_to_digits(iv)
value = re.sub(item, str(v)+'万', value)
'''
mmw = re.findall(r'[一幺二两三四五六七八九十]万',value)
if mmw:
for item in mmw:
v, r = chinese_to_digits(item)
value = re.sub(item, str(v), value)
mmy = re.findall(r'[一幺二两三四五六七八九十百]亿', value)
if mmy:
for item in mmy:
v, r = chinese_to_digits(item)
value = re.sub(item, str(v), value)
mmf = re.findall(r'\d*\.?\d+[百千万亿]{1,}',value)
if mmf:
for item in mmf:
v, r = chinese_to_digits(item)
v_item = re.sub(r'[百千万亿]{1,}','',item)
v =float(v_item) * r
value = re.sub(item, str(v), value)
'''
mm2 = re.findall(r'[〇零一幺二两三四五六七八九十百千]{1,}[倍|个|元|人|名|位|周|亿|以上|年|盒|册|天|集|宗]', value)
if mm2:
for item in mm2:
mm22 = re.findall(r'[〇零一幺二两三四五六七八九十百千]{1,}', item)
for item2 in mm22:
v2,r2= chinese_to_digits(item2)
itemvalue = item.replace(item2, str(v2), 1)
value = value.replace(item, itemvalue, 1)
        # 百分之N  ->  N%
if re.search(r'百分之', value):
items = re.findall(r'百分之[零|一|幺|二|两|三|四|五|六|七|八|九|十|百]{1,}', value)
#items= re.findall(r'百分之\d*?}', value)
if items:
for item in items:
item_t = item.replace('百分之', '')
k, r = chinese_to_digits(item_t)
item_t = str(k) + '%'
value = re.sub(str(item), str(item_t), value)
#print('1--',items,value)
items_two = re.findall(r'百分之\d{1,}\.?\d*', value)
if items_two:
for item in items_two:
item_t = item.replace('百分之', '') + '%'
value = re.sub(str(item), str(item_t), value)
#print('2--', items_two, value)
if re.search(r'百分点', value):
items_we = re.findall(r'[零|一|幺|二|两|三|四|五|六|七|八|九|十|百]{1,}.??百分点', value)
if items_we:
for item in items_we:
item_t = re.sub('.??百分点', '', item)
k,r = chinese_to_digits(item_t)
item_t = str(k) + '%'
value = re.sub(str(item), str(item_t), value)
#print('百分点-中',items_we,value)
items_se = re.findall(r'\d+?\.??\d*.??百分点', value)
if items_se:
for item in items_se:
item_t = re.sub('.??百分点', '', item) + '%'
value = re.sub(str(item), str(item_t), value)
#print('百分点-ala', items_se, value)
mm3 = re.findall(r'[大于|小于|前|超过|第|破][〇零一幺二两三四五六七八九十百千]{1,}', value)
if mm3:
for item in mm3:
mm33 = re.findall(r'[〇零一幺二两三四五六七八九十百千]{1,}', item)
for item2 in mm33:
v3, r3 = chinese_to_digits(item2)
itemvalue = item.replace(item2, str(v3), 1)
# v, r = chinese_to_digits(item)
value = value.replace(item, itemvalue, 1)
mm4 = re.findall(r'[排名|排行|达到|排在|排|列|率]{1,}前[0123456789]{1,}', value)
if mm4:
for item in mm4:
#print('qian_val',item,value)
v = re.sub(r'[排名|排行|达到|排在|排|列|率]{1,}前','',item)
s1 = item.replace('前', '大于', 1)
vs = s1.replace(v,str(int(v)+1),1)
value = value.replace(item, vs, 1)
#print('--前n--',item,value)
        # convert Chinese year strings and pad them to a full year
pattern_date1 = re.compile(r'(\d{2,4}年)')
#pattern_date1 = re.compile(r'(.{1}月.{,2})日|号')
date1 = pattern_date1.findall(value)
dateList1 = list(set(date1))
if dateList1:
for item in dateList1:
v = str_to_date(item)
value = re.sub(str(item), str(v), value)
pattern_date2 = re.compile(r'(\d+)(\-|\.)(\d+)(\-|\.)(\d+)')
date2 = pattern_date2.findall(value)
dateList2 = list(set(date2))
if dateList2:
for item in dateList2:
v = str_to_date(item)
value = re.sub(str(item), str(v), value)
pattern_date3 = re.compile(r'[零|一|幺|二|两|三|四|五|六|七|八|九|十|0|1|2|3|4|5|6|7|8|9]{1,}月[零|一|幺|二|两|三|四|五|六|七|八|九|十|0|1|2|3|4|5|6|7|8|9]{1,2}')
date3 = pattern_date3.findall(value)
if date3:
nflag = 0
for item in date3:
listm = item.split('月')
if listm[0].isdigit():
front = listm[0]
else:
front, rf = chinese_to_digits(listm[0])
nflag = 1
if listm[1].isdigit():
end = listm[1]
else:
end, rn = chinese_to_digits(listm[1])
nflag = 1
if nflag:
kv= str(front) + '月'+ str(end)
#kv = str_to_date(kv)
value = value.replace(item, kv,1)
pattern_date4 = re.compile(r'\d*?年[\D]{1}月')
date4 = pattern_date4.findall(value)
if date4:
for item in date4:
kitem = re.findall(r'([\D]{1})月',item)
k,v = chinese_to_digits(kitem[0])
mm = item.replace(kitem[0],str(k))
#mm = str_to_date(mm)
value = re.sub(item, mm, value)
if re.search(r'1下|1共|.1元股|1线', value):
value = value.replace('1下', '一下')
value = value.replace('.1元股', '元一股')
value = value.replace('1共', '一共')
value = value.replace('1线', '一线')
except Exception as exc:
# print('strPreProcess_error', exc,'--',value)
pass
return value
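# A rough usage sketch of strPreProcess (outputs traced by hand through the rules
# above, so treat them as illustrative rather than guaranteed):
#   strPreProcess('股价高于十块钱')    -> '股价高于10元'
#   strPreProcess('涨幅超过百分之八')  -> '涨幅超过8%'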
# convert Chinese numeral characters to Arabic numbers
def chinese_to_digits(uchars_chinese):
total = 0
common_used_numerals_tmp = {
'0': 0,
'1': 1,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
'7': 7,
'8': 8,
'9': 9,
'〇': 0,
'零': 0,
'一': 1,
'幺': 1,
'二': 2,
'两': 2,
'三': 3,
'四': 4,
'五': 5,
'六': 6,
'七': 7,
'八': 8,
'九': 9,
'十': 10,
'百': 100,
'千': 1000,
'万': 10000,
'百万': 1000000,
'千万': 10000000,
'亿': 100000000,
'百亿': 10000000000
}
    r = 1  # current place value: ones, tens, hundreds, thousands, ...
try:
for i in range(len(uchars_chinese) - 1, -1, -1):
# print(uchars_chinese[i])
val = common_used_numerals_tmp.get(uchars_chinese[i])
if val is not None:
# print('val', val)
                if val >= 10 and i == 0:  # handle a leading 十/百/千, e.g. 十三, 十四
if val > r:
r = val
total = total + val
else:
r = r * val
# total = total + r * x
elif val >= 10:
if val > r:
r = val
else:
r = r * val
elif val == 0 and i != 0:
r = r * 10
elif r == 1:
total = total + pow(10,len(uchars_chinese) - i - 1) * val
else:
total = total + r * val
except Exception as exc:
print(uchars_chinese)
print('chinese_to_digits_error',exc)
return total, r
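# Worked examples for chinese_to_digits (the second return value `r` is the largest
# place value encountered while scanning right to left):
#   chinese_to_digits('八')      -> (8, 1)
#   chinese_to_digits('十三')    -> (13, 10)
#   chinese_to_digits('三千五百') -> (3500, 1000)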
# convert a date string to a normalized date
def str_to_date(date_str):
try:
        # numeric form with year, month and day
date_search = re.search('(\d+)(\-|\.)(\d+)(\-|\.)(\d+)', date_str)
if date_search:
year_str = date_search.group(1)
month_str = date_search.group(3)
day_str = date_search.group(5)
if len(year_str) == 2:
year_str = '20' + year_str
if len(year_str) == 3:
year_str = '2' + year_str
date_date = '{}-{}-{}日'.format(year_str, month_str, day_str)
return date_date
        # numeric form with year and month only
        # (guidance announcements default to the end of the month)
date_search = re.search('(\d+)(\-|\.)(\d+)', date_str)
if date_search:
year_str = date_search.group(1)
month_str = date_search.group(3)
if len(year_str) == 2:
year_str = '20' + year_str
if len(year_str) == 3:
year_str = '2' + year_str
date_date = '%s-%s月' % (year_str, month_str)
return date_date
        # the cases below contain Chinese characters
date_str = date_str.replace('号', '日')
        # year, month and day all present
date_search = re.search('(.{2,4})年(.*?)月(.*?)日', date_str)
if date_search:
            if date_search.group(1).isdigit():  # isnumeric() would treat Chinese numerals like 一二三四 as digits
                # only 年/月/日 are Chinese characters; the numbers themselves are already Arabic digits
year_str = date_search.group(1)
month_str = date_search.group(2)
day_str = date_search.group(3)
                # pad the year to 4 digits if it is shorter
if len(year_str) == 2:
year_str = '20' + year_str
if len(year_str) == 3:
year_str = '2' + year_str
date_str = '%s-%s-%s日' % (year_str, month_str, day_str)
return date_str
        # only year and month
date_search = re.search('(.{2,4})年(.*?)月', date_str)
if date_search:
if date_search.group(1).isdigit():
year_str = date_search.group(1)
month_str = date_search.group(2)
if len(year_str) == 2:
year_str = '20' + year_str
if len(year_str) == 3:
year_str = '2' + year_str
date_str = '%s-%s月' % (year_str, month_str)
return date_str
        # only the year
date_search = re.search('(\d{2,4})年', date_str)
if date_search:
if date_search.group(1).isdigit():
year_str = date_search.group(1)
if len(year_str) == 2 and int(year_str[0]) < 2:
year_str = '20' + year_str
if len(year_str) == 3:
year_str = '2' + year_str
date_str = '%s年' % (year_str)
return date_str
# print('处理不了的日期 %s' % date_str)
except Exception as exc:
pass
return None
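# Examples of the normalisation above (two/three-digit years are padded to 20xx/2xxx):
#   str_to_date('19年1月7日') -> '2019-1-7日'
#   str_to_date('15.7.21')    -> '2015-7-21日'
#   str_to_date('一五年七月')  -> None  (years written with Chinese numerals are not handled here)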
def unit_convert(ques):
value = ques
try:
mmw = re.findall(r'[一幺二两三四五六七八九十]万', value)
if mmw:
for item in mmw:
v, r = chinese_to_digits(item)
value = re.sub(item, str(v), value)
mmy = re.findall(r'[一幺二两三四五六七八九十百]亿', value)
if mmy:
for item in mmy:
v, r = chinese_to_digits(item)
value = re.sub(item, str(v), value)
mmf = re.findall(r'\d*\.?\d+万|\d*\.?\d+百万|\d*\.?\d+千万|\d*\.?\d+亿', value)
if mmf:
for item in mmf:
mmf_v = re.sub(r'万|百万|千万|亿','',item)
mmf_r = re.sub(mmf_v,'',item)
v, r = chinese_to_digits(mmf_r)
#print('dig', mmf,v,'--',r)
value = re.sub(item, str(int(float(mmf_v)*r)), value)
except Exception as exc:
print('unit_convert_error',exc,'---',ques)
return value
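# Example: unit_convert('3.5万') -> '35000'; the trailing unit (万/百万/千万/亿) is
# resolved via chinese_to_digits and multiplied into the leading number.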
str_test1 = '11和2012年,19年1月7日到十九日周票房超过一千万的影投公司,幺九年一月十四到十九播放数大于三千万的剧集,18年同期'
str_test2 = '市值是不超过百亿元,股价高于十块钱,增长超过两块五,或者上涨幅度大于百分之八的股票'
str_test3 = '涨幅为正,年收益为正值,税后利率不为负,税后利润不为负值的股票'
str_test4 = '2019年第1周超过一千万并且占比高于百分之十的,百分之16,百分之几,百分之92.5,百分之0.2,十五个百分点,八个百分点'
str_test5 = '请问有哪些综艺节目它的收视率超过百分之0.2或者市场的份额超过百分之2的'
str_test6 = '中国国航的指标是什么啊,它的油价汇率不足3.5个百分点'
str_test7 = '你知道零七年五月三号,一五年七月,分别领人名币三千块钱,改革开放三十年,给你十块'
str_test8 = '三块五毛钱,四块五毛钱,三千万,六点五块钱,八点五块钱,五毛钱'
'''
你好啊请问一下上海哪些楼盘的价格在2012年的五月份超过了一万五一平-----你好啊请问一下上海哪些楼盘的价格在2012年的五月份超过了一万51平
请问一下有没有什么股票交易交割高于七块钱一股的-----请问一下有没有什么股票交易交割高于七元一股的
二月四号到十号,排名前十的院线有几个总票房大于四亿的-----2月4号到十号,排名前十的院线有几个总票房大于四亿的
保利地产公司股11年每股盈余超过一元,那它12年的每股盈余又会是多少呀-----保利地产公司股2011年每股盈余超过一元,那它2012年的每股盈余又会是多少呀
想知道有多少家影投公司第四周的票房是超过一千五百万?-----想知道有多少家影投公司第四周的票房是超过一千500万?
我想咨询一下有哪些地产股票股价是不低于十块而且在11年每股税后利润还高于一块一股-----我想咨询一下有哪些地产股票股价是不低于十块而且在2011年每股税后利润还高于1.1元股
贷款年限10年假设降20个基点调整前后是什么情况才能使每月减少还款不足100元-----贷款年限2010年假设降20个基点调整前后是什么情况才能使每月减少还款不足100元
'''
#patten = re.compile(r'[零|一|幺|二|两|三|四|五|六|七|八|九|十|百]{1,}块[零|一|幺|二|两|三|四|五|六|七|八|九|十|百]{1,}')
def datacontinous(strcofig):
question = strcofig
p = re.compile(r'([零一幺二两三四五六七八九十百0123456789]+.?)([零一幺二两三四五六七八九十百0123456789]+.?)到([零一幺二两三四五六七八九十百0123456789]+.?[零一幺二两三四五六七八九十百0123456789]+)')
plist = p.findall(question)
if plist:
for item in plist:
front = '{}{}'.format(item[0],item[1])
end = str(item[2])
#print('---到---',plist,front,end)
pdig = re.compile(r'([零一幺二两三四五六七八九].?)+')
plist = pdig.findall(question)
if plist:
print('plist--',plist)
'''
with open("F:\\天池比赛\\nl2sql_test_20190618\\test.json", "r", encoding='utf-8') as fr,open("F:\\天池比赛\\nl2sql_test_20190618\\log.txt", "w+", encoding='utf-8') as fw:
count = 0
for line in fr.readlines():
lines = eval(line)
value_re = strPreProcess(lines['question'])
value_re = datacontinous(value_re)
count += 1
#if value_re != lines['question']:
# string = lines['question'] + '-----' + value_re + '\n'
# fw.write(str(string))
print('count',count)
'''
# value_re = strPreProcess(str_test7)
# print('----',value_re)
'''
if re.search(r'1下|1共|1句|1线|哪1年|哪1天', value):
value = value.replace('1下', '一下')
value = value.replace('1句', '一句')
value = value.replace('1共', '一共')
value = value.replace('1线', '一线')
value = value.replace('哪1年', '哪一年')
value = value.replace('哪1天', '哪一天')
if re.search(r'1手房|2手房|2线|2办', value):
value = value.replace('1手房', '一手房')
value = value.replace('2手房', '二手房')
value = value.replace('2线', '二线')
value = value.replace('2办', '两办')
'''
|
from drltools.utils import trainer, ddpg_config
from drltools.agent.agent import DDPGAgent
from unityagents import UnityEnvironment
env = UnityEnvironment(file_name="unity_environments/Reacher_mac.app", worker_id=1)
config = ddpg_config
agent_class = DDPGAgent
n_episodes = 2000
max_t = 1000
solved_score = 30
title = 'DDPG Reacher'
if __name__ == "__main__":
trainer(env, config, agent_class, n_episodes, max_t, solved_score, title)
|
# -*- coding: utf-8 -*-
from typing import Any # , Any
import logging
from collections import namedtuple
import contextlib
import decimal
import terminaltables
import colorclass
from .utils import colorize
from .param_types import Currency, Percentage
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
ColorContext = namedtuple('ColorContext', ('subtotal', 'margin', 'discount',
'deduction', 'total'))
"""A named tuple used to hold colors for items when rendered.
:param subtotal: Color for subtotal's
:param margin: Color for margin's
:param discount: Color for discount's
:param deduction: Color for deduction's
:param total: Color for total's
"""
TotaledContext = namedtuple('TotaledContext', ColorContext._fields)
"""Holds all the values to be rendered by a formatter.
:param subtotal: The subtotal of all the costs, hours, etc. for a calculation.
:param discount: The sum of all the percentage discounts for a calculation.
:param deduction: The sum of all the monetary deductions for a calculation.
:param total: The total of the calculation.
"""
DEFAULT_COLORS = ColorContext(
subtotal='magenta',
margin='blue',
discount='yellow',
deduction='red',
total='green'
)
"""Default colors to use as the ``ColorContext``."""
DEFAULT_FORMULA_STRING = """
color key: {header}
{subtotal_string}
(
(({subtotal} / (1 - {margin}) * (1 - {discount})) - {deduction}) = {total}
)
"""
"""A basic formula string to be formatted and rendered, to show the formula for
a calculation.
"""
class BaseFormatter(object):
"""All formatter's should sub-class this object, and override the
:py:meth:`render` method.
"""
@staticmethod
def colorize(item: Any, color: str) -> colorclass.Color:
"""If an item is a :py:class:`Currency` or :py:class:`Percentage`, then
call it's ``formatted_string`` method, before colorizing the
value.
:param item: A string, Currency, or Percentage to colorize.
:param color: The color to use on the item.
"""
if isinstance(item, (Currency, Percentage)):
item = item.formatted_string()
return colorize(item, color)
@staticmethod
def render(calculator) -> str:
"""The method all sub-classes should override to render a calculator.
:raises NotImplementedError: If a sub-class does not implement this
method.
"""
raise NotImplementedError()
@staticmethod
@contextlib.contextmanager
def totaled_ctx(calculator: Any) -> TotaledContext:
"""A context manager that yields the ``TotaledContext`` for a
calculator.
"""
with calculator.ctx() as ctx:
yield TotaledContext(*list(ctx) + [calculator.total()])
class BasicFormatter(BaseFormatter):
"""A basic formatter that renders the total as a formatted string.
"""
@staticmethod
def render(calculator: Any) -> str:
"""Return the total as formatted currency string."""
try:
return calculator.total().formatted_string()
except AttributeError as exc:
logger.debug('failed render for calculator: {}, exc: {}'.format(
calculator, exc)
)
raise TypeError("'{}' should inherit from BaseCalculator".format(
calculator)
)
class TerminalFormatter(terminaltables.AsciiTable, BaseFormatter):
"""A ``terminaltables.AsciiTable``, that supports colors and
a title.
    :param colors: A 5-tuple or :py:class:`ColorContext` of strings that can
                   be used to convert an item to a ``colorclass.Color``.
Defaults to (subtotal='magenta', margin='blue',
discount='yellow', deduction='red', total='green').
:param title: A title for the table. Defaults to ``'DETAILED'``. If you
do not want a title, then this can be set to ``None``
:param no_colors: If ``True``, turns off colored output for the table.
Default is ``False``.
:param color_header: If ``True`` then colorize the header as well.
Default is ``False``.
"""
def __init__(self, *colors, title: str='DETAILED', no_colors: bool=False,
color_header: bool=False):
super().__init__([], title=title)
if colors and no_colors is False:
self.colors = ColorContext(*colors)
elif no_colors is False:
self.colors = DEFAULT_COLORS
self.no_colors = no_colors
self.color_header = color_header
def render(self, calculator: Any) -> str:
"""Set's up the table, and returns it as a string, to be rendered.
:param calculator: The calculator to create a table for. Should
be a :py:class:`BaseCalculator` or sub-class.
"""
with self.totaled_ctx(calculator) as ctx:
headers = TotaledContext(*map(lambda x: x.upper(),
TotaledContext._fields))
if self.color_header is True:
headers = list(map(lambda x: self.colorize(*x),
zip(headers, self.colors)))
body = list(ctx)
if self.no_colors is False:
body = list(map(lambda items: self.colorize(*items),
zip(body, self.colors)))
logger.debug('body: {}'.format(body))
self.table_data = [headers, body]
return self.table
# TODO: Create a ``ColorContextError`` to raise instead of ``TypeError``
# if colors is the wrong size.
class FormulaFormatter(BaseFormatter):
"""Prints the formula used for the calculations.
    :param colors: A 5-tuple or ``ColorContext`` of strings that can
                   be used to convert an item to a ``colorclass.Color``.
Defaults to (subtotal='magenta', margin='blue',
discount='yellow', deduction='red', total='green').
    :param formula_string: A string to use for the output. ``format`` is
                           called on it with the kwargs ``header``,
                           ``subtotal``, ``margin``, ``discount``,
                           ``deduction``, and ``total``. Defaults to
                           ``DEFAULT_FORMULA_STRING``.
:param no_color: Turns colored output off.
:param title: Title to display before the output.
:raises TypeError: If colors is not a 5 tuple.
"""
def __init__(self, *colors, formula_string: str=None, no_color: bool=False,
title: str='FORMULA') -> None:
self.title = title
if colors and no_color is False:
# convert the colors to a ``ColorContext``
# this can raise ``TypeError`` if not enough or too many values.
self.colors = ColorContext(*colors)
elif no_color is False:
# set the colors to the default
self.colors = DEFAULT_COLORS
self.no_color = no_color
# set the formula string to use for this instance.
if formula_string:
self.formula_string = formula_string
else:
self.formula_string = DEFAULT_FORMULA_STRING
def render(self, calculator: Any) -> str:
"""Render a formula string used for a ``BaseCalculator`` instance or
sub-class.
:param calculator: A ``BaseCalculator`` or sub-class to use as the
context for the output.
"""
with self.totaled_ctx(calculator) as ctx:
# find the hours and rate to build the subtotal
# formula
hours = calculator._hours()
rate = decimal.Decimal(str(calculator.rate))
# format a subtotal formula string.
subtotal_string = \
'subtotal = ({costs} + ({hours} * {rate}))'.format(
costs=ctx.subtotal - (hours * rate),
hours=hours,
rate=rate)
# colorize if applicable.
if self.no_color is False:
ctx = TotaledContext(*map(lambda x: self.colorize(*x),
zip(ctx, self.colors)))
header = ' '.join(map(lambda x: self.colorize(*x),
zip(ctx._fields, self.colors)))
else:
ctx = TotaledContext(*map(lambda x: x.formatted_string(), ctx))
header = ' '.join(ctx._fields)
# get the values from the context, either colored or not,
# and add our headers.
values = ctx._asdict()
values['header'] = header
values['subtotal_string'] = subtotal_string
            # format the string (``self.formula_string``)
formatted = self.formula_string.format(**values)
# build and return the final output string.
return '\n'.join((self.title, '-' * len(self.title),
formatted))
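# A hedged usage sketch (assumes `calc` is an instance of a BaseCalculator
# subclass from this package; its construction is not shown here):
#   print(BasicFormatter.render(calc))                    # just the formatted total
#   print(TerminalFormatter(title='QUOTE').render(calc))  # colored ASCII table
#   print(FormulaFormatter(no_color=True).render(calc))   # plain formula breakdown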
|
#! /usr/bin/python
"""
This index page provides two kinds of searches: by gene name (probe IDs, gene IDs and symbols)
and by gene function (GO terms). They submit to genesearch.py and gosearch.py, respectively.
"""
import cgi
if __name__=='__main__':
print 'Content-type: text/html'
print
print '<html><head><title>'
    print 'Arabidopsis thaliana Microarray Data Browser'
print '</title></head>'
print '<body>'
print '<div id="topbanner">'
    print '<h1>Welcome to the Arabidopsis thaliana Microarray Database!</h1>'
print '<p></p>'
    print '<p>Gene name search is the only field that accepts multiple entries, separated by commas " , ".</p>'
print '</div>'
print '<div id="mainbody">'
print '<table cellpadding=5 style="border-color:black;border-style:solid;border-width:thin" width="1000" align="center">'
print '<tbody>'
print '<form action="genesearch.py">'
print '<tr><td>'
print '<p><b>Search by gene name</b></p></td></tr>'
print '<tr><td>'
print '<input type=radio name=genename value="1" checked>Probe ID</td>'
print '<td>'
print '<input type=radio name=genename value="2">Gene Symbol</td>'
print '<td>'
print '<input type=radio name=genename value="3">Gene ID</td>'
print '<td>'
print '<input type=text name=gnsearch value=""></td>'
print '</tr>'
print '<tr>'
print '<td>'
print '<p><b>'
print 'Search by experiment features'
print '</b></p>'
print '</td>'
print '</tr>'
print '<tr><td>'
print '</td><td><select size="1" name="exp1"><option value="1">----</option><option value="2">Genetic Background</option><option value="3">Tissue</option><option value="4">Treatment</option></select></td><td>'
print 'contains'
print '</td><td><input type=text name="expsearch1" value=""></td></tr>'
print '<tr><td>'
print 'AND'
print '</td><td><select size="1" name="exp2"><option value="1">----</option><option value="2">Genetic Background</option><option value="3">Tissue</option><option value="4">Treatment</option></select></td><td>'
print 'contains'
print '</td><td><input type=text name="expsearch2" value=""></td></tr>'
print '<tr><td>'
print 'AND'
print '</td><td><select size="1" name="exp3"><option value="1">----</option><option value="2">Genetic Background</option><option value="3">Tissue</option><option value="4">Treatment</option></select></td><td>'
print 'contains'
print '</td><td><input type=text name="expsearch3" value=""></td></tr>'
print '<tr><td>'
print '<input type=submit></td></tr>'
print '</form>'
print '</tbody>'
print '</table>'
print '<table cellpadding=5 style="border-color:black;border-style:solid;border-width:thin" width="1000" align="center">'
print '<tbody>'
print '<form action="gosearch.py">'
print '<tr>'
print '<td>'
print '<p><b>'
print 'Search by GO terms'
print '</b></p>'
print '</td>'
print '</tr>'
print '<tr><td>'
print '</td><td><select size="1" name="genefunction1"><option value="1">----</option><option value="2">GO Biological Process</option><option value="3">GO Cellular Component</option><option value="4">GO Molecular Function</option></select></td><td>'
print 'contains'
print '</td><td><input type=text name="gfsearch1" value=""></td></tr>'
print '<tr><td>'
print 'AND'
print '</td><td><select size="1" name="genefunction2"><option value="1">----</option><option value="2">GO Biological Process</option><option value="3">GO Cellular Component</option><option value="4">GO Molecular Function</option></select></td><td>'
print 'contains'
print '</td><td><input type=text name="gfsearch2" value=""></td></tr>'
print '<tr><td>'
print 'AND'
print '</td><td><select size="1" name="genefunction3"><option value="1">----</option><option value="2">GO Biological Process</option><option value="3">GO Cellular Component</option><option value="4">GO Molecular Function</option></select></td><td>'
print 'contains'
print '</td><td><input type=text name="gfsearch3" value=""></td></tr>'
print '<tr>'
print '<td>'
print '<p><b>'
print 'Search by experiment features'
print '</b></p>'
print '</td>'
print '</tr>'
print '<tr><td>'
print '</td><td><select size="1" name="exp1"><option value="1">----</option><option value="2">Genetic Background</option><option value="3">Tissue</option><option value="4">Treatment</option></select></td><td>'
print 'contains'
print '</td><td><input type=text name="expsearch1" value=""></td></tr>'
print '<tr><td>'
print 'AND'
print '</td><td><select size="1" name="exp2"><option value="1">----</option><option value="2">Genetic Background</option><option value="3">Tissue</option><option value="4">Treatment</option></select></td><td>'
print 'contains'
print '</td><td><input type=text name="expsearch2" value=""></td></tr>'
print '<tr><td>'
print 'AND'
print '</td><td><select size="1" name="exp3"><option value="1">----</option><option value="2">Genetic Background</option><option value="3">Tissue</option><option value="4">Treatment</option></select></td><td>'
print 'contains'
print '</td><td><input type=text name="expsearch3" value=""></td></tr>'
print '<tr><td colspan="4"><input type=submit></td></tr>'
print '</form>'
print '</tbody>'
print '</table>'
print '</div>'
print '</body>'
print '</html>'
|
# -*- coding: utf-8 -*-
import mimetypes
import os
from hashlib import md5
from lektor.publisher import Publisher, PublishError
from lektor.pluginsystem import Plugin
from lektor.project import Project
from lektor.types.formats import Markdown
from algoliasearch import algoliasearch
class AlgoliaPlugin(Plugin):
name = u'algolia'
description = u'Adds Algolia as a deploy target. Use algolia://<index> to deploy to an index.'
def on_setup_env(self, **extra):
config = self.get_config()
self.env.algolia_credentials = {}
self.env.algolia_credentials['app_id'] = config.get('app_id')
self.env.algolia_credentials['api_key'] = config.get('api_key')
# Modern Lektor stores publishers in env
if hasattr(self.env, 'publishers'):
self.env.publishers['algolia'] = AlgoliaPublisher
# Older versions stored publishers in a global
else:
from lektor.publisher import publishers
publishers['algolia'] = AlgoliaPublisher
def is_public_field(field):
# ignore system fields and the indexed boolean
name = field['name']
return name[0] != '_' and name != "indexed"
def public_field_names(model_fields):
return [field['name'] for field in model_fields if is_public_field(field)]
def stringify(record, field_name):
val = record[field_name]
if isinstance(val, Markdown):
return val.source
return unicode(val).encode('utf8')
def hit_object_ids(search_page):
return set([hit["objectID"] for hit in search_page['hits']])
def is_indexable(record):
return 'indexed' in record and record['indexed'] == True
def merge_credentials(config_creds, cli_creds):
"""merge config file credentials with command line credentials."""
merged_creds = config_creds
# do this second to prefer cli creds over config file
if cli_creds:
if cli_creds['username']:
merged_creds['app_id'] = cli_creds['username']
if cli_creds['password']:
merged_creds['api_key'] = cli_creds['password']
if cli_creds['key']:
merged_creds['api_key'] = cli_creds['key']
return merged_creds
class AlgoliaPublisher(Publisher):
def __init__(self, env, output_path):
super(AlgoliaPublisher, self).__init__(env, output_path)
# algolia = the algolia client, index = the index object
self.algolia = None
self.index = None
self.index_name = ''
def split_index_uri(self, target_url):
index_name = target_url.netloc
return index_name
def verify_index_exists(self):
exists = True
try:
settings = self.index.get_settings()
except algoliasearch.AlgoliaException as e:
print e
exists = False
return exists
def list_remote(self):
"handle pagination eventually..."
all_object_ids = set()
params = {'attributesToRetrieve': 'objectID', 'hitsPerPage': 100}
first_page = self.index.search('', params)
first_page_hits = hit_object_ids(first_page)
all_object_ids.update(first_page_hits)
page_count = first_page['nbPages']
for i in range(1, page_count):
            next_page = self.index.search('', dict(params, page=i))  # dict has no .extend(); build per-page params
            if next_page["nbHits"] > 0:
                next_page_hits = hit_object_ids(next_page)
all_object_ids.update(next_page_hits)
else:
break
return all_object_ids
def add_index_children_json(self, pad, record):
record_json = []
for child in record.children.all():
if is_indexable(child):
model = child.datamodel
model_json = model.to_json(pad, child)
model_field_names = public_field_names(model_json['fields'])
child_data = {field_name: stringify(child, field_name) for field_name in model_field_names}
child_data['objectID'] = child['_gid']
# upload path so we can send the user to the right url for a search query!
child_data['_path'] = child['_path']
record_json.append(child_data)
record_json += self.add_index_children_json(pad, child)
return record_json
def list_local(self):
all_records = []
project = Project.discover()
env = project.make_env()
pad = env.new_pad()
root = pad.root
all_records = self.add_index_children_json(pad, root)
return all_records
def compute_diff(self, local_keys, remote_keys):
"""Compute the changeset for updating remote to match local"""
diff = {
'add': [],
'delete': [],
}
diff['delete'] = remote_keys.difference(local_keys)
diff['add'] = local_keys
return diff
def connect(self, credentials):
self.algolia = algoliasearch.Client(
credentials['app_id'], credentials['api_key']
)
def publish(self, target_url, credentials=None, **extra):
merged_creds = merge_credentials(self.env.algolia_credentials, credentials)
yield "Checking for Algolia credentials and index..."
if 'app_id' in merged_creds and 'api_key' in merged_creds:
self.connect(merged_creds)
self.index_name = self.split_index_uri(target_url)
self.index = self.algolia.init_index(self.index_name)
if not self.verify_index_exists():
raise PublishError(
'Algolia index "%s" does not exist, or the API key provided does not have access to it. \
Please create the index / verify your credentials on their website.'
% self.index_name
)
yield "Verified Algolia index exists and is accessible via your credentials."
local = self.list_local()
local_keys = set([record['objectID'] for record in local])
remote = self.list_remote()
yield "Found %d local records to index." % len(local)
yield "Found %d existing remote records in the index." % len(remote)
yield "Computing diff for index update..."
diff = self.compute_diff(local_keys, remote)
res_delete = self.index.delete_objects(list(diff['delete']))
delete_count = len(res_delete['objectIDs'])
yield "Deleted %d stale records from remote index." % delete_count
res_add = self.index.save_objects(local)
add_count = len(res_add['objectIDs'])
yield "Finished submitting %d new/updated records to the index." % add_count
yield "Processing the updated index is asynchronous, so Aloglia may take a while to reflect the changes."
else:
yield 'Could not connect to Algolia.'
yield 'Make sure api_key and app_id are present in your configs/algolia.ini file.'
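# A hedged example of configs/algolia.ini (the key names come from on_setup_env
# above; how Lektor's config resolves sections for `config.get('app_id')` is an
# assumption, so adjust the layout to your Lektor version):
#   app_id = YOUR_ALGOLIA_APP_ID
#   api_key = YOUR_ALGOLIA_ADMIN_API_KEY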
|
# -*- coding: utf-8 -*-
import unittest
import libot.grasp.libot_trainer as trainer
import libot.grasp.libot_model as model
import collections
class NaoDialogTest(unittest.TestCase):
def setUp(self):
self.naoDialogUtil = model.NaoDialogUtil()
pass
def util_find_reponse(self, naoDialogModel, naoDialogContext, user_step, bot_step):
response=self.naoDialogUtil.find_response(naoDialogModel, naoDialogContext, user_step)
self.assertEqual(bot_step,response.responseText)
return response
def util_find_reponse_in(self, naoDialogModel, naoDialogContext, user_step, bot_step):
response=self.naoDialogUtil.find_response(naoDialogModel, naoDialogContext, user_step)
self.assertIs(True, response.responseText in bot_step)
return response
def util_find_reponse_with_event(self, naoDialogModel, naoDialogContext, user_step, bot_step, eventValue):
response=self.naoDialogUtil.find_response(naoDialogModel, naoDialogContext, user_step)
self.assertEqual(bot_step,response.responseText)
self.assertEqual(eventValue,response.eventValue)
return response
# dialog_scenario = collections.OrderedDict([("Labas","sveikinas"), ("Kaip tau sekasi","normoje")])
def util_find_reponses(self, naoDialogModel, dialog_scenario):
naoDialogContext = model.NaoDialogContext()
naoDialogUtil = model.NaoDialogUtil()
response_arr = []
for user_step,bot_step in dialog_scenario.items():
response = naoDialogUtil.find_response(naoDialogModel,naoDialogContext, user_step)
response_arr.append(response)
self.assertEqual(bot_step,response.responseText)
return response_arr
def test_dialog_parser_simple(self):
dialog_str = """topic: ~test_dialog()
language: ltu
u:(Labas) Sveiki
u:(Kaip tau sekasi) Normoje
u:(kuri diena) geroji"""
naoDialogTrainer = trainer.NaoDialogTrainer()
naoDialogModel = naoDialogTrainer.train(dialog_str)
naoDialogContext = model.NaoDialogContext()
self.util_find_reponse(naoDialogModel, naoDialogContext, "Labas", "sveiki")
self.util_find_reponse(naoDialogModel, naoDialogContext, "Kaip tau sekasi", "normoje")
self.util_find_reponse(naoDialogModel, naoDialogContext, "kuri diena", "geroji")
def test_dialog_parser_concept(self):
dialog_str = """topic: ~test_dialog()
language: ltu
concept:(greetings) ^rand[sveiki "laba diena"]
u:(~greetings) ~greetings
u:(Kaip tau sekasi) Normoje"""
naoDialogTrainer = trainer.NaoDialogTrainer()
naoDialogModel = naoDialogTrainer.train(dialog_str)
naoDialogContext = model.NaoDialogContext()
self.util_find_reponse_in(naoDialogModel, naoDialogContext, "sveiki", ["sveiki",'laba diena'])
self.util_find_reponse(naoDialogModel, naoDialogContext, "Kaip tau sekasi", "normoje")
def test_dialog_parser_variable(self):
dialog_str = """topic: ~test_dialog()
language: Lithuanian
u:(Kaip tau sekasi) Normoje
u:(kuri diena) geroji $LibotServiceEvent=geroji
u:(kiek valandų) pamiršau laikrodį $LibotServiceEvent=kelti_ranka
"""
naoDialogTrainer = trainer.NaoDialogTrainer()
naoDialogModel = naoDialogTrainer.train(dialog_str)
naoDialogContext = model.NaoDialogContext()
self.util_find_reponse(naoDialogModel, naoDialogContext, "Kaip tau sekasi", "normoje")
self.util_find_reponse_with_event(naoDialogModel, naoDialogContext, "kuri diena", "geroji", "geroji")
self.util_find_reponse_with_event(naoDialogModel, naoDialogContext, "kiek valandų", "pamiršau laikrodį", "kelti_ranka")
def test_dialog_generate_resouces(self):
dialog_str = """topic: ~test_dialog()
language: Lithuanian
concept:(greetings) ^rand[sveiki "laba diena"]
u:(~greetings) ~greetings
u:(Kaip tau sekasi) Normoje
u:(kuri diena) geroji"""
naoDialogTrainer = trainer.NaoDialogTrainer()
naoDialogModel = naoDialogTrainer.train(dialog_str)
(gram, sphinx_dictionary) = model.NaoDialogUtil().generate_sphinx_resouces(naoDialogModel)
self.assertEqual(gram,"#JSGF V1.0;\n\ngrammar adr_element;\n\npublic <adr_element> =\nsveiki|\nlaba diena|\nkaip tau sekasi|\nkuri diena;")
self.assertEqual(sphinx_dictionary,"diena\tD I E N A\nkaip\tK A I P\nkuri\tK U R I\nlaba\tL A B A\nsekasi\tS E K A S I\nsveiki\tS V E I K I\ntau\tT A U")
def test_dialog_parser_subrule(self):
dialog_str = """topic: ~test_dialog()
language: Lithuanian
u:(pakalbam apie gyvūnus) tu turi katę ar šunį?
u1:(šunį) ar didelis?
u2:(taip) prižiūrėk kad daug bėgiotų
u2:(ne) jie tokie mieli
u1:(katę) ar gyveni bute?
u2:(taip) tikiuosi bute daug miega
u2:(ne) ar katinas eina į lauką?
u3:(taip) ar gaudo peles?
u:(pakalbam apie sportą) puiki mintis
"""
naoDialogTrainer = trainer.NaoDialogTrainer()
naoDialogModel = naoDialogTrainer.train(dialog_str)
# chart = naoDialogTrainer.generate_dialog_chart(naoDialogModel)
# print(chart)
naoDialogContext = model.NaoDialogContext()
self.util_find_reponse(naoDialogModel, naoDialogContext, "pakalbam apie gyvūnus", "tu turi katę ar šunį?")
self.util_find_reponse(naoDialogModel, naoDialogContext, "katę", "ar gyveni bute?")
self.util_find_reponse(naoDialogModel, naoDialogContext, "ne", "ar katinas eina į lauką?")
self.util_find_reponse(naoDialogModel, naoDialogContext, "taip", "ar gaudo peles?")
self.util_find_reponse(naoDialogModel, naoDialogContext, "pakalbam apie sportą", "puiki mintis")
def test_dialog_parser_activate_proposal(self):
dialog_str = """topic: ~test_dialog()
language: Lithuanian
u:(labas) %game0 ar patinka žaisti?
u1:(taip) Smagu ^activate(game1)
u1:(ne) bet gal su robotu pažaisi?
proposal: %game1 ar mėgsti krepšinį
u1:(taip) Aš irgi taiklus ^activate(game0)
u1:(ne) Supratu ^activate(game0)
"""
naoDialogTrainer = trainer.NaoDialogTrainer()
naoDialogModel = naoDialogTrainer.train(dialog_str)
# chart = naoDialogTrainer.generate_dialog_chart(naoDialogModel)
# print(chart)
naoDialogContext = model.NaoDialogContext()
self.util_find_reponse(naoDialogModel, naoDialogContext, "labas", "ar patinka žaisti?")
self.util_find_reponse(naoDialogModel, naoDialogContext, "taip", "smagu. ar mėgsti krepšinį")
# self.util_find_reponse(naoDialogModel, naoDialogContext, "labas", "")
self.util_find_reponse(naoDialogModel, naoDialogContext, "ne", "supratu. ar patinka žaisti?")
self.util_find_reponse(naoDialogModel, naoDialogContext, "ne", "bet gal su robotu pažaisi?")
def test_dialog_parser_stayInContext(self):
dialog_str = """topic: ~test_dialog()
language: Lithuanian
u:(labas) ar patinka žaisti?
u1:(taip) Smagu $LibotServiceEvent=kelti_ranka ^stayInScope
u1:(ne) bet gal su robotu pažaisi ^stayInScope
u1:(nežinau) baigiam
"""
naoDialogTrainer = trainer.NaoDialogTrainer()
naoDialogModel = naoDialogTrainer.train(dialog_str)
# chart = naoDialogTrainer.generate_dialog_chart(naoDialogModel)
# print(chart)
naoDialogContext = model.NaoDialogContext()
self.util_find_reponse(naoDialogModel, naoDialogContext, "labas", "ar patinka žaisti?")
self.util_find_reponse(naoDialogModel, naoDialogContext, "ne", "bet gal su robotu pažaisi")
self.util_find_reponse(naoDialogModel, naoDialogContext, "labas", "")
self.util_find_reponse(naoDialogModel, naoDialogContext, "taip", "smagu")
self.util_find_reponse(naoDialogModel, naoDialogContext, "nežinau", "baigiam")
self.util_find_reponse(naoDialogModel, naoDialogContext, "labas", "ar patinka žaisti?")
def test_dialog_parser_event(self):
dialog_str = """topic: ~test_dialog()
language: Lithuanian
u:(Kaip tau sekasi) Normoje
u:(kuri diena) geroji
u:(e:Dialog/Fallback) Neturiu atsakymo
"""
naoDialogTrainer = trainer.NaoDialogTrainer()
naoDialogModel = naoDialogTrainer.train(dialog_str)
naoDialogContext = model.NaoDialogContext()
self.util_find_reponse(naoDialogModel, naoDialogContext, "nemokyta frazė", "neturiu atsakymo")
self.util_find_reponse(naoDialogModel, naoDialogContext, "kuri diena", "geroji")
|
import os
from celery.schedules import crontab
from flask_appbuilder.const import AUTH_OAUTH
from cachelib import RedisCache
from superset.custom_sso_security_manager import CustomSsoSecurityManager
OAUTH_ENABLED = int(os.getenv('OAUTH_ENABLED', 0))
SQLALCHEMY_DATABASE_URI = os.environ['SQLALCHEMY_DATABASE_URI']
REDIS_SERVER_IP = os.getenv('REDIS_SERVER_IP', '')
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', '')
SUPERSET_CACHE_REDIS_URL = "".join(['redis://:', REDIS_PASSWORD, '@', REDIS_SERVER_IP, ':6379/0'])
SUPERSET_BROKER_URL = "".join(['redis://:', REDIS_PASSWORD, '@', REDIS_SERVER_IP, ':6379/0'])
SUPERSET_CELERY_RESULT_BACKEND = "".join(['redis://:', REDIS_PASSWORD, '@', REDIS_SERVER_IP, ':6379/0'])
CACHE_WARMUP_USER = "whopper"
CACHE_DEFAULT_TIMEOUT = 60 * 60 * 6
CACHE_CONFIG = {
'CACHE_TYPE': 'redis',
'CACHE_DEFAULT_TIMEOUT': 60 * 60 * 6, # 6 hr default (in secs)
'CACHE_KEY_PREFIX': 'superset_results',
'CACHE_REDIS_URL': SUPERSET_CACHE_REDIS_URL,
}
SUPERSET_WEBSERVER_TIMEOUT = 180
SQLLAB_TIMEOUT = 180
SQLLAB_VALIDATION_TIMEOUT = 180
ENABLE_PROXY_FIX = True
if OAUTH_ENABLED:
CUSTOM_SECURITY_MANAGER = CustomSsoSecurityManager
AUTH_TYPE = AUTH_OAUTH
AUTH_USER_REGISTRATION = True
AUTH_USER_REGISTRATION_ROLE = "Gamma"
OAUTH_PROVIDERS = [
{'name': 'google', 'icon': 'fa-google', 'token_key': 'access_token', 'whitelist': ['@turtlemint.com'],
'remote_app': {
'client_id': '78989321337-5e44ugm9ev8davgp7591njjv7o81naoc.apps.googleusercontent.com',
'client_secret': '-ul9faMKxh5ddwmXrchaUewr',
'api_base_url': 'https://www.googleapis.com/oauth2/v2/',
'client_kwargs': {
'scope': 'email profile'
},
'request_token_url': None,
'access_token_url': 'https://accounts.google.com/o/oauth2/token',
'authorize_url': 'https://accounts.google.com/o/oauth2/auth'}
}
]
class CeleryConfig: # pylint: disable=too-few-public-methods
BROKER_URL = SUPERSET_BROKER_URL
CELERY_IMPORTS = ("superset.sql_lab", "superset.tasks")
CELERY_RESULT_BACKEND = SUPERSET_CELERY_RESULT_BACKEND
CELERYD_LOG_LEVEL = "DEBUG"
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ACKS_LATE = False
CELERY_ANNOTATIONS = {
"sql_lab.get_sql_results": {"rate_limit": "100/s"},
"email_reports.send": {
"rate_limit": "1/s",
"time_limit": 120,
"soft_time_limit": 150,
"ignore_result": True,
},
}
CELERYBEAT_SCHEDULE = {
'cache-warmup-hourly': {
'task': 'cache-warmup',
'schedule': crontab(minute=30, hour='2,6,10,14'), # check for time zone
'kwargs': {
'strategy_name': 'top_n_dashboards',
'top_n': 50,
'since': '7 days ago',
},
},
"email_reports.schedule_hourly": {
"task": "email_reports.schedule_hourly",
"schedule": crontab(minute=1, hour="*"),
}
}
CELERY_CONFIG = CeleryConfig
RESULTS_BACKEND = RedisCache(
host=REDIS_SERVER_IP,
port=6379,
key_prefix='superset_results',
password=REDIS_PASSWORD
)
SUPERSET_WEBSERVER_PROTOCOL = os.environ['SUPERSET_WEBSERVER_PROTOCOL']
SUPERSET_WEBSERVER_ADDRESS = os.environ['SUPERSET_WEBSERVER_ADDRESS']
SUPERSET_WEBSERVER_PORT = os.environ['SUPERSET_WEBSERVER_PORT']
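# Environment variables read by this config: SQLALCHEMY_DATABASE_URI,
# SUPERSET_WEBSERVER_PROTOCOL, SUPERSET_WEBSERVER_ADDRESS and SUPERSET_WEBSERVER_PORT
# are required; OAUTH_ENABLED (0/1), REDIS_SERVER_IP and REDIS_PASSWORD are optional
# and default to disabled/empty.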
|
# Generated by Django 3.2.8 on 2021-10-31 03:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('twilio_management', '0002_alter_messagingservice_inbound_request_url'),
]
operations = [
migrations.AlterField(
model_name='messagingservice',
name='fallback_url',
field=models.CharField(max_length=255, null=True),
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@title: Non-Exhaustive Gaussian Mixture Generative Adversarial Networks (NE-GM-GAN)
@topic: I-means Model
@author: Jun Zhuang, Mohammad Al Hasan
@ref:
https://math.stackexchange.com/questions/20593/calculate-variance-from-a-stream-of-sample-values
"""
import numpy as np
from utils import compute_loss
class Imeans():
def denoising(self, alist, ts=0.5):
"""
@topic: Denoising Activation Function
@input: 1D list, threshold(float); @output: 1D list.
"""
        if ts > 1 or ts < 0:
            raise ValueError("Given threshold should be in the range of [0, 1].")
list_dn = []
#list_max, list_min = max(alist), min(alist)
list_max, list_min = max(alist), 0
for i in range(len(alist)):
# normalize the data
i_nm = (alist[i] - list_min) / (list_max - list_min)
# filter the data with given threshold
if i_nm > ts:
list_dn.append(1)
else:
list_dn.append(0)
return list_dn
def testing(self, x, i, n_round):
"""Output the information in n_round"""
if n_round <= 0:
return "n_round must be larger than zero."
if i % n_round == 0:
print(x)
def imeans(self, X, mu, cov, N, Z=3, WS=100, verbose=True):
"""
@topic: I-means algorithm: detect the number of new cluster.
@input: X: a batch of testing points (array);
mu: mean of original clusters (list of list); e.g. mu = [[mu0], [mu1], [mu2], ...]
cov: covariance of original clusters (list of list); e.g. cov = [[cov0], [cov1], [cov2], ...]
N: the number of samples in original clusters (list of int); N = [n0, n1, n2, ...]
Z: the value for "Z sigma rule" based on the test of confidence interval (int);
WS: the number of epochs in the warm-up stage for learning beta prior knowledge (int).
@output: k_new: the number of new cluster (int).
"""
        # Initialize parameters
        mu = list(mu)
        sigma = [np.sqrt(np.diag(cov[i])) for i in range(len(cov))]  # per-dimension std devs taken from each covariance diagonal
ts = [compute_loss(mu[i]+Z*sigma[i], mu[i]) for i in range(len(mu)) if len(mu)==len(sigma)] # threshold list (3,)
N_test = list(np.zeros_like(N)) # empty list for storing the number of testing clusters
N_loss = [[] for i in range(len(N))] # collect the historical loss_{min} of existing clusters
N_sp = [[1, 1] for i in range(len(N))] # store the shape parameters [alpha, beta]
for i in range(len(X)): # for each testing point in a batch
if verbose:
self.testing("Round {0}: ".format(i), i, 100)
# Compute the loss to each cluster and find out the loss_{min}.
loss_k_list = []
for k in range(len(mu)):
loss_k = compute_loss(X[i], mu[k])
loss_k_list.append(loss_k)
if verbose:
self.testing("The loss to {0} clusters: \n {1}".format(len(loss_k_list), loss_k_list), i, 100)
loss_min = min(loss_k_list) # select the min value from loss_k_list.
nidx = loss_k_list.index(loss_min) # return the index of loss_min.
# Select the threshold TS
if len(N_loss[nidx]) <= WS:
TS = ts[nidx] # select TS based on "Z sigma rule" (Z=3).
ts[nidx] = compute_loss(mu[nidx]+Z*sigma[nidx], mu[nidx]) # Update TS
else:
# Compute the theta_MAP for "nidx" cluster: theta_MAP = alpha / (alpha + beta)
theta_MAP = N_sp[nidx][0] / (N_sp[nidx][0] + N_sp[nidx][1])
ts_idx = int(len(N_loss[nidx])*(1 - theta_MAP)) # compute the threshold TS index based on theta_MAP.
TS = N_loss[nidx][ts_idx] # select the "ts_idx"-th norm in "N_loss" as threshold.
# Make a decision
if loss_min <= TS: # if loss_min < TS: Xi belongs to cluster[nidx].
# Update mu and sigma in streaming data
mu_old = mu[nidx]
# mu_{n+1} = mu_{n} + (x_{n+1} - mu_{n})/(n+1)
mu[nidx] = mu_old + (X[i] - mu[nidx])/(N[nidx]+1)
# v_{n+1} = v_{n} + (x_{n+1} - mu_{n})*(x_{n+1} - mu_{n+1}); sigma_{n+1} = √[v_{n+1}/n]
sigma[nidx] = np.sqrt(((sigma[nidx]**2)*N[nidx] + (X[i] - mu_old)*(X[i] - mu[nidx]))/N[nidx])
N[nidx] = N[nidx] + 1
N_test[nidx] = N_test[nidx] + 1
N_loss[nidx].append(loss_min) # store the loss_min to corresponding clusters.
N_loss[nidx].sort() # sort the list of loss_min.
N_sp[nidx][1] = N_sp[nidx][1] + 1 # beta+1
if verbose:
self.testing("The number of samples in cluster {0}: {1}.".format(nidx, N[nidx]), i, 50)
else: # if loss_min > TS: Xi belongs to new cluster.
mu.append(X[i]) # assign current Xi as new mean vector
sigma.append(np.zeros_like(X[i])) # the sigma is 0 for only one point
ts.append(np.mean(ts)) # use the mean of ts list as the initial threshold of new point
N.append(1)
N_test.append(1)
N_loss.append([loss_min]) # store loss_min to new entry
N_sp.append([1,1]) # initialize a beta distribution for new cluster
N_sp[nidx][0] = N_sp[nidx][0] + 1 # alpha+1
# Filter the noise inside predicted result
if verbose:
print("Predicted clusters and corresponding numbers: \n", N_test)
N_test_dn = self.denoising(N_test, 0.3)
k_new = sum(N_test_dn)
return k_new
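# A minimal usage sketch (shapes and values are synthetic; compute_loss comes from
# utils and is assumed to measure the distance between a point and a cluster mean):
#   X = np.random.randn(200, 121)              # batch of test points
#   mu = [np.zeros(121), np.ones(121) * 5]     # means of known clusters
#   cov = [np.eye(121), np.eye(121)]           # covariances of known clusters
#   N = [100, 100]                             # sample counts per known cluster
#   k_new = Imeans().imeans(X, mu, cov, N, Z=3, WS=100, verbose=False)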
|
# -*- coding: utf-8 -*-
from selenium import webdriver
import time
import os
import re
import urllib
import sys
from bs4 import BeautifulSoup
import logging
from datetime import datetime
from datetime import timedelta
from datetime import date
import threading
import json
import xlrd
import xlwt
from xlrd import open_workbook
from xlutils.copy import copy
import nltk
description_id = 1
browser = webdriver.Chrome()
def start(url, d, today, vstock):
global description_id
global browser
url = url
try:
browser.get(url)
t = browser.page_source
pn = re.compile(r'(.*)"statuses":(.*?)}]', re.S)
match = pn.match(t)
if not match:
return 0
result = match.group(2)
result = result + '}]'
decode = json.loads(result)
except:
pass
if __name__ == '__main__':
print(browser)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import os
import os.path
from flask_login import LoginManager, UserMixin
from .constantes import SECRET_KEY
chemin_actuel = os.path.dirname(os.path.abspath(__file__))
templates = os.path.join(chemin_actuel, "templates")
statics = os.path.join(chemin_actuel, "static")
app = Flask(__name__,
template_folder=templates,
static_folder=statics)
app.config['SECRET_KEY'] = SECRET_KEY
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./pulPY.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
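# Routes are imported at the bottom, after `app`, `db` and `login_manager` exist,
# to avoid a circular import between this package and app.routes.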
from app import routes
|
import torch
class GPU:
""" Class to work with GPUs """
def __init__(self):
pass
def get_default_device(self):
"""
Get the default device, if cuda is available get cuda.
"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
def to_device(self, data, device):
"""
Move data to default device.
"""
if isinstance(data, (list,tuple)):
return [self.to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
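# A minimal usage sketch of the helper above:
if __name__ == "__main__":
    gpu = GPU()
    device = gpu.get_default_device()
    # to_device also accepts lists/tuples of tensors and moves each element
    batch = gpu.to_device(torch.randn(4, 3), device)
    print(device, batch.device)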
|
from aws_cdk import core
from aws_cdk.aws_route53 import MxRecord, MxRecordValue, HostedZone
from kesher_service_cdk.service_stack.constants import KESHER_DOMAIN_NAME
class KesherGlobal(core.Construct):
# pylint: disable=redefined-builtin,invalid-name
def __init__(self, scope: core.Construct, id: str) -> None:
super().__init__(scope, id)
hosted_zone = HostedZone.from_lookup(scope=self, id="DomainHostedZone", domain_name=KESHER_DOMAIN_NAME, private_zone=False)
region = core.Stack.of(self).region
# host_name = f'inbound-smtp.{region}InboundUrl.amazonaws.com'
host_name = f'inbound-smtp.{region}.amazonaws.com'
# https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-mx-record.html
MxRecord(scope=self, id='DomainMXRecord',
values=[MxRecordValue(host_name=host_name, priority=10)],
zone=hosted_zone)
|
sub2main_dict = {'1': 'rice', '2': 'egg', '3': 'egg', '4': 'egg', '5': 'rice', '6': 'meat', '7': 'toufu', '8': 'toufu', '9': 'vegetable', '10': 'vegetable', '11': 'vegetable', '12': 'vegetable', '13': 'vegetable', '14': 'vegetable', '15': 'vegetable', '16': 'vegetable', '17': 'mix', '18': 'noddle', '19': 'meat', '20': 'noddle', '21': 'vegetable', '22': 'vegetable', '23': 'vegetable', '24': 'vegetable', '25': 'vegetable', '26': 'mix', '27': 'vegetable', '28': 'mix', '29': 'vegetable', '30': 'vegetable', '31': 'toufu', '32': 'meat', '33': 'mix', '34': 'vegetable', '35': 'toufu', '36': 'poultry', '37': 'meat', '38': 'meat', '39': 'poultry', '40': 'mix', '41': 'poultry', '42': 'meat', '43': 'meat', '44': 'poultry', '45': 'meat', '46': 'meat', '47': 'meat', '48': 'meat', '49': 'meat', '50': 'seafood', '51': 'seafood', '52': 'seafood', '53': 'seafood', '54': 'seafood', '55': 'seafood', '56': 'egg', '57': 'seafood', '58': 'meat', '59': 'poultry', '62': 'meat', '63': 'poultry', '64': 'poultry', '67': 'vegetable', '68': 'vegetable', '69': 'noddle', '71': 'soup', '80': 'bread', '84': 'mix', '85': 'mix', '86': 'mix',
'87': 'toufu', '89': 'seafood', '90': 'vegetable', '91': 'vegetable', '96': 'meat', '97': 'seafood', '98': 'vegetable', '99': 'seafood', '111': 'vegetable', '155': 'vegetable', '199': 'mix', '244': 'vegetable', '300': 'vegetable', '311': 'mix', '333': 'Popiah', '355': 'mix', '522': 'seafood', '523': 'seafood', '699': 'mix', '988': 'mix', '1004': 'mix', '2004': 'mix', '2006': 'egg', '2007': 'egg', '2013': 'mix', '2014': 'mix', '2015': 'mix', '2031': 'vegetable', '2032': 'vegetable', '2035':
'toufu', '2036': 'meat','900':'mix','3021':'mix', '2041': 'poultry', '2044': 'poultry', '2045': 'poultry', '2051': 'seafood', '2054': 'seafood', '2055': 'seafood', '2061': 'poultry', '2121': 'vegetable', '2322': 'vegetable', '2366': 'toufu', '2411': 'poultry', '2424': 'poultry', '2461': 'poultry', '2622': 'meat', '3003': 'meat', '3004': 'meat', '3013': 'seafood', '3017': 'seafood', '3018': 'poultry', '3019': 'vegetable', '3026': 'poultry', '3030': 'toufu', '3031': 'poultry', '3032': 'poultry', '3035': 'toufu',
'3036': 'egg', '3060': 'mix', '4005': 'rice', '4043': 'meat', '4046': 'poultry', '4051': 'poultry', '4221': 'poultry', "__background__":"__background__", '83':'rice'}
|
import copy
_base_ = '../../base.py'
# model settings
model = dict(
type='BYOL',
pretrained=None,
base_momentum=0.99,
pre_conv=True,
backbone=dict(
type='ResNet',
depth=50,
in_channels=3,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='SyncBN')),
neck=dict(
type='NonLinearNeckSimCLR',
in_channels=2048,
hid_channels=4096,
out_channels=256,
num_layers=2,
sync_bn=True,
with_bias=True,
with_last_bn=False,
with_avg_pool=True),
head=dict(type='LatentPredictHead',
size_average=True,
predictor=dict(type='NonLinearNeckSimCLR',
in_channels=256, hid_channels=4096,
out_channels=256, num_layers=2, sync_bn=True,
with_bias=True, with_last_bn=False, with_avg_pool=False)))
# dataset settings
data_source_cfg = dict(
type='ImageNet',
memcached=True,
mclient_path='/mnt/lustre/share/memcached_client')
#data_train_list = 'data/imagenet/meta/train.txt'
#data_train_root = 'data/imagenet/train'
# data_train_list = 'data/VOCdevkit/VOC2007/meta/train.txt'
# data_train_root = 'data/VOCdevkit/VOC2007/JPEGImages'
# data_test_list = 'data/VOCdevkit/VOC2007/meta/test.txt'
# data_test_root = 'data/VOCdevkit/VOC2007/JPEGImages'
# data_test_list = 'data/isic2017/meta/test.txt'
# data_test_root = 'data/isic2017/test'
# data_train_list = 'data/isic2017/meta/train.txt'
# data_train_root = 'data/isic2017/train'
data_test_list = 'data/x_ray_dataset/test_list.txt'
data_test_root = 'data/x_ray_dataset/images'
data_train_list = 'data/x_ray_dataset/train_val_list.txt'
data_train_root = 'data/x_ray_dataset/images'
dataset_type = 'BYOLDataset'
#ImageNet Normalization Config
#img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#isic2017 Normalization Config
# img_norm_cfg = dict(mean=[0.670, 0.585, 0.589], std=[0.177, 0.194, 0.230])
#x-ray dataset config
img_norm_cfg = dict(mean=[0.5245, 0.5245, 0.5245], std =[0.2589, 0.2589, 0.2589])
train_pipeline = [
dict(type='RandomResizedCrop', size=224, interpolation=3),
dict(type='RandomHorizontalFlip'),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='ColorJitter',
brightness=0.4,
contrast=0.4,
saturation=0.2,
hue=0.1)
],
p=0.8),
dict(type='RandomGrayscale', p=0.2),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='GaussianBlur',
sigma_min=0.1,
sigma_max=2.0)
],
p=1.),
dict(type='RandomAppliedTrans',
transforms=[dict(type='Solarization')], p=0.),
]
# prefetch
prefetch = False
if not prefetch:
train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
train_pipeline1 = copy.deepcopy(train_pipeline)
train_pipeline2 = copy.deepcopy(train_pipeline)
train_pipeline2[4]['p'] = 0.1 # gaussian blur
train_pipeline2[5]['p'] = 0.2 # solarization
data = dict(
    imgs_per_gpu=16,  # effective batch size = imgs_per_gpu * num_gpus * update_interval
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline1=train_pipeline1,
pipeline2=train_pipeline2,
prefetch=prefetch,
),
val = dict(
type=dataset_type,
data_source=dict(
list_file=data_test_list, root=data_test_root,
**data_source_cfg),
pipeline1=train_pipeline1,
pipeline2=train_pipeline2,
prefetch=prefetch,
)
)
# additional hooks
update_interval = 16*16 # interval for accumulate gradient
custom_hooks = [
dict(type='BYOLHook', end_momentum=1., update_interval=update_interval), dict(
type='NewValidateHook',
dataset=data['val'],
initial=True,
interval=1,
imgs_per_gpu=32,
workers_per_gpu=5,
prefetch=prefetch,
img_norm_cfg=img_norm_cfg)
]
# optimizer
optimizer = dict(type='LARS', lr=4.8/8, weight_decay=0.000001, momentum=0.9,
paramwise_options={
'(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay=0., lars_exclude=True),
'bias': dict(weight_decay=0., lars_exclude=True)}) #lr=4.8/8,
# apex
use_fp16 = False
optimizer_config = dict(update_interval=update_interval, use_fp16=use_fp16)
# learning policy
lr_config = dict(
policy='CosineAnnealing',
min_lr=0.,
warmup='linear',
warmup_iters=10,
warmup_ratio=0.0001, # cannot be 0
warmup_by_epoch=True)
checkpoint_config = dict(interval=10)
# runtime settings
total_epochs = 200
|
"""
********************************************************************************
compas_fab.backends
********************************************************************************
.. currentmodule:: compas_fab.backends
This package contains backend classes for simulation, planning and execution.
V-REP
-----
.. autosummary::
:toctree: generated/
:nosignatures:
VrepClient
ROS
---
.. autosummary::
:toctree: generated/
:nosignatures:
RosClient
RosFileServerLoader
Long-running tasks
------------------
.. autosummary::
:toctree: generated/
:nosignatures:
FutureResult
CancellableFutureResult
Exceptions
----------
.. autosummary::
:toctree: generated/
:nosignatures:
BackendError
RosError
RosValidationError
VrepError
"""
from .exceptions import *
from .tasks import *
from .ros.client import *
from .ros.exceptions import *
from .ros.fileserver_loader import *
from .vrep.client import *
__all__ = [name for name in dir() if not name.startswith('_')]
|
import argparse
import math
class HMMPredictor():
def __init__(self):
self.words_lines = []
self.tags_lines = []
self.preditions = []
self.accuracy = 0
self.avg_log_likelihood = 0
self.idx_to_tag = {}
self.idx_to_word = {}
self.prior_table = {}
self.trans_table = {}
self.emit_table = {}
def fit(self,input_file,idx_to_word_file,idx_to_tag_file):
with open(input_file,mode="r",encoding="utf8") as f:
for line in f:
temp_words = []
temp_tags = []
splitted_line = line.strip().split(" ")
for token in splitted_line:
word = token.split("_")[0]
tag = token.split("_")[1]
temp_words.append(word)
temp_tags.append(tag)
self.words_lines.append(temp_words)
self.tags_lines.append(temp_tags)
with open(idx_to_word_file,mode="r",encoding="utf8") as f:
for idx,word in enumerate(f):
self.idx_to_word[idx] = word.strip()
with open(idx_to_tag_file,mode='r',encoding="utf8") as f:
for idx,tag in enumerate(f):
self.idx_to_tag[idx] = tag.strip()
def load_model(self,hmmprior_file,hmmemit_file,hmmtrans_file):
with open (hmmprior_file,mode='r') as f:
for idx,value in enumerate(f):
tag = self.idx_to_tag[idx]
self.prior_table[tag] = float(value.strip())
with open(hmmemit_file,mode="r") as f:
for idx,line in enumerate(f):
tag = self.idx_to_tag[idx]
self.emit_table[tag] = {}
splitted_line = line.strip().split(" ")
for j, value in enumerate(splitted_line):
word = self.idx_to_word[j]
self.emit_table[tag][word] = float(value.strip())
with open(hmmtrans_file,mode="r") as f:
for idx,line in enumerate(f):
prev_tag = self.idx_to_tag[idx]
self.trans_table[prev_tag] = {}
splitted_line = line.strip().split(" ")
for j, value in enumerate(splitted_line):
next_tag = self.idx_to_tag[j]
self.trans_table[prev_tag][next_tag] = float(value.strip())
def log_sum_exp_tric(self,V):
m = max(V)
temp = 0
for v in V:
temp += math.exp(v-m)
return m+math.log(temp)
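    # Forward pass in log space: with log-emissions log b_s(o_t) and
    # log-transitions log a_{s',s}, the recurrence below is
    #   alpha_1(s) = log pi_s + log b_s(o_1)
    #   alpha_t(s) = log b_s(o_t) + logsumexp_{s'}(alpha_{t-1}(s') + log a_{s',s})
    # where the logsumexp is computed with log_sum_exp_tric to avoid underflow.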
def forward_log_alpha(self,observations):
T = len(observations)
alpha = {}
if T == 1:
alpha[T] = {}
for state in self.prior_table.keys():
alpha[T][state] = math.log(self.prior_table[state]) + math.log(self.emit_table[state][observations[T-1]])
else:
alpha[1] = {}
for state in self.prior_table.keys():
alpha[1][state] = math.log(self.prior_table[state]) + math.log(self.emit_table[state][observations[0]])
for idx in range(2,T+1):
alpha[idx] = {}
for state in self.prior_table.keys():
alpha[idx][state] = math.log(self.emit_table[state][observations[idx-1]])
V = []
for prev_state in self.prior_table.keys():
V.append(alpha[idx-1][prev_state]+math.log(self.trans_table[prev_state][state]))
alpha[idx][state] += self.log_sum_exp_tric(V)
return alpha
def backward_log_beta(self,observations):
T = len(observations)
beta = {}
beta[T] = {}
for state in self.prior_table.keys():
beta[T][state] = 0
for idx in range(1,T)[::-1]:
beta[idx] = {}
for state in self.prior_table.keys():
V = []
for next_state in self.prior_table.keys():
V.append(math.log(self.emit_table[next_state][observations[idx]])+beta[idx+1][next_state]+math.log(self.trans_table[state][next_state]))
beta[idx][state] = self.log_sum_exp_tric(V)
return beta
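    # Posterior (forward-backward) decoding: for each position t the predicted
    # tag is argmax_s alpha_t(s) + beta_t(s), i.e. the tag with the highest
    # marginal posterior probability given the whole observation sequence.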
def predict(self):
total = 0
wrong = 0
for idx in range(len(self.words_lines)):
observations = self.words_lines[idx]
tags_line = self.tags_lines[idx]
T = len(observations)
alpha = self.forward_log_alpha(observations)
beta = self.backward_log_beta(observations)
self.avg_log_likelihood += self.compute_log_likelihood(alpha,T)
temp = []
for t in range(1,T+1):
total += 1
sums = []
for idx in self.idx_to_tag.keys():
tag = self.idx_to_tag[idx]
sums.append(alpha[t][tag]+beta[t][tag])
max_idx = sums.index(max(sums))
pred = self.idx_to_tag[max_idx]
tag = tags_line[t-1]
if pred != tag:
wrong += 1
temp.append(pred)
            self.predictions.append(temp)
self.accuracy = 1-wrong/total
self.avg_log_likelihood /= len(self.words_lines)
def compute_log_likelihood(self,alpha,T):
V = []
for idx in self.idx_to_tag.keys():
tag = self.idx_to_tag[idx]
V.append(alpha[T][tag])
return self.log_sum_exp_tric(V)
def parse_args():
    parser = argparse.ArgumentParser(description="Evaluating Validation Data with Forward-Backward Algorithm")
parser.add_argument("validation_input",help="path to the validation input .txt file that will be evaluated by your forward backward algorithm")
parser.add_argument("index_to_word",help="path to the .txt that specifies the dictionary mapping from words to indices.")
parser.add_argument("index_to_tag",help="path to the .txt that specifies the dictionary mapping from tags to indices.")
parser.add_argument("hmmprior",help="path to input .txt file which contains the estimated prior (π).")
parser.add_argument("hmmemit",help="path to input .txt file which contains the emission probabilities (B).")
parser.add_argument("hmmtrans",help="path to input .txt file which contains transition probabilities (A).")
parser.add_argument("predicted_file",help="path to the output .txt file to which the predicted tags will be written.")
parser.add_argument("metric_file",help="path to the output .txt file to which the metrics will be written.")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
predictor = HMMPredictor()
predictor.fit(args.validation_input,args.index_to_word,args.index_to_tag)
predictor.load_model(args.hmmprior,args.hmmemit,args.hmmtrans)
predictor.predict()
accuracy = predictor.accuracy
avg_log_likelihood = predictor.avg_log_likelihood
with open(args.metric_file,mode="w") as f:
f.write("Average Log-Likelihood: " + str(avg_log_likelihood))
f.write("\n")
f.write("Accuracy: " + str(accuracy))
words_lines = predictor.words_lines
    predictions = predictor.predictions
with open(args.predicted_file,mode="w") as f:
for i in range(len(words_lines)):
words_line = words_lines[i]
prediction = predictions[i]
out = [words_line[i]+"_"+prediction[i] for i in range(len(words_line))]
out = " ".join(out)
f.write(out)
f.write("\n")
|
from .makeconfig import MakeConfig
from .pyson import Pyson
from . import checks
from .syscheck import syscheck
from .Bot_Logging import log_error, Bot_Logging
from .Bot_Settings import Bot_Settings
|
from tkinter import Tk, messagebox, simpledialog
class Dialog:
    def __init__(self, title, message, window=None):
        if window is None:
            window = Tk()
            window.withdraw()  # hide the implicit root window
        messagebox.showinfo(title, message)
        self.answer = simpledialog.askstring("Input", "What is your first name?",
                                             parent=window)
|
from pandas import DataFrame
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import DeleteStep
def execute_delete(
step: DeleteStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
return df.drop(step.columns, axis=1)
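# A minimal usage sketch (the DeleteStep constructor arguments are an
# assumption inferred from the attributes used above):
#
#     df = DataFrame({"a": [1, 2], "b": [3, 4]})
#     step = DeleteStep(name="delete", columns=["b"])
#     execute_delete(step, df)  # -> DataFrame with only column "a"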
|
"""
Test Carousel core.
"""
from carousel.core import Q_, UREG
from nose.tools import eq_, ok_
def test_pv_context():
"""
Test Pint PV context - specifically suns to power flux and v.v.
"""
esun = Q_(876.5, UREG.W / UREG.m / UREG.m)
eq_(esun.to('suns', 'pv'), 0.8765 * UREG.suns)
esun = Q_(0.8765, UREG.suns)
ok_(esun.dimensionless)
eq_(esun.to('W / m ** 2', 'pv'), 876.5 * UREG.W / UREG.m / UREG.m)
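# Note: taken together, the two assertions above imply the 'pv' context
# converts with 1 sun == 1000 W / m ** 2 (876.5 W/m^2 <-> 0.8765 suns).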
|
#!/usr/bin/env python
#coding=utf-8
import json
from lib.sqs import zhihufav_sqs
from lib.tasks import add_note
if __name__=="__main__":
sqs_info = zhihufav_sqs.get_messages(1)
for sqs in sqs_info:
sqs_body = sqs.get_body()
receipt_handle = sqs.receipt_handle
sqs_json = json.loads(sqs_body)
print(sqs_json)
api_url = sqs_json.get('api_url')
parent_note = sqs_json.get('parent_note')
add_note(api_url, parent_note, receipt_handle)
|
import braintree
import weasyprint
from django.shortcuts import render, redirect, get_object_or_404
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template.loader import render_to_string
from orders.models import Order
from .tasks import payment_completed
# Instantiate braintree payment gateway
gateway = braintree.BraintreeGateway(settings.BRAINTREE_CONF)
@login_required
def payment_process(request):
order_id = request.session.get('order_id')
order = get_object_or_404(Order, id=order_id)
total_cost = order.get_total_cost()
if request.method == 'POST':
# Retrieve nonce
nonce = request.POST.get('payment_method_nonce', None)
# Create and submit transaction
result = gateway.transaction.sale({
'amount': f'{total_cost:.2f}',
'payment_method_nonce': nonce,
'options': {
'submit_for_settlement': True,
}
})
if result.is_success:
# Mark the order as paid
order.paid = True
# Store the unique braintree transaction id
order.braintree_id = result.transaction.id
order.save()
# Launch asynchronous task
payment_completed.delay(order.id)
return redirect('payment:done')
else:
return redirect('payment:canceled')
else:
# Generate token
client_token = gateway.client_token.generate()
return render(request, 'payment/process.html', {
'order': order,
'client_token': client_token,
})
@login_required
def payment_done(request):
order_id = request.session.get('order_id')
order = get_object_or_404(Order, id=order_id)
return render(request, 'payment/done.html', {
'order': order,
})
@login_required
def payment_canceled(request):
return render(request, 'payment/canceled.html')
@login_required
def payment_receipt(request, order_id):
order = get_object_or_404(Order, id=order_id)
# Generate invoice PDF
html = render_to_string('order/pdf.html', {'order': order})
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = f'filename=order_{order_id}.pdf'
weasyprint.HTML(string=html).write_pdf(response, stylesheets=[
weasyprint.CSS(settings.STATIC_ROOT + 'css/pdf.css')])
return response
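# A hypothetical urls.py wiring for these views (only the 'done' and
# 'canceled' names are implied by the redirects above; the remaining paths,
# names and the app_name are assumptions):
#
#     from django.urls import path
#     from . import views
#
#     app_name = 'payment'
#     urlpatterns = [
#         path('process/', views.payment_process, name='process'),
#         path('done/', views.payment_done, name='done'),
#         path('canceled/', views.payment_canceled, name='canceled'),
#         path('receipt/<int:order_id>/', views.payment_receipt, name='receipt'),
#     ]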
|
from .build import build_dataset_from_cfg
import datasets.KITTIDataset
import datasets.PCNDataset
import datasets.ShapeNet55Dataset
|
from os import sep
from .ExamplePage import ExamplePage
class View(ExamplePage):
"""View the source of a Webware servlet.
For each Webware example, you will see a sidebar with various menu items,
one of which is "View source of <em>example</em>". This link points to the
View servlet and passes the filename of the current servlet. The View
servlet then loads that file's source code and displays it in the browser
for your viewing pleasure.
Note that if the View servlet isn't passed a filename,
it prints the View's docstring which you are reading right now.
"""
def writeContent(self):
req = self.request()
if req.hasField('filename'):
trans = self.transaction()
filename = req.field('filename')
if sep in filename:
self.write(
'<h3 style="color:red">Error</h3><p>Cannot request a file'
f' outside of this directory {filename!r}</p>')
return
filename = self.request().serverSidePath(filename)
self.request().fields()['filename'] = filename
trans.application().forward(trans, 'Colorize.py')
else:
doc = self.__class__.__doc__.split('\n', 1)
doc[1] = '</p>\n<p>'.join(doc[1].split('\n\n'))
self.writeln('<h2>{}</h2>\n<p>{}</p>'.format(*doc))
|
import torchvision.models as models
import torch
class resnet50(torch.nn.Module):
def __init__(self, dropout):
super().__init__()
resnet50 = models.resnet50(pretrained=False)
modules = list(resnet50.children())[:-1]
self._resnet50 = torch.nn.Sequential(*modules)
self.output_size = 2048
def add_classifier(self, no, name="classifier", modalities=None):
setattr(self, name, torch.nn.Linear(self.output_size, no))
def forward(self, x, *args, **kwargs):
*dim, c, h, w = x.size()
x = x.view(-1, c, h, w)
x = self._resnet50(x)
return x.view(*dim, self.output_size)
class resnet101(torch.nn.Module):
def __init__(self, dropout):
super().__init__()
resnet = models.resnet101(pretrained=False)
modules = list(resnet.children())[:-1]
self._model = torch.nn.Sequential(*modules)
self.output_size = 2048
def add_classifier(self, no, name="classifier", modalities=None):
setattr(self, name, torch.nn.Linear(self.output_size, no))
def forward(self, x, *args, **kwargs):
*dim, c, h, w = x.size()
x = x.view(-1, c, h, w)
x = self._model(x)
return x.view(*dim, self.output_size)
class densenet121(torch.nn.Module):
def __init__(self, dropout):
super().__init__()
resnet = models.densenet121(pretrained=False)
modules = list(resnet.children())[:-1]
self._model = torch.nn.Sequential(*modules)
self.output_size = 1024
def add_classifier(self, no, name="classifier", modalities=None):
setattr(self, name, torch.nn.Linear(self.output_size, no))
def forward(self, x, *args, **kwargs):
*dim, c, h, w = x.size()
x = x.view(-1, c, h, w)
x = self._model(x)
x = torch.mean(x,dim=[2,3])
return x.view(*dim, self.output_size)
class wrn50_2(torch.nn.Module):
def __init__(self, dropout):
super().__init__()
resnet = models.wide_resnet50_2(pretrained=False)
modules = list(resnet.children())[:-1]
self._model = torch.nn.Sequential(*modules)
self.output_size = 2048
def add_classifier(self, no, name="classifier", modalities=None):
setattr(self, name, torch.nn.Linear(self.output_size, no))
def forward(self, x, *args, **kwargs):
*dim, c, h, w = x.size()
x = x.view(-1, c, h, w)
x = self._model(x)
x = torch.mean(x, dim=[2, 3])
return x.view(*dim, self.output_size)
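# A small usage sketch: the wrappers accept inputs with arbitrary leading
# dimensions (e.g. batch x views x C x H x W) and return features with the same
# leading shape; note the dropout argument is currently accepted but unused.
# The attribute name "classifier" matches the default of add_classifier.
if __name__ == "__main__":
    model = resnet50(dropout=0.0)
    model.add_classifier(no=10)
    dummy = torch.randn(2, 3, 3, 224, 224)  # (batch, views, C, H, W)
    feats = model(dummy)                    # -> (2, 3, 2048)
    logits = model.classifier(feats)        # -> (2, 3, 10)
    print(feats.shape, logits.shape)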
|
# baleen.ingest
# The ingestion runner that implements ingestion for a collection of feeds.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Wed Mar 02 23:23:06 2016 -0500
#
# Copyright (C) 2016 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: ingest.py [4ee79a0] benjamin@bengfort.com $
"""
The ingestion runner that implements ingestion for a collection of feeds.
"""
##########################################################################
## Imports
##########################################################################
import uuid
from baleen.opml import OPML
from baleen.exceptions import *
from baleen import models as db
from baleen.feed import FeedSync
from baleen.config import settings
from baleen.utils.timez import Timer
from baleen.wrangle import PostWrangler
from baleen.utils.logger import LoggingMixin
from baleen.utils.decorators import memoized
from datetime import datetime
from collections import Counter
##########################################################################
## Helper Functions
##########################################################################
def stype(obj):
"""
Returns the string of the type. Used to count exception types.
"""
if isinstance(obj, BaleenError):
if hasattr(obj, "original"):
return "{} ({})".format(
type(obj).__name__, type(obj.original).__name__
)
return type(obj).__name__
##########################################################################
## Base Ingestion Class
##########################################################################
class Ingestor(LoggingMixin):
"""
Base class for the ingestors.
Ingestors manage the synchronization of feeds, wrangling of posts, and
fetching of web pages to store to the Mongo database. Ingestors can
either get feeds from a list of strings, an OPML file or a Mongo query.
Ingestors also perform logging and exception handling.
"""
def __init__(self, feeds=None, **options):
self.timer = None # Processing timer
self.jobid = None # Unique job id for every run
self.options = options # Any other options passed in
self._feeds = feeds # Allows pass in feed collection
self.errors = Counter() # Count the number of error types
@property
def name(self):
return self.__class__.__name__
@memoized
def counts(self):
"""
Keep track of counts and ensure zero keys exist.
"""
counts = Counter()
for key in ('feeds', 'posts', 'errors', 'feed_error'):
counts[key] = 0
return counts
def feeds(self):
"""
        This is the primary entry point for subclasses; they must specify
        how to get access to a collection of feeds to synchronize.
"""
if self._feeds is not None:
return self._feeds
raise IngestionError(
"No feeds specified for {} ingestion!".format(self.name)
)
def started(self):
"""
Run when the ingestor is started and used for logging. Subclasses can
use it as a hook to perform extra work right before kick off.
"""
message = "{} job {} started".format(self.name, self.jobid)
self.logger.info(message)
def failed(self, exception):
"""
Executed when a complete ingestion run has failed (very bad). Used
to log the exception or clean up before Baleen crashes!
"""
message = "{} job {} failed!".format(self.name, self.jobid)
self.logger.error("Ingestion Error: {}".format(exception))
self.logger.critical(message)
def finished(self):
"""
Run when the ingestor has finished and used for logging. Subclasses
can use it as a hook to perform any completion work.
"""
# Notify the results
results = (
"Processed {feeds} feeds ({timer}) "
"{posts} posts with {errors} errors"
).format(
timer=self.timer, **self.counts
)
self.logger.info(results)
# Notify job finished
message = "{} job {} finished".format(self.name, self.jobid)
self.logger.info(message)
def process(self):
"""
Runs the ingestion process by iterating over the feeds, synchronizing
and then wrangling posts into the database as well as fetching pages.
"""
for idx, fsync in enumerate(FeedSync.factory(self.feeds())):
try:
self.process_feed(fsync)
self.counts['feeds'] += 1
except SynchronizationError as e:
self.counts['feed_error'] += 1
self.errors[stype(e)] += 1
self.logger.error(
"Error on Feed {} ({}): {}".format(
idx+1, fsync.feed, str(e)
)
)
def process_feed(self, fsync):
"""
Synchronizes a feed and catches exceptions
"""
factory = PostWrangler.factory(fsync.entries(), fsync.feed)
for idx, post in enumerate(factory):
try:
self.process_post(post)
self.counts["posts"] += 1
except WranglingError as e:
self.counts["errors"] += 1
self.errors[stype(e)] += 1
self.logger.error(
"Post Error for feed {} on entry {}: {}".format(
fsync.feed, idx, str(e)
)
)
def process_post(self, post):
"""
Wrangles a post from a single feed and catches exceptions
"""
post.wrangle()
if settings.fetch_html:
try:
post.fetch()
except FetchError as e:
self.counts["fetch_error"] += 1
self.errors[stype(e)] += 1
self.logger.error(
"Fetch Error for post \"{}\" ({}): {}".format(
post.post.title, post.post.url, str(e)
)
)
def ingest(self):
"""
Subclasses do not typically override the ingest method. Instead they
will override the process hooks for start, failed, and finish, or the
process method directly.
"""
# Set a unique job id for every time run is called.
# The job id is based on the hostname and a time sequence.
self.jobid = uuid.uuid1()
# Call the started hook for logging and notification.
self.started()
# Time how long it takes to perform the processing
with Timer() as self.timer:
try:
self.process()
except Exception as e:
# If something goes wrong, call the failed hook, then raise.
self.failed(e)
raise
# Call the finished hook for logging and notification.
self.finished()
##########################################################################
## Mongo Ingestion Class
##########################################################################
class MongoIngestor(Ingestor):
"""
Ingests feeds that are stored in the database.
This type of ingestor also tracks information into the database.
"""
def feeds(self):
"""
Returns an iterator of all active feeds from the database
"""
for feed in db.Feed.objects(active=True):
yield feed
def started(self):
"""
Save a record about the job start to the database.
"""
super(MongoIngestor, self).started()
self.job = db.Job(jobid=self.jobid, name=self.name)
self.job.save()
def failed(self, exception):
"""
Save information about the failure to the database.
"""
super(MongoIngestor, self).failed(exception)
self.job.failed = True
self.job.reason = str(exception)
self.job.finished = datetime.now()
self.job.save()
def finished(self):
"""
Update the job record in the database.
"""
super(MongoIngestor, self).finished()
self.job.reason = "OK"
self.job.finished = datetime.now()
self.job.counts = self.counts
self.job.errors = self.errors
self.job.totals = {
"feeds": db.Feed.objects.count(),
"posts": db.Post.objects.count(),
"jobs": db.Job.objects.count(),
}
self.job.save()
##########################################################################
## OPML Ingestion Class
##########################################################################
class OPMLIngestor(Ingestor):
"""
Ingests feeds from an OPML file.
"""
def __init__(self, path, **options):
self.opml = OPML(path)
super(OPMLIngestor, self).__init__(**options)
def feeds(self):
"""
Returns an iterator of all active feeds from the database
"""
for feed in self.opml:
yield feed
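# A minimal usage sketch (the feed URL is a placeholder; MongoIngestor further
# assumes a configured MongoDB connection):
#
#     ingestor = Ingestor(feeds=["http://example.com/rss.xml"])
#     ingestor.ingest()
#
#     # or, for feeds stored in the database:
#     MongoIngestor().ingest()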
|
'''
Task.
The computer picks an integer from 1 to 100, and we need to write a program that guesses the number
in as few attempts as possible.
'''
import numpy as np
def guesswork(number):
    '''Start with a random guess, then decrease or increase it depending on whether it is above or below the target.
    The function takes the hidden number and returns the number of attempts'''
trials = 1
prediction = np.random.randint(1, 101)
while prediction != number:
trials += 1
if prediction < number:
prediction -= (prediction - number) // 2
elif prediction > number:
prediction += (number - prediction) // 2
return trials
def score_game(guesswork):
    '''Run the game 1000 times to find out how quickly the game guesses the number'''
    trials_list = []
    np.random.seed(1)  # fix the RANDOM SEED so that your experiment is reproducible!
random_array = np.random.randint(1, 101, size=(1000))
for number in random_array:
trials_list.append(guesswork(number))
score = int(np.mean(trials_list))
print(f"Ваш алгоритм угадывает число в среднем за {score} попыток")
return score
score_game(guesswork)
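# A sketch of an alternative strategy: a textbook binary search over [1, 100]
# that halves the remaining interval and therefore needs at most 7 attempts.
def guesswork_binary(number):
    '''Guess by halving the remaining interval; returns the number of attempts'''
    low, high = 1, 101
    trials = 0
    while True:
        trials += 1
        prediction = (low + high) // 2
        if prediction == number:
            return trials
        if prediction < number:
            low = prediction + 1
        else:
            high = prediction
score_game(guesswork_binary)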
|
#!/usr/bin/python
# Wflow is Free software, see below:
#
# Copyright (c) J. Schellekens/Deltares 2005-2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#TODO: split off routing
"""
Run the wflow_hbv hydrological model.
usage:
wflow_hbv::
[-h][-v level][-F runinfofile][-L logfile][-C casename][-R runId]
[-c configfile][-T timesteps][-s seconds][-W][-E][-N][-U discharge]
[-P parameter multiplication][-X][-l loglevel]
-F: if set wflow is expected to be run by FEWS. It will determine
the timesteps from the runinfo.xml file and save the output initial
conditions to an alternate location. Also set fewsrun=1 in the .ini file!
-f: Force overwrite of existing results
-T: Set the number of timesteps to run
-N: No lateral flow, use runoff response function to generate fast runoff
-s: Set the model timesteps in seconds
-I: re-initialize the initial model conditions with default
-i: Set input table directory (default is intbl)
-x: run for subcatchment only (e.g. -x 1)
-C: set the name of the case (directory) to run
-R: set the name runId within the current case
-L: set the logfile
    -c: name of the wflow configuration file (default: Casename/wflow_hbv.ini).
-h: print usage information
-U: The argument to this option should be a .tss file with measured discharge in
[m^3/s] which the program will use to update the internal state to match
the measured flow. The number of columns in this file should match the
number of gauges in the wflow\_gauges.map file.
-u: list of gauges/columns to use in update. Format:
-u [1 , 4 ,13]
The above example uses column 1, 4 and 13
-P: set parameter change string (e.g: -P "self.FC = self.FC * 1.6") for non-dynamic variables
-p: set parameter change string (e.g: -P "self.Precipitation = self.Precipitation * 1.11") for
dynamic variables
    -l: loglevel (must be one of DEBUG, WARNING, ERROR)
-X overwrites the initial values at the end of each timestep
"""
import numpy
import os
import os.path
import shutil, glob
import getopt
import sys
from wflow.wf_DynamicFramework import *
from wflow.wflow_adapt import *
#import scipy
#import pcrut
wflow = "wflow_hbv"
#: columns used in updating
updateCols = []
def usage(*args):
"""
Print usage information
- *args: command line arguments given
"""
sys.stdout = sys.stderr
    for msg in args: print(msg)
    print(__doc__)
sys.exit(0)
class WflowModel(DynamicModel):
"""
The user defined model class.
"""
def __init__(self, cloneMap,Dir,RunDir,configfile):
DynamicModel.__init__(self)
self.caseName = os.path.abspath(Dir)
self.clonemappath = os.path.join(os.path.abspath(Dir),"staticmaps",cloneMap)
setclone(self.clonemappath)
self.runId = RunDir
self.Dir = os.path.abspath(Dir)
self.configfile = configfile
self.SaveDir = os.path.join(self.Dir,self.runId)
def updateRunOff(self):
"""
Updates the kinematic wave reservoir
"""
self.WaterLevel=(self.Alpha*pow(self.SurfaceRunoff,self.Beta))/self.Bw
# wetted perimeter (m)
P=self.Bw+(2*self.WaterLevel)
# Alpha
self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
self.OldKinWaveVolume = self.KinWaveVolume
self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
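        # Note: the kinematic wave coefficient follows Manning's equation,
        #   Alpha = (N / sqrt(Slope))**Beta * P**((2/3)*Beta)
        # with P the wetted perimeter; AlpTerm and AlpPow are precomputed in
        # initial(), so only the P-dependent factor is updated here.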
def stateVariables(self):
"""
returns a list of state variables that are essential to the model.
This list is essential for the resume and suspend functions to work.
This function is specific for each model and **must** be present.
        :var self.SurfaceRunoff: Surface runoff in the kin-wave reservoir [m^3/s]
        :var self.WaterLevel: Water level in the kin-wave reservoir [m]
:var self.DrySnow: Snow pack [mm]
:var self.FreeWater: Available free water [mm]
:var self.UpperZoneStorage: Water in the upper zone [mm]
:var self.LowerZoneStorage: Water in the lower zone [mm]
:var self.SoilMoisture: Soil moisture [mm]
:var self.InterceptionStorage: Amount of water on the Canopy [mm]
"""
states = ['FreeWater', 'SoilMoisture',
'UpperZoneStorage',
'LowerZoneStorage',
'InterceptionStorage',
'SurfaceRunoff',
'WaterLevel',
'DrySnow']
return states
# The following are made to better connect to deltashell/openmi
def supplyCurrentTime(self):
"""
gets the current time in seconds after the start of the run
        Output:
- time in seconds since the start of the model run
"""
return self.currentTimeStep() * int(configget(self.config,'model','timestepsecs','86400'))
def parameters(self):
"""
Define all model parameters here that the framework should handle for the model
See wf_updateparameters and the parameters section of the ini file
        If you use this, make sure to call wf_updateparameters at the start of the dynamic section
and at the start/end of the initial section
"""
modelparameters = []
#Static model parameters e.g.
#modelparameters.append(self.ParamType(name="RunoffGeneratingGWPerc",stack="intbl/RunoffGeneratingGWPerc.tbl",type="static",default=0.1))
# Meteo and other forcing
        self.P_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Precipitation",
                                               "/inmaps/P")  # timeseries for rainfall
        self.PET_mapstack = self.Dir + configget(self.config, "inputmapstacks", "EvapoTranspiration",
                                                 "/inmaps/PET")  # timeseries for potential evapotranspiration
        self.TEMP_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Temperature",
                                                  "/inmaps/TEMP")  # timeseries for temperature
        self.Inflow_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Inflow",
                                                    "/inmaps/IF")  # in/outflow locations (abstractions)
        self.Seepage_mapstack = self.Dir + configget(self.config, "inputmapstacks", "Seepage",
                                                     "/inmaps/SE")  # seepage from an external model
# Meteo and other forcing
modelparameters.append(self.ParamType(name="Precipitation",stack=self.P_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
modelparameters.append(self.ParamType(name="PotEvaporation",stack=self.PET_mapstack,type="timeseries",default=0.0,verbose=True,lookupmaps=[]))
modelparameters.append(self.ParamType(name="Temperature",stack=self.TEMP_mapstack,type="timeseries",default=10.0,verbose=True,lookupmaps=[]))
modelparameters.append(self.ParamType(name="Inflow",stack=self.Inflow_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
modelparameters.append(self.ParamType(name="Seepage",stack=self.Seepage_mapstack,type="timeseries",default=0.0,verbose=False,lookupmaps=[]))
return modelparameters
def suspend(self):
"""
Suspends the model to disk. All variables needed to restart the model
are saved to disk as pcraster maps. Use resume() to re-read them
"""
self.logger.info("Saving initial conditions...")
self.wf_suspend(os.path.join(self.SaveDir,"outstate"))
if self.OverWriteInit:
self.logger.info("Saving initial conditions over start conditions...")
self.wf_suspend(os.path.join(self.SaveDir,"instate"))
if self.fewsrun:
self.logger.info("Saving initial conditions for FEWS...")
self.wf_suspend(os.path.join(self.Dir, "outstate"))
def initial(self):
"""
Initial part of the model, executed only once. Reads all static model
information (parameters) and sets-up the variables used in modelling.
*HBV Soil*
:var FC.tbl: Field Capacity (260.0) [mm]
:var BetaSeepage.tbl: exponent in soil runoff generation equation (1.8) [-]
:var LP.tbl: fraction of Fieldcapacity below which actual evaporation=potential evaporation (0.53000)
:var K4.tbl: Recession constant baseflow (0.02307)
*If SetKquickFlow is set to 1*
:var KQuickFlow.tbl: (0.09880)
        :var SUZ.tbl: Level over which K0 is used (100.0)
:var K0.tbl: (0.3)
*If SetKquickFlow is set to 0*
:var KHQ.tbl: recession rate at flow HQ (0.09880)
:var HQ.tbl: high flow rate HQ for which recession rate of upper reservoir is known (3.27000)
:var AlphaNL.tbl: measure of non-linearity of upper reservoir (1.1)
:var PERC.tbl: Percolation from Upper to Lowerzone (0.4000) [mm/day]
:var CFR.tbl: Refreezing efficiency constant in refreezing of freewater in snow (0.05000)
:var Pcorr.tbl: Correction factor for precipitation (1.0)
:var RFCF.tbl: Correction factor for rainfall (1.0)
:var SFCF.tbl: Correction factor for snowfall(1.0)
:var Cflux.tbl: Maximum capillary rise from runoff response routine to soil moisture routine (2.0)
:var ICF.tbl: Maximum interception storage (in forested AND non-forested areas) (2.0)
:var CEVPF.tbl: Correction factor for potential evaporation (1.0)
:var EPF.tbl: Exponent of correction factor for evaporation on days with precipitation(0.0)
:var ECORR.tbl: Evap correction (1.0)
*Snow modelling parameters*
:var TTI.tbl: critical temperature for snowmelt and refreezing (1.000) [oC]
:var TT.tbl: defines interval in which precipitation falls as rainfall and snowfall (-1.41934) [oC]
:var Cfmax.tbl: meltconstant in temperature-index ( 3.75653) [-]
:var WHC.tbl: fraction of Snowvolume that can store water (0.1) [-]
"""
global statistics
global multpars
global updateCols
setglobaloption("unittrue")
self.thestep = scalar(0)
        #: files to be used in case of timeseries (scalar) input to the model
#: name of the tss file with precipitation data ("../intss/P.tss")
self.precipTss = "../intss/P.tss"
self.evapTss="../intss/PET.tss" #: name of the tss file with potential evap data ("../intss/PET.tss")
self.tempTss="../intss/T.tss" #: name of the tss file with temperature data ("../intss/T.tss")
self.inflowTss="../intss/Inflow.tss" #: NOT TESTED name of the tss file with inflow data ("../intss/Inflow.tss")
        self.SeepageTss="../intss/Seepage.tss" #: NOT TESTED name of the tss file with seepage data ("../intss/Seepage.tss")
self.logger.info("running for " + str(self.nrTimeSteps()) + " timesteps")
# Set and get defaults from ConfigFile here ###################################
self.scalarInput = int(configget(self.config,"model","ScalarInput","0"))
self.Tslice = int(configget(self.config,"model","Tslice","1"))
self.interpolMethod = configget(self.config,"model","InterpolationMethod","inv")
self.reinit = int(configget(self.config,"run","reinit","0"))
self.fewsrun = int(configget(self.config,"run","fewsrun","0"))
self.OverWriteInit = int(configget(self.config,"model","OverWriteInit","0"))
self.updating = int(configget(self.config,"model","updating","0"))
self.updateFile = configget(self.config,"model","updateFile","no_set")
self.sCatch = int(configget(self.config,"model","sCatch","0"))
self.intbl = configget(self.config,"model","intbl","intbl")
self.P_style = int(configget(self.config,"model","P_style","1"))
self.PET_style = int(configget(self.config,"model","PET_style","1"))
self.TEMP_style = int(configget(self.config,"model","TEMP_style","1"))
self.modelSnow = int(configget(self.config,"model","ModelSnow","1"))
sizeinmetres = int(configget(self.config,"layout","sizeinmetres","0"))
alf = float(configget(self.config,"model","Alpha","60"))
Qmax = float(configget(self.config,"model","AnnualDischarge","300"))
self.UpdMaxDist =float(configget(self.config,"model","UpdMaxDist","100"))
self.MaxUpdMult =float(configget(self.config,"model","MaxUpdMult","1.3"))
self.MinUpdMult =float(configget(self.config,"model","MinUpdMult","0.7"))
self.UpFrac =float(configget(self.config,"model","UpFrac","0.8"))
self.ExternalQbase=int(configget(self.config,'model','ExternalQbase','0'))
self.SetKquickFlow=int(configget(self.config,'model','SetKquickFlow','0'))
self.MassWasting = int(configget(self.config,"model","MassWasting","0"))
self.SubCatchFlowOnly = int(configget(self.config, 'model', 'SubCatchFlowOnly', '0'))
# static maps to use (normally default)
wflow_subcatch = configget(self.config,"model","wflow_subcatch","staticmaps/wflow_subcatch.map")
wflow_dem = configget(self.config,"model","wflow_dem","staticmaps/wflow_dem.map")
wflow_ldd = configget(self.config,"model","wflow_ldd","staticmaps/wflow_ldd.map")
wflow_river = configget(self.config,"model","wflow_river","staticmaps/wflow_river.map")
wflow_riverlength = configget(self.config,"model","wflow_riverlength","staticmaps/wflow_riverlength.map")
wflow_riverlength_fact = configget(self.config,"model","wflow_riverlength_fact","staticmaps/wflow_riverlength_fact.map")
wflow_landuse = configget(self.config,"model","wflow_landuse","staticmaps/wflow_landuse.map")
wflow_soil = configget(self.config,"model","wflow_soil","staticmaps/wflow_soil.map")
wflow_gauges = configget(self.config,"model","wflow_gauges","staticmaps/wflow_gauges.map")
wflow_inflow = configget(self.config,"model","wflow_inflow","staticmaps/wflow_inflow.map")
wflow_mgauges = configget(self.config,"model","wflow_mgauges","staticmaps/wflow_mgauges.map")
wflow_riverwidth = configget(self.config,"model","wflow_riverwidth","staticmaps/wflow_riverwidth.map")
# 2: Input base maps ########################################################
subcatch=ordinal(self.wf_readmap(os.path.join(self.Dir, wflow_subcatch),0.0,fail=True)) # Determines the area of calculations (all cells > 0)
subcatch = ifthen(subcatch > 0, subcatch)
if self.sCatch > 0:
subcatch = ifthen(subcatch == sCatch,subcatch)
self.Altitude=self.wf_readmap(os.path.join(self.Dir,wflow_dem),0.0,fail=True) * scalar(defined(subcatch)) #: The digital elevation map (DEM)
        self.TopoLdd=self.wf_readmap(os.path.join(self.Dir, wflow_ldd),0.0,fail=True) #: The local drain direction map (ldd)
        self.TopoId=ordinal(self.wf_readmap(os.path.join(self.Dir, wflow_subcatch),0.0,fail=True) ) #: Map defining the area over which the calculations are done (mask)
        self.River=cover(boolean(self.wf_readmap(os.path.join(self.Dir, wflow_river),0.0,fail=True)),0) #: river network map. For those cells that belong to a river a specific width is used in the kinematic wave calculations
self.RiverLength=self.wf_readmap(os.path.join(self.Dir, wflow_riverlength),0.0)
# Factor to multiply riverlength with (defaults to 1.0)
self.RiverLengthFac=self.wf_readmap(os.path.join(self.Dir, wflow_riverlength_fact),1.0)
        # read landuse and soilmap and make sure there are no missing points related to the
        # subcatchment map. Currently sets the lu and soil type to 1
        self.LandUse=self.wf_readmap(os.path.join(self.Dir , wflow_landuse),0.0,fail=True)#: Map with land-use/cover classes
self.LandUse=cover(self.LandUse,nominal(ordinal(subcatch) > 0))
self.Soil=self.wf_readmap(os.path.join(self.Dir , wflow_soil),0.0,fail=True)#: Map with soil classes
self.Soil=cover(self.Soil,nominal(ordinal(subcatch) > 0))
self.OutputLoc=self.wf_readmap(os.path.join(self.Dir , wflow_gauges),0.0,fail=True) #: Map with locations of output gauge(s)
self.InflowLoc=nominal(self.wf_readmap(os.path.join(self.Dir , wflow_inflow),0.0)) #: Map with location of abstractions/inflows.
        self.SeepageLoc=self.wf_readmap(os.path.join(self.Dir , wflow_inflow),0.0) #: Seepage from external model (if configured)
RiverWidth=self.wf_readmap(os.path.join(self.Dir, wflow_riverwidth),0.0)
# Temperature correction per cell to add
self.TempCor=self.wf_readmap(os.path.join(self.Dir , configget(self.config,"model","TemperatureCorrectionMap","staticmap/swflow_tempcor.map")),0.0)
if self.scalarInput:
self.gaugesMap=self.wf_readmap(os.path.join(self.Dir , wflow_mgauges),0.0,fail=True) #: Map with locations of rainfall/evap/temp gauge(s). Only needed if the input to the model is not in maps
self.OutputId=self.wf_readmap(os.path.join(self.Dir , wflow_subcatch),0.0,fail=True) # location of subcatchment
self.ZeroMap=0.0*scalar(defined(self.Altitude)) #map with only zero's
# 3: Input time series ###################################################
        self.P_mapstack=self.Dir + configget(self.config,"inputmapstacks","Precipitation","/inmaps/P") # timeseries for rainfall
        self.PET_mapstack=self.Dir + configget(self.config,"inputmapstacks","EvapoTranspiration","/inmaps/PET") # timeseries for potential evapotranspiration
        self.TEMP_mapstack=self.Dir + configget(self.config,"inputmapstacks","Temperature","/inmaps/TEMP") # timeseries for temperature
        self.Inflow_mapstack=self.Dir + configget(self.config,"inputmapstacks","Inflow","/inmaps/IF") # in/outflow locations (abstractions)
        self.Seepage_mapstack=self.Dir + configget(self.config,"inputmapstacks","Seepage","/inmaps/SE") # seepage from an external model
# For in memory override:
self.P = self.ZeroMap
self.PET = self.ZeroMap
self.TEMP = self.ZeroMap
# Set static initial values here #########################################
self.Latitude = ycoordinate(boolean(self.Altitude))
self.Longitude = xcoordinate(boolean(self.Altitude))
self.logger.info("Linking parameters to landuse, catchment and soil...")
self.Beta = scalar(0.6) # For sheetflow
#self.M=lookupscalar(self.Dir + "/" + modelEnv['intbl'] + "/M.tbl" ,self.LandUse,subcatch,self.Soil) # Decay parameter in Topog_sbm
self.N=lookupscalar(self.Dir + "/" + self.intbl + "/N.tbl",self.LandUse,subcatch,self.Soil) # Manning overland flow
""" *Parameter:* Manning's N for all non-river cells """
self.NRiver=lookupscalar(self.Dir + "/" + self.intbl + "/N_River.tbl",self.LandUse,subcatch,self.Soil) # Manning river
""" Manning's N for all cells that are marked as a river """
#HBV Soil params
self.FC=self.readtblDefault(self.Dir + "/" + self.intbl + "/FC.tbl",self.LandUse,subcatch,self.Soil,260.0)
self.BetaSeepage= self.readtblDefault(self.Dir + "/" + self.intbl + "/BetaSeepage.tbl",self.LandUse,subcatch,self.Soil,1.8) # exponent in soil runoff generation equation
self.LP= self.readtblDefault(self.Dir + "/" + self.intbl + "/LP.tbl",self.LandUse,subcatch,self.Soil, 0.53000) # fraction of Fieldcapacity below which actual evaporation=potential evaporation (LP)
self.K4= self.readtblDefault(self.Dir + "/" + self.intbl + "/K4.tbl",self.LandUse,subcatch,self.Soil, 0.02307) # Recession constant baseflow #K4=0.07; BASEFLOW:LINEARRESERVOIR
if self.SetKquickFlow:
self.KQuickFlow= self.readtblDefault(self.Dir + "/" + self.intbl + "/KQuickFlow.tbl",self.LandUse,subcatch,self.Soil, 0.09880) # recession rate at flow HQ #KHQ=0.2; OUTFLOWUPPERZONE_NONLINEARRESERVOIR
            self.SUZ= self.readtblDefault(self.Dir + "/" + self.intbl + "/SUZ.tbl",self.LandUse,subcatch,self.Soil, 100.0) # Level over which K0 is used
self.K0= self.readtblDefault(self.Dir + "/" + self.intbl + "/K0.tbl",self.LandUse,subcatch,self.Soil, 0.3) # K0
else:
self.KHQ= self.readtblDefault(self.Dir + "/" + self.intbl + "/KHQ.tbl",self.LandUse,subcatch,self.Soil, 0.09880) # recession rate at flow HQ #KHQ=0.2; OUTFLOWUPPERZONE_NONLINEARRESERVOIR
self.HQ= self.readtblDefault(self.Dir + "/" + self.intbl + "/HQ.tbl",self.LandUse,subcatch,self.Soil, 3.27000) # high flow rate HQ for which recession rate of upper reservoir is known #HQ=3.76;
self.AlphaNL= self.readtblDefault(self.Dir + "/" + self.intbl + "/AlphaNL.tbl",self.LandUse,subcatch,self.Soil, 1.1) # measure of non-linearity of upper reservoir #Alpha=1.6;
self.PERC= self.readtblDefault(self.Dir + "/" + self.intbl + "/PERC.tbl",self.LandUse,subcatch,self.Soil, 0.4000) # percolation from Upper to Lowerzone (mm/day)
self.CFR=self.readtblDefault(self.Dir + "/" + self.intbl + "/CFR.tbl",self.LandUse,subcatch,self.Soil, 0.05000) # refreezing efficiency constant in refreezing of freewater in snow
#self.FoCfmax=self.readtblDefault(self.Dir + "/" + modelEnv['intbl'] + "/FoCfmax.tbl",self.LandUse,subcatch,self.Soil, 0.6000) # correcton factor for snow melt/refreezing in forested and non-forested areas
self.Pcorr=self.readtblDefault(self.Dir + "/" + self.intbl + "/Pcorr.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for precipitation
self.RFCF=self.readtblDefault(self.Dir + "/" + self.intbl + "/RFCF.tbl",self.LandUse,subcatch,self.Soil,1.0) # correction factor for rainfall
self.SFCF=self.readtblDefault(self.Dir + "/" + self.intbl + "/SFCF.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for snowfall
self.Cflux= self.readtblDefault(self.Dir + "/" + self.intbl + "/Cflux.tbl",self.LandUse,subcatch,self.Soil, 2.0) # maximum capillary rise from runoff response routine to soil moisture routine
self.ICF= self.readtblDefault(self.Dir + "/" + self.intbl + "/ICF.tbl",self.LandUse,subcatch,self.Soil, 2.0) # maximum interception storage (in forested AND non-forested areas)
self.CEVPF= self.readtblDefault(self.Dir + "/" + self.intbl + "/CEVPF.tbl",self.LandUse,subcatch,self.Soil, 1.0) # correction factor for potential evaporation (1.15 in in forested areas )
self.EPF= self.readtblDefault(self.Dir + "/" + self.intbl + "/EPF.tbl",self.LandUse,subcatch,self.Soil, 0.0) # exponent of correction factor for evaporation on days with precipitation
self.ECORR= self.readtblDefault(self.Dir + "/" + self.intbl + "/ECORR.tbl",self.LandUse,subcatch,self.Soil, 1.0) # evap correction
# Soil Moisture parameters
self.ECALT= self.ZeroMap+0.00000 # evaporation lapse per 100m
#self.Ecorr=self.ZeroMap+1 # correction factor for evaporation
# HBV Snow parameters
# critical temperature for snowmelt and refreezing: TTI= 1.000
self.TTI=self.readtblDefault(self.Dir + "/" + self.intbl + "/TTI.tbl" ,self.LandUse,subcatch,self.Soil,1.0)
# TT = -1.41934 # defines interval in which precipitation falls as rainfall and snowfall
self.TT=self.readtblDefault(self.Dir + "/" + self.intbl + "/TT.tbl" ,self.LandUse,subcatch,self.Soil,-1.41934)
#Cfmax = 3.75653 # meltconstant in temperature-index
self.Cfmax=self.readtblDefault(self.Dir + "/" + self.intbl + "/Cfmax.tbl" ,self.LandUse,subcatch,self.Soil,3.75653)
# WHC= 0.10000 # fraction of Snowvolume that can store water
self.WHC=self.readtblDefault(self.Dir + "/" + self.intbl + "/WHC.tbl" ,self.LandUse,subcatch,self.Soil,0.1)
# Determine real slope and cell length
self.xl,self.yl,self.reallength = pcrut.detRealCellLength(self.ZeroMap,sizeinmetres)
self.Slope= slope(self.Altitude)
self.Slope=ifthen(boolean(self.TopoId),max(0.001,self.Slope*celllength()/self.reallength))
Terrain_angle=scalar(atan(self.Slope))
temp = catchmenttotal(cover(1.0), self.TopoLdd) * self.reallength * 0.001 * 0.001 * self.reallength
self.QMMConvUp = cover(self.timestepsecs * 0.001)/temp
# Multiply parameters with a factor (for calibration etc) -P option in command line
self.wf_multparameters()
self.N=ifthenelse(self.River, self.NRiver, self.N)
# Determine river width from DEM, upstream area and yearly average discharge
# Scale yearly average Q at outlet with upstream are to get Q over whole catchment
# Alf ranges from 5 to > 60. 5 for hardrock. large values for sediments
# "Noah J. Finnegan et al 2005 Controls on the channel width of rivers:
# Implications for modeling fluvial incision of bedrock"
upstr = catchmenttotal(1, self.TopoLdd)
Qscale = upstr/mapmaximum(upstr) * Qmax
W = (alf * (alf + 2.0)**(0.6666666667))**(0.375) * Qscale**(0.375) * (max(0.0001,windowaverage(self.Slope,celllength() * 4.0)))**(-0.1875) * self.N **(0.375)
        # Use supplied riverwidth if possible, else calculate
RiverWidth = ifthenelse(RiverWidth <=0.0, W, RiverWidth)
self.SnowWater = self.ZeroMap
# Which columns/gauges to use/ignore in kinematic wave updating
self.UpdateMap = self.ZeroMap
if self.updating:
_tmp =pcr2numpy(self.OutputLoc,0.0)
gaugear= _tmp
touse = numpy.zeros(gaugear.shape,dtype='int')
for thecol in updateCols:
idx = (gaugear == thecol).nonzero()
touse[idx] = thecol
self.UpdateMap = numpy2pcr(Nominal,touse,0.0)
            # Calculate distance to updating points (upstream) and use it to scale the correction
            # ldddist returns zero for cells at the gauges so add 1.0 to the result
self.DistToUpdPt = cover(min(ldddist(self.TopoLdd,boolean(cover(self.UpdateMap,0)),1) * self.reallength/celllength(),self.UpdMaxDist),self.UpdMaxDist)
#self.DistToUpdPt = ldddist(self.TopoLdd,boolean(cover(self.OutputId,0.0)),1)
#* self.reallength/celllength()
# Initializing of variables
self.logger.info("Initializing of model variables..")
self.TopoLdd=lddmask(self.TopoLdd,boolean(self.TopoId))
catchmentcells=maptotal(scalar(self.TopoId))
# Limit lateral flow per subcatchment (make pits at all subcatch boundaries)
# This is very handy for Ribasim etc...
if self.SubCatchFlowOnly > 0:
self.logger.info("Creating subcatchment-only drainage network (ldd)")
ds = downstream(self.TopoLdd,self.TopoId)
usid = ifthenelse(ds != self.TopoId,self.TopoId,0)
self.TopoLdd = lddrepair(ifthenelse(boolean(usid),ldd(5),self.TopoLdd))
        # Used to separate output per LandUse/management classes
#OutZones = self.LandUse
#report(self.reallength,"rl.map")
#report(catchmentcells,"kk.map")
self.QMMConv = self.timestepsecs/(self.reallength * self.reallength * 0.001) #m3/s --> mm
self.ToCubic = (self.reallength * self.reallength * 0.001) / self.timestepsecs # m3/s
self.sumprecip=self.ZeroMap #: accumulated rainfall for water balance
self.sumevap=self.ZeroMap #: accumulated evaporation for water balance
        self.sumrunoff=self.ZeroMap #: accumulated runoff for water balance (weighted for upstream area)
        self.sumlevel=self.ZeroMap #: accumulated level for water balance
        self.sumpotevap=self.ZeroMap #accumulated potential evaporation for water balance
        self.sumtemp=self.ZeroMap #accumulated temperature for water balance
self.ForecQ_qmec=self.ZeroMap # Extra inflow to kinematic wave reservoir for forcing in m^/sec
self.KinWaveVolume=self.ZeroMap
self.OldKinWaveVolume=self.ZeroMap
self.Qvolume=self.ZeroMap
self.Q=self.ZeroMap
self.suminflow=self.ZeroMap
# cntd
self.FieldCapacity=self.FC #: total water holding capacity of the soil
self.Treshold=self.LP*self.FieldCapacity # Threshold soilwaterstorage above which AE=PE
#CatSurface=maptotal(scalar(ifthen(scalar(self.TopoId)>scalar(0.0),scalar(1.0)))) # catchment surface (in km2)
self.Aspect=scalar(aspect(self.Altitude))# aspect [deg]
self.Aspect = ifthenelse(self.Aspect <= 0.0 , scalar(0.001),self.Aspect)
# On Flat areas the Aspect function fails, fill in with average...
self.Aspect = ifthenelse (defined(self.Aspect), self.Aspect, areaaverage(self.Aspect,self.TopoId))
# Set DCL to riverlength if that is longer that the basic length calculated from grid
drainlength = detdrainlength(self.TopoLdd,self.xl,self.yl)
self.DCL=max(drainlength,self.RiverLength) # m
        # Multiply with Factor (taken from upscaling operation, defaults to 1.0 if no map is supplied)
self.DCL = self.DCL * max(1.0,self.RiverLengthFac)
# water depth (m)
# set width for kinematic wave to cell width for all cells
self.Bw=detdrainwidth(self.TopoLdd,self.xl,self.yl)
# However, in the main river we have real flow so set the width to the
# width of the river
self.Bw=ifthenelse(self.River, RiverWidth, self.Bw)
# term for Alpha
self.AlpTerm=pow((self.N/(sqrt(self.Slope))),self.Beta)
# power for Alpha
self.AlpPow=(2.0/3.0)*self.Beta
# initial approximation for Alpha
# calculate catchmentsize
self.upsize=catchmenttotal(self.xl * self.yl,self.TopoLdd)
self.csize=areamaximum(self.upsize,self.TopoId)
self.logger.info("End of initial section.")
def default_summarymaps(self):
"""
Returns a list of default summary-maps at the end of a run.
This is model specific. You can also add them to the [summary]section of the ini file but stuff
you think is crucial to the model should be listed here
Example:
"""
lst = ['self.Cfmax','self.csize','self.upsize','self.TTI','self.TT','self.WHC',
'self.Slope','self.N','self.xl','self.yl','self.reallength','self.DCL','self.Bw',]
return lst
def resume(self):
""" read initial state maps (they are output of a previous call to suspend()) """
if self.reinit == 1:
self.logger.info("Setting initial conditions to default (zero!)")
self.FreeWater = cover(0.0) #: Water on surface (state variable [mm])
self.SoilMoisture = self.FC #: Soil moisture (state variable [mm])
self.UpperZoneStorage = 0.2 * self.FC #: Storage in Upper Zone (state variable [mm])
            self.LowerZoneStorage = 1.0/(3.0 * self.K4) #: Storage in Lower Zone (state variable [mm])
            self.InterceptionStorage = cover(0.0) #: Interception Storage (state variable [mm])
            self.SurfaceRunoff = cover(0.0) #: Discharge in kinematic wave (state variable [m^3/s])
            self.WaterLevel = cover(0.0) #: Water level in kinematic wave (state variable [m])
self.DrySnow=cover(0.0) #: Snow amount (state variable [mm])
else:
self.wf_resume(os.path.join(self.Dir, "instate"))
P=self.Bw+(2.0*self.WaterLevel)
self.Alpha=self.AlpTerm*pow(P,self.AlpPow)
self.OldSurfaceRunoff = self.SurfaceRunoff
self.SurfaceRunoffMM=self.SurfaceRunoff * self.QMMConv
# Determine initial kinematic wave volume
self.KinWaveVolume = self.WaterLevel * self.Bw * self.DCL
self.OldKinWaveVolume = self.KinWaveVolume
self.initstorage=self.FreeWater + self.DrySnow + self.SoilMoisture + self.UpperZoneStorage + self.LowerZoneStorage \
+ self.InterceptionStorage
if not self.SetKquickFlow:
self.KQuickFlow=(self.KHQ**(1.0+self.AlphaNL))*(self.HQ**-self.AlphaNL) # recession rate of the upper reservoir, KHQ*UHQ=HQ=kquickflow*(UHQ**alpha)
def dynamic(self):
"""
Below a list of variables that can be save to disk as maps or as
timeseries (see ini file for syntax):
*Dynamic variables*
:var self.SurfaceRunoff: Surface runoff in the kinematic wave [m^3/s]
:var self.WaterLevel: Water level in the kinematic wave [m] (above the bottom)
:var self.InterceptionStorage: actual interception storage [mm]
:var self.Snow: Snow depth [mm]
:var self.SnowWater: water content of the snow [mm]
:var self.LowerZoneStorage: water content of the lower zone [mm]
:var self.UpperZoneStorage: water content of the Upper zone [mm]
:var self.BaseFlow: Specific runoff (baseflow part) per cell [mm]
:var self.Percolation: actual percolation to the lower zone [mm]
:var self.SoilMoisture: actual soil moisture [mm]
:var self.QuickFlow: specific runoff (quickflow part) [mm]
:var self.RealQuickFlow: specific runoff (quickflow), If K upper zone is precalculated [mm]
:var self.CapFlux: capilary rise [mm]
:var self.SurfaceRunoffMM: SurfaceRunoff in mm
:var self.KinWaveVolume: Volume in the kinematic wave reservoir
:var self.SurfaceWaterSupply: the negative Inflow (water demand) that could be met from the surfacewater [m^3/s]
*Static variables*
:var self.Altitude: The altitude of each cell [m]
:var self.Bw: Width of the river [m]
        :var self.River: boolean map indicating the presence of a river [-]
        :var self.DCL: length of the river within a cell [m]
        :var self.ToCubic: Multiplier to convert mm to m^3/s for fluxes
"""
        self.wf_updateparameters() # read forcing and dynamic parameters
self.Precipitation = max(0.0,self.Precipitation) * self.Pcorr
#self.Precipitation=cover(self.wf_readmap(self.P_mapstack,0.0),0.0) * self.Pcorr
#self.PotEvaporation=cover(self.wf_readmap(self.PET_mapstack,0.0),0.0)
#self.Inflow=cover(self.wf_readmap(self.Inflow_mapstack,0.0,verbose=False),0.0)
        # These are ALWAYS 0 at present!!!
#self.Inflow=pcrut.readmapSave(self.Inflow_mapstack,0.0)
if self.ExternalQbase:
self.Seepage = cover(self.wf_readmap(self.Seepage_mapstack,0.0),0.0)
else:
self.Seepage=cover(0.0)
self.Temperature=cover(self.wf_readmap(self.TEMP_mapstack,10.0),10.0)
self.Temperature = self.Temperature + self.TempCor
        # Multiply input parameters with a factor (for calibration etc) -p option in command line (now also in ini)
self.wf_multparameters()
RainFrac=ifthenelse(1.0*self.TTI == 0.0,ifthenelse(self.Temperature <= self.TT,scalar(0.0),scalar(1.0)),min((self.Temperature-(self.TT-self.TTI/2.0))/self.TTI,scalar(1.0)))
RainFrac=max(RainFrac,scalar(0.0)) #fraction of precipitation which falls as rain
SnowFrac=1.0-RainFrac #fraction of self.Precipitation which falls as snow
self.Precipitation=self.SFCF*SnowFrac*self.Precipitation+self.RFCF*RainFrac*self.Precipitation # different correction for rainfall and snowfall
self.PotEvaporation=exp(-self.EPF*self.Precipitation)*self.ECORR * self.PotEvaporation # correction for potential evaporation on wet days
self.PotEvaporation=self.CEVPF*self.PotEvaporation # Correct per landuse
SnowFall=SnowFrac*self.Precipitation #: snowfall depth
RainFall=RainFrac*self.Precipitation #: rainfall depth
PotSnowMelt=ifthenelse(self.Temperature > self.TT,self.Cfmax*(self.Temperature-self.TT),scalar(0.0)) #Potential snow melt, based on temperature
PotRefreezing=ifthenelse(self.Temperature < self.TT, self.Cfmax*self.CFR*(self.TT-self.Temperature),0.0) #Potential refreezing, based on temperature
Refreezing=ifthenelse(self.Temperature < self.TT,min(PotRefreezing,self.FreeWater),0.0) #actual refreezing
self.SnowMelt=min(PotSnowMelt,self.DrySnow) #actual snow melt
self.DrySnow=self.DrySnow+SnowFall+Refreezing-self.SnowMelt #dry snow content
self.FreeWater=self.FreeWater-Refreezing #free water content in snow
MaxFreeWater=self.DrySnow*self.WHC
self.FreeWater=self.FreeWater+self.SnowMelt+RainFall
InSoil = max(self.FreeWater-MaxFreeWater,0.0) #abundant water in snow pack which goes into soil
self.FreeWater=self.FreeWater-InSoil
RainAndSnowmelt = RainFall + self.SnowMelt
self.SnowCover = ifthenelse(self.DrySnow >0, scalar(1), scalar(0))
self.NrCell= areatotal(self.SnowCover,self.TopoId)
#first part of precipitation is intercepted
Interception=min(InSoil,self.ICF-self.InterceptionStorage)#: Interception in mm/timestep
self.InterceptionStorage=self.InterceptionStorage+Interception #: Current interception storage
NetInSoil=InSoil-Interception
self.SoilMoisture=self.SoilMoisture+NetInSoil
DirectRunoff=max(self.SoilMoisture-self.FieldCapacity,0.0) #if soil is filled to capacity: abundant water runs of directly
self.SoilMoisture=self.SoilMoisture-DirectRunoff
NetInSoil=NetInSoil-DirectRunoff #net water which infiltrates into soil
MaxSnowPack = 10000.0
if self.MassWasting:
# Masswasting of snow
            # 5.67 = tan(80 degrees)
SnowFluxFrac = min(0.5,self.Slope/5.67) * min(1.0,self.DrySnow/MaxSnowPack)
MaxFlux = SnowFluxFrac * self.DrySnow
self.DrySnow = accucapacitystate(self.TopoLdd,self.DrySnow, MaxFlux)
self.FreeWater = accucapacitystate(self.TopoLdd,self.FreeWater,SnowFluxFrac * self.FreeWater )
else:
SnowFluxFrac = self.ZeroMap
MaxFlux= self.ZeroMap
IntEvap=min(self.InterceptionStorage,self.PotEvaporation) #: Evaporation from interception storage
self.InterceptionStorage=self.InterceptionStorage-IntEvap
        # In the original HBV code
RestEvap = max(0.0,self.PotEvaporation-IntEvap)
SoilEvap=ifthenelse(self.SoilMoisture > self.Treshold,min(self.SoilMoisture,RestEvap),\
min(self.SoilMoisture,min(RestEvap,self.PotEvaporation*(self.SoilMoisture/self.Treshold))))
#: soil evapotranspiration
self.SoilMoisture=self.SoilMoisture-SoilEvap #evaporation from soil moisture storage
ActEvap=IntEvap+SoilEvap #: Sum of evaporation components (IntEvap+SoilEvap)
HBVSeepage=((self.SoilMoisture/self.FieldCapacity)**self.BetaSeepage)*NetInSoil #runoff water from soil
self.SoilMoisture=self.SoilMoisture-HBVSeepage
Backtosoil=min(self.FieldCapacity-self.SoilMoisture,DirectRunoff) #correction for extremely wet periods: soil is filled to capacity
DirectRunoff=DirectRunoff-Backtosoil
self.SoilMoisture=self.SoilMoisture+Backtosoil
InUpperZone=DirectRunoff+HBVSeepage # total water available for runoff
# Steps is always 1 at the moment
# calculations for Upper zone
self.UpperZoneStorage=self.UpperZoneStorage+InUpperZone #incoming water from soil
self.Percolation=min(self.PERC,self.UpperZoneStorage) #Percolation
self.UpperZoneStorage=self.UpperZoneStorage-self.Percolation
self.CapFlux=self.Cflux*(((self.FieldCapacity-self.SoilMoisture)/self.FieldCapacity)) #: Capillary flux flowing back to soil
self.CapFlux=min(self.UpperZoneStorage,self.CapFlux)
self.CapFlux=min(self.FieldCapacity-self.SoilMoisture,self.CapFlux)
self.UpperZoneStorage=self.UpperZoneStorage-self.CapFlux
self.SoilMoisture=self.SoilMoisture+self.CapFlux
if not self.SetKquickFlow:
self.QuickFlow = max(0,self.KQuickFlow*(self.UpperZoneStorage**(1.0+self.AlphaNL)))
self.RealQuickFlow = self.ZeroMap
else:
self.QuickFlow = self.KQuickFlow*self.UpperZoneStorage
self.RealQuickFlow = max(0,self.K0*(self.UpperZoneStorage - self.SUZ))
"""Quickflow volume in mm/timestep"""
self.UpperZoneStorage=self.UpperZoneStorage-self.QuickFlow-self.RealQuickFlow
# calculations for Lower zone
self.LowerZoneStorage=self.LowerZoneStorage+self.Percolation
self.BaseFlow=min(self.LowerZoneStorage,self.K4*self.LowerZoneStorage) #: Baseflow in mm/timestep
self.LowerZoneStorage=self.LowerZoneStorage-self.BaseFlow
# Direct runoff generation
if self.ExternalQbase:
DirectRunoffStorage=self.QuickFlow+self.Seepage+self.RealQuickFlow
else:
DirectRunoffStorage=self.QuickFlow+self.BaseFlow+self.RealQuickFlow
self.ActEvap = ActEvap
self.InSoil = InSoil
self.RainAndSnowmelt = RainAndSnowmelt
self.NetInSoil = NetInSoil
self.InwaterMM=max(0.0,DirectRunoffStorage)
self.Inwater=self.InwaterMM * self.ToCubic
self.QuickFlowCubic = (self.QuickFlow + self.RealQuickFlow) * self.ToCubic
self.BaseFlowCubic = self.BaseFlow * self.ToCubic
self.SurfaceWaterSupply = ifthenelse (self.Inflow < 0.0 , max(-1.0 * self.Inwater,self.SurfaceRunoff), self.ZeroMap)
self.Inwater = ifthenelse(self.SurfaceRunoff + self.Inwater < 0.0, -1.0 * self.SurfaceRunoff, self.Inwater)
##########################################################################
# Runoff calculation via Kinematic wave ##################################
##########################################################################
# per distance along stream
q=self.Inwater/self.DCL + self.ForecQ_qmec/self.DCL
self.OldSurfaceRunoff=self.SurfaceRunoff
self.SurfaceRunoff = kinematic(self.TopoLdd, self.SurfaceRunoff,q,self.Alpha, self.Beta,self.Tslice,self.timestepsecs,self.DCL) # m3/s
self.SurfaceRunoffMM=self.SurfaceRunoff*self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
InflowKinWaveCell=upstream(self.TopoLdd,self.SurfaceRunoff)
self.MassBalKinWave = (self.KinWaveVolume - self.OldKinWaveVolume)/self.timestepsecs + InflowKinWaveCell + self.Inwater - self.SurfaceRunoff
Runoff=self.SurfaceRunoff
# Updating
# --------
# Assume a tss file with as many columns as output locations. Start updating for each non-missing value, beginning with the
# first column (nr 1). Assumes that the output locations and the columns match!
if self.updating:
QM = timeinputscalar(self.updateFile, self.UpdateMap) * self.QMMConv
# Now update the state. Just add to the Ustore
# self.UStoreDepth = result
# Now determine the multiplication ratio for each gauge influence area.
# For missing gauges 1.0 is assumed (no change).
# UpDiff = areamaximum(QM, self.UpdateMap) - areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
UpRatio = areamaximum(QM, self.UpdateMap)/areamaximum(self.SurfaceRunoffMM, self.UpdateMap)
UpRatio = cover(areaaverage(UpRatio,self.TopoId),1.0)
# Now split the correction between the soil store and the kinematic wave
self.UpRatioKyn = min(self.MaxUpdMult,max(self.MinUpdMult,(UpRatio - 1.0) * self.UpFrac + 1.0))
UpRatioSoil = min(self.MaxUpdMult,max(self.MinUpdMult,(UpRatio - 1.0) * (1.0 - self.UpFrac) + 1.0))
# update/nudge self.UpperZoneStorage for the whole upstream area,
# not sure how much this helps or worsens things
UpdSoil = True
if UpdSoil:
toadd = (self.UpperZoneStorage * UpRatioSoil) - self.UpperZoneStorage
self.UpperZoneStorage = self.UpperZoneStorage + toadd
# Update the kinematic wave reservoir up to a maximum upstream distance
# TODO: add (much smaller) downstream updating also?
MM = (1.0 - self.UpRatioKyn)/self.UpdMaxDist
self.UpRatioKyn = MM * self.DistToUpdPt + self.UpRatioKyn
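# The correction factor tapers linearly from UpRatioKyn at the update point to 1.0 (no correction) at UpdMaxDist upstream.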
self.SurfaceRunoff = self.SurfaceRunoff * self.UpRatioKyn
self.SurfaceRunoffMM=self.SurfaceRunoff*self.QMMConv # SurfaceRunoffMM (mm) from SurfaceRunoff (m3/s)
self.updateRunOff()
Runoff=self.SurfaceRunoff
self.QCatchmentMM = self.SurfaceRunoff * self.QMMConvUp
#self.RunoffCoeff = self.QCatchmentMM/catchmenttotal(self.Precipitation, self.TopoLdd)/catchmenttotal(cover(1.0), self.TopoLdd)
self.sumprecip=self.sumprecip + self.Precipitation #accumulated rainfall for water balance
self.sumevap=self.sumevap + ActEvap #accumulated evaporation for water balance
self.sumpotevap=self.sumpotevap + self.PotEvaporation
self.sumtemp=self.sumtemp + self.Temperature
self.sumrunoff=self.sumrunoff + self.InwaterMM #accumulated Cell runoff for water balance
self.sumlevel=self.sumlevel + self.WaterLevel
self.suminflow=self.suminflow + self.Inflow
self.storage=self.FreeWater + self.DrySnow + self.SoilMoisture + self.UpperZoneStorage + self.LowerZoneStorage + self.InterceptionStorage
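# Water balance residual: initial storage + accumulated precipitation - accumulated evaporation - accumulated runoff - current storage (should stay close to zero).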
self.watbal=self.initstorage+self.sumprecip-self.sumevap-self.sumrunoff-self.storage
# The main function is used to run the program from the command line
def main(argv=None):
"""
Perform command line execution of the model.
"""
global multpars
global updateCols
caseName = "default_hbv"
runId = "run_default"
configfile="wflow_hbv.ini"
LogFileName="wflow.log"
_lastTimeStep = 1
_firstTimeStep = 0
fewsrun=False
runinfoFile="runinfo.xml"
timestepsecs=86400
wflow_cloneMap = 'wflow_subcatch.map'
NoOverWrite=1
loglevel = logging.DEBUG
if argv is None:
argv = sys.argv[1:]
if len(argv) == 0:
usage()
return
## Main model starts here
########################################################################
try:
opts, args = getopt.getopt(argv, 'c:QXS:F:hC:Ii:T:R:u:s:P:p:Xx:U:fl:L:M')
except getopt.error, msg:
pcrut.usage(msg)
for o, a in opts:
if o == '-F':
runinfoFile = a
fewsrun = True
if o == '-C': caseName = a
if o == '-R': runId = a
if o == '-L': LogFileName = a
if o == '-l': exec "loglevel = logging." + a
if o == '-c': configfile = a
if o == '-s': timestepsecs = int(a)
if o == '-T': _lastTimeStep=int(a)
if o == '-S': _firstTimeStep=int(a)
if o == '-h': usage()
if o == '-f': NoOverWrite = 0
if fewsrun:
ts = getTimeStepsfromRuninfo(runinfoFile,timestepsecs)
starttime = getStartTimefromRuninfo(runinfoFile)
if (ts):
_lastTimeStep = ts# * 86400/timestepsecs
_firstTimeStep = 1
else:
print "Failed to get timesteps from runinfo file: " + runinfoFile
exit(2)
else:
starttime = dt.datetime(1990,01,01)
if _lastTimeStep < _firstTimeStep:
print "The starttimestep (" + str(_firstTimeStep) +") is smaller than the last timestep (" + str(_lastTimeStep) + ")"
usage()
myModel = WflowModel(wflow_cloneMap, caseName,runId,configfile)
dynModelFw = wf_DynamicFramework(myModel, _lastTimeStep,firstTimestep=_firstTimeStep,datetimestart=starttime)
dynModelFw.createRunId(NoOverWrite=NoOverWrite,logfname=LogFileName,level=loglevel,doSetupFramework=False)
for o, a in opts:
if o == '-P':
left = a.split('=')[0]
right = a.split('=')[1]
configset(myModel.config,'variable_change_once',left,right,overwrite=True)
if o == '-p':
left = a.split('=')[0]
right = a.split('=')[1]
configset(myModel.config,'variable_change_timestep',left,right,overwrite=True)
if o == '-X': configset(myModel.config,'model','OverWriteInit','1',overwrite=True)
if o == '-I': configset(myModel.config,'model','reinit','1',overwrite=True)
if o == '-i': configset(myModel.config,'model','intbl',a,overwrite=True)
if o == '-s': configset(myModel.config,'model','timestepsecs',a,overwrite=True)
if o == '-x': configset(myModel.config,'model','sCatch',a,overwrite=True)
if o == '-c': configset(myModel.config,'model','configfile', a,overwrite=True)
if o == '-M': configset(myModel.config,'model','MassWasting',"0",overwrite=True)
if o == '-Q': configset(myModel.config,'model','ExternalQbase','1',overwrite=True)
if o == '-U':
configset(myModel.config,'model','updateFile',a,overwrite=True)
configset(myModel.config,'model','updating',"1",overwrite=True)
if o == '-u':
exec "zz =" + a
updateCols = zz
dynModelFw.setupFramework()
dynModelFw.logger.info("Command line: " + str(argv))
dynModelFw._runInitial()
dynModelFw._runResume()
dynModelFw._runDynamic(0,0)
dynModelFw._runSuspend()
dynModelFw._wf_shutdown()
os.chdir("../../")
if __name__ == "__main__":
main()
|
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import os
from traits.api import (
HasTraits, Directory, Str, Instance, provides
)
from force_gromacs.core.i_process import IProcess
from force_gromacs.simulation_builders.gromacs_topology_data import (
GromacsTopologyData
)
@provides(IProcess)
class GromacsTopologyWriter(HasTraits):
"""Class writes Gromacs topology file"""
# --------------------
# Required Attributes
# --------------------
#: GromacsTopologyData object containing information regarding
#: all topology files required and fragments present in simulation
topology_data = Instance(GromacsTopologyData)
# ------------------------------
# Required / Regular Attributes
# ------------------------------
#: Location to create topology file in. If not provided,
#: a default value including sim_name attribute will be used.
directory = Directory()
#: Name of the Gromacs topology file to be created. If not provided,
#: a default value including sim_name attribute will be used.
top_name = Str()
#: Reference name for the Gromacs simulation. Can be used to define
#: default values of directory and top_name attributes
sim_name = Str()
# ------------------
# Defaults
# ------------------
def _directory_default(self):
"""If directory is not defined, use current directory
with sim_name as default directory"""
return os.path.join(os.path.curdir, self.sim_name)
def _top_name_default(self):
"""If topology file name is not defined, use sim_name with
.top extension as default file name"""
return f"{self.sim_name}_topol.top"
# --------------------
# Private Methods
# --------------------
def _create_simulation_top(self):
"""Builds human readable topology file for Gromacs simulation"""
top_file = ""
# Add topology files at beginning
for topology in self.topology_data.molecule_files:
top_file += '#include "{}"\n'.format(topology)
# Include reference to simulation name
top_file += f'\n[ system ]\n{self.sim_name}\n'
# Add fragment symbols and numbers files at end
top_file += '\n[ molecules ]\n'
for symbol, n_mol in self.topology_data.fragment_ledger.items():
top_file += '{} {}\n'.format(symbol, n_mol)
return top_file
# ------------------
# Public Methods
# ------------------
def recall_stderr(self):
"""Returns dummy stderr message"""
return ''
def recall_stdout(self):
"""Returns dummy stdout message"""
return ''
def bash_script(self):
"""Output terminal command as a bash script"""
top_file = self._create_simulation_top()
bash_script = (
f"cat <<EOM > {self.directory}/{self.top_name}"
f"\n{top_file}EOM"
)
return bash_script
def run(self):
"""Writes a human readable topology file for
Gromacs simulation"""
top_file = self._create_simulation_top()
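# NOTE: `dry_run` is assumed to be provided elsewhere (e.g. by the surrounding pipeline); it is not declared as a trait on this class.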
if not self.dry_run:
with open(f'{self.directory}/{self.top_name}',
'w') as outfile:
outfile.write(top_file)
# Provide successful return code
return 0
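# Example usage (hypothetical values, assuming a populated GromacsTopologyData instance):
#   writer = GromacsTopologyWriter(topology_data=topology_data, sim_name='surfactant')
#   print(writer.bash_script())  # heredoc that writes ./surfactant/surfactant_topol.top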
|
# -*- coding: utf-8 -*-
from py4web import action
from mptools.frameworks.py4web.controller import LocalsOnly
from .... import settings
from .common import pbfWebWrapper
from .callbacks import vtile_
@action(f'{settings.PATHROOT}/vtile/<xtile:int>/<ytile:int>/<zoom:int>', method=['GET'])
@action.uses(LocalsOnly())
@action.uses(pbfWebWrapper)
def vtile_xyz(xtile, ytile, zoom):
return pbfWebWrapper(vtile_, x=xtile, y=ytile, z=zoom, source_name='osm')()
@action(f'{settings.PATHROOT}/vtile/<xtile:int>/<ytile:int>', method=['GET','POST'])
@action.uses(LocalsOnly())
@action.uses(pbfWebWrapper)
def vtile_xy(xtile, ytile):
return pbfWebWrapper(vtile_, x=xtile, y=ytile, source_name='osm')()
@action(f'{settings.PATHROOT}/vtile', method=['GET','POST'])
@action.uses(LocalsOnly())
@action.uses(pbfWebWrapper)
def vtile():
return pbfWebWrapper(vtile_, source_name='osm')()
|
import discord
from discord.ext import commands
from discord.utils import get
class c205(commands.Cog, name="c205"):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='Manoeuvre_Salt_the_Earth', aliases=['c205', 'Scorn_Operative_11'])
async def example_embed(self, ctx):
embed = discord.Embed(title='Manoeuvre - Salt the Earth',
color=0xBC5A84)
embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2348914.jpg')
embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3 (Scorn Operative)', inline=True)
embed.add_field(name='Type', value='Trap/Continuous', inline=False)
embed.add_field(name='Card Effect', value='If a "Scorn Operative" monster you control declares an attack, or is attacked: You can draw 1 card. You can only use this effect of "Manoeuvre - Salt the Earth" up to twice per turn. If you activate and resolve the effect of "Manoeuvre - Synthetic Motion": You can draw 1 card. You can only control 1 "Manoeuvre - Salt the Earth".', inline=False)
embed.set_footer(text='Set Code: GMMP')
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(c205(bot))
|
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import dataent
from dataent.translate import send_translations
@dataent.whitelist()
def get(name):
"""
Return the :term:`doclist` of the `Page` specified by `name`
"""
page = dataent.get_doc('Page', name)
if page.is_permitted():
page.load_assets()
docs = dataent._dict(page.as_dict())
if getattr(page, '_dynamic_page', None):
docs['_dynamic_page'] = 1
return docs
else:
dataent.response['403'] = 1
raise dataent.PermissionError('No read permission for Page %s' %(page.title or name))
@dataent.whitelist(allow_guest=True)
def getpage():
"""
Load the page from `dataent.form` and send it via `dataent.response`
"""
page = dataent.form_dict.get('name')
doc = get(page)
# load translations
if dataent.lang != "en":
send_translations(dataent.get_lang_dict("page", page))
dataent.response.docs.append(doc)
def has_permission(page):
if dataent.session.user == "Administrator" or "System Manager" in dataent.get_roles():
return True
page_roles = [d.role for d in page.get("roles")]
if page_roles:
if dataent.session.user == "Guest" and "Guest" not in page_roles:
return False
elif not set(page_roles).intersection(set(dataent.get_roles())):
# check if roles match
return False
if not dataent.has_permission("Page", ptype="read", doc=page):
# check if there are any user_permissions
return False
else:
# hack for home pages! if no Has Roles, allow everyone to see!
return True
|
def main():
primes = [2]
x = 3
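# Trial division against previously found primes: x is prime if no prime p with p*p <= x divides it.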
while primes[-1] <= 2e6:
for p in primes:
if x % p == 0:
break
if p * p > x:
primes.append(x)
break
x += 1
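# The while-condition only fails after a prime above 2e6 has been appended; drop that last prime so the sum covers primes below two million.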
print(sum(primes[:-1]))
main()
|
# Telemetry ZeroMQ TCP Publisher
# Copyright (c) 2022 Applied Engineering
import concurrent.futures
import logging
import msgpack
import queue
import threading
import traceback
import zmq
import time
import random
# Set logging verbosity.
# CRITICAL will not log anything.
# ERROR will only log exceptions.
# INFO will log more information.
log_level = logging.INFO
# ZeroMQ Context.
context = zmq.Context.instance()
# Define the pub socket using the Context.
pub = context.socket(zmq.PUB)
pub.bind("tcp://*:5556")
# Define the rep socket using the Context.
rep = context.socket(zmq.REP)
rep.bind("tcp://*:55561")
# Define message end sequence.
end = b'EOM\n'
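# NOTE: `end` is declared here but never appended to the published messages below; msgpack framing is relied on instead.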
testData = {
"psuMode" : 1,
"throttlePercent" : 100,
"dutyPercent" : 100,
"pwmFrequency" : 1000,
"rpm" : 1200.0,
"torque" : 100.0,
"tempC" : 30.0,
"sourceVoltage" : 12.0,
"pwmCurrent" : 6.0,
"powerChange" : 1.0,
"voltageChange" : 1.0,
"mddStatus" : True,
"ocpStatus" : True,
"ovpStatus" : True,
"timeStamp" : 0.0
}
startTimestamp = time.time()
def modifyData(data):
data["timeStamp"] = round(time.time(), 3)
data["psuMode"] = random.randrange(1, 2)
data["throttlePercent"] = random.randrange(1, 100)
data["dutyPercent"] = random.randrange(1, 100)
data["pwmFrequency"] = random.randrange(100, 5000)
data["rpm"] = random.uniform(1.0, 3000.0)
data["torque"] = random.uniform(1.0, 500.0)
data["tempC"] = random.uniform(25.0, 70.0)
data["sourceVoltage"] = random.uniform(1.0, 40.0)
data["pwmCurrent"] = random.uniform(1.0, 6.0)
data["powerChange"] = random.uniform(0, 100.0)
data["voltageChange"] = random.uniform(0, 40.0)
data["mddStatus"] = True
data["ocpStatus"] = True
data["ovpStatus"] = True
return data
def sendPubData(exit_event):
while not exit_event.is_set():
try:
pub.send(msgpack.packb(modifyData(testData)))
logging.info(f"Sent data with timestamp - {round(time.time(), 3)}")
time.sleep(0.1)
except:
traceback.print_exc()
exit_event.set()
#
def updateSystemTime(timestamp):
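# Setting the wall clock via CLOCK_REALTIME is Linux/POSIX-specific and normally needs elevated privileges (assumed to be available on the telemetry host).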
#print(timestamp)
clk_id = time.CLOCK_REALTIME
time.clock_settime(clk_id, float(timestamp))
def sendSyncRequestSuccess():
rep.send(msgpack.packb(True))
def receiveTimestampSync(exit_event):
while not exit_event.is_set():
try:
newTimestamp = msgpack.unpackb(rep.recv(flags=zmq.NOBLOCK))
logging.info(f"Updating timestamp with {newTimestamp}")
updateSystemTime(newTimestamp)
sendSyncRequestSuccess()
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
logging.info("No timestamp update message")
pass # no message ready yet
else:
traceback.print_exc()
except:
traceback.print_exc()
exit_event.set()
time.sleep(1)
if __name__ == '__main__':
try:
logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s', level=log_level, datefmt="%H:%M:%S")
# Create exit event
exit_event = threading.Event()
# Spawn worker threads
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(sendPubData, exit_event)
executor.submit(receiveTimestampSync, exit_event)
except KeyboardInterrupt:
logging.info('Setting exit event.')
exit_event.set()
except:
traceback.print_exc()
exit_event.set()
|
def main():
print('hola mundo')
if __name__ == '__main__':
main()
|
from int_to_str import int_to_str
# Number Names
# Show how to spell out a number in English. You can use a pre-existing implementation or make your own, but you should support inputs up to at least one million (or the maximum value
# of your language’s default bounded integer type, if that’s less).
def main():
integer = int(''.join(c for c in input('Enter an integer: ') if c not in ',_ '))
integer_str = ('negative ' if integer < 0 else '') + int_to_str(abs(integer))
integer_str = " ".join([i.title() for i in integer_str.split(' ')])
print(integer_str)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller
from btlewrap.bluepy import BluepyBackend
import sys
from btlewrap.base import BluetoothBackendException
import paho.mqtt.client as mqtt
import time
import json
import logging
import yaml
_LOGGER = logging.getLogger(__name__)
with open("config.yaml", 'r') as stream:
try:
config = yaml.load(stream, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
print(exc)
sys.exit(1)
client = mqtt.Client()
client.username_pw_set(
config["mqtt"]["auth"]["username"], password=config["mqtt"]["auth"]["password"])
client.connect(config["mqtt"]["host"], config["mqtt"]["port"], 60)
for device in config["devices"]:
device_id = list(device.keys())[0]
address = device[device_id]["address"]
name = device[device_id]["name"]
if config["mqtt"]["discovery"]:
tr = json.dumps({
'device_class': 'temperature',
'name': '%s Temperature' % name,
'state_topic': '%s/sensor/xiaomi/%s/state' % (config["mqtt"]["state_topic"], device_id),
'unit_of_measurement': '°C',
'value_template': '{{ value_json.temperature }}'
})
hr = json.dumps({
'device_class': 'humidity',
'name': '%s Humidity' % name,
'state_topic': '%s/sensor/xiaomi/%s/state' % (config["mqtt"]["state_topic"], device_id),
'unit_of_measurement': '%',
'value_template': '{{ value_json.humidity }}'
})
br = json.dumps({
'device_class': 'battery',
'name': '%s Battery' % name,
'state_topic': '%s/sensor/xiaomi/%s/state' % (config["mqtt"]["state_topic"], device_id),
'unit_of_measurement': '%',
'value_template': '{{ value_json.battery }}'
})
client.publish("%s/sensor/xiaomi/%s_temperature/config" %
(config["mqtt"]["state_topic"], device_id), tr, retain=True)
client.publish("%s/sensor/xiaomi/%s_humidity/config" %
(config["mqtt"]["state_topic"], device_id), hr, retain=True)
client.publish("%s/sensor/xiaomi/%s_battery/config" %
(config["mqtt"]["state_topic"], device_id), br, retain=True)
device[device_id]["poller"] = MiTempBtPoller(
address, cache_timeout=300, backend=BluepyBackend)
client.loop_start()
while True:
for device in config["devices"]:
device_id = list(device.keys())[0]
poller = device[device_id]["poller"]
print("trying to poll")
try:
r = json.dumps({
'temperature': poller.parameter_value("temperature"),
'humidity': poller.parameter_value("humidity"),
'battery': poller.parameter_value("battery")
})
except IOError as ioerr:
_LOGGER.warning("Polling error %s", ioerr)
sys.exit()
except BluetoothBackendException as bterror:
_LOGGER.warning("Polling error %s", bterror)
sys.exit()
finally:
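# NOTE: if the very first poll fails, `r` is undefined here and this publish raises; on later failures the previous reading is re-published before exiting.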
client.publish("%s/sensor/xiaomi/%s/state" %
(config["mqtt"]["state_topic"], device_id), r)
print(r)
print("waiting 300")
time.sleep(300)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.modules.loss_modules import conv_bn_layer, tconv_bn_layer, tconv_layer, conv_layer, fc_layer, fc_bn_layer
class V_CNN(nn.Module):
def __init__(self, nc, ndf, height, width):
super(V_CNN,self).__init__()
self.nc = nc
self.ndf = ndf
self.w_params = nn.Sequential (
conv_layer(self.nc, self.ndf,4,stride=2,padding=1),
nn.LeakyReLU(0.1),
conv_bn_layer(self.ndf,self.ndf*2,4,stride=2,padding=1),
nn.LeakyReLU(0.1),
conv_bn_layer(self.ndf*2,self.ndf*4,4,stride=2,padding=1),
nn.LeakyReLU(0.1),
conv_bn_layer(self.ndf*4,self.ndf*8,4,stride=2,padding=1),
nn.LeakyReLU(0.1),
nn.Flatten(1),
fc_layer(self.ndf * 8 * height * width,1)
)
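# NOTE: the final fc_layer above assumes `height` and `width` are already the post-convolution feature-map dimensions (the input spatial size is reduced 16x by the four stride-2 convolutions).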
def forward(self, x):
x = self.w_params(x)
return x
class V_DCGAN(nn.Module):
def __init__(self, nc, ndf):
super().__init__()
self.nc = nc
self.ndf = ndf
# Input Dimension: (nc) x 64 x 64
self.conv1 = nn.Conv2d(self.nc, self.ndf,
4, 2, 1, bias=False)
# Input Dimension: (ndf) x 32 x 32
self.conv2 = nn.Conv2d(self.ndf, self.ndf*2,
4, 2, 1, bias=False)
self.bn2 = nn.BatchNorm2d(self.ndf*2)
# Input Dimension: (ndf*2) x 16 x 16
self.conv3 = nn.Conv2d(self.ndf*2, self.ndf*4,
4, 2, 1, bias=False)
self.bn3 = nn.BatchNorm2d(self.ndf*4)
# Input Dimension: (ndf*4) x 8 x 8
self.conv4 = nn.Conv2d(self.ndf*4, self.ndf*8,
4, 2, 1, bias=False)
self.bn4 = nn.BatchNorm2d(self.ndf*8)
# Input Dimension: (ndf*8) x 4 x 4
self.conv5 = nn.Conv2d(self.ndf*8, 1, 4, 1, 0, bias=False)
def forward(self, x):
x = F.leaky_relu(self.conv1(x), 0.2, True)
x = F.leaky_relu(self.bn2(self.conv2(x)), 0.2, True)
x = F.leaky_relu(self.bn3(self.conv3(x)), 0.2, True)
x = F.leaky_relu(self.bn4(self.conv4(x)), 0.2, True)
x = self.conv5(x)
return x
class V_DCGAN_128(nn.Module):
def __init__(self, nc, ndf):
super().__init__()
self.nc = nc
self.ndf = ndf
# Input Dimension: (nc) x 128 x 128
self.conv1 = nn.Conv2d(self.nc, self.ndf,
4, 2, 1, bias=False)
# Input Dimension: (ndf) x 64 x 64
self.conv2 = nn.Conv2d(self.ndf, self.ndf*2,
4, 2, 1, bias=False)
self.bn2 = nn.BatchNorm2d(self.ndf*2)
# Input Dimension: (ndf*2) x 32 x 32
self.conv3 = nn.Conv2d(self.ndf*2, self.ndf*4,
4, 2, 1, bias=False)
self.bn3 = nn.BatchNorm2d(self.ndf*4)
# Input Dimension: (ndf*4) x 16 x 16
self.conv4 = nn.Conv2d(self.ndf*4, self.ndf*8,
4, 2, 1, bias=False)
self.bn4 = nn.BatchNorm2d(self.ndf*8)
# Input Dimension: (ndf*8) x 8 x 8
self.conv5 = nn.Conv2d(self.ndf*8, self.ndf*16,
4, 2, 1, bias=False)
self.bn5 = nn.BatchNorm2d(self.ndf*16)
# Input Dimension: (ndf*8) x 4 x 4
self.conv6 = nn.Conv2d(self.ndf*16, 1, 4, 1, 0, bias=False)
def forward(self, x):
x = F.leaky_relu(self.conv1(x), 0.2, True)
x = F.leaky_relu(self.bn2(self.conv2(x)), 0.2, True)
x = F.leaky_relu(self.bn3(self.conv3(x)), 0.2, True)
x = F.leaky_relu(self.bn4(self.conv4(x)), 0.2, True)
x = F.leaky_relu(self.bn5(self.conv5(x)), 0.2, True)
x = self.conv6(x)
return x
|
import sys
import math
from FASTA import FASTA
from WaveletTree import WaveletTree
import Utils
def report_3(read_stopwatch, build_stopwatch, select_avg_time, rank_avg_time, access_avg_time):
report_2(read_stopwatch, build_stopwatch)
rank_file = open('rank.out', 'w')
rank_file.write(str(rank_avg_time))
rank_file.close()
access_file = open('access.out', 'w')
access_file.write(str(access_avg_time))
access_file.close()
select_file = open('select.out', 'w')
select_file.write(str(select_avg_time))
select_file.close()
def report_2(read_stopwatch, build_stopwatch):
read_file = open('read.out', 'w')
read_file.write(str(read_stopwatch.elapsed_ms()))
read_file.close()
build_file = open('build.out', 'w')
build_file.write(str(build_stopwatch.elapsed_ms()))
build_file.close()
def report(out_filepath, read_stopwatch, read_memory, build_stopwatch, build_memory):
""" Write report to file """
out_file = open(out_filepath, 'w')
out_file.write(Utils.write_line('report|' + out_filepath))
out_file.write(Utils.write_line('read|time ' + str(read_stopwatch.elapsed_ms())))
out_file.write(Utils.write_line('read|memory ' + str(read_memory) + ' kB'))
out_file.write(Utils.write_line('build|time ' + str(build_stopwatch.elapsed_ms())))
out_file.write(Utils.write_line('build|memory ' + str(build_memory) + ' kB'))
out_file.close()
""" Tester program that measures time and memory performance of reading data and building tree """
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'Missing arguments: <in_file> <query>'
exit(-1)
# Reading data
read_stopwatch = Utils.Stopwatch()
fasta_file = FASTA(sys.argv[1])
read_stopwatch.start()
fasta_file.read()
read_stopwatch.stop()
read_memory_kB = Utils.get_max_memory_kB()
alphabet = list(set(fasta_file.data))
alphabet.sort()
# Building tree
build_stopwatch = Utils.Stopwatch()
tree = WaveletTree()
build_stopwatch.start()
tree.build(alphabet, fasta_file.data)
build_stopwatch.stop()
build_memory_kB = Utils.get_max_memory_kB()
if sys.argv[2] == 'query':
data_size = len(fasta_file.data)
test_idx = data_size / 2
test_character = fasta_file.data[0]
test_nth_occurence = math.floor(math.sqrt(test_idx))
test_runs = int(sys.argv[3])
stopwatch = Utils.Stopwatch()
time_access = 0
time_select = 0
time_rank = 0
for i in xrange(test_runs):
# Access
stopwatch.restart()
stopwatch.start()
tree.access(test_idx)
stopwatch.stop()
time_access = time_access + stopwatch.elapsed_ms()
# Select
stopwatch.restart()
stopwatch.start()
tree.select(test_nth_occurence, test_character)
stopwatch.stop()
time_select = time_select + stopwatch.elapsed_ms()
stopwatch.restart()
# Rank
stopwatch.start()
tree.rank(test_idx, test_character)
stopwatch.stop()
time_rank += stopwatch.elapsed_ms()
time_select /= test_runs
time_rank /= test_runs
time_access /= test_runs
report_3(read_stopwatch, build_stopwatch, time_select, time_rank, time_access)
else:
report_2(read_stopwatch, build_stopwatch)
|
from requests.structures import CaseInsensitiveDict
def fetch(session, url, data):
try:
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
result = session.post(url, data, headers=headers)
return result.json()
except ValueError:
return {'Error': 'Something went wrong !'}
|
import pytest
import numpy as np
from transboost.weak_learner.decision_stump import MulticlassDecisionStump
from transboost.label_encoder import OneHotEncoder
class TestMulticlassDecisionStump:
def setup_method(self):
self.X = np.array([[0,13,255],[0,52,127],[3,4,204]])
self.Y = np.array([0,1,1])
def test_sort_data_as_staticmethod(self):
sorted_X, sorted_X_idx = MulticlassDecisionStump.sort_data(self.X)
sorted_answer_X = np.array([[0,4,127], [0,13,204], [3,52,255]])
sorted_X_idx_answer = np.array([[0,2,1], [1,0,2], [2,1,0]])
assert np.all(sorted_X == sorted_answer_X)
assert np.all(sorted_X_idx == sorted_X_idx_answer)
def test_fit(self):
ds = MulticlassDecisionStump(encoder=OneHotEncoder(self.Y))
sorted_X, sorted_X_idx = ds.sort_data(self.X)
ds.fit(self.X, self.Y, W=None, sorted_X=sorted_X, sorted_X_idx=sorted_X_idx)
assert ds.feature == 2
assert ds.stump == 229.5
assert ds.stump_idx == 2
def test_predict(self):
encoder = OneHotEncoder(self.Y)
ds = MulticlassDecisionStump(encoder=encoder)
sorted_X, sorted_X_idx = ds.sort_data(self.X)
ds.fit(self.X, self.Y, W=None, sorted_X=sorted_X, sorted_X_idx=sorted_X_idx)
Xts = np.array([[0,0,1], # Should be class 1
[0,0,255]]) # Should be class 0
Y_pred = encoder.decode_labels(ds.predict(Xts))
assert Y_pred[0] == 1
assert Y_pred[1] == 0
|
import os
import click
import audiomate
from audiomate import annotations
import spoteno
@click.command()
@click.argument('full_folder', type=click.Path())
@click.argument('out_folder', type=click.Path())
def run(full_folder, out_folder):
if not os.path.exists(out_folder):
print('Load source corpus')
ds = audiomate.Corpus.load(full_folder)
print('Normalize transcripts')
normalizer = spoteno.Normalizer.de()
utt_ids = []
transcripts = []
ll_idx = audiomate.corpus.LL_WORD_TRANSCRIPT
for utt in ds.utterances.values():
transcript = utt.label_lists[ll_idx].join()
transcripts.append(transcript)
utt_ids.append(utt.idx)
result = normalizer.normalize_list(transcripts)
for i, utt_idx in enumerate(utt_ids):
orig = transcripts[i]
normalized = result[i]
ll_orig = annotations.LabelList.create_single(
orig,
'word-transcript-orig'
)
ll_normalized = annotations.LabelList.create_single(
normalized,
audiomate.corpus.LL_WORD_TRANSCRIPT
)
ds.utterances[utt_idx].set_label_list(ll_orig)
ds.utterances[utt_idx].set_label_list(ll_normalized)
print('Save normalized corpus')
os.makedirs(out_folder, exist_ok=True)
ds.save_at(out_folder)
else:
print('Already normalized')
if __name__ == '__main__':
run()
|
import numpy as np
niter=1500 #trials per worker
r_values=np.array([0.1,0.2,0.3,0.4,0.5,.6,.7,.8,.9,1.,2.])#cm
D_values=np.array([0.2,1.0,1.5,2.0,3.,4.,5.])#cm^2/s
A_values=np.array([20.25,25,39,50,56.25,100,156.25,189,250])[::-1]#cm^2
L_values=np.sqrt(A_values)#cm
kappa_values=np.array([5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100,250,500])#1/s
# beta_values=np.array([0.01,0.05,0.01,0.1,0.5,1])#cm
Dt_values=np.array([10**-i for i in range(6)])
num_trials_per_setting=1
reflect_values=np.array([0,1])
set_second_values=np.array([0])
#iterate over settings, scheduling the longest jobs first
count=0
for set_second in set_second_values:
for r in r_values:
for D in D_values:
for L in L_values:
for kappa in kappa_values:
# for beta in beta_values:
for Dt in Dt_values:
for reflect in reflect_values:
num_trials=0
while num_trials<num_trials_per_setting:
num_trials+=1
count=count+1
print(f"{r} {D} {L} {kappa} {Dt} {niter} {reflect} {set_second}")
# print(count)
|
'''
Adapted from source files located here
https://github.com/Halogen002/Flare-Qt
My thanks go to Halogen002 for providing me with
the information I needed to write this definition.
I extended it to include xbox gametypes as well
'''
from reclaimer.common_descs import *
from reclaimer.misc.defs.objs.pc_gametype import PcGametypeTag
from supyr_struct.defs.tag_def import TagDef
def get(): return pc_gametype_def
def is_xbox_gametype(node=None, parent=None, **kwargs):
if parent is None:
return node.get_root().is_xbox
return parent.get_root().is_xbox
##################################################
'''Shared enumerators, booleans, and bitstructs'''
##################################################
enum_off_on = UEnum32('',
'off',
'on',
)
# why is it that two of the slayer-specific booleans reverse the truthiness?
enum_on_off = UEnum32('',
'on',
'off',
)
speed_with_ball = UEnum32('speed with ball',
'slow',
'normal',
'fast',
)
trait_with_ball = UEnum32('trait with ball',
'none',
'invisible',
'extra damage',
'damage resistance',
)
trait_without_ball = UEnum32('trait without ball',
'none',
'invisible',
'extra damage',
'damage resistance',
)
ball_type = UEnum32('ball type',
'normal',
'reverse tag',
'juggernaut',
)
race_order = UEnum8('order',
'normal',
'any order',
'rally',
)
race_points_used = UEnum32('points used',
'minimum',
'maximum',
'sum'
)
vehicle_spawn = BitStruct("vehicle spawn",
UBitEnum('vehicle type',
'default',
'none',
'warthogs',
'ghosts',
'scorpions',
'rocket warthogs',
'banshees',
'gun turrets',
'custom',
SIZE=4,
),
UBitInt('warthogs', SIZE=3),
UBitInt('ghosts', SIZE=3),
UBitInt('scorpions', SIZE=3),
UBitInt('rocket warthogs', SIZE=3),
UBitInt('banshees', SIZE=3),
UBitInt('gun turrets', SIZE=3),
SIZE=4,
)
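# Layout: a 4-bit vehicle-type enum followed by six 3-bit spawn counts (22 of the struct's 32 bits are used).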
player_settings = Bool32("player settings",
'radar enabled',
'friend on hud',
'infinite grenades',
'shields disabled',
'invisible',
'generic weapons',
'enemies not on radar',
)
game_type = UEnum32('game type',
('ctf', 1),
('slayer', 2),
('oddball', 3),
('king', 4),
('race', 5),
DEFAULT=1
)
objective_indicator = UEnum32('objective indicator',
'motion tracker',
'nav point',
'none',
)
weapon_type = UEnum32('weapon type',
'default',
'pistols',
'rifles',
'plasma rifles',
'sniper',
'no sniper',
'rocket launchers',
'shotguns',
'short range',
'human',
'covenant',
'classic',
'heavy weapons',
)
friendly_fire = UEnum32('friendly fire',
'off',
'on',
'shields',
'explosions',
)
vehicle_type = UEnum32('vehicle type',
'all',
'none',
'warthog',
'ghost',
'scorpion',
)
##################################################
'''Structs for each of the different game types'''
##################################################
ctf_settings = Struct('ctf settings',
#UEnum8('assault', INCLUDE=enum_off_on),
#UInt8('unknown', VISIBLE=False),
#UEnum8('flag must reset', INCLUDE=enum_off_on),
#UEnum8('flag must be at home', INCLUDE=enum_off_on),
# It looks better this way.
# Make it a FlBool32 so it doesn't get screwed up for powerpc
FlBool32('flags',
('assault', 1<<0),
('flag must reset', 1<<16),
('flag must be at home', 1<<24),
),
UInt32('single_flag_time', SIDETIP="seconds", UNIT_SCALE=1/30),
SIZE=28,
)
slayer_settings = Struct('slayer settings',
#UEnum8('death bonus', INCLUDE=enum_on_off),
#UEnum8('kill penalty', INCLUDE=enum_on_off),
#UEnum8('kill in order', INCLUDE=enum_off_on),
# It looks better this way.
# Make it a FlBool32 so it doesn't get screwed up for powerpc
FlBool32('flags',
('no death bonus', 1<<0),
('no kill penalty', 1<<8),
('kill in order', 1<<16),
),
SIZE=28,
)
oddball_settings = Struct('oddball settings',
UEnum8('random ball', INCLUDE=enum_off_on),
Pad(3),
speed_with_ball,
trait_with_ball,
trait_without_ball,
ball_type,
UInt32('ball_count', MIN=1, MAX=16),
SIZE=28,
)
king_settings = Struct('king settings',
UEnum8('moving hill', INCLUDE=enum_off_on),
SIZE=28,
)
race_settings = Struct('race settings',
race_order,
Pad(3),
race_points_used,
SIZE=28,
)
header_comment = \
''' The length of an Xbox gametypes name is capped at 11
characters, whereas a PC gametype name is capped at 23.
Respawn times of 0 are "instant", which is still 3 seconds.
Respawn times cap at 300 seconds
Health can be anywhere between 50% and 400%
For PC gametypes, the max spawn for each kind of vehicle is 7.
The score limit minimum is 1, and is measured in different
units depending on what gametype this is:
CTF -------- flags
Slayer ----- kills
King ------- minutes
Oddball ---- points/minutes
Race ------- laps'''
xbox_gametype_header = Struct("gametype header",
StrUtf16('name', SIZE=24),
game_type,
UEnum32('teamplay', INCLUDE=enum_off_on),
player_settings,
objective_indicator,
UEnum32('odd man out', INCLUDE=enum_off_on),
UInt32('respawn time growth', SIDETIP="seconds", UNIT_SCALE=1/30),
UInt32('respawn time', MAX=300*30, SIDETIP="seconds", UNIT_SCALE=1/30),
UInt32('respawn suicide penalty', SIDETIP="seconds", UNIT_SCALE=1/30),
UInt32('lives', SIDETIP='[0 == unlimited]'),
Float('health',
DEFAULT=0.5, MIN=0.5, MAX=4.0, UNIT_SCALE=100, SIDETIP="%"),
UInt32('score limit', MIN=1, DEFAULT=1),
weapon_type,
vehicle_type,
SIZE=76, COMMENT=header_comment
)
pc_gametype_header = Struct("gametype header",
StrUtf16('name', SIZE=48),
game_type,
UEnum32('teamplay', INCLUDE=enum_off_on),
player_settings,
objective_indicator,
UEnum32('odd man out', INCLUDE=enum_off_on),
UInt32('respawn time growth', SIDETIP="seconds", UNIT_SCALE=1/30),
UInt32('respawn time', MAX=300*30, SIDETIP="seconds", UNIT_SCALE=1/30),
UInt32('respawn suicide penalty', SIDETIP="seconds", UNIT_SCALE=1/30),
UInt32('lives', SIDETIP='[0 == unlimited]'),
Float('health',
DEFAULT=0.5, MIN=0.5, MAX=4.0, UNIT_SCALE=100, SIDETIP="%"),
UInt32('score limit', MIN=1, DEFAULT=1),
weapon_type,
BitStruct('red vehicles', INCLUDE=vehicle_spawn),
BitStruct('blue vehicles', INCLUDE=vehicle_spawn),
UInt32('vehicle respawn time', SIDETIP="seconds", UNIT_SCALE=1/30),
friendly_fire,
UInt32('respawn betrayal penalty', SIDETIP="seconds", UNIT_SCALE=1/30),
UEnum32('auto team balance', INCLUDE=enum_off_on),
UInt32('time limit', SIDETIP="seconds", UNIT_SCALE=1/30),
SIZE=124, COMMENT=header_comment
)
xbox_gametype_footer = Container('gametype footer',
#20 byte hmac sha1 digest of the save file
BytesRaw('hmac_sig', SIZE=20),
Pad(388),
VISIBLE=False
)
pc_gametype_footer = Struct('gametype footer',
UInt32('crc 32'),
# it's possible to make a gametype platform-independent by keeping
# a copy of the settings here as well in a bytearray buffer
BytearrayRaw('hybrid settings', SIZE=28),
Pad(32),
UInt32('crc 32 ce'),
Pad(7972),
VISIBLE=False
)
header_switch = Switch('gametype header',
DEFAULT=pc_gametype_header,
CASE=is_xbox_gametype,
CASES={True: xbox_gametype_header},
)
union_settings_comment = '''
After you change these settings you'll still need to go into the
header and choose this gametype's type (ctf, slayer, race, etc.).
'''
settings = Union('gametype settings',
CASE='.gametype_header.game_type.enum_name',
CASES={
'ctf':ctf_settings,
'slayer':slayer_settings ,
'oddball':oddball_settings,
'king':king_settings,
'race':race_settings,
},
COMMENT=union_settings_comment
)
footer_switch = Switch('gametype footer',
DEFAULT=pc_gametype_footer,
CASE=is_xbox_gametype,
CASES={True: xbox_gametype_footer},
)
pc_gametype_def = TagDef('pc_gametype',
header_switch,
settings,
footer_switch,
ext='.lst', endian='<', tag_cls=PcGametypeTag,
)
|
"""Deprecated."""
load("//bzllib:defs.bzl", _src_utils = "src_utils")
src_utils = _src_utils
|
# coding=utf-8
from .base import TethysGizmoOptions
__all__ = ['PlotObject', 'LinePlot', 'PolarPlot', 'ScatterPlot',
'PiePlot', 'BarPlot', 'TimeSeries', 'AreaRange', 'HeatMap']
class PlotViewBase(TethysGizmoOptions):
"""
Plot view classes inherit from this class.
"""
gizmo_name = "plot_view"
def __init__(self, width='500px', height='500px', engine='d3'):
"""
Constructor
"""
# Initialize the super class
super(PlotViewBase, self).__init__()
self.width = width
self.height = height
if engine not in ('d3', 'highcharts'):
raise ValueError('Parameter "engine" must be either "d3" or "highcharts".')
self.engine = engine
self.plot_object = PlotObject()
@staticmethod
def get_vendor_js():
"""
JavaScript vendor libraries to be placed in the
{% block global_scripts %} block
"""
return ('tethys_gizmos/vendor/highcharts/js/highcharts.js',
'tethys_gizmos/vendor/highcharts/js/highcharts-more.js',
'tethys_gizmos/vendor/highcharts/js/modules/exporting.js',
'https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js',
'tethys_gizmos/vendor/d3_tooltip/d3.tip.v0.6.3.js')
@staticmethod
def get_gizmo_js():
"""
JavaScript specific to gizmo to be placed in the
{% block scripts %} block
"""
return ('tethys_gizmos/js/plot_view.js',)
@staticmethod
def get_gizmo_css():
"""
CSS specific to gizmo to be placed in the
{% block content_dependent_styles %} block
"""
return ('tethys_gizmos/css/plot_view.css',)
class PlotObject(TethysGizmoOptions):
"""
Base Plot Object that is constructed by plot views.
"""
def __init__(self, chart={}, title='', subtitle='', legend=None, display_legend=True,
tooltip=True, x_axis={}, y_axis={}, tooltip_format={}, plotOptions={}, **kwargs):
"""
Constructor
"""
# Initialize super class
super(PlotObject, self).__init__()
self.chart = chart
self.xAxis = x_axis
self.yAxis = y_axis
self.plotOptions = plotOptions
if title != '':
self.title = {'text': title}
if subtitle != '':
self.subtitle = {'text': subtitle}
if display_legend:
default_legend = {
'layout': 'vertical',
'align': 'right',
'verticalAlign': 'middle',
'borderWidth': 0
}
self.legend = legend or default_legend
if tooltip:
self.tooltip = tooltip_format
# add any other attributes the user wants
for key, value in kwargs.items():
setattr(self, key, value)
class LinePlot(PlotViewBase):
"""
Used to create line plot visualizations.
Attributes:
series(list, required): A list of series dictionaries.
height(str): Height of the plot element. Any valid css unit of length.
width(str): Width of the plot element. Any valid css unit of length.
engine(str): The plot engine to be used for rendering, either 'd3' or 'highcharts'. Defaults to 'd3'.
title(str): Title of the plot.
subtitle(str): Subtitle of the plot.
spline(bool): If True, lines are smoothed using a spline technique.
x_axis_title(str): Title of the x-axis.
x_axis_units(str): Units of the x-axis.
y_axis_title(str): Title of the y-axis.
y_axis_units(str): Units of the y-axis.
**Controller Example**
::
from tethys_sdk.gizmos import LinePlot
line_plot_view = LinePlot(
height='500px',
width='500px',
engine='highcharts',
title='Plot Title',
subtitle='Plot Subtitle',
spline=True,
x_axis_title='Altitude',
x_axis_units='km',
y_axis_title='Temperature',
y_axis_units='°C',
series=[
{
'name': 'Air Temp',
'color': '#0066ff',
'marker': {'enabled': False},
'data': [
[0, 5], [10, -70],
[20, -86.5], [30, -66.5],
[40, -32.1],
[50, -12.5], [60, -47.7],
[70, -85.7], [80, -106.5]
]
},
{
'name': 'Water Temp',
'color': '#ff6600',
'data': [
[0, 15], [10, -50],
[20, -56.5], [30, -46.5],
[40, -22.1],
[50, -2.5], [60, -27.7],
[70, -55.7], [80, -76.5]
]
}
]
)
context = {
'line_plot_view': line_plot_view,
}
**Template Example**
::
{% load tethys_gizmos %}
{% gizmo line_plot_view %}
"""
def __init__(self, series, height='500px', width='500px', engine='d3', title='', subtitle='', spline=False,
x_axis_title='', x_axis_units='', y_axis_title='', y_axis_units='', **kwargs):
"""
Constructor
"""
# Initialize super class
super(LinePlot, self).__init__(height=height, width=width, engine=engine)
chart = kwargs.pop('chart', None)
if not chart:
if spline:
chart = {'type': 'spline'}
else:
chart = {'type': 'line'}
if x_axis_title:
x_axis = {
'title': {
'enabled': True,
'text': '{0} ({1})'.format(x_axis_title, x_axis_units)
},
'labels': {'formatter': 'function () { return this.value + " %s"; }' % x_axis_units}
}
else:
x_axis = {
'labels': {'formatter': 'function () { return this.value + " %s"; }' % x_axis_units}
}
if y_axis_title:
y_axis = {
'title': {
'enabled': True,
'text': '{0} ({1})'.format(y_axis_title, y_axis_units)
},
'labels': {'formatter': 'function () { return this.value + " %s"; }' % y_axis_units}
}
else:
y_axis = {
'labels': {'formatter': 'function () { return this.value + " %s"; }' % y_axis_units}
}
tooltip_format = {
'headerFormat': '<b>{series.name}</b><br/>',
'pointFormat': '{point.x} %s: {point.y} %s' % (x_axis_units, y_axis_units)
}
# Initialize the plot view object
self.plot_object = PlotObject(chart=chart, title=title, subtitle=subtitle, series=series,
x_axis=x_axis, y_axis=y_axis, tooltip_format=tooltip_format, **kwargs)
class PolarPlot(PlotViewBase):
"""
Use to create a polar plot visualization.
Attributes:
series(list, required): A list of series dictionaries.
height(str): Height of the plot element. Any valid css unit of length.
width(str): Width of the plot element. Any valid css unit of length.
engine(str): The plot engine to be used for rendering, either 'd3' or 'highcharts'. Defaults to 'd3'.
title(str): Title of the plot.
subtitle(str): Subtitle of the plot.
categories(list): List of category names, one for each data point in the series.
**Controller Example**
::
from tethys_sdk.gizmos import PolarPlot
web_plot = PolarPlot(
height='500px',
width='500px',
engine='highcharts',
title='Polar Chart',
subtitle='Polar Chart',
pane={
'size': '80%'
},
categories=['Infiltration', 'Soil Moisture', 'Precipitation', 'Evaporation',
'Roughness', 'Runoff', 'Permeability', 'Vegetation'],
series=[
{
'name': 'Park City',
'data': [0.2, 0.5, 0.1, 0.8, 0.2, 0.6, 0.8, 0.3],
'pointPlacement': 'on'
},
{
'name': 'Little Dell',
'data': [0.8, 0.3, 0.2, 0.5, 0.1, 0.8, 0.2, 0.6],
'pointPlacement': 'on'
}
]
)
context = {
'web_plot': web_plot,
}
**Template Example**
::
{% load tethys_gizmos %}
{% gizmo web_plot %}
"""
def __init__(self, series=[], height='500px', width='500px', engine='d3', title='', subtitle='', categories=[],
**kwargs):
"""
Constructor
"""
# Initialize super class
super(PolarPlot, self).__init__(height=height, width=width, engine=engine)
chart = kwargs.pop('chart', None)
x_axis = kwargs.pop('x_axis', None)
y_axis = kwargs.pop('y_axis', None)
if not chart:
chart = {
'polar': True,
'type': 'line'
}
if not x_axis:
x_axis = {
'categories': categories,
'tickmarkPlacement': 'on',
'lineWidth': 0
}
if not y_axis:
y_axis = {
'gridLineInterpolation': 'polygon',
'lineWidth': 0,
'min': 0
}
# Initialize super class
self.plot_object = PlotObject(chart=chart, title=title, subtitle=subtitle, series=series,
x_axis=x_axis, y_axis=y_axis, **kwargs)
class ScatterPlot(PlotViewBase):
"""
Use to create a scatter plot visualization.
Attributes:
series(list, required): A list of series dictionaries.
height(str): Height of the plot element. Any valid css unit of length.
width(str): Width of the plot element. Any valid css unit of length.
engine(str): The plot engine to be used for rendering, either 'd3' or 'highcharts'. Defaults to 'd3'.
title(str): Title of the plot.
subtitle(str): Subtitle of the plot.
spline(bool): If True, lines are smoothed using a spline technique.
x_axis_title(str): Title of the x-axis.
x_axis_units(str): Units of the x-axis.
y_axis_title(str): Title of the y-axis.
y_axis_units(str): Units of the y-axis.
**Controller Example**
::
from tethys_sdk.gizmos import ScatterPlot
male_dataset = {
'name': 'Male',
'color': '#0066ff',
'data': [
[174.0, 65.6], [175.3, 71.8], [193.5, 80.7], [186.5, 72.6],
[187.2, 78.8], [181.5, 74.8], [184.0, 86.4], [184.5, 78.4],
[175.0, 62.0], [184.0, 81.6], [180.0, 76.6], [177.8, 83.6],
[192.0, 90.0], [176.0, 74.6], [174.0, 71.0], [184.0, 79.6],
[192.7, 93.8], [171.5, 70.0], [173.0, 72.4], [176.0, 85.9],
[176.0, 78.8], [180.5, 77.8], [172.7, 66.2], [176.0, 86.4],
[173.5, 81.8], [178.0, 89.6], [180.3, 82.8], [180.3, 76.4],
[164.5, 63.2], [173.0, 60.9], [183.5, 74.8], [175.5, 70.0],
[188.0, 72.4], [189.2, 84.1], [172.8, 69.1], [170.0, 59.5],
[182.0, 67.2], [170.0, 61.3], [177.8, 68.6], [184.2, 80.1],
[186.7, 87.8], [171.4, 84.7], [172.7, 73.4], [175.3, 72.1],
[180.3, 82.6], [182.9, 88.7], [188.0, 84.1], [177.2, 94.1],
[172.1, 74.9], [167.0, 59.1], [169.5, 75.6], [174.0, 86.2],
[172.7, 75.3], [182.2, 87.1], [164.1, 55.2], [163.0, 57.0],
[171.5, 61.4], [184.2, 76.8], [174.0, 86.8], [174.0, 72.2],
[177.0, 71.6], [186.0, 84.8], [167.0, 68.2], [171.8, 66.1]
]
}
female_dataset = {
'name': 'Female',
'color': '#ff6600',
'data': [
[161.2, 51.6], [167.5, 59.0], [159.5, 49.2], [157.0, 63.0],
[155.8, 53.6], [170.0, 59.0], [159.1, 47.6], [166.0, 69.8],
[176.2, 66.8], [160.2, 75.2], [172.5, 55.2], [170.9, 54.2],
[172.9, 62.5], [153.4, 42.0], [160.0, 50.0], [147.2, 49.8],
[168.2, 49.2], [175.0, 73.2], [157.0, 47.8], [167.6, 68.8],
[159.5, 50.6], [175.0, 82.5], [166.8, 57.2], [176.5, 87.8],
[170.2, 72.8], [174.0, 54.5], [173.0, 59.8], [179.9, 67.3],
[170.5, 67.8], [160.0, 47.0], [154.4, 46.2], [162.0, 55.0],
[176.5, 83.0], [160.0, 54.4], [152.0, 45.8], [162.1, 53.6],
[170.0, 73.2], [160.2, 52.1], [161.3, 67.9], [166.4, 56.6],
[168.9, 62.3], [163.8, 58.5], [167.6, 54.5], [160.0, 50.2],
[161.3, 60.3], [167.6, 58.3], [165.1, 56.2], [160.0, 50.2],
[170.0, 72.9], [157.5, 59.8], [167.6, 61.0], [160.7, 69.1],
[163.2, 55.9], [152.4, 46.5], [157.5, 54.3], [168.3, 54.8],
[180.3, 60.7], [165.5, 60.0], [165.0, 62.0], [164.5, 60.3]
]
}
scatter_plot_view = ScatterPlot(
width='500px',
height='500px',
engine='highcharts',
title='Scatter Plot',
subtitle='Scatter Plot',
x_axis_title='Height',
x_axis_units='cm',
y_axis_title='Weight',
y_axis_units='kg',
series=[
male_dataset,
female_dataset
]
)
context = {
'scatter_plot_view': scatter_plot_view,
}
**Template Example**
::
{% load tethys_gizmos %}
{% gizmo scatter_plot_view %}
"""
def __init__(self, series=[], height='500px', width='500px', engine='d3', title='', subtitle='',
x_axis_title='', x_axis_units='', y_axis_title='', y_axis_units='', **kwargs):
"""
Constructor
"""
# Initialize super class
super(ScatterPlot, self).__init__(height=height, width=width, engine=engine)
chart = kwargs.pop('chart', None)
x_axis = {}
y_axis = {}
if not chart:
chart = {
'type': 'scatter',
'zoomType': 'xy'
}
if x_axis_title:
x_axis = {
'title': {
'enabled': True,
'text': '{0} ({1})'.format(x_axis_title, x_axis_units)
}
}
if y_axis_title:
y_axis = {
'title': {
'enabled': True,
'text': '{0} ({1})'.format(y_axis_title, y_axis_units)
}
}
tooltip_format = {
'headerFormat': '<b>{series.name}</b><br/>',
'pointFormat': '{point.x} %s: {point.y} %s' % (x_axis_units, y_axis_units)
}
# Initialize super class
self.plot_object = PlotObject(chart=chart, title=title, subtitle=subtitle, series=series,
x_axis=x_axis, y_axis=y_axis, tooltip_format=tooltip_format, **kwargs)
class PiePlot(PlotViewBase):
"""
Use to create a pie plot visualization.
Attributes:
series(list, required): A list of series dictionaries.
height(str): Height of the plot element. Any valid css unit of length.
width(str): Width of the plot element. Any valid css unit of length.
engine(str): The plot engine to be used for rendering, either 'd3' or 'highcharts'. Defaults to 'd3'.
title(str): Title of the plot.
subtitle(str): Subtitle of the plot.
**Controller Example**
::
from tethys_sdk.gizmos import PieChart
pie_plot_view = PiePlot(
height='500px',
width='500px',
engine='highcharts',
title='Pie Chart',
subtitle='Pie Chart',
series=[
{'name': 'Firefox', 'value': 45.0},
{'name': 'IE', 'value': 26.8},
{'name': 'Chrome', 'value': 12.8},
{'name': 'Safari', 'value': 8.5},
{'name': 'Opera', 'value': 8.5},
{'name': 'Others', 'value': 0.7}
]
)
context = {
'pie_plot_view': pie_plot_view,
}
**Template Example**
::
{% load tethys_gizmos %}
{% gizmo pie_plot_view %}
"""
def __init__(self, series=[], height='500px', width='500px', engine='d3', title='', subtitle='', **kwargs):
"""
Constructor
Args:
"""
# Initialize super class
super(PiePlot, self).__init__(height=height, width=width, engine=engine)
chart = kwargs.pop('chart', None)
if not chart:
chart = {
'plotShadow': False
}
plotOptions = {
'pie': {
'allowPointSelect': True,
'cursor': 'pointer',
'dataLabels': {
'enabled': False
},
'showInLegend': True
}
}
tooltip_format = {
'pointFormat': '{series.name}: <b>{point.percentage:.1f}%</b>'
}
# Initialize super class
self.plot_object = PlotObject(chart=chart, title=title, subtitle=subtitle, series=series,
plotOptions=plotOptions, tooltip_format=tooltip_format, **kwargs)
class BarPlot(PlotViewBase):
"""
Bar Plot
Displays as either a bar or column chart.
Attributes:
series(list, required): A list of series dictionaries.
height(str): Height of the plot element. Any valid css unit of length.
width(str): Width of the plot element. Any valid css unit of length.
engine(str): The plot engine to be used for rendering, either 'd3' or 'highcharts'. Defaults to 'd3'.
title(str): Title of the plot.
subtitle(str): Subtitle of the plot.
horizontal(bool): If True, bars are displayed horizontally, otherwise they are displayed vertically.
categories(list): A list of category titles, one for each bar.
axis_title(str): Title of the axis.
axis_units(str): Units of the axis.
y_min(int,float): Minimum value of y axis.
**Controller Example**
::
from tethys_sdk.gizmos import BarPlot
bar_plot_view = BarPlot(
height='500px',
width='500px',
engine='highcharts',
title='Bar Chart',
subtitle='Bar Chart',
vertical=True,
categories=[
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'
],
axis_units='millions',
axis_title='Population',
series=[{
'name': "Year 1800",
'data': [100, 31, 635, 203, 275, 487, 872, 671, 736, 568, 487, 432]
}, {
'name': "Year 1900",
'data': [133, 200, 947, 408, 682, 328, 917, 171, 482, 140, 176, 237]
}, {
'name': "Year 2000",
'data': [764, 628, 300, 134, 678, 200, 781, 571, 773, 192, 836, 172]
}, {
'name': "Year 2008",
'data': [973, 914, 500, 400, 349, 108, 372, 726, 638, 927, 621, 364]
}
]
)
context = {
'bar_plot_view': bar_plot_view,
}
**Template Example**
::
{% load tethys_gizmos %}
{% gizmo bar_plot_view %}
"""
def __init__(self, series=[], height='500px', width='500px', engine='d3', title='', subtitle='',
horizontal=False, categories=[], axis_title='', axis_units='', group_tools=True,
y_min=0, **kwargs):
"""
Constructor
"""
# Initialize super class
super(BarPlot, self).__init__(height=height, width=width, engine=engine)
chart = kwargs.pop('chart', None)
y_axis = kwargs.pop('y_axis', None)
plotOptions = {}
tooltip_format = {}
if not chart:
if not horizontal:
chart = {
'type': 'column'
}
plotOptions = {
'column': {
'pointPadding': 0.2,
'borderWidth': 0
}
}
else:
chart = {
'type': 'bar'
}
plotOptions = {
'bar': {
'dataLabels': {
'enabled': True
}
}
}
x_axis = {
'categories': categories,
'crosshair': True
}
if not y_axis:
if axis_units:
y_axis = {
'min': y_min,
'title': {
'text': '{0} ({1})'.format(axis_title, axis_units)
}
}
else:
y_axis = {
'min': y_min,
'title': {
'text': axis_title
}
}
if group_tools:
tooltip_format = {
'headerFormat': '<span style="font-size:10px">{point.key}</span><table>',
'pointFormat': '<tr><td style="color:{series.color};padding:0">{series.name}: </td>'
+ '<td style="padding:0"><b>{point.y:.1f} %s </b></td></tr>' % (axis_units),
'footerFormat': '</table>',
'shared': True,
'useHTML': True
}
# Initialize super class
self.plot_object = PlotObject(chart=chart, title=title, subtitle=subtitle, series=series,
plotOptions=plotOptions, tooltip_format=tooltip_format, x_axis=x_axis,
y_axis=y_axis, **kwargs)
class TimeSeries(PlotViewBase):
"""
Use to create a timeseries plot visualization
Attributes:
series(list, required): A list of series dictionaries.
height(str): Height of the plot element. Any valid css unit of length.
width(str): Width of the plot element. Any valid css unit of length.
engine(str): The plot engine to be used for rendering, either 'd3' or 'highcharts'. Defaults to 'd3'.
title(str): Title of the plot.
subtitle(str): Subtitle of the plot.
y_axis_title(str): Title of the axis.
y_axis_units(str): Units of the axis.
**Controller Example**
::
from tethys_sdk.gizmos import TimeSeries
timeseries_plot = TimeSeries(
height='500px',
width='500px',
engine='highcharts',
title='Irregular Timeseries Plot',
y_axis_title='Snow depth',
y_axis_units='m',
series=[{
'name': 'Winter 2007-2008',
'data': [
[datetime(2008, 12, 2), 0.8],
[datetime(2008, 12, 9), 0.6],
[datetime(2008, 12, 16), 0.6],
[datetime(2008, 12, 28), 0.67],
[datetime(2009, 1, 1), 0.81],
[datetime(2009, 1, 8), 0.78],
[datetime(2009, 1, 12), 0.98],
[datetime(2009, 1, 27), 1.84],
[datetime(2009, 2, 10), 1.80],
[datetime(2009, 2, 18), 1.80],
[datetime(2009, 2, 24), 1.92],
[datetime(2009, 3, 4), 2.49],
[datetime(2009, 3, 11), 2.79],
[datetime(2009, 3, 15), 2.73],
[datetime(2009, 3, 25), 2.61],
[datetime(2009, 4, 2), 2.76],
[datetime(2009, 4, 6), 2.82],
[datetime(2009, 4, 13), 2.8],
[datetime(2009, 5, 3), 2.1],
[datetime(2009, 5, 26), 1.1],
[datetime(2009, 6, 9), 0.25],
[datetime(2009, 6, 12), 0]
]
}]
)
context = {
'timeseries_plot': timeseries_plot,
}
**Template Example**
::
{% load tethys_gizmos %}
{% gizmo timeseries_plot %}
"""
def __init__(self, series=[], height='500px', width='500px', engine='d3', title='', subtitle='', y_axis_title='',
y_axis_units='', y_min=0, **kwargs):
"""
Constructor
"""
# Initialize super class
super(TimeSeries, self).__init__(height=height, width=width, engine=engine)
chart = kwargs.pop('chart', None)
x_axis = kwargs.pop('x_axis', None)
y_axis = kwargs.pop('y_axis', None)
if not chart:
chart = {
'type': 'area',
'zoomType': 'x'
}
if not x_axis:
x_axis = {
'type': 'datetime'
}
if not y_axis:
y_axis = {
'title': {
'text': '{0} ({1})'.format(y_axis_title, y_axis_units)
},
'min': y_min
}
tooltip_format = {
'pointFormat': '{point.y} %s' % (y_axis_units)
}
# Initialize super class
self.plot_object = PlotObject(chart=chart, title=title, subtitle=subtitle, series=series,
x_axis=x_axis, y_axis=y_axis, tooltip_format=tooltip_format, **kwargs)
class AreaRange(PlotViewBase):
"""
Use to create a area range plot visualization.
Attributes:
series(list, required): A list of series dictionaries.
height(str): Height of the plot element. Any valid css unit of length.
width(str): Width of the plot element. Any valid css unit of length.
engine(str): The plot engine to be used for rendering, either 'd3' or 'highcharts'. Defaults to 'd3'.
title(str): Title of the plot.
subtitle(str): Subtitle of the plot.
y_axis_title(str): Title of the axis.
y_axis_units(str): Units of the axis.
**Controller Example**
::
from tethys_sdk.gizmos import AreaRange
averages = [
[datetime(2009, 7, 1), 21.5], [datetime(2009, 7, 2), 22.1], [datetime(2009, 7, 3), 23],
[datetime(2009, 7, 4), 23.8], [datetime(2009, 7, 5), 21.4], [datetime(2009, 7, 6), 21.3],
[datetime(2009, 7, 7), 18.3], [datetime(2009, 7, 8), 15.4], [datetime(2009, 7, 9), 16.4],
[datetime(2009, 7, 10), 17.7], [datetime(2009, 7, 11), 17.5], [datetime(2009, 7, 12), 17.6],
[datetime(2009, 7, 13), 17.7], [datetime(2009, 7, 14), 16.8], [datetime(2009, 7, 15), 17.7],
[datetime(2009, 7, 16), 16.3], [datetime(2009, 7, 17), 17.8], [datetime(2009, 7, 18), 18.1],
[datetime(2009, 7, 19), 17.2], [datetime(2009, 7, 20), 14.4],
[datetime(2009, 7, 21), 13.7], [datetime(2009, 7, 22), 15.7], [datetime(2009, 7, 23), 14.6],
[datetime(2009, 7, 24), 15.3], [datetime(2009, 7, 25), 15.3], [datetime(2009, 7, 26), 15.8],
[datetime(2009, 7, 27), 15.2], [datetime(2009, 7, 28), 14.8], [datetime(2009, 7, 29), 14.4],
[datetime(2009, 7, 30), 15], [datetime(2009, 7, 31), 13.6]
]
ranges = [
[datetime(2009, 7, 1), 14.3, 27.7], [datetime(2009, 7, 2), 14.5, 27.8], [datetime(2009, 7, 3), 15.5, 29.6],
[datetime(2009, 7, 4), 16.7, 30.7], [datetime(2009, 7, 5), 16.5, 25.0], [datetime(2009, 7, 6), 17.8, 25.7],
[datetime(2009, 7, 7), 13.5, 24.8], [datetime(2009, 7, 8), 10.5, 21.4], [datetime(2009, 7, 9), 9.2, 23.8],
[datetime(2009, 7, 10), 11.6, 21.8], [datetime(2009, 7, 11), 10.7, 23.7], [datetime(2009, 7, 12), 11.0, 23.3],
[datetime(2009, 7, 13), 11.6, 23.7], [datetime(2009, 7, 14), 11.8, 20.7], [datetime(2009, 7, 15), 12.6, 22.4],
[datetime(2009, 7, 16), 13.6, 19.6], [datetime(2009, 7, 17), 11.4, 22.6], [datetime(2009, 7, 18), 13.2, 25.0],
[datetime(2009, 7, 19), 14.2, 21.6], [datetime(2009, 7, 20), 13.1, 17.1], [datetime(2009, 7, 21), 12.2, 15.5],
[datetime(2009, 7, 22), 12.0, 20.8], [datetime(2009, 7, 23), 12.0, 17.1], [datetime(2009, 7, 24), 12.7, 18.3],
[datetime(2009, 7, 25), 12.4, 19.4], [datetime(2009, 7, 26), 12.6, 19.9], [datetime(2009, 7, 27), 11.9, 20.2],
[datetime(2009, 7, 28), 11.0, 19.3], [datetime(2009, 7, 29), 10.8, 17.8], [datetime(2009, 7, 30), 11.8, 18.5],
[datetime(2009, 7, 31), 10.8, 16.1]
]
area_range_plot_object = AreaRange(
title='July Temperatures',
y_axis_title='Temperature',
y_axis_units='*C',
width='500px',
height='500px',
series=[{
'name': 'Temperature',
'data': averages,
'zIndex': 1,
'marker': {
'lineWidth': 2,
}
}, {
'name': 'Range',
'data': ranges,
'type': 'arearange',
'lineWidth': 0,
'linkedTo': ':previous',
'fillOpacity': 0.3,
'zIndex': 0
}]
)
context = {
'area_range_plot_object': area_range_plot_object,
}
**Template Example**
::
{% load tethys_gizmos %}
{% gizmo area_range_plot_object %}
""" # noqa: E501
def __init__(self, series=[], height='500px', width='500px', engine='d3', title='', subtitle='',
y_axis_title='', y_axis_units='', **kwargs):
"""
Constructor
"""
# Initialize super class
super(AreaRange, self).__init__(height=height, width=width, engine=engine)
chart = kwargs.pop('chart', None)
x_axis = kwargs.pop('x_axis', None)
y_axis = kwargs.pop('y_axis', None)
if not chart:
chart = {
}
if not x_axis:
x_axis = {
'type': 'datetime'
}
if not y_axis:
y_axis = {
'title': {
'text': '{0} ({1})'.format(y_axis_title, y_axis_units)
}
}
tooltip_format = {
'crosshairs': True,
'shared': True,
'valueSuffix': y_axis_units
}
# Initialize super class
self.plot_object = PlotObject(chart=chart, title=title, subtitle=subtitle, series=series,
x_axis=x_axis, y_axis=y_axis, tooltip_format=tooltip_format, **kwargs)
class HeatMap(PlotViewBase):
"""
Use to create a heat map visualization.
Attributes:
series(list, required): A list of series dictionaries.
height(str): Height of the plot element. Any valid css unit of length.
width(str): Width of the plot element. Any valid css unit of length.
engine(str): The plot engine to be used for rendering, either 'd3' or 'highcharts'. Defaults to 'd3'.
title(str): Title of the plot.
subtitle(str): Subtitle of the plot.
        x_categories(list): Labels for the categories on the x axis.
        y_categories(list): Labels for the categories on the y axis.
        tooltip_phrase_one(str): Phrase inserted in the tooltip between the x category and the cell value.
        tooltip_phrase_two(str): Phrase inserted in the tooltip between the cell value and the y category.
**Controller Example**
::
from tethys_sdk.gizmos import HeatMap
sales_data = [
[0, 0, 10], [0, 1, 19], [0, 2, 8], [0, 3, 24], [0, 4, 67], [1, 0, 92],
[1, 1, 58], [1, 2, 78], [1, 3, 117], [1, 4, 48], [2, 0, 35], [2, 1, 15],
[2, 2, 123], [2, 3, 64], [2, 4, 52], [3, 0, 72], [3, 1, 132], [3, 2, 114],
[3, 3, 19], [3, 4, 16], [4, 0, 38], [4, 1, 5], [4, 2, 8], [4, 3, 117],
[4, 4, 115], [5, 0, 88], [5, 1, 32], [5, 2, 12], [5, 3, 6], [5, 4, 120],
[6, 0, 13], [6, 1, 44], [6, 2, 88], [6, 3, 98], [6, 4, 96], [7, 0, 31],
[7, 1, 1], [7, 2, 82], [7, 3, 32], [7, 4, 30], [8, 0, 85], [8, 1, 97],
[8, 2, 123], [8, 3, 64], [8, 4, 84], [9, 0, 47], [9, 1, 114], [9, 2, 31],
[9, 3, 48], [9, 4, 91]
]
heat_map_plot = HeatMap(
width='500px',
height='500px',
title='Sales per employee per weekday',
x_categories=['Alexander', 'Marie', 'Maximilian', 'Sophia', 'Lukas', 'Maria', 'Leon', 'Anna', 'Tim', 'Laura'],
y_categories=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'],
tooltip_phrase_one='sold',
tooltip_phrase_two='items on',
colorAxis={
'min': 0,
'minColor': '#FFFFFF',
'maxColor': '.getOptions().colors[0]'
},
legend={
'align': 'right',
'layout': 'vertical',
'margin': 0,
'verticalAlign': 'top',
'y': 25,
'symbolHeight': 280
},
series=[{
'name': 'Sales per employee',
'borderWidth': 1,
'data': sales_data,
'dataLabels': {
'enabled': True,
'color': '#000000'
}
}]
)
context = {
'heat_map_plot': heat_map_plot,
}
**Template Example**
::
{% load tethys_gizmos %}
{% gizmo heat_map_plot %}
""" # noqa: E501
def __init__(self, series=[], height='500px', width='500px', engine='d3', title='', subtitle='', x_categories=[],
y_categories=[], tooltip_phrase_one='', tooltip_phrase_two='', **kwargs):
"""
Constructor
"""
# Initialize super class
super(HeatMap, self).__init__(height=height, width=width, engine=engine)
chart = kwargs.pop('chart', None)
if not chart:
chart = {
'type': 'heatmap',
'marginTop': 40,
'marginBottom': 80
}
x_axis = {
'categories': x_categories
}
y_axis = {
'categories': y_categories,
'title': 'null'
}
tooltip_format = {
'formatter': 'function() {return "<b>" + this.series.xAxis.categories[this.point.x] + "</b> %s <br><b>" + '
'this.point.value + "</b> %s <br><b>" + this.series.yAxis.categories[this.point.y] + "</b>";'
% (tooltip_phrase_one, tooltip_phrase_two)
}
# Initialize super class
self.plot_object = PlotObject(chart=chart, title=title, subtitle=subtitle, series=series, x_axis=x_axis,
y_axis=y_axis, tooltip_format=tooltip_format, **kwargs)
|
from Graph import Graph
from Graph import Tree
NUMBER_OF_NODES = 512
class NineTailModel:
def __init__(self):
        edges = getEdges()
# Create a graph
vertices = [x for x in range(NUMBER_OF_NODES)]
graph = Graph(vertices, edges)
        # Obtain a BFS tree rooted at the target node
self.tree = graph.bfs(511)
def getShortestPath(self, nodeIndex):
return self.tree.getPath(nodeIndex)
def printNode(node):
for i in range(9):
if i % 3 != 2:
print(node[i], end = " ")
else:
print(node[i])
print()
# Create all edges for the graph
def getEdges():
edges = [] # Store edges
for u in range(NUMBER_OF_NODES):
for k in range(9):
node = getNode(u) # Get the node for vertex u
if node[k] == 'H':
v = getFlippedNode(node, k)
# Add edge (v, u) for a legal move from node u to node v
edges.append([v, u])
return edges
def getFlippedNode(node, position):
row = position // 3
column = position % 3
flipACell(node, row, column)
flipACell(node, row - 1, column)
flipACell(node, row + 1, column)
flipACell(node, row, column - 1)
flipACell(node, row, column + 1)
return getIndex(node)
def getIndex(node):
result = 0
for i in range(9):
if node[i] == 'T':
result = result * 2 + 1
else:
result = result * 2 + 0
return result
def flipACell(node, row, column):
if row >= 0 and row <= 2 and column >= 0 and column <= 2:
# Within the boundary
if node[row * 3 + column] == 'H':
node[row * 3 + column] = 'T' # Flip from H to T
else:
node[row * 3 + column] = 'H' # Flip from T to H
def getNode(index):
result = 9 * [' ']
for i in range(9):
digit = index % 2
if digit == 0:
result[8 - i] = 'H'
else:
result[8 - i] = 'T'
index = index // 2
return result
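# The block below is a hedged usage sketch, not part of the original module: it
# only exercises the getNode/getIndex encoding defined above, where each of the
# 512 states is the nine-character H/T board read as a binary number (T = 1).
if __name__ == "__main__":
    print(getNode(0))    # index 0 is the all-heads board
    print(getNode(511))  # index 511 is the all-tails target of the BFS
    # getIndex inverts getNode for every state.
    assert all(getIndex(getNode(i)) == i for i in range(NUMBER_OF_NODES))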
|
#!/usr/bin/env python
import sys
import os
def print_context(frame, assembly_file, address):
address_without_0x = address.replace("0x", "").replace("[", "").replace("]", "")
function = "NULL"
instruction = "NULL"
for line in open(assembly_file):
if len(line) >= 4 and line[0] == "0" and line[1] == "0" and line[2] == "0":
function = line.strip().split()[1].replace(":", "").replace("<", "")
elif line.find(address_without_0x + ":") >= 0:
instruction = line.strip()
break
print("#" + str(frame) + " " + address + " " + function + " " + instruction)
def main(argc, argv):
#print(argv)
if argc != 3:
print("Usage: disassemble-and-get-stack.py -e executable < stack")
sys.exit()
executable = argv[2]
#print(executable)
assembly_file = executable + ".s"
os.system("objdump -d " + executable + " > " + assembly_file)
frame = 0
for line in sys.stdin:
tokens = line.split()
address = tokens[0]
if len(tokens) > 1:
address = tokens[1]
print_context(frame, assembly_file, address)
frame += 1
if __name__ == "__main__":
main(len(sys.argv), sys.argv)
|
import os # noqa: F401
from typing import List, Optional, Tuple, Union
from .types import KNOWN_LICENSES, License, trigrams
def guess_text(license_text: str) -> Optional[License]:
"""
    Returns the best-matching license if it is more than 80% similar to a known one.
    This logic mirrors that of https://github.com/sol/infer-license/ (Haskell).
"""
p = probabilities(license_text)
# print("\n".join(repr(x) for x in p))
if p and p[0][1] > 0.80:
return p[0][0]
return None
def guess_file(filename: Union[str, "os.PathLike[str]"]) -> Optional[License]:
try:
with open(filename, encoding="utf-8") as f:
data = f.read()
except UnicodeDecodeError:
with open(filename, encoding="latin-1") as f:
data = f.read()
return guess_text(data)
def probabilities(license_text: str) -> List[Tuple[License, float]]:
"""
Returns potential licenses and their probabilities, in decreasing order.
"""
probabilities: List[Tuple[License, float]] = []
tg = trigrams(license_text)
for license in KNOWN_LICENSES:
count = sum(1 for t in license.trigrams if t in tg)
f = count / max(len(license.trigrams), len(tg))
probabilities.append((license, f))
probabilities.sort(key=lambda i: i[1], reverse=True)
return probabilities
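# A hedged usage sketch (not part of the original module): guess_file returns
# the best-matching License object, or None, for each path given on the CLI.
if __name__ == "__main__":
    import sys
    for filename in sys.argv[1:]:
        print(filename, guess_file(filename))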
|
"""
Functions for loading prepackaged datasets
Authors: Matthew Bernstein <mbernstein@morgridge.org>
"""
import pkg_resources as pr
import json
from os.path import join
import anndata
def load_dataset(dataset_id):
"""Load a prepackaged spatial gene expression dataset.
Parameters
----------
dataset_id : string, Options: {'GSM4284326_P10_ST_rep2'}
The ID of the dataset to load.
Returns
-------
adata : AnnData
The spatial gene expression dataset. The rows and column
coordinates are stored in `adata.obs['row']` and `adata.obs['col']`
respectively. The clusters are stored in `adata.obs['cluster']`.
The gene expression matrix `adata.X` is in units of Dino normalized
expression values.
"""
resource_package = __name__
if dataset_id == 'GSM4284326_P10_ST_rep2':
data_f = pr.resource_filename(
resource_package,
join('datasets', 'GSM4284326_P10_ST_rep2.h5ad')
)
adata = anndata.read_h5ad(data_f)
return adata
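# A hedged usage sketch (not part of the original module); it loads the single
# packaged dataset named in the docstring and inspects the documented fields.
if __name__ == '__main__':
    adata = load_dataset('GSM4284326_P10_ST_rep2')
    print(adata.shape)
    print(adata.obs[['row', 'col', 'cluster']].head())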
|
from gsrest.model.common import ConvertedValues
class Block:
""" Model representing block header fields and summary statistics """
def __init__(self, height, block_hash, no_txs, timestamp):
self.height = height
self.block_hash = block_hash
self.no_txs = no_txs
self.timestamp = timestamp
@staticmethod
def from_row(row):
return Block(row.height, row.block_hash.hex(),
row.no_transactions, row.timestamp)
def to_dict(self):
return self.__dict__
class BlockTxSummary:
""" Model representing block transaction summary statistics """
def __init__(self, tx_hash, no_inputs, no_outputs, total_input,
total_output):
self.tx_hash = tx_hash
self.no_inputs = no_inputs
self.no_outputs = no_outputs
self.total_input = total_input
self.total_output = total_output
@staticmethod
def from_row(row, rates):
return BlockTxSummary(row.tx_hash.hex(),
row.no_inputs,
row.no_outputs,
ConvertedValues(row.total_input,
rates).to_dict(),
ConvertedValues(row.total_output,
rates).to_dict()
)
def to_dict(self):
return self.__dict__
class BlockTxs:
""" Model representing all transactions of a given block """
def __init__(self, height, txs):
self.height = height
self.txs = txs
@staticmethod
def from_row(row, rates):
tx_summaries = [BlockTxSummary.from_row(tx, rates).to_dict()
for tx in row.txs]
return BlockTxs(row.height, tx_summaries)
def to_dict(self):
return self.__dict__
|
from workon.utils.auth import *
from workon.utils.cache import *
from workon.utils.celery import *
from workon.utils.color import *
from workon.utils.date import *
from workon.utils.debug import *
from workon.utils.email import *
from workon.utils.file import *
from workon.utils.hashtag import *
from workon.utils.html import *
from workon.utils.ical import *
from workon.utils.image import *
from workon.utils.ip_address import *
from workon.utils.number import *
from workon.utils.memoize import *
from workon.utils.models import *
from workon.utils.price import *
from workon.utils.pagination import *
from workon.utils.request import *
from workon.utils.route import *
from workon.utils.security import *
from workon.utils.string import *
from workon.utils.template import *
from workon.utils.url import *
from workon.utils.user import *
from workon.utils.types import *
from workon.contrib.tracker.utils import *
|
from turkey import Turkey
import random
class DuckAdapter(Turkey):
def __init__(self, duck):
self.duck = duck
def gobble(self):
self.duck.quack()
def fly(self):
if random.randrange(0, 5) == 0:
self.duck.fly()
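# A hedged usage sketch: the WildDuck class below is hypothetical and only
# stands in for any object exposing quack() and fly(), so the adapter can be
# used wherever a Turkey is expected.
if __name__ == "__main__":
    class WildDuck:
        def quack(self):
            print("Quack")

        def fly(self):
            print("Flying a long distance")

    adapter = DuckAdapter(WildDuck())
    adapter.gobble()  # delegates to quack()
    adapter.fly()     # delegates to fly() roughly one time in five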
|
from rest_framework import serializers
from accounts.api.serializers import UserDisplaySerializer
from employees.models import Employee
class StdImageFieldSerializer(serializers.ImageField):
"""
Get all the variations of the StdImageField
"""
def to_native(self, obj):
return self.get_variations_urls(obj)
def to_representation(self, obj):
return self.get_variations_urls(obj)
def get_variations_urls(self, obj):
"""
Get all the logo urls.
"""
# Initiate return object
return_object = {}
# Get the field of the object
field = obj.field
# A lot of ifs going around, first check if it has the field variations
if hasattr(field, 'variations'):
# Get the variations
variations = field.variations
# Go through the variations dict
for key in variations.keys():
# Just to be sure if the stdimage object has it stored in the obj
if hasattr(obj, key):
# get the by stdimage properties
field_obj = getattr(obj, key, None)
if field_obj and hasattr(field_obj, 'url'):
# store it, with the name of the variation type into our return object
return_object[key] = super(StdImageFieldSerializer, self).to_representation(field_obj)
# Also include the original (if possible)
try:
if hasattr(obj, 'url'):
return_object['original'] = super(StdImageFieldSerializer, self).to_representation(obj)
except ValueError:
pass
return return_object
class EmployeeModelSerializer(serializers.ModelSerializer):
user = UserDisplaySerializer(read_only=True)
primary_activity = serializers.SerializerMethodField()
secondary_activity = serializers.SerializerMethodField()
type = serializers.SerializerMethodField()
image = StdImageFieldSerializer()
def get_primary_activity(self, obj):
return Employee.ACTIVITY_DICT[obj.primary_activity]
def get_secondary_activity(self, obj):
return Employee.ACTIVITY_DICT[obj.secondary_activity]
def get_type(self, obj):
return Employee.EMPLOYEE_DICT[obj.type]
class Meta:
model = Employee
fields = "__all__"
|
from .resnet import resnet
from .vgg16 import vgg16
|
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class ReacherEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
mujoco_env.MujocoEnv.__init__(self, 'aubo_i5.xml', 2)
def _step(self, a):
vec = self.get_body_com("right_gripper_link")-self.get_body_com("target")
reward_dist = - np.linalg.norm(vec)
reward_ctrl = - np.square(a).sum()
reward = reward_dist + reward_ctrl
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
        done = abs(reward_dist) < 0.05
return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
def viewer_setup(self):
self.viewer.cam.trackbodyid = 13
self.viewer.cam.elevation = -40
self.viewer.cam.distance = self.model.stat.extent * 1.5
def reset_model(self):
qpos = self.init_qpos
while True:
self.goal = self.np_random.uniform(low=-.1, high=.1, size=3)
if np.linalg.norm(self.goal) < 2:
break
qpos[-3:] = self.goal
qvel = self.init_qvel
qvel[-3:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
theta = self.model.data.qpos.flat[:2]
return np.concatenate([
np.cos(theta),
np.sin(theta),
self.model.data.qpos.flat[2:],
self.model.data.qvel.flat[:2],
self.get_body_com("right_gripper_link") - self.get_body_com("target")
])
|
# -*- coding: utf-8 -*-
"""
This module implements tools to perform PCA transformation.
The main element is the **PcaHandler** class which is a user interface.
It performs direct and inverse PCA transformation for 3D data.
**Dimension_Reduction** is the background function which performs PCA
while the **EigenEstimate** function improves the estimation of PCA
eigenvalues.
"""
import time
import logging
import numpy as np
from . import sec2str
_logger = logging.getLogger(__name__)
def EigenEstimate(l, Ns):
""" Computes an estimate of the covariance eigenvalues given the sample
covariance eigenvalues. The Stein estimator coupled with isotonic
regression has been used here.
For more information, have a look at:
*
* MESTRE, Xavier. Improved estimation of eigenvalues and
eigenvectors of covariance matrices using their sample
estimates. IEEE Transactions on Information Theory, 2008,
      vol. 54, no 11, p. 5113-5129.
Arguments
---------
l: numpy array
Sample eigenvalues
Ns: int
Number of observations
Returns
-------
numpy array
Estimated covariance matrix eigenvalues.
float
Estimated Gaussian noise standard deviation.
int
Estimated dimension of the signal subspace.
"""
if l.ndim != 1:
raise ValueError('Input array l should have one dimension.')
# Get data dimension
M = l.size
# Initial data ----------------------
#
# The initial data consists in a table
#
# +-----+-----+-----+---------+
# | l_0 | l_1 | ... | l_{M-1} |
# +-----+-----+-----+---------+
# | a_0 | a_1 | ... | a_{M-1} |
# +-----+-----+-----+---------+
    # where l (resp. a) are latent variables (resp. denominator of Stein
# estimator).
# Stein estimator
table = np.stack((l, np.zeros(l.size)), axis=0)
for col in range(M):
# That's an (M, )-array filled with 1 but at col position.
ncol = np.logical_not(np.in1d(range(M), col))
table[1, col] = 1 + (1 / M) * np.sum(
(l[col] + l[ncol]) / (l[col] - l[ncol])
)
# table[1, col] = Ns - M + 1 + 2 * l[col] * np.sum(
# 1/(l[col] - l[ncol])
# )
# Procedure 1st step ----------------------
#
# Here, the goal is to make all a_i positive.
#
# 1. Start at the right of the table and search to the left until the
# first pair (l_j, a_j) with negative a_j is reached.
    # 2. Pool this pair with the pair immediately on the left of it,
    #    replacing them with the pair (l_j + l_{j-1}, a_j + a_{j-1}), to
    #    form a list which is one pair shorter.
# 3. Repeat 1 and 2 until all a_j are positive.
#
# We will denote here M1 the length of the modified table.
#
# The back_pos variable is a list of lists. Its length will be M1 at the
# end of this step. back_pos[j1] for j1 smaller than M1 will be all the
# columns of the initial table that were used to create the column j1
# of the new table.
back_pos = [[i] for i in range(M)]
while (np.any(table[1, :] < 0)): # there are <0 alphai
# Initial cursor position in the table.
cpos = table.shape[1] - 1
# Searching the position of the negative a_i.
while (table[1, cpos] > 0):
cpos = cpos - 1
# The sum of the two pairs.
sum_pairs = np.sum(table[:, cpos-1:cpos+1], axis=1)[:, np.newaxis]
        # Depending on the case, the arrays to stack are different.
if cpos == table.shape[1] - 1: # That's the last pair.
hstack_ar = (table[:, :cpos-1], sum_pairs)
elif cpos == 1: # That's the first pair
hstack_ar = (sum_pairs, table[:, cpos+1:])
else: # The cursor is in the middle of table.
hstack_ar = (table[:, :cpos-1], sum_pairs, table[:, cpos+1:])
# Create new table
table = np.hstack(hstack_ar)
# Modify index list.
back_pos[cpos-1].extend(back_pos[cpos])
del back_pos[cpos]
# Procedure 2nd step ----------------------
#
# Here, the goal is to re-order the ratios l_j/a_j so that they are
# decreasing.
#
# To that end, a row will be added to table, which is the ratio of the
# first and the second lines.
#
    # A pair (l_j, a_j) is called a violating pair if the ratio l_j/a_j is not
# larger than l_{j+1}/a_{j+1}.
#
# 1. Start at the bottom of the list found in Step 1 and proceed to the
# left until the first violating pair, say (l_j, a_j), is reached.
# 2. Pool this violating pair with the pair immediately on the right by
# replacing these two pairs and their ratios with the pair
# (l_j+l_{j+1}, a_j+a_{j+1}) and its ratio
# (l_j+l_{j+1})/(a_j+a_{j+1}), forming a new list shorter by one pair.
    # 3. Start at the pair immediately at the right (or the replacing pair
# itself if that's the last one) and proceed to the left until a
# violating pair is found, then repeat 2.
# 4. Repeat 3 until all ratios l_j/a_j are in decreasing order.
#
# In this step, the back_pos variable will be modified in a similar way
# as for Step 1.
table = np.vstack((table, table[0, :] / table[1, :]))
# Current position
cpos = table.shape[1] - 2
    # If cpos gets to -1, it means that no pair is violating.
while cpos >= 0:
while table[2, cpos+1] < table[2, cpos] and cpos >= 0:
cpos = cpos - 1
if cpos >= 0:
# A violating pair was found.
# The pairs are summed.
sum_pairs = np.sum(table[:, cpos:cpos+2], axis=1)[:, np.newaxis]
sum_pairs[2] = sum_pairs[0] / sum_pairs[1]
            # Depending on the case, the arrays to stack are different.
if cpos == table.shape[1] - 2: # That's the before last pair.
hstack_ar = (table[:, :cpos], sum_pairs)
elif cpos == 0: # That's the first pair
hstack_ar = (sum_pairs, table[:, cpos+2:])
else: # The cursor is in the middle of table.
hstack_ar = (table[:, :cpos], sum_pairs, table[:, cpos+2:])
# Create new table
table = np.hstack(hstack_ar)
# Modify index list.
back_pos[cpos].extend(back_pos[cpos+1])
del back_pos[cpos+1]
# Move the cursor to the left if cpos is at the extreme right.
if cpos == table.shape[1] - 1:
cpos = table.shape[1] - 2
    # Procedure 3rd step ----------------------
#
# Each ratio in the final table was obtained by pooling a block of one
# or more consecutive pairs in the original list. To obtain Stein's
# modified estimates, we assign this ratio to all pairs of the block.
# Stein estimate output.
sl = np.zeros(M)
for cnt in range(table.shape[1]):
sl[back_pos[cnt]] = table[2, cnt] * np.ones(len(back_pos[cnt]))
# Sigma and dimension estimation ----------------------
#
    # A thresholding is applied to avoid zero estimates.
sl[sl < 1e-12] = 1e-12
# Noise standard deviation is estimated to be the last Stein estimate.
sig = np.sqrt(sl[-1])
    # The signal dimension is estimated to be the first position such that sl
# is equal to sig.
D = min(back_pos[-1])
return (sl, sig, D)
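# A hedged usage sketch for EigenEstimate, kept as a comment so nothing runs at
# import time; the synthetic data below are assumptions, not part of the module:
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(1000, 3) @ rng.randn(3, 20) + 0.1 * rng.randn(1000, 20)
#     l = np.sort(np.linalg.eigvalsh(np.cov(X.T)))[::-1]  # sample eigenvalues
#     sl, sig, D = EigenEstimate(l, Ns=1000)               # shrunk eigenvalues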
def Dimension_Reduction(Y, mask=None, PCA_th='auto', verbose=True):
"""Reduces the dimension of a multi-band image.
Arguments
---------
Y: (m, n, l) numpy array
The multi-band image where the last axis is the spectral one.
mask: optional, (m, n) numpy array
The spatial sampling mask filled with True where pixels are sampled.
        This is used to correctly remove the data mean.
        Default is a matrix full of True.
PCA_th: optional, str, int
The PCA threshold.
'auto' for automatic estimation.
'max' to keep all components.
        An integer to choose the threshold.
        In case there are fewer samples (N) than the data dimension (l),
        this parameter is overridden to keep a threshold of N-1.
verbose: optional, bool
Prints output if True. Default is True.
Returns
-------
(m, n, PCA_th) numpy array
The data in the reduced subspace.
Its shape is (m, n, PCA_th) where PCA_th is the estimated data
dimension.
dict
        The dictionary containing additional information about the reduction.
See Note.
Note
----
    The InfoOut dictionary contains the following keys:
1. 'H' which is the base of the reduced subspace.
Its shape is (l, PCA_th) where PCA_th is the estimated data
dimension.
2. 'd' which is the evolution of the PCA-eigenvalues after estimation.
3. 'PCA_th' which is the estimated data dimension.
4. 'sigma' which is the estimated Gaussian noise standard deviation.
5. 'Ym' which is a (m, n, l) numpy array where the data mean over bands
is repeated for each spatial location.
"""
if mask is not None and mask.shape != Y.shape[:2]:
raise ValueError('Incoherent mask shape.')
# Default mask is full sampling.
#
if mask is None:
mask = np.ones(Y.shape[:2])
    # Start message
#
if verbose:
print("- PCA transformation -")
start = time.time()
# Store the data dimensions.
#
m, n, M = Y.shape
N = int(mask.sum())
P = m * n
# Remove data mean
#
# Reshape data and remove mean
# Compute the indexes of the non-zeros elements of the flatten mask.
nnz = np.flatnonzero(mask)
    Yr = Y.reshape((n * m, M)).T  # Reshaped data have an additional 'r' in their name.
# Compute the mean along bands of the reshaped data.
Yrm = np.tile(np.mean(Yr[:, nnz], axis=1), (P, 1)).T
Yrwm = Yr - Yrm # Remove mean
# Perform PCA.
#
[d, V] = np.linalg.eigh(np.cov(Yrwm[:, nnz]))
ind = np.argsort(d)[::-1]
d = d[ind]
V = V[:, ind]
    # Keep only the first N-1 elements when there are fewer samples than dimensions.
# N <= M
#
if (N <= M):
_logger.warning('Number of samples is lower than data dimension.')
d = d[:N - 1]
V = V[:, :N - 1]
# Perform Stein isotonic regression
#
_logger.info('Performing Stein regression.')
dout, sigma, Rest = EigenEstimate(d, N)
# Sets the PCA threshold level
#
if N <= M:
Auto = N-1 # np.minimum(N-1, Rest)
Max = N - 1
else:
Auto = Rest
Max = np.minimum(M, N)
if PCA_th == 'auto':
th = Auto
elif PCA_th == 'max':
th = Max
elif PCA_th > Max:
_logger.warning(
'PCA threshold too high. '
'Highest possible value used instead.')
th = Max
else:
th = PCA_th
th = int(th)
_logger.info('Threshold is {}.'.format(th))
# Prepare output.
#
H = V[:, :th]
S = np.dot(H.T, Yrwm).T.reshape((m, n, th))
Yrrm = Yrm.T.reshape((m, n, M))
# Output message
#
if (verbose):
print(
'Dimension reduced from {} to {}.\n'
'Estimated sigma^2 is {:.2e}.\n'
'Done in {}.\n'
'-'.format(M, th, sigma**2, sec2str.sec2str(time.time()-start)))
InfoOut = {'H': H, 'd': dout, 'PCA_th': th, 'sigma': sigma, 'Ym': Yrrm}
return (S, InfoOut)
class PcaHandler:
"""Interface to perform PCA.
The PCA is applied at class initialization based on the input data.
This same operation can be applied afterward to other data using the
:code:`direct` and :code:`inverse` methods.
Attributes
----------
Y: (m, n, l) numpy array
Multi-band data.
Y_PCA: (m, n, PCA_th) numpy array
The data in PCA space.
mask: optional, (m, n) numpy array
Spatial sampling mask.
Default is full sampling.
PCA_transform: optional, bool
Flag that sets if PCA should really be applied. This is useful
        in some cases where PCA has already been applied.
Default is True.
verbose: optional, bool
If True, information is sent to output.
H: (l, PCA_th) numpy array
The subspace base.
Ym: (m, n, l) numpy array
Matrix whose spectra are all composed of the data spectral mean.
PCA_th: int
The estimated data dimension.
InfoOut: dict
        The dictionary containing additional information about the reduction.
See Note.
Note
----
    The InfoOut dictionary contains the following keys:
1. 'H' which is the base of the reduced subspace.
Its shape is (l, PCA_th) where PCA_th is the estimated data
dimension.
2. 'd' which is the evolution of the PCA-eigenvalues after estimation.
3. 'PCA_th' which is the estimated data dimension.
4. 'sigma' which is the estimated Gaussian noise standard deviation.
5. 'Ym' which is a (m, n, l) numpy array where the data mean over bands
is repeated for each spatial location.
"""
def __init__(self, Y, mask=None, PCA_transform=True, PCA_th='auto',
verbose=True):
"""PcaHandler constructor.
Arguments
----------
Y: (m, n, l) numpy array
Multi-band data.
mask: (m, n) numpy array
Spatial sampling mask.
PCA_transform: optional, bool
Flag that sets if PCA should really be applied. This is useful
            in some cases where PCA has already been applied.
Default is True.
verbose: optional, bool
If True, information is sent to output.
"""
_logger.info('Initializing a PcaHandler object.')
# Test PCA_transform
if type(PCA_transform) is not bool:
raise ValueError('The PCA_transform parameter should be boolean.')
# Save everything
self.Y = Y
self.mask = mask
self.PCA_transform = PCA_transform
self.PCA_th = PCA_th
self.verbose = verbose
# Transform data into PCA
if self.PCA_transform:
_logger.info('Performing PCA.')
Y_PCA, InfoOut = Dimension_Reduction(
self.Y,
mask=self.mask,
PCA_th=self.PCA_th,
verbose=self.verbose)
self.H = InfoOut['H']
self.Ym = InfoOut['Ym']
self.PCA_th = InfoOut['PCA_th']
self.InfoOut = InfoOut
else:
_logger.info('Not performing PCA.')
Y_PCA = self.Y.copy()
self.H = np.eye(self.Y.shape[-1])
self.Ym = np.zeros(self.Y.shape)
self.PCA_th = self.Y.shape[-1]
self.InfoOut = None
self.Y_PCA = Y_PCA
def direct(self, X=None):
"""Performs direct PCA transformation.
The input X array can be data to project into the PCA subspace
or None. If input is None (which is default), the output will be
simply self.Y_PCA.
Caution
-------
The input data to transform should have the same shape as the Y
initial data.
Arguments
---------
X: (m, n, l) numpy array
The data to transform into PCA space.
Returns
-------
(m, n, PCA_th) numpy array
Multi-band data in reduced space.
"""
if X is None:
return self.Y_PCA
else:
m, n, B = X.shape
centered_data = (X - self.Ym).reshape((m*n, B)).T
return (self.H.T @ centered_data).T.reshape(
(m, n, self.PCA_th))
def inverse(self, X_PCA):
"""Performs inverse PCA transformation.
Caution
-------
The input data to transform should have the same shape as the
self.Y_PCA transformed data.
Arguments
---------
X_PCA: (m, n, PCA_th) numpy array
The data to transform into data space.
Returns
-------
(m, n, l) numpy array
Multi-band data after inverse transformation.
"""
m, n, _ = X_PCA.shape
M = self.H.shape[0]
X_tmp = X_PCA.reshape((m*n, self.PCA_th)).T
return (self.H @ X_tmp).T.reshape((m, n, M)) + self.Ym
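# A hedged usage sketch (not part of the original module); because this file
# uses relative imports it is meant to be run as part of its package, e.g. via
# ``python -m``. The synthetic low-rank cube below is an assumption.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    low_rank = rng.rand(32, 32, 4) @ rng.rand(4, 50)  # (32, 32, 50) data cube
    noisy = low_rank + 0.01 * rng.randn(32, 32, 50)
    handler = PcaHandler(noisy, PCA_th='auto', verbose=True)
    reduced = handler.direct()           # shape (32, 32, PCA_th)
    restored = handler.inverse(reduced)  # back to shape (32, 32, 50)
    print(reduced.shape, restored.shape)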
|
# -*- coding: utf-8 -*-
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
import logging
import sys
from cached_property import cached_property
import pytest
from goodplay import ansible_support, docker_support, junitxml
from goodplay.context import GoodplayContext
junitxml.patch_pytest_to_strip_file_extensions()
# https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning
logging.captureWarnings(True)
def enable_logging_goodplay_info_to_stdout():
goodplay_stdout_handler = logging.StreamHandler(sys.stdout)
goodplay_stdout_handler.setLevel(logging.INFO)
goodplay_stdout_handler.setFormatter(logging.Formatter())
goodplay_logger = logging.getLogger('goodplay')
goodplay_logger.addHandler(goodplay_stdout_handler)
goodplay_logger.setLevel(logging.DEBUG)
enable_logging_goodplay_info_to_stdout()
def pytest_addoption(parser):
parser.getgroup('goodplay').addoption(
'--use-local-roles', dest='use_local_roles', action='store_true',
help='prefer to use local roles instead of auto-installed requirements')
class GoodplayFailed(Exception):
pass
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if call.excinfo and call.excinfo.typename == 'GoodplayFailed':
report.longrepr = 'Failed: {}'.format(call.excinfo.value)
# - GoodplayPlaybookFile (pytest.File)
# - GoodplayEnvironment (pytest.Collector) -- manage Docker
# - GoodplayPlaybook (pytest.Collector) -- manage Ansible runner
# - GoodplayTest (pytest.Item)
def pytest_collect_file(parent, path):
return GoodplayPlaybookFile.consider_and_create(path, parent)
class GoodplayContextSupport(object):
@cached_property
def ctx(self):
return self.parent.ctx
# generic playbook preparations
class GoodplayPlaybookFile(pytest.File):
def __init__(self, ctx, fspath, parent=None, config=None, session=None):
super(GoodplayPlaybookFile, self).__init__(fspath, parent, config, session)
self.ctx = ctx
@classmethod
def consider_and_create(cls, path, parent):
if ansible_support.is_test_playbook_file(path):
ctx = GoodplayContext(playbook_path=path, config=parent.config)
if ctx.inventory_path:
return GoodplayPlaybookFile(ctx, path, parent)
def collect(self):
try:
environment_names = \
docker_support.environment_names_for_playbook_path(self.ctx.playbook_path)
if environment_names:
for environment_name in environment_names:
yield GoodplayEnvironment(environment_name, self, self.config, self.session)
else:
yield GoodplayEnvironment(None, self, self.config, self.session)
finally:
if self.config.option.collectonly:
self.ctx.release()
def teardown(self):
self.ctx.release()
# environment can be unspecific
class GoodplayEnvironment(GoodplayContextSupport, pytest.Collector):
def __init__(self, environment_name, parent=None, config=None, session=None):
if environment_name:
# let the super class calculate the node id
nodeid = None
else:
nodeid = parent.nodeid
super(GoodplayEnvironment, self).__init__(
environment_name, parent, config, session, nodeid=nodeid)
self.environment_name = environment_name
self.docker_runner = None
def collect(self):
yield GoodplayPlaybook(self.parent.name, self, self.config, self.session)
def setup(self):
self.docker_runner = docker_support.DockerRunner(self.ctx, self.environment_name)
self.docker_runner.setup()
def teardown(self):
if self.docker_runner:
self.docker_runner.teardown()
# environment specific playbook preparations
class GoodplayPlaybook(GoodplayContextSupport, pytest.Collector):
def __init__(self, name, parent=None, config=None, session=None):
super(GoodplayPlaybook, self).__init__(name, parent, config, session, nodeid=parent.nodeid)
self.playbook_runner = None
def collect(self):
for task in self.ctx.playbook.test_tasks:
yield GoodplayTest(task, self)
def setup(self):
self.playbook_runner = self.ctx.playbook.create_runner()
self.playbook_runner.run_async()
def teardown(self):
if self.playbook_runner:
self.playbook_runner.wait()
if self.playbook_runner.failures:
raise GoodplayFailed('\n'.join(self.playbook_runner.failures))
class GoodplayTest(GoodplayContextSupport, pytest.Item):
def __init__(self, task, parent=None, config=None, session=None):
super(GoodplayTest, self).__init__(task.name, parent, config, session)
self.task = task
def __repr__(self):
return "<GoodplayTest '{0}'>".format(self.name)
@cached_property
def playbook_runner(self):
return self.parent.playbook_runner
def setup(self):
self.playbook_runner.wait_for_test_task(self.task)
def runtest(self):
outcome = self.playbook_runner.wait_for_test_task_outcome(self.task)
if outcome in ('skipped', None):
pytest.skip()
elif outcome == 'failed':
raise GoodplayFailed()
|
import graphene
import json
import uuid
from datetime import datetime
class Post(graphene.ObjectType):
title = graphene.String()
content = graphene.String()
class User(graphene.ObjectType):
id = graphene.ID(default_value=str(uuid.uuid4()))
username = graphene.String()
created_at = graphene.DateTime(default_value=datetime.now())
avatar_url = graphene.String()
def resolve_avatar_url(self, info):
return 'https://cloudinary.com/{}/{}'.format(self.username, self.id)
class Query(graphene.ObjectType):
users = graphene.List(User, limit=graphene.Int())
hello = graphene.String()
is_admin = graphene.Boolean()
def resolve_hello(self, info):
return "world"
def resolve_is_admin(self, info):
return True
def resolve_users(self, info, limit=None):
return [
User(id="1", username="Fred", created_at=datetime.now()),
User(id="2", username="Doug", created_at=datetime.now())
][:limit]
class CreateUser(graphene.Mutation):
user = graphene.Field(User)
class Arguments:
username = graphene.String()
def mutate(self, info, username):
user = User(username=username)
return CreateUser(user=user)
class CreatePost(graphene.Mutation):
post = graphene.Field(Post)
class Arguments:
title = graphene.String()
content = graphene.String()
def mutate(self, info, title, content):
if info.context.get('is_anonymous'):
raise Exception('Not authenticated!')
post = Post(title=title, content=content)
return CreatePost(post=post)
class Mutation(graphene.ObjectType):
create_user = CreateUser.Field()
create_post = CreatePost.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
result = schema.execute(
'''
{
users {
id
createdAt
username
avatarUrl
}
}
''',
# context={'is_anonymous': True}
# variable_values={'limit': 1}
)
dictResult = dict(result.data.items())
print(json.dumps(dictResult, indent=2))
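# A further hedged sketch (not part of the original script): the same schema
# also exposes the CreateUser mutation defined above; "Alice" is an assumption.
mutation_result = schema.execute(
    '''
    mutation {
        createUser(username: "Alice") {
            user {
                id
                username
            }
        }
    }
    '''
)
print(json.dumps(dict(mutation_result.data.items()), indent=2))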
|
#!/bin/sh
''''which python3.6 >/dev/null 2>&1 && exec python3.6 "$0" "$@" # '''
''''which python3.5 >/dev/null 2>&1 && exec python3.5 "$0" "$@" # '''
''''exec echo "Error: I can't find python3.[6|5] anywhere." # '''
from argh import ArghParser, arg, wrap_errors, expects_obj
from connexion import FlaskApp
import src.setup as setup
log_levels = ['CRITICAL', 'FATAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']
# TODO: Clear console on dev rebuild.
# TODO: Stats (Frontend) Log table (default), stats tab - if stats is not present in response then give user a message saving stats has been disabled on the server.
@arg('--dev', default = False, help = 'Enable Flask development mode.')
@arg('--console', default = False, help = 'Enable logging to console.')
@arg('--no-file', default = False, help = 'Disable logging to file.')
@arg('--no-stats', default = False, help = 'Disables endpoint stats gathering.')
@arg('--no-auth', default = False, help = 'Disables jwt authentication.')
@arg('--host', default = '0.0.0.0', help = 'IP to serve on.')
@arg('--port', default = 5000, help = 'Port to serve on.')
@arg('--ssh', default = None, help = 'Use the SSH console (rather than the local console). In the format user@host')
@arg('--password', default = None, help = 'Password to use with SSH console.')
@arg('-v', '--verbose', default = False, help = 'Enable verbose logging. Sets the log levels to DEBUG and logs more in depth information.')
@arg('--config-path', default = '/opt/dfn-software/dfnstation.cfg', help = 'Path to the dfnstation.cfg file.')
@arg('--disk-usage-path', default = '/tmp/dfn_disk_usage', help = 'Path to the dfn_disk_usage file.')
@arg('--log-level',
choices = log_levels,
default = 'INFO',
help = 'Logging level for the whole application.')
@arg('--api-log-level',
choices = log_levels,
default = 'INFO',
help = 'Logging level for the logs sent to the frontend.')
@wrap_errors([ValueError, OSError, KeyError])
@expects_obj
def run(args):
connexion_app = FlaskApp(__name__)
flask_app = connexion_app.app
config = flask_app.config
setup.args(config, args)
setup.extensions(flask_app)
setup.logger(config)
setup.routes(connexion_app, args)
flask_app.run(
host = args.host,
port = args.port)
if __name__ == '__main__':
parent_parser = ArghParser(description = 'Launches the DFN-Maintenance-GUI.')
parent_parser.add_commands([run])
parent_parser.set_default_command(run)
parent_parser.dispatch()
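# Hedged example invocations (the script filename is an assumption; the flags
# are the ones declared above; depending on how argh resolves the default
# command, the explicit ``run`` subcommand form may be required):
#
#     python main.py --dev --console --port 8080
#     python main.py run --log-level DEBUG --no-stats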
|
import db
import functools
import logging
from models.exceptions import InvalidComparisonError
logger = logging.getLogger(__name__)
class User(object):
def __init__(self, _id, name, email):
self.id = _id
self.name = name
self.email = email
@classmethod
@functools.lru_cache(None)
def get_by_id(cls, _id):
with db.DBConnection() as dbconn:
user_row = dbconn.fetchone(
'SELECT id, name, email FROM users WHERE id = ?',
(_id,)
)
if user_row:
return User(*user_row)
return None
def __eq__(self, other):
if other.id != self.id:
raise InvalidComparisonError('Can only compare user objects with the same ID')
return (self.name, self.email) == (other.name, other.email)
def __repr__(self):
return '%s: %s (ID: %s)' % (
self.__class__.__name__, self.name, self.id
)
def update_or_insert(self):
existing_user = User.get_by_id(self.id)
statements = []
if existing_user is None:
            logger.info('%r does not exist - creating', self)
statements.append(
(
'INSERT INTO users (id, name, email) VALUES (?, ?, ?)',
(self.id, self.name, self.email)
)
)
elif isinstance(existing_user, User) and existing_user != self:
            logger.info('%r exists - but has changed - updating', self)
statements.append(
(
'UPDATE users SET name = ?, email = ? WHERE id = ?',
(self.name, self.email, self.id)
)
)
return statements
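# A hedged usage sketch, kept as a comment because the execution step depends
# on the db module's API, which is assumed here rather than shown in this file:
#
#     user = User(1, 'Ada Lovelace', 'ada@example.com')
#     for query, params in user.update_or_insert():
#         ...  # execute query with params on a db connection, then commit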
|
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import CreateView, ListView
from django.template import Template
from tryon.models import Tryon
from tryon.forms import TryonForm
import os
from os import path
import sys
sys.path.append(os.path.abspath('model'))
sys.path.append(os.path.abspath('media'))
from virtuon import virtuon
from clear import clear
from pairs import pairs
# Create your views here.
class TryonView(CreateView):
model = Tryon
template = "index.html"
success_url = "predict.html"
def get(self, request):
clear()
form = TryonForm()
ctx = {'form': form}
return render(request, self.template, ctx)
def post(self, request):
form = TryonForm(request.POST, request.FILES or None)
if not form.is_valid():
ctx = {'form': form}
return render(request, self.template, ctx)
form.save()
return redirect('tryon:predict')
class TryonPredict(ListView):
template = "predict.html"
def get(self, request):
if path.isfile("media/output/d0.jpg") is not True:
pairs()
virtuon()
cloth = ("cloth/c0.jpg")
pose = ("image/d0.jpg")
output = ("output/d0.jpg")
ctx = {"output": output, "cloth": cloth, "pose": pose}
return render(request, self.template, ctx)
|
import logging
import sys
import time
from unittest import TestCase
from hsmpy import HSM, State, FINAL, Condition, Event
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
class TestHSM(TestCase):
def test_run(self):
class S1(State):
def __init__(self, uber, **kwargs):
super().__init__(uber, **kwargs)
self.loops = 0
def enter(self, **kwargs):
logger.debug("Entered State 1")
def loop(self, event):
logger.debug("Looping {}".format(self.loops))
self.loops += 1
def final(self):
logger.debug("Leaving State 1")
class S2(State):
def __init__(self, uber, **kwargs):
super().__init__(uber, **kwargs)
self.loops = 0
def enter(self, **kwargs):
logger.debug("Entered State 2")
def loop(self, event):
logger.debug("Looping {}".format(self.loops))
self.loops += 1
def final(self):
logger.debug("Leaving State 2")
def counts_exceeded(self):
return self.loops > 42
self.hsm = HSM(init_state=S1)
def test():
            logger.debug(time.process_time())
            return time.process_time() > 0.1  # time.clock() was removed in Python 3.8
self.hsm.add_transition({"from": S1, "to": S2, "condition": {Condition.TIMEOUT: 5}})
self.hsm.add_transition({"from": S2, "to": S1, "condition": {Condition.EVENT_TYPE: "wah"}})
self.hsm.add_transition({"from": S1, "to": S2, "condition": test})
self.hsm.add_transition({"from": S2, "to": FINAL, "condition": S2.counts_exceeded})
logger.debug("Starting hsm...")
self.hsm.start()
logger.debug("Started.")
time.sleep(7)
self.hsm.send_event(Event("wah", ""))
self.hsm.join()
logger.debug("Final")
def test_class_based_run(self):
class S1(State):
def __init__(self, uber, **kwargs):
super().__init__(uber, **kwargs)
self.loops = 0
def enter(self, **kwargs):
logger.debug("Entered State 1")
def loop(self, event):
logger.debug("Looping {}".format(self.loops))
self.loops += 1
def final(self):
logger.debug("Leaving State 1")
class S2(State):
def __init__(self, uber, **kwargs):
super().__init__(uber, **kwargs)
self.loops = 0
def enter(self, **kwargs):
logger.debug("Entered State 2")
def loop(self, event):
logger.debug("Looping {}".format(self.loops))
self.loops += 1
def final(self):
logger.debug("Leaving State 2")
def counts_exceeded(self):
return self.loops > 42
def test():
            return time.process_time() > 0.1  # time.clock() was removed in Python 3.8
class MyHSM(HSM):
transitions = [
{"from": S1, "to": S2, "condition": {Condition.TIMEOUT: 5}},
{"from": S2, "to": S1, "condition": {Condition.EVENT_TYPE: "wah"}},
{"from": S1, "to": S2, "condition": test},
{"from": S2, "to": FINAL, "condition": S2.counts_exceeded},
]
init_state = S1
mhsm = MyHSM()
logger.debug("Starting mhsm...")
logger.debug(len(mhsm._transitions))
mhsm.start()
logger.debug("Started.")
time.sleep(7)
mhsm.send_event(Event("wah", ""))
mhsm.join()
logger.debug("Final")
def test_hierarchical_run(self):
class S1(State):
pass
class S2(State):
pass
class S3(State):
pass
class S4(State):
pass
class S5(State):
pass
class InnerHSM(HSM):
transitions = [
{"from": S1, "to": S2, "condition": {Condition.TIMEOUT: 2}},
{"from": S2, "to": S3, "condition": {Condition.TIMEOUT: 2}},
{"from": S3, "to": FINAL, "condition": {Condition.TIMEOUT: 2}},
]
init_state = S1
class OuterHSM(HSM):
transitions = [
{"from": S4, "to": S5, "condition": {Condition.TIMEOUT: 2}},
{"from": S5, "to": InnerHSM, "condition": {Condition.TIMEOUT: 2}},
{"from": InnerHSM, "to": FINAL, "condition": Condition.ON_FINAL},
]
init_state = S4
ohsm = OuterHSM()
ohsm.start()
ohsm.join()
def test_hierarchical_with_event_run(self):
class S1(State):
pass
class S2(State):
def loop(self, event):
if event:
logger.debug("S2 received Event {}".format(event))
class S3(State):
def loop(self, event):
if event:
logger.debug("S3 received Event {}".format(event))
class S4(State):
pass
class S5(State):
pass
class InnerHSM(HSM):
transitions = [
{"from": S1, "to": S2, "condition": {Condition.EVENT_TYPE: "drei"}},
{"from": S1, "to": S3, "condition": {Condition.EVENT_TYPE: "vier"}},
{"from": S1, "to": FINAL, "condition": {Condition.TIMEOUT: 5}},
{"from": S3, "to": FINAL, "condition": {Condition.TIMEOUT: 2}},
{"from": S2, "to": FINAL, "condition": {Condition.TIMEOUT: 2}},
]
init_state = S1
class OuterHSM(HSM):
transitions = [
{"from": S4, "to": S5, "condition": {Condition.EVENT_TYPE: "eins"}},
{"from": S5, "to": InnerHSM, "condition": {Condition.EVENT_TYPE: "zwei"}},
{"from": InnerHSM, "to": FINAL, "condition": Condition.ON_FINAL},
]
init_state = S4
ohsm = OuterHSM()
ohsm.start()
time.sleep(2)
for e in ["eins", "zwei", "drei", "vier"]:
ohsm.send_event(Event(e, ""))
ohsm.join()
def test_state_arguments(self):
class S1(State):
def loop(self, event):
logger.debug([self.x, self.y, self.z])
self.x += 1
class Init(State):
pass
class MyHSM(HSM):
transitions = [
{"from": Init, "to": S1, "condition": True, "args": {"x": 1, "y": 2, "z": 3}},
{"from": S1, "to": FINAL, "condition": {Condition.TIMEOUT: 5}}
]
init_state = Init
hsm = MyHSM()
hsm.start()
hsm.join()
def test_uber_state(self):
class S1(State):
def enter(self):
logger.debug("My uber state is {}".format(self.uber.__class__.__name__))
def loop(self, event):
super().loop(event)
self.uber.looplist.append(1)
def final(self):
super().final()
logger.debug(self.uber.looplist)
class MyHSM(HSM):
transitions = [
{"from": S1, "to": FINAL, "condition": {Condition.TIMEOUT: 5}}
]
init_state = S1
def __init__(self):
super().__init__(uber=self)
self.looplist = []
hsm = MyHSM()
hsm.start()
hsm.join()
|
import os
from negociant.trader.app.ctaStrategy.ctaBacktesting import DAILY_DB_NAME
from negociant.trader.app.ctaStrategy.ctaHistoryData import loadDailyQuandlCsv
loadDailyQuandlCsv(os.path.join('.', 'PAdjM.csv'), DAILY_DB_NAME, 'p.HOT')
loadDailyQuandlCsv(os.path.join('.', 'SRAdjM.csv'), DAILY_DB_NAME, 'SR.HOT')
|
import torchtext
import string
import re
import random
from torchtext.vocab import Vectors
def preprocessing_text(text):
for p in string.punctuation:
if (p == ".") or (p == ","):
continue
else:
text = text.replace(p, " ")
text = text.replace(".", " . ")
text = text.replace(",", " , ")
return text
# Tokenization: split on whitespace
def tokenizer_punctuation(text):
return text.strip().split()
# Combine preprocessing and tokenization
def tokenizer_with_preprocessing(text):
text = preprocessing_text(text)
ret = tokenizer_punctuation(text)
return ret
# test tokenizer with preprocessing
# print(tokenizer_with_preprocessing('I like dogs.'))
def get_tweets_and_sentiment_label_loaders(max_length=256, batch_size=64):
    # Define the processing applied to each field when the data is loaded
max_length = max_length
batch_size = batch_size
ID = torchtext.data.Field(sequential=False, use_vocab=False)
TEXT1 = torchtext.data.Field(sequential=True, tokenize=tokenizer_with_preprocessing, use_vocab=True, lower=True, include_lengths=True, batch_first=True, fix_length=max_length, init_token="<cls>", eos_token="<eos>") # raw text
TEXT2 = torchtext.data.Field(sequential=True, tokenize=tokenizer_with_preprocessing, use_vocab=True, lower=True, include_lengths=True, batch_first=True, fix_length=max_length, init_token="<cls>", eos_token="<eos>") # selected_text
LABEL = torchtext.data.Field(sequential=False, use_vocab=False, preprocessing=lambda l: 0 if l == 'neutral' else 1 if l == 'positive' else 2, is_target=True) # sentiment label
TEST_TEXT = torchtext.data.Field(sequential=True, tokenize=tokenizer_with_preprocessing, use_vocab=True, lower=True, include_lengths=True, batch_first=True, fix_length=max_length, init_token="<cls>", eos_token="<eos>") # raw_text
TEST_LABEL = torchtext.data.Field(sequential=False, use_vocab=False, preprocessing=lambda l: 0 if l == 'neutral' else 1 if l == 'positive' else 2, is_target=True) # sentiment label
train_val_ds = torchtext.data.TabularDataset(
path='../data/train.csv', format='csv',
fields=[('ID', None), ('Text1', TEXT1), ('Text2', TEXT2), ('Label', LABEL)],
skip_header=True)
test_ds = torchtext.data.TabularDataset(
path='../data/test.csv', format='csv',
fields=[('ID', None), ('Test_Text', TEST_TEXT), ('Test_Label', TEST_LABEL)],
skip_header=True)
# test dataloader
    # print('Number of train/validation examples: {}'.format(len(train_val_ds)))
    # print('First train/validation example: {}'.format(vars(train_val_ds[0])))
    # print('Number of test examples: {}'.format(len(test_ds)))
    # print('First test example: {}'.format(vars(test_ds[0])))
train_ds, val_ds = train_val_ds.split(split_ratio=0.8, random_state=random.seed(1234))
# # test split data
# print(len(train_ds))
# print(len(val_ds))
# print(vars(train_ds[0]))
# make vocab
fasttext_vectors = Vectors(name='../data/wiki-news-300d-1M.vec')
# test vectors
# print(fasttext_vectors.dim)
# print(len(fasttext_vectors.itos))
    # Build vocabularies backed by the pretrained vectors
TEXT1.build_vocab(train_ds, vectors=fasttext_vectors, min_freq=10)
TEXT2.build_vocab(train_ds, vectors=fasttext_vectors, min_freq=10)
TEST_TEXT.build_vocab(test_ds, vectors=fasttext_vectors, min_freq=10)
    # # Check the vocabulary vectors
# print(TEXT1.vocab.vectors.shape)
# print(TEXT1.vocab.vectors)
# print(TEXT1.vocab.stoi)
# print(TEST_TEXT.vocab.vectors.shape)
# print(TEST_TEXT.vocab.vectors)
# print(TEST_TEXT.vocab.stoi)
# make Dataloader
    train_dl = torchtext.data.Iterator(train_ds, batch_size=batch_size, train=True)
    val_dl = torchtext.data.Iterator(val_ds, batch_size=batch_size, train=False, sort=False)
    test_dl = torchtext.data.Iterator(test_ds, batch_size=batch_size, sort=False)
# test
batch = next(iter(train_dl))
print(batch.Text1)
print(batch.Text2)
print(batch.Label)
return train_dl, val_dl, test_dl, TEXT1, TEXT2, TEST_TEXT
if __name__ == '__main__':
get_tweets_and_sentiment_label_loaders(max_length=256, batch_size=64)
|
"""
[5/28/2014] Challenge #164 [Intermediate] Part 3 - Protect The Bunkers
https://www.reddit.com/r/dailyprogrammer/comments/26oop1/5282014_challenge_164_intermediate_part_3_protect/
##Description
Most of the residential buildings have been destroyed by the termites due to a bug in /u/1337C0D3R's code. All of the
civilians in our far-future society now live in bunkers of a curious design - the bunkers were poorly designed using
the ASCII Architect and are thus not secure. If the bunkers are breached by a hostile force, it is almost certain that
all the civilians will die.
The high-tech termites have developed a taste for human flesh. Confident from their victory at the building lines, they
are now staging a full attack on the bunkers. The government has hired you to design protective walls against the
termite attacks. However, their supplies are limited, so you must form a method to calculate the minimum amount of
walls required.
A map of an area under assault by evil termites can be described as a 2d array of length m and width n. There are five
types of terrain which make up the land:
* *: A termite nest. Termites can pass through here. The termites begin their assault here. Protective walls cannot be
placed here.
* #: Impassible terrain. Termites cannot pass through here. Protective walls cannot be placed here.
* +: Unreliable terrain. Termites can pass through here. Protective walls cannot be placed here.
* -: Reliable terrain. Termites can pass through here. Protective walls can be placed here.
* o: Bunker. Termites can pass through here. If they do, the civilians die a horrible death. Protective walls cannot be
placed here.
Termites will begin their attack from the nest. They will then spread orthogonally (at right angles) through terrain
they can pass through.
A map will always follow some basic rules:
* There will only be one nest.
* Bunkers will always be in a single filled rectangle (i.e. a contiguous block).
* A bunker will never be next to a nest.
* There will always be a solution (although it may require a lot of walls).
##Formal Inputs And Outputs
##Input Description
Input will be given on STDIN, read from a file map.txt, or supplied as a command line argument. The first line of input
will contain 2 space separated integers m and n. Following that line are n lines with m space separated values per
line. Each value will be one of five characters: *, #, +, -, or o.
Input Limits
1 <= n < 16
3 <= m < 16
##Output Description
Output will be to STDOUT or written to a file output.txt. Output consists of a single integer which is the number of
walls required to protect all the bunkers.
##Sample Inputs and Outputs
##Sample Input 1
6 6
#++++*
#-#+++
#--#++
#ooo--
#ooo-#
######
##Sample Output 1
2
(The walls in this example are placed as follows, with @ denoting walls:
#++++*
#@#+++
#--#++
#ooo@-
#ooo-#
######
##Notes
Thanks again to /u/202halffound
"""
def main():
pass
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
__all__=("bind", "bindVarsToFunc", "modifyCode", "UnbindableException")
__author__="KOLANICH"
__license__="Unlicense"
__copyright__=r"""
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org/>
"""
from sys import version_info
from types import CodeType, FunctionType
import dis, opcode
from collections import namedtuple, OrderedDict
from struct import pack
from inspect import currentframe
import re
from collections import defaultdict
CODE_PROP_PREFIX="co_"
codeArgs=("argcount", "kwonlyargcount", "nlocals", "stacksize", "flags", "codestring", "constants", "names", "varnames", "filename", "name", "firstlineno", "lnotab", "freevars", "cellvars")
def modifyCode(c:CodeType, patch:dict):
"""Used to apply a patch to code object"""
kwargs=OrderedDict()
for pN in codeArgs:
aN=CODE_PROP_PREFIX+pN
if hasattr(c, aN):
kwargs[pN]=getattr(c, aN)
elif hasattr(c, pN):
kwargs[pN]=getattr(c, pN)
else:
kwargs[pN]=None
for pN, v in patch.items():
kwargs[pN]=v
#print(kwargs)
return CodeType(*kwargs.values()) #**kwargs don't work here
lc=opcode.opmap["LOAD_CONST"]
ld=opcode.opmap["LOAD_DEREF"]
lg=opcode.opmap["LOAD_GLOBAL"]
lf=opcode.opmap["LOAD_FAST"]
lcl=opcode.opmap["LOAD_CLOSURE"]
lat=opcode.opmap["LOAD_ATTR"]
ln=opcode.opmap["LOAD_NAME"]
#loadInstrs={v for k,v in opcode.opmap.items() if k.find("LOAD")>=0}
loadInstrs={ld, lg, ln, lf, lcl}
opcVarCollMappingInitial={
#tuple(opcode.hasfree):"freevars",
tuple((v for k,v in opcode.opmap.items() if k.find("DEREF")>=0)):"freevars",
tuple(opcode.hasname):"names",
#tuple(opcode.haslocal):"varnames",
#(lcl,):"cellvars"
}
opcVarCollMapping={}
for opcs, v in opcVarCollMappingInitial.items():
for k in opcs:
opcVarCollMapping[k]=v
def resolveOpcode(key):
if isinstance(key, str):
key=dis.opmap[key]
return key
if version_info.minor >= 6:
INSTR_PACK_STR="<BB"
elif version_info.minor < 6:
INSTR_PACK_STR="<BH"
def genLoadInstr(opcode, arg):
return pack(INSTR_PACK_STR, opcode, arg)
INSTR_WIDTH=len(genLoadInstr(0, 0))
class SymbolBuffer:
"""Every load instruction corresponds to a buffer where it stores names. This represents a buffer tied to that instruction."""
def __init__(self, opcodes:(tuple, set), buffer:(tuple, list)):
self.opcodes=set(opcodes)
self.buffer=dict(enumerate(buffer))
def __getitem__(self, key:(int)):
return self.buffer[key]
def __contains__(self, key:(int)):
return key in self.buffer
def __delitem__(self, key:(int)):
del(self.buffer[key])
def __getattr__(self, key):
return getattr(self.buffer, key)
def __repr__(self):
return "".join((self.__class__.__name__,"(",repr(self.buffer),")"))
class InlineAccountingSymbolBuffer(SymbolBuffer):
def __init__(self, opcode:(tuple, set), buffer:(tuple, list), name=None, offset=0):
super().__init__(opcode, buffer)
self.maxArg=len(buffer)
self.name=name
self.inlined={}
self.remaps={}
self.offset=offset
self.updateRemaps()
def updateRemaps(self, begin=0):
if self.buffer:
self.remaps=dict(zip(self.buffer.keys(), range(self.offset, self.offset+len(self.buffer))))
def __repr__(self):
return "".join((self.__class__.__name__,"(",repr(self.buffer),", ",repr(self.remaps),", ",repr(self.name),")"))
def __getitem__(self, key:(int, str)):
return super().__getitem__(resolveOpcode(key)+self.offset)
def __delitem__(self, key:(int)):
super().__delitem__(resolveOpcode(key)+self.offset)
def __contains__(self, key:(int, str)):
return super().__contains__(resolveOpcode(key)+self.offset)
class FuncScope(dict):
"""The set of buffers with symbols names every function and/or its code object has."""
def __init__(self, cd:CodeType, symbolBufferConstructor=None):
super().__init__()
if symbolBufferConstructor is None:
symbolBufferConstructor=InlineAccountingSymbolBuffer
for opcodes, propName in opcVarCollMappingInitial.items():
propName=CODE_PROP_PREFIX+propName
v=getattr(cd, propName)
buffer=symbolBufferConstructor(opcodes, v, propName)
for opc in opcodes:
self[opc]=buffer
#print(self.buffers)
#self.cd=cd
def __getitem__(self, key:(int, str)):
return super().__getitem__(resolveOpcode(key))
def __contains__(self, key:(int, str)):
return super().__contains__(resolveOpcode(key))
def __repr__(self):
return "".join((self.__class__.__name__, "(", repr({opcVarCollMapping[k]:v for k,v in super().items()}), ")"))
class UnbindableException(Exception):
pass
ignoredVariables=set(dir(FunctionType))
def getCallerContext(captureGlobals=False):
f=currentframe()
	ctx=dict(f.f_back.f_back.f_globals) if captureGlobals else {}  # copy, so the update below doesn't mutate the caller's globals
ctx.update(f.f_back.f_back.f_locals)
del(f)
return {k:v for k,v in ctx.items() if k not in ignoredVariables}
def bindVarsToFunc(f:FunctionType, inlines:dict=None, returnInfo=False):
"""An implementition of inliner. f is function, inlines is dict of variables to inline, when returnInfo is True instead of function this returns a tuple (FuncScope, modified bytecode, inlined version of function)"""
if inlines is None:
inlines=getCallerContext()
#print(inlines.keys())
cd=f.__code__
bcode=bytearray(cd.co_code)
consts=list(cd.co_consts)
clos=dict(enumerate(f.__closure__)) if f.__closure__ else {}
scope=FuncScope(cd)
scope[ld].offset=len(cd.co_cellvars)
scope[ld].updateRemaps()
#print(scope)
#dis.dis(f)
parsedBytecode=dis.Bytecode(cd)
toInlineCands=defaultdict(set)
for instr in parsedBytecode: #first pass: collecting names with load instrs
opc=instr.opcode
if opc in scope:
arg=instr.arg
symbolsBuf=scope[opc]
if arg in symbolsBuf:
toInlineCands[symbolsBuf[arg]].add(opc)
#toInlineCands={name:opcs-loadInstrs for name, opcs in toInlineCands.items() if len(opcs&loadInstrs) and name in inlines.keys() }
toInlineCands={name:opcs-loadInstrs for name, opcs in toInlineCands.items() if name in inlines.keys() }
for symbolsBuf in scope.values():
toInline=[it for it in symbolsBuf.items() if it[1] in toInlineCands.keys()]
toInline.sort(key=lambda it: it[0])
for arg, varName in toInline:
if lat not in toInlineCands[varName]:
del(symbolsBuf[arg])
if ld in symbolsBuf.opcodes:
del(clos[arg])
symbolsBuf.inlined[arg+symbolsBuf.offset]=len(consts)
consts.append(inlines[varName])
symbolsBuf.updateRemaps()
#print(scope)
for instr in parsedBytecode:
opc=instr.opcode
if opc in scope:
arg=instr.arg
symbolsBuf=scope[opc]
if arg in symbolsBuf.inlined:
if opc not in loadInstrs: # it's not a load
if arg not in symbolsBuf: # name moved to consts entirely
raise UnbindableException("Inlined variable `"+getattr(cd, symbolsBuf.name)[arg]+"` (from buffer `"+symbolsBuf.name+"`) was touched in the way other than load ("+opcode.opname[opc]+")!")
elif arg in symbolsBuf.remaps: # name is used for another purpose like load_attr
arg=symbolsBuf.remaps[arg]
else: #it's load, replace with load_const
arg=symbolsBuf.inlined[arg]
opc=lc
elif arg in symbolsBuf.remaps:
#print(symbolsBuf, arg, instr)
arg=symbolsBuf.remaps[arg]
bcode[instr.offset:instr.offset+INSTR_WIDTH]=genLoadInstr(opc, arg)
#print(scope)
patch={
opcVarCollMapping[opc]:tuple(symbolsIndexRemap.values())
for opc, symbolsIndexRemap in scope.items()
}
patch["constants"]=tuple(consts)
patch["codestring"]=bytes(bcode)
cd=modifyCode(cd, patch)
newFunc=FunctionType(cd, f.__globals__, f.__name__, f.__defaults__, tuple(clos.values()))
newFunc.__doc__=f.__doc__
if returnInfo:
return (scope, bcode, newFunc)
return newFunc
def bind(*args, **kwargs):
"""Inlines variables into a function passed.
4 ways to call
* passing a func explicitly, as to inlineFunc:
bind(func, {"inline_me":1})
* as a decorator with a dict
@bind({"inline_me":1})
* as a decorator with kwargs
@bind(inline_me=1)
* as a decorator with implicit context capture
@bind
"""
inlines=None
if len(args) == 1:
if isinstance(args[0], dict):
inlines=args[0]
elif isinstance(args[0], FunctionType):
inlines=getCallerContext()
return bindVarsToFunc(args[0], inlines)
elif kwargs and not len(args):
inlines=kwargs
kwargs={}
if inlines is not None:
def dec(f:FunctionType):
return bindVarsToFunc(f, inlines, **kwargs)
return dec
else:
return bindVarsToFunc(*args, **kwargs)
selfBind=True
if selfBind:
genLoadInstr=bindVarsToFunc(genLoadInstr)
modifyCode=bindVarsToFunc(modifyCode)
getCallerContext=bindVarsToFunc(getCallerContext)
#dis.dis(bindVarsToFunc)
bindVarsToFunc=bindVarsToFunc(bindVarsToFunc)
#dis.dis(bindVarsToFunc)
#bindVarsToFunc=bindVarsToFunc(bindVarsToFunc, {"modifyCode":modifyCode, "genLoadInstr":genLoadInstr})
bind=bindVarsToFunc(bind)
#dis.dis(bind.__code__.co_code)
pass
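# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Assuming `bind` behaves as documented above, inlining a value into a
# function's constants could look like this:
#
#	@bind(FACTOR=3)        # or @bind({"FACTOR": 3}), or bare @bind to capture the caller's context
#	def scale(x):
#		return x * FACTOR  # the LOAD_GLOBAL of FACTOR is rewritten into a LOAD_CONST of 3
#
#	scale(2)               # -> 6, and rebinding a global named FACTOR later has no effect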
|
from .ReadHelper import read_int_8, read_int_24be, signed24, read_int_16be
PC_SIZE_CONVERSION_RATIO = 5.0 / 3.0
def read_pc_file(f, out, settings=None):
pcm_threads = [
{"color": 0x000000, "description": "PCM Color 1"},
{"color": 0x000080, "description": "PCM Color 2"},
{"color": 0x0000FF, "description": "PCM Color 3"},
{"color": 0x008080, "description": "PCM Color 4"},
{"color": 0x00FFFF, "description": "PCM Color 5"},
{"color": 0x800080, "description": "PCM Color 6"},
{"color": 0xFF00FF, "description": "PCM Color 7"},
{"color": 0x800000, "description": "PCM Color 8"},
{"color": 0xFF0000, "description": "PCM Color 9"},
{"color": 0x008000, "description": "PCM Color 10"},
{"color": 0x00FF00, "description": "PCM Color 11"},
{"color": 0x808000, "description": "PCM Color 12"},
{"color": 0xFFFF00, "description": "PCM Color 13"},
{"color": 0x808080, "description": "PCM Color 14"},
{"color": 0xC0C0C0, "description": "PCM Color 15"},
{"color": 0xFFFFFF, "description": "PCM Color 16"},
]
f.seek(2, 0)
colors = read_int_16be(f)
if colors is None:
return # File is blank.
for i in range(0, colors):
color_index = read_int_16be(f)
thread = pcm_threads[color_index]
out.add_thread(thread)
stitch_count = read_int_16be(f)
while True:
x = read_int_24be(f)
c0 = read_int_8(f)
y = read_int_24be(f)
c1 = read_int_8(f)
ctrl = read_int_8(f)
if ctrl is None:
break
x = signed24(x)
y = -signed24(y)
x *= PC_SIZE_CONVERSION_RATIO
y *= PC_SIZE_CONVERSION_RATIO
if ctrl == 0x00:
out.stitch_abs(x, y)
continue
if ctrl & 0x01:
out.color_change()
continue
if ctrl & 0x04:
out.move_abs(x, y)
continue
break # Uncaught Control
out.end()
def read(f, out, settings=None):
read_pc_file(f, out)
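# --- Hedged usage sketch (illustrative, not part of the original module) ---
# The `out` object only needs the five methods used above: add_thread,
# stitch_abs, move_abs, color_change and end. A throwaway stand-in for
# exercising the parser (all names below are hypothetical) might be:
#
#   class _DebugPattern:
#       def add_thread(self, thread): print("thread", thread)
#       def stitch_abs(self, x, y): print("stitch", x, y)
#       def move_abs(self, x, y): print("move", x, y)
#       def color_change(self): print("color change")
#       def end(self): print("end")
#
#   with open("design.pcm", "rb") as f:  # hypothetical input file
#       read(f, _DebugPattern())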
|
# -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief:
from __future__ import print_function
from setuptools import setup, find_packages
from parrots import __version__
long_description = '''
## Usage
### install
* pip3 install parrots
* Or
```
git clone https://github.com/shibing624/parrots.git
cd parrots
python3 setup.py install
```
### speech recognition
input:
```
import parrots
text = parrots.speech_recognition_from_file('./16k.wav')
print(text)
```
output:
```
北京图书馆
```
### tts
input:
```
import parrots
audio_file_path = parrots.synthesize('北京图书馆')
print(audio_file_path)
```
output:
```
北京图书馆 audio file path
```
'''
setup(
name='parrots',
version=__version__,
description='Chinese Text To Speech and Speech Recognition',
long_description=long_description,
author='XuMing',
author_email='xuming624@qq.com',
url='https://github.com/shibing624/parrots',
license="Apache 2.0",
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: Chinese (Traditional)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP'
],
keywords='TTS, chinese text to speech, speech',
install_requires=[
'pypinyin',
'pydub',
'pyaudio',
'jieba'
],
packages=find_packages(exclude=['tests']),
package_dir={'parrots': 'parrots'},
package_data={
'parrots': ['*.*', 'LICENSE', 'README.*', 'data/*', 'utils/*', 'data/pinyin2hanzi/*', 'data/speech_model/*']}
)
|
#!/usr/bin/env python
import imp
import io
import os
import sys
from setuptools import find_packages, setup
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
root = os.path.dirname(os.path.realpath(__file__))
version_module = imp.load_source(
'version', os.path.join(root, 'myday', 'version.py'))
testing = bool({'pytest', 'test'}.intersection(sys.argv))
setup(
name="myday",
version=version_module.version,
author="Trevor Bekolay",
author_email="tbekolay@gmail.com",
packages=find_packages(),
include_package_data=True,
scripts=[],
url="https://github.com/tbekolay/myday",
license="MIT license",
description="What should I do with my day?",
long_description=read('README.rst', 'CHANGES.rst'),
entry_points={
'console_scripts': [
'myday = myday:main',
]
},
setup_requires=["pytest-runner"] if testing else [],
install_requires=[
'click',
'orgmode',
'python-dateutil',
'recurrent',
],
classifiers=[ # https://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
]
)
|
'''
Comparison of Dual Averaging algorithms
@author: Maximilian Balandat
@date: May 8, 2015
'''
# Set up infrastructure and basic problem parameters
import multiprocessing as mp
import numpy as np
import datetime, os
from ContNoRegret.Domains import nBox, UnionOfDisjointnBoxes, DifferenceOfnBoxes, unitbox, hollowbox
from ContNoRegret.LossFunctions import AffineLossFunction, QuadraticLossFunction, random_AffineLosses, random_QuadraticLosses
from ContNoRegret.NoRegretAlgos import ContNoRegretProblem
from ContNoRegret.utils import CNR_worker, plot_results, save_results, circular_tour
from ContNoRegret.animate import save_animations
from ContNoRegret.Potentials import (ExponentialPotential, IdentityPotential, pNormPotential, CompositePotential,
ExpPPotential, PExpPotential, HuberPotential, LogtasticPotential, FractionalLinearPotential)
# this is the location of the folder for the results
results_path = '/Users/balandat/Documents/Code/Continuous_No-Regret/results/'
desc = 'DA_Comparison'
tmpfolder = '/Volumes/tmp/' # if possible, choose this to be a RamDisk
save_res = True
show_plots = False
create_anims = True
show_anims = False
# before running the computation, read this file so we can later save a copy in the results folder
with open(__file__, 'r') as f:
thisfile = f.read()
T = 5000 # Time horizon
M = 10.0 # Uniform bound on the function (L-infinity norm)
Lbnd = 5.0 # Uniform bound on the Lipschitz constant
N = 2500 # Number of parallel algorithm instances
Ngrid = 250000 # Number of gridpoints for the sampling step
dom = unitbox(2)
# dom = UnionOfDisjointnBoxes([nBox([(-1,0), (-1,0)]), nBox([(0,1), (0,1)])])
# dom = DifferenceOfnBoxes(nBox([(-1,1), (-1,1)]), [nBox([(-0.5,0.5), (-0.5,0.5)])])
# Now create some random loss functions
# d=2 means sampling the vector a uniformly at random from {x : ||x||_2 < L}
lossfuncs, M = random_AffineLosses(dom, Lbnd, T, d=2)
# mus = circular_tour(dom, T)
# mus_random = dom.sample_uniform(T)
# epsilon = 0.4
# mus = ((1-epsilon)*mus + epsilon*mus_random)
# lossfuncs, Mnew = random_QuadraticLosses(dom, mus, Lbnd, M, pd=True)
# testfunc = QuadraticLossFunction(dom, [0,0], np.array([[1,0],[0,1]]), 0)
# c = testfunc.min()
# lossfuncs = [QuadraticLossFunction(dom, [0,0], np.array([[1,0],[0,1]]), -c) for t in range(T)]
# print(M, Mnew)
# normbounds = {'1': [lossfunc.norm(1) for lossfunc in lossfuncs],
# '2': [lossfunc.norm(2) for lossfunc in lossfuncs]}
# create Continuous No-Regret problem
prob = ContNoRegretProblem(dom, lossfuncs, Lbnd, M, desc=desc)
# choose learning rate parameters
thetas = [1]
alphas = [0.5]
# potentials
potentials = [ExponentialPotential(), pNormPotential(1.25), pNormPotential(1.5), pNormPotential(1.75)]#, CompositePotential(2)]
# , pNormPotential(1.5), ExpPPotential(1.5), PExpPotential(1.5),
# CompositePotential(2), CompositePotential(4)]
# , CompositePotential(gamma=2), CompositePotential(gamma=4),
# pNormPotential(1.25), pNormPotential(1.5), pNormPotential(1.75)]
# potentials = [ExponentialPotential(), CompositePotential(gamma=2), CompositePotential(gamma=4),
# pNormPotential(1.25), pNormPotential(1.75)]
# potentials = [ExponentialPotential(), CompositePotential(gamma=2), CompositePotential(gamma=4),
# pNormPotential(1.25), pNormPotential(1.75), LogtasticPotential()]
# potentials = [pNormPotential(1.25), pNormPotential(1.5), pNormPotential(1.75), pNormPotential(2.0)]
# the following runs fine if the script is the __main__ method, but crashes when running from ipython
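# Hedged note: wrapping everything from here on in an `if __name__ == "__main__":`
# guard (e.g. by moving it into a main() function) is the usual way to make
# multiprocessing.Pool usable from interactive sessions and spawn-based starts.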
pool = mp.Pool(processes=mp.cpu_count()-1)
DAkwargs = [{'alphas':[pot.alpha_opt(dom.n)], 'thetas':thetas, 'Ngrid':Ngrid, 'potential':pot,
'pid':i, 'tmpfolder':tmpfolder, 'label':pot.desc} for i,pot in enumerate(potentials)]
processes = [pool.apply_async(CNR_worker, (prob, N, 'DA'), kwarg) for kwarg in DAkwargs]
# GPkwargs = {'alphas':alphas, 'thetas':thetas, 'Ngrid':Ngrid, 'pid':len(processes), 'tmpfolder':tmpfolder, 'label':'GP'}
# processes.append(pool.apply_async(CNR_worker, (prob, N, 'GP'), GPkwargs))
#
# OGDkwargs = {'alphas':alphas, 'thetas':thetas, 'Ngrid':Ngrid, 'pid':len(processes), 'tmpfolder':tmpfolder, 'label':'OGD'}
# processes.append(pool.apply_async(CNR_worker, (prob, N, 'OGD'), OGDkwargs))
#
# ONSkwargs = {'alpha':0.1, 'Ngrid':Ngrid, 'pid':len(processes), 'tmpfolder':tmpfolder, 'label':'ONS'}
# processes.append(pool.apply_async(CNR_worker, (prob, N, 'ONS'), ONSkwargs))
#
# FTALkwargs = {'alpha':0.1, 'Ngrid':Ngrid, 'pid':len(processes), 'tmpfolder':tmpfolder, 'label':'ONS'}
# processes.append(pool.apply_async(CNR_worker, (prob, N, 'FTAL'), FTALkwargs))
#
# EWOOkwargs = {'alpha':0.1, 'Ngrid':Ngrid, 'pid':len(processes), 'tmpfolder':tmpfolder, 'label':'EWOO'}
# processes.append(pool.apply_async(CNR_worker, (prob, N, 'EWOO'), EWOOkwargs))
# wait for the processes to finish and collect the results
results = [process.get() for process in processes]
# plot results and/or save a persistent copy (pickled) of the detailed results
if save_res:
# create a time stamp for unambiguously naming the results folder
timenow = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')
results_directory = '{}{}/'.format(results_path, timenow)
os.makedirs(results_directory, exist_ok=True) # this could probably use a safer implementation
plot_results(results, 100, results_directory, show_plots)
save_animations(results, 10, results_directory, show_anims)
save_results(results, results_directory)
# store the previously read-in contents of this file in the results folder
with open(results_directory+str(__file__), 'w') as f:
f.write(thisfile)
else:
plot_results(results, offset=100)
|
"""
This module contains all the methods related to "buildtest build" which is used
for building test scripts from a Buildspec
"""
import json
import logging
import os
import re
import shutil
import sys
import tempfile
import traceback
from datetime import datetime
from buildtest import BUILDTEST_VERSION
from buildtest.builders.compiler import CompilerBuilder
from buildtest.builders.script import ScriptBuilder
from buildtest.builders.spack import SpackBuilder
from buildtest.buildsystem.builders import Builder
from buildtest.buildsystem.parser import BuildspecParser
from buildtest.cli.compilers import BuildtestCompilers
from buildtest.config import SiteConfiguration
from buildtest.defaults import (
BUILD_HISTORY_DIR,
BUILD_REPORT,
BUILDSPEC_CACHE_FILE,
BUILDTEST_DEFAULT_TESTDIR,
BUILDTEST_LOGFILE,
BUILDTEST_REPORTS,
BUILDTEST_RERUN_FILE,
DEFAULT_LOGDIR,
console,
)
from buildtest.exceptions import BuildspecError, BuildTestError
from buildtest.executors.setup import BuildExecutor
from buildtest.log import init_logfile
from buildtest.schemas.defaults import schema_table
from buildtest.system import BuildTestSystem
from buildtest.utils.file import (
create_dir,
is_dir,
is_file,
load_json,
resolve_path,
walk_tree,
)
from jsonschema.exceptions import ValidationError
from rich import box
from rich.panel import Panel
from rich.table import Column, Table
logger = logging.getLogger(__name__)
# Context manager that copies stdout and any exceptions to a log file
class Tee(object):
def __init__(self, filename):
self.file = open(filename, "w")
self.stdout = sys.stdout
def __enter__(self):
sys.stdout = self
def __exit__(self, exc_type, exc_value, tb):
sys.stdout = self.stdout
if exc_type is not None:
self.file.write(traceback.format_exc())
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
def flush(self):
self.file.flush()
self.stdout.flush()
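# Hedged usage sketch (illustrative): Tee mirrors stdout to a file and, if the
# block raises, also writes the traceback to that file.
#
#   with Tee("/tmp/buildtest_build.log"):  # hypothetical log path
#       print("this line goes to both the terminal and the log file")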
def resolve_testdirectory(configuration, testdir=None):
"""This method resolves which test directory to select. For example, one
can specify test directory via command line ``buildtest build --testdir <path>``
or path in configuration file. The default is $HOME/.buildtest/var/tests
Args:
configuration (buildtest.config.SiteConfiguration): An instance of SiteConfiguration class which contains content of buildtest configuration file
testdir (str, optional): Path to test directory specified via command line ``buildtest build --testdir``
Returns:
str: Path to test directory
"""
# variable to set test directory if prefix is set
config_testdir = resolve_path(
configuration.target_config.get("testdir"), exist=False
)
# resolve full path for test directory specified by --testdir option
testdir = resolve_path(testdir, exist=False)
# Order of precedence when detecting test directory
# 1. Command line option --testdir
# 2. Configuration option specified by 'testdir'
# 3. Defaults to $BUILDTEST_ROOT/.var/tests
test_directory = testdir or config_testdir or BUILDTEST_DEFAULT_TESTDIR
return test_directory
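    # Hedged example of the precedence above (paths are illustrative):
    #   resolve_testdirectory(config, "/tmp/mytests") -> "/tmp/mytests" (command line wins)
    #   resolve_testdirectory(config) -> the configuration's 'testdir' if set,
    #                                    otherwise BUILDTEST_DEFAULT_TESTDIR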
def discover_buildspecs(
buildspecs=None, exclude_buildspecs=None, executors=None, tags=None
):
"""This method discovers all buildspecs based on --buildspecs, --tags, --executor
and excluding buildspecs (--exclude).
Args:
buildspecs (list, optional): List of input buildspecs passed by argument ``buildtest build --buildspec``
exclude_buildspecs (list, optional): List of excluded buildspecs by argument ``buildtest build --exclude``
tags (list, optional): List of input tags for discovering buildspecs by argument ``buildtest build --tags``
executors (list, optional): List of input executors for discovering buildspecs by argument ``buildtest build --executor``
Returns:
dict: A dictionary containing a list of included, excluded, detected buildspecs and buildspecs detected based on tags and executors
"""
# a dictionary used to keep track of included, excluded and detected buildspecs.
buildspec_dict = {}
buildspec_dict["included"] = []
buildspec_dict["excluded"] = []
buildspec_dict["detected"] = []
buildspec_dict["tags"] = {}
buildspec_dict["executors"] = {}
logger.debug(
f"Discovering buildspecs based on tags={tags}, executor={executors}, buildspec={buildspecs}, excluded buildspec={exclude_buildspecs}"
)
# discover buildspecs based on --tags
if tags:
found_buildspecs, buildspec_dict["tags"] = discover_buildspecs_by_tags(tags)
buildspec_dict["included"] += found_buildspecs
logger.debug(f"Discovered buildspecs based on tags: {tags}")
logger.debug(found_buildspecs)
# discover buildspecs based on --executor
if executors:
found_buildspecs, buildspec_dict["executors"] = discover_buildspecs_by_executor(
executors
)
buildspec_dict["included"] += found_buildspecs
logger.debug(f"Discovered buildspecs based on executors: {executors}")
logger.debug(found_buildspecs)
# discover buildspecs based on --buildspec
if buildspecs:
        # Discover list of one or more Buildspec files based on path provided. Since --buildspec can be provided multiple
        # times we need to invoke discover_by_buildspecs once per argument.
for option in buildspecs:
bp = discover_by_buildspecs(option)
# only add buildspecs if its not None
if bp:
buildspec_dict["included"] += bp
# remove any duplicate Buildspec from list by converting list to set and then back to list
buildspec_dict["included"] = list(set(buildspec_dict["included"]))
# if no files discovered let's stop now
if not buildspec_dict["included"]:
msg = "There are no config files to process."
sys.exit(msg)
logger.debug(
f"buildtest discovered the following Buildspecs: {buildspec_dict['included']}"
)
buildspec_dict["detected"] = buildspec_dict["included"].copy()
# if user pass buildspecs to be excluded (buildtest build -x <buildspec>) then
# discover all excluded buildspecs and remove from discovered list
if exclude_buildspecs:
# discover all excluded buildspecs, if its file add to list,
# if its directory traverse all .yml files
for name in exclude_buildspecs:
bp = discover_by_buildspecs(name)
if bp:
buildspec_dict["excluded"] += bp
# remove any duplicates from list
buildspec_dict["excluded"] = list(set(buildspec_dict["excluded"]))
logger.debug(
f"The exclude pattern is the following: {buildspec_dict['excluded']}"
)
# detected buildspecs are any buildspecs in included buildspecs not in excluded buildspecs
buildspec_dict["detected"] = [
file
for file in buildspec_dict["included"]
if file not in buildspec_dict["excluded"]
]
logger.debug(
f"Buildspec list after applying exclusion: {buildspec_dict['detected']}"
)
# if no files remain after exclusion let's stop now.
if not buildspec_dict["detected"]:
msg = "There are no Buildspec files to process."
sys.exit(msg)
return buildspec_dict
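    # Hedged shape sketch of the returned dictionary (paths and names are illustrative):
    #   {"included": ["/path/a.yml", "/path/b.yml"],
    #    "excluded": ["/path/b.yml"],
    #    "detected": ["/path/a.yml"],
    #    "tags": {"network": {"/path/a.yml"}},
    #    "executors": {"generic.local.bash": {"/path/a.yml"}}}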
def print_discovered_buildspecs(buildspec_dict):
"""This method will print the discovered buildspecs in table format
Args:
buildspec_dict (dict): A dictionary containing a list of included and excluded buildspecs and breakdown of buildspecs by tags and executors
"""
console.rule("[bold red] Discovering Buildspecs")
table = Table(
title="Discovered buildspecs", box=box.DOUBLE_EDGE, header_style="blue"
)
table.add_column("buildspec", style="red")
for i in buildspec_dict["included"]:
table.add_row(i)
console.print(table)
# if any buildspecs removed due to -x option we print them to screen
if buildspec_dict["excluded"]:
table = Table(
title="Excluded buildspecs", box=box.DOUBLE_EDGE, header_style="blue"
)
table.add_column("buildspec", style="red")
for i in buildspec_dict["excluded"]:
table.add_row(i)
console.print(table)
# print breakdown of buildspecs by tags
if buildspec_dict.get("tags"):
for tagname in buildspec_dict["tags"].keys():
table = Table(
title=f"Buildspecs By Tag={tagname}",
box=box.DOUBLE_EDGE,
header_style="blue",
)
table.add_column("buildspec", style="red")
for row in buildspec_dict["tags"][tagname]:
table.add_row(row)
console.print(table)
# print breakdown of buildspecs by executors
if buildspec_dict.get("executors"):
for executorname in buildspec_dict["executors"].keys():
table = Table(
title=f"Buildspecs by Executor={executorname}",
box=box.DOUBLE_EDGE,
header_style="blue",
)
table.add_column("buildspecs")
for row in buildspec_dict["executors"][executorname]:
table.add_row(f"[red]{row}")
console.print(table)
print("\n")
console.print(
"[green bold]Total Discovered Buildspecs: ", len(buildspec_dict["included"])
)
console.print(
"[red bold]Total Excluded Buildspecs: ", len(buildspec_dict["excluded"])
)
console.print(
"[blue bold]Detected Buildspecs after exclusion: ",
len(buildspec_dict["detected"]),
)
def discover_buildspecs_by_tags(tagnames):
"""This method discovers buildspecs by tags, using ``buildtest build --tags`` option.
This method will read BUILDSPEC_CACHE_FILE and search for ``tags`` key in buildspec recipe and
match with input tag. The input ``tags`` are a list of tagnames to search in buildspec with the
``tags`` property in buildspec. The return is a list of buildspec files to process.
Args:
tagnames (list): List of input tags from command line argument ``buildtest build --tags <tags>``
Returns:
list, dict: first argument is a list of buildspecs discovered for all tag names. The second argument is
dictionary breakdown of buildspecs by each tag name
"""
tag_dict = {}
cache = load_json(BUILDSPEC_CACHE_FILE)
buildspecs = []
# query all buildspecs from BUILDSPEC_CACHE_FILE for tags keyword and
# if it matches input_tag we add buildspec to list
for name in tagnames:
tag_dict[name] = set()
for buildspecfile in cache["buildspecs"].keys():
for test in cache["buildspecs"][buildspecfile].keys():
# if input tag is not of type str we skip the tag name since it is not valid
if not isinstance(name, str):
logger.warning(f"Tag: {name} is not of type 'str'")
continue
# if tags is not declared we set to empty list
tag = cache["buildspecs"][buildspecfile][test].get("tags") or []
if name in tag:
buildspecs.append(buildspecfile)
tag_dict[name].add(buildspecfile)
# remove any duplicates and return back a list
buildspecs = list(set(buildspecs))
return buildspecs, tag_dict
def discover_buildspecs_by_executor(executors):
"""This method discovers buildspecs by executor name, using ``buildtest build --executor``
command. This method will read BUILDSPEC_CACHE_FILE and search for ``executor`` property
in buildspec and match with input executor name. The return is a list of matching
buildspec with executor name to process.
Args:
executors (list): List of input executor name from command line argument ``buildtest build --executor <name>``
Returns:
list, dict: first argument is a list of buildspecs discovered for all executors. The second argument is
dictionary breakdown of buildspecs by each executor name
"""
executor_dict = {}
cache = load_json(BUILDSPEC_CACHE_FILE)
buildspecs = []
    # query all buildspecs from BUILDSPEC_CACHE_FILE for the executor keyword and
    # if it matches the input executor we add the buildspec to the list
for name in executors:
executor_dict[name] = set()
for buildspecfile in cache["buildspecs"].keys():
for test in cache["buildspecs"][buildspecfile].keys():
# check if executor in buildspec matches one in argument (buildtest build --executor <EXECUTOR>)
if name == cache["buildspecs"][buildspecfile][test].get("executor"):
buildspecs.append(buildspecfile)
executor_dict[name].add(buildspecfile)
# remove any duplicates and return back a list
buildspecs = list(set(buildspecs))
return buildspecs, executor_dict
def discover_by_buildspecs(buildspec):
"""Given a buildspec file specified by the user with ``buildtest build --buildspec``,
discover one or more files and return a list for buildtest to process.
This method is called once per argument of ``--buildspec`` or ``--exclude``
    option. If it's a directory path we recursively find all buildspecs with
    the **.yml** extension. If the filepath doesn't exist or the file extension is not **.yml** we
return None and capture error in log.
.. code-block:: console
# file path
buildtest build --buildspec tutorials/hello.sh.yml
# directory path
buildtest build --buildspec tutorials
# invalid file path returns None
buildtest build -b /xyz.yml
# invalid file extension
buildtest build -b README.md
Args:
buildspec (str): Full path to buildspec based on argument ``buildtest build --buildspec``
Returns:
list: List of resolved buildspecs.
"""
buildspecs = []
# if buildspec doesn't exist print message and log error and return
if not os.path.exists(os.path.abspath(buildspec)):
msg = (
f"[red]Unable to find any buildspecs: {os.path.abspath(buildspec)} [/red] \n"
+ "Please provide an absolute or relative path to a file or directory"
)
console.print(msg)
logger.error(msg)
return
# Now handle path based on being a directory or file path
if os.path.isdir(buildspec):
logger.debug(
f"Buildspec File: {buildspec} is a directory so traversing directory tree to find all Buildspec files with .yml extension"
)
buildspecs = walk_tree(buildspec, ".yml")
elif os.path.isfile(buildspec):
# if buildspec doesn't end in .yml extension we print message and return None
if not re.search(".yml$", buildspec):
msg = f"{buildspec} does not end in file extension .yml"
print(msg)
logger.error(msg)
return
buildspecs = [buildspec]
logger.debug(f"Buildspec: {buildspec} is a file")
# If we don't have any files discovered
if not buildspecs:
msg = "No Buildspec files found with input: %s." % buildspec
print(msg)
logger.error(msg)
return
# return all buildspec by resolving path, this gets the real canonical path and address shell expansion and user expansion
buildspecs = [resolve_path(file) for file in buildspecs]
logger.info(
f"Based on input argument we discovered the following buildspecs: {buildspecs}"
)
return buildspecs
def print_filters():
"""This method will print list of filters fields used by ``buildtest build --filter``. This method is invoked by
running ``buildtest build --helpfilter``.
"""
table = Table(title="Buildtest Filters", header_style="blue")
table.add_column("Field", style="green")
table.add_column("Description", style="red")
table.add_row("tags", "Filter tests by [italic]'tag'[/italic] field")
table.add_row("type", "Filter test by [italic]'type'[/italic] field")
table.add_row("maintainers", "Filter test by [italic]'maintainers'[/italic] field")
console.print(table)
class BuildTest:
"""This class is an interface to building tests via ``buildtest build`` command."""
def __init__(
self,
configuration=None,
buildspecs=None,
exclude_buildspecs=None,
tags=None,
executors=None,
testdir=None,
stage=None,
filter_buildspecs=None,
rebuild=None,
buildtest_system=None,
report_file=None,
maxpendtime=None,
poll_interval=None,
keep_stage_dir=None,
retry=None,
account=None,
helpfilter=None,
numprocs=None,
numnodes=None,
modules=None,
modulepurge=None,
unload_modules=None,
rerun=None,
executor_type=None,
timeout=None,
):
"""The initializer method is responsible for checking input arguments for type
check, if any argument fails type check we raise an error. If all arguments pass
we assign the values and proceed with building the test.
Args:
configuration (buildtest.config.SiteConfiguration, optional): Loaded configuration content which is an instance of SiteConfiguration
buildspecs (list, optional): list of buildspecs from command line ``buildtest build --buildspec``
exclude_buildspecs (list, optional): list of excluded buildspecs from command line ``buildtest build --exclude``
tags (list, optional): list if tags passed from command line ``buildtest build --tags``
executors (list, optional): list of executors passed from command line ``buildtest build --executors``
testdir (str): Path to test directory where tests are written. This argument can be passed from command line ``buildtest build --testdir``
stage (str, optional): Stop build after parse or build stage which can be configured via ``buildtest build --stage`` option
filter_buildspecs (dict, optional): filters buildspecs and tests based on ``buildtest build --filter`` argument which is a key/value dictionary that can filter tests based on **tags**, **type**, and **maintainers**
rebuild (int, optional): Rebuild tests X times based on ``buildtest build --rebuild`` option.
buildtest_system (buildtest.system.BuildTestSystem, optional): Instance of BuildTestSystem class
report_file (str, optional): Location to report file where test data will be written upon completion. This can be specified via ``buildtest build --report`` command
maxpendtime (int, optional): Specify maximum pending time in seconds for batch job until job is cancelled
poll_interval (int, optional): Specify poll interval in seconds for polling batch jobs.
keep_stage_dir (bool, optional): Keep stage directory after job completion
retry (int, optional): Number of retry for failed jobs
account (str, optional): Project account to charge jobs. This takes input argument ``buildtest build --account``
helpfilter (bool, optional): Display available filter fields for ``buildtest build --filter`` command. This argument is set to ``True`` if one specifies ``buildtest build --helpfilter``
numprocs (str, optional): List of comma separated process values to run batch jobs specified via ``buildtest build --procs``
numnodes (str, optional): List of comma separated nodes values to run batch jobs specified via ``buildtest build --nodes``
modules (str, optional): List of modules to load for every test specified via ``buildtest build --modules``.
modulepurge (bool, optional): Determine whether to run 'module purge' before running test. This is specified via ``buildtest build --modulepurge``.
unload_modules (str, optional): List of modules to unload for every test specified via ``buildtest build --unload-modules``.
rerun (bool, optional): Rerun last successful **buildtest build** command. This is specified via ``buildtest build --rerun``. All other options will be ignored and buildtest will read buildtest options from file **BUILDTEST_RERUN_FILE**.
executor_type (bool, optional): Filter test by executor type. This option will filter test after discovery by local or batch executors. This can be specified via ``buildtest build --exec-type``
timeout (int, optional): Test timeout in seconds specified by ``buildtest build --timeout``
"""
if buildspecs and not isinstance(buildspecs, list):
raise BuildTestError(f"{buildspecs} is not of type list")
if exclude_buildspecs and not isinstance(exclude_buildspecs, list):
raise BuildTestError(f"{exclude_buildspecs} is not of type list")
if tags and not isinstance(tags, list):
raise BuildTestError(f"{tags} is not of type list")
if executors and not isinstance(executors, list):
raise BuildTestError(f"{executors} is not of type list")
if testdir and not isinstance(testdir, str):
raise BuildTestError(f"{testdir} is not of type str")
if stage and not isinstance(stage, str):
raise BuildTestError(f"{stage} is not of type str")
# if --rebuild is specified check if its an integer and within 50 rebuild limit
if rebuild:
if not isinstance(rebuild, int):
raise BuildTestError(f"{rebuild} is not of type int")
if rebuild > 50:
raise BuildTestError(
f"--rebuild {rebuild} exceeds maximum rebuild limit of 50"
)
if timeout:
if not isinstance(timeout, int):
raise BuildTestError(f"{timeout} is not of type int")
if timeout <= 0:
raise BuildTestError("Timeout must be greater than 0")
self.keep_stage_dir = keep_stage_dir
self.configuration = configuration
self.buildspecs = buildspecs
self.exclude_buildspecs = exclude_buildspecs
self.tags = tags
self.executors = executors
self.maxpendtime = maxpendtime
self.pollinterval = poll_interval
self.helpfilter = helpfilter
self.retry = retry
self.rerun = rerun
self.account = account
self.stage = stage
self.filter_buildspecs = filter_buildspecs
self.rebuild = rebuild
self.modules = modules
self.modulepurge = modulepurge
self.unload_modules = unload_modules
self.numprocs = numprocs
self.numnodes = numnodes
self.executor_type = executor_type
self.timeout = timeout
# this variable contains the detected buildspecs that will be processed by buildtest.
self.detected_buildspecs = None
self.invalid_buildspecs = None
self.builders = None
self.finished_builders = None
if self.helpfilter:
print_filters()
return
# get real path to log directory which accounts for variable expansion, user expansion, and symlinks
self.logdir = (
resolve_path(self.configuration.target_config.get("logdir"), exist=False)
or DEFAULT_LOGDIR
)
# create a temporary file to store logfile and we don't delete file by setting 'delete=False'
# by default tempfile will delete file upon exit.
self.logfile = tempfile.NamedTemporaryFile(
prefix="buildtest_", delete=False, suffix=".log"
)
if self.logdir:
create_dir(self.logdir)
self.logfile.name = os.path.join(
self.logdir, os.path.basename(self.logfile.name)
)
logger = logging.getLogger(__name__)
# if BUILDTEST_LOGFILE is not created we initialize logger. This is relevant when invoking BuildTest class in regression test
if not is_file(BUILDTEST_LOGFILE):
logger = init_logfile(logfile=BUILDTEST_LOGFILE)
self.testdir = resolve_testdirectory(self.configuration, testdir)
create_dir(self.testdir)
logger.debug(f"Tests will be written in {self.testdir}")
self.report_file = resolve_path(report_file, exist=False) or BUILD_REPORT
if is_dir(self.report_file):
raise BuildTestError(
f"{report_file} is a directory please specify a file name where report will be written"
)
# if buildtest build --rerun is set read file then rerun last command regardless of input specified in command line.
# the last command is stored in file BUILDTEST_RERUN_FILE which is a dictionary containing the input arguments.
if self.rerun:
self.load_rerun_file()
self.buildexecutor = BuildExecutor(
self.configuration,
maxpendtime=self.maxpendtime,
account=self.account,
pollinterval=self.pollinterval,
timeout=self.timeout,
)
self.system = buildtest_system
if not self.system:
self.system = BuildTestSystem()
self._validate_filters()
msg = f"""
[magenta]User:[/] [cyan]{self.system.system['user']}
[magenta]Hostname:[/] [cyan]{self.system.system['host']}
[magenta]Platform:[/] [cyan]{self.system.system['platform']}
[magenta]Current Time:[/] [cyan]{datetime.now().strftime('%Y/%m/%d %X')}
[magenta]buildtest path:[/] [cyan]{shutil.which('buildtest')}
[magenta]buildtest version:[/] [cyan]{BUILDTEST_VERSION}
[magenta]python path:[/] [cyan]{self.system.system['python']}
[magenta]python version:[/] [cyan]{self.system.system['pyver']}[/]
[magenta]Configuration File:[/] [cyan]{self.configuration.file}[/]
[magenta]Test Directory:[/] [cyan]{self.testdir}[/]
[magenta]Report File:[/] [cyan]{self.report_file}[/]
[magenta]Command:[/] [cyan]{' '.join(sys.argv)}[/]
"""
console.print(Panel.fit(msg, title="buildtest summary"), justify="left")
def load_rerun_file(self):
"""This will load content of file BUILDTEST_RERUN_FILE that contains a dictionary of key/value pair
that keeps track of last ``buildtest build`` command. This is used with ``buildtest build --rerun``. Upon loading
file we reinitalize all class variables that store argument for ``buildtest build`` options"""
if not is_file(BUILDTEST_RERUN_FILE):
raise BuildTestError(
"Please run a 'buildtest build' command before using '--rerun' option. "
)
console.print(
f"Reading content of rerun file {BUILDTEST_RERUN_FILE} all other options will be ignored."
)
content = load_json(BUILDTEST_RERUN_FILE)
configuration = SiteConfiguration(content["configuration"])
configuration.detect_system()
configuration.validate()
self.configuration = configuration
self.buildspecs = content["buildspecs"]
self.tags = content["tags"]
self.filter = content["filter"]
self.exclude_buildspecs = content["exclude_buildspecs"]
self.executors = content["executors"]
self.report_file = content["report_file"]
self.stage = content["stage"]
self.keep_stage_dir = content["keep_stage_dir"]
self.testdir = content["testdir"]
self.maxpendtime = content["maxpendtime"]
self.pollinterval = content["pollinterval"]
self.account = content["account"]
self.retry = content["retry"]
self.modules = content["modules"]
self.modulepurge = content["modulepurge"]
self.unload_modules = content["unload_modules"]
self.rebuild = content["rebuild"]
self.numnodes = content["numnodes"]
self.numprocs = content["numprocs"]
self.executor_type = content["executor_type"]
self.timeout = content["timeout"]
def save_rerun_file(self):
buildtest_cmd = {
"configuration": self.configuration.file,
"buildspecs": self.buildspecs,
"tags": self.tags,
"filter": self.filter_buildspecs,
"exclude_buildspecs": self.exclude_buildspecs,
"executors": self.executors,
"report_file": self.report_file,
"stage": self.stage,
"keep_stage_dir": self.keep_stage_dir,
"testdir": self.testdir,
"maxpendtime": self.maxpendtime,
"pollinterval": self.pollinterval,
"account": self.account,
"rebuild": self.rebuild,
"retry": self.retry,
"modules": self.modules,
"modulepurge": self.modulepurge,
"unload_modules": self.unload_modules,
"numprocs": self.numprocs,
"numnodes": self.numnodes,
"executor_type": self.executor_type,
"timeout": self.timeout,
}
with open(BUILDTEST_RERUN_FILE, "w") as fd:
fd.write(json.dumps(buildtest_cmd, indent=2))
def _validate_filters(self):
"""Check filter fields provided by ``buildtest build --filter`` are valid types and supported. Currently
supported filter fields are ``tags``, ``type``, ``maintainers``
Raises:
BuildTestError: if input filter field is not valid we raise exception. For ``type`` filter we check for value and make sure the schema type is supported
"""
valid_fields = ["tags", "type", "maintainers"]
# if filter fields not specified there is no need to check fields
if not self.filter_buildspecs:
return
for key in self.filter_buildspecs.keys():
if key not in valid_fields:
raise BuildTestError(
f"Invalid filter field: {key} the available filter fields are: {valid_fields}"
)
if key == "type":
if self.filter_buildspecs[key] not in schema_table["types"]:
raise BuildTestError(
f"Invalid value for filter 'type': '{self.filter_buildspecs[key]}', valid schema types are : {schema_table['types']}"
)
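        # Hedged example (illustrative): a filter such as {"tags": "network", "type": "script"}
        # passes this validation, while {"foo": "bar"} raises BuildTestError.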
def discovered_buildspecs(self):
"""Return all discovered buildspecs which includes included buildspecs, excluded buildspecs and detected buildspecs."""
return self.discovered_bp
def build(self):
"""This method is responsible for discovering buildspecs based on input argument. Then we parse
the buildspecs and retrieve builder objects for each test. Each builder object will invoke :func:`buildtest.buildsystem.base.BuilderBase.build`
which will build the test script, and then we run the test and update report.
"""
if self.helpfilter:
return
self.discovered_bp = discover_buildspecs(
buildspecs=self.buildspecs,
exclude_buildspecs=self.exclude_buildspecs,
tags=self.tags,
executors=self.executors,
)
print_discovered_buildspecs(buildspec_dict=self.discovered_bp)
self.detected_buildspecs = self.discovered_bp["detected"]
self.save_rerun_file()
# Parse all buildspecs and skip any buildspecs that fail validation, return type
# is a builder object used for building test.
self.parse_buildspecs()
# if no builders found or --stage=parse set we return from method
if not self.builders or self.stage == "parse":
return
self.build_phase()
# if --stage=build is set we return from method
if self.stage == "build":
return
self.finished_builders = self.run_phase()
# store path to logfile in each builder object. There is a single logfile per build.
for builder in self.finished_builders:
builder.metadata["logpath"] = self.logfile.name
if not self.keep_stage_dir:
logger.debug("Removing stage directory for all tests")
for builder in self.finished_builders:
shutil.rmtree(builder.stage_dir)
# only update report if we have a list of valid builders returned from run_phase
if self.finished_builders:
update_report(self.finished_builders, self.report_file)
print(f"Writing Logfile to: {self.logfile.name}")
self._update_build_history(self.finished_builders)
def parse_buildspecs(self):
"""Parse all buildspecs by passing buildspec file to :class:`buildtest.buildsystem.parser.BuildspecParser` class.
If buildspec fails validation we skip the buildspec and print all skipped buildspecs.
If buildspec passes validation we get all builders by invoking :class:`buildtest.buildsystem.builders.Builder` class that
is responsible for creating builder objects for each test.
Raises:
SystemExit: If no builders are created after parsing buildspecs
"""
console.rule("[bold red]Parsing Buildspecs")
self.builders = []
self.invalid_buildspecs = []
# store list of valid buildspecs that pass after calling BuildspecParser and used only for printing purpose
valid_buildspecs = []
# stores a list of buildspecs that are filtered out
filtered_buildspecs = []
bc = BuildtestCompilers(configuration=self.configuration)
console.print(
f"Buildtest will parse {len(self.detected_buildspecs)} buildspecs"
)
for buildspec in self.detected_buildspecs:
try:
# Read in Buildspec file here, loading each will validate the buildspec file
bp = BuildspecParser(buildspec, self.buildexecutor)
except (BuildTestError, BuildspecError, ValidationError) as err:
self.invalid_buildspecs.append(buildspec)
logger.error(err)
continue
valid_buildspecs.append(buildspec)
builder = Builder(
bp=bp,
buildtest_compilers=bc,
buildexecutor=self.buildexecutor,
filters=self.filter_buildspecs,
testdir=self.testdir,
rebuild=self.rebuild,
buildtest_system=self.system,
configuration=self.configuration,
numprocs=self.numprocs,
numnodes=self.numnodes,
executor_type=self.executor_type,
)
if not builder.get_builders():
filtered_buildspecs.append(buildspec)
continue
self.builders += builder.get_builders()
console.print(f"[green]Valid Buildspecs: {len(valid_buildspecs)}")
console.print(f"[red]Invalid Buildspecs: {len(self.invalid_buildspecs)}")
for buildspec in valid_buildspecs:
msg = f"[green]{buildspec}: VALID"
console.print(msg)
        # print any skipped buildspecs that failed to validate during the parse stage
if self.invalid_buildspecs:
for buildspec in self.invalid_buildspecs:
msg = f"[red]{buildspec}: INVALID"
console.print(msg)
if filtered_buildspecs:
table = Table("[blue]buildspecs", title="Buildspecs Filtered out")
for test in filtered_buildspecs:
table.add_row(f"[red]{test}")
console.print(table)
# if no builders found we return from this method
if not self.builders:
console.print(
"[red]\nbuildtest is unable to create any tests because there are no valid buildspecs. "
)
print(f"\nPlease see logfile: {BUILDTEST_LOGFILE}")
sys.exit(1)
console.print("Total builder objects created:", len(self.builders))
script_builders = []
compiler_builder = []
spack_builder = []
batch_builders = []
for builder in self.builders:
if isinstance(builder, ScriptBuilder):
script_builders.append(builder)
if isinstance(builder, CompilerBuilder):
compiler_builder.append(builder)
if isinstance(builder, SpackBuilder):
spack_builder.append(builder)
if not builder.is_local_executor():
batch_builders.append(builder)
console.print("Total compiler builder:", len(compiler_builder))
console.print("Total script builder:", len(script_builders))
console.print("Total spack builder:", len(spack_builder))
self.print_builders(
compiler_builder, spack_builder, script_builders, batch_builders
)
def build_phase(self):
"""This method will build all tests by invoking class method ``build`` for
each builder that generates testscript in the test directory. If no builders are
present upon building test we raise exception and terminate immediately
Raises:
BuildTestError: If no builders are present in build phase
"""
invalid_builders = []
console.rule("[bold red]Building Test")
valid_builders = []
for builder in self.builders:
try:
builder.build(
modules=self.modules,
modulepurge=self.modulepurge,
unload_modules=self.unload_modules,
)
except BuildTestError as err:
console.print(f"[red]{err}")
invalid_builders.append(builder)
logger.error(err)
continue
valid_builders.append(builder)
# set retry limit for each builder
builder.retry(self.retry)
# remove builders if any invalid builders detected in build phase
if invalid_builders:
for test in invalid_builders:
self.builders.remove(test)
if not self.builders:
raise BuildTestError(
f"Unable to create any test during build phase. Please check {BUILDTEST_LOGFILE} for more details"
)
def run_phase(self):
"""This method will run all builders with the appropriate executor.
The :class:`buildtest.executors.setup.BuildExecutor` class is responsible for orchestrating builder execution to the
appropriate executor class. The BuildExecutor contains a list of executors picked up from buildtest configuration.
For tests running locally, we get the test metadata and count PASS/FAIL test
state which is printed at end in Test Summary. For tests that need batch submission
via scheduler, the first stage of run will dispatch job, and state will be
unknown. After dispatching all jobs, we will poll jobs until they are complete.
The poll section is skipped if all tests are run locally. In poll section we
regenerate table with all valid builders and updated test state and returncode
and recalculate total pass/fail tests. Any test that failed to run or be
dispatched will be skipped during run stage and they will not be recorded in the test report
Returns:
A list of valid builders after running tests
"""
console.rule("[bold red]Running Tests")
self.buildexecutor.run(self.builders)
builders = self.buildexecutor.get_validbuilders()
########## TEST SUMMARY ####################
if not builders:
sys.exit("Unable to run any tests")
self._print_test_summary(builders)
return builders
def build_success(self):
"""Returns True if build was successful otherwise returns False"""
return True if self.finished_builders else False
def _print_test_summary(self, builders):
"""Print a summary of total pass and fail test with percentage breakdown.
Args:
builders (list): List of builders that ran to completion
"""
table = Table(title="Test Summary", show_lines=True, header_style="blue")
table.add_column("builder", overflow="fold")
table.add_column("executor")
table.add_column("status")
table.add_column("checks (ReturnCode, Regex, Runtime)", overflow="fold")
table.add_column("returnCode")
table.add_column("runtime")
passed_tests = 0
failed_tests = 0
total_tests = 0
for builder in builders:
if builder.metadata["result"]["state"] == "PASS":
passed_tests += 1
color_row = "green"
else:
failed_tests += 1
color_row = "red"
table.add_row(
f"[{color_row}]{builder}",
f"[{color_row}]{builder.executor}",
f"[{color_row}]{builder.metadata['result']['state']}",
f"[{color_row}]{builder.metadata['check']['returncode']} [{color_row}]{builder.metadata['check']['regex']} [{color_row}]{builder.metadata['check']['runtime']}",
f"[{color_row}]{builder.metadata['result']['returncode']}",
f"[{color_row}]{builder.metadata['result']['runtime']}",
)
total_tests += 1
console.print(table)
print("\n\n")
pass_rate = passed_tests * 100 / total_tests
pass_rate = format(pass_rate, ".3f")
fail_rate = failed_tests * 100 / total_tests
fail_rate = format(fail_rate, ".3f")
msg1 = f"[green]Passed Tests: {passed_tests}/{total_tests} Percentage: {pass_rate}%"
msg2 = (
f"[red]Failed Tests: {failed_tests}/{total_tests} Percentage: {fail_rate}%"
)
console.print(msg1)
console.print(msg2)
print("\n")
self.test_summary = {
"total": str(total_tests),
"pass": str(passed_tests),
"fail": str(failed_tests),
"pass_rate": pass_rate,
"fail_rate": fail_rate,
}
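        # Hedged worked example: with 7 of 10 builders passing, the summary above would
        # report pass_rate "70.000" and fail_rate "30.000" (three decimal places).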
def _update_build_history(self, builders):
"""Write a build history file that is stored in ``$BUILDTEST_ROOT/var/.history`` directory summarizing output of build. The history
file is a json file named `build.json` which contains a copy of the build log for troubleshooting. buildtest will create a sub-directory
that is incremented such as 0, 1, 2 in **$BUILDTEST_ROOT/var/.history** which is used to differentiate builds.
Shown below is content of the top-level directory for the history directory. There is one subdirectory for each build ID starting with 0
.. code-block:: console
bash-3.2$ ls -l $BUILDTEST_ROOT/var/.history
total 0
drwxr-xr-x 4 siddiq90 92503 128 Sep 8 13:50 0
drwxr-xr-x 4 siddiq90 92503 128 Sep 8 13:50 1
For every build ID we have a ``build.json`` and log file for each build.
.. code-block:: console
bash-3.2$ ls $BUILDTEST_ROOT/var/.history/{0,1}
/Users/siddiq90/Documents/GitHubDesktop/buildtest/var/.history/0:
build.json buildtest_y3gh46j_.log
/Users/siddiq90/Documents/GitHubDesktop/buildtest/var/.history/1:
build.json buildtest_a1rjdy59.log
"""
create_dir(BUILD_HISTORY_DIR)
num_files = len(os.listdir(BUILD_HISTORY_DIR))
# create a sub-directory in $BUILDTEST_ROOT/var/.history/ that is incremented for every build starting with 0, 1, 2, ...
self.build_history_dir = os.path.join(BUILD_HISTORY_DIR, str(num_files))
create_dir(self.build_history_dir)
build_history_file = os.path.join(self.build_history_dir, "build.json")
# copy the log file.
shutil.copyfile(BUILDTEST_LOGFILE, self.logfile.name)
shutil.copyfile(
BUILDTEST_LOGFILE,
os.path.join(self.build_history_dir, os.path.basename(self.logfile.name)),
)
history_data = {
"command": " ".join(sys.argv),
"user": self.system.system["user"],
"hostname": self.system.system["host"],
"platform": self.system.system["platform"],
"date": datetime.now().strftime("%Y/%m/%d %X"),
"buildtest": shutil.which("buildtest"),
"python": self.system.system["python"],
"python_version": self.system.system["pyver"],
"testdir": self.testdir,
"configuration": self.configuration.file,
"system": self.configuration.name(),
"logpath": os.path.join(
self.build_history_dir, os.path.basename(self.logfile.name)
),
"invalid_buildspecs": self.invalid_buildspecs,
"buildspecs": {
"detected": self.discovered_bp["detected"],
"included": self.discovered_bp["included"],
"excluded": self.discovered_bp["excluded"],
},
"test_summary": {
"pass": self.test_summary["pass"],
"fail": self.test_summary["fail"],
"total": self.test_summary["total"],
"pass_rate": self.test_summary["pass_rate"],
"fail_rate": self.test_summary["fail_rate"],
},
}
history_data["builders"] = {}
for builder in builders:
uid = str(builder.metadata["full_id"])
history_data["builders"][uid] = {}
history_data["builders"][uid]["name"] = builder.name
history_data["builders"][uid]["buildspec"] = builder.buildspec
history_data["builders"][uid]["tags"] = builder.metadata["tags"]
history_data["builders"][uid]["executors"] = builder.metadata["executor"]
history_data["builders"][uid]["state"] = builder.metadata["result"]["state"]
history_data["builders"][uid]["returncode"] = builder.metadata["result"][
"returncode"
]
history_data["builders"][uid]["runtime"] = builder.metadata["result"][
"runtime"
]
history_data["builders"][uid]["testpath"] = builder.metadata["testpath"]
history_data["builders"][uid]["errfile"] = builder.build_script
history_data["builders"][uid]["outfile"] = builder.metadata["outfile"]
history_data["builders"][uid]["errfile"] = builder.metadata["errfile"]
with open(build_history_file, "w") as fd:
fd.write(json.dumps(history_data, indent=2))
def get_build_history_dir(self):
"""Return root of build history directory"""
return self.build_history_dir
def print_builders(
self, compiler_builder, spack_builder, script_builder, batch_builder
):
"""Print detected builders during build phase"""
script_table = Table(
Column(header="builder", style="blue"),
Column(header="executor", style="green"),
Column(header="compiler", style="red"),
Column(header="nodes", style="orange3"),
Column(header="procs", style="orange3"),
Column(header="description", style="magenta"),
Column(header="buildspecs", style="yellow"),
title="Script Builder Details",
show_lines=True,
header_style="blue",
)
compiler_table = Table(
Column(header="builder", style="blue"),
Column(header="executor", style="green"),
Column(header="compiler", style="red"),
Column(header="nodes", style="orange3"),
Column(header="procs", style="orange3"),
Column(header="description", style="magenta"),
Column(header="buildspecs", style="yellow"),
title="Compiler Builder Details",
show_lines=True,
header_style="blue",
)
spack_table = Table(
Column(header="builder", style="blue"),
Column(header="executor", style="green"),
Column(header="compiler", style="red"),
Column(header="nodes", style="orange3"),
Column(header="procs", style="orange3"),
Column(header="description", style="magenta"),
Column(header="buildspecs", style="yellow"),
title="Spack Builder Details",
show_lines=True,
header_style="blue",
)
if script_builder:
for builder in script_builder:
description = builder.recipe.get("description") or ""
                # table entries are rendered by rich, so every value must be converted to a string.
script_table.add_row(
f"{builder}",
f"{builder.executor}",
f"{builder.compiler}",
f"{builder.numnodes}",
f"{builder.numprocs}",
f"{description}",
f"{builder.buildspec}",
)
console.print(script_table)
if spack_builder:
for builder in spack_builder:
description = builder.recipe.get("description") or ""
spack_table.add_row(
f"{builder}",
f"{builder.executor}",
f"{builder.numnodes}",
f"{builder.numprocs}",
f"{description}",
f"{builder.buildspec}",
)
console.print(spack_table)
if compiler_builder:
for builder in compiler_builder:
description = builder.recipe.get("description") or ""
compiler_table.add_row(
f"{builder}",
f"{builder.executor}",
f"{builder.compiler}",
f"{builder.numnodes}",
f"{builder.numprocs}",
f"{description}",
f"{builder.buildspec}",
)
console.print(compiler_table)
if batch_builder:
table = Table(
title="Batch Job Builders", show_lines=True, header_style="blue"
)
table.add_column("builder", overflow="fold", style="blue")
table.add_column("executor", overflow="fold", style="green")
table.add_column("buildspecs", overflow="fold", style="yellow")
for builder in batch_builder:
table.add_row(
f"{builder}",
f"{builder.executor}",
f"{builder.buildspec}",
)
console.print(table)
if self.numprocs:
table = Table(
title="Batch Job Builders by Processors",
show_lines=True,
header_style="blue",
)
table.add_column("builder", overflow="fold", style="blue")
table.add_column("executor", overflow="fold", style="green")
table.add_column("procs", overflow="fold", style="orange3")
table.add_column("buildspecs", overflow="fold", style="yellow")
for builder in batch_builder:
                    # skip builders that don't have the builder.numprocs attribute, which is only set when 'buildtest build --procs' is specified
if not builder.numprocs:
continue
table.add_row(
f"{builder}",
f"{builder.executor}",
f"{builder.numprocs}",
f"{builder.buildspec}",
)
console.print(table)
if self.numnodes:
table = Table(
title="Batch Job Builders by Nodes",
show_lines=True,
header_style="blue",
)
table.add_column("builder", overflow="fold", style="blue")
table.add_column("executor", overflow="fold", style="green")
table.add_column("nodes", overflow="fold", style="orange3")
table.add_column("buildspecs", overflow="fold", style="yellow")
for builder in batch_builder:
                    # skip builders that don't have the builder.numnodes attribute, which is only set when 'buildtest build --nodes' is specified
if not builder.numnodes:
continue
table.add_row(
f"{builder}",
f"{builder.executor}",
f"{builder.numnodes}",
f"{builder.buildspec}",
)
console.print(table)
def update_report(valid_builders, report_file):
"""This method will update BUILD_REPORT after every test run performed
by ``buildtest build``. If BUILD_REPORT is not created, we will create
file and update json file by extracting contents from builder metadata
Args:
valid_builders (list): List of valid builders that ran to completion
report_file (str): Specify location to report file.
"""
create_dir(os.path.dirname(report_file))
report = {}
# if file exists, read json file
if is_file(report_file):
report = load_json(report_file)
for builder in valid_builders:
buildspec = builder.buildspec
name = builder.name
entry = {}
report[buildspec] = report.get(buildspec) or {}
# report[buildspec][name] = report.get(buildspec, {}).get(name) or []
report[buildspec][name] = report[buildspec].get(name) or []
# query over attributes found in builder.metadata, we only assign
# keys that we care about for reporting
for item in [
"id",
"full_id",
"description",
"schemafile",
"executor",
"compiler",
"hostname",
"user",
"testroot",
"testpath",
"stagedir",
"command",
"outfile",
"errfile",
"buildenv",
"buildspec_content",
"test_content",
"buildscript_content",
"logpath",
"metrics",
"check",
]:
entry[item] = builder.metadata[item]
entry["tags"] = ""
# convert tags to string if defined in buildspec
if builder.metadata["tags"]:
if isinstance(builder.metadata["tags"], list):
entry["tags"] = " ".join(builder.metadata["tags"])
else:
entry["tags"] = builder.metadata["tags"]
# query over result attributes, we only assign some keys of interest
for item in ["starttime", "endtime", "runtime", "state", "returncode"]:
entry[item] = str(builder.metadata["result"][item])
entry["output"] = builder.metadata["output"]
entry["error"] = builder.metadata["error"]
entry["job"] = builder.metadata["job"]
entry["build_script"] = builder.build_script
report[buildspec][name].append(entry)
with open(report_file, "w") as fd:
json.dump(report, fd, indent=2)
logger.debug(f"Updating report file: {report_file}")
console.print(f"Adding {len(valid_builders)} test results to {report_file}")
    # The BUILDTEST_REPORTS file keeps track of all report files; each entry is
    # the path to a report file, and each path is stored only once.
content = []
if is_file(BUILDTEST_REPORTS):
content = load_json(BUILDTEST_REPORTS)
if report_file not in content:
content.append(report_file)
with open(BUILDTEST_REPORTS, "w") as fd:
json.dump(content, fd, indent=2)
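# For reference, the report written above is a nested mapping of the form
# {buildspec_path: {test_name: [entry, entry, ...]}}, so it can be inspected with
# plain json. The path below is illustrative, not taken from the original code:
#
#   import json
#   with open("report.json") as fd:
#       report = json.load(fd)
#   for buildspec, tests in report.items():
#       for name, runs in tests.items():
#           print(buildspec, name, runs[-1]["state"], runs[-1]["returncode"])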
|
import datetime
import enum
import functools
from typing import List
class Op(enum.Enum):
Equal = "eq"
NotEqual = "ne"
Less = "lt"
LessEqual = "le"
Greater = "gt"
GreaterEqual = "ge"
class FilterInstance:
def __init__(self, name, operator: Op, value):
self.name = name
self._op = operator
self._value = value
def build_api_param(self):
return {f"{self.name}__{self._op.value}": self._value}
def __repr__(self):
return f"{self.name} {self._op.name} {self._value}"
class Filter:
ops: List[Op]
def __init__(self, name):
self._name = name
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls._define_requested_ops()
@classmethod
def _define_requested_ops(cls):
for op in cls.ops:
op_method = functools.partialmethod(cls.get_filter_instance, op=op)
func_name = f"__{op.value}__"
setattr(cls, func_name, op_method)
def _filter_op_impl(self, value, op: Op):
return self.get_filter_instance(value, op)
def get_filter_instance(self, value, op: Op):
return FilterInstance(self._name, op, value)
def __eq__(self, value):
raise NotImplementedError()
def __ne__(self, value):
raise NotImplementedError()
def __lt__(self, value):
raise NotImplementedError()
def __le__(self, value):
raise NotImplementedError()
def __gt__(self, value):
raise NotImplementedError()
def __ge__(self, value):
raise NotImplementedError()
class SimpleFilter(Filter):
ops = [
Op.Equal,
Op.NotEqual,
Op.Less,
Op.LessEqual,
Op.Greater,
Op.GreaterEqual,
]
class DatetimeFilter(Filter):
ops = [Op.Equal, Op.NotEqual, Op.Less, Op.LessEqual, Op.Greater, Op.GreaterEqual]
def get_filter_instance(self, value: datetime.datetime, op: Op):
aware_dt = value.replace(tzinfo=datetime.timezone.utc)
timestamp = int(aware_dt.timestamp())
return FilterInstance(self._name, op, timestamp)
class filters:
ID = SimpleFilter(name="id")
CreatedAt = DatetimeFilter(name="created_at")
Email = SimpleFilter(name="email")
Reference = SimpleFilter(name="reference")
|
import numpy as np
from PIL import Image
# scipy.misc.imread and the skimage.measure.compare_* functions have been removed in
# current releases of those libraries; read images with Pillow and compute the
# metrics with skimage.metrics instead.
from skimage.metrics import peak_signal_noise_ratio, structural_similarity
image_list = ['27', '78', '403', '414', '480', '579', '587', '664', '711', '715', '756', '771', '788', '793', '826', '947', '994', '1076', '1097', '1099', '1141', '1197', '1263', '1320', '1389', '1463', '1563']
#image_list = ['27', '78', '403', '414', '480', '579']
gnd_truth_hr_image_path = 'Data/MRI/PaperTestData/HR_gnd/'
generated_hr_image_path = 'Data/MRI/PaperTestData/HR_gen/'
avg_psnr = 0
avg_ssim = 0
for im in image_list:
    gnd_truth_hr_img = np.array(Image.open(gnd_truth_hr_image_path+'valid_hr-id-'+im+'.png').convert('L'))
    generated_hr_img = np.array(Image.open(generated_hr_image_path+'valid_hr_gen-id-'+im+'.png').convert('L'))
    # print out PSNR and SSIM
    psnr_i = peak_signal_noise_ratio(gnd_truth_hr_img, generated_hr_img)
    ssim_i = structural_similarity(gnd_truth_hr_img, generated_hr_img, data_range=generated_hr_img.max() - generated_hr_img.min())
print('PSNR = ' + str(psnr_i) + ', SSIM = ' + str(ssim_i))
avg_psnr += psnr_i
avg_ssim += ssim_i
avg_psnr /= len(image_list)
avg_ssim /= len(image_list)
print('Average PSNR = ' + str(avg_psnr))
print('Average SSIM = ' + str(avg_ssim))
# resize ground truth to (384x384) image
#gnd_truth_hr_img = scipy.misc.imread(gnd_truth_hr_image_path, mode='L')
#gnd_truth_hr_img_resized = scipy.misc.imresize(gnd_truth_hr_img, [384, 384], interp='bicubic', mode='L')
# read generated (384x384) image
#generated_hr_img = scipy.misc.imread(generated_hr_image_path, mode='L')
# print out PSNR
#print(skimage.measure.compare_psnr(gnd_truth_hr_img_resized, generated_hr_img))
# print out SSIM
#print(skimage.measure.compare_ssim(gnd_truth_hr_img_resized, generated_hr_img, data_range=generated_hr_img.max() - generated_hr_img.min()))
|
"""An example to run the minitaur environment of trotting gait.
"""
import time
import os
import numpy as np
import tensorflow as tf
from pybullet_envs.minitaur.envs import minitaur_gym_env
from pybullet_envs.minitaur.envs import minitaur_trotting_env
#FLAGS = tf.flags.FLAGS
#tf.flags.DEFINE_string("log_path", None, "The directory to write the log file.")
def main(unused_argv):
environment = minitaur_trotting_env.MinitaurTrottingEnv(
urdf_version=minitaur_gym_env.RAINBOW_DASH_V0_URDF_VERSION,
use_signal_in_observation=False,
use_angle_in_observation=False,
render=True,
log_path=os.getcwd())
np.random.seed(100)
sum_reward = 0
environment.reset()
steps = 5000
for _ in range(steps):
# Sleep to prevent serial buffer overflow on microcontroller.
time.sleep(0.002)
action = [0] * 8
_, reward, done, _ = environment.step(action)
sum_reward += reward
if done:
break
tf.logging.info("reward: {}".format(sum_reward))
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
|
__author__ = 'Jonathan Spitz'
# ##################################### PICKER CLASS ##################################### #
# A Picker object selects the best genomes from the given population based on their
# fitness values.
# To create an object you'll need to provide:
class Picker():
    def __init__(self):
        # placeholder: the original stub referenced an undefined attribute (self.something)
        pass
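# A minimal, hypothetical sketch of what a fitness-based Picker could look like, based
# only on the description in the comments above. The names "top_n" and "pick" are
# assumptions for illustration and do not come from the original code.
#
#   class ExamplePicker(object):
#       def __init__(self, top_n):
#           self.top_n = top_n
#
#       def pick(self, population, fitness_values):
#           """Return the top_n genomes with the highest fitness."""
#           ranked = sorted(zip(fitness_values, population), key=lambda pair: pair[0], reverse=True)
#           return [genome for _, genome in ranked[:self.top_n]]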
|
from __future__ import unicode_literals, print_function, division
import sys
__author__ = 'dongliu'
class OutputLevel(object):
ONLY_URL = 0
HEADER = 1
TEXT_BODY = 2
ALL_BODY = 3
class ParseConfig(object):
""" global settings """
def __init__(self):
self.level = OutputLevel.ONLY_URL
self.pretty = False
self.encoding = None
self.group = False
_parse_config = ParseConfig()
def get_config():
global _parse_config
return _parse_config
class Filter(object):
"""filter settings"""
def __init__(self):
self.ip = None
self.port = None
self.domain = None
self.uri_pattern = None
def by_ip(self, ip):
return not self.ip or self.ip == ip
def by_port(self, port):
return not self.port or self.port == port
def by_domain(self, domain):
return not self.domain or self.domain == domain
def by_uri(self, uri):
return not self.uri_pattern or self.uri_pattern in uri
_filter = Filter()
def get_filter():
global _filter
return _filter
out = None
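# Hedged usage sketch: other modules typically mutate and query the module-level
# singletons through the accessors above (the values here are illustrative):
#
#   get_config().level = OutputLevel.HEADER
#   get_config().encoding = "utf-8"
#   get_filter().domain = "example.com"
#   get_filter().by_domain("example.com")   # -> True
#   get_filter().by_port(8080)              # -> True (no port filter configured)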
|
from System.Collections.Specialized import *
from System.IO import *
from System.Text import *
from Deadline.Scripting import *
from DeadlineUI.Controls.Scripting.DeadlineScriptDialog import DeadlineScriptDialog
########################################################################
## Globals
########################################################################
scriptDialog = None
settings = None
########################################################################
## Main Function Called By Deadline
########################################################################
def __main__( *args ):
global scriptDialog
global settings
scriptDialog = DeadlineScriptDialog()
scriptDialog.SetTitle( "Submit chef-client run To Deadline" )
scriptDialog.SetIcon( scriptDialog.GetIcon( 'Chef' ) )
scriptDialog.AddGrid()
scriptDialog.AddControlToGrid( "Separator1", "SeparatorControl", "Job Description", 0, 0, colSpan=3 )
scriptDialog.AddControlToGrid( "NameLabel", "LabelControl", "Job Name", 1, 0, "The name of your job. This is optional, and if left blank, it will default to 'Untitled'.", False )
scriptDialog.AddControlToGrid( "NameBox", "TextControl", "Untitled", 1, 1, colSpan=2)
scriptDialog.AddControlToGrid( "CommentLabel", "LabelControl", "Comment", 2, 0, "A simple description of your job. This is optional and can be left blank.", False )
scriptDialog.AddControlToGrid( "CommentBox", "TextControl", "", 2, 1, colSpan=2 )
scriptDialog.AddControlToGrid( "DepartmentLabel", "LabelControl", "Department", 3, 0, "The department you belong to. This is optional and can be left blank.", False )
scriptDialog.AddControlToGrid( "DepartmentBox", "TextControl", "", 3, 1, colSpan=2 )
scriptDialog.EndGrid()
scriptDialog.AddGrid()
scriptDialog.AddControlToGrid( "Separator2", "SeparatorControl", "Job Options", 0, 0, colSpan=3 )
scriptDialog.AddControlToGrid( "PoolLabel", "LabelControl", "Pool", 1, 0, "The pool that your job will be submitted to.", False )
scriptDialog.AddControlToGrid( "PoolBox", "PoolComboControl", "none", 1, 1 )
scriptDialog.AddControlToGrid( "SecondaryPoolLabel", "LabelControl", "Secondary Pool", 2, 0, "The secondary pool lets you specify a Pool to use if the primary Pool does not have any available Slaves.", False )
scriptDialog.AddControlToGrid( "SecondaryPoolBox", "SecondaryPoolComboControl", "", 2, 1 )
scriptDialog.AddControlToGrid( "GroupLabel", "LabelControl", "Group", 3, 0, "The group that your job will be submitted to.", False )
scriptDialog.AddControlToGrid( "GroupBox", "GroupComboControl", "none", 3, 1 )
scriptDialog.AddControlToGrid( "PriorityLabel", "LabelControl", "Priority", 4, 0, "A job can have a numeric priority ranging from 0 to 100, where 0 is the lowest priority and 100 is the highest priority.", False )
scriptDialog.AddRangeControlToGrid( "PriorityBox", "RangeControl", RepositoryUtils.GetMaximumPriority() / 2, 0, RepositoryUtils.GetMaximumPriority(), 0, 1, 4, 1 )
scriptDialog.AddControlToGrid( "TaskTimeoutLabel", "LabelControl", "Task Timeout", 5, 0, "The number of minutes a slave has to render a task for this job before it requeues it. Specify 0 for no limit.", False )
scriptDialog.AddRangeControlToGrid( "TaskTimeoutBox", "RangeControl", 0, 0, 1000000, 0, 1, 5, 1 )
scriptDialog.AddSelectionControlToGrid( "AutoTimeoutBox", "CheckBoxControl", False, "Enable Auto Task Timeout", 5, 2, "If the Auto Task Timeout is properly configured in the Repository Options, then enabling this will allow a task timeout to be automatically calculated based on the render times of previous frames for the job. " )
scriptDialog.AddControlToGrid( "ConcurrentTasksLabel", "LabelControl", "Concurrent Tasks", 6, 0, "The number of tasks that can render concurrently on a single slave. This is useful if the rendering application only uses one thread to render and your slaves have multiple CPUs.", False )
scriptDialog.AddRangeControlToGrid( "ConcurrentTasksBox", "RangeControl", 1, 1, 16, 0, 1, 6, 1 )
scriptDialog.AddSelectionControlToGrid( "LimitConcurrentTasksBox", "CheckBoxControl", True, "Limit Tasks To Slave's Task Limit", 6, 2, "If you limit the tasks to a slave's task limit, then by default, the slave won't dequeue more tasks then it has CPUs. This task limit can be overridden for individual slaves by an administrator." )
scriptDialog.AddControlToGrid( "MachineLimitLabel", "LabelControl", "Machine Limit", 7, 0, "", False )
scriptDialog.AddRangeControlToGrid( "MachineLimitBox", "RangeControl", 0, 0, 1000000, 0, 1, 7, 1 )
scriptDialog.AddSelectionControlToGrid( "IsBlacklistBox", "CheckBoxControl", False, "Machine List Is A Blacklist", 7, 2, "" )
scriptDialog.AddControlToGrid( "MachineListLabel", "LabelControl", "Machine List", 8, 0, "Use the Machine Limit to specify the maximum number of machines that can render your job at one time. Specify 0 for no limit.", False )
scriptDialog.AddControlToGrid( "MachineListBox", "MachineListControl", "", 8, 1, colSpan=2 )
scriptDialog.AddControlToGrid( "LimitGroupLabel", "LabelControl", "Limits", 9, 0, "The Limits that your job requires.", False )
scriptDialog.AddControlToGrid( "LimitGroupBox", "LimitGroupControl", "", 9, 1, colSpan=2 )
scriptDialog.AddControlToGrid( "DependencyLabel", "LabelControl", "Dependencies", 10, 0, "Specify existing jobs that this job will be dependent on. This job will not start until the specified dependencies finish rendering. ", False )
scriptDialog.AddControlToGrid( "DependencyBox", "DependencyControl", "", 10, 1, colSpan=2 )
scriptDialog.AddControlToGrid( "OnJobCompleteLabel", "LabelControl", "On Job Complete", 11, 0, "If desired, you can automatically archive or delete the job when it completes. ", False )
scriptDialog.AddControlToGrid( "OnJobCompleteBox", "OnJobCompleteControl", "Nothing", 11, 1 )
scriptDialog.AddSelectionControlToGrid( "SubmitSuspendedBox", "CheckBoxControl", False, "Submit Job As Suspended", 11, 2, "If enabled, the job will submit in the suspended state. This is useful if you don't want the job to start rendering right away. Just resume it from the Monitor when you want it to render. " )
scriptDialog.EndGrid()
scriptDialog.AddGrid()
scriptDialog.AddControlToGrid( "Separator3", "SeparatorControl", "Chef Options", 0, 0, colSpan=4)
scriptDialog.AddControlToGrid("LogLevelLabel", "LabelControl", "Log Level", 1, 0, "The level of logging Chef will provide.", False)
scriptDialog.AddComboControlToGrid( "LogLevelComboBox", "ComboControl", "warn", ["debug", "info", "warn", "error", "fatal"], 1, 1)
scriptDialog.EndGrid()
scriptDialog.AddGrid()
scriptDialog.AddHorizontalSpacerToGrid( "HSpacer1", 0, 0 )
submitButton = scriptDialog.AddControlToGrid( "SubmitButton", "ButtonControl", "Submit", 0, 1, expand=False )
submitButton.ValueModified.connect(SubmitButtonPressed)
closeButton = scriptDialog.AddControlToGrid( "CloseButton", "ButtonControl", "Close", 0, 2, expand=False )
closeButton.ValueModified.connect(scriptDialog.closeEvent)
scriptDialog.EndGrid()
settings = ( "DepartmentBox","CategoryBox","PoolBox","SecondaryPoolBox","GroupBox","PriorityBox","IsBlacklistBox","MachineListBox","LimitGroupBox","LogLevelComboBox" )
scriptDialog.LoadSettings( GetSettingsFilename(), settings )
scriptDialog.EnabledStickySaving( settings, GetSettingsFilename() )
scriptDialog.ShowDialog( False )
def GetSettingsFilename():
return Path.Combine( ClientUtils.GetUsersSettingsDirectory(), "ChefSettings.ini" )
def SubmitButtonPressed( *args ):
global scriptDialog
jobInfoFilename = Path.Combine( ClientUtils.GetDeadlineTempPath(), "chef_job_info.job" )
    try:
        writer = StreamWriter( jobInfoFilename, False, Encoding.Unicode )
    except Exception as e:
        scriptDialog.ShowMessageBox( "error: " + str(e), "" )
        return
writer.WriteLine( "Plugin=Chef" )
writer.WriteLine( "Name=%s" % scriptDialog.GetValue( "NameBox" ) )
writer.WriteLine( "Comment=%s" % scriptDialog.GetValue( "CommentBox" ) )
writer.WriteLine( "Department=%s" % scriptDialog.GetValue( "DepartmentBox" ) )
writer.WriteLine( "Pool=%s" % scriptDialog.GetValue( "PoolBox" ) )
writer.WriteLine( "SecondaryPool=%s" % scriptDialog.GetValue( "SecondaryPoolBox" ) )
writer.WriteLine( "Group=%s" % scriptDialog.GetValue( "GroupBox" ) )
writer.WriteLine( "Priority=%s" % scriptDialog.GetValue( "PriorityBox" ) )
writer.WriteLine( "TaskTimeoutMinutes=%s" % scriptDialog.GetValue( "TaskTimeoutBox" ) )
writer.WriteLine( "LimitGroups=%s" % scriptDialog.GetValue( "LimitGroupBox" ) )
writer.WriteLine( "JobDependencies=%s" % scriptDialog.GetValue( "DependencyBox" ) )
writer.WriteLine( "OnJobComplete=%s" % scriptDialog.GetValue( "OnJobCompleteBox" ) )
#writer.WriteLine( "Frames=0" )
#writer.WriteLine( "ChunkSize=1" )
writer.WriteLine( "MaintenanceJob=true" )
writer.WriteLine( "MaintenanceJobStartFrame=0" )
writer.WriteLine( "MaintenanceJobEndFrame=0" )
if( bool(scriptDialog.GetValue( "IsBlacklistBox" )) ):
writer.WriteLine( "Blacklist=%s" % scriptDialog.GetValue( "MachineListBox" ) )
else:
writer.WriteLine( "Whitelist=%s" % scriptDialog.GetValue( "MachineListBox" ) )
if( bool(scriptDialog.GetValue( "SubmitSuspendedBox" )) ):
writer.WriteLine( "InitialStatus=Suspended" )
writer.Close()
# Create plugin info file.
pluginInfoFilename = Path.Combine( ClientUtils.GetDeadlineTempPath(), "chef_plugin_info.job" )
writer = StreamWriter( pluginInfoFilename, False, Encoding.Unicode )
writer.WriteLine("LogLevel=%s" % scriptDialog.GetValue("LogLevelComboBox") )
writer.Close()
# Setup the command line arguments.
arguments = StringCollection()
arguments.Add( jobInfoFilename )
arguments.Add( pluginInfoFilename )
# Now submit the job.
results = ClientUtils.ExecuteCommandAndGetOutput( arguments )
scriptDialog.ShowMessageBox( results, "Submission Results" )
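# For reference, a submission with default dialog values writes a chef_job_info.job
# along these lines (values are illustrative and follow the WriteLine calls above;
# Priority defaults to half of the repository's maximum priority, e.g. 50):
#
#   Plugin=Chef
#   Name=Untitled
#   Comment=
#   Department=
#   Pool=none
#   SecondaryPool=
#   Group=none
#   Priority=50
#   TaskTimeoutMinutes=0
#   LimitGroups=
#   JobDependencies=
#   OnJobComplete=Nothing
#   MaintenanceJob=true
#   MaintenanceJobStartFrame=0
#   MaintenanceJobEndFrame=0
#   Whitelist=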
|
"""Input/Output module.
"""
from .io_base import save, load
#from .mechanics_run import MechanicsHdf5Runner
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Dense, Input, Concatenate, Lambda
from scipy.stats import entropy
from matplotlib.lines import Line2D
import config as cf
from targets import target_distribution_gen
def build_model():
    """ Build NN for triangle """
    cf.pnn.inputsize = 3 # Number of hidden variables, i.e. alpha, beta, gamma
# Hidden variables as inputs.
inputTensor = Input((cf.pnn.inputsize,))
# Group input tensor according to whether alpha, beta or gamma hidden variable.
group_alpha = Lambda(lambda x: x[:,:1], output_shape=((1,)))(inputTensor)
group_beta = Lambda(lambda x: x[:,1:2], output_shape=((1,)))(inputTensor)
group_gamma = Lambda(lambda x: x[:,2:3], output_shape=((1,)))(inputTensor)
# Neural network at the sources, for pre-processing (e.g. for going from uniform distribution to non-uniform one)
## Note that in the example code greek_depth is set to 0, so this part is trivial.
for _ in range(cf.pnn.greek_depth):
group_alpha = Dense(cf.pnn.greek_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg)(group_alpha)
group_beta = Dense(cf.pnn.greek_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg)(group_beta)
group_gamma = Dense(cf.pnn.greek_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg)(group_gamma)
    # Route hidden variables to visible parties Alice, Bob and Charlie
group_a = Concatenate()([group_beta,group_gamma])
group_b = Concatenate()([group_gamma,group_alpha])
group_c = Concatenate()([group_alpha,group_beta])
# Neural network at the parties Alice, Bob and Charlie.
    ## Note: increasing the variance of the initialization seemed to help in some cases, especially when the number of outputs per party is 4 or more.
kernel_init = tf.keras.initializers.VarianceScaling(scale=cf.pnn.weight_init_scaling, mode='fan_in', distribution='truncated_normal', seed=None)
for _ in range(cf.pnn.latin_depth):
group_a = Dense(cf.pnn.latin_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg, kernel_initializer = kernel_init)(group_a)
group_b = Dense(cf.pnn.latin_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg, kernel_initializer = kernel_init)(group_b)
group_c = Dense(cf.pnn.latin_width,activation=cf.pnn.activ, kernel_regularizer=cf.pnn.kernel_reg, kernel_initializer = kernel_init)(group_c)
# Apply final softmax layer
group_a = Dense(cf.pnn.a_outputsize,activation=cf.pnn.activ2, kernel_regularizer=cf.pnn.kernel_reg)(group_a)
group_b = Dense(cf.pnn.b_outputsize,activation=cf.pnn.activ2, kernel_regularizer=cf.pnn.kernel_reg)(group_b)
group_c = Dense(cf.pnn.c_outputsize,activation=cf.pnn.activ2, kernel_regularizer=cf.pnn.kernel_reg)(group_c)
outputTensor = Concatenate()([group_a,group_b,group_c])
model = Model(inputTensor,outputTensor)
return model
def np_euclidean_distance(p,q=0):
""" Euclidean distance, useful for plotting results."""
return np.sqrt(np.sum(np.square(p-q),axis=-1))
def np_distance(p,q=0):
""" Same as the distance used in the loss function, just written for numpy arrays."""
if cf.pnn.loss.lower() == 'l2':
return np.sum(np.square(p-q),axis=-1)
elif cf.pnn.loss.lower() == 'l1':
return 0.5*np.sum(np.abs(p-q),axis=-1)
elif cf.pnn.loss.lower() == 'kl':
p = np.clip(p, K.epsilon(), 1)
q = np.clip(q, K.epsilon(), 1)
return np.sum(p * np.log(np.divide(p,q)), axis=-1)
elif cf.pnn.loss.lower() == 'js':
p = np.clip(p, K.epsilon(), 1)
q = np.clip(q, K.epsilon(), 1)
avg = (p+q)/2
return np.sum(p * np.log(np.divide(p,avg)), axis=-1) + np.sum(q * np.log(np.divide(q,avg)), axis=-1)
def keras_distance(p,q):
""" Distance used in loss function. """
if cf.pnn.loss.lower() == 'l2':
return K.sum(K.square(p-q),axis=-1)
elif cf.pnn.loss.lower() == 'l1':
return 0.5*K.sum(K.abs(p-q), axis=-1)
elif cf.pnn.loss.lower() == 'kl':
p = K.clip(p, K.epsilon(), 1)
q = K.clip(q, K.epsilon(), 1)
return K.sum(p * K.log(p / q), axis=-1)
elif cf.pnn.loss.lower() == 'js':
p = K.clip(p, K.epsilon(), 1)
q = K.clip(q, K.epsilon(), 1)
avg = (p+q)/2
return K.sum(p * K.log(p / avg), axis=-1) + K.sum(q * K.log(q / avg), axis=-1)
def customLoss_distr(y_pred):
""" Converts the output of the neural network to a probability vector.
That is from a shape of (batch_size, a_outputsize + b_outputsize + c_outputsize) to a shape of (a_outputsize * b_outputsize * c_outputsize,)
"""
a_probs = y_pred[:,0:cf.pnn.a_outputsize]
b_probs = y_pred[:,cf.pnn.a_outputsize : cf.pnn.a_outputsize + cf.pnn.b_outputsize]
c_probs = y_pred[:,cf.pnn.a_outputsize + cf.pnn.b_outputsize : cf.pnn.a_outputsize + cf.pnn.b_outputsize + cf.pnn.c_outputsize]
a_probs = K.reshape(a_probs,(-1,cf.pnn.a_outputsize,1,1))
b_probs = K.reshape(b_probs,(-1,1,cf.pnn.b_outputsize,1))
c_probs = K.reshape(c_probs,(-1,1,1,cf.pnn.c_outputsize))
probs = a_probs*b_probs*c_probs
probs = K.mean(probs,axis=0)
probs = K.flatten(probs)
return probs
def customLoss(y_true,y_pred):
""" Custom loss function."""
# Note that y_true is just batch_size copies of the target distributions. So any row could be taken here. We just take 0-th row.
return keras_distance(y_true[0,:], customLoss_distr(y_pred))
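# For intuition, the joint-distribution construction in customLoss_distr written with
# plain numpy (shapes and values are illustrative; the batch dimension is averaged out
# exactly as above):
#
#   import numpy as np
#   a = np.array([[0.5, 0.5], [0.8, 0.2]])          # (batch=2, a_outputsize=2)
#   b = np.array([[1.0, 0.0], [0.6, 0.4]])          # (batch=2, b_outputsize=2)
#   c = np.array([[0.3, 0.7], [0.5, 0.5]])          # (batch=2, c_outputsize=2)
#   joint = a[:, :, None, None] * b[:, None, :, None] * c[:, None, None, :]
#   probs = joint.mean(axis=0).flatten()            # shape (2*2*2,), sums to 1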
# Set up generator for X and Y data
training_mean = 0.5
training_sigma = 0.28867513459 #= np.sqrt(1/12)
def generate_xy_batch():
while True:
temp = np.divide((np.random.random((cf.pnn.batch_size, cf.pnn.inputsize)) - training_mean),training_sigma)
yield (temp, cf.pnn.y_true)
def generate_x_test():
while True:
temp = np.divide((np.random.random((cf.pnn.batch_size_test, cf.pnn.inputsize)) - training_mean),training_sigma)
yield temp
def single_evaluation(model):
""" Evaluates the model and returns the resulting distribution as a numpy array. """
test_pred = model.predict_generator(generate_x_test(), steps=1, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
result = K.eval(customLoss_distr(test_pred))
return result
def single_run():
""" Runs training algorithm for a single target distribution. Returns model."""
# Model and optimizer related setup.
K.clear_session()
model = build_model()
if cf.pnn.start_from is not None:
print("LOADING MODEL WEIGHTS FROM", cf.pnn.start_from)
model = load_model(cf.pnn.start_from,custom_objects={'customLoss': customLoss})
if cf.pnn.optimizer.lower() == 'adadelta':
optimizer = tf.keras.optimizers.Adadelta(lr=cf.pnn.lr, rho=0.95, epsilon=None, decay=cf.pnn.decay)
elif cf.pnn.optimizer.lower() == 'sgd':
optimizer = tf.keras.optimizers.SGD(lr=cf.pnn.lr, decay=cf.pnn.decay, momentum=cf.pnn.momentum, nesterov=True)
else:
optimizer = tf.keras.optimizers.SGD(lr=cf.pnn.lr, decay=cf.pnn.decay, momentum=cf.pnn.momentum, nesterov=True)
print("\n\nWARNING!!! Optimizer {} not recognized. Please implement it if you want to use it. Using SGD instead.\n\n".format(cf.pnn.optimizer))
cf.pnn.optimizer = 'sgd' # set it for consistency.
model.compile(loss=customLoss, optimizer = optimizer, metrics=[])
# Fit model
model.fit_generator(generate_xy_batch(), steps_per_epoch=cf.pnn.no_of_batches, epochs=1, verbose=1, validation_data=generate_xy_batch(), validation_steps=cf.pnn.no_of_validation_batches, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=False, initial_epoch=0)
return model
def compare_models(model1,model2):
""" Evaluates two models for p_target distribution and return one which is closer to it."""
result1 = single_evaluation(model1)
result2 = single_evaluation(model2)
if np_distance(result1, cf.pnn.p_target) < np_distance(result2, cf.pnn.p_target):
return model1, 1
else:
return model2, 2
def update_results(model_new,i):
""" Updates plots and results if better than the one I loaded the model from in this round.
If I am in last sample of the sweep I will plot no matter one, so that there is at least one plot per sweep.
"""
result_new = single_evaluation(model_new)
distance_new = np_distance(result_new, cf.pnn.p_target)
# Decide whether to use new or old model.
if cf.pnn.start_from is not None: # skips this comparison if I was in a fresh_start
try:
model_old = load_model('./saved_models/best_'+str(i).zfill(int(np.ceil(np.log10(cf.pnn.target_distributions.shape[0]))))+'.hdf5', custom_objects={'customLoss': customLoss})
result_old = single_evaluation(model_old)
distance_old = np_distance(result_old, cf.pnn.p_target)
if distance_new > distance_old:
print("Moving on. With old model distance is at {}.".format(distance_old))
result = result_old
model = model_old
distance = distance_old
else:
print("Distance imporved! Distance with new model:", distance_new)
result = result_new
model = model_new
distance = distance_new
except FileNotFoundError:
print("This distance:", distance_new)
result = result_new
model = model_new
distance = distance_new
else:
print("This distance:", distance_new)
result = result_new
model = model_new
distance = distance_new
# Update results
model.save(cf.pnn.savebestpath)
cf.pnn.distributions[i,:] = result
cf.pnn.distances[i] = distance
cf.pnn.euclidean_distances[i] = np_euclidean_distance(result, cf.pnn.p_target)
np.save("./saved_results/target_distributions.npy",cf.pnn.target_distributions)
np.save("./saved_results/distributions.npy",cf.pnn.distributions)
np.save("./saved_results/distances.npy",cf.pnn.distances)
np.save("./saved_results/euclidean_distances.npy",cf.pnn.euclidean_distances)
# Plot distances
plt.clf()
plt.title("D(p_target,p_machine)")
plt.plot(cf.pnn.target_ids,cf.pnn.euclidean_distances, 'ro')
if i!=0 and cf.pnn.sweep_id==0:
plt.ylim(bottom=0,top = np.sort(np.unique(cf.pnn.euclidean_distances))[-2]*1.2)
else:
plt.ylim(bottom=0,top = np.sort(np.unique(cf.pnn.euclidean_distances))[-1]*1.2)
plt.savefig("./figs_training_sweeps/sweep"+str(cf.pnn.sweep_id)+".png")
# Plot distributions
plt.clf()
plt.plot(cf.pnn.p_target,'ro',markersize=5)
plt.plot(result,'gs',alpha = 0.85,markersize=5)
plt.title("Target distr. (in red): {} {:.3f}".format(cf.pnn.target_distr_name, cf.pnn.target_ids[i]))
plt.ylim(bottom=0,top=max(cf.pnn.p_target)*1.2)
plt.savefig("./figs_distributions/target_"+str(i).zfill(int(np.ceil(np.log10(cf.pnn.target_ids.shape[0]))))+".png")
# Plot strategies (only turn on if you're really interested, since it takes quite a bit of time to update in each step!)
plot_strategies(i)
def plot_strategies(i):
sample_size = 4000 #how many hidden variable triples to sample from
random_sample_size = 5 #for each hidden variable triple, how many times to sample from strategies.
    alpha_value = 0.25  # opacity of dots (e.g. 3/random_sample_size); 0.1 or 0.25 make for nice paintings.
markersize = 5000/np.sqrt(sample_size)
modelpath = './saved_models/best_'+str(i).zfill(int(np.ceil(np.log10(cf.pnn.target_distributions.shape[0]))))+'.hdf5'
input_data = generate_x_test()
inputs = next(input_data)
while inputs.shape[0] < sample_size:
inputs = np.concatenate((inputs, next(input_data)),axis=0)
inputs = inputs[:sample_size,:]
K.clear_session()
model = load_model(modelpath,custom_objects={'customLoss': customLoss})
y = model.predict(inputs)
y_a = y[:,0:cf.pnn.a_outputsize]
y_b = y[:,cf.pnn.a_outputsize:cf.pnn.a_outputsize+cf.pnn.b_outputsize]
y_c = y[:,cf.pnn.a_outputsize+cf.pnn.b_outputsize:cf.pnn.a_outputsize+cf.pnn.b_outputsize+cf.pnn.c_outputsize]
y_a = np.array([np.random.choice(np.arange(cf.pnn.a_outputsize),p=y_a[j,:], size = random_sample_size) for j in range(y_a.shape[0])]).reshape(random_sample_size*sample_size)
y_b = np.array([np.random.choice(np.arange(cf.pnn.b_outputsize),p=y_b[j,:], size = random_sample_size) for j in range(y_b.shape[0])]).reshape(random_sample_size*sample_size)
y_c = np.array([np.random.choice(np.arange(cf.pnn.c_outputsize),p=y_c[j,:], size = random_sample_size) for j in range(y_c.shape[0])]).reshape(random_sample_size*sample_size)
training_mean = 0.5
training_sigma = np.sqrt(1/12)
inputs = inputs* training_sigma + training_mean
# Tile and reshape since we sampled random_sample_size times from each input.
inputs = np.array(np.array([np.tile(inputs[i,:],(random_sample_size,1)) for i in range(inputs.shape[0])])).reshape(random_sample_size*sample_size,3)
alphas = inputs[:,0]
betas = inputs[:,1]
gammas = inputs[:,2]
inputs_a = np.stack((betas,gammas)).transpose()
inputs_b = np.stack((alphas,gammas)).transpose()
inputs_c = np.stack((alphas,betas)).transpose()
colordict = {0:'red',1:'green',2:'blue',3:'orange'}
colors_alice = [colordict[i] for i in y_a]
colors_bob = [colordict[i] for i in y_b]
colors_charlie = [colordict[i] for i in y_c]
legend_elements = [Line2D([0], [0], marker='o', color='w', label='0',
markerfacecolor='red', markersize=8),
Line2D([0], [0], marker='o', color='w', label='1',
markerfacecolor='green', markersize=8),
Line2D([0], [0], marker='o', color='w', label='2',
markerfacecolor='blue', markersize=8),
Line2D([0], [0], marker='o', color='w', label='3',
markerfacecolor='orange', markersize=8)]
fig, axes = plt.subplots(2, 2, figsize=(12,12))
plt.subplot(2,2,1)
plt.scatter(inputs_a[:,0],inputs_a[:,1], color = colors_alice, alpha=alpha_value, s = markersize)
plt.gca().invert_yaxis()
plt.title('Response of Alice to her inputs.')
plt.xlabel(r'$\beta$')
plt.ylabel(r'$\gamma$')
plt.subplot(2,2,2)
plt.scatter(inputs_b[:,0],inputs_b[:,1], color = colors_bob, alpha=alpha_value, s = markersize)
plt.gca().invert_yaxis()
plt.title('Response of Bob to his inputs.')
plt.xlabel(r'$\alpha$')
plt.ylabel(r'$\gamma$')
plt.subplot(2,2,3)
plt.scatter(inputs_c[:,1],inputs_c[:,0], color = colors_charlie, alpha=alpha_value, s = markersize)
plt.gca().invert_yaxis()
plt.title('Response of Charlie to his inputs.')
plt.xlabel(r'$\beta$')
plt.ylabel(r'$\alpha$')
plt.subplot(2,2,4)
plt.plot(cf.pnn.target_distributions[i,:],'ro',markersize=5)
plt.plot(cf.pnn.distributions[i,:],'gs',alpha = 0.85,markersize=5)
plt.title('Target (red) and learned (green) distributions')
plt.xlabel('outcome')
plt.ylabel('probability of outcome')
fig.suptitle(cf.pnn.target_distr_name +', distribution no. '+str(i), fontsize = 14)
#fig.legend(handles=legend_elements, loc='lower right',bbox_to_anchor = (0.75,0.25))
fig.legend(handles=legend_elements, loc='upper right')
plt.savefig('./figs_strategies/strat_'+str(i))
|
"""
This module provides several probability distributions for drawing synthetic
data for clusters.
CLASSES AND METHODS
GaussianData : draw Gaussian data for cluster
__init__(self)
sample_cluster(self,class_size,mean,axes,sd)
ExpData : draw doubly exponentially distributed data for cluster
__init__(self)
sample_cluster(self,class_size,mean,axes,sd)
tData : draw Student t-distributed data for cluster
__init__(self)
sample_cluster(self,class_size,mean,axes,sd)
"""
from .core import DataDist
import numpy as np
class GaussianData(DataDist):
"""
Draw Gaussian data for a cluster.
Attributes
----------
None
Methods
-------
__init__(self, ...)
sample_cluster(self, class_size, mean, axes, sd, ...)
"""
def __init__(self):
"""
Instantiate a GaussianData object.
"""
pass
def sample_cluster(self,class_size,mean,axes,sd,seed=None):
"""
Sample a Gaussian cluster.
class_size : int
The number of data points in this cluster.
mean : ndarray
Cluster of this cluster.
axes : list of ndarray
Principle axes of this cluster.
sd : list of float
Standard deviations of this cluster's principal axes.
"""
# assemble covariance matrix
cov = np.transpose(axes) @ np.diag(sd**2) @ axes
# sample data
np.random.seed(seed)
X = np.random.multivariate_normal(mean=mean,
cov=cov, size=class_size)
return X
class ExpData(DataDist):
"""
Draw exponentially distributed synthetic data for a cluster.
Attributes
----------
None
Methods
-------
__init__(self, ...)
sample_cluster(self, class_size, mean, axes, sd, ...)
"""
def __init__(self):
"""
Instantiate an ExpData object.
"""
pass
def sample_cluster(self,class_size, mean,axes,sd,seed=None):
"""
Sample an exponentially distributed cluster.
Sampling is performed using spherical coordinates. First, a random
point on the unit sphere is sampled. This point is then scaled by
a draw from an exponential distribution with standard deviation 1.
Finally, the point is stretched or compressed along each principal
axis of the cluster by the corresponding standard deviation.
class_size : int
The number of data points in this cluster.
mean : ndarray
Cluster of this cluster.
axes : list of ndarray
Principle axes of this cluster.
sd : list of float
Standard deviations of this cluster's principal axes.
"""
np.random.seed(seed)
# each row of axes is an axis
n_axes = axes.shape[0]
# sample on the unit sphere
X = np.random.multivariate_normal(mean=np.zeros(n_axes), cov=np.eye(n_axes),
size=class_size)
X /= np.sqrt(np.sum(X**2,axis=1))[:,np.newaxis]
# scale sampled unit vectors by draws from standard exponential dist
X *= np.random.exponential(scale=1,size=class_size)[:,np.newaxis]
# stretch/compress samples along principal axes
X = X @ np.diag(sd) @ axes
# translate sampled data to the desired mean
X = X + mean[np.newaxis,:]
return X
class tData(DataDist):
"""
Draw t-distributed data for a cluster.
Attributes
----------
df : int
Degrees of freedom in Student t distribution. Low values (e.g. 1) lead
to heavy tails. For very high values, the distribution becomes Gaussian.
Methods
-------
__init__(self, ...)
sample_cluster(self, class_size, mean, axes, sd, ...)
"""
def __init__(self, df=1):
"""
Instantiate a tData object.
Parameters
----------
self : tData
This instance of tData
df : int
Degrees of freedom in Student t distribution
"""
self.df = df
def sample_cluster(self,class_size, mean,axes,sd,n_empirical_quantile=1e+6, seed=None):
"""
Sample a t-distributed cluster.
Sampling is performed using spherical coordinates. First, a random
point on the unit sphere is sampled. This point is then scaled by
a draw from a Student t distribution with self.df degrees of freedom,
normalized by the median of the absolute value of the same t
distribution. Finally, the sampled point is stretched or compressed
along each principal axis of the cluster by the corresponding standard
deviation.
class_size : int
The number of data points in this cluster.
mean : ndarray
Cluster of this cluster.
axes : list of ndarray
Principle axes of this cluster.
sd : list of float
Standard deviations of this cluster's principal axes.
n_empirical_quantile : int, default=1e+6
Number of samples used to approximate the median of the absolute
value of the chosen t distribution
"""
np.random.seed(seed)
        # compute the 0.68 quantile of the absolute value of the t distribution,
        # used to normalize the scale of the draws below
        n = n_empirical_quantile
        abs_t_quantile = np.quantile(np.abs(np.random.standard_t(df=self.df,size=int(n))),q=0.68)
# each row of axes is an axis
n_axes = axes.shape[0]
# sample on the unit sphere
X = np.random.multivariate_normal(mean=np.zeros(n_axes), cov=np.eye(n_axes),
size=class_size)
X /= np.sqrt(np.sum(X**2,axis=1))[:,np.newaxis]
# scale sampled unit vectors by draw from normalized t-distribution
scaling = 1/abs_t_quantile
X *= np.random.standard_t(df=self.df, size=class_size)[:,np.newaxis] * scaling
# stretch/compress along principal axes
X = X @ np.diag(sd) @ axes
# mean-shift X
X = X + mean[np.newaxis,:]
return X
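# Hedged usage sketch, kept as comments because of the relative import at the top of
# this module (the numbers are illustrative):
#
#   import numpy as np
#   axes = np.eye(2)                       # principal axes as rows
#   sd = np.array([1.0, 0.25])             # standard deviation along each axis
#   X = GaussianData().sample_cluster(class_size=500, mean=np.zeros(2), axes=axes, sd=sd, seed=0)
#   X.shape                                # (500, 2)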
|
import sys
import r2pipe
def extract_ehframe(infile, outfile):
    # collect the needed (.eh_frame) sections
    ehframe_sections = []
    r2 = r2pipe.open(infile)
    r2.cmd("aaa")
    allsections = r2.cmd("iS").split('\n')
    for section in allsections:
        if "eh_frame" in section:
            ehframe_sections.append(section)
    # ehframe_sections will contain .eh_frame_hdr and .eh_frame;
    # we only need .eh_frame
    data_size = 0x4
    print(ehframe_sections)
    ehframe_section = ehframe_sections[-1]
    ehframe_section_start_addr = ehframe_section.split(' ')[6]
    ehframe_section_size = ehframe_section.split(' ')[5]
    data_start_addr = hex(int(ehframe_section_start_addr, 16) + int(ehframe_section_size, 16) - data_size - 0x5)
    # print(ehframe_section_start_addr)
    # print(type(ehframe_section_start_addr))
    print("ehframe_section_start_addr: " + hex(int(ehframe_section_start_addr, 16)))
    print("ehframe_section_size: " + hex(int(ehframe_section_size, 16)))
print("[+] Seeking to start address")
# seek to start address
r2.cmd("s " + hex(int(data_start_addr, 16)))
# wtf
print("[+] Write result to file")
r2.cmd("wtf " + outfile + " " + str(data_size))
if __name__ == "__main__":
if len(sys.argv) != 3:
print("usage: " + sys.argv[0] + "infile " + "outfile")
exit()
print(sys.argv[1] + sys.argv[2])
extract_ehframe(sys.argv[1], sys.argv[2])
|
"""
decorator example to enforce argument types to
a decorated function.
Because @wraps is used, the metadata is preserved.
AUTHOR
Colt Steele
and some chipotage and testing by Tony Perez
"""
from functools import wraps
def enforce(*types):
"""
decorator function enforcing, and converting, argument data types
"""
    def decorator(fn):
        @wraps(fn)
        def new_function(*args, **kwargs):
# convert args into something mutable, list in this case
newargs = []
for original_argument, type_to_convert in zip(args, types):
newargs.append(type_to_convert(original_argument))
return fn(*newargs, **kwargs)
return new_function
return decorator
@enforce(str, int)
def repeat_msg(msg, times):
"""
function expecting a string and then an integer as their
parameters, the decorator will try to enforce, and if they
are not, the decorator will try to convert them
If successful, print the string (msg) n times (integer)
"""
for i in range(times):
print(msg)
if __name__ == "__main__":
repeat_msg(42, "4") # expect str and an int, the decorator will cast them
|
# return the suffix of the ordinal day number, e.g. the "st" in "31st day of March"
# only the last two digits of the integer part are used
# only uses the whole-number portion of any number passed (decimals are truncated)
def ordinalSuffix(number):
    # truncate any decimals, and use only the last 2 digits of the resulting integer
    number = number // 1 % 100
    if (number % 10 == 1) and (number // 10 != 1):
        suffix = 'st'
    elif (number % 10 == 2) and (number // 10 != 1):
        suffix = 'nd'
    elif (number % 10 == 3) and (number // 10 != 1):
        suffix = 'rd'
    else:
        suffix = 'th'
    return suffix
#used to find fields based on dow value
def mapDayOfWeekToOrdinalFieldName(dow_number):
if dow_number == 0: return 'mon'
elif dow_number == 1: return 'tue'
elif dow_number == 2: return 'wed'
elif dow_number == 3: return 'thu'
elif dow_number == 4: return 'fri'
elif dow_number == 5: return 'sat'
elif dow_number == 6: return 'sun'
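if __name__ == "__main__":
    # quick demo of the helpers above
    for n in (1, 2, 3, 4, 11, 12, 13, 21, 101.7):
        print(n, ordinalSuffix(n))            # 1 st, 2 nd, 3 rd, 4 th, 11 th, 12 th, 13 th, 21 st, 101.7 st
    print(mapDayOfWeekToOrdinalFieldName(0))  # mon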
|
from django.urls import path
from evap.results import views
app_name = "results"
urlpatterns = [
path("", views.index, name="index"),
path("semester/<int:semester_id>/evaluation/<int:evaluation_id>", views.evaluation_detail, name="evaluation_detail"),
path("evaluation/<int:evaluation_id>/text_answers_export", views.evaluation_text_answers_export, name="evaluation_text_answers_export"),
]
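# Hedged usage sketch: assuming this urlconf is included under a "results" prefix in the
# project-level urls.py (not shown here), the named routes above can be reversed like so:
#
#   from django.urls import reverse
#   reverse("results:index")
#   reverse("results:evaluation_detail", kwargs={"semester_id": 1, "evaluation_id": 2})
#   reverse("results:evaluation_text_answers_export", kwargs={"evaluation_id": 2})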
|