content stringlengths 5 1.05M |
|---|
from flask import Flask, render_template, Response
from imutils.video import VideoStream
import cv2
import time
import os
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import numpy as np
import imutils
from s5_test import MultiPersonClassifier
import datetime
if True: # Include project path
import sys
import os
ROOT = os.path.dirname(os.path.abspath(__file__))+"/../"
CURR_PATH = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.append(ROOT)
import utils.lib_images_io as lib_images_io
import utils.lib_plot as lib_plot
import utils.lib_commons as lib_commons
from utils.lib_openpose import SkeletonDetector
from utils.lib_tracker import Tracker
from utils.lib_tracker import Tracker
from utils.lib_classifier import ClassifierOnlineTest
from utils.lib_classifier import *
###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===
def gen(duration, folder_name):
    """Yield multipart-JPEG frames for a Flask plank-exercise coaching stream.

    Flow per session:
      1. Stage 1 "countdown": draw a 3-second countdown on webcam frames.
      2. Stage 2 "timing & counting": for each frame, detect skeletons with
         OpenPose, classify the plank posture ('correct' / 'high-hip' /
         'low-hip'), overlay feedback text, and record the annotated frames.
      3. When `timer` reaches `duration`: stop recording, write the per-class
         percentages to a text file, transcode the raw video with ffmpeg,
         and end the generator.

    duration -- seconds of exercise to time once the countdown has finished.
    folder_name -- output folder name under src/static/ for video + results.

    NOTE(review): indentation below was reconstructed from a
    whitespace-mangled dump of this file; nesting reflects the apparent
    control flow and should be confirmed against the original source.
    """
    #ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢
    timer=0                   # seconds elapsed in the exercise phase
    timer_countdown = int(3)  # on-screen countdown value before timing starts
    total = 0                 # frames in which a person was classified
    correct = 0               # per-class frame counters
    high_hip = 0
    low_hip =0
    img_num=0                 # processed-frame counter (overlay/logging only)
    ##########################################
    #input_folder_name = lib_commons.get_time_string()
    output_folder_name = folder_name
    #input_folder_path = "src/data/"+ input_folder_name
    #if not os.path.exists(input_folder_path):
    #os.makedirs(input_folder_path)
    SRC_DATA_TYPE = "folder"
    SRC_MODEL_PATH = "model/trained_classifier_plank_1223.pickle"
    #SDC_DATA_PATH = "src/data/"+ input_folder_name +"/"
    DST_FOLDER = "src/static" + "/" +output_folder_name + "/"
    if not os.path.exists(DST_FOLDER):
        os.makedirs(DST_FOLDER)
    #os.makedirs(DST_FOLDER, exist_ok=True)
    DST_VIDEO_NAME = "rawplank.mp4"
    # -- Settings
    cfg_all = lib_commons.read_yaml(ROOT + "config/config.yaml")
    cfg = cfg_all["s5_test.py"]
    #CLASSES = np.array(cfg_all["classes"])
    # Plank-specific labels (overrides the generic classes from config).
    CLASSES = np.array(['correct', 'high-hip','low-hip'])
    SKELETON_FILENAME_FORMAT = cfg_all["skeleton_filename_format"]
    # Action recognition: number of frames used to extract features.
    WINDOW_SIZE = int(cfg_all["features"]["window_size"])
    # Output folder
    #DST_FOLDER = args.output_folder + "/" + DST_FOLDER_NAME + "/"
    DST_SKELETON_FOLDER_NAME = cfg["output"]["skeleton_folder_name"]
    #DST_VIDEO_NAME = cfg["output"]["video_name"]
    # framerate of output video.avi
    #DST_VIDEO_FPS = float(cfg["output"]["video_fps"])
    DST_VIDEO_FPS = 8.7
    # Video setttings
    # If data_type is webcam, set the max frame rate.
    SRC_WEBCAM_MAX_FPS = float(cfg["settings"]["source"]
                               ["webcam_max_framerate"])
    # If data_type is video, set the sampling interval.
    # For example, if it's 3, then the video will be read 3 times faster.
    SRC_VIDEO_SAMPLE_INTERVAL = int(cfg["settings"]["source"]
                                    ["video_sample_interval"])
    # Openpose settings
    OPENPOSE_MODEL = cfg["settings"]["openpose"]["model"]
    OPENPOSE_IMG_SIZE = cfg["settings"]["openpose"]["img_size"]
    # Display settings
    img_disp_desired_rows = int(cfg["settings"]["display"]["desired_rows"])
    #####################################################
    #functions for openpose
    def select_images_loader(src_data_type, src_data_path):
        # Build an image source for the given data type.
        # NOTE(review): appears unused in this function — kept for parity
        # with s5_test.py; confirm before removing.
        if src_data_type == "video":
            images_loader = lib_images_io.ReadFromVideo(
                src_data_path,
                sample_interval=SRC_VIDEO_SAMPLE_INTERVAL)
        elif src_data_type == "folder":
            images_loader = lib_images_io.ReadFromFolder(
                folder_path=src_data_path)
        elif src_data_type == "webcam":
            if src_data_path == "":
                webcam_idx = 0
            elif src_data_path.isdigit():
                webcam_idx = int(src_data_path)
            else:
                webcam_idx = src_data_path
            images_loader = lib_images_io.ReadFromWebcam(
                SRC_WEBCAM_MAX_FPS, webcam_idx)
        return images_loader
    def remove_skeletons_with_few_joints(skeletons):
        ''' Remove bad skeletons before sending to the tracker '''
        # NOTE(review): only referenced from commented-out code below.
        good_skeletons = []
        for skeleton in skeletons:
            # Even indices are x coords, odd are y coords of the first 13 joints.
            px = skeleton[2:2+13*2:2]
            py = skeleton[3:2+13*2:2]
            num_valid_joints = len([x for x in px if x != 0])
            num_leg_joints = len([x for x in px[-6:] if x != 0])
            total_size = max(py) - min(py)
            # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
            # IF JOINTS ARE MISSING, TRY CHANGING THESE VALUES:
            # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
            if num_valid_joints >= 5 and total_size >= 0.1 and num_leg_joints >= 0:
                # add this skeleton only when all requirements are satisfied
                good_skeletons.append(skeleton)
        return good_skeletons
    def draw_result_img(img_disp, img_num, humans, dict_id2skeleton,
                        skeleton_detector, multiperson_classifier):
        ''' Draw skeletons, labels, and prediction scores onto image for display '''
        # NOTE(review): reads gen's locals `dict_id2label` and `scale_h`
        # through the closure; they must be assigned before this is called.
        # Resize to a proper size for display
        r, c = img_disp.shape[0:2]
        desired_cols = int(1.0 * c * (img_disp_desired_rows / r))
        img_disp = cv2.resize(img_disp,
                              dsize=(desired_cols, img_disp_desired_rows))
        # Draw all people's skeleton
        skeleton_detector.draw(img_disp, humans)
        # Draw bounding box and label of each person
        if len(dict_id2skeleton):
            for id, label in dict_id2label.items():
                skeleton = dict_id2skeleton[id]
                # scale the y data back to original
                skeleton[1::2] = skeleton[1::2] / scale_h
                # print("Drawing skeleton: ", dict_id2skeleton[id], "with label:", label, ".")
                lib_plot.draw_action_result(img_disp, id, skeleton, label)
        # Add blank to the left for displaying prediction scores of each class
        img_disp = lib_plot.add_white_region_to_left_of_image(img_disp)
        cv2.putText(img_disp, "Frame:" + str(img_num),
                    (20, 20), fontScale=1.5, fontFace=cv2.FONT_HERSHEY_PLAIN,
                    color=(0, 0, 0), thickness=2)
        # Draw predicting score for only 1 person
        if len(dict_id2skeleton):
            classifier_of_a_person = multiperson_classifier.get_classifier(
                id='min')
            classifier_of_a_person.draw_scores_onto_image(img_disp)
        return img_disp
    def get_the_skeleton_data_to_save_to_disk(dict_id2skeleton):
        '''
        In each image, for each skeleton, save the:
        human_id, label, and the skeleton positions of length 18*2.
        So the total length per row is 2+36=38
        '''
        # NOTE(review): appears unused in this function; kept for parity.
        skels_to_save = []
        for human_id in dict_id2skeleton.keys():
            label = dict_id2label[human_id]
            skeleton = dict_id2skeleton[human_id]
            skels_to_save.append([[human_id, label] + skeleton.tolist()])
        return skels_to_save
    #ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢ç¢
    cap = VideoStream(src=0).start()
    #variables for openpose
    skeleton_detector = SkeletonDetector(OPENPOSE_MODEL, OPENPOSE_IMG_SIZE)
    multiperson_tracker = Tracker()
    multiperson_classifier = MultiPersonClassifier(SRC_MODEL_PATH, CLASSES)
    #os.makedirs(DST_FOLDER, exist_ok=True)
    #os.makedirs(DST_FOLDER + DST_SKELETON_FOLDER_NAME, exist_ok=True)
    while True:
        img = cap.read()
        img = cv2.resize(img, (1400, 800))
        #__________________stage 2 (timing&counting)________________________________________________________________
        # Stage 2 only runs after the countdown has gone below zero.
        if timer_countdown < 0:
            frame = cv2.resize(img,(432,368))  # OpenPose input resolution
            img_num+=1
            ### code for openpose
            img_disp = frame.copy()
            print(f"\nProcessing image {img_num} ...")
            # -- Detect skeletons
            humans = skeleton_detector.detect(frame)
            skeletons, scale_h = skeleton_detector.humans_to_skels_list(humans)
            #skeletons = remove_skeletons_with_few_joints(skeletons)
            # -- Track people
            dict_id2skeleton = multiperson_tracker.track(
                skeletons)  # int id -> np.array() skeleton
            # -- Recognize action of each person
            if len(dict_id2skeleton):
                dict_id2label = multiperson_classifier.classify(
                    dict_id2skeleton)
            # -- Draw
            img_disp = draw_result_img(img_disp, img_num, humans, dict_id2skeleton,
                                       skeleton_detector, multiperson_classifier)
            # Print label of a person
            if len(dict_id2skeleton):
                min_id = min(dict_id2skeleton.keys())
                print("prediced label is :", dict_id2label[min_id])
                total +=1
                # Overlay feedback; `font` is defined during the countdown
                # loop, which always runs before the first stage-2 frame.
                #'plank-correct', 'plank-incorrect-high-hip','plank-incorrect-low-hip'
                if dict_id2label[min_id] == 'correct':
                    cv2.putText(img, "Correct",(800,780), font, #might have to change
                                2, (0,255,0),
                                4, cv2.LINE_AA)
                    correct +=1
                elif dict_id2label[min_id] == 'high-hip':
                    cv2.putText(img, "High Hip Detected!",
                                (800,780), font, #might have to change
                                2, (0,255,255),
                                4, cv2.LINE_AA)
                    high_hip +=1
                elif dict_id2label[min_id] == 'low-hip':
                    cv2.putText(img, "Low Hip Detected!",
                                (800,780), font, #might have to change
                                2, (0,0,255),
                                4, cv2.LINE_AA)
                    low_hip +=1
            # Record the annotated OpenPose frame (not the big overlay frame).
            video_writer.write(img_disp)
            #∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞ timing ∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞∞
            cur_time=time.time()
            timer = cur_time - pre_time  # elapsed since the countdown ended
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(round(timer,2)),
                        (100, 780), font,
                        2, (100, 100, 255),
                        4, cv2.LINE_AA)
        #------------------stage 2 (timing&counting)----------------------------------------------------------------
        previous = time.time()
        #____________________stage 1 (countdown)____________________________________________________________________
        # Runs only until timer_countdown drops below 0; afterwards the
        # condition is immediately false on every outer-loop pass.
        while timer_countdown >= 0:
            img = cap.read()
            img = cv2.resize(img, (1400, 800))
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(timer_countdown),
                        (650, 450), font,
                        6, (200, 0, 255),
                        10, cv2.LINE_AA)
            ret, jpeg = cv2.imencode('.jpg', img)
            frame = jpeg.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
            current = time.time()
            if current-previous >= 1:
                previous = current
                timer_countdown = timer_countdown-1
            # Re-assigned on every countdown pass, so after the countdown
            # these hold the values from the final pass: the exercise start
            # time and the writer for the raw session video.
            # NOTE(review): placement reconstructed — creating the writer
            # once per countdown frame re-opens the output file; confirm.
            pre_time=time.time()
            video_writer = lib_images_io.VideoWriter(DST_FOLDER + DST_VIDEO_NAME, DST_VIDEO_FPS)
        #---------------------stage 1 (countdown)-------------------------------------------------------------------
        # Stream the current (possibly annotated) frame to the client.
        ret, jpeg = cv2.imencode('.jpg', img)
        frame = jpeg.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
        if timer >= duration:
            # Session over: finalize video, report stats, transcode, stop.
            video_writer.stop()
            cv2.putText(img, "FINISHED!",
                        (100,450),font,
                        8, (0,0,255),
                        10, cv2.LINE_AA)
            ret, jpeg = cv2.imencode('.jpg', img)
            frame = jpeg.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
            # Percentages are relative to frames with a classified person,
            # not to all captured frames (`total` would include no-response).
            total_without_no_response = correct + high_hip + low_hip
            # correct_percentage = round(correct/total*100,2)
            # high_hip_percentage = round(high_hip/total*100,2)
            # low_hip_percentage = round(low_hip/total*100,2)
            correct_percentage = round(correct/total_without_no_response*100)
            high_hip_percentage = round(high_hip/total_without_no_response*100)
            low_hip_percentage = round(low_hip/total_without_no_response*100)
            print(f"Correct Percentage: {correct_percentage}")
            result_dict = {'correct_percentage':correct_percentage,
                           'high_hip_percentage':high_hip_percentage,
                           'low_hip_percentage':low_hip_percentage}
            # Persist the summary next to the recorded video.
            with open(DST_FOLDER+output_folder_name+'.txt','w') as f:
                f.write(str(result_dict))
            #try:
            #print(f"High Hip: {high_hip_percnetage}")
            #print(f"Low Hip: {low_hip_percnetage}")
            #except:
            #pass
            # Re-encode with H.264 so browsers can play the result inline.
            ORIGINAL_VIDEO_NAME = DST_VIDEO_NAME
            MOD_VIDEO_NAME = "plank.mp4"
            #DST_VIDEO_NAME_MOD = "modaction_squats_"+str(folder_num) +".mp4"
            os.system(f"ffmpeg -i {DST_FOLDER + ORIGINAL_VIDEO_NAME} -vcodec libx264 {DST_FOLDER + MOD_VIDEO_NAME}")
            break
###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===###===
|
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
"""Functionality for working with hyperchunk specifications (collections of array/attribute/slice information)."""
import numbers
import numpy
import cherrypy
import slycat.hyperchunks.grammar
def parse(string):
    """Convert a hyperchunks string into its parsed object representation.

    Parameters
    ----------
    string: string representation of a hyperchunk.

    Returns
    -------
    hyperchunks: parsed representation of a hyperchunk.
    """
    parse_result = slycat.hyperchunks.grammar.hyperchunks_p.parseString(string, parseAll=True)
    return slycat.hyperchunks.grammar.Hyperchunks(parse_result.asList())
def arrays(hyperchunks, array_count):
    """Iterate over the arrays in a set of hyperchunks.

    Yields one `Array` view object per resolved array index.  `array_count`
    is the total number of arrays available, used to resolve negative
    indices, slices, and `...` into concrete ranges.
    """
    class Attribute(object):
        # Lightweight view over one attribute expression plus its hyperslices.
        def __init__(self, expression, hyperslices):
            self._expression = expression
            self._hyperslices = hyperslices
        @property
        def expression(self):
            return self._expression
        @property
        def hyperslice_count(self):
            return 0 if self._hyperslices is None else len(self._hyperslices)
        def hyperslices(self):
            """Iterate over the hyperslices in a hyperchunk."""
            if self._hyperslices is not None:
                for hyperslice in self._hyperslices:
                    yield tuple(hyperslice)
    class Array(object):
        # Lightweight view over one array index within a hyperchunk.
        def __init__(self, index, attributes, order, hyperslices):
            self._index = index
            self._attributes = attributes
            self._order = order
            self._hyperslices = hyperslices
        @property
        def index(self):
            return self._index
        @property
        def attribute_count(self):
            return 0 if self._attributes is None else len(self._attributes)
        @property
        def order(self):
            return self._order
        def attributes(self, attribute_count):
            """Iterate over the attributes in a hyperchunk."""
            if self._attributes is not None:
                for attributes in self._attributes:
                    if isinstance(attributes, (numbers.Integral, type(Ellipsis), slice)):
                        # Normalize int / Ellipsis to a slice, then expand it.
                        if isinstance(attributes, numbers.Integral):
                            if attributes < 0:
                                attributes = slice(attribute_count + attributes, attribute_count + attributes + 1)
                            else:
                                attributes = slice(attributes, attributes + 1)
                        elif isinstance(attributes, type(Ellipsis)):
                            attributes = slice(0, attribute_count)
                        start, stop, step = attributes.indices(attribute_count)
                        for index in numpy.arange(start, stop, step):
                            yield Attribute(slycat.hyperchunks.grammar.AttributeIndex(index), self._hyperslices)
                    else:
                        # Grammar expression (e.g. function call) — pass through.
                        yield Attribute(attributes, self._hyperslices)
    for hyperchunk in hyperchunks:
        # NOTE: loop variable deliberately shadows the enclosing function name.
        for arrays in hyperchunk.arrays:
            if isinstance(arrays, (numbers.Integral, type(Ellipsis), slice)):
                # Normalize int / Ellipsis to a slice, then expand it.
                if isinstance(arrays, numbers.Integral):
                    if arrays < 0:
                        arrays = slice(array_count + arrays, array_count + arrays + 1)
                    else:
                        arrays = slice(arrays, arrays + 1)
                elif isinstance(arrays, type(Ellipsis)):
                    arrays = slice(0, array_count)
                start, stop, step = arrays.indices(array_count)
                for index in numpy.arange(start, stop, step):
                    yield Array(index, hyperchunk.attributes, hyperchunk.order, hyperchunk.hyperslices)
            else:
                cherrypy.log.error("hyperchunks.__init__.py", "Unexpected array: %r" % arrays)
                raise ValueError("Unexpected array: %r" % arrays)
def tostring(value):
    """Convert hyperchunks to their string representation.

    Recursively renders grammar objects and plain Python values back into
    the hyperchunk string syntax.  Raises ValueError for unknown types.
    Note: the isinstance checks are ordered — grammar types first, then
    plain Python values.
    """
    if isinstance(value, slycat.hyperchunks.grammar.Arrays):
        return "|".join([tostring(array) for array in value])
    if isinstance(value, slycat.hyperchunks.grammar.Attributes):
        return "|".join([tostring(array) for array in value])
    if isinstance(value, slycat.hyperchunks.grammar.AttributeIndex):
        return "a%s" % value.index
    if isinstance(value, slycat.hyperchunks.grammar.BinaryOperator):
        return "(" + (" %s " % value.operator).join([tostring(operand) for operand in value.operands]) + ")"
    if isinstance(value, slycat.hyperchunks.grammar.FunctionCall):
        return "%s(%s)" % (value.name, ", ".join([tostring(arg) for arg in value.args]))
    if isinstance(value, slycat.hyperchunks.grammar.Hyperchunk):
        # Sections are arrays / attributes / order / hyperslices, "/"-joined.
        sections = []
        sections.append(tostring(value.arrays))
        if value.attributes is not None:
            sections.append(tostring(value.attributes))
        if value.order is not None:
            sections.append("order:" + tostring(value.order))
        if value.hyperslices is not None:
            sections.append(tostring(value.hyperslices))
        return "/".join(sections)
    if isinstance(value, slycat.hyperchunks.grammar.Hyperchunks):
        return ";".join([tostring(hyperchunk) for hyperchunk in value])
    if isinstance(value, slycat.hyperchunks.grammar.Hyperslices):
        return "|".join([tostring(array) for array in value])
    if isinstance(value, slycat.hyperchunks.grammar.Hyperslice):
        return ",".join([tostring(hyperslice) for hyperslice in value])
    if isinstance(value, slycat.hyperchunks.grammar.List):
        return "[%s]" % ", ".join([tostring(item) for item in value.values])
    if isinstance(value, int):
        return repr(value)
    if isinstance(value, float):
        return repr(value)
    if isinstance(value, str):
        return '"%s"' % value
    if isinstance(value, type(Ellipsis)):
        return "..."
    if isinstance(value, slice):
        # Render as start:stop[:step], with empty fields for None bounds.
        return ("%s:%s" % ("" if value.start is None else value.start, "" if value.stop is None else value.stop)) + ("" if value.step is None else ":%s" % value.step)
    cherrypy.log.error("hyperchunks.__init__.py", "Unknown value: %s" % value)
    raise ValueError("Unknown value: %s" % value)
|
__author__ = "Sergey Aganezov"
__email__ = "aganezov(at)cs.jhu.edu"
__status__ = "production"
import unittest
from bg.genome import BGGenome, BGGenome_JSON_SCHEMA_JSON_KEY, post_load
class BGGenomeTestCase(unittest.TestCase):
    """Unit tests for BGGenome: construction, hashing, equality, ordering,
    and JSON (de)serialization, including schema subclassing."""
    def test_initialization_incorrect(self):
        # empty genomes are not allowed, a name for genome is mandatory
        with self.assertRaises(TypeError):
            g = BGGenome()
    def test_initialization(self):
        # simple correct initialization
        g = BGGenome("name")
        self.assertEqual(g.name, "name")
    def test_hash(self):
        # hash of a genome instance is proxied to the hash value of its name
        g = BGGenome("name")
        self.assertEqual(hash(g), hash("name"))
    def test_json_id(self):
        # json id for genome is utilized when genome is serialized to json format and equals to hash value of genome instance
        g = BGGenome("name")
        json_id = g.json_id
        self.assertEqual(json_id, hash(g.name))
        self.assertTrue(isinstance(json_id, int))
        # changing the name must change the derived json id
        g.name = "name1"
        new_json_id = g.json_id
        self.assertEqual(new_json_id, hash(g.name))
        self.assertTrue(isinstance(json_id, int))
        self.assertNotEqual(json_id, new_json_id)
    def test__eq__(self):
        # two genomes are called equal if they are both of the same class and their hash values are equal
        g1 = BGGenome("name1")
        g2 = BGGenome("name2")
        self.assertNotEqual(g1, g2)
        g2.name = "name1"
        self.assertEqual(g1, g2)
        # non-BGGenome objects are never equal to a genome
        self.assertNotEqual(g1, 5)
        self.assertNotEqual(g1, "name1")
        self.assertNotEqual(g1, [g1])
    def test_json_serialization_no_subclassing(self):
        # genome can be serialized into json format keeping all important information
        g = BGGenome("name1")
        ref_result = {
            "name": "name1",
            "g_id": g.json_id
        }
        self.assertDictEqual(g.to_json(schema_info=False), ref_result)
        # with schema info enabled, the schema name key is added
        ref_result[BGGenome_JSON_SCHEMA_JSON_KEY] = g.json_schema_name
        self.assertDictEqual(g.to_json(), ref_result)
    def test_json_deserialization_no_subclassing(self):
        # simple case
        json_object = {
            "name": "name1",
            "g_id": 1
        }
        result = BGGenome.from_json(data=json_object)
        self.assertEqual(result.name, "name1")
        # g_id is not mandatory for genome deserialization itself, but is required by the supervising class
        self.assertEqual(BGGenome.from_json(data={"name": "name1"}).name, "name1")
        # BGGenome scheme info shall be ignored at this level, as it is supplied by the supervising class
        self.assertEqual(BGGenome.from_json(data={"name": "name1",
                                                  BGGenome_JSON_SCHEMA_JSON_KEY: "lalal"}).name, "name1")
        # error case when "name" is not present
        with self.assertRaises(ValueError):
            BGGenome.from_json(data={})
    def test_json_deserialization_subclassing(self):
        # being provided an explicit JSONSchema, it shall be utilized for json deserialization
        class BGGenomeJSONSchemaNameOptional(BGGenome.BGGenomeJSONSchema):
            # schema variant that falls back to a default name when missing
            @post_load
            def make_object(self, data):
                if "name" not in data:
                    data["name"] = "default_name"
                return super(BGGenomeJSONSchemaNameOptional, self).make_object(data=data)
        self.assertEqual(BGGenome.from_json(data={}, json_schema_class=BGGenomeJSONSchemaNameOptional).name, "default_name")
    def test__lt__(self):
        # genome is less than any non BGGenome instance
        # with other BGGenome instance it is compared by respective "name" attributes
        g1 = BGGenome("genome1")
        g2 = BGGenome("genome2")
        self.assertLess(g1, g2)
        self.assertGreater(g2, g1)
        g1 = BGGenome("genome1")
        g2 = BGGenome("genome")
        self.assertGreater(g1, g2)
        self.assertLess(g2, g1)
        # BGGenome is always smaller than non-BGGenome objects
        objects_to_compare_to = [1, (1,), [1], "a"]
        for object_to_compare_to in objects_to_compare_to:
            self.assertLess(g1, object_to_compare_to)
            self.assertLess(g2, object_to_compare_to)
    def test__le__(self):
        # Genome is considered less or equal to any other BGGenome if it is either less ("<" implementation)
        # or equal (__eq__ implementation), than supplied argument
        g1 = BGGenome("genome1")
        g2 = BGGenome("genome1")
        self.assertLessEqual(g1, g2)
        self.assertLessEqual(g2, g1)
        self.assertTrue(g1 <= g2 <= g1)
        g3 = BGGenome("genome")
        self.assertLessEqual(g3, g1)
        self.assertLessEqual(g3, g1)
# Allow running this test module directly (python test_genome.py).
if __name__ == '__main__':
    unittest.main()
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import salt.exceptions
import saltext.vmware.utils.common as utils_common
import saltext.vmware.utils.datacenter as utils_datacenter
import saltext.vmware.utils.vmware as utils_vmware
from saltext.vmware.utils.connect import get_service_instance
log = logging.getLogger(__name__)
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
__virtualname__ = "vmware_dvswitch"
__proxyenabled__ = ["vmware_dvswitch"]
def __virtual__():
    """Salt loader hook: expose this module under its virtual name.

    Returns the virtual module name when pyVmomi imported successfully,
    otherwise a (False, reason) pair so Salt skips loading the module.
    """
    if HAS_PYVMOMI:
        return __virtualname__
    return False, "Unable to import pyVmomi module."
def _get_switch_config_spec(service_instance, datacenter_name, switch_name):
    """Resolve a datacenter and, if present, an existing DVS plus a config spec.

    Returns a ``(dc_ref, switch_ref, config_spec)`` triple.  When no switch
    named ``switch_name`` exists in the datacenter, ``switch_ref`` and
    ``config_spec`` are both None; otherwise ``config_spec`` is a fresh
    ConfigSpec pre-populated from the live switch configuration so callers
    can modify and resubmit it.
    """
    dc_ref = switch_ref = config_spec = None
    dc_ref = utils_datacenter.get_datacenter(service_instance, datacenter_name)
    switch_refs = utils_vmware.get_dvss(dc_ref=dc_ref, dvs_names=[switch_name])
    if switch_refs:
        switch_ref = switch_refs[0]
        dvs_props = utils_common.get_properties_of_managed_object(
            switch_ref, ["config", "capability"]
        )
        config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        # Copy all of the properties in the config of the DVS to a
        # DvsConfigSpec, skipping host membership (managed separately).
        skipped_properties = ["host"]
        for prop in config_spec.__dict__.keys():
            if prop in skipped_properties:
                continue
            if hasattr(dvs_props["config"], prop):
                setattr(config_spec, prop, getattr(dvs_props["config"], prop))
    return dc_ref, switch_ref, config_spec
def manage(
    datacenter_name,
    switch_name,
    uplink_count=None,
    uplink_prefix="Uplink ",
    switch_version=None,
    switch_description=None,
    mtu=None,
    discovery_protocol=None,
    discovery_operation=None,
    multicast_filtering_mode=None,
    contact_name=None,
    contact_description=None,
    network_forged_transmits=None,
    network_mac_changes=None,
    network_promiscuous=None,
    health_check_teaming_failover=None,
    health_teaming_failover_interval=None,
    health_vlan_mtu=None,
    health_vlan_mtu_interval=None,
    service_instance=None,
):
    """
    Creates a new distributed vSwitch or updates an existing vSwitch.

    datacenter_name
        Name of the datacenter that contains (or will contain) the switch.

    switch_name
        Name of the distributed vSwitch to create or update.

    uplink_count
        Count of uplink per ESXi per host. Optional.

    uplink_prefix
        The prefix to be used for uplinks. Optional. Default: "Uplink ".

    switch_version
        The version of the distributed vSwitch to create or update. Optional.

    switch_description
        Description of the distributed vSwitch. Optional. Default: None.

    mtu
        Maximum transmission unit for the switch. Optional.

    discovery_protocol
        Link discovery protocol between Cisco and Link Layer discovery. Optional. Valid values: "cdp", "lldp", "disabled".

    discovery_operation
        Discovery operation for the switch. Optional. Valid values: "both", "advertise", "listen".

    multicast_filtering_mode
        Multicast filtering mode for the switch. Optional. Valid values: "basic", "snooping".

    contact_name
        Administrator contact name. Optional. Default: "".

    contact_description
        Administrator contact information. Optional. Default: "".

    network_forged_transmits
        Allow forged transmits. Type: Boolean. Optional. Valid values: "True", "False".

    network_mac_changes
        Allow mac changes. Type: Boolean. Optional. Valid values: "True", "False".

    network_promiscuous
        Allow promiscuous mode. Type: Boolean. Optional. Valid values: "True", "False".

    health_check_teaming_failover
        Enable teaming and failover health check. Type: Boolean. Optional. Valid values: "True", "False".

    health_teaming_failover_interval
        Teaming and failover health check interval in minutes. Optional.

    health_vlan_mtu
        Enable VLAN and MTU health check. Type: Boolean. Optional. Valid values: "True", "False".

    health_vlan_mtu_interval
        VLAN and MTU health check interval in minutes. Optional.

    service_instance
        Use this vCenter service connection instance instead of creating a new one. Optional.

    Returns True on success; raises salt.exceptions.SaltException on any
    vSphere API failure.

    NOTE(review): indentation of this body was reconstructed from a
    whitespace-mangled dump; nesting reflects the apparent logic and should
    be confirmed against the original source.

    .. code-block:: bash

        salt '*' vmware_dvswitch.manage dvs1
    """
    if not service_instance:
        service_instance = get_service_instance(opts=__opts__, pillar=__pillar__)
    try:
        health_spec = product_spec = spec = None
        dc_ref, switch_ref, config_spec = _get_switch_config_spec(
            service_instance=service_instance,
            datacenter_name=datacenter_name,
            switch_name=switch_name,
        )
        # No existing switch: build a CreateSpec and work on its configSpec.
        if not switch_ref:
            spec = vim.DistributedVirtualSwitch.CreateSpec()
            spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
            config_spec = spec.configSpec
            config_spec.name = switch_name
        if mtu:
            config_spec.maxMtu = mtu
        if uplink_count:
            # Generate "Uplink 1" .. "Uplink N" style uplink port names.
            config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
            for i in range(uplink_count):
                config_spec.uplinkPortPolicy.uplinkPortName.append(
                    "{}{}".format(uplink_prefix, i + 1)
                )
        if switch_version:
            product_spec = vim.dvs.ProductSpec()
            product_spec.version = switch_version
            # On create, the version rides along in the CreateSpec; on
            # update it is applied separately below.
            if spec:
                spec.productInfo = product_spec
        if switch_description:
            config_spec.description = switch_description
        if contact_name or contact_description:
            contact_info_spec = vim.DistributedVirtualSwitch.ContactInfo()
            contact_info_spec.contact = contact_description
            contact_info_spec.name = contact_name
            # For an existing switch, keep current values for any field the
            # caller did not supply.
            if switch_ref:
                contact_info_spec.contact = contact_description or switch_ref.config.contact.contact
                contact_info_spec.name = contact_name or switch_ref.config.contact.name
            config_spec.contact = contact_info_spec
        if discovery_operation or discovery_protocol:
            ldp_config_spec = vim.host.LinkDiscoveryProtocolConfig()
            ldp_config_spec.operation = discovery_operation
            ldp_config_spec.protocol = discovery_protocol
            # For an existing switch, backfill unspecified fields from the
            # current link discovery configuration.
            if switch_ref:
                ldp_config_spec.protocol = (
                    discovery_protocol or switch_ref.config.linkDiscoveryProtocolConfig.protocol
                )
                ldp_config_spec.operation = (
                    discovery_operation or switch_ref.config.linkDiscoveryProtocolConfig.operation
                )
            # "disabled" maps to cdp/none in the vSphere API.
            if discovery_protocol == "disabled":
                ldp_config_spec.protocol = "cdp"
                ldp_config_spec.operation = "none"
            config_spec.linkDiscoveryProtocolConfig = ldp_config_spec
        if multicast_filtering_mode:
            # The UI name "basic" corresponds to the API's "legacyFiltering".
            if multicast_filtering_mode == "basic":
                config_spec.multicastFilteringMode = "legacyFiltering"
            else:
                config_spec.multicastFilteringMode = multicast_filtering_mode
        if not switch_ref:
            utils_vmware.create_dvs(dc_ref=dc_ref, dvs_name=switch_name, dvs_create_spec=spec)
        if (
            network_promiscuous is not None
            or network_mac_changes is not None
            or network_forged_transmits is not None
        ):
            policy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
            # If the switch was just created, re-fetch its ref/config so the
            # security policy can be applied as an update.
            if not switch_ref:
                dc_ref, switch_ref, config_spec = _get_switch_config_spec(
                    service_instance=service_instance,
                    datacenter_name=datacenter_name,
                    switch_name=switch_name,
                )
            # Backfill unspecified security flags from the current policy.
            if network_promiscuous is None:
                network_promiscuous = (
                    switch_ref.config.defaultPortConfig.securityPolicy.allowPromiscuous.value
                )
            if network_mac_changes is None:
                network_mac_changes = (
                    switch_ref.config.defaultPortConfig.securityPolicy.macChanges.value
                )
            if network_forged_transmits is None:
                network_forged_transmits = (
                    switch_ref.config.defaultPortConfig.securityPolicy.forgedTransmits.value
                )
            if network_promiscuous is not None:
                policy.allowPromiscuous = vim.BoolPolicy(value=network_promiscuous)
            if network_mac_changes is not None:
                policy.macChanges = vim.BoolPolicy(value=network_mac_changes)
            if network_forged_transmits is not None:
                policy.forgedTransmits = vim.BoolPolicy(value=network_forged_transmits)
            config_spec.defaultPortConfig.securityPolicy = policy
        if (
            health_check_teaming_failover is not None
            or health_teaming_failover_interval is not None
            or health_vlan_mtu is not None
            or health_vlan_mtu_interval is not None
        ):
            # If the switch was just created, re-fetch it before touching
            # the health-check configuration.
            if not switch_ref:
                dc_ref, switch_ref, config_spec = _get_switch_config_spec(
                    service_instance=service_instance,
                    datacenter_name=datacenter_name,
                    switch_name=switch_name,
                )
            # Start from the switch's current health-check entries and
            # overwrite only the fields the caller supplied.
            health_spec = vim.DistributedVirtualSwitch.HealthCheckConfig.Array()
            for config in switch_ref.config.healthCheckConfig:
                if isinstance(
                    config, vim.dvs.VmwareDistributedVirtualSwitch.VlanMtuHealthCheckConfig
                ):
                    if health_vlan_mtu is not None:
                        config.enable = health_vlan_mtu
                    if health_vlan_mtu_interval is not None:
                        config.interval = health_vlan_mtu_interval
                    health_spec.append(config)
                if isinstance(
                    config, vim.dvs.VmwareDistributedVirtualSwitch.TeamingHealthCheckConfig
                ):
                    if health_check_teaming_failover is not None:
                        config.enable = health_check_teaming_failover
                    if health_teaming_failover_interval is not None:
                        config.interval = health_teaming_failover_interval
                    health_spec.append(config)
        # Apply accumulated updates to an existing (or just-created and
        # re-fetched) switch.
        if switch_ref:
            utils_vmware.update_dvs(dvs_ref=switch_ref, dvs_config_spec=config_spec)
            if product_spec:
                utils_vmware.update_dvs_version(dvs_ref=switch_ref, dvs_product_spec=product_spec)
            if health_spec:
                utils_vmware.update_dvs_health(dvs_ref=switch_ref, dvs_health_spec=health_spec)
        return True
    except (
        vim.fault.DvsFault,
        vmodl.fault.NotSupported,
        salt.exceptions.VMwareApiError,
        vmodl.RuntimeFault,
        vmodl.MethodFault,
    ) as exc:
        # Normalize all vSphere API failures into a single Salt exception.
        raise salt.exceptions.SaltException(str(exc))
|
import os
import glob
from flask import Flask
from flask import jsonify
from flask import request, render_template
from webapp import app
#from model.util import *
from SigNet import main1, getpredictions
# MIME types accepted for uploaded signature images.
valid_mimetypes = ['image/jpeg', 'image/png', 'image/tiff']
# NOTE(review): a `global` statement at module scope is a no-op; `model`
# is only actually created when a function assigns it (see home1).
global model
# def get_predictions(img_name):
# #TODO
# return {
# "bboxes":
# [
# {"x1": 10, "x2": 50, "y1": 10, "y2": 50}
# ],
# }
@app.route('/home')
def home1():
    """Render the welcome page, loading the SigNet model as a side effect.

    BUG FIX: the previous code assigned ``model = main1([])`` to a *local*
    variable (the module-level ``global model`` statement has no effect on
    function scope), so the loaded model was discarded on return.  Declaring
    ``global model`` here makes the loaded model visible module-wide.
    """
    global model
    model = main1([])
    return render_template('welcome.html')
@app.route('/load')
def index():
    """Serve the main signature upload/verification page."""
    return render_template('index.html')
from PIL import Image
import numpy as np
@app.route('/predict', methods=['POST'])
def predict():
    """Score an uploaded signature image for a customer.

    Expects a multipart POST with a 'customer ID' form field and a 'file'
    image part.  Saves the image into the upload folder, runs the SigNet
    prediction against the stored reference, and returns the verdict
    (" Original " / " Forgery ", or the raw result) as JSON.
    """
    if request.method == 'POST':
        custid = request.form['customer ID']
        # Reject requests without an image part.
        if 'file' not in request.files:
            return jsonify({'error': 'no file'}), 400
        # Pull the upload and its metadata.
        img_file = request.files.get('file')
        img_name = img_file.filename
        mimetype = img_file.content_type
        print (img_file)
        # Only JPEG/PNG/TIFF uploads are accepted.
        if mimetype not in valid_mimetypes:
            return jsonify({'error': 'bad-type'})
        # Persist the image so the model can read it from disk.
        img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
        # Run the prediction and map the numeric verdict to display text.
        results = getpredictions(img_name, custid)
        if results == 1:
            results = " Original "
        elif results == 0:
            results = " Forgery "
        return jsonify(results)
@app.route('/upload', methods=['POST','GET'])
def upload():
    """Store a reference signature image for a customer.

    GET renders the upload form; POST validates the image, saves it to the
    upload folder, and records it in the sqlite ``dataset`` table via
    insertTable().

    BUG FIX: the previous code checked insertTable()'s return value
    (``== 1`` / ``== 0``) but then unconditionally overwrote the result with
    "Upload Successfully", making the branches dead code — and insertTable()
    returned None anyway.  The dead branches are removed; the observable
    response ("Upload Successfully" on every successful POST) is unchanged.
    """
    if request.method == 'POST':
        custid = request.form['customer ID']
        # Reject requests without an image part.
        if 'file' not in request.files:
            return jsonify({'error': 'no file'}), 400
        # Pull the upload and its metadata.
        img_file = request.files.get('file')
        img_name = img_file.filename
        mimetype = img_file.content_type
        print (img_file)
        # Only JPEG/PNG/TIFF uploads are accepted.
        if mimetype not in valid_mimetypes:
            return jsonify({'error': 'bad-type'})
        # Persist the image, then record it in the database.
        img_file.save(os.path.join(app.config['UPLOAD_FOLDER'], img_name))
        insertTable(custid, img_name, os.path.join(app.config['UPLOAD_FOLDER'], img_name))
        return jsonify("Upload Successfully")
    else:
        return render_template('upload.html')
import sqlite3
def createconnection():
    """Open the signature database (test8.db) and return a fresh cursor.

    Callers reach the underlying connection through ``cursor.connection``
    (e.g. to commit); a new connection is opened on every call.
    """
    connection = sqlite3.connect('test8.db')
    return connection.cursor()
def insertTable(Signatureid, filename, picture_file):
    """Insert one signature image row into the ``dataset`` table.

    Signatureid -- customer/signature identifier (ID column).
    filename -- original upload filename (fileName column).
    picture_file -- path on disk; its bytes are stored as a BLOB.

    Returns 1 on success.  BUG FIX: the previous version returned None,
    although callers (upload) compared the result against 1/0; it also
    shadowed the ``picture_file`` parameter with the open file handle.
    """
    insert_query = """INSERT INTO dataset (ID, fileName,file) VALUES(?,?, ?)"""
    c = createconnection()
    # Read the image bytes with a handle name that does not shadow the param.
    with open(picture_file, 'rb') as picture_fh:
        ablob = picture_fh.read()
    c.execute(insert_query, (Signatureid, filename, ablob))
    c.connection.commit()
    return 1
def get_file_from_db(customer_id):
    """Return all (file BLOB, fileName) rows stored for ``customer_id``."""
    cursor = createconnection()
    try:
        select_fname = """SELECT file,fileName from dataset where ID = ?"""
        cursor.execute(select_fname, (customer_id,))
        return cursor.fetchall()
    finally:
        # A SELECT needs no commit (the original committed anyway); just
        # release the connection, which the original leaked.
        cursor.connection.close()
# Ensure the backing table exists at import time.
CREATE_TABLE = """CREATE TABLE IF NOT EXISTS dataset (ID TEXT,fileName TEXT, file BLOB)"""
cursor = createconnection()
cursor.execute(CREATE_TABLE)
# Commit the DDL.  NOTE(review): the module-level ``cursor`` (and its
# connection) is left open — confirm nothing else relies on it before closing.
cursor.connection.commit()
|
from os import getenv
from sqlalchemy import create_engine, Column, String, Integer, Numeric
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from geoalchemy2 import Geometry
# Database connection settings, read from the environment with empty
# defaults (an empty host/user will fail fast at connect time).
USER = getenv("DB_USER", "")
PASSWORD = getenv("DB_PASSWORD", "")
HOST = getenv("DB_HOST", "")
PORT = getenv("DB_PORT", 0)
NAME = getenv("DB_NAME", "")
# NOTE(review): the "postgres://" scheme was removed in SQLAlchemy 1.4 in
# favour of "postgresql://" — confirm the pinned SQLAlchemy version.
_DB_STRING = f"postgres://{USER}:{PASSWORD}@{HOST}:{PORT}/{NAME}"
db = create_engine(_DB_STRING)
Base = declarative_base()
# A single module-level session shared by the query helpers below.
Session = sessionmaker(db)
session = Session()
class Sismos(Base):
    """ORM mapping for the ``sismos`` (earthquakes) table.

    Column names are Spanish in the database; attribute names are English.
    """
    __tablename__ = 'sismos'
    id = Column('id', Integer, primary_key=True)
    date = Column('fecha', String)
    localtime = Column('hora_local', String)
    lat = Column('latitud', Numeric)
    long = Column('longitud', Numeric)
    localization = Column('localizacion', String)
    depth = Column('profundidad', Numeric)
    magnitude = Column('magnitud', Numeric)
    intensity = Column('intensidad', String)
    # NOTE(review): srid=100000 is not a standard EPSG code — confirm it
    # matches the database schema.
    geom = Column('geom', Geometry(geometry_type="POINT", srid=100000))
def getSismosPointsWithinBoundaries():
    """Return one {"lat", "lgn"} dict per quake intersecting a department."""
    result = session.execute("select sismos.latitud, sismos.longitud from sismos join \"departamentos\" as dept on st_intersects(dept.geom, sismos.geom)")
    rows = result.fetchall()
    result.close()
    # Each row is a (latitud, longitud) pair.
    return [{"lat": float(row[0]), "lgn": float(row[1])} for row in rows]
def getAllSismosPoints(timestamp):
    """Return [timestamp, lat, lng] triples for every quake in the table."""
    result = session.execute("select sismos.latitud, sismos.longitud from sismos;")
    rows = result.fetchall()
    result.close()
    return [[timestamp, float(row[0]), float(row[1])] for row in rows]
#!/usr/bin/env python
# Copyright Contributors to the Testing Farm project.
# SPDX-License-Identifier: Apache-2.0
"""
Generate RST files documenting modules.
"""
import inspect
import os
import sys
import re
import six
import gluetool
LOGGER = gluetool.log.Logging.setup_logger()
OUTPUT_DIR = 'docs/source'
MOD_TEMPLATE = """
``{{ name }}``
{{ title_underline }}
**{{ description }}**
.. automoddesc:: {{ modpath }}.{{ klass }}
:noindex:
Shared functions
----------------
{{ shared_functions }}
Options
-------
.. argparse::
:filename: source/module_parsers.py
:func: get_parser_{{ klass }}
:prog: {{ name }}
"""
SHARED_TEMPLATE = """
.. automethod:: {{ modpath }}.{{ klass }}.{{ shared_name }}
:noindex:
"""
ARGS_TEMPLATE = """
def get_parser_{{ klass }}():
from {{ modpath }} import {{ klass }}
return {{ klass }}._create_args_parser()
"""
def gather_module_data():
    """Collect metadata for every discoverable gluetool module.

    Returns:
        list of dicts with the module name, description, class name, source
        file path, dotted module path and source mtime (the mtime is used to
        skip regenerating up-to-date doc pages).
    """
    LOGGER.info('gathering data on all available modules')
    glue = gluetool.Glue()
    glue.modules = glue.discover_modules()
    cwd = os.getcwd() + '/'
    modules = []
    classes = {}
    for name, properties in six.iteritems(glue.modules):
        # These modules are provided by gluetool, therefore they are not easily importable
        # by Sphinx. Skipping them to allow Sphinx to continue with our local modules.
        if name in ('bash-completion', 'dep-list', 'yaml-pipeline'):
            continue
        klass = properties.klass.__name__
        # Document each class only once, even if several module names share it.
        if klass in classes:
            continue
        classes[klass] = True
        # get file where class is stored
        filepath = inspect.getfile(properties.klass)
        # strip the CWD out
        filepath = filepath.replace(os.path.commonprefix([cwd, filepath]), '')
        modpath = os.path.splitext(filepath)[0].replace('/', '.')
        # strip tox modpath out
        modpath = re.sub(r'\.tox\..*\.site-packages\.', '', modpath)
        modpath = re.sub(r'\.tox\..*\.gluetool_modules_framework.', '', modpath)
        # pylint: disable=line-too-long
        if properties.klass.description:
            description = properties.klass.description
        else:
            description = 'Module did not provide a description'
        # Source files use underscores where module names use dashes.
        filepath = filepath.replace('-', '_')
        try:
            stat = os.stat(filepath)
        except OSError:
            stat = None
        modules.append({
            'name': name,
            'description': description,
            'klass': klass,
            'filepath': filepath,
            'modclass': properties.klass,
            'modpath': modpath,
            # A missing source file sorts as "always newer", so its doc page
            # is always regenerated.
            'filepath_mtime': stat.st_mtime if stat else six.MAXSIZE
        })
    return modules
def write_module_doc(module_data, output_dir):
    """Render the RST documentation page for a single module.

    The page is rewritten only when the module source is newer than the
    existing doc file.
    """
    doc_file = '{}/modules/{}.rst'.format(output_dir, module_data['name'])
    try:
        doc_mtime = os.stat(doc_file).st_mtime
    except BaseException:
        doc_mtime = 0
    if module_data['filepath_mtime'] <= doc_mtime:
        LOGGER.info('skipping module {} because it was not modified'.format(module_data['name']))
        return
    module_data['title_underline'] = '=' * (4 + len(module_data['name']))
    shared_functions = module_data['modclass'].shared_functions
    rendered_shared = ''
    if shared_functions:
        parts = []
        for name in shared_functions:
            # pylint: disable=line-too-long
            parts.append(gluetool.utils.render_template(SHARED_TEMPLATE, shared_name=name, **module_data))
        rendered_shared = '\n'.join(parts)
    module_data['shared_functions'] = rendered_shared
    with open(doc_file, 'w') as f:
        f.write(gluetool.utils.render_template(MOD_TEMPLATE, **module_data))
        f.flush()
    LOGGER.info('module {} doc page written'.format(module_data['name']))
def write_args_parser_getters(modules, output_dir):
    """Write module_parsers.py containing one get_parser_<klass>() per module."""
    parser_path = '{}/module_parsers.py'.format(output_dir)
    with open(parser_path, 'w') as handle:
        handle.write('# pylint: disable=invalid-name,protected-access\n')
        for module_data in modules:
            handle.write(gluetool.utils.render_template(ARGS_TEMPLATE, **module_data) + '\n')
        handle.flush()
def write_index_doc(modules, output_dir):
    """Render modules.rst by filling the modules.txt template with a sorted index."""
    entries = sorted([
        # pylint: disable=line-too-long
        '{}\n'.format(
            gluetool.utils.render_template(
                '`{{ name }} <modules/{{ name }}.html>`_\n\n  {{ description }}\n',
                **module_data
            )
        )
        for module_data in modules
    ])
    with open('docs/source/modules.txt', 'r') as src:
        template = src.read()
    with open('{}/modules.rst'.format(output_dir), 'w') as dst:
        dst.write(template.format(modules='\n'.join(entries)))
        dst.flush()
def main():
    """Generate RST documentation for every discovered module."""
    output_dir = sys.argv[1] if len(sys.argv) > 1 else OUTPUT_DIR
    modules = gather_module_data()
    for module_data in modules:
        write_module_doc(module_data, output_dir)
    write_args_parser_getters(modules, output_dir)
    write_index_doc(modules, output_dir)
if __name__ == '__main__':
    main()
|
# The MIT License (MIT)
#
# Copyright (c) 2021 Chris J Daly (github user cjdaly)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import board, busio, digitalio, time, neopixel
import adafruit_dotstar, adafruit_mcp4728
# MCP4728 DAC on the I2C bus; channel A drives the output voltage.
i2c = busio.I2C(board.SCL, board.SDA)
mcp4728 = adafruit_mcp4728.MCP4728(i2c)
mcp4728.channel_a.value = 50000
# On-board DotStar status LED (single pixel).
dot = adafruit_dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1)
dot[0]=(0,33,0)
# Two external NeoPixels, one per digital input below.
px = neopixel.NeoPixel(board.IO17, 2)
px.fill((0,0,33))
# Two digital inputs with pull-ups: a pulled-low input reads False.
d0 = digitalio.DigitalInOut(board.IO18)
d0.direction=digitalio.Direction.INPUT
d0.pull=digitalio.Pull.UP
d1 = digitalio.DigitalInOut(board.IO12)
d1.direction=digitalio.Direction.INPUT
d1.pull=digitalio.Pull.UP
while True:
    # Recompute the DAC output from the two input states on every pass.
    v_base=0 ; v0=0 ; v1=0
    if d0.value:
        px[0] = (0,0,33)
        dot[0]=(0,0,33)
    else:
        px[0] = (0,33,0)
        dot[0]=(0,33,0)
        v_base = 50000 ; v0 = 1000
    #
    if d1.value:
        px[1] = (0,0,33)
        dot[0]=(0,0,33)
    else:
        px[1] = (0,33,0)
        # NOTE(review): this branch sets the DotStar to (10,0,20) while the
        # matching d0 branch uses (0,33,0) — confirm the asymmetry is wanted.
        dot[0]=(10,0,20)
        v_base = 50000 ; v1 = 2000
    #
    mcp4728.channel_a.value = v_base + v0 + v1
    time.sleep(0.05)
|
# Generated by Django 3.2.5 on 2021-09-10 17:14
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
    """Make ``Request.file`` a nullable, optional CloudinaryField."""
    dependencies = [
        ('requests', '0012_alter_request_file'),
    ]
    operations = [
        migrations.AlterField(
            model_name='request',
            name='file',
            field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='file'),
        ),
    ]
|
"""this video library help to compress the video based on the requirement.
Please complete it by using the required packages or modules from the oto-helper library, and the logic for it to work.
"""
# Add the import statements below
# Write the functions definitions that are required for this module below
def compression(*args, **kwargs):
    """Compress a video according to the supplied options.

    The original stub declared ``(**args, **kwargs)`` — two double-star
    parameters — which is a SyntaxError, and had no body, which is an
    IndentationError; the module could not even be imported.  Until the
    oto-helper based implementation lands, accept any arguments and fail
    loudly instead of failing to import.

    Raises:
        NotImplementedError: always, until the compression logic is written.
    """
    raise NotImplementedError("video compression is not implemented yet")
|
from sequana import snaketools, sequana_data
from sequana.snaketools import DOTParser
import os, shutil
import tempfile
from sequana import Module, SequanaConfig
from easydev import TempFile
import subprocess
def test_dot_parser():
    """DOTParser.add_urls works in both the default and the "v1" mode."""
    s = DOTParser(sequana_data("test_dag.dot", "testing"))
    s.add_urls(mapper={'bwa_fix': "test.html"})
    # Remove the annotated dot file if present; only OSError is expected
    # from os.remove (the original bare ``except`` hid real failures).
    try:
        os.remove("test_dag.ann.dot")
    except OSError:
        pass
    s.mode = "v1"
    s.add_urls(mapper={'bwa_fix': "test.html"})
    try:
        os.remove("test_dag.ann.dot")
    except OSError:
        pass
def test_md5():
    """The compressor module exposes an md5 checksum."""
    from sequana import Module
    compressor = Module("compressor")
    checksum = compressor.md5()
def test_modules():
    """The 'dag' rule is registered and points at dag.rules."""
    assert "dag" in snaketools.modules
    assert snaketools.modules['dag'].endswith("dag.rules")
def test_getcleanup_rules():
    """get_cleanup_rules is a best-effort smoke test; failures are tolerated."""
    filename = snaketools.modules['fastq_sampling']
    try:
        snaketools.get_cleanup_rules(filename)
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate.
        pass
def test_snakemake_stats():
    """SnakeMakeStats can plot and save from a stats file."""
    # The fixture was created using snakemake with the option "--stats stats.txt"
    stats = snaketools.SnakeMakeStats(sequana_data("test_snakemake_stats.txt"))
    stats.plot()
    with TempFile() as fout:
        stats.plot_and_save(filename=fout.name, outputdir=None)
    with tempfile.TemporaryDirectory() as tempdir:
        stats.plot_and_save(filename="test.png", outputdir=tempdir)
def test_plot_stats():
    """plot_stats works on a real stats file and tolerates a missing directory."""
    with tempfile.TemporaryDirectory() as indir:
        shutil.copy(sequana_data("test_snakemake_stats.txt"), indir + "/stats.txt")
        with tempfile.TemporaryDirectory() as outdir:
            snaketools.plot_stats(indir, outdir)
    snaketools.plot_stats("dummy", "dummy")
def test_module():
    """Exercise the Module API on a rule without README, a rule with one,
    and a pipeline."""
    # a rule without README
    m = snaketools.Module('mark_duplicates_dynamic')
    m.description
    print(m)
    m # test __repr__
    m.__repr__()
    m.path
    m.snakefile
    m.overview
    # a rule with README
    m = snaketools.Module('dag')
    m.description
    m.overview
    assert m.is_executable()
    m.check()
    # a pipeline
    m = snaketools.Module('compressor')
    m.is_executable()
    m.check()
    m.snakefile
    m.name
    m
    print(m)
    assert m.cluster_config.endswith("cluster_config.json")
    assert m.schema_config.endswith("schema.yaml")
def test_valid_config():
    """SequanaConfig accepts None and a pipeline config, and can be saved."""
    config = snaketools.SequanaConfig(None)
    compressor = snaketools.Module("compressor")
    config = snaketools.SequanaConfig(compressor.config)
    from easydev import TempFile
    with TempFile() as fh:
        config.save(fh.name)
def test_sequana_config():
    """SequanaConfig lookups, the different constructors, error cases, and a
    save/reload round-trip for every pipeline config."""
    s = snaketools.Module("compressor")
    config = snaketools.SequanaConfig(s.config)
    assert config.config.get("compressor")["source"] == "fastq.gz"
    assert config.config.get("kraken:dummy") == None
    # --------------------------------- tests different constructors
    config = snaketools.SequanaConfig()
    config = snaketools.SequanaConfig({"test":1})
    assert config.config.test == 1
    # with a dictionary
    config = snaketools.SequanaConfig(config.config)
    # with a sequanaConfig instance
    config = snaketools.SequanaConfig(config)
    # with a non-yaml file
    try:
        json = sequana_data('test_summary_fastq_stats.json')
        config = snaketools.SequanaConfig(json)
        assert False
    except:
        assert True
    try:
        config = snaketools.SequanaConfig("dummy_dummy")
        assert False
    except:
        assert True
    # Test an exception
    s = snaketools.Module("compressor")
    config = snaketools.SequanaConfig(s.config)
    config._recursive_update(config._yaml_code, {"input_directory_dummy": "test"})
    #config.check_config_with_schema(s.schema_config)
    # loop over all pipelines, read the config, save it and check the content is
    # identical. This requires to remove the templates. We want to make sure the
    # empty strings are kept and that "no value" are kept as well
    #
    # field1: ""
    # field2:
    #
    # is unchanged
    from easydev import TempFile
    output = TempFile(suffix=".yaml")
    for pipeline in snaketools.pipeline_names:
        config_filename = Module(pipeline)._get_config()
        cfg1 = SequanaConfig(config_filename)
        cfg1.cleanup() # remove templates and strip strings
        cfg1.save(output.name)
        cfg2 = SequanaConfig(output.name)
        assert cfg2._yaml_code == cfg1._yaml_code
        cfg2._update_config()
        assert cfg1.config == cfg2.config
    output.delete()
def test_check_config_with_schema():
    """The compressor config validates against its own schema."""
    schema = Module("compressor").schema_config
    cfg = SequanaConfig(Module("compressor").config)
    cfg.check_config_with_schema(schema)
def test_module_version():
    """The version is parsed from the 'name/version' suffix.

    The original computed the comparison and discarded the result, so the
    test could never fail; assert it instead.
    """
    assert Module("snpeff/1.0").version == "1.0"
def test_message():
    """message() runs without raising."""
    snaketools.message("test")
def test_pipeline_manager():
    """PipelineManager construction and paired/single-end detection across
    combinations of input_directory, input_readtag and input_pattern."""
    # test missing input_directory
    cfg = SequanaConfig({})
    try:
        pm = snaketools.PipelineManager("custom", cfg)
        assert False
    except:
        assert True
    # normal behaviour but no input provided:
    config = Module("compressor")._get_config()
    cfg = SequanaConfig(config)
    cfg.cleanup() # remove templates
    try:
        pm = snaketools.PipelineManager("custom", cfg)
        assert False
    except:
        assert True
    # normal behaviour
    cfg = SequanaConfig(config)
    cfg.cleanup() # remove templates
    file1 = sequana_data("Hm2_GTGAAA_L005_R1_001.fastq.gz")
    cfg.config.input_directory, cfg.config.input_pattern = os.path.split(file1)
    pm = snaketools.PipelineManager("custom", cfg)
    assert pm.paired == False
    cfg = SequanaConfig(config)
    cfg.cleanup() # remove templates
    cfg.config.input_directory, cfg.config.input_pattern = os.path.split(file1)
    cfg.config.input_pattern = "Hm*gz"
    #file1 = sequana_data("Hm2_GTGAAA_L005_R1_001.fastq.gz")
    pm = snaketools.PipelineManager("custom", cfg)
    pm.plot_stats()
    assert pm.paired == True
    pm.getlogdir("fastqc")
    pm.getwkdir("fastqc")
    pm.getrawdata()
    pm.getreportdir("test")
    pm.getname("fastqc")
    # Test different configuration of input_directory, input_readtag,
    # input_pattern
    # Test the _R[12]_ paired
    with tempfile.TemporaryDirectory() as tmpdir:
        cfg = SequanaConfig()
        cfgname = tmpdir + "/config.yaml"
        cfg.config.input_pattern = "*fastq.gz"
        cfg.config.input_directory = tmpdir
        cfg.config.input_readtag = "_R[12]_"
        cfg._update_yaml()
        cfg.save(cfgname)
        cmd = "touch {}/test_R1_.fastq.gz".format(tmpdir)
        subprocess.call(cmd.split())
        cmd = "touch {}/test_R2_.fastq.gz".format(tmpdir)
        subprocess.call(cmd.split())
        pm = snaketools.PipelineManager("test", cfgname)
        assert pm.paired == True
    # Test the _[12]_ paired
    with tempfile.TemporaryDirectory() as tmpdir:
        cfg = SequanaConfig()
        cfgname = tmpdir + "/config.yaml"
        cfg.config.input_pattern = "*fastq.gz"
        cfg.config.input_directory = tmpdir
        cfg.config.input_readtag = "_[12]."
        cfg._update_yaml()
        cfg.save(cfgname)
        cmd = "touch {}/test_1.fastq.gz".format(tmpdir)
        subprocess.call(cmd.split())
        cmd = "touch {}/test_2.fastq.gz".format(tmpdir)
        subprocess.call(cmd.split())
        pm = snaketools.PipelineManager("test", cfgname)
        assert pm.paired is True
    # Test the _R[12]_ single end
    with tempfile.TemporaryDirectory() as tmpdir:
        cfg = SequanaConfig()
        cfgname = tmpdir + "/config.yaml"
        cfg.config.input_pattern = "*fastq.gz"
        cfg.config.input_directory = tmpdir
        cfg.config.input_readtag = "_R[12]_"
        cfg._update_yaml()
        cfg.save(cfgname)
        cmd = "touch {}/test_R1_.fastq.gz".format(tmpdir)
        subprocess.call(cmd.split())
        pm = snaketools.PipelineManager("test", cfgname)
        assert pm.paired is False
    # Test the _R[12]_ single end
    with tempfile.TemporaryDirectory() as tmpdir:
        cfg = SequanaConfig()
        cfgname = tmpdir + "/config.yaml"
        cfg.config.input_pattern = "*fq.gz" # wrong on purpose
        cfg.config.input_directory = tmpdir
        cfg.config.input_readtag = "_R[12]_"
        cfg._update_yaml()
        cfg.save(cfgname)
        cmd = "touch {}/test_R1_.fastq.gz".format(tmpdir)
        subprocess.call(cmd.split())
        try:
            pm = snaketools.PipelineManager("test", cfgname)
            assert False
        except:
            assert True
    # Test the _R[12]_ single end
    with tempfile.TemporaryDirectory() as tmpdir:
        cfg = SequanaConfig()
        cfgname = tmpdir + "/config.yaml"
        cfg.config.input_pattern = "*fastq.gz"
        cfg.config.input_directory = tmpdir
        cfg.config.input_readtag = "R[12]_"
        cfg._update_yaml()
        cfg.save(cfgname)
        cmd = "touch {}/testR1_.fastq.gz".format(tmpdir)
        subprocess.call(cmd.split())
        cmd = "touch {}/testR2_.fastq.gz".format(tmpdir)
        subprocess.call(cmd.split())
        try:
            pm = snaketools.PipelineManager("test", cfgname)
            assert False
        except:
            assert True
def test_pipeline_manager_generic():
    """PipelineManagerGeneric path helpers, setup/teardown, and multiqc
    cleanup on a temporary report file."""
    cfg = SequanaConfig({})
    file1 = sequana_data("Hm2_GTGAAA_L005_R1_001.fastq.gz")
    cfg.config.input_directory, cfg.config.input_pattern = os.path.split(file1)
    cfg.config.input_pattern = "Hm*gz"
    pm = snaketools.PipelineManagerGeneric("quality_control", cfg)
    pm.getlogdir("fastqc")
    pm.getwkdir("fastqc")
    pm.getrawdata()
    pm.getreportdir("test")
    pm.getname("fastqc")
    # setup() reads __snakefile__ / workflow from the caller's globals.
    gg = globals()
    gg['__snakefile__'] = "dummy"
    pm.setup(gg)
    del gg['__snakefile__']
    class WF():
        included_stack = ["dummy", 'dummy']
    wf = WF()
    gg['workflow'] = wf
    pm.setup(gg)
    pm.teardown()
    with tempfile.TemporaryDirectory() as dd:
        multiqc = open(dd + "/multiqc.html", "w")
        multiqc.write("test")
        multiqc.close()
        newfile = dd + "/multiqc.html_tmp_"
        pm.clean_multiqc(dd + "/multiqc.html")
def test_file_name_factory():
    """FileFactory accepts both a list of files and a glob pattern;
    FastQFactory pairs R1/R2 files by tag."""
    import glob
    def inner_test(ff):
        # Touch every public accessor to smoke-test the factory.
        len(ff)
        print(ff)
        ff.filenames
        ff.realpaths
        ff.all_extensions
        ff.pathnames
        ff.pathname
        ff.extensions
    #list
    list_files = glob.glob("*.py")
    ff = snaketools.FileFactory(list_files)
    inner_test(ff)
    # glob
    ff = snaketools.FileFactory("*py")
    inner_test(ff)
    directory = os.path.dirname(sequana_data("Hm2_GTGAAA_L005_R1_001.fastq.gz"))
    ff = snaketools.FastQFactory(directory + "/Hm2*fastq.gz", verbose=True)
    assert ff.tags == ['Hm2_GTGAAA_L005']
    ff.get_file1(ff.tags[0])
    ff.get_file2(ff.tags[0])
    assert len(ff) == 1
def test_copy_requirements():
    """copy_requirements resolves http URLs, sequana files and local files,
    and fails for an unknown requirement."""
    # We need 4 cases:
    # 1- http
    # 2- a sequana file (phix)
    # 3- an existing file elsewhere (here just a temporary file)
    # 4- an existing file in the same directory as the target dir
    from easydev import TempFile
    fh = tempfile.TemporaryDirectory()
    targetdir = fh.name
    # Case 3: a temporary file
    temprequire = TempFile()
    # Case 4: a local file (copy of the temp file)
    # TODO
    #localfile = temprequire.name.split(os.sep)[-1]
    #shutil.copy(temprequire.name, targetdir)
    cfg = snaketools.SequanaConfig()
    cfg.config.requirements = ["phiX174.fa", temprequire.name,
        #localfile,
        "https://raw.githubusercontent.com/sequana/sequana/master/README.rst"]
    cfg._update_yaml()
    cfg.copy_requirements(target=fh.name)
    # error
    cfg.config.requirements = ['dummy']
    try:
        cfg.copy_requirements(target=fh.name)
        assert False
    except:
        assert True
def test_onsuccess(tmpdir):
    """OnSuccess accepts custom makefile/cleanup paths."""
    directory = tmpdir.mkdir("onsuccess")
    makefile_path = directory.join("Makefile")
    cleanup_path = directory.join("cleanup.py")
    handler = snaketools.OnSuccess()
    handler.makefile_filename = makefile_path
    handler.makefile_cleanup = cleanup_path
def test_onsuccess_cleaner():
    """OnSuccessCleaner can add bundle and makefile targets."""
    workdir = tempfile.TemporaryDirectory()
    cleaner = snaketools.OnSuccessCleaner()
    cleaner.makefile_filename = workdir.name + os.sep + "Makefile"
    cleaner.add_bundle()
    cleaner.add_makefile()
def test_build_dynamic_rule():
    """build_dynamic_rule writes the generated code into a directory."""
    workdir = tempfile.TemporaryDirectory()
    snaketools.build_dynamic_rule("whatever", workdir.name)
def test_init():
    """snaketools.init injects pipeline names into the given namespace."""
    namespace = globals()
    snaketools.init("compressor.rules", namespace)
    assert "expected_output" in namespace
def test_get_pipeline_statistics():
    """get_pipeline_statistics runs without raising."""
    snaketools.get_pipeline_statistics()
def test_create_cleanup():
    """create_cleanup writes its script into the given directory."""
    with tempfile.TemporaryDirectory() as workdir:
        snaketools.create_cleanup(workdir)
def test_fastqfactory():
    """FastQFactory rejects bad read tags, and detects paired vs single
    layout depending on the read_tag argument."""
    try:
        snaketools.FastQFactory("*", read_tag='error')
        assert False
    except:
        assert True
    try:
        snaketools.FastQFactory("*", read_tag='[12]')
        assert False
    except:
        assert True
    directory = os.path.dirname(sequana_data("Hm2_GTGAAA_L005_R1_001.fastq.gz"))
    ff = snaketools.FastQFactory(directory + os.sep + "Hm2*gz", read_tag='R[12]')
    assert ff.paired is True
    assert ff.tags == ['Hm2_GTGAAA_L005_']
    # With no read tag the two files are treated as independent samples.
    ff = snaketools.FastQFactory(directory + os.sep + "Hm2*gz", read_tag=None)
    assert ff.paired is False
    assert sorted(ff.tags) == sorted(['Hm2_GTGAAA_L005_R2_001', 'Hm2_GTGAAA_L005_R1_001'])
def test_makefile():
    """Makefile can accumulate targets and be saved."""
    with tempfile.TemporaryDirectory() as workdir:
        makefile = snaketools.Makefile()
        makefile.makefile_filename = workdir + "/Makefile"
        makefile.add_remove_done()
        makefile.add_bundle()
        makefile.save()
def test_bundle():
    """OnSuccess can write both a Makefile and a recursive cleanup script."""
    with tempfile.TemporaryDirectory() as fout:
        # Renamed from ``os`` — the original shadowed the os module
        # imported at the top of the file.
        onsuccess = snaketools.OnSuccess()
        onsuccess.makefile_filename = fout + "/Makefile"
        onsuccess.cleanup_filename = fout + "/sequana_cleanup.py"
        onsuccess.add_makefile()
        onsuccess.create_recursive_cleanup()
|
def main():
    """Command-line entry point for the scrapyts youtube downloader.

    Parses CLI options, optionally configures a proxy, and hands control to
    CLIDownloader.  Download/parse failures are written to scrapyts.log and
    echoed on stderr.
    """
    from scrapyts.exceptions import (DownloadError, ParseError)
    import argparse
    import sys
    import traceback
    # python scrapyts.py url -t 18 --begin 4 --end 150 --step 2 --display --nodownload --index --prefix watch --sufix "by Yhash"
    parser = argparse.ArgumentParser(description="A simple youtube video/audio downloader.")
    parser.add_argument('url', help='youtube video url')
    parser.add_argument('-t', '--tag', help='Tag of the video to be downloaded', type=int)
    parser.add_argument('-b', '--begin', help='The index of the first video in a playlist that you want to download.', type=int)
    parser.add_argument('-e', '--end', help='The index of the last video in a playlist that you want to download.', type=int)
    parser.add_argument('-d', '--display', help='Display a table of available video/audio from the given url.', action='store_true')
    parser.add_argument('-nd', '--nodownload', help='Download the video or audio.', action='store_true')
    parser.add_argument('-i', '--index', help='Prefix filename with the index of the video from the playlist.', action='store_true')
    parser.add_argument('-ap', '--aslist', help='Treat url of a specific video as a url of a playlist.', action='store_true')
    parser.add_argument('-r', '--resume', help='Resume a partial file.', action='store_true')
    parser.add_argument('-y', '--proxy', help='Used proxy instead of direct connection.')
    args = parser.parse_args()
    # Well if proxy is avaible then use it!
    if args.proxy:
        import scrapyts.config
        # Try to change scrapyts.config.proxy value
        scrapyts.config.set_proxy(args.proxy)
    from scrapyts.cdownloader import CLIDownloader
    try:
        CLIDownloader().run(args.url, tag=args.tag,
                            first=args.begin,
                            last=args.end,
                            display=args.display,
                            download=not args.nodownload,
                            add_index=args.index,
                            as_list=args.aslist,
                            resume=args.resume)
    except (DownloadError, ParseError, ValueError) as e:
        # BUG FIX: the log file was opened inline and never closed; a
        # context manager guarantees the traceback is flushed and closed.
        with open('scrapyts.log', 'tw') as log_file:
            traceback.print_exception(*sys.exc_info(), file=log_file)
        print(e, file=sys.stderr)
# This is a file which does actually have a variable called 'answers'.
# This file is used in the test_check.py file which tests the check module
answers = 1  # sentinel value read by the check-module tests
|
import tabix
import os
from ..util import file_util, seq_util, vcf_util
class PreprocGnomAD():
    """Convert per-chromosome gnomAD VCF (.vcf.bgz) files into MTI files.

    One tab-separated .mti file is produced per chromosome; the INFO
    sub-fields discovered in the first ~1000 records become a single
    pipe-joined column named after ``source_name``.
    """
    def __init__(self, data):
        # data: dict with 'dirpath', 'outfile_title', 'rawfiles',
        # 'refversion' and 'source_name' keys.
        self.header = ""
        self.fieldlist = []
        self.dirpath = data['dirpath']
        self.outfile_title = data['outfile_title']
        self.rawfiles = data['rawfiles']
        self.refversion = data['refversion']
        self.source_name = data['source_name']
        self.out = self.get_out_filename()
        self.rawfileset = {}
        self.set_rawfiles_by_chrom()
    def get_out_filename(self):
        """Return the output path template containing a #CHROM# placeholder."""
        out = os.path.join(self.dirpath, self.outfile_title + ".chr#CHROM#" + '.mti')
        return out
    def set_header(self):
        """Discover INFO field names and build the MTI header line.

        Samples up to ~1000 records from the first chromosome's file and
        collects every KEY=VALUE INFO field except the large 'vep=' block.
        """
        chromlist = list(self.rawfileset.keys())
        tb = tabix.open(self.rawfileset[chromlist[0]])
        i = 0
        fieldset = {}
        for rec in tb.querys("chr"+chromlist[0] + ":1-10000000"):
            i += 1
            for f1 in rec[7].split(';'):
                if '=' in f1 and f1[:len('vep=')] != 'vep=':
                    arr = f1.split('=')
                    fieldset[arr[0]] = 1
            if i > 1000:
                break
        self.fieldlist = list(fieldset.keys())
        arrheader = ["CHROM", "POS", "ID", "REF", "ALT"]
        arrheader.append(self.source_name + "=" + "|".join(self.fieldlist))
        self.header = "#" + '\t'.join(arrheader)
    def get_header(self):
        """Return the MTI header line, computing it lazily on first use."""
        if self.header == "":
            self.set_header()
        return self.header
    def convert_mti_record(self, rec):
        """Convert one VCF record (list of columns) into an MTI row."""
        mti_rec = []
        mti_rec.append(rec[0].replace('chr', ''))
        mti_rec.append(rec[1])
        mti_rec.append(rec[2])
        mti_rec.append(rec[3])
        mti_rec.append(rec[4])
        d = {}
        for f1 in rec[7].split(';'):
            if "=" in f1:
                arr = f1.split('=')
                d[arr[0]] = vcf_util.encode_value(arr[1])
        datlist = []
        # Emit fields in header order; missing fields become empty strings.
        for fn in self.fieldlist:
            try:
                datlist.append(d[fn])
            except KeyError:
                datlist.append("")
        mti_rec.append('|'.join(datlist))
        return mti_rec
    def convert_to_mti_chrom(self, chrom):
        """Convert one chromosome's raw file to a bgzipped, tabixed MTI file."""
        out = self.out.replace('#CHROM#', chrom)
        f = open(out, "w")
        f.write(self.get_header() + "\n")
        tb = tabix.open(self.rawfileset[chrom])
        chromlen = seq_util.CHROM_LEN[self.refversion][chrom]
        i = 0
        for rec in tb.querys("chr"+chrom + ":1-" + str(chromlen)):
            mti_rec = self.convert_mti_record(rec)
            f.write('\t'.join(mti_rec) + '\n')
            # Progress log every 100k records.
            if i % 100000 == 0:
                print("Processing...", rec[0] + ':' + rec[1])
            i += 1
        f.close()
        print("Bzipping and Tabixing...", out)
        file_util.save_tabixgz(out)
        print("Saved...", out + ".gz")
        file_util.check_and_remove(out + '.gz.tbi', out, 3)
    def convert_to_mti(self):
        """Convert every main chromosome for which a raw file is known."""
        for chrom in seq_util.MAIN_CHROM_LIST:
            if chrom in self.rawfileset.keys():
                self.convert_to_mti_chrom(chrom)
    def set_rawfiles_by_chrom(self):
        """Map chromosome name -> raw .vcf.bgz path, parsed from filenames."""
        for rawfile in self.rawfiles:
            if rawfile.endswith('.vcf.bgz'):
                for namefield in rawfile.split('/')[-1].split('.'):
                    if "chr" in namefield:
                        chrom = namefield.replace('chr','')
                        self.rawfileset[chrom] = rawfile
    def run(self):
        """Entry point: run the full per-chromosome conversion."""
        self.convert_to_mti()
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
dataset = pd.read_csv('./data/titanic_train.csv')
# Select the ages of passengers who survived.
survived_passengers = dataset['Survived'] == 1
survived_passengers_ages = dataset[survived_passengers]['Age']
# Fill missing ages with the median survivor age so every survivor is counted.
survived_passengers_ages_with_missing_filled = survived_passengers_ages.fillna(survived_passengers_ages.median())
age_counts = survived_passengers_ages_with_missing_filled.value_counts().sort_index()
labels = age_counts.index.values
plt.bar(np.arange(len(age_counts)), age_counts, align='center')
plt.xticks(np.arange(len(age_counts)), labels)
# BUG FIX: the old title ("Men compared to Women") described a different
# chart — this figure plots survivor counts by age.
plt.title("Titanic Survivors by Age")
plt.show()
|
import json
import pickle
import argparse
import re
import collections
import string
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    article_re = re.compile(r'\b(a|an|the)\b', re.UNICODE)
    punctuation = set(string.punctuation)
    # Same pipeline as before: lower -> strip punctuation -> strip
    # articles -> collapse whitespace.
    lowered = s.lower()
    no_punct = ''.join(ch for ch in lowered if ch not in punctuation)
    no_articles = article_re.sub(' ', no_punct)
    return ' '.join(no_articles.split())
def get_tokens(s):
    """Tokenize ``s``: normalize, then split on whitespace ([] for empty/None)."""
    return normalize_answer(s).split() if s else []
def compute_f1(a_gold, a_pred):
    """Token-level F1 between a gold answer and a prediction.

    If either side has no tokens (a no-answer), F1 is 1 when both are
    empty and 0 otherwise.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    if not gold_toks or not pred_toks:
        return int(gold_toks == pred_toks)
    overlap = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(overlap.values())
    if num_same == 0:
        return 0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return (2 * precision * recall) / (precision + recall)
def gen_pv_data(std_dev_file, preds_file, output_file):
    """Generate data for the plausible-answer verifier.

    Marks every question as impossible and records the previous model's
    prediction as its plausible answer.

    Args:
        std_dev_file: official dev file
        preds_file: atrlp model prediction file
        output_file: where the rewritten dev data is written
    """
    # Context managers close the files deterministically; the original
    # json.load(open(...)) pattern relied on the GC to close the handles.
    with open(std_dev_file, 'r', encoding='utf-8') as f:
        dev = json.load(f)
    with open(preds_file, 'r', encoding='utf-8') as f:
        preds = json.load(f)
    for article in dev['data']:
        for paragraph in article["paragraphs"]:
            for qa in paragraph['qas']:
                qid = qa['id']
                pred = preds[qid]
                qa['is_impossible'] = True
                qa['plausible_answers'] = [{'text': pred, 'answer_start': 1}]
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(dev, f)
    print("generate pv data finished! ")
def gen_answer_refine_file(std_dev_file, nbest_file, output_file, split):
    """
    generate answer refine file, for choose refine answer
    Args:
        std_dev_file: official dev file
        nbest_file: atrlp prediction nbest file
        output_file: where the rewritten data is written
        split: 'train', 'dev' or 'test' — controls how f1 is computed and
            which questions are kept
    Returns: a file
    """
    data = json.load(open(std_dev_file, 'r', encoding='utf-8'))
    all_nbest = pickle.load(open(nbest_file, 'rb'))
    count = 0
    for article in data['data']:
        for p in article['paragraphs']:
            # del p['context']
            new_qas = []
            for qa in p['qas']:
                qid = qa['id']
                gold_answers = qa['answers']
                # Outside the test split, unanswerable questions are dropped.
                if split != 'test' and not gold_answers:
                    continue
                # Keep at most the top-5 candidates for this question.
                nbest = all_nbest[qid][:5]
                most_text = nbest[0]['text']
                new_qa = []
                for i, nb in enumerate(nbest):
                    pred = nb['text']
                    if split == 'train':
                        a = qa['answers'][0]['text']
                        f1 = compute_f1(a, pred)
                    elif split == 'dev':
                        f1 = max(compute_f1(a['text'], pred) for a in gold_answers)
                    else:
                        f1 = 0.
                    # Keep only candidates overlapping the top prediction.
                    if pred in most_text or most_text in pred:
                        new_qa.append({"f1_score": f1,
                                       "pred_answer": pred,
                                       "question": qa['question'],
                                       "id": f"{qid}_{i}"})
                if split == 'train':
                    # For training, discard questions whose best candidate
                    # has zero overlap with the gold answer.
                    if new_qa[0]["f1_score"] > 0:
                        new_qas.extend(new_qa)
                else:
                    new_qas.extend(new_qa)
            p['qas'] = new_qas
            count += len(new_qas)
    print(count)
    json.dump(data, open(output_file, 'w', encoding='utf-8'))
    print("generate answer refine file finished! ")
def main():
    """CLI dispatcher: 'pv' generates verifier data, 'reg' refine data."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--run-type', required=True, help="Generate data type : pv or reg")
    parser.add_argument('--std-dev-file', required=True, help="Official eval file")
    parser.add_argument('--input-file', required=True, help="Previous model output ")
    parser.add_argument("--output-file", required=True, help="Generate data output")
    parser.add_argument("--split", required=False, help="data type")
    args = parser.parse_args()
    if args.run_type == 'pv':
        gen_pv_data(args.std_dev_file, args.input_file, args.output_file)
    elif args.run_type == 'reg':
        gen_answer_refine_file(args.std_dev_file, args.input_file, args.output_file, args.split)
    else:
        # BUG FIX: a bare ``raise`` with no active exception fails with
        # "RuntimeError: No active exception to re-raise"; raise a
        # meaningful error instead.
        raise ValueError(f"unknown --run-type: {args.run_type!r} (expected 'pv' or 'reg')")
if __name__ == '__main__':
    main()
|
import click
from catacomb.common import constants
from catacomb.utils import catacomb_handler, formatter
@click.command(
    constants.CMD_OPEN_NAME, help=constants.CMD_OPEN_DESC,
    short_help=constants.CMD_OPEN_DESC)
@click.argument("tomb_name", nargs=1)
@click.option(
    "--new", "-n", is_flag=True, default=False,
    help=constants.CMD_OPEN_NEW_DESC)
@click.pass_context
def open(ctx, tomb_name, new):
    """Opens the tomb specified by the user, granting access to all the
    commands stored within it.
    Arguments:
        tomb_name (str): The name/alias of the tomb.
        new (bool): If True, create a new tomb (if it doesn't exist) and then
            switch to it.
    """
    # NOTE(review): only the current tomb name is lower-cased here, not
    # ``tomb_name`` — confirm whether tomb names are case-insensitive.
    if catacomb_handler.get_current_tomb_name(ctx).lower() == tomb_name:
        # Don't do anything if the specified tomb is already open.
        if new:
            formatter.print_warning(constants.WARN_TOMB_EXISTS.format(
                tomb_name))
        else:
            formatter.print_warning(constants.CMD_OPEN_SELF_WARN.format(
                tomb_name))
    elif new:
        # Create a new tomb and switch to it.
        if not catacomb_handler.is_existing_tomb(ctx, tomb_name):
            description = click.prompt(constants.CMD_OPEN_NEW_DESC_PROMPT)
            catacomb_handler.create_tomb(ctx, tomb_name, description)
            catacomb_handler.open_tomb(ctx, tomb_name)
            formatter.print_success(constants.CMD_OPEN_NEW_OK.format(
                tomb_name))
        else:
            # Do nothing if a tomb with the provided alias already exists.
            formatter.print_warning(constants.WARN_TOMB_EXISTS.format(
                tomb_name))
    elif catacomb_handler.is_existing_tomb(ctx, tomb_name):
        # Otherwise open a new tomb if it exists.
        catacomb_handler.open_tomb(ctx, tomb_name)
        formatter.print_success(constants.CMD_OPEN_OK.format(tomb_name))
    else:
        formatter.print_warning(constants.WARN_TOMB_NOT_FOUND.format(
            tomb_name))
|
# coding: utf-8
import json
def transform(item):
    """Recursively strip every 'description' key from nested dicts/lists.

    Mutates ``item`` in place; falsy values (None, empty containers) are
    left untouched.
    """
    if not item:
        return
    if isinstance(item, dict):
        item.pop('description', None)
        for value in item.values():
            transform(value)
    elif isinstance(item, list):
        for element in item:
            transform(element)
def run():
    """Read microservice.json, strip descriptions, write the slim copy."""
    with open('microservice.json') as src:
        data = json.load(src)
    transform(data)
    with open('microservice-simple.json', 'w') as dst:
        dst.write(json.dumps(data, ensure_ascii=False, indent=4))


if __name__ == '__main__':
    run()
|
import json
import os
import unittest
from pathlib import Path
from defusedxml import ElementTree
import utilities.file_utilities as file_utilities
from mhs_common.errors.ebxml_handler import handle_ebxml_error
class TestEbxmlHandler(unittest.TestCase):
    """Tests for handle_ebxml_error, which inspects a Spine ebXML response
    and translates any embedded ebXML faults into a JSON error body.
    """

    # Directory holding the canned ebXML response fixtures used below.
    message_dir = Path(os.path.dirname(os.path.abspath(__file__))) / 'test_messages'

    def test_non_200(self):
        """Non-200 responses are passed through untouched."""
        self.assertEqual(handle_ebxml_error(202, {'Content-Type': 'text/xml'}, ''), (202, ''))

    def test_non_ebxml_fault(self):
        """XML bodies without an ebXML fault are returned unchanged."""
        self.assertEqual(handle_ebxml_error(200, {'Content-Type': 'text/xml'}, '<a><b></b></a>'),
                         (200, '<a><b></b></a>'))

    def test_invalid_xml(self):
        """Malformed XML propagates the parser's ParseError."""
        with self.assertRaises(ElementTree.ParseError):
            handle_ebxml_error(200, {'Content-Type': 'text/xml'}, '<a><b><b></a>')

    def test_single_error(self):
        """A single-fault response yields a JSON body with one error entry."""
        message = file_utilities.get_file_string(self.message_dir / 'ebxml_response_error_single.xml')
        resp_json = json.loads(handle_ebxml_error(200, {'Content-Type': 'text/xml'}, message)[1])
        self.assert_json_error_root(resp_json)
        self.assert_json_with_first_error(resp_json)

    def test_multiple_errors(self):
        """A multi-fault response yields a JSON body listing every error."""
        message = file_utilities.get_file_string(self.message_dir / 'ebxml_response_error_multiple.xml')
        resp_json = json.loads(handle_ebxml_error(200, {'Content-Type': 'text/xml'}, message)[1])
        self.assert_json_error_root(resp_json)
        self.assert_json_with_first_error(resp_json)
        self.assert_json_with_second_error(resp_json)

    def test_no_content_type(self):
        """A missing Content-Type header is rejected with ValueError."""
        with self.assertRaises(ValueError):
            handle_ebxml_error(200, {}, 'Some body')

    def test_non_xml_content_type(self):
        """A non-XML Content-Type is rejected with ValueError."""
        with self.assertRaises(ValueError):
            handle_ebxml_error(200, {'Content-Type': 'text/html'}, 'Some body')

    def test_empty_body(self):
        """An empty body is passed through untouched."""
        code, body = handle_ebxml_error(200, {'Content-Type': 'text/xml'}, '')
        self.assertEqual(code, 200)
        self.assertEqual(body, '')

    # --- shared assertion helpers -------------------------------------

    def assert_json_error_root(self, resp_json):
        """Check the top-level message and process key of the JSON body."""
        self.assertEqual(resp_json['error_message'], "Error(s) received from Spine. Contact system administrator.")
        self.assertEqual(resp_json['process_key'], "EBXML_ERROR_HANDLER0005")

    def assert_json_with_first_error(self, resp_json):
        """Check the first error entry against the fixture's known fault."""
        self.assertEqual(resp_json['errors'][0]['Description'], "501319:Unknown eb:CPAId")
        self.assertEqual(resp_json['errors'][0]['codeContext'], "urn:oasis:names:tc:ebxml-msg:service:errors")
        self.assertEqual(resp_json['errors'][0]['errorCode'], "ValueNotRecognized")
        self.assertEqual(resp_json['errors'][0]['errorType'], "ebxml_error")
        self.assertEqual(resp_json['errors'][0]['severity'], "Error")

    def assert_json_with_second_error(self, resp_json):
        """Check the second error entry of the multi-fault fixture."""
        self.assertEqual(resp_json['errors'][1]['Description'], "501320:Unknown something else")
        self.assertEqual(resp_json['errors'][1]['codeContext'], "urn:oasis:names:tc:ebxml-msg:service:errors")
        self.assertEqual(resp_json['errors'][1]['errorCode'], "ValueNotRecognized")
        self.assertEqual(resp_json['errors'][1]['errorType'], "ebxml_error")
        self.assertEqual(resp_json['errors'][1]['severity'], "Error")
|
import sys
import os
import math
import random
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFilter
def crop(src_pixels, dst_pixels, x, y, w, h, dx, dy):
    """Copy a w*h pixel rectangle from (x, y) in src to (dx, dy) in dst.

    Both arguments are pixel-access objects indexable by (col, row) tuples.
    """
    for row in range(h):
        for col in range(w):
            dst_pixels[dx + col, dy + row] = src_pixels[x + col, y + row]
def convert(srcimg):
    """Re-stack a horizontal 4-strip image into a single vertical column.

    The source is cut into four equal-width vertical strips, pasted
    top-to-bottom into a new transparent RGBA image of size (w/4, h*4).

    Args:
        srcimg: Source PIL image. Its width is expected to be a multiple
            of 4 (remainder columns are dropped by the int() truncation).

    Returns:
        A new RGBA PIL image; the source image is left unmodified.
    """
    w, h = srcimg.size
    strip_w = int(w / 4)
    dstimg = Image.new('RGBA', [strip_w, int(h * 4)], (0x00, 0x00, 0x00, 0x00))
    # Paste strip i (columns [i*w/4, (i+1)*w/4)) at vertical offset i*h.
    # (The four copy-pasted crop/paste pairs collapsed into this loop; the
    # unused src_pixels = srcimg.load() local was removed.)
    for i in range(4):
        strip = srcimg.crop((int(w / 4 * i), 0, int(w / 4 * (i + 1)), h))
        dstimg.paste(strip, (0, h * i, strip_w, int(h * (i + 1))))
    return dstimg
if __name__ == '__main__':
    # Usage: python script.py <source-image>; result is written to out.png.
    source_path = sys.argv[1]
    result = convert(Image.open(source_path, 'r'))
    result.save("out.png")
#EOF
|
from .register import GenericRegister
|
"""This module contains the mappers to compute the Hessian/gradients in a distributed fashion
Copyright (c) 2017 The ADMML Authors.
All rights reserved. Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at :
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from math import exp
import numpy as np
def mapHv(algoParam, vector, w, mu):
    """Dispatch to the Hessian/gradient map function matching the configured
    loss and problem type.

    Parameters
    ----------
    algoParam : dict
        Algorithm parameters for the ML/ADMM algorithm; ``algoParam['LOSS']``
        and ``algoParam['PROBLEM']`` select the loss, and ``algoParam['D']``
        supplies the sample dimension where needed.
    vector : numpy.ndarray
        Data sample (label followed by features) contributing to the
        Hessian/gradient.
    w : numpy.ndarray
        Weight vector at the current internal newtonian iteration.
    mu : float
        The :math:`\\mu` value, used only for Huber/Pseudo-Huber loss
        (None otherwise).

    Returns
    -------
    H_i : numpy.ndarray
        The i-th sample's contribution to the Hessian
    v_i : numpy.ndarray
        The i-th sample's contribution to the gradient
    """
    loss = algoParam['LOSS']
    problem = algoParam['PROBLEM']
    if problem == 'binary':
        if loss == 'logistic':
            return _mapHv_logistic_binary(vector, w)
        if loss == 'hinge':
            # SHALL BE PROVIDED IN NEXT VERSION
            raise ValueError('ERROR: Currently Unsupported.')
        if loss == 'sq_hinge':
            return _mapHv_sq_hinge_binary(vector, w, algoParam['D'])
        if loss == 'smooth_hinge':
            return _mapHv_smooth_hinge_binary(vector, w, algoParam['D'])
    elif problem == 'regression':
        if loss == 'pseudo_huber':
            return _mapHv_pseudo_huber_regression(vector, w, mu)
        if loss == 'huber':
            return _mapHv_huber_regression(vector, w, mu)
    raise ValueError('ERROR: Unsupported Loss Function.')
def _mapHv_logistic_binary(vector, w):
    """Per-sample Hessian/gradient contribution for binary logistic loss.

    Parameters
    ----------
    vector : numpy.ndarray
        Data sample (label followed by features).
    w : numpy.ndarray
        Weight vector at the current internal newtonian iteration.

    Returns
    -------
    H_i : numpy.ndarray
        The i-th sample's contribution to the Hessian
    v_i : numpy.ndarray
        The i-th sample's contribution to the gradient
    """
    label = vector[0]
    features = vector[1:]
    # Signed margin and logistic probability of a correct prediction.
    margin = label * np.dot(features, w)
    prob = 1.0 / (1.0 + exp(-margin))
    grad_i = (-label * (1.0 - prob)) * features
    hess_i = prob * (1.0 - prob) * np.outer(features, features)
    return hess_i, grad_i
def _mapHv_sq_hinge_binary(vector, w, D):
    """Per-sample Hessian/gradient contribution for binary squared-hinge loss.

    Parameters
    ----------
    vector : numpy.ndarray
        Data sample (label followed by features).
    w : numpy.ndarray
        Weight vector at the current internal newtonian iteration.
    D : int
        the dimension of the sample

    Returns
    -------
    H_i : numpy.ndarray
        The i-th sample's contribution to the Hessian
    v_i : numpy.ndarray
        The i-th sample's contribution to the gradient
    """
    label = vector[0]
    features = vector[1:]
    margin = label * np.dot(w, features)
    if margin >= 1.0:
        # Sample is outside the margin: no contribution.
        return np.zeros([D, D]), np.zeros(D)
    hess_i = np.outer(features, features)
    grad_i = (-label * (1.0 - margin)) * features
    return hess_i, grad_i
def _mapHv_smooth_hinge_binary(vector, w, D):
    """Per-sample Hessian/gradient for binary smooth-hinge loss (DO NOT USE!!).

    Parameters
    ----------
    vector : numpy.ndarray
        Data sample (label followed by features).
    w : numpy.ndarray
        Weight vector at the current internal newtonian iteration.
    D : int
        the dimension of the sample

    Returns
    -------
    H_i : numpy.ndarray
        The i-th sample's contribution to the Hessian
    v_i : numpy.ndarray
        The i-th sample's contribution to the gradient
    """
    label = vector[0]
    features = vector[1:]
    margin = label * np.dot(w, features)
    if margin >= 1.0:
        # Outside the margin: no contribution.
        hess_i = np.zeros([D, D])
        grad_i = np.zeros(D)
    elif margin > 0.0:
        # Quadratic region of the smoothed hinge.
        hess_i = np.outer(features, features)
        grad_i = (-label * (1.0 - margin)) * features
    else:
        # Linear tail: constant gradient, zero curvature.
        hess_i = np.zeros([D, D])
        grad_i = -label * features
    return hess_i, grad_i
def _mapHv_pseudo_huber_regression(vector, w, mu):
    """Per-sample Hessian/gradient contribution for Pseudo-Huber regression.

    Parameters
    ----------
    vector : numpy.ndarray
        Data sample (target followed by features).
    w : numpy.ndarray
        Weight vector at the current internal newtonian iteration.
    mu : float
        The :math:`\\mu` value of the Pseudo-Huber loss.

    Returns
    -------
    H_i : numpy.ndarray
        The i-th sample's contribution to the Hessian
    v_i : numpy.ndarray
        The i-th sample's contribution to the gradient
    """
    target = vector[0]
    features = vector[1:]
    residual = target - np.dot(features, w)
    # sqrt(mu^2 + s^2) appears in both the gradient and Hessian scales.
    scale = np.sqrt(mu ** 2 + residual ** 2)
    grad_i = -(residual / scale) * features
    hess_i = ((mu ** 2) / scale ** 3) * np.outer(features, features)
    return hess_i, grad_i
def _mapHv_huber_regression(vector, w, mu):
    """Per-sample Hessian/gradient contribution for Huber regression.

    Parameters
    ----------
    vector : numpy.ndarray
        Data sample (target followed by features).
    w : numpy.ndarray
        Weight vector at the current internal newtonian iteration.
    mu : float
        The :math:`\\mu` value of the Huber loss.

    Returns
    -------
    H_i : numpy.ndarray
        The i-th sample's contribution to the Hessian
    v_i : numpy.ndarray
        The i-th sample's contribution to the gradient
    """
    def clipped(s, mu):
        # Huber gradient factor: the residual clipped to [-mu, mu].
        return max(-mu, min(mu, s))

    def in_quadratic_region(s, mu):
        # 1.0 inside the quadratic region, 0.0 in the linear tails.
        return float(abs(s) <= mu)

    target = vector[0]
    features = vector[1:]
    residual = target - np.dot(features, w)
    grad_i = -clipped(residual, mu) * features
    hess_i = in_quadratic_region(residual, mu) * np.outer(features, features)
    return hess_i, grad_i
|
"""
Blink RGB LEDs using Raspberry PI GPIO
"""
import RPi.GPIO as GPIO
import time
# Channel states (red, green, blue) for each supported LED color.
COLORS = {'red': [True, False, False],
          'green': [False, True, False],
          'blue': [False, False, True],
          'yellow': [True, True, False],
          'purple': [True, False, True],
          # BUG FIX: cyan is green + blue; it previously duplicated 'green'.
          'cyan': [False, True, True],
          'white': [True, True, True]}
def rgb_blink(pins, color, sleep=1):
    """Light an RGB LED in the given color for *sleep* seconds, then off.

    Args:
        - pins (list): GPIO pins in order of RGB
        - color (str): LED color
        - sleep (int): sleep in seconds (default: 1)
    Returns:
        - None (blinks led)
    """
    channel_states = COLORS[color]
    # Drive high only the channels that make up the requested color.
    for index, pin in enumerate(pins):
        if not channel_states[index]:
            continue
        GPIO.output(pin, GPIO.HIGH)
    time.sleep(sleep)
    # Switch every channel off again.
    for pin in pins:
        GPIO.output(pin, GPIO.LOW)
|
import threading
import multiprocessing
import Queue
import logging
import xml.sax
import nltk.data
from nltk.tokenize import *
import re
class WikipediaReader(threading.Thread):
    '''allows to read a wikipedia xml dump file and extract articles
    '''
    '''constructor
    @param path the path to the wikiepdia dump file
    @param queue the queue to which articles shall be read as dictionary with the following fields:
        'type': 'article' or 'redirect'
        'id': the id of the article (not present for redirects)
        'title': the title of the article or redirect
        'text': the actual text (not present for redirects or when the extraction of text was disabled)
        'target': the name of the article to which the redirect points to (not present for articles)
    @param extract_text boolean whether text shall also be extracted (defaults to true)
    '''
    def __init__(self, path, queue, extract_text=True):
        threading.Thread.__init__(self)
        # multiprocessing.Process.__init__(self)
        # The SAX handler does the actual extraction; it pushes parsed
        # articles onto the supplied queue.
        self._reader = WikipediaArticleReader(queue, extract_text)
        self._path = path

    def run(self):
        # Blocks this thread until the whole dump file has been parsed.
        xml.sax.parse(self._path, self._reader)
        logging.info('Finished parsing file "%s"' % self._path)

    def articles_parsed(self):
        '''returns the number of articles already parsed
        '''
        return self._reader.articles_parsed()
class WikipediaArticleReader(xml.sax.handler.ContentHandler):
    '''A SAX content handler that reads articles from a wikipedia file
    '''
    '''constructor
    @param queue the queue to which articles shall be read
    @param extract_text boolean whether text shall also be extracted (defaults to true)
    '''
    # NOTE(review): Python 2 code (uses `long`); __init__ does not call
    # xml.sax.handler.ContentHandler.__init__ -- confirm this is intended.
    def __init__(self, queue, extract_text=True):
        self._queue = queue
        self._reset()
        # Name of the XML tag currently being read; drives characters().
        self._current_tag = u''
        self._article_counter = 0
        # The first <id> in a page is the article id; the second is the
        # revision id -- these flags track which one we are reading.
        self._id_done = False
        self._rev_id_done = False
        self._ns_done = False
        self._extract_text = extract_text

    def _reset(self):
        # Clear per-page parsing state before the next <page> element.
        self._id_done = False
        self._rev_id_done = False
        self._ns_done = False
        self._item = {
            'type': u'article',
            'id': u'',
            'rev_id': u'',
            'ns': u'',
            'title': u'',
            'text': u'',
            'target': u''
        }

    def articles_parsed(self):
        # Number of <page> elements fully processed so far.
        return self._article_counter

    def startElement(self, name, attrs):
        # print name
        self._current_tag = name
        if self._current_tag == 'redirect':
            # A <redirect> tag turns the current page into a redirect item;
            # its 'title' attribute is the redirect target.
            self._item['type'] = u'redirect'
            if not 'title' in attrs.getNames():
                logging.warning('Attribute "title" not in redirect tag of article "%s"'
                                % (self._item['title'].encode('ascii', 'ignore')))
            self._item['target'] = attrs.getValue('title')
        #if self._current_tag == 'template':
        #    self._item['type'] = u'template'
        #if not 'title' in attrs.getNames():
        #    logging.warning('Attribute "title" not in redirect tag of article "%s"'
        #    % (self._item['title'].encode('ascii', 'ignore')))
        #self._item['target'] = attrs.getValue('title')

    def characters(self, content):
        # SAX may deliver text in chunks, hence the += accumulation.
        if self._current_tag == 'title':
            self._item['title'] += content
            # print self._item['title']
        elif self._current_tag == 'id' and not self._id_done:
            self._item['id'] += content
        elif self._current_tag == 'id' and self._id_done and not self._rev_id_done:
            self._item['rev_id'] += content
        elif self._current_tag == 'ns' and not self._ns_done:
            self._item['ns'] += content
        elif self._current_tag == 'text' and self._extract_text and not self._item['type'] == u'redirect':
            self._item['text'] += content

    def endElement(self, name):
        self._current_tag = u''
        if name == 'id' and not self._id_done:
            self._id_done = True
        elif name == 'id' and self._id_done and not self._rev_id_done:
            self._rev_id_done = True
        elif name == 'ns':
            self._ns_done = True
        elif name == 'page':
            self._article_counter += 1
            # consider just articles and templates pages and do not handle empty titles
            if len(self._item['title']) > 0 and (long(self._item['ns']) == 0 ):
                try:
                    self._item['id'] = long(self._item['id'])
                    self._queue.put(self._item)
                except ValueError:
                    logging.error('Article "%s" could not be parsed, as %s is not a valid integer id'
                                  % (
                                      self._item['title'].encode('ascii', 'ignore'), self._item['id'].encode('ascii', 'ignore')))
            # log progress
            if self._article_counter % 100 == 0:
                logging.info('%d articles parsed' % (self._article_counter))
            # reset article
            self._reset()
|
import numpy as np
import sys,os
import cv2
# Make the caffe-ssd build visible on sys.path before importing caffe.
caffe_root = '/home/di/workspace/caffe-ssd/build/install/'
sys.path.insert(0, caffe_root + 'python')
import caffe
import time
#caffe.set_device(0)
#caffe.set_mode_gpu()
#net_file= 'MobileNetSSD_deploy.prototxt'
net_file = 'MobileNetSSD_deploy_truncated.prototxt'
caffe_model = 'MobileNetSSD_deploy10695.caffemodel'
if not os.path.exists(caffe_model):
    # BUG FIX: the old message named a different, misspelled file
    # ("MobileNetSSD_deploy.affemodel"); report the path actually checked.
    print(caffe_model + " does not exist,")
    print("use merge_bn.py to generate it.")
    exit()
# Load the truncated SSD network once at module level; detect()/get_prior_box()
# reuse this global net.
net = caffe.Net(net_file, caffe_model, caffe.TEST)
CLASSES = ('background', 'person')
def preprocess(src):
    """Resize to the 300x300 network input and scale pixels to ~[-1, 1]."""
    resized = cv2.resize(src, (300, 300))
    # Same normalization as training: (x - 127.5) * 0.007843 (~= 1/127.5).
    return (resized - 127.5) * 0.007843
def postprocess(img, out):
    """Scale normalized SSD detections back to pixel coordinates.

    Returns a tuple (boxes, confidences, class_ids) where boxes is an
    int32 array of [x1, y1, x2, y2] rows in image pixels.
    """
    height, width = img.shape[0], img.shape[1]
    detections = out['detection_out'][0, 0]
    box = detections[:, 3:7] * np.array([width, height, width, height])
    cls = detections[:, 1]
    conf = detections[:, 2]
    return (box.astype(np.int32), conf, cls)
def detect(img_path):
    """Run the SSD net on one image file, draw boxes, and show the result."""
    origimg = cv2.imread(img_path)
    blob = preprocess(origimg).astype(np.float32)
    # HWC -> CHW, the layout expected by the caffe data blob.
    blob = blob.transpose((2, 0, 1))
    net.blobs['data'].data[...] = blob
    start = time.time()
    out = net.forward()
    use_time = time.time() - start
    print("time=" + str(round(use_time * 1000, 3)) + "ms")
    boxes, confidences, class_ids = postprocess(origimg, out)
    for idx in range(len(boxes)):
        # Keep only reasonably confident detections.
        if confidences[idx] <= 0.3:
            continue
        top_left = (boxes[idx][0], boxes[idx][1])
        bottom_right = (boxes[idx][2], boxes[idx][3])
        cv2.rectangle(origimg, top_left, bottom_right, (0, 255, 0))
        # Keep the label inside the image even for boxes near the border.
        label_pos = (max(top_left[0], 15), max(top_left[1], 15))
        label = "%s:%.2f" % (CLASSES[int(class_ids[idx])], confidences[idx])
        cv2.putText(origimg, label, label_pos, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
    cv2.waitKey(1) & 0xff
    #Exit if ESC pressed
    return True
def get_prior_box(img_path):
    # Runs one forward pass on img_path and dumps the SSD "mbox_priorbox"
    # blob to disk: once flattened (priorbox_flatten.txt) and once as 1917
    # rows of 8 values (priorbox.txt) for easier inspection.
    # NOTE(review): Python 2 print statement below; 'start' is never used
    # (no timing is reported here).
    origimg = cv2.imread(img_path)
    img = preprocess(origimg)
    img = img.astype(np.float32)
    # HWC -> CHW for the caffe data blob.
    img = img.transpose((2, 0, 1))
    net.blobs['data'].data[...] = img
    start = time.time()
    out = net.forward()
    #get priorbox
    priorbox=out["mbox_priorbox"]
    print priorbox,np.shape(priorbox)
    pb=priorbox.flatten()
    with open("./priorbox_flatten.txt",'w') as f:
        f.write(",".join(map(str,pb)))
    #for view priorbox
    pb=np.reshape(pb,(1917,8))
    # Truncate the file first, then append one comma-terminated row per box.
    with open("./priorbox.txt",'w') as f:
        f.write("")
    with open("./priorbox.txt",'a') as f:
        for line in pb:
            f.write(",".join(map(str,line))+",\n")
if __name__ == '__main__':
    # Dump the prior boxes for the bundled test image when run as a script.
    get_prior_box("test.jpeg")
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedMMPianoAI(DistributedObjectAI):
    """AI-side distributed object for the MM piano (presumably Minnie's
    Melodyland -- confirm). Every handler below is an unimplemented stub:
    client requests and broadcast updates are accepted and ignored.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory("DistributedMMPianoAI")

    def requestSpeedUp(self):
        """Client request to speed the piano up (stub)."""
        pass

    def requestChangeDirection(self):
        """Client request to reverse the piano's direction (stub)."""
        pass

    def setSpeed(self, todo0, todo1, todo2):
        """Distributed field update for the piano's speed (stub);
        parameter semantics not visible here."""
        pass

    def playSpeedUp(self, todo0):
        """Distributed broadcast of a speed-up event (stub)."""
        pass

    def playChangeDirection(self, todo0):
        """Distributed broadcast of a direction-change event (stub)."""
        pass
|
# Print each number from 1 to 100 in turn, except:
#  - numbers divisible by 3 print "Fizz"
#  - numbers divisible by 5 print "Buzz"
#  - numbers divisible by both 3 and 5 (e.g. 15) print "FizzBuzz"
def fizzbuzz():
    """Print the FizzBuzz sequence for the numbers 1 through 100."""
    # BUG FIX: range(101) started at 0, printing a spurious "FizzBuzz"
    # for 0 before the intended 1..100 sequence.
    for i in range(1, 101):
        if (i % 3 == 0) and (i % 5 == 0):
            print('FizzBuzz')
        elif i % 3 == 0:
            print('Fizz')
        elif i % 5 == 0:
            print('Buzz')
        else:
            print(i)


fizzbuzz()
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cnvnator(MakefilePackage):
    """A tool for CNV discovery and genotyping
    from depth-of-coverage by mapped reads."""

    homepage = "https://github.com/abyzovlab/CNVnator"
    url = "https://github.com/abyzovlab/CNVnator/archive/v0.3.3.tar.gz"

    version('0.3.3', sha256='58c5acf61f9a1e5febf546c196f8917a5e084b729e5c4cfd3eba83471b3fe5c1')

    depends_on('samtools')
    depends_on('htslib')
    depends_on('root')
    depends_on('bzip2')
    depends_on('curl')
    depends_on('lzma')
    depends_on('zlib')

    def edit(self, spec, prefix):
        """Patch the upstream Makefile so it builds with Spack's toolchain."""
        makefile = FileFilter('Makefile')
        # Replace -fopenmp with self.compiler.openmp_flag
        makefile.filter('-fopenmp', self.compiler.openmp_flag)
        # Replace CXX with CXXFLAGS
        makefile.filter('CXX.*=.*',
                        r'CXXFLAGS = -DCNVNATOR_VERSION=\"$(VERSION)\"'
                        ' $(OMPFLAGS)'
                        ' {0}'.format(self.compiler.cxx11_flag))
        makefile.filter('$(CXX)', '$(CXX) $(CXXFLAGS)', string=True)
        # Replace -I$(SAMDIR) with -I$(SAMINC)
        makefile.filter('-I$(SAMDIR)', '-I$(SAMINC)', string=True)
        # Link more libs
        makefile.filter('^override LIBS.*',
                        'override LIBS += -lz -lbz2 -lcurl -llzma')

    def build(self, spec, prefix):
        # Point the Makefile at the dependencies' install prefixes.
        make('ROOTSYS={0}'.format(spec['root'].prefix),
             'SAMINC={0}'.format(spec['samtools'].prefix.include),
             'SAMDIR={0}'.format(spec['samtools'].prefix.lib),
             'HTSDIR={0}'.format(spec['htslib'].prefix.lib))

    def install(self, spec, prefix):
        # Upstream has no install target; copy the single binary by hand.
        mkdir(prefix.bin)
        install('cnvnator', prefix.bin)
|
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from rez.utils.resources import ResourcePool, ResourceHandle
from rez.utils.data_utils import cached_property
from rez.plugin_managers import plugin_manager
from rez.config import config
from rez.exceptions import ResourceError
from contextlib import contextmanager
import threading
import os.path
import time
def get_package_repository_types():
    """Return the available package repository implementations."""
    repository_plugins = plugin_manager.get_plugins('package_repository')
    return repository_plugins
def create_memory_package_repository(repository_data):
    """Build a standalone in-memory package repository from the given data.

    See rezplugins/package_repository/memory.py for more details.

    Args:
        repository_data (dict): Package repository data.

    Returns:
        `PackageRepository` object.
    """
    memory_repo_cls = plugin_manager.get_plugin_class("package_repository", "memory")
    return memory_repo_cls.create_repository(repository_data)
class PackageRepositoryGlobalStats(threading.local):
    """Gathers stats across package repositories.
    """
    def __init__(self):
        # Cumulative seconds spent loading packages from repositories since
        # process start (kept per-thread via threading.local).
        self.package_load_time = 0.0

    @contextmanager
    def package_loading(self):
        """Wrap code that loads a package (e.g. from file or cache) so its
        wall-clock duration is added to `package_load_time`.
        """
        started = time.time()
        yield None
        self.package_load_time += time.time() - started


package_repo_stats = PackageRepositoryGlobalStats()
class PackageRepository(object):
"""Base class for package repositories implemented in the package_repository
plugin type.
Note that, even though a package repository does determine where package
payloads should go, it is not responsible for creating or copying these
payloads.
"""
# see `install_variant`.
remove = object()
@classmethod
def name(cls):
"""Return the name of the package repository type."""
raise NotImplementedError
def __init__(self, location, resource_pool):
"""Create a package repository.
Args:
location (str): A string specifying the location of the repository.
This could be a filesystem path, or a database uri, etc.
resource_pool (`ResourcePool`): The pool used to manage package
resources.
"""
self.location = location
self.pool = resource_pool
def __str__(self):
return "%s@%s" % (self.name(), self.location)
def register_resource(self, resource_class):
"""Register a resource with the repository.
Your derived repository class should call this method in its __init__ to
register all the resource types associated with that plugin.
"""
self.pool.register_resource(resource_class)
def clear_caches(self):
"""Clear any cached resources in the pool."""
self.pool.clear_caches()
@cached_property
def uid(self):
"""Returns a unique identifier for this repository.
This must be a persistent identifier, for example a filepath, or
database address + index, and so on.
Returns:
hashable value: Value that uniquely identifies this repository.
"""
return self._uid()
def __eq__(self, other):
return (
isinstance(other, PackageRepository)
and other.name() == self.name()
and other.uid == self.uid
)
def is_empty(self):
"""Determine if the repository contains any packages.
Returns:
True if there are no packages, False if there are at least one.
"""
for family in self.iter_package_families():
for pkg in self.iter_packages(family):
return False
return True
def get_package_family(self, name):
"""Get a package family.
Args:
name (str): Package name.
Returns:
`PackageFamilyResource`, or None if not found.
"""
raise NotImplementedError
def iter_package_families(self):
"""Iterate over the package families in the repository, in no
particular order.
Returns:
`PackageFamilyResource` iterator.
"""
raise NotImplementedError
def iter_packages(self, package_family_resource):
"""Iterate over the packages within the given family, in no particular
order.
Args:
package_family_resource (`PackageFamilyResource`): Parent family.
Returns:
`PackageResource` iterator.
"""
raise NotImplementedError
def iter_variants(self, package_resource):
"""Iterate over the variants within the given package.
Args:
package_resource (`PackageResource`): Parent package.
Returns:
`VariantResource` iterator.
"""
raise NotImplementedError
def get_package(self, name, version):
"""Get a package.
Args:
name (str): Package name.
version (`Version`): Package version.
Returns:
`PackageResource` or None: Matching package, or None if not found.
"""
fam = self.get_package_family(name)
if fam is None:
return None
for pkg in fam.iter_packages():
if pkg.version == version:
return pkg
return None
def get_package_from_uri(self, uri):
"""Get a package given its URI.
Args:
uri (str): Package URI
Returns:
`PackageResource`, or None if the package is not present in this
package repository.
"""
return None
def get_variant_from_uri(self, uri):
"""Get a variant given its URI.
Args:
uri (str): Variant URI
Returns:
`VariantResource`, or None if the variant is not present in this
package repository.
"""
return None
def ignore_package(self, pkg_name, pkg_version, allow_missing=False):
"""Ignore the given package.
Ignoring a package makes it invisible to further resolves.
Args:
pkg_name (str): Package name
pkg_version(`Version`): Package version
allow_missing (bool): if True, allow for ignoring a package that
does not exist. This is useful when you want to copy a package
to a repo and you don't want it visible until the copy is
completed.
Returns:
int:
* -1: Package not found
* 0: Nothing was done, package already ignored
* 1: Package was ignored
"""
raise NotImplementedError
def unignore_package(self, pkg_name, pkg_version):
"""Unignore the given package.
Args:
pkg_name (str): Package name
pkg_version(`Version`): Package version
Returns:
int:
* -1: Package not found
* 0: Nothing was done, package already visible
* 1: Package was unignored
"""
raise NotImplementedError
def remove_package(self, pkg_name, pkg_version):
"""Remove a package.
Note that this should work even if the specified package is currently
ignored.
Args:
pkg_name (str): Package name
pkg_version(`Version`): Package version
Returns:
bool: True if the package was removed, False if it wasn't found.
"""
raise NotImplementedError
def remove_package_family(self, pkg_name, force=False):
"""Remove an empty package family.
Args:
pkg_name (str): Package name
force (bool): If Trur, delete even if not empty.
Returns:
bool: True if the family was removed, False if it wasn't found.
"""
raise NotImplementedError
def remove_ignored_since(self, days, dry_run=False, verbose=False):
"""Remove packages ignored for >= specified number of days.
Args:
days (int): Remove packages ignored >= this many days
dry_run: Dry run mode
verbose (bool): Verbose mode
Returns:
int: Number of packages removed. In dry-run mode, returns the
number of packages that _would_ be removed.
"""
raise NotImplementedError
def pre_variant_install(self, variant_resource):
"""Called before a variant is installed.
If any directories are created on disk for the variant to install into,
this is called before that happens.
Note that it is the responsibility of the `BuildProcess` to call this
function at the appropriate time.
"""
pass
def on_variant_install_cancelled(self, variant_resource):
"""Called when a variant installation is cancelled.
This is called after `pre_variant_install`, but before `install_variant`,
which is not expected to be called.
Variant install cancellation usually happens for one of two reasons -
either the variant installation failed (ie a build error occurred), or
one or more of the package tests failed, aborting the installation.
Note that it is the responsibility of the `BuildProcess` to call this
function at the appropriate time.
"""
pass
def install_variant(self, variant_resource, dry_run=False, overrides=None):
"""Install a variant into this repository.
Use this function to install a variant from some other package repository
into this one.
Args:
variant_resource (`VariantResource`): Variant to install.
dry_run (bool): If True, do not actually install the variant. In this
mode, a `Variant` instance is only returned if the equivalent
variant already exists in this repository; otherwise, None is
returned.
overrides (dict): Use this to change or add attributes to the
installed variant. To remove attributes, set values to
`PackageRepository.remove`.
Returns:
`VariantResource` object, which is the newly created variant in this
repository. If `dry_run` is True, None may be returned.
"""
raise NotImplementedError
def get_equivalent_variant(self, variant_resource):
"""Find a variant in this repository that is equivalent to that given.
A variant is equivalent to another if it belongs to a package of the
same name and version, and it has the same definition (ie package
requirements).
Note that even though the implementation is trivial, this function is
provided since using `install_variant` to find an existing variant is
nonintuitive.
Args:
variant_resource (`VariantResource`): Variant to install.
Returns:
`VariantResource` object, or None if the variant was not found.
"""
return self.install_variant(variant_resource, dry_run=True)
def get_parent_package_family(self, package_resource):
"""Get the parent package family of the given package.
Args:
package_resource (`PackageResource`): Package.
Returns:
`PackageFamilyResource`.
"""
raise NotImplementedError
def get_parent_package(self, variant_resource):
"""Get the parent package of the given variant.
Args:
variant_resource (`VariantResource`): Variant.
Returns:
`PackageResource`.
"""
raise NotImplementedError
def get_variant_state_handle(self, variant_resource):
"""Get a value that indicates the state of the variant.
This is used for resolve caching. For example, in the 'filesystem'
repository type, the 'state' is the last modified date of the file
associated with the variant (perhaps a package.py). If the state of
any variant has changed from a cached resolve - eg, if a file has been
modified - the cached resolve is discarded.
This may not be applicable to your repository type, leave as-is if so.
Returns:
A hashable value.
"""
return None
def get_last_release_time(self, package_family_resource):
"""Get the last time a package was added to the given family.
This information is used to cache resolves via memcached. It can be left
not implemented, but resolve caching is a substantial optimisation that
you will be missing out on.
Returns:
int: Epoch time at which a package was changed/added/removed from
the given package family. Zero signifies an unknown last package
update time.
"""
return 0
def make_resource_handle(self, resource_key, **variables):
"""Create a `ResourceHandle`
Nearly all `ResourceHandle` creation should go through here, because it
gives the various resource classes a chance to normalize / standardize
the resource handles, to improve caching / comparison / etc.
"""
if variables.get("repository_type", self.name()) != self.name():
raise ResourceError("repository_type mismatch - requested %r, "
"repository_type is %r"
% (variables["repository_type"], self.name()))
variables["repository_type"] = self.name()
if variables.get("location", self.location) != self.location:
raise ResourceError("location mismatch - requested %r, repository "
"location is %r" % (variables["location"],
self.location))
variables["location"] = self.location
resource_cls = self.pool.get_resource_class(resource_key)
variables = resource_cls.normalize_variables(variables)
return ResourceHandle(resource_key, variables)
def get_resource(self, resource_key, **variables):
"""Get a resource.
Attempts to get and return a cached version of the resource if
available, otherwise a new resource object is created and returned.
Args:
resource_key (`str`): Name of the type of `Resources` to find
variables: data to identify / store on the resource
Returns:
`PackageRepositoryResource` instance.
"""
handle = self.make_resource_handle(resource_key, **variables)
return self.get_resource_from_handle(handle, verify_repo=False)
def get_resource_from_handle(self, resource_handle, verify_repo=True):
"""Get a resource.
Args:
resource_handle (`ResourceHandle`): Handle of the resource.
Returns:
`PackageRepositoryResource` instance.
"""
if verify_repo:
# we could fix the handle at this point, but handles should
# always be made from repo.make_resource_handle... for now,
# at least, error to catch any "incorrect" construction of
# handles...
if resource_handle.variables.get("repository_type") != self.name():
raise ResourceError("repository_type mismatch - requested %r, "
"repository_type is %r"
% (resource_handle.variables["repository_type"],
self.name()))
if resource_handle.variables.get("location") != self.location:
raise ResourceError("location mismatch - requested %r, "
"repository location is %r "
% (resource_handle.variables["location"],
self.location))
resource = self.pool.get_resource_from_handle(resource_handle)
resource._repository = self
return resource
    def get_package_payload_path(self, package_name, package_version=None):
        """Defines where a package's payload should be installed to.

        Args:
            package_name (str): Name of package.
            package_version (str or `Version`): Package version.

        Returns:
            str: Path where package's payload should be installed to.

        Raises:
            NotImplementedError: Always, in this base class - concrete
                repositories must provide their own implementation.
        """
        raise NotImplementedError
    def _uid(self):
        """Unique identifier implementation.

        You may need to provide your own implementation. For example, consider
        the 'filesystem' repository. A default uri might be 'filesystem@/tmp_pkgs'.
        However /tmp_pkgs is probably a local path for each user, so this would
        not actually uniquely identify the repository - probably the inode number
        needs to be incorporated also.

        Returns:
            Hashable value.
        """
        # Default: (plugin name, location string). Sufficient only when the
        # location string itself is globally unique - see docstring caveat.
        return (self.name(), self.location)
class PackageRepositoryManager(object):
    """Package repository manager.

    Manages retrieval of resources (packages and variants) from
    `PackageRepository` instances, and caches these resources in a resource
    pool.
    """
    def __init__(self, resource_pool=None):
        """Create a package repo manager.

        Args:
            resource_pool (`ResourcePool`): Provide your own resource pool. If
                None, a default pool is created based on config settings.
        """
        if resource_pool is None:
            cache_size = config.resource_caching_maxsize
            if cache_size < 0:  # -1 == disable caching
                cache_size = None
            resource_pool = ResourcePool(cache_size=cache_size)
        self.pool = resource_pool
        # Maps normalised "type@location" strings to repository instances.
        self.repositories = {}

    def get_repository(self, path):
        """Get a package repository.

        Args:
            path (str): Entry from the 'packages_path' config setting. This may
                simply be a path (which is managed by the 'filesystem' package
                repository plugin), or a string in the form "type@location",
                where 'type' identifies the repository plugin type to use.

        Returns:
            `PackageRepository` instance.
        """
        # normalise repo path
        parts = path.split('@', 1)
        if len(parts) == 1:
            # A bare path implies the filesystem plugin.
            parts = ("filesystem", parts[0])
        repo_type, location = parts
        if repo_type == "filesystem":
            # choice of abspath here vs realpath is deliberate. Realpath gives
            # canonical path, which can be a problem if two studios are sharing
            # packages, and have mirrored package paths, but some are actually
            # different paths, symlinked to look the same. It happened!
            #
            location = os.path.abspath(location)
        normalised_path = "%s@%s" % (repo_type, location)
        # get possibly cached repo
        repository = self.repositories.get(normalised_path)
        # create and cache if not already cached
        if repository is None:
            repository = self._get_repository(normalised_path)
            self.repositories[normalised_path] = repository
        return repository

    def are_same(self, path_1, path_2):
        """Test that `path_1` and `path_2` refer to the same repository.

        This is more reliable than testing that the strings match, since slightly
        different strings might refer to the same repository (consider small
        differences in a filesystem path for example, eg '//svr/foo', '/svr/foo').

        Returns:
            True if the paths refer to the same repository, False otherwise.
        """
        if path_1 == path_2:
            return True
        repo_1 = self.get_repository(path_1)
        repo_2 = self.get_repository(path_2)
        return (repo_1.uid == repo_2.uid)

    def get_resource(self, resource_key, repository_type, location,
                     **variables):
        """Get a resource.

        Attempts to get and return a cached version of the resource if
        available, otherwise a new resource object is created and returned.

        Args:
            resource_key (`str`): Name of the type of `Resources` to find
            repository_type (`str`): What sort of repository to look for the
                resource in
            location (`str`): location for the repository
            variables: data to identify / store on the resource

        Returns:
            `PackageRepositoryResource` instance.
        """
        path = "%s@%s" % (repository_type, location)
        repo = self.get_repository(path)
        # BUG FIX: resource_key was previously dropped from this call
        # (repo.get_resource(**variables)), which raised a TypeError because
        # PackageRepository.get_resource takes resource_key positionally.
        resource = repo.get_resource(resource_key, **variables)
        return resource

    def get_resource_from_handle(self, resource_handle):
        """Get a resource.

        Args:
            resource_handle (`ResourceHandle`): Handle of the resource.

        Returns:
            `PackageRepositoryResource` instance.
        """
        repo_type = resource_handle.get("repository_type")
        location = resource_handle.get("location")
        if not (repo_type and location):
            raise ValueError("PackageRepositoryManager requires "
                             "resource_handle objects to have a "
                             "repository_type and location defined")
        path = "%s@%s" % (repo_type, location)
        repo = self.get_repository(path)
        resource = repo.get_resource_from_handle(resource_handle)
        return resource

    def clear_caches(self):
        """Clear all cached data."""
        self.repositories.clear()
        self.pool.clear_caches()

    def _get_repository(self, path, **repo_args):
        # Instantiate the repository plugin identified by the "type@" prefix.
        repo_type, location = path.split('@', 1)
        cls = plugin_manager.get_plugin_class('package_repository', repo_type)
        repo = cls(location, self.pool, **repo_args)
        return repo
# singleton
# Module-level default manager shared by the rest of the package.
package_repository_manager = PackageRepositoryManager()
|
import numpy as np
from typing import Optional
from scipy import optimize as opt
from math import *
class Fourier:
    """
    Fourier optimizer: a heuristic optimization method for QAOA,
    implemented according to the paper
    "Quantum Approximate Optimization Algorithm: Performance, Mechanism, and Implementation on Near-Term Devices"
    """
    def __init__(self,
                 p: int = 1,
                 q: Optional[int] = None,  # 4
                 r: Optional[int] = 0,
                 alpha: Optional[float] = 0.6,
                 optimize_method: Optional[str] = 'COBYLA',
                 initial_point: Optional[list] = None
                 ) -> None:
        """
        initialize an optimizer of FOURIER with parameters q and R

        Args:
            p: the level parameter in the QAOA paper
            q: the maximum frequency component allowed in the amplitude
                parameters (u, v); capped at p, and treated as p when None
            r: the number of random perturbations to add at each level
            alpha: scale factor applied to the random perturbations
            optimize_method: method name forwarded to scipy.optimize.minimize
            initial_point: optional starting point (length 2*p); a uniform
                random point is drawn in optimize() when not given
        """
        self._p = p
        # Invariant: q never exceeds p (None means "use p").
        self._q = q if q is not None and q < self._p else self._p
        self._r = r
        self._alpha = alpha
        self._optimize_method = optimize_method
        self._initial_point = initial_point
        self._objective_function = None

    @property
    def q(self):
        return self._q

    @q.setter
    def q(self, aq):
        # Preserve the invariant q <= p.
        self._q = aq if aq < self._p else self._p

    @property
    def r(self):
        return self._r

    @r.setter
    def r(self, ar):
        self._r = ar

    def calculate_gb(self, step, pargs):
        """Expand frequency amplitudes (u, v) into per-layer (gamma, beta).

        Args:
            step: the current level (number of gamma/beta entries to build).
            pargs: concatenated [u, v] amplitudes, each of length min(q, step).

        Returns:
            (gamma, beta): two numpy arrays of length `step`.
        """
        upb = min(self._q, step)
        u, v = pargs[:upb], pargs[upb:]
        gamma, beta = np.zeros(step), np.zeros(step)
        # Discrete sine (gamma) / cosine (beta) transforms from the FOURIER
        # parameterization.
        for i in range(1, step + 1):
            for k in range(1, upb + 1):
                gamma[i - 1] += u[k - 1] * sin((k - 0.5) * (i - 0.5) * pi / step)
                beta[i - 1] += v[k - 1] * cos((k - 0.5) * (i - 0.5) * pi / step)
        return gamma, beta

    def loss_function(self, pargs, step):
        """Evaluate the user objective at the angles encoded by `pargs`."""
        gamma, beta = self.calculate_gb(step, pargs)
        return self._objective_function(np.append(gamma, beta), step)

    def _minimize(self, objective_function):
        """
        minimize the loss function level by level (the FOURIER[q, R] strategy)

        Args:
            objective_function: callable(params, step) -> float, where params
                is the concatenation of gamma and beta for the given step

        Returns:
            dict with keys:
                "gamma"/"beta": the optimized per-layer angles
                "optimal value": the optimized value of the loss function
                "nfev": the number of objective function calls
        """
        self._objective_function = objective_function
        nfev = 0
        ul, vl = None, None
        u_best, v_best = None, None
        min_val = float("inf")
        for j in range(1, self._p + 1):
            u_list, v_list = [], []
            min_val = float("inf")
            if j == 1:
                # Level 1 starts from the user-supplied / random initial point.
                ul = list(self._initial_point[: j])
                vl = list(self._initial_point[self._q: self._q + j])
            else:
                if j <= self._q:
                    # A new frequency component becomes available; seed it at 0.
                    ul.append(0)
                    vl.append(0)
            # BUG FIX: the original dereferenced u_best.copy() unconditionally,
            # but u_best is still None at level j == 1 (AttributeError on every
            # run). Perturbed restarts only exist once a previous level's
            # optimum is available.
            if u_best is not None:
                for r in range(self._r + 1):
                    u_nx, v_nx = u_best.copy(), v_best.copy()
                    if r > 0:
                        # Random restart: perturb the previous optimum.
                        for i, _ in enumerate(u_best):
                            u_nx[i] = u_nx[i] + self._alpha * np.random.normal(loc=0, scale=fabs(u_best[i]))
                            v_nx[i] = v_nx[i] + self._alpha * np.random.normal(loc=0, scale=fabs(v_best[i]))
                    if j <= self._q:
                        u_nx.append(0)
                        v_nx.append(0)
                    u_list.append(u_nx)
                    v_list.append(v_nx)
            for idx in range(len(u_list) + 1):
                # idx == 0 is the unperturbed trajectory (ul, vl); the rest are
                # the perturbed candidates built above.
                if idx == 0:
                    u_cal, v_cal = ul, vl
                else:
                    u_cal, v_cal = u_list[idx - 1], v_list[idx - 1]
                # BUG FIX: jac=opt.rosen_der (the Rosenbrock gradient) was
                # passed here; it is unrelated to the user objective and has
                # been removed. args is wrapped in a tuple for clarity.
                res = opt.minimize(self.loss_function,
                                   x0=np.append(u_cal, v_cal),
                                   args=(j,),
                                   method=self._optimize_method,  # 'COBYLA',
                                   tol=1e-6,
                                   options={'gtol': 1e-6, 'maxiter': 30, 'disp': True})
                upb = min(self._q, j)
                if idx == 0:
                    ul, vl = list(res["x"][:upb]), list(res["x"][upb:])
                func_val = res["fun"]
                if func_val < min_val:
                    min_val = func_val
                    # BUG FIX: the u/v split point is upb = min(q, j); slicing
                    # at j mixed u and v entries whenever j > q.
                    u_best, v_best = list(res["x"][:upb]), list(res["x"][upb:])
                nfev += res["nfev"]
        gamma_list, beta_list = self.calculate_gb(self._p, u_best + v_best)
        return {"gamma": gamma_list, "beta": beta_list, "optimal value": min_val, "nfev": nfev}

    def optimize(self, objective_function, p):
        """Run the FOURIER strategy; draws a random initial point if needed."""
        if self._initial_point is None:
            self._initial_point = np.array([np.random.random() for x in range(2 * p)])
        return self._minimize(objective_function)
|
import asyncio
from typing import Type
from ..config import Config
from ..typing import ASGIFramework
class UnexpectedMessage(Exception):
    """Raised when the app sends a lifespan message of an unknown type."""
    pass
class Lifespan:
    """Drives the ASGI lifespan protocol for the wrapped application.

    Startup/shutdown messages are queued to the app via ``app_queue`` and the
    matching ``*.complete`` replies set the ``startup``/``shutdown`` events.
    Apps that reject the lifespan scope are detected once and handled via the
    Quart 0.6.X compatibility hooks instead.
    """

    def __init__(self, app: Type[ASGIFramework], config: Config) -> None:
        self.app = app
        self.config = config
        # Set from asgi_send when the app confirms startup / shutdown.
        self.startup = asyncio.Event()
        self.shutdown = asyncio.Event()
        # Messages put here are delivered to the app through asgi_receive.
        self.app_queue: asyncio.Queue = asyncio.Queue()
        # Assume support until instantiating the lifespan scope proves otherwise.
        self.supported = True
        self._support_checked = asyncio.Event()

    async def handle_lifespan(self) -> None:
        scope = {"type": "lifespan"}
        try:
            instance = self.app(scope)
        except Exception:
            # The app rejected the lifespan scope; record that and carry on.
            self._support_checked.set()
            self.supported = False
            if self.config.error_logger is not None:
                self.config.error_logger.warning(
                    "ASGI Framework Lifespan error, continuing without Lifespan support"
                )
            return
        self._support_checked.set()
        try:
            await instance(self.asgi_receive, self.asgi_send)
        except asyncio.CancelledError:
            pass
        except Exception:
            if self.config.error_logger is not None:
                self.config.error_logger.exception("Error in ASGI Framework")

    async def wait_for_startup(self) -> None:
        await self._support_checked.wait()
        if self.supported:
            await self.app_queue.put({"type": "lifespan.startup"})
            await asyncio.wait_for(self.startup.wait(), timeout=self.config.startup_timeout)
        elif hasattr(self.app, "startup"):  # Compatibility with Quart 0.6.X
            await self.app.startup()  # type: ignore

    async def wait_for_shutdown(self) -> None:
        await self._support_checked.wait()
        if self.supported:
            await self.app_queue.put({"type": "lifespan.shutdown"})
            await asyncio.wait_for(self.shutdown.wait(), timeout=self.config.shutdown_timeout)
        elif hasattr(self.app, "cleanup"):  # Compatibility with Quart 0.6.X
            await self.app.cleanup()  # type: ignore

    async def asgi_receive(self) -> dict:
        return await self.app_queue.get()

    async def asgi_send(self, message: dict) -> None:
        # Map completion messages to the event they resolve.
        events = {
            "lifespan.startup.complete": self.startup,
            "lifespan.shutdown.complete": self.shutdown,
        }
        event = events.get(message["type"])
        if event is None:
            raise UnexpectedMessage(message["type"])
        event.set()
|
__author__ = 'valkyrie_Z'
import os
from urllib.request import urlretrieve
import zipfile
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
from cnn_model import *
def show_all_kinds_images(figsize_input, X_data, y_data):
    """Plot one randomly chosen sample image for each of the 43 sign classes.

    Args:
        figsize_input: (width, height) figure size passed to matplotlib.
        X_data: indexable collection of images.
        y_data: array of integer class labels aligned with X_data.
    """
    plt.figure(figsize=figsize_input)
    gs1 = gridspec.GridSpec(7, 7)
    gs1.update(wspace=0.1, hspace=0.1)
    for i in range(43):
        ax1 = plt.subplot(gs1[i])
        plt.axis('on')
        ax1.set_xticklabels([])
        ax1.set_yticklabels([])
        ax1.set_aspect('equal')
        index_y = np.argwhere(y_data == i)
        # BUG FIX: randint(1, len) could never select the first sample of a
        # class, and raised ValueError (low >= high) when a class had exactly
        # one sample. Sample uniformly over all valid indices instead.
        ind_plot = np.random.randint(0, len(index_y))
        plt.imshow(X_data[int(index_y[ind_plot])])
        plt.text(3, 3, str(i), color='k', backgroundcolor='c')
        plt.axis('off')
    plt.show()
def show_datas_num(data_set):
    """Plot a bar chart of per-class sample counts, sorted by occurrence.

    Reads the class-id -> name mapping from 'signnames.csv' in the working
    directory.

    Args:
        data_set: array of integer class labels.
    """
    # (class id, occurrence count) for every distinct class, sorted ascending.
    data_i = [[i, sum(data_set == i)] for i in range(len(np.unique(data_set)))]
    data_i_sorted = sorted(data_i, key=lambda x: x[1])
    data_pd = pd.read_csv('signnames.csv')
    data_pd['Occurance'] = pd.Series(np.asarray(data_i_sorted).T[1], index=np.asarray(data_i_sorted).T[0])
    data_pd_sorted = data_pd.sort_values(['Occurance'], ascending=[0]).reset_index()
    # BUG FIX: drop('index', 1) relied on the positional `axis` argument,
    # which was deprecated in pandas 1.0 and removed in pandas 2.0.
    data_pd_sorted = data_pd_sorted.drop(columns='index')
    plt.figure(figsize=(12, 8))
    # Generalized from the hard-coded 43 so the bar count always matches the
    # number of classes present.
    plt.bar(range(len(data_pd_sorted)), height=data_pd_sorted["Occurance"])
    plt.show()
if __name__ == '__main__':
    # Download location and archive name for the traffic-sign dataset.
    url_link = "https://s3-us-west-1.amazonaws.com/udacity-selfdrivingcar/traffic-signs-data.zip"
    file_name = "traffic-signs-data.zip"
    # augment_images (imported from cnn_model) presumably fetches/unpacks the
    # archive and exposes the raw and preprocessed splits - see cnn_model.
    Process_data = augment_images(url_link, file_name)
    # print("X_train_origen: " + str(X_train_origen.shape) + str(y_train_origen.shape))
    # print("X_valid_origen: " + str(X_valid_origen.shape) + str(y_valid_origen.shape))
    # print("X_test_origen: " + str(X_test_origen.shape) + str(y_test_origen.shape))
    X_train_origen, y_train_origen = Process_data.get_train_data_origen()
    X_valid_origen, y_valid_origen = Process_data.get_vaild_data_origen()
    X_test_origen, y_test_origen = Process_data.get_test_data_origen()
    # Number of training examples
    n_train = X_train_origen.shape[0]
    # Number of validation examples
    n_validation = X_valid_origen.shape[0]
    # Number of testing examples.
    n_test = X_test_origen.shape[0]
    # What's the shape of a traffic sign image?
    image_shape = X_train_origen[0].shape
    # How many unique classes/labels there are in the dataset.
    n_classes = len(np.unique(y_train_origen))
    print("Number of training examples =", n_train)
    print("Number of testing examples =", n_test)
    print("Number of valid examples = ", n_validation)
    print("Image data shape =", image_shape)
    print("Number of classes =", n_classes)
    # #show all kinds of images
    # show_all_kinds_images((8,8) , X_train_origen, y_train_origen)
    # #show datas number, sorted
    # show_datas_num(y_train_origen)
    # Preprocess each split, then build and train the CNN model.
    X_train, y_train = Process_data.train_data_peocess()
    X_valid, y_valid = Process_data.vaild_data_process()
    X_test, y_test = Process_data.test_data_process()
    cnn_m = cnn_model_c()
    cnn_m.create()
    cnn_m.train(X_train, y_train, X_valid, y_valid, X_test, y_test)
|
from cursor import data
from cursor import device
from cursor import path
from cursor import renderer
def save_wrapper(pc, projname, fname):
    """Render the path collection *pc* as a JPEG into the project's jpg dir."""
    out_dir = data.DataDirHandler().jpg(projname)
    jpg = renderer.JpegRenderer(out_dir)
    jpg.render(pc, scale=4.0, thickness=6)
    jpg.save(fname)
def svg_save_wrapper(pc, projname, fname):
    """Render the path collection *pc* as an SVG into the project's svg dir."""
    out_dir = data.DataDirHandler().svg(projname)
    svg = renderer.SvgRenderer(out_dir)
    svg.render(pc)
    svg.save(fname)
def rect(x, y):
    """Return a closed square path (corners at 0.2/0.8) translated by (x, y)."""
    corners = [
        (0.2, 0.2),
        (0.2, 0.8),
        (0.8, 0.8),
        (0.8, 0.2),
        (0.2, 0.2),  # repeat the first corner to close the square
    ]
    p = path.Path()
    for cx, cy in corners:
        p.add(cx, cy)
    p.translate(x, y)
    return p
if __name__ == "__main__":
    pc = path.PathCollection()
    # Horizontal strokes: one path per row, plus a small square per cell.
    for y in range(30):
        p = path.Path()
        for x in range(41):
            p.add(x, y)
            p.add(x + 1, y)
            # NOTE(review): x ranges over 0..40, so this guard is always true;
            # possibly `x != 40` was intended to skip the last column - confirm.
            if x != 41:
                pc.add(rect(x, y))
        pc.add(p)
    # Vertical strokes: one path per column.
    for x in range(42):
        p = path.Path()
        for y in range(30):
            p.add(x, y)
            p.add(x, y + 1)
        pc.add(p)
    # pc.fit(device.Paper.sizes[device.PaperSize.LANDSCAPE_A3], padding_mm=10)
    # svg_save_wrapper(pc, "grid_battle", "millimeter_papier")
    # Export for the plotter; 10 is presumably the padding in mm (cf. the
    # commented-out fit call above) - confirm against SimpleExportWrapper.ex.
    device.SimpleExportWrapper().ex(
        pc,
        device.PlotterType.ROLAND_DXY1200,
        device.PaperSize.LANDSCAPE_A3,
        10,
        "grid_battle",
        "1_insides",
    )
|
""" Functions for generating statistical summaries from multi-state life tables """
import numpy as np
import pandas as pd
import itertools
import METER.table as tb
def bootstrapLE(data, transition_names, states, initial_states, n=1000, initial_time=0, censor_states='default', group_names='default', conditions='default', loud=False):
    """
    Run a bootstrap on the life expectancy for a given set of groups

    Parameters
    ----------
    data : pandas dataframe
        the data in wide format as generated by :py:func:`.wide_format`
    transition_names : list
        a list of the names of the columns that contain the transition times
    states: list
        the names of the states in the model
    initial_states : list
        a list of initial states to estimate from
    n : int
        the number of bootstraps to run, by default 1000.
    initial_time: int
        to estimate life expectancy after a given time (by default 0)
    censor_states: list
        the states you want each group's life expectancy to be censored at (by default no censoring)
        if provided this list must be the same length as initial_states
    group_names: list
        what the groups (whose structure is defined both by the initial states and censor states given)
        are to be called. by default this is the same as the initial states. if provided this list must be the same length as initial_states
    conditions : list
        a list of dictionaries of conditions you want each group to be subject to (by default none).
        ex. [{'Race': 'White', 'Smoking': 'Yes'}, {'Race': 'Black', 'Smoking': 'No'}]
    loud : bool
        by default this is false. If it is set to true a small summary of the results of each bootstrap
        as well as the best estimate calculated initially are printed to the console.

    Returns
    ----------
    pandas dataframe
        a dataframe containing the results of each run of the bootstrap. Each row will include that
        bootstrap life expectancy for each group as well as each of the possible group differences.
    """
    if censor_states == 'default':
        # BUG FIX: this used to be `states[-2]*len(initial_states)`, which
        # repeats the *characters* of the state name (str * int), so
        # censor_states[i] later indexed single letters. Build a list with
        # one censor state per initial state instead.
        censor_states = [states[-2]] * len(initial_states)
    if group_names == 'default':
        group_names = initial_states
    if conditions == 'default':
        conditions = [{}] * len(initial_states)
    rows = []
    colnames = []
    # Best-estimate pass over the full (unresampled) data.
    results = []
    for i in range(0, len(initial_states)):
        results.append(tb.censorLE(data, transition_names, states, initial_states[i], initial_time, censor_states[i], conditions[i]))
    if loud:
        print("BEST ESTIMATE")
    for i in range(0, len(results)):
        if loud:
            print(group_names[i] + " Life Expectancy: " + str(results[i]))
        colnames.append(group_names[i] + " Life Expectancy")
    # Pairwise group differences for the best estimate.
    pair_results = []
    for pair in itertools.combinations([i for i in range(0, len(results))], 2):
        diff = results[pair[0]] - results[pair[1]]
        pair_results.append(diff)
        colnames.append(group_names[pair[0]] + "-" + group_names[pair[1]] + " Difference")
        if loud:
            print(group_names[pair[0]] + "-" + group_names[pair[1]] + " Difference: " + str(diff))
    rows.append(results + pair_results)
    # Round only the best-estimate row (row 0) to the reported precision.
    for i in range(0, len(rows[0])):
        rows[0][i] = round(rows[0][i], 1)
    # Bootstrap passes: resample rows with replacement and recompute.
    for i in range(0, n):
        boot_df = data.sample(frac=1, replace=True)
        results = []
        for y in range(0, len(censor_states)):
            results.append(tb.censorLE(boot_df, transition_names, states, initial_states[y], initial_time, censor_states[y], conditions[y]))
        if loud:
            print("BOOT RUN " + str(i + 1))
        if loud:
            for y in range(0, len(results)):
                print(group_names[y] + " Life Expectancy: " + str(results[y]))
        pair_results = []
        for pair in itertools.combinations([x for x in range(0, len(results))], 2):
            diff = results[pair[0]] - results[pair[1]]
            pair_results.append(diff)
            if loud:
                print(group_names[pair[0]] + "-" + group_names[pair[1]] + " Difference: " + str(diff))
        rows.append(results + pair_results)
    boot_results = pd.DataFrame(rows, columns=colnames)
    boot_results.index.name = "Bootstrap Run"
    boot_results.rename(index={0: 'Best Estimate'}, inplace=True)
    return boot_results
def confidence_interval(data, column_name, confidence_level):
    """
    get a confidence interval from a bootstrap dataframe column

    Parameters
    ----------
    data : pandas dataframe
        the bootstrap dataframe generated by :py:func:`.bootstrapLE`
    column_name : string
        the statistic that you want the interval for, specified by the name of
        the column containing it
    confidence_level : float
        a real number between 0 and 1 that represents the desired confidence
        level. eg. 0.95 for 95%.

    Returns
    ----------
    list
        a two-element list with the lower bound and upper bound.
    """
    values = sorted(data[column_name].tolist())
    size = len(values)
    # Percentile indices for the two tails; clamp the lower one at 0 for
    # very small samples.
    lower_idx = max(int((1 - confidence_level) / 2 * size) - 1, 0)
    upper_idx = int((confidence_level + 1) / 2 * size) - 1
    return [round(float(values[lower_idx]), 1), round(float(values[upper_idx]), 1)]
def summary_results(bootstrap, confidence_level=0.95):
    """
    Summarize the results of a bootstrap.

    Parameters
    ----------
    bootstrap : pandas dataframe
        the bootstrap dataframe generated by :py:func:`.bootstrapLE`
    confidence_level : float
        the confidence level which you want to generate confidence intervals for

    Returns
    ----------
    pandas dataframe
        A dataframe summarizing the point estimates and confidence intervals for each quantity.
    """
    # Point estimates come from the dedicated 'Best Estimate' row.
    best_estimates = [bootstrap[col]['Best Estimate'] for col in bootstrap.columns]
    # The remaining rows are the actual bootstrap runs.
    boot_df = bootstrap.drop(['Best Estimate'])
    boot_df.index.name = 'Bootstrap Run'
    confidence_intervals = [
        confidence_interval(boot_df, col, confidence_level)
        for col in boot_df.columns
    ]
    summary_column = [
        str(estimate) + " " + str(interval)
        for estimate, interval in zip(best_estimates, confidence_intervals)
    ]
    return pd.DataFrame(
        list(zip(summary_column, boot_df.columns)),
        columns=['Multi-State Modelling (Estimate [CI])', 'Measure'],
    )
|
import config
from gui.dc_gan_widget import DcGanWidget
from PySide2.QtWidgets import QApplication
def set_params(widget, opt):
    """Copy the tuning parameters from the parsed options onto the widget
    and its underlying network (alpha1..alpha4 and the loop count)."""
    for idx in range(1, 5):
        value = getattr(opt, "a%d" % idx)
        # Mirror each alpha onto both the network and the widget.
        setattr(widget.model.net, "alpha%d" % idx, value)
        setattr(widget, "alpha%d" % idx, value)
    widget.model.net.loop_count = widget.loop_num = opt.loop_count
if __name__ == '__main__':
    manual_seed = 1989  # if not None will set constant generations
    # end of setting configurations
    opt = config.get_configurations()
    # Initialise the application
    app = QApplication([])
    # Call the main widget
    ex = DcGanWidget(app_name=opt.app_name, opt=opt, manual_seed=manual_seed)
    if not opt.eval:
        # Interactive mode: hand control to the Qt event loop.
        app.exec_()
    else:
        # Evaluation mode: push the configured alphas/loop count onto the
        # widget, then generate samples without entering the event loop.
        # set alphas
        set_params(ex, opt)
        # create 50K Samples
        ex.generate_for_eval()
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Administrative division loader
# ----------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
# NOC modules
from .base import BaseLoader
from noc.gis.models.division import Division
class AdmDivLoader(BaseLoader):
    """
    Administrative division loader
    """
    # Loader identifier used to select this loader.
    name = "admdiv"
    # Target model that imported rows are written to.
    model = Division
    # Source fields expected in the import feed.
    fields = ["id", "parent", "name", "short_name"]
    # Fields whose values are references resolved through another loader
    # (here: parent divisions resolve via this same "admdiv" loader).
    mapped_fields = {"parent": "admdiv"}
|
#!/usr/bin/env python
"Only tensions analysis for VAMPy project"
import wx
from wxgui import tension, resources
class tensVamPyApp(wx.App):
    '''Actual wxPython application'''
    def OnInit(self):
        """Create and show the main tensions frame; called once by wx.

        Returns:
            bool: True to signal that initialisation succeeded.
        """
        # Register the custom art provider so frames can look up the
        # project's icons/bitmaps through wx.ArtProvider.
        customartprovider = resources.CustomArtProvider()
        wx.ArtProvider.Push(customartprovider)
        frame = tension.TensionsFrame(parent=None, id=-1)
        frame.Show()
        return True
# Instantiate the app (False: do not redirect stdout/stderr to a wx window)
# and enter the GUI event loop; MainLoop blocks until the last frame closes.
app = tensVamPyApp(False)
app.MainLoop()
|
"""
Support for FRITZ!DECT Switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.fritzdect/
"""
import logging
from requests.exceptions import RequestException, HTTPError
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.const import TEMP_CELSIUS, ATTR_TEMPERATURE
REQUIREMENTS = ['fritzhome==1.0.4']
_LOGGER = logging.getLogger(__name__)
# Standard Fritz Box IP
DEFAULT_HOST = 'fritz.box'
ATTR_CURRENT_CONSUMPTION = 'current_consumption'
ATTR_CURRENT_CONSUMPTION_UNIT = 'current_consumption_unit'
ATTR_CURRENT_CONSUMPTION_UNIT_VALUE = 'W'
ATTR_TOTAL_CONSUMPTION = 'total_consumption'
ATTR_TOTAL_CONSUMPTION_UNIT = 'total_consumption_unit'
ATTR_TOTAL_CONSUMPTION_UNIT_VALUE = 'kWh'
ATTR_TEMPERATURE_UNIT = 'temperature_unit'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Add all switches connected to Fritz Box."""
    from fritzhome.fritz import FritzBox
    host = config.get(CONF_HOST)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    # Log into Fritz Box
    fritz = FritzBox(host, username, password)
    try:
        fritz.login()
    except Exception:  # pylint: disable=broad-except
        # Deliberately broad: abort platform setup on any login failure.
        _LOGGER.error("Login to Fritz!Box failed")
        return
    # Add all actors to hass
    for actor in fritz.get_actors():
        # Only add devices that support switching
        if actor.has_switch:
            data = FritzDectSwitchData(fritz, actor.actor_id)
            # Login above succeeded, so the poller may start issuing requests.
            data.is_online = True
            add_devices([FritzDectSwitch(hass, data, actor.name)], True)
class FritzDectSwitch(SwitchDevice):
    """Representation of a FRITZ!DECT switch."""

    def __init__(self, hass, data, name):
        """Initialize the switch.

        Args:
            hass: Home Assistant core object (used here for unit conversion).
            data (FritzDectSwitchData): poller/cache for this actor's state.
            name (str): friendly device name reported by the Fritz!Box.
        """
        self.units = hass.config.units
        self.data = data
        self._name = name

    @property
    def name(self):
        """Return the name of the FRITZ!DECT switch, if any."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        attrs = {}
        # Only expose consumption once the poller has real readings.
        if self.data.has_powermeter and \
           self.data.current_consumption is not None and \
           self.data.total_consumption is not None:
            attrs[ATTR_CURRENT_CONSUMPTION] = "{:.1f}".format(
                self.data.current_consumption)
            attrs[ATTR_CURRENT_CONSUMPTION_UNIT] = "{}".format(
                ATTR_CURRENT_CONSUMPTION_UNIT_VALUE)
            attrs[ATTR_TOTAL_CONSUMPTION] = "{:.3f}".format(
                self.data.total_consumption)
            attrs[ATTR_TOTAL_CONSUMPTION_UNIT] = "{}".format(
                ATTR_TOTAL_CONSUMPTION_UNIT_VALUE)
        if self.data.has_temperature and \
           self.data.temperature is not None:
            # Convert the Celsius reading to the configured HA unit system.
            attrs[ATTR_TEMPERATURE] = "{}".format(
                self.units.temperature(self.data.temperature, TEMP_CELSIUS))
            attrs[ATTR_TEMPERATURE_UNIT] = "{}".format(
                self.units.temperature_unit)
        return attrs

    @property
    def current_power_watt(self):
        """Return the current power usage in Watt."""
        # NOTE(review): float(None) raises TypeError, which this does not
        # catch - confirm current_consumption is numeric whenever HA reads
        # this property.
        try:
            return float(self.data.current_consumption)
        except ValueError:
            return None

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self.data.state

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        if not self.data.is_online:
            _LOGGER.error("turn_on: Not online skipping request")
            return
        try:
            actor = self.data.fritz.get_actor_by_ain(self.data.ain)
            actor.switch_on()
        except (RequestException, HTTPError):
            # Mark offline so the next update() re-authenticates first.
            _LOGGER.error("Fritz!Box query failed, triggering relogin")
            self.data.is_online = False

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        if not self.data.is_online:
            _LOGGER.error("turn_off: Not online skipping request")
            return
        try:
            actor = self.data.fritz.get_actor_by_ain(self.data.ain)
            actor.switch_off()
        except (RequestException, HTTPError):
            # Mark offline so the next update() re-authenticates first.
            _LOGGER.error("Fritz!Box query failed, triggering relogin")
            self.data.is_online = False

    def update(self):
        """Get the latest data from the fritz box and updates the states."""
        if not self.data.is_online:
            _LOGGER.error("update: Not online, logging back in")
            try:
                self.data.fritz.login()
            except Exception:  # pylint: disable=broad-except
                _LOGGER.error("Login to Fritz!Box failed")
                return
            self.data.is_online = True
        try:
            self.data.update()
        except Exception:  # pylint: disable=broad-except
            # data.update() raises on any request failure; go offline and
            # let the next cycle re-login.
            _LOGGER.error("Fritz!Box query failed, triggering relogin")
            self.data.is_online = False
class FritzDectSwitchData:
    """Get the latest data from the fritz box."""

    def __init__(self, fritz, ain):
        """Initialize the data object.

        Args:
            fritz: authenticated fritzhome FritzBox client.
            ain: actor identification number of the device to poll.
        """
        self.fritz = fritz
        self.ain = ain
        # Cached readings; None until the first successful update().
        self.state = None
        self.temperature = None
        self.current_consumption = None
        self.total_consumption = None
        # Capability flags copied from the actor on each update.
        self.has_switch = False
        self.has_temperature = False
        self.has_powermeter = False
        self.is_online = False

    def _invalidate(self):
        """Clear all cached readings after a failed poll (extracted from the
        three identical copy-pasted reset blocks in update())."""
        self.state = None
        self.temperature = None
        self.current_consumption = None
        self.total_consumption = None

    def update(self):
        """Get the latest data from the fritz box."""
        if not self.is_online:
            _LOGGER.error("Not online skipping request")
            return
        try:
            actor = self.fritz.get_actor_by_ain(self.ain)
        except (RequestException, HTTPError):
            _LOGGER.error("Request to actor registry failed")
            self._invalidate()
            raise Exception('Request to actor registry failed')
        if actor is None:
            _LOGGER.error("Actor could not be found")
            self._invalidate()
            raise Exception('Actor could not be found')
        try:
            self.state = actor.get_state()
            # Scaled to W (/1000) and kWh-like units (/100000) - presumably
            # the raw values are mW and a hundredth of a Wh; NOTE(review):
            # confirm the divisors against the fritzhome API.
            self.current_consumption = (actor.get_power() or 0.0) / 1000
            self.total_consumption = (actor.get_energy() or 0.0) / 100000
        except (RequestException, HTTPError):
            _LOGGER.error("Request to actor failed")
            self._invalidate()
            raise Exception('Request to actor failed')
        self.temperature = actor.temperature
        self.has_switch = actor.has_switch
        self.has_temperature = actor.has_temperature
        self.has_powermeter = actor.has_powermeter
|
# Generated by Django 3.0.8 on 2020-08-14 13:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Professor.professor_id to a CharField(max_length=2000) primary key."""

    dependencies = [
        ('timetableapp', '0003_auto_20200814_1834'),
    ]

    operations = [
        # serialize=False excludes the field from serialized model output,
        # the Django default for primary keys added via AlterField.
        migrations.AlterField(
            model_name='professor',
            name='professor_id',
            field=models.CharField(max_length=2000, primary_key=True, serialize=False),
        ),
    ]
|
PROFESSIONAL_KEY = 9527  # system settings key
# Connection lifecycle codes (1xx).
WECHAT_CONNECTED = 100  # socket connected
HEART_BEAT = 101  # heartbeat
WECHAT_DISCONNECT = 102  # socket disconnected
# WeChat session / incoming event codes (2xx).
WECHAT_LOGIN = 200  # WeChat login
WECHAT_LOGOUT = 201  # WeChat logout
CHAT_MESSAGE = 202  # chat message
GROUP_MEMBER_DETAILS = 203  # group member details
GROUP_MEMBER_EVENT = 204  # group membership change
# Request command codes (3xx) - names suggest client-initiated actions.
GET_ACCOUNT_DETAILS = 300  # account details
GET_CONTACTS_LIST = 301  # contacts list
GET_CONTACT_DETAILS = 302  # contact details
SEND_TEXT = 303
SEND_FILE = 304
USER_LOGOUT = 305
SEND_ANNOUNCEMENT = 306
ACCEPT_NEW_CONTACT = 307
SET_REMARK = 308
SHARE_CHATROOM = 309
REMOVE_CHATROOM_MEMBER = 310
REMOVE_CONTACT = 311
SEND_MINI_PROGRAM = 312
SEND_LINK_CARD = 313
DECRYPT_IMAGE = 314
CREATE_CHATROOM = 315
SET_CHATROOM_NAME = 316
GET_CONTACT_STATUS = 317
GET_LOGIN_QRCODE = 318
SEND_CARD = 319
GET_GROUP_ENTER_URL = 320
# Response/callback codes (4xx) - names suggest replies to the 3xx requests.
ACCOUNT_DETAILS = 400
CONTACTS_LIST = 401
CONTACT_DETAILS = 402
CREATE_GROUP_CALLBACK = 403
LOGIN_QRCODE = 404
GROUP_ENTER_URL = 405
SEND_TEXT_CALLBACK = 406
SEND_XML_CALLBACK = 407
SEND_IMAGE_CALLBACK = 408
|
from prometheus_client import Counter, Histogram
from django.conf import settings
from django_prometheus.utils import PowersOf, Time, TimeSince
# Histogram bucket upper bounds (in seconds) used for request-latency metrics
# when settings.PROMETHEUS_LATENCY_BUCKETS is not configured (see the
# requests_latency_by_view_method metric below).
DEFAULT_LATENCY_BUCKETS = (
    0.01,
    0.025,
    0.05,
    0.075,
    0.1,
    0.25,
    0.5,
    0.75,
    1.0,
    2.5,
    5.0,
    7.5,
    10.0,
    25.0,
    50.0,
    75.0,
    float("inf"),
)
class MiddlewareMixin(object):
    """Adapter that lets an old-style (process_request/process_response)
    middleware be used as a new-style callable middleware."""

    def __init__(self, get_response=None):
        super(MiddlewareMixin, self).__init__()
        self.get_response = get_response

    def __call__(self, request):
        # Give the old-style pre-hook a chance to short-circuit the request.
        response = self.process_request(request) if hasattr(self, "process_request") else None
        if not response:
            response = self.get_response(request)
        # Let the old-style post-hook rewrite the response.
        if hasattr(self, "process_response"):
            response = self.process_response(request, response)
        return response
class Metrics(object):
    """Creates and holds every Prometheus metric used by the middlewares.

    Accessed through ``get_instance()`` so the metrics are constructed --
    and therefore registered with prometheus_client -- only once per process.
    """

    _instance = None

    @classmethod
    def get_instance(cls):
        # Lazy process-wide singleton.
        if not cls._instance:
            cls._instance = cls()
        return cls._instance

    def register_metric(
        self, metric_cls, name, documentation, labelnames=tuple(), **kwargs
    ):
        """Instantiate one metric; hook point for subclasses to customize."""
        return metric_cls(name, documentation, labelnames=labelnames, **kwargs)

    def __init__(self, *args, **kwargs):
        self.register()

    def register(self):
        """Create all counters and histograms recorded by the middlewares."""
        self.requests_total = self.register_metric(
            Counter,
            "django_http_requests_before_middlewares_total",
            "Total count of requests before middlewares run.",
        )
        self.responses_total = self.register_metric(
            Counter,
            "django_http_responses_before_middlewares_total",
            "Total count of responses before middlewares run.",
        )
        self.requests_latency_before = self.register_metric(
            Histogram,
            "django_http_requests_latency_including_middlewares_seconds",
            (
                "Histogram of requests processing time (including middleware "
                "processing time)."
            ),
        )
        self.requests_unknown_latency_before = self.register_metric(
            Counter,
            "django_http_requests_unknown_latency_including_middlewares_total",
            (
                "Count of requests for which the latency was unknown (when computing "
                "django_http_requests_latency_including_middlewares_seconds)."
            ),
        )
        self.requests_latency_by_view_method = self.register_metric(
            Histogram,
            "django_http_requests_latency_seconds_by_view_method",
            "Histogram of request processing time labelled by view.",
            ["view", "method"],
            # Bucket boundaries may be overridden via Django settings.
            buckets=getattr(
                settings, "PROMETHEUS_LATENCY_BUCKETS", DEFAULT_LATENCY_BUCKETS
            ),
        )
        self.requests_unknown_latency = self.register_metric(
            Counter,
            "django_http_requests_unknown_latency_total",
            "Count of requests for which the latency was unknown.",
        )
        # Set in process_request
        self.requests_ajax = self.register_metric(
            Counter, "django_http_ajax_requests_total", "Count of AJAX requests."
        )
        self.requests_by_method = self.register_metric(
            Counter,
            "django_http_requests_total_by_method",
            "Count of requests by method.",
            ["method"],
        )
        self.requests_by_transport = self.register_metric(
            Counter,
            "django_http_requests_total_by_transport",
            "Count of requests by transport.",
            ["transport"],
        )
        # Set in process_view
        self.requests_by_view_transport_method = self.register_metric(
            Counter,
            "django_http_requests_total_by_view_transport_method",
            "Count of requests by view, transport, method.",
            ["view", "transport", "method"],
        )
        self.requests_body_bytes = self.register_metric(
            Histogram,
            "django_http_requests_body_total_bytes",
            "Histogram of requests by body size.",
            # Powers-of-two buckets up to 2**30 bytes.
            buckets=PowersOf(2, 30),
        )
        # Set in process_template_response
        self.responses_by_templatename = self.register_metric(
            Counter,
            "django_http_responses_total_by_templatename",
            "Count of responses by template name.",
            ["templatename"],
        )
        # Set in process_response
        self.responses_by_status = self.register_metric(
            Counter,
            "django_http_responses_total_by_status",
            "Count of responses by status.",
            ["status"],
        )
        self.responses_by_status_view_method = self.register_metric(
            Counter,
            "django_http_responses_total_by_status_view_method",
            "Count of responses by status, view, method.",
            ["status", "view", "method"],
        )
        self.responses_body_bytes = self.register_metric(
            Histogram,
            "django_http_responses_body_total_bytes",
            "Histogram of responses by body size.",
            buckets=PowersOf(2, 30),
        )
        self.responses_by_charset = self.register_metric(
            Counter,
            "django_http_responses_total_by_charset",
            "Count of responses by charset.",
            ["charset"],
        )
        self.responses_streaming = self.register_metric(
            Counter,
            "django_http_responses_streaming_total",
            "Count of streaming responses.",
        )
        # Set in process_exception
        self.exceptions_by_type = self.register_metric(
            Counter,
            "django_http_exceptions_total_by_type",
            "Count of exceptions by object type.",
            ["type"],
        )
        self.exceptions_by_view = self.register_metric(
            Counter,
            "django_http_exceptions_total_by_view",
            "Count of exceptions by view.",
            ["view"],
        )
class PrometheusBeforeMiddleware(MiddlewareMixin):
    """Monitoring middleware that should run before other middlewares."""

    metrics_cls = Metrics

    def __init__(self, get_response=None):
        super(PrometheusBeforeMiddleware, self).__init__(get_response)
        self.metrics = self.metrics_cls.get_instance()

    def process_request(self, request):
        # Count the request and stamp it so process_response can compute
        # the end-to-end latency including the middleware chain.
        self.metrics.requests_total.inc()
        request.prometheus_before_middleware_event = Time()

    def process_response(self, request, response):
        self.metrics.responses_total.inc()
        started = getattr(request, "prometheus_before_middleware_event", None)
        if started is not None:
            self.metrics.requests_latency_before.observe(TimeSince(started))
        else:
            # The request never went through process_request.
            self.metrics.requests_unknown_latency_before.inc()
        return response
class PrometheusAfterMiddleware(MiddlewareMixin):
    """Monitoring middleware that should run after other middlewares."""

    metrics_cls = Metrics

    def __init__(self, get_response=None):
        super(PrometheusAfterMiddleware, self).__init__(get_response)
        self.metrics = self.metrics_cls.get_instance()

    def _transport(self, request):
        # "https" or "http", used as a metric label.
        return "https" if request.is_secure() else "http"

    def _method(self, request):
        # Collapse unexpected methods into one label value to keep
        # metric cardinality bounded.
        m = request.method
        if m not in (
            "GET",
            "HEAD",
            "POST",
            "PUT",
            "DELETE",
            "TRACE",
            "OPTIONS",
            "CONNECT",
            "PATCH",
        ):
            return "<invalid method>"
        return m

    def label_metric(self, metric, request, response=None, **labels):
        # Subclass hook: request/response are unused here but are passed so
        # overrides can derive extra labels from them.
        return metric.labels(**labels) if labels else metric

    def process_request(self, request):
        """Count the request by method/transport/body size and timestamp it."""
        transport = self._transport(request)
        method = self._method(request)
        self.label_metric(self.metrics.requests_by_method, request, method=method).inc()
        self.label_metric(
            self.metrics.requests_by_transport, request, transport=transport
        ).inc()
        # NOTE(review): request.is_ajax() was deprecated in Django 3.1 and
        # removed in 4.0 -- confirm the targeted Django version.
        if request.is_ajax():
            self.label_metric(self.metrics.requests_ajax, request).inc()
        content_length = int(request.META.get("CONTENT_LENGTH") or 0)
        self.label_metric(self.metrics.requests_body_bytes, request).observe(
            content_length
        )
        request.prometheus_after_middleware_event = Time()

    def _get_view_name(self, request):
        # Resolve the view name, falling back to a placeholder when the URL
        # resolver has not run (or matched nothing).
        view_name = "<unnamed view>"
        if hasattr(request, "resolver_match"):
            if request.resolver_match is not None:
                if request.resolver_match.view_name is not None:
                    view_name = request.resolver_match.view_name
        return view_name

    def process_view(self, request, view_func, *view_args, **view_kwargs):
        """Count the request once the view is known."""
        transport = self._transport(request)
        method = self._method(request)
        if hasattr(request, "resolver_match"):
            name = request.resolver_match.view_name or "<unnamed view>"
            self.label_metric(
                self.metrics.requests_by_view_transport_method,
                request,
                view=name,
                transport=transport,
                method=method,
            ).inc()

    def process_template_response(self, request, response):
        """Count template responses by template name."""
        if hasattr(response, "template_name"):
            self.label_metric(
                self.metrics.responses_by_templatename,
                request,
                response=response,
                templatename=str(response.template_name),
            ).inc()
        return response

    def process_response(self, request, response):
        """Record status, charset, body size, streaming flag and latency."""
        method = self._method(request)
        name = self._get_view_name(request)
        status = str(response.status_code)
        self.label_metric(
            self.metrics.responses_by_status, request, response, status=status
        ).inc()
        self.label_metric(
            self.metrics.responses_by_status_view_method,
            request,
            response,
            status=status,
            view=name,
            method=method,
        ).inc()
        if hasattr(response, "charset"):
            self.label_metric(
                self.metrics.responses_by_charset,
                request,
                response,
                charset=str(response.charset),
            ).inc()
        if hasattr(response, "streaming") and response.streaming:
            self.label_metric(self.metrics.responses_streaming, request, response).inc()
        if hasattr(response, "content"):
            self.label_metric(
                self.metrics.responses_body_bytes, request, response
            ).observe(len(response.content))
        if hasattr(request, "prometheus_after_middleware_event"):
            self.label_metric(
                self.metrics.requests_latency_by_view_method,
                request,
                response,
                view=self._get_view_name(request),
                method=request.method,
            ).observe(TimeSince(request.prometheus_after_middleware_event))
        else:
            # process_request never ran for this request.
            self.label_metric(
                self.metrics.requests_unknown_latency, request, response
            ).inc()
        return response

    def process_exception(self, request, exception):
        """Count the exception by type and view, and still record latency."""
        self.label_metric(
            self.metrics.exceptions_by_type, request, type=type(exception).__name__
        ).inc()
        if hasattr(request, "resolver_match"):
            name = request.resolver_match.view_name or "<unnamed view>"
            self.label_metric(self.metrics.exceptions_by_view, request, view=name).inc()
        if hasattr(request, "prometheus_after_middleware_event"):
            self.label_metric(
                self.metrics.requests_latency_by_view_method,
                request,
                view=self._get_view_name(request),
                method=request.method,
            ).observe(TimeSince(request.prometheus_after_middleware_event))
        else:
            self.label_metric(self.metrics.requests_unknown_latency, request).inc()
|
import csv
def write_interactions(filename, no_agents, num_rings):
    """Write a ring-of-rings interaction graph to a CSV file.

    Agents are split into ``num_rings`` equal rings.  Every agent interacts
    with its ring neighbours (both directions), and the first agent of each
    ring interacts with the first agent of the next ring.

    :param filename: path of the CSV file to write
    :param no_agents: total number of agents
    :param num_rings: number of rings to split the agents into
    :raises ValueError: when no_agents is not divisible by num_rings
    """
    edges = set()
    if no_agents % num_rings != 0:
        raise ValueError('Number of agents is not divisible by number of rings')
    ring_size = no_agents // num_rings
    for ring in range(num_rings):
        base = ring * ring_size
        for idx in range(ring_size):
            a = base + idx
            b = base + (idx + 1) % ring_size
            edges.add((a, b))
            edges.add((b, a))
        # Link this ring's first agent to the next ring's first agent
        # (wrapping around to ring 0 after the last ring).
        nxt = ((ring + 1) * ring_size) % no_agents
        edges.add((base, nxt))
        edges.add((nxt, base))
    with open(filename, 'w', newline='') as file:
        writer = csv.DictWriter(
            file, fieldnames=['Agent Index', 'Interacting Agent Index'])
        writer.writeheader()
        for x, y in edges:
            writer.writerow({'Agent Index': x, 'Interacting Agent Index': y})
write_interactions('interactions_list.csv', 30, 3)
|
import uuid
import json
from datetime import datetime
SECONDS_IN_DAY = 60 * 60 * 24
class BaseFunctionBuilder():
    """Base class for objects that register a RedisGears command function."""

    def __init__(self, command_name):
        # Trigger name used both for registration and for duplicate detection.
        self.command_name = command_name

    def is_registered(self):
        """
        Determines if function is already registered in redis database.
        Makes a `RG.DUMPREGISTRATIONS` call and looks for self.command_name
        among each registration's RegistrationData arguments.
        Returns:
            is registered (boolean)
        """
        dumped = execute("RG.DUMPREGISTRATIONS")
        if not dumped:
            return False
        for entry in dumped:
            # Replies are flat [key, value, ...] lists; fold into dicts.
            meta = dict(zip(entry[0::2], entry[1::2]))
            reg_data = dict(
                zip(meta['RegistrationData'][0::2], meta['RegistrationData'][1::2]))
            if self.command_name in reg_data['args']:
                return True
        return False

    def register_command(self):
        """
        Registers a redis gears function to redis.
        This is a super class placeholder meant to be overridden.
        Raises:
            NotImplementedError()
        """
        raise NotImplementedError(self.__class__.__name__)
class CreateNewGameFunctionBuilder(BaseFunctionBuilder):
    def __init__(self):
        super().__init__(command_name='create_new_game')

    def register_command(self):
        """
        Registers create_new_game redis gears function to redis.
        For each generate_new_game call creates a new HASH under game namespace:
            GAME:[game_id] owner [user_id], secret [hash], private [bool], playercount [int]
        Returns:
            redis key [GAME:game_id]
        Trigger example:
            RG.TRIGGER create_new_game USER:123 1 secret123
        """
        def subcall(user, private=0, secret=""):
            # Random hex id keeps game keys unguessable and collision-free.
            game_id = uuid.uuid4().hex
            key = f"GAME:{game_id}"
            execute("HSET", key, "owner", user, "secret", str(secret), "private", int(private), "playercount", 0)
            # Games are short-lived: expire after one day.
            execute("EXPIRE", key, SECONDS_IN_DAY)
            return game_id
        (
            GB('CommandReader')
            # x is [trigger_name, arg1, ...]; drop the trigger name.
            .map(lambda x: subcall(*x[1:]))
            .register(trigger=self.command_name, mode='sync')
        )
class CreateUserFunctionBuilder(BaseFunctionBuilder):
    def __init__(self):
        super().__init__(command_name='create_new_user')

    def register_command(self):
        """
        Registers create_new_user redis gears function to redis.
        For each create_new_user call creates a new HASH under user namespace:
            USER:[u_id] name [str], settings [str], secret [str]
        Returns:
            redis key [USER:u_id]
        Trigger example:
            RG.TRIGGER create_new_user hhaa Player1 '' aahh
        """
        def subcall(user_id, name, settings='{}', secret=""):
            key = f"USER:{user_id}"
            # BUGFIX: the hash field was misspelled "setttings", which does not
            # match the documented USER schema above and would break readers
            # that look up the "settings" field.
            execute("HSET", key, "name", name, "settings", settings, "secret", str(secret))
            # Users live longer than games: 30 days.
            execute("EXPIRE", key, SECONDS_IN_DAY * 30)
            return key
        (
            GB('CommandReader')
            # x is [trigger_name, arg1, ...]; drop the trigger name.
            .map(lambda x: subcall(*x[1:]))
            .register(trigger=self.command_name)
        )
# All gears function builders that should exist in the database.
database_functions = [
    CreateNewGameFunctionBuilder(),
    CreateUserFunctionBuilder()
]
# Register each command once, skipping those redis already knows about.
for db_function in database_functions:
    if not db_function.is_registered():
        db_function.register_command()
import unittest
from string_permutation import check_inclusion
class StringPermutationTests(unittest.TestCase):
    """Tests for string permutation challenge."""

    def test_case_1(self):
        # "ba" -- a permutation of "ab" -- appears contiguously in the string.
        self.assertTrue(check_inclusion("ab", "eidbaooo"))

    def test_case_2(self):
        # No contiguous permutation of "ab" exists here.
        self.assertFalse(check_inclusion("ab", "eidboaoo"))

    def test_case_3(self):
        # The letters occur, but never as one contiguous permutation of "hello".
        self.assertFalse(check_inclusion("hello", "ooolleoooleh"))
# Run the suite with per-test output when executed directly.
if __name__ == "__main__":
    unittest.main(verbosity=2)
|
from kinetica_proc import ProcData
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import h2o
"""
This is a demonstration of how to use H2O for model learning and inference in a distributed Kinetica UDF.
This UDF is registered and executed from register_execute_train_test.py
The demo is based on this H2O tutorial:
https://github.com/h2oai/h2o-tutorials/blob/master/h2o-open-tour-2016/chicago/intro-to-h2o.ipynb
This demonstration assumes the situation where the data is large and learning a single model on all data is not
feasible (e.g. due to memory constraints). A possible approach to address this challenge is to learn multiple
models, each on a fraction of the data, and then to combine their inference output (e.g. by averaging).
In this demonstration we assume to be on one such fraction of the data. We walk through the process of
* accessing the data directly from a Kinetica table via the UDF API's proc_data handle
* using H2O to derive a GLM model from it
* store the model into a Kinetica table via KiFS
The model is then accessible from another distributed UDF that could do the inference step with it.
Note that when using proc_data to access data from Kinetica tables, we are in a distributed environment. Each
instance of the UDF accesses the data of its respective TOM. This can make sense when you have large data and
like to learn multiple models - each on a fraction of the data. However, in this situation it is important to
keep two things in mind:
1) The data needs to be distributed across TOMs such that its distribution is (nearly) the same on each TOM. When
you use KiFS this should automatically be the case since the data is distributed randomly.
2) At the inference step all models are applied to a record and the prediction results need to be combined.
This could be done efficiently through another distributed UDF.
"""
"""Initialize demo dependencies"""
h2o.init(nthreads=-1)
"""Get H2O data frame via Kinetica UDF API"""
print('Receiving h2o df...')
proc_data = ProcData()
h20_df = proc_data.to_h2odf()
print('h2o df shape: {}'.format(h20_df.shape))
"""Use H2O API to learn a GLM model"""
print('Partitioning data')
splits = h20_df.split_frame(ratios=[0.7, 0.15], seed=1)
train = splits[0]
valid = splits[1]
test = splits[2]
print('Identify response and predictor variables')
y = 'bad_loan'
x = list(h20_df.columns)
x.remove(y) # remove the response
x.remove('int_rate') # remove the interest rate column because it's correlated with the outcome
x.remove('record_id')
print('Predictor columns: {}'.format(x))
print('Train a default GLM')
glm_fit1 = H2OGeneralizedLinearEstimator(family='binomial', model_id='glm_fit1')
glm_fit1.train(x=x, y=y, training_frame=train)
"""Store the model into a Kinetica table to have it available for inference, e.g. in an ensemble"""
print('Serializing model object, save to KiFS')
tmp_model1_path = h2o.save_model(glm_fit1, '/opt/gpudb/kifs/mount/GLM_model', force=True)
h2o.cluster().shutdown()
proc_data.complete()
|
'''
@Date: 2019-11-11 23:23:36
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-11-11 23:31:03
'''
import math
def isVaild(side1, side2, side3):
    """Return the triangle's area when the three sides are valid, else False.

    A triangle is valid when every pair of sides sums to more than the
    remaining side (triangle inequality).
    """
    valid = (side1 + side2 > side3
             and side2 + side3 > side1
             and side1 + side3 > side2)
    if not valid:
        return False
    return area(side1, side2, side3)
def area(side1, side2, side3):
    """Compute a triangle's area from its three sides via Heron's formula."""
    semi = (side1 + side2 + side3) / 2
    return math.sqrt(semi * (semi - side1) * (semi - side2) * (semi - side3))
from datetime import date

# Interactive helper that tells a user when the (Brazilian) military
# enlistment must be / should have been done, based on birth year.
print('Qual seu sexo?'
      '\n1 para feminino'
      '\n2 para masculino')
s = int(input('\nInsira: '))
if s == 1:
    print('Pessoas do sexo feminino não precisam se alistar obrigatoriamente.')
else:
    ano = int(input('Digite seu ano de nascimento: '))
    idade = date.today().year - ano
    # BUGFIX: the year was hard-coded as 2022 although the age is computed
    # from the current year; print the current year instead.
    print(f'\nQuem nasceu em {ano} completa {idade} anos em {date.today().year}.')
    if idade < 18:
        if 18 - idade == 1:
            print(f'Falta {18 - idade} ano para o alistamento, '
                  f'que deverá ser feito em {date.today().year + (18 - idade)}.')
        elif (18 - idade) > 1:
            print(f'Faltam {18 - idade} anos para o alistamento, '
                  f'que deverá ser feito em {date.today().year + (18 - idade)}.')
    elif idade == 18:
        print(f'O alistamento deverá ser feito neste ano.')
    else:
        if idade - 18 > 1:
            print(f'O período de alistamento foi há {idade - 18} anos no ano de {date.today().year - (idade - 18)}.')
        # BUGFIX: this branch tested `idade == 1`, which is unreachable here
        # (idade > 18); it must test whether enlistment was exactly 1 year ago.
        elif idade - 18 == 1:
            print(f'O período de alistamento foi há {idade - 18} ano no ano de {date.today().year - (idade - 18)}.')
|
# Generated by Django 2.0.6 on 2018-10-26 10:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the optional free-text ``career`` field to the user model."""

    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='career',
            # null=True: existing rows get NULL instead of requiring a default.
            field=models.TextField(null=True),
        ),
    ]
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from uw_libraries.mylib import get_account, get_account_html
from uw_libraries.util import fdao_mylib_override
from restclients_core.exceptions import DataFailureException
from datetime import date
@fdao_mylib_override
class MyLibInfoTest(TestCase):
    """Tests for uw_libraries account lookups against the mock DAO data.

    MODERNIZED: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12; all occurrences are replaced with ``assertEqual``.
    """

    def test_get_account(self):
        """Account fields and json_data() for a user with activity, and an
        all-zero account for a new student."""
        account = get_account("javerage")
        self.assertEqual(account.holds_ready, 1)
        self.assertEqual(account.fines, 5.35)
        self.assertEqual(account.items_loaned, 3)
        self.assertEqual(account.get_next_due_date_str(),
                         "2020-10-15T02:00:00+00:00")
        self.assertIsNotNone(str(account))
        self.assertEqual(
            account.json_data(),
            {'holds_ready': 1,
             'fines': 5.35,
             'items_loaned': 3,
             'next_due': '2020-10-15T02:00:00+00:00'})
        account = get_account("jnewstudent")
        self.assertIsNone(account.next_due)
        self.assertEqual(account.holds_ready, 0)
        self.assertEqual(account.fines, 0.0)
        self.assertEqual(account.items_loaned, 0)

    def test_html_response(self):
        """The raw HTML snippet returned for an account summary."""
        response = get_account_html("javerage")
        self.assertEqual(response, (
            b'<p>You have 7 items checked out.<br>\nYou have items '
            b'due back on 2014-04-29.<br>\nYou don\'t owe any fines.</p>\n<a '
            b'href="http://alliance-primo.hosted.exlibrisgroup.com/'
            b'primo_library/libweb/action/dlBasketGet.do?vid=UW&redirectTo='
            b'myAccount">Go to your account</a>'))

    def test_bad_json(self):
        """Malformed JSON from the service surfaces as an exception."""
        self.assertRaises(Exception, get_account, "badjsonuser")
        try:
            get_account("badjsonuser")
            self.assertTrue(False, "Shouldn't get here")
        except Exception as ex:
            self.assertTrue("example bad data" in str(ex))

    def test_invalid_user(self):
        # Testing error message in a 200 response
        self.assertRaises(DataFailureException, get_account, "invalidnetid")
        # Testing non-200 response
        self.assertRaises(DataFailureException, get_account, "invalidnetid123")
        try:
            get_account("invalidnetid")
        except DataFailureException as ex:
            self.assertTrue("User not found" in str(ex.msg))

    def test_with_timestamp(self):
        """Passing a timestamp selects a differently formatted mock response."""
        response = get_account_html('javerage', timestamp=1391122522900)
        self.assertEqual(response, (
            b'<p>You have 7 items checked out.<br>\n You have items '
            b'due back on 2014-04-29.<br>\n You don\'t owe any fines.</p>\n '
            b'<a href="http://alliance-primo.hosted.exlibrisgroup.com/'
            b'primo_library/libweb/action/dlBasketGet.do?vid=UW&'
            b'redirectTo=myAccount">Go to your account</a>'))
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from .position import Grid, OmniscientReference, Point
class InputPoints(Exception):
    """Raised when the input point array does not have two columns (x, y)."""
    pass
class NoOutput(Exception):
    """Raised when output is requested before Solver.navigate() has run."""
    pass
class Solver:
    """ The TSP Solver

    Movement from point to point is tracked more on a Grid Basis than
    a Point to Point Basis
    """

    def __init__(self, points, n_grid=5, start_coords=np.array([0, 0])):
        """ Initialize the data model for the TSP Solver

        Parameters
        ------------
        points: ndarray
            The coordinates for the points to Solve for Travelling Route
        n_grid: int
            The number of grids to be used
        start_coords: ndarray
            The starting coordinates for the TSP. This is also the proposed ending point
        """
        points = np.array(points)
        if (points.shape[1] != 2):
            raise InputPoints("Points must be a 2-dimensional ndarray")
        self._points = points
        self._n_grids = n_grid
        self._grids = []
        # A list mapping all points to their respective Grids
        # This is to avoid issues of looking up
        self.point_to_grid_index = []
        self.start_pos = Point(start_coords)
        self._result = None
        self._use_greedy = False

    def __str__(self):
        return f"<TSPSolver for {len(self._points)} Coords>"

    @classmethod
    def input_from_file(cls, file_name):
        """ Build a Solver from a CSV file indexed by a 'CityId' column. """
        # BUGFIX: previously referenced the undefined names `filename` and
        # `values`, raising NameError on every call.
        coords = pd.read_csv(file_name, index_col=['CityId']).values
        return cls(coords)

    def get_grids(self):
        """ Returns the Grid Points Coordinates """
        x_max, y_max = self._points.max(axis=0)
        # Assume all start points are from 0
        x_points = np.linspace(0, x_max, int(self.n_grids))
        y_points = np.linspace(0, y_max, int(self.n_grids))
        x_grid_points, y_grid_points = np.meshgrid(x_points, y_points)
        # TODO Look for a more efficient way to do this
        # Get all the coordinate points of the grid
        for i in range(len(x_points)-1):
            for j in range(len(y_points)-1):
                self._grids.append(Grid((
                    Point([x_points[i], y_points[j]]), Point([x_points[i], y_points[j+1]]),
                    Point([x_points[i+1], y_points[j]]), Point([x_points[i+1], y_points[j+1]])
                )))

    def map_grids_to_points(self):
        """ Create a grid configuration for all points """
        # TODO Look for an effective way to load up the points into their Grids
        # BUG Check why lower values of n_grids returns only a single grid Graph
        self.get_grids()
        points = self._points.copy()
        # convert points to list so that we can replace instances of points with Point
        self._points = list(self._points)
        for i, point in enumerate(points):
            for grid in self._grids:
                if grid.contains(Point(point)):
                    # Replace Point with the Point Object
                    self._points[i] = Point(point)
                    grid.add_point(self._points[i])
                    break
        self._points = np.array(self._points)
        # Compute grid centric points and omniscient_reference point
        self.compute_cost()

    def write_output(self, filename=None):
        """ Writes out the output to a file (appends, one point per line) """
        with open(filename, 'a') as fh:
            for point in self.result:
                fh.write(f"{point}\n")

    def plot(self):
        # BUGFIX: `self._points.max` was missing the call `(axis=0)` and
        # raised on unpacking.
        # NOTE(review): this method still draws nothing -- plotting is done by
        # visualize_input / visualize_output; confirm whether it can be removed.
        x_max, y_max = self._points.max(axis=0)

    @property
    def n_grids(self):
        return self._n_grids

    @property
    def grids(self):
        return self._grids

    @property
    def points(self):
        return self._points

    def navigate(self, greedy=False):
        """ Start navigating and solving the problem """
        # NOTE This looks very ineffective. It needs the help of some data Science
        # Get the Grid index of the starting point
        if greedy:
            self._use_greedy = True
        for grid in self.grids:
            if grid.contains(self.start_pos):
                grid.add_point(self.start_pos)
                break
        # list of visited cities in that order
        points_visited = [self.start_pos,]
        # keep moving until you have it all sorted out
        current_pos = self.start_pos
        while len(points_visited) < len(self._points):
            next_pos = self.get_next_pos(current_pos)
            points_visited.append(next_pos)
            current_pos = next_pos
            print(f'next-stop: {current_pos}')
        self._result = points_visited

    @property
    def result(self):
        # Lazily solve on first access.
        if not self._result:
            self.navigate()
        return self._result

    def get_next_pos(self, current_pos):
        """ Get the next position to move to

        The next position is dependent on three costs:
        - moving from the current pos to that points
        - moving from the current pos to the current_pos GridCenter
        - moving from the current pos GridCenter to Centric Grid Center
        """
        # Ugh, I believe there is a more data scientific way to do this
        grid_with_lowest_point = None
        lowest_cost_point = None
        # TODO marginalize this value even though something tells me it doesn't matter
        cost_for_lowest_cost_point = 10000000
        # Use Numpy to get the cost of the available points matrix and return the lowest
        for grid in self._grids:
            for point in grid.points:
                if self._use_greedy:
                    total_cost = current_pos.distance_cost(point)
                else:
                    # Still got to wrap my head around this cost Function
                    total_cost = current_pos.distance_cost(point) * (grid.point_cost(current_pos) \
                        * self.centric_grid_point.grid_cost(grid)) ** 2
                # Total cost as long as it is not the current or the starting
                if total_cost < cost_for_lowest_cost_point and point != current_pos \
                        and point != self.start_pos:
                    lowest_cost_point = point
                    cost_for_lowest_cost_point = total_cost
                    grid_with_lowest_point = grid
        # Remove point from the available grid points
        grid_with_lowest_point.remove_point(lowest_cost_point)
        self.compute_cost()
        return lowest_cost_point

    def compute_cost(self):
        """ Recompute the cost of all grids """
        for grid in self._grids:
            grid.compute_cost()
        # Get the OmniscientReference point after matching all points
        self.centric_grid_point = OmniscientReference.derive_from_grids(self._grids)

    def visualize_input(self, type='all'):
        """ Display data in graph

        This displays the data given using matplotlib's pyplot

        Parameters
        -----------
        type: str
            Values: 'all', 'grid-only', 'point-only'
            When `grid-only`, the graph contains only the grids
            When `point-only`, the graph contains only points
            `all`, the graph displays both grids and points
        """
        # Get A better way to do this Visualization; ugh
        # BUG Fix overlapping rectangle Edges; `I suck at this ("")`
        plt.title("Data Visualization for TSP ")
        if type != 'grid-only':
            points = self._points.copy()
            for i, point in enumerate(points):
                points[i] = point.coord
            points = np.vstack(points)
            plt.scatter(points[:, 0], points[:, 1])
        # TODO Draw Rectangle for Data Visualization
        if type != 'point-only':
            grid_colors = 'rbg'
            for i, grid in enumerate(self._grids):
                grid.plot(plt, color=grid_colors[i%3])
            self.centric_grid_point.plot(plt)
        plt.show()

    def visualize_output(self):
        """ Plot the solved route; requires navigate() to have run. """
        if not self._result:
            raise NoOutput("First Run `Solver.navigate()` to get the route")
        # plot the results
        points = self._result.copy()
        for i, point in enumerate(self._result):
            points[i] = point.coord
        points = np.vstack(points)
        plt.plot(points[:, 0], points[:, 1])
        plt.scatter(points[:, 0], points[:, 1], color='green')
        if not self._use_greedy:
            grid_colors = 'rbg'
            for i, grid in enumerate(self._grids):
                grid.plot(plt, color=grid_colors[i%3])
            self.centric_grid_point.plot(plt)
        plt.show()
"""Contains the test fixtures for making tests."""
from __future__ import absolute_import
from .hook import Hook
from .suite import Suite
from .test import Test
|
import sys

# Letter->digit substitution: letters a-j map to digits 0-9, digits pass
# through unchanged, every other character is discarded.  One output line per
# input line; lines with nothing mappable print "NONE".
# PERF: the mapping table is now built once instead of once per input line.
DIGIT_MAP = {'a': '0', 'b': '1', 'c': '2', 'd': '3', 'e': '4',
             'f': '5', 'g': '6', 'h': '7', 'i': '8', 'j': '9'}

with open(sys.argv[1], 'r') as test_cases:
    for test in test_cases:
        stringe = test.strip()
        salida = ""
        for i in stringe:
            if i in DIGIT_MAP:
                salida += DIGIT_MAP[i]
            elif i.isdigit():
                salida += i
        if salida != '':
            print(salida)
        else:
            print('NONE')
"""
Simple handlers for serializers.
"""
BINARY_PROTOCOL = 'wamp.2.msgpack'
JSON_PROTOCOL = 'wamp.2.json'
NONE_PROTOCOL = ''
|
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db.models.signals import pre_save
from django.conf import settings
from django.dispatch import receiver
from ensembl.production.masterdb.models import MasterBiotype
from django.core.mail import send_mail
def _comparable_value(value):
    """Normalise a field value for comparison: single-element lists compare
    by their element, longer lists compare order-insensitively as a set."""
    if isinstance(value, list):
        if len(value) == 1:
            return value[0]
        # set() alone is order-insensitive; the previous set(sorted(...))
        # wasted a sort.
        return set(value)
    return value


@receiver(pre_save, sender=MasterBiotype)
def master_biotype_update(sender, instance: MasterBiotype, **kwargs):
    """
    Add signal to production DB app to automatically notify `ensembl-production` mailing list when someone update a biotype.
    Triggered if one of these field is modified:
    - object_type
    - biotype_group
    - attrib_type
    - db_type
    - is_current
    - so_acc
    :param instance: updated instance
    :param sender: object MasterBioType Model
    :param kwargs: dict Updates parameters
    :return: None
    """
    # NOTE(review): the original docstring also listed `misc_non_coding`,
    # which is not in watched_fields -- confirm whether it should be watched.
    watched_fields = [
        'object_type',
        'biotype_group',
        'attrib_type',
        'db_type',
        'is_current',
        'so_acc'
    ]
    updated_fields = []
    from_fixtures = kwargs.get('raw', False)
    created = instance.biotype_id is None
    if not (from_fixtures or created):
        # only trigger when this is no fixture load or new item
        previous = MasterBiotype.objects.get(biotype_id=instance.biotype_id)
        for field in watched_fields:
            old_val = _comparable_value(getattr(previous, field))
            new_val = _comparable_value(getattr(instance, field))
            if old_val != new_val:
                updated_fields.append((field, old_val, new_val))
        if updated_fields:
            # send email to config email.
            # BUGFIX: the message previously printed the OLD value as the
            # current one and the NEW value as "initially"; order is now
            # "<new> (initially:<old>)".
            send_mail(
                '[Production MasterDB] Biotype updated !',
                '%s just modified important fields on MasterBioType table in production Master DB, please check '
                'for:' % instance.modified_by + "".join(
                    ["\n- %s: %s (initially:%s)" % (field, new, old)
                     for (field, old, new) in updated_fields]),
                getattr(settings, 'DEFAULT_FROM_EMAIL', 'me@localhost'),
                [getattr(settings, 'MASTER_DB_ALERTS_EMAIL', 'me@localhost')],
                fail_silently=settings.DEBUG
            )
|
from conans import ConanFile, tools
import os
class CplexConan(ConanFile):
    # Conan recipe that repackages a locally installed IBM CPLEX 12.7.1
    # (Windows / MSVC x64 only) so dependants can link cplex1271, ilocplex
    # and concert.
    name = "CPLEX"
    version = "12.7.1"
    settings = {"os": ["Windows"],
                "compiler": {"Visual Studio": {"version": ['14', '15']}},
                "arch": ["x86_64"],
                "build_type": ["Release", "Debug", "RelWithDebInfo"]}
    description = """IBM's ILOG CPLEX for linear optimization.
In the current version it just sets the libraries cplex1271, ilocplex
and concert for linking (as specified in the C++ Tutorial of CPLEX).
If other libraries are needed this Conanfile has to be extended.
In order to execute CPLEX Programs the corresponding DLLs have to be supplied
This funcionality will be added in the near future."""
    # Header directories copied verbatim from the CPLEX installation.
    include_dirs = [
        "concert/include/ilconcert",
        "concert/include/ilconcert/ilsched",
        "concert/include/ilconcert/ilxml",
        "cplex/include/ilcplex",
        "cpoptimizer/include/ilcp"
    ]
    # Static-library directories mirrored into the package.
    library_dirs = [
        "concert/lib",
        "cplex/lib",
        "cpoptimizer/lib"
    ]

    def package(self):
        """Copy headers and build_type-matching static libs from the local
        CPLEX install pointed to by the CPLEX_STUDIO_DIR1271 env variable."""
        cplex_install_dir = os.environ['CPLEX_STUDIO_DIR1271']
        for d in self.include_dirs:
            self.copy("*", src = cplex_install_dir + "/" + d, dst = d)
        # Release-like builds use stat_mda, Debug uses stat_mdd.
        if self.settings.build_type == "Release" or self.settings.build_type == "RelWithDebInfo":
            libdir = "x64_windows_vs2015/stat_mda"
        elif self.settings.build_type == "Debug":
            libdir = "x64_windows_vs2015/stat_mdd"
        else:
            # Unreachable in practice: `settings` above restricts build_type
            # to the three handled values.
            exit(1)
        print("Getting libraries from: " + libdir)
        for d in self.library_dirs:
            self.copy("*", src = cplex_install_dir + "/" + d + "/" + libdir, dst = d)

    def package_info(self):
        """Expose include dirs, link libraries and lib dirs to consumers."""
        self.cpp_info.includedirs = [
            "concert/include",
            "cplex/include",
            "cpoptimizer/include"
        ]
        self.cpp_info.libs = [
            "cplex1271.lib",
            "ilocplex.lib",
            "concert.lib"
        ]
        self.cpp_info.libdirs = self.library_dirs
|
def lerarquivo(nome):
    """Read a text file and print its contents surrounded by blank lines.

    :param nome: name/path of the file to read
    :return: None; prints the file contents, or an error message on failure
    """
    # BUGFIX: the bare `except:` swallowed every exception (including
    # KeyboardInterrupt); only file-system errors are handled now, and the
    # file is closed deterministically via a context manager.
    try:
        with open(nome, 'r') as arqui:
            conteudo = arqui.read()
    except OSError:
        print('ERRO ao ler o arquivo!')
    else:
        print('')
        print(conteudo)
        print('')
def leiaint(msg):
    """Prompt repeatedly until the user types a valid integer.

    :param msg: prompt shown to the user
    :return: the integer typed, or 0 when input is interrupted (Ctrl+C)
    """
    while True:
        try:
            return int(input(msg))
        except (TypeError, ValueError):
            print(f'\033[31mO valor digitado não é inteiro.\033[m')
        except (KeyboardInterrupt):
            print('\n\033[31mEntrada de dados encerrada pelo usuário\033[m')
            return 0
def leianumbin(numbin):
    """Prompt repeatedly until the user types a binary string.

    Accepted characters are '0', '1' and 'Q' (input is upper-cased, so 'q'
    is accepted too -- presumably a quit sentinel; confirm with callers).

    :param numbin: prompt shown to the user
    :return: the validated (stripped, upper-cased) string
    """
    while True:
        bin = str(input(numbin)).strip().upper()
        if all(v in '01Q' for v in bin):
            return bin
        # BUGFIX: the original printed this error but returned the invalid
        # value anyway (the unconditional `return` made the loop run once);
        # now it re-prompts until the input is valid.
        print('\033[31m>>> O VALOR DIGITADO NÃO É BINÁRIO !!!\033[m')
def leiadinheiro(num):
    """Keep prompting until the user types a monetary value.

    Accepts either '.' or ',' as decimal separator.  The duplicated
    read-then-validate code of the original is collapsed into one loop
    (same validation rule, same prompt/error sequence).

    :param num: Prompt text shown on the input.
    :return: The value as a float (',' converted to '.').
    """
    while True:
        ndef = str(input(num)).strip()
        # Valid when numeric once one separator kind is removed.
        if ndef.replace('.', '').isnumeric() or ndef.replace(',', '').isnumeric():
            return float(ndef.replace(',', '.'))
        print(f'\033[31mERRO: "{ndef}" é um preço inválido!\033[m')
def leiafloat(msg):
    """Keep prompting until the user types a valid real number.

    :param msg: Prompt text shown on the input.
    :return: The float typed, or 0 if input was interrupted (Ctrl+C).
    """
    while True:
        try:
            # Successful conversion ends the loop immediately.
            return float(input(msg))
        except KeyboardInterrupt:
            print('\n\033[31mEntrada de dados encerrada pelo usuário\033[m')
            return 0
        except (TypeError, ValueError):
            print('\033[31mO valor digitado não é Real.\033[m')
def leiaresposta(txt='', numresp=1):
    """Keep prompting until the user picks a valid option number.

    Bug fix: the bare 'except' also swallowed KeyboardInterrupt/SystemExit,
    trapping the user in the loop; only conversion errors are caught now.

    :param txt: Prompt text shown on the input.
    :param numresp: Number of valid answers (valid range is 1..numresp).
    :return: The chosen answer as an int.
    """
    while True:
        try:
            resp = int(input(txt))
        except (TypeError, ValueError):
            print('\033[31mERRO! Resposta invalida\033[m')
            continue
        # Same validity rule as the original: 1 <= resp <= numresp.
        if 1 <= resp <= numresp:
            return resp
        print('\033[31mERRO! Resposta invalida\033[m')
def leiarespostaSN(txt='', minmai='mai'):
    """Keep prompting until the user answers S (yes) or N (no).

    Bug fixes: the original's ``r == 'S' or 'N'`` was always truthy, and the
    'min' branch used an ``or`` chain that rejected every input, looping
    forever.  Validation is now done once on the uppercased answer.

    :param txt: Prompt text shown on the input.
    :param minmai: 'mai' returns the answer uppercased; anything else
        returns it lowercased.
    :return: 'S'/'N' (or 's'/'n').
    """
    while True:
        r = str(input(txt)).strip()
        if r.upper() not in ('S', 'N'):
            print('\033[31mERRO! Digite um valor valído [S/N]..\033[m')
            continue
        return r.upper() if minmai == 'mai' else r.lower()
|
import json
import os
import sys
import argparse
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
from allennlp.data.dataset_readers.dataset_utils.text2sql_utils import process_sql_data
from allennlp.semparse.contexts.sql_context_utils import SqlVisitor, format_grammar_string
from allennlp.semparse.contexts.text2sql_table_context import GRAMMAR_DICTIONARY
from parsimonious.grammar import Grammar
# still TODO:
# JOIN, seems hard.
# Added query to pos_value - check this, very unclear if it is the correct way to handle this.
# not all functions can take * as an argument.
# Check whether LIKE can take non string arguments (example in scholar dataset)
def parse_dataset(filename: str, filter_by: str = None, verbose: bool = False):
    """Try to parse every SQL query of a text2sql JSON file with the grammar.

    :param filename: Path to a preprocessed text2sql JSON file.
    :param filter_by: When given, parse errors whose SQL contains this string
        are counted separately and skipped by the verbose output.
    :param verbose: Print the exception, utterance and SQL of parse failures.
    :return: Tuple ``(num_parsed, num_queries, filtered_errors,
        non_basic_as_aliases, as_count, queries_with_weird_as)``.
    """
    grammar_string = format_grammar_string(GRAMMAR_DICTIONARY)
    grammar = Grammar(grammar_string)
    # Sentinel that cannot occur in real SQL, so "filter_by in sql_string"
    # is always False when no filter was requested.
    filter_by = filter_by or "13754332dvmklfdsaf-3543543"
    # Close the file promptly instead of leaking the handle.
    with open(filename) as data_file:
        data = json.load(data_file)
    num_queries = 0
    num_parsed = 0
    filtered_errors = 0
    non_basic_as_aliases = 0
    as_count = 0
    queries_with_weird_as = 0
    for i, sql_data in enumerate(process_sql_data(data)):
        sql_visitor = SqlVisitor(grammar)
        # Bug fix: this was any([x[:7] == "DERIVED"] for x in sql_data.sql) —
        # a generator of one-element *lists*, which is truthy for every token,
        # so the DERIVED-rewrite branch previously ran for every query.
        if any(x[:7] == "DERIVED" for x in sql_data.sql):
            # NOTE: DATA hack alert - the geography dataset doesn't alias derived tables consistently,
            # so we fix the data a bit here instead of completely re-working the grammar.
            sql_to_use = []
            for j, token in enumerate(sql_data.sql):
                if token[:7] == "DERIVED" and sql_data.sql[j - 1] == ")":
                    sql_to_use.append("AS")
                sql_to_use.append(token)
            previous_token = None
            query_has_weird_as = False
            for j, token in enumerate(sql_to_use[:-1]):
                if token == "AS" and previous_token is not None:
                    # [:-6] — presumably strips a fixed-length alias suffix
                    # from DERIVED tokens; TODO confirm token format.
                    table_name = sql_to_use[j + 1][:-6]
                    if table_name != previous_token:
                        non_basic_as_aliases += 1
                        query_has_weird_as = True
                    as_count += 1
                previous_token = token
            if query_has_weird_as:
                queries_with_weird_as += 1
            sql_string = " ".join(sql_to_use)
        else:
            sql_string = " ".join(sql_data.sql)
        num_queries += 1
        try:
            sql_visitor.parse(sql_string)
            num_parsed += 1
        except Exception as e:
            if filter_by in sql_string:
                filtered_errors += 1
            if verbose and filter_by not in sql_string:
                print()
                print(e)
                print(" ".join(sql_data.text))
                print(sql_data.sql)
                try:
                    # sqlparse is optional; fall back to the raw string.
                    import sqlparse
                    print(sqlparse.format(sql_string, reindent=True))
                except Exception:
                    print(sql_string)
        if (i + 1) % 500 == 0:
            print(f"\tProcessed {i + 1} queries.")
    return (
        num_parsed,
        num_queries,
        filtered_errors,
        non_basic_as_aliases,
        as_count,
        queries_with_weird_as,
    )
def main(
    data_directory: str, dataset: str = None, filter_by: str = None, verbose: bool = False
) -> None:
    """
    Walk the data directory and report grammar coverage per dataset.

    Parameters
    ----------
    data_directory : str, required.
        The path to the data directory of https://github.com/jkkummerfeld/text2sql-data
        which has been preprocessed using scripts/reformat_text2sql_data.py.
        (Annotation fixed: this was wrongly annotated ``int``.)
    dataset : str, optional.
        The dataset to parse. By default all are parsed.
    filter_by : str, optional
        Compute statistics about a particular error and only print errors which don't contain this string.
    verbose : bool, optional.
        Whether to print information about incorrectly parsed SQL.
    """
    # Keep only directories that directly contain files.
    directory_dict = {path: files for path, names, files in os.walk(data_directory) if files}
    for directory, data_files in directory_dict.items():
        # Skip query-split variants and, when a dataset is named, everything else.
        if "query_split" in directory or (dataset is not None and dataset not in directory):
            continue
        print(f"Parsing dataset at {directory}")
        parsed = 0
        total_non_aliases = 0
        total_as_count = 0
        total_queries_with_weird_as = 0
        total = 0
        for json_file in data_files:
            print(f"\tParsing split at {json_file}")
            file_path = os.path.join(directory, json_file)
            num_parsed, num_queries, filtered_errors, non_basic_as_aliases, as_count, queries_with_weird_as = parse_dataset(
                file_path, filter_by, verbose
            )
            parsed += num_parsed
            total += num_queries
            total_non_aliases += non_basic_as_aliases
            total_as_count += as_count
            total_queries_with_weird_as += queries_with_weird_as
        # NOTE(review): the ratios below divide by total / total_as_count with
        # no zero guard, and filtered_errors is only bound when data_files was
        # non-empty — confirm inputs guarantee both.
        print(f"\tParsed {parsed} out of {total} queries, coverage {parsed/total}")
        print(
            f"\tFound {total_non_aliases} out of {total_as_count} non simple AS aliases. percentage: {total_non_aliases/total_as_count}"
        )
        print(
            f"\tFound {total_queries_with_weird_as} out of {total} queries with > 1 weird AS. percentage: {total_queries_with_weird_as/total}"
        )
        if filter_by is not None:
            print(
                f"\tOf {total - parsed} errors, {filtered_errors/ (total - parsed + 1e-13)} contain {filter_by}"
            )
if __name__ == "__main__":
    # CLI entry point: parse the flags and forward them straight to main().
    parser = argparse.ArgumentParser(
        description="Check the coverage of a SQL Grammar on the text2sql datasets."
    )
    parser.add_argument("--data", type=str, help="The path to the text2sql data directory.")
    parser.add_argument(
        "--dataset",
        type=str,
        default=None,
        help="The dataset to check coverage for. Defaults to all datasets.",
    )
    parser.add_argument("--filter", type=str, default=None, help="A string to filter by.")
    parser.add_argument("--verbose", help="Verbose output.", action="store_true")
    args = parser.parse_args()
    main(args.data, args.dataset, args.filter, args.verbose)
|
from log import Log
import json
from typing import Dict
# https://python-3-patterns-idioms-test.readthedocs.io/en/latest/Singleton.html
class Config:
    """Process-wide singleton wrapper around a JSON configuration file.

    Usage: assign ``Config.path`` first, then instantiate; every ``Config()``
    call returns the same inner ``__Config`` instance (created lazily).
    """

    class __Config:
        """Singleton class for Config"""

        def __init__(self, path: str):
            # Path of the JSON config file; must be truthy or the process exits.
            self.path = path
            self.log = Log("config")
            if self.path:
                self._load_config()
            else:
                # Hard exit: without a config path nothing else can work.
                self.log.error("config path not set, exiting")
                exit(1)

        def _load_config(self) -> None:
            """Read in JSON config"""
            try:
                with open(self.path, "r") as f:
                    self.config = json.load(f)
                self.log.info("read in config")
            except Exception as e:
                # NOTE(review): on failure self.config stays unset, so later
                # get_config() calls raise AttributeError — confirm intended.
                self.log.error("error reading in config: {e}".format(e=str(e)))

        def get_config(self, section: str) -> Dict[str, str]:
            """Return the dictionary of config items for a given section (i.e. "slack", "zeek", etc.)"""
            try:
                return self.config[section]
            except Exception as e:
                self.log.error("error retrieving config for \"{section}\": {e}".format(section=section, e=str(e)))
                return None

    # Static class variables
    _instance = None   # the single __Config instance, created on first Config()
    path = None        # must be assigned (Config.path = ...) before first use

    # Singleton handler
    def __new__(cls):
        # First call builds the inner instance; later calls return it as-is,
        # so Config() actually yields a __Config object, not a Config.
        if not Config._instance:
            Config._instance = Config.__Config(Config.path)
        return Config._instance

    def get_config(self, section: str) -> """Dict[str, str] or Dict[str, Dict[str, str]]""":
        # Delegates to the singleton instance's get_config().
        return Config._instance.get_config(section)
# -*- coding: utf-8 -*-
from __future__ import print_function
from .models import DiscussionMarker
from sqlalchemy.orm import load_only
def main():
    """Print the identifier of every DiscussionMarker.

    load_only restricts the SELECT to the id and identifier columns.
    """
    for marker in DiscussionMarker.query.options(load_only("id", "identifier")).all():
        print(marker.identifier)


if __name__ == "__main__":
    main()
|
#-- GAUDI jobOptions generated on Fri Jul 24 17:14:59 2015
#-- Contains event types :
#-- 13100008 - 98 files - 2062195 events - 558.40 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124834
#-- StepId : 124834
#-- StepName : Reco14a for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p7
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-125836
#-- StepId : 125836
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08 - Implicit merging.
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper
# The 98 input LFNs follow a single sequential pattern
# (00042454_00000001 .. 00042454_00000098), so generate them instead of
# spelling each one out; the resulting list is byte-identical.
IOHelper('ROOT').inputFiles(
    ['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00042454/0000/00042454_{0:08d}_2.AllStreams.dst'.format(n)
     for n in range(1, 99)],
    clear=True)
|
from datetime import datetime
import nlp
import pandas as pd
from sklearn.preprocessing import StandardScaler
class PullRequestVectorizator:
    """Turn a raw pull-request/comment dataframe into scaled feature vectors.

    Assumes the input dataframe's first 22 columns are PR-level fields
    (PR_Number, PR_Created_At, PR_Title, PR_Body, User, ...) followed by
    comment-level label columns — TODO confirm against the data producer.
    """

    def __init__(self):
        # Project-local NLP helper (provides getTFIDFs / getSentimentalAnalisys).
        self.nlp = nlp.nlp()

    def getPRinfos(self, df):
        """One row per PR, sorted chronologically, with a combined text column."""
        pr_df = df.iloc[:, :22]
        pr_df = pr_df.drop_duplicates(subset=['PR_Number'])
        pr_df.sort_values(by=['PR_Created_At'], inplace=True, ascending=True)
        # Re-index 0..n-1 so positional lookups below work.
        pr_df.index = [i for i in range(len(pr_df))]
        pr_df['PR_Title_and_Body'] = pr_df['PR_Title'].astype(str) + ' ' + pr_df['PR_Body'].astype(str)
        return pr_df

    def getFirstPRTmstmp(self, df):
        """Timestamp of the chronologically first PR (row 0 after sorting)."""
        dt = df['PR_Created_At'].loc[0]
        return datetime.strptime(dt, '%Y-%m-%d %H:%M:%S')

    def getPRcommentsLabels(self, df, pr_number):
        """Inclusiveness/constructiveness labels of all comments of one PR."""
        temp = df.loc[df['PR_Number'] == pr_number]
        return temp[['Inclusiveness_Scale_1_to_10', 'Constructiveness_Scale_1_to_10']]

    def getPRFreqPerUser(self, df):
        """Per-PR running count of how many PRs its author has opened so far."""
        usersIDs_df = df['User']
        PRs_per_user = {}
        prfu = {}
        for PR_id in range(len(df)):
            user_id = usersIDs_df[PR_id]
            if PRs_per_user.get(user_id) == None:
                PRs_per_user[user_id] = 1
                prfu[str(PR_id)] = 1
            else:
                PRs_per_user[user_id] += 1
                prfu[str(PR_id)] = PRs_per_user[user_id]
        prfu_df = pd.DataFrame(columns=['PR_freq_per_user'])
        for PR_id in range(len(df)):
            prfu_df.loc[PR_id] = prfu[str(PR_id)]
        return prfu_df

    def getTimesBetwPRs(self, pr_df):
        """Seconds elapsed since the previous PR (0 for the first)."""
        # NOTE(review): first_PR_ts is computed but never used below.
        first_PR_ts = self.getFirstPRTmstmp(pr_df)
        PR_timestamps_df = pr_df['PR_Created_At']
        times_since_last_PR = {}
        for PR_id in range(len(pr_df)):
            if PR_id == 0:
                times_since_last_PR[str(PR_id)] = 0
            else:
                PR_dt = datetime.strptime(PR_timestamps_df[PR_id], '%Y-%m-%d %H:%M:%S')
                prev_PR_dt = datetime.strptime(PR_timestamps_df[PR_id-1], '%Y-%m-%d %H:%M:%S')
                diff = (PR_dt - prev_PR_dt).total_seconds()
                # Clamp clock anomalies to zero.
                if diff < 0: diff = 0
                times_since_last_PR[str(PR_id)] = diff
        times_df = pd.DataFrame(columns=['times_since_prev_PR'])
        for PR_id in range(len(pr_df)):
            times_df.loc[PR_id] = times_since_last_PR[str(PR_id)]
        return times_df

    def getTimesBetween(self, pr_df, feature1, feature2):
        """Seconds between two timestamp columns; None when either is missing."""
        temp = {}
        for item in range(len(pr_df)):
            # Missing timestamps are NaN (float), not str — hence the check.
            if type(pr_df[feature1][item]) == str and type(pr_df[feature2][item]) == str:
                time1 = datetime.strptime(pr_df[feature1][item], '%Y-%m-%d %H:%M:%S')
                time2 = datetime.strptime(pr_df[feature2][item], '%Y-%m-%d %H:%M:%S')
                diff = (time1 - time2).total_seconds()
                if diff < 0: diff = 0
                temp[str(item)] = diff
            else:
                temp[str(item)] = None
        times_df = pd.DataFrame(columns=['Time_between_' + feature1 + '_and_' + feature2])
        for item in range(len(pr_df)):
            times_df.loc[item] = temp[str(item)]
        return times_df

    def getICdistrDiscriminatives(self, df, pr_df):
        """Distribution statistics of the I/C comment labels for each PR."""
        dist_dicriminative_infos = ['I_mean', 'I_std', 'I_min', 'I_25%', 'I_50%', 'I_75%', 'I_max',
                                    'I_var', 'I_mode', 'C_mean', 'C_std', 'C_min', 'C_25%', 'C_50%',
                                    'C_75%', 'C_max', 'C_var', 'C_mode']
        IC_dist_discriminatives_df = pd.DataFrame(columns=dist_dicriminative_infos)
        IC_dist_discriminatives = {}
        for pr_id in range(len(pr_df)):
            IC_dist_discriminatives[pr_id] = []
            pr_comments_labels = self.getPRcommentsLabels(df, pr_df['PR_Number'][pr_id])
            info = pr_comments_labels.describe()
            # info.index[1:] skips the 'count' row of describe().
            for i in info.index[1:]:
                IC_dist_discriminatives[pr_id].append(info.loc[i]['Inclusiveness_Scale_1_to_10'])
            # NOTE(review): .mode() returns a Series (possibly multi-valued),
            # appended as a single cell value — confirm downstream handles it.
            IC_dist_discriminatives[pr_id].append(pr_comments_labels['Inclusiveness_Scale_1_to_10'].var())
            IC_dist_discriminatives[pr_id].append(pr_comments_labels['Inclusiveness_Scale_1_to_10'].mode())
            for i in info.index[1:]:
                IC_dist_discriminatives[pr_id].append(info.loc[i]['Constructiveness_Scale_1_to_10'])
            IC_dist_discriminatives[pr_id].append(pr_comments_labels['Constructiveness_Scale_1_to_10'].var())
            IC_dist_discriminatives[pr_id].append(pr_comments_labels['Constructiveness_Scale_1_to_10'].mode())
        for pr_id in range(len(pr_df)):
            IC_dist_discriminatives_df.loc[pr_id] = IC_dist_discriminatives[pr_id]
        return IC_dist_discriminatives_df

    def vectorize(self, df):
        """Assemble all feature groups, scale the numeric ones, and concat."""
        self.pr_df = self.getPRinfos(df)
        tfidfs_df = self.nlp.getTFIDFs(self.pr_df['PR_Title_and_Body'])
        sa_df = self.nlp.getSentimentalAnalisys(self.pr_df['PR_Title_and_Body'])
        prfu_df = self.getPRFreqPerUser(self.pr_df)
        times_btw_df = self.getTimesBetwPRs(self.pr_df)
        tbcc_df = self.getTimesBetween(self.pr_df, 'PR_Closed_At', 'PR_Created_At')
        tbcc_df.columns = ['time_between_creation_and_closing']
        others_df = self.pr_df[['PR_Number', 'PR_State', 'PR_Additions', 'PR_Deletions',
                                'PR_Comments_Num', 'PR_Commits_Num', 'PR_Merged', 'PR_Review_Comments_Num']]
        # NOTE(review): inplace replace on a column-slice may trigger pandas
        # SettingWithCopyWarning — consider .copy() on others_df.
        others_df['PR_State'].replace('open', 0, inplace=True)
        others_df['PR_State'].replace('closed', 1, inplace=True)
        others_df['PR_Merged'].replace(False, 0, inplace=True)
        others_df['PR_Merged'].replace(True, 1, inplace=True)
        IC_dist_discriminatives_df = self.getICdistrDiscriminatives(df, self.pr_df)
        temp_df = pd.concat([tfidfs_df, prfu_df, times_btw_df, tbcc_df, others_df], axis=1)
        # Sentiment and I/C distribution features are deliberately left
        # unscaled and concatenated after standardisation.
        scaled_temp = StandardScaler().fit_transform(temp_df)
        scaled_temp_df = pd.DataFrame(scaled_temp, columns=temp_df.columns)
        vectors_df = pd.concat([scaled_temp_df, sa_df, IC_dist_discriminatives_df], axis=1)
        return vectors_df
from bs4 import BeautifulSoup
import pprint
# Scrape a saved Zillow listing page (zillow.html) into a flat dict and
# pretty-print it.  All lookups are positional (class-name + index), so this
# breaks whenever Zillow changes its markup — assumes the snapshot's layout.
print('Begin Evil Zillow Scrape')

data = {}
with open('zillow.html', encoding='utf8') as fp:
    soup = BeautifulSoup(fp, 'html.parser')

# URL
canonical_list = soup.find_all(rel='canonical')
data['url'] = canonical_list[0].attrs['href']

# Key Details: bedrooms, bathrooms, square feet
price_list = soup.find_all('span', class_='ds-value')
data['price'] = price_list[0].contents[0]
keydetail_list = soup.find_all('span', class_='ds-bed-bath-living-area')
# Indices 0/2/3 — presumably bed, bath and sqft spans in DOM order; index 1
# is skipped — TODO confirm against the saved page.
data['bedrooms'] = keydetail_list[0].span.contents[0]
data['bathrooms'] = keydetail_list[2].span.contents[0]
data['sqft'] = keydetail_list[3].span.contents[0]
status_list = soup.find_all('span', class_='ds-status-details')
data['status'] = status_list[0].text
zestimate_list = soup.find_all('span', class_='ds-estimate-value')
data['zestimate'] = zestimate_list[0].text
address_list = soup.find_all('h1', class_='ds-address-container')
data['address'] = address_list[0].span.contents[0]
data['address_csz'] = address_list[0].span.next_sibling.contents[0].replace(u'\xa0', '')  # remove non-breaking space

# Overview
zstats_list = soup.find_all('div', class_='ds-overview-stat-value')
zstats = {}
zstats['time'] = zstats_list[0].text
zstats['views'] = zstats_list[1].text
zstats['saves'] = zstats_list[2].text
data['zillow_stats'] = zstats
overview_list = soup.find_all('div', class_='ds-overview-section')
data['description'] = overview_list[1].contents[0].text

# Facts and features
homefacts_list = soup.find_all('ul', class_='ds-home-fact-list')
homefacts = {}
homefacts['type'] = homefacts_list[0].contents[0].contents[2].text
homefacts['year_built'] = homefacts_list[0].contents[1].contents[2].text
homefacts['heating'] = homefacts_list[0].contents[2].contents[2].text
homefacts['cooling'] = homefacts_list[0].contents[3].contents[2].text
homefacts['parking'] = homefacts_list[0].contents[4].contents[2].text
homefacts['lot'] = homefacts_list[0].contents[5].contents[2].text
homefacts['price_sqft'] = homefacts_list[0].contents[6].contents[2].text
data['facts_features'] = homefacts

# Rental value
rental_list = soup.find_all(id='ds-rental-home-values')
data['zestimate_rent'] = rental_list[0].text.replace('Rental valueRent Zestimate®','')

# finish
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(data)
pass
from qiskit import *
from qiskit import Aer, QuantumCircuit
from qiskit.circuit import Parameter
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit.opflow import StateFn, PauliSumOp, AerPauliExpectation, ListOp, Gradient
from qiskit.utils import QuantumInstance
from qiskit import IBMQ
provider=IBMQ.load_account()
from qiskit_machine_learning.neural_networks import OpflowQNN
from math import pi, sqrt
import pickle
def neuron(params1, ratio):
    """Run a two-stage quantum 'neuron' on the ibmq_lima backend.

    Stage 1 measures a single rotated qubit 100 times and forms the ratio
    counts('0')/counts('1'); if it reaches *ratio*, stage 2 runs a 2-qubit
    circuit once and its single-shot outcome decides whether the neuron fires.

    :param params1: Two rotation parameters [p0, p1].
    :param ratio: Firing threshold for the stage-1 counts ratio.
    :return: [True, counts_ratio] when the neuron fires, else [False].
    """
    qreg_q = QuantumRegister(1, 'q')
    creg_c = ClassicalRegister(1, 'c')
    qc1 = QuantumCircuit(qreg_q, creg_c)
    # Real-hardware backend: every call queues a job on IBM Quantum.
    backend = provider.get_backend("ibmq_lima")
    qc1.h(0)
    qc1.ry(pi/params1[0], 0)
    # NOTE(review): pi/-1 * params1[1] parses as (pi / -1) * params1[1],
    # i.e. -pi * params1[1] — confirm a division by params1[1] wasn't meant.
    qc1.rx(pi/-1 * params1[1], 0)
    qc1.rx(pi/0.99, 0)
    qc1.measure(0, 0)
    res = backend.run(transpile(qc1,backend), shots=100)
    result = res.result()
    out1 = result.get_counts(qc1)
    # NOTE(review): raises KeyError if all 100 shots land on one outcome.
    output = out1['0']/out1['1']
    if output >= ratio:
        circuit = QuantumCircuit(2, 1)
        circuit.ry(pi, 0)
        circuit.cx(0, 1)
        circuit.u(0.5*pi/3, pi/3, 0, 0)
        circuit.u(params1[0]*pi/3, pi/3, 0, 1)
        circuit.cx(0, 1)
        circuit.measure(0, 0)
        # Single shot: the firing decision is inherently probabilistic.
        res = backend.run(transpile(circuit,backend), shots=1)
        result = res.result()
        out2 = result.get_counts(circuit)
        print(list(out2))
        if list(out2)[0] == "1":
            return [True, output]
        else:
            return [False]
    else:
        return [False]
def avg(input_):
    """Return the arithmetic mean of a non-empty sequence of numbers."""
    # sum()/len() replaces the manual accumulator loop; raises
    # ZeroDivisionError on an empty sequence exactly like the original.
    return sum(input_) / len(input_)
def new_network(datadir, input_):
    """Run one pass of the toy spiking network and persist the weights.

    Bug fix: the weights file was opened with the default read-text mode
    ('r'), so pickle.dump() failed with "not writable" (and the handle was
    never closed).  It is now opened for binary writing in a 'with' block.

    :param datadir: Directory where 'weights.pkl' is written.
    :param input_: Scalar value propagated through firing neurons.
    :return: Average of the collected output values.
    """
    # 10 layers x 100 synapses; each synapse keeps a history starting at 0.5.
    weights = [[[0.5] for _ in range(100)] for _ in range(10)]
    in1 = [[0.005] for _ in range(10)]
    output = []
    for i in range(len(weights)):
        for j in range(len(weights[i])):
            fires = neuron([avg(in1[i]), avg(weights[i][j])], 0.05)
            if fires[0]:
                if i <= 7:
                    in1[i + 1].append(input_)
                else:
                    output.append(input_)
                weights[i][j].append(fires[1] * 5)
            else:
                # NOTE(review): non-firing neurons contribute only on the
                # last layer (i > 8); layer 8 is silently dropped — confirm.
                if i > 8:
                    output.append(0)
    # pickle requires a binary file object (mode "wb").
    with open(datadir + "/weights.pkl", "wb") as file:
        pickle.dump(weights, file)
    return avg(output)
new_network("<PATH TO FOLDER CONTAINING THIS FILE AND 'ai.pkl'>", 2)
|
# 어떤 걸 기준으로 먼저 시행해야 시간이 줄어들 수 있는지 생각해보자.
# (이전작업종료시간- 다음작업시작시간) + 다음작업걸리는시간 을 비교해서 짧은 걸 먼저 처리하자.
# 갯수 만큼의 리스트를 만들고, 1초가 지날 때 마다 업데이트 해주는 방식 > index의 문제가 있어서 불가능할 듯 ? >> index를 잘 설정해주는 방향으로 ..?
# 그냥 시작시간이랑 걸리는 시간으로 계산해서 나갈까 ?? >> 그럼 종료되었다는 걸 어떻게 표현할래 ?
import collections
def solution(jobs):
    """Disk-controller scheduling: average turnaround time under SJF.

    Each job is [request_time, duration].  Whenever the disk is free, the
    waiting job with the shortest duration runs next (shortest-job-first),
    which minimises the mean time between a job's request and completion —
    the strategy the original (unfinished) comments describe.

    The original body never advanced its timers and fell through a bare
    ``return`` (yielding None); this is a working implementation with the
    same call signature.

    :param jobs: List of [start, burst] pairs.
    :return: Floor of the mean turnaround time.
    """
    import heapq  # local import: the file only imports 'collections' at top level

    pending = sorted(jobs)          # by request time (then duration)
    n = len(pending)
    ready = []                      # min-heap of (burst, start)
    now = 0
    done = 0
    idx = 0
    total_turnaround = 0
    while done < n:
        # Admit every job that has been requested by 'now'.
        while idx < n and pending[idx][0] <= now:
            start, burst = pending[idx]
            heapq.heappush(ready, (burst, start))
            idx += 1
        if ready:
            burst, start = heapq.heappop(ready)
            now += burst
            total_turnaround += now - start
            done += 1
        else:
            # Disk idle: jump straight to the next request time.
            now = pending[idx][0]
    return total_turnaround // n
# Sample run from the problem statement; the expected answer is 9.
answer = solution([[0, 3], [1, 9], [2, 6]])
print(answer)
fruits = ['apple', 'pear', 'grapefruit', 'pineapple', 'avocado']
fruits.append('blueberry')            # add 'blueberry' to the list
fruits.remove('apple')                # remove 'apple' from the list
position = fruits.index('pineapple')  # find the position of 'pineapple'
fruits.insert(position, 'lemon')      # insert 'lemon' before 'pineapple'
is_mango_in = 'mango' in fruits       # check whether 'mango' is in the list
types_fruits = len(set(fruits))       # count how many distinct fruits there are
print(fruits)
print(f"There are {types_fruits} fruits")
|
# Author: Luigi Berducci
# Data: 06 Feb 2019
# Purpose: implement ID3 algorithm
from anytree import Node, RenderTree
from anytree.exporter import DotExporter
import pandas as pd
import math
import datetime
class ID3:
    """ID3 decision-tree builder over a pandas DataFrame, rendered via anytree."""

    def __init__(self, data, dependentVariable):
        # Placeholder root; children are attached by createDecisionTree().
        self.tree = Node("Root")
        self.data = data
        self.var = dependentVariable
        # All observed outcome values of the dependent variable.
        self.values = set( data[self.var].values )

    def createDecisionTree(self):
        """Build the tree over every attribute except the target variable."""
        attributes = self.data.columns.to_list()
        attributes.remove(self.var)
        self.recursiveTree(self.data, attributes, self.tree)

    def recursiveTree(self, data, attributes, parentNode):
        """Recursively split *data* on the highest-information-gain attribute."""
        if(len(attributes)==0):
            return
        bestAttribute = self.getMaxGainNode(data, attributes)
        for value in set(data[bestAttribute].values):
            newData = data[ data[bestAttribute] == value ]
            newAttributes = attributes.copy()
            newAttributes.remove(bestAttribute)
            if(self.isCompletelyInformative(newData)):
                # Pure subset: record the decided outcome in the leaf label.
                result = newData[self.var].unique()[0]
                currentNode = Node("{}=={} -> {}".format(bestAttribute, value, result), parentNode)
            else:
                currentNode = Node("{}=={}".format(bestAttribute, value), parentNode)
                # NOTE(review): source indentation was lost; the recursion is
                # placed in the impure branch (standard ID3) — confirm intent.
                self.recursiveTree(newData, newAttributes, currentNode)

    def isCompletelyInformative(self, data):
        """True when every row has the same outcome (pure subset)."""
        outcome = data[self.var]
        return len(outcome.unique())==1

    def getMaxGainNode(self, data, attributes):
        """Return the attribute with the highest information gain (ties: last wins)."""
        maxGain = 0
        root = None
        for att in attributes:
            attGain = self.computeInformationGain(data, att)
            if(attGain>=maxGain):
                root = att
                maxGain = attGain
        return root

    def computeInformationGain(self, data, attribute):
        """Entropy of *data* minus the weighted entropy of each value subset."""
        infoGain = self.computeEntropy(data)
        nInstances = len(data)
        attributeValues = set( data[attribute].values )
        for val in attributeValues:
            subset = data[ data[attribute]==val ]
            nSubset = len(subset)
            valEntropy = self.computeEntropy(subset)
            infoGain += - (nSubset/nInstances) * valEntropy
        return infoGain

    def computeEntropy(self, data):
        """Shannon entropy of the dependent variable within *data*."""
        entropy = 0
        nInstaces = len(data)  # NOTE(review): divides by zero on empty data
        for val in self.values:
            subset = data[ data[self.var]==val ]
            nSubset = len(subset)
            ratio = nSubset/nInstaces
            if(ratio==0):
                # 0 * log(0) is taken as 0 by convention.
                entropy += 0
            else:
                entropy += -ratio * math.log2(ratio)
        return entropy

    def printTree(self):
        """Pretty-print the tree with anytree's RenderTree."""
        if(self.tree==None):
            return
        for pre, fill, node in RenderTree(self.tree):
            print("{}{}".format(pre, node.name))
|
# zadanie 9
# Przepisz jeden z napisanych przez siebie iteratorów na generator.
#
# na potrzeby tego zadania przepisze iterator z zadania 7 na generator
def parzysty(dane):
    """Generator yielding every element of *dane* in order (exercise 9:
    the exercise-7 iterator rewritten as a generator)."""
    # Iterate the sequence directly instead of indexing with range(len(...)).
    for element in dane:
        yield element
# Demonstrate the generator: consume and print its first five values.
liczby = parzysty([0, 1, 2, 3, 4, 5, 6, 7, 8])
print(next(liczby), end=', ')
print(next(liczby), end=', ')
print(next(liczby), end=', ')
print(next(liczby), end=', ')
print(next(liczby))
# Copyright 2020 Advanced Remanufacturing and Technology Centre
# Copyright 2020 ROS-Industrial Consortium Asia Pacific Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from trainer.P1Trainer import P1Trainer
from trainer.P2Trainer import P2Trainer
from trainer.P3Trainer import P3Trainer
from windows.Counting import CountingWindow
from windows.Deploy import DeployWindow
from windows.Main import MainWindow
from windows.Train import TrainWindow
from datetime import date
from PySide2 import QtCore
# Clear all stored session_config.txt / usecase_config.txt from earlier runs.
# Bug fix: the original tested session_config.txt twice, so a leftover
# usecase_config.txt alone was never detected; each file is now checked and
# removed independently, using os.remove instead of spawning 'rm'.
for _leftover in ('../data/session_config.txt', '../data/usecase_config.txt'):
    if os.path.exists(_leftover):
        os.remove(_leftover)
def test_init_MainWindow(qtbot):
    """MainWindow can be constructed and shown."""
    widget = MainWindow()
    qtbot.addWidget(widget)
    widget.show()
    assert widget.isVisible() is True
def test_openTrain_MainWindow(qtbot):
    """Clicking the train button toggles the train window open and closed."""
    widget = MainWindow()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.train_button, QtCore.Qt.LeftButton)
    assert widget.train_window.isVisible() is True
    assert widget.isTrainOpen is True
    # A second click closes the train window again.
    qtbot.mouseClick(widget.train_button, QtCore.Qt.LeftButton)
    assert widget.train_window.isVisible() is False
    assert widget.isTrainOpen is False
def test_openDeploy_MainWindow(qtbot):
    """Clicking the deploy button toggles the deploy window open and closed."""
    widget = MainWindow()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.deploy_button, QtCore.Qt.LeftButton)
    assert widget.deploy_window.isVisible() is True
    assert widget.isDeployOpen is True
    # A second click closes the deploy window again.
    qtbot.mouseClick(widget.deploy_button, QtCore.Qt.LeftButton)
    assert widget.deploy_window.isVisible() is False
    assert widget.isDeployOpen is False
def test_closeAll_MainWindow(qtbot):
    """The quit button hides the main window and both child windows."""
    widget = MainWindow()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.quit_button, QtCore.Qt.LeftButton)
    assert widget.isVisible() is False
    assert widget.train_window.isVisible() is False
    assert widget.deploy_window.isVisible() is False
def test_emptySession_emptyUseCase_DeployWindow(qtbot):
    """With no config files on disk, DeployWindow falls back to placeholder values."""
    widget = DeployWindow()
    qtbot.addWidget(widget)
    # BUG FIX: the original checked session_config.txt twice; the second
    # check was meant for usecase_config.txt. Remove each file independently
    # and via os.remove rather than a spawned `rm` process.
    if os.path.exists('../data/session_config.txt'):
        os.remove('../data/session_config.txt')
    if os.path.exists('../data/usecase_config.txt'):
        os.remove('../data/usecase_config.txt')
    assert widget._path_to_model == 'filepath/to/onnx/model'
    assert widget._path_to_label_list == 'filepath/to/classes/list/txt'
    assert widget.usecase_mode == 0
def test_invalidSession_invalidUseCase_DeployWindow(qtbot):
    """Invalid config contents fall back to placeholders; usecase mode is read verbatim."""
    test_session_config_content = ['test_filepath_to_model\n',
                                   'test_filepath_to_label_list\n',
                                   'visualize\n']
    # Write an intentionally-invalid session config. Using `with` guarantees
    # the handle is closed even if a write fails (original leaked on error).
    with open('../data/session_config.txt', 'w+') as outF:
        outF.writelines(test_session_config_content)
    test_usecase_config_content = ['-1\n']
    with open('../data/usecase_config.txt', 'w+') as outF:
        outF.writelines(test_usecase_config_content)
    widget = DeployWindow()
    qtbot.addWidget(widget)
    # Invalid paths are rejected, so the placeholders survive; the (invalid)
    # usecase id is still stored as-is.
    assert widget._path_to_model == 'filepath/to/onnx/model'
    assert widget._path_to_label_list == 'filepath/to/classes/list/txt'
    assert widget.usecase_mode == '-1'
def test_validSession_validUseCase_DeployWindow(qtbot):
    """Valid config files populate the model path, label list and usecase mode."""
    test_session_config_content = ['./data/model/squeezenet1.1-7.onnx\n',
                                   '/data/label_list/imagenet_classes.txt\n',
                                   'visualize\n',
                                   'CPU\n']
    # `with` guarantees the config files are flushed and closed before the
    # DeployWindow reads them (the original left the handles dangling).
    with open('../data/session_config.txt', 'w+') as outF:
        outF.writelines(test_session_config_content)
    test_usecase_config_content = ['0\n']
    with open('../data/usecase_config.txt', 'w+') as outF:
        outF.writelines(test_usecase_config_content)
    widget = DeployWindow()
    qtbot.addWidget(widget)
    assert widget._path_to_model == './data/model/squeezenet1.1-7.onnx'
    assert widget._path_to_label_list == './data/label_list/imagenet_classes.txt'
    assert widget.usecase_mode == '0'
def test_deployPackage_DeployWindow(qtbot):
    """The run button toggles deployment: first click starts it, second click kills it."""
    widget = DeployWindow()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.run_button, QtCore.Qt.LeftButton)
    # poll() returns None while the spawned deploy script is still running.
    isDeployScriptRunning = widget._deploy_process.poll()
    assert isDeployScriptRunning is None
    assert widget._is_running is True
    widget._deploy_process.kill()
    qtbot.mouseClick(widget.run_button, QtCore.Qt.LeftButton)
    isKillScriptRunning = widget._kill_process.poll()
    assert isKillScriptRunning is None
    assert widget._is_running is False
def test_setUseCase_Classification_DeployWindow(qtbot):
    """Selecting 'Counting' in the usecase combo box opens the counting window."""
    widget = DeployWindow()
    qtbot.addWidget(widget)
    qtbot.keyClicks(widget.usecase_config_button, 'Counting')
    assert widget.counting_window.isVisible() is True
def test_setUseCase_DeployWindow(qtbot):
    """setUseCase writes 1 config line for Classification, 2 for Color-Matching."""
    widget = DeployWindow(True)
    qtbot.addWidget(widget)
    classification_index = 0
    color_matching_index = 0
    for idx, usecase in enumerate(widget.usecase_list):
        if usecase == 'Classification':
            classification_index = idx
        if usecase == 'Color-Matching':
            color_matching_index = idx
    widget.setUseCase(classification_index)
    with open('../data/usecase_config.txt') as cfg:
        usecase_config_lines = [line.rstrip('\n') for line in cfg]
    assert len(usecase_config_lines) == 1
    widget.setUseCase(color_matching_index)
    with open('../data/usecase_config.txt') as cfg:
        usecase_config_lines = [line.rstrip('\n') for line in cfg]
    assert len(usecase_config_lines) == 2
def test_setModel_DeployWindow(qtbot):
    """In debug mode, the model button stores a dummy model filepath."""
    widget = DeployWindow(True)
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.model_button, QtCore.Qt.LeftButton)
    assert widget._path_to_model == 'dummy_model_filepath'
def test_setLabelList_DeployWindow(qtbot):
    """In debug mode, the list button stores a dummy label-list filepath."""
    widget = DeployWindow(True)
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.list_button, QtCore.Qt.LeftButton)
    assert widget._path_to_label_list == 'dummy_label_list_filepath'
def test_setP1_TrainWindow(qtbot):
    """The P1 button sets precision level 1."""
    widget = TrainWindow()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.p1_button, QtCore.Qt.LeftButton)
    assert widget._precision_level == 1


def test_setP2_TrainWindow(qtbot):
    """The P2 button sets precision level 2."""
    widget = TrainWindow()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.p2_button, QtCore.Qt.LeftButton)
    assert widget._precision_level == 2


def test_setP3_TrainWindow(qtbot):
    """The P3 button sets precision level 3."""
    widget = TrainWindow()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.p3_button, QtCore.Qt.LeftButton)
    assert widget._precision_level == 3
def test_setModel_TrainWindow(qtbot):
    """The model selector tracks the chosen model name at each precision level."""
    widget = TrainWindow()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.p1_button, QtCore.Qt.LeftButton)
    qtbot.keyClicks(widget.model_selector, 'resnet')
    assert widget.model_name == 'resnet'
    qtbot.mouseClick(widget.p2_button, QtCore.Qt.LeftButton)
    qtbot.keyClicks(widget.model_selector, 'fasterrcnn')
    assert widget.model_name == 'fasterrcnn'
    qtbot.mouseClick(widget.p3_button, QtCore.Qt.LeftButton)
    qtbot.keyClicks(widget.model_selector, 'maskrcnn')
    assert widget.model_name == 'maskrcnn'
def test_runLabelme_TrainWindow(qtbot):
    """The label button spawns a labelme process; kill it after the check."""
    widget = TrainWindow()
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.label_button, QtCore.Qt.LeftButton)
    # poll() is None while the spawned labelme process is still running.
    isLabelmeRunning = widget.label_process.poll()
    assert isLabelmeRunning is None
    widget.label_process.kill()
def test_validateTraining_TrainWindow(qtbot):
    """The train button connects only once model, dataset, label list AND
    dataset labelling are all flagged ready (for precision level 2)."""
    widget = TrainWindow()
    qtbot.addWidget(widget)
    widget._precision_level = 2
    widget.validateTraining()
    assert widget.buttonConnected is False
    widget._is_model_ready = True
    widget.validateTraining()
    assert widget.buttonConnected is False
    widget._is_dataset_linked = True
    widget.validateTraining()
    assert widget.buttonConnected is False
    widget._is_labellist_linked = True
    widget.validateTraining()
    assert widget.buttonConnected is False
    widget._precision_level = 1
    widget.validateTraining()
    assert widget.buttonConnected is False
    widget.buttonConnected = False
    widget._precision_level = 2
    widget._is_dataset_labelled = True
    # All prerequisites satisfied: the button must now be connected.
    widget.validateTraining()
    assert widget.buttonConnected is True
def test_validateDataset_TrainWindow(qtbot):
    """A non-existent dataset path never validates, at any precision level."""
    widget = TrainWindow()
    qtbot.addWidget(widget)
    widget._precision_level = 1
    widget._path_to_dataset = 'invalid_path_to_dataset'
    qtbot.mouseClick(widget.validate_button, QtCore.Qt.LeftButton)
    assert widget._is_dataset_labelled is False
    widget._precision_level = 2
    qtbot.mouseClick(widget.validate_button, QtCore.Qt.LeftButton)
    assert widget._is_dataset_labelled is False
    widget._precision_level = 3
    qtbot.mouseClick(widget.validate_button, QtCore.Qt.LeftButton)
    assert widget._is_dataset_labelled is False
def test_populateModelSelector_TrainWindow(qtbot):
    """The model list follows the precision level: 7 classifiers at P1,
    fasterrcnn first at P2, maskrcnn first at P3."""
    widget = TrainWindow()
    qtbot.addWidget(widget)
    widget._precision_level = 1
    widget.populateModelSelector()
    assert len(widget._model_list) == 7
    widget._precision_level = 2
    widget.populateModelSelector()
    assert widget._model_list[0] == 'fasterrcnn'
    widget._precision_level = 3
    widget.populateModelSelector()
    assert widget._model_list[0] == 'maskrcnn'
def test_checkModelReady_TrainWindow():
    """setModel flags the model as ready (debug-mode TrainWindow)."""
    widget = TrainWindow(True)
    widget.setModel(1)
    assert widget._is_model_ready is True
def test_setLabelList_TrainWindow(qtbot):
    """In debug mode, the list button links a label list."""
    widget = TrainWindow(True)
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.list_button, QtCore.Qt.LeftButton)
    assert widget._is_labellist_linked is True
def test_setDataset_TrainWindow(qtbot):
    """In debug mode, the dataset button links a dataset."""
    widget = TrainWindow(True)
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.dataset_button, QtCore.Qt.LeftButton)
    assert widget._is_dataset_linked is True
def test_conformDatasetToCOCO_TrainWindow(qtbot):
    """The generate button spawns labelme2coco processes for the train and val splits."""
    import shutil  # local import: only this test needs rmtree
    # Use os.makedirs / shutil.rmtree instead of spawning `mkdir -p` / `rm -r`
    # subprocesses: portable and without silent failure modes.
    os.makedirs('../data/datasets/p2p3_dummy_dataset/train_dataset', exist_ok=True)
    os.makedirs('../data/datasets/p2p3_dummy_dataset/val_dataset', exist_ok=True)
    widget = TrainWindow(True)
    qtbot.addWidget(widget)
    qtbot.mouseClick(widget.generate_button, QtCore.Qt.LeftButton)
    assert widget.label_train_process is not None
    widget.label_train_process.kill()
    assert widget.label_val_process is not None
    widget.label_val_process.kill()
    # Clean up test materials.
    shutil.rmtree('../data/datasets/p2p3_dummy_dataset', ignore_errors=True)
def test_closeWindow_CountingWindow(qtbot):
    """The cancel button hides the counting window."""
    path_to_labellist = '../data/label_list/coco_classes.txt'
    path_to_usecase_config = '../data/usecase_config.txt'
    widget = CountingWindow(path_to_labellist, path_to_usecase_config)
    qtbot.addWidget(widget)
    widget.show()
    qtbot.mouseClick(widget.cancel_button, QtCore.Qt.LeftButton)
    assert widget.isVisible() is False
def test_writeToUseCaseConfig_CountingWindow(qtbot):
    """The finish button writes usecase id '1' plus the selected classes, then closes."""
    path_to_labellist = '../data/label_list/coco_classes.txt'
    path_to_usecase_config = '../data/usecase_config.txt'
    widget = CountingWindow(path_to_labellist, path_to_usecase_config)
    qtbot.addWidget(widget)
    widget.show()
    widget._select_list = ['person']
    qtbot.mouseClick(widget.finish_button, QtCore.Qt.LeftButton)
    select_list = [line.rstrip('\n') for
                   line in open('../data/usecase_config.txt')]
    # First line is the counting usecase id, the rest are selected classes.
    assert select_list[0] == '1'
    assert select_list[1] == 'person'
    assert widget.isVisible() is False
def test_addObject_CountingWindow():
    """addObject de-duplicates entries; removeObject on an empty list is a no-op."""
    path_to_labellist = '../data/label_list/coco_classes.txt'
    path_to_usecase_config = '../data/usecase_config.txt'
    widget = CountingWindow(path_to_labellist, path_to_usecase_config)
    widget.addObject(0)
    assert len(widget._select_list) == 1
    # Adding the same index again must not create a duplicate.
    widget.addObject(0)
    assert len(widget._select_list) == 1
    widget.addObject(1)
    assert len(widget._select_list) == 2
    widget.removeObject(0)
    widget.removeObject(0)
    assert len(widget._select_list) == 0
    # Removing from an empty selection must not raise.
    widget.removeObject(0)
    assert len(widget._select_list) == 0
def test_P1Trainer():
    """End-to-end P1 (classification) training on the hymenoptera dataset:
    downloads the data if absent, trains, and checks the .pth and .onnx
    outputs plus the model factory for every supported architecture."""
    if not os.path.exists('../data/datasets/hymenoptera_data'):
        # Fetch and unpack the PyTorch tutorial dataset on first run.
        p1 = subprocess.Popen(['wget',
                               'https://download.pytorch.org/tutorial/hymenoptera_data.zip',
                               '--directory-prefix=../data/datasets/'])
        p1.communicate()
        p2 = subprocess.Popen(['unzip',
                               '../data/datasets/hymenoptera_data.zip',
                               '-d',
                               '../data/datasets/'])
        p2.communicate()
    path_to_dataset = '../data/datasets/hymenoptera_data'
    model_name = 'inception'
    label_list = ['ants', 'bees']
    p1_trainer = P1Trainer(path_to_dataset, model_name, label_list)
    p1_trainer.train(True)
    # Output files are date-stamped.
    output_pth_filename = './trainer/P1TrainFarm/' + model_name + '_' + str(date.today()) + '.pth'
    output_model_filename = '../data/model/' + model_name + '_' + str(date.today()) + '.onnx'
    assert os.path.exists(output_pth_filename) is True
    assert os.path.exists(output_model_filename) is True
    p1_model_array = ['alexnet', 'vgg', 'squeezenet', 'densenet', 'resnet', 'mobilenet']
    for model in p1_model_array:
        model_ft, input_size = p1_trainer.initialize_model(model,
                                                           len(label_list),
                                                           True)
        assert model_ft is not None
    # Unknown architectures yield None rather than raising.
    model_ft, input_size = p1_trainer.initialize_model('invalid_model',
                                                       len(label_list),
                                                       True)
    assert model_ft is None
    # Clean up test materials.
    if (os.path.exists('../data/datasets/hymenoptera_data') and
            os.path.exists('../data/datasets/hymenoptera_data.zip') and
            os.path.exists('./trainer/P1TrainFarm')):
        p3 = subprocess.Popen(['rm',
                               '-r',
                               '../data/datasets/hymenoptera_data',
                               '../data/datasets/hymenoptera_data.zip',
                               './trainer/P1TrainFarm',
                               output_model_filename])
        p3.communicate()
def test_P2Trainer():
    """P2 (detection) training in debug mode spawns the create/run/build/export
    subprocesses; each is checked to be alive, then killed."""
    path_to_dataset = 'path_to_dummy_dataset'
    model_name = 'fasterrcnn'
    path_label_list = '../data/label_list/coco_classes.txt'
    if os.path.exists(path_label_list):
        label_list = [line.rstrip('\n') for line in open(path_label_list)]
    else:
        # Fallback labels when the COCO class list is not available.
        label_list = ['ant', 'bees']
    p2_trainer = P2Trainer(path_to_dataset, model_name, label_list)
    p2_trainer.train(True)
    assert p2_trainer.create_process is not None
    p2_trainer.create_process.kill()
    assert p2_trainer.run_process is not None
    p2_trainer.run_process.kill()
    assert p2_trainer.build_export_process is not None
    p2_trainer.build_export_process.kill()
    assert p2_trainer.export_process is not None
    p2_trainer.export_process.kill()
    if os.path.exists('./trainer/P2TrainFarm'):
        p1 = subprocess.Popen(['rm', '-r', './trainer/P2TrainFarm'])
        p1.communicate()
def test_P3Trainer():
    """P3 (segmentation) training in debug mode spawns the create/run/build/export
    subprocesses; each is checked to be alive, then killed."""
    path_to_dataset = 'path_to_dummy_dataset'
    model_name = 'maskrcnn'
    path_label_list = '../data/label_list/coco_classes.txt'
    if os.path.exists(path_label_list):
        label_list = [line.rstrip('\n') for line in open(path_label_list)]
    else:
        # Fallback labels when the COCO class list is not available.
        label_list = ['ant', 'bees']
    p3_trainer = P3Trainer(path_to_dataset, model_name, label_list)
    p3_trainer.train(True)
    assert p3_trainer.create_process is not None
    p3_trainer.create_process.kill()
    assert p3_trainer.run_process is not None
    p3_trainer.run_process.kill()
    assert p3_trainer.build_export_process is not None
    p3_trainer.build_export_process.kill()
    assert p3_trainer.export_process is not None
    p3_trainer.export_process.kill()
    if os.path.exists('./trainer/P3TrainFarm'):
        p1 = subprocess.Popen(['rm', '-r', './trainer/P3TrainFarm'])
        p1.communicate()
|
import argparse
def new_ui_entry(ns: argparse.Namespace):
    """Launch the new UI: initialise the app from parsed args, then run it."""
    from .app import run, init
    init(ns=ns)
    run(no_init=True)
def old_ui_entry(_: argparse.Namespace):
    """Launch the legacy alternative viewer (ignores parsed args)."""
    from .viewer_old import ui
    ui.run()
def main():
    """Build the CLI parser, then dispatch to whichever UI the user selected."""
    parser = argparse.ArgumentParser(
        description="Moons - Vicar Image Processing And Analysis")
    subcommands = parser.add_subparsers(title="Graphical utilities")
    from . import app
    app_parser: argparse.ArgumentParser = app.get_parser(parent=subcommands)
    app_parser.set_defaults(func=new_ui_entry)
    alt_viewer = subcommands.add_parser(
        "alt-viewer",
        help="Older alternative viewer",
        description="Older alternative viewer"
    )
    alt_viewer.set_defaults(func=old_ui_entry)
    namespace, _ = parser.parse_known_args()
    if hasattr(namespace, 'func'):
        namespace.func(namespace)
    else:
        # No subcommand chosen: show usage instead of failing silently.
        parser.print_help()
|
# encoding:utf-8
import sys
sys.path.append("..")
import numpy as np
from numpy.linalg import norm
from configx.configx import ConfigX
config = ConfigX()
def l1(x):
    """L1 norm (sum of absolute values) of x."""
    return norm(x, 1)
def l2(x):
    """L2 (Euclidean) norm of x."""
    return norm(x, None)
def normalize(rating, minVal=config.min_val, maxVal=config.max_val):
    """Min-max normalize *rating* into (0.01, 1.01]; degenerate range divides by maxVal."""
    if maxVal > minVal:
        # Shift by 0.01 so the minimum rating does not map to exactly 0.
        return float(rating - minVal) / (maxVal - minVal) + 0.01
    if maxVal == minVal:
        return rating / maxVal
    print('error... maximum value is less than minimum value.')
    raise ArithmeticError
def denormalize(rating, minVal=config.min_val, maxVal=config.max_val):
    """Invert normalize(): map a normalized value back onto [minVal, maxVal]."""
    return (rating - 0.01) * (maxVal - minVal) + minVal
def sigmoid(z):
    """Logistic function 1 / (1 + e^-z); accepts scalars or numpy arrays."""
    denom = 1.0 + np.exp(-z)
    return 1.0 / denom
def sigmoid_deriv(z):
    """Derivative of the logistic function: s(z) * (1 - s(z))."""
    s = sigmoid(z)
    return s * (1.0 - s)
def sigmoid_2(z):
    """Logistic function with temperature 2: 1 / (1 + e^(-z/2))."""
    return 1.0 / (1.0 + np.exp(-0.5 * z))
|
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import time
# option = webdriver.FirefoxProfile("/Users/nishu/Library/Application Support/Firefox/Profiles/2m3vd669.robot")
# Open Toutiao in Chrome, wait for it to load, then open Baidu in a second
# tab via injected JavaScript.
driver = webdriver.Chrome()
driver.get("https://www.toutiao.com/")
time.sleep(2)
driver.execute_script('window.open("%s")' % "http://www.baidu.com")
# The experiments below (drag-select + clipboard copy, cookie-based login,
# paste into the Toutiao editor) are kept for reference but disabled.
# text = driver.find_element_by_xpath('//*[@id="rightModule"]/div[2]/div/div/p')
# inputEle = driver.find_element_by_xpath('//*[@id="rightModule"]/div[1]/div/div/div/input')
# action = ActionChains(driver)
# action.move_to_element_with_offset(text, 0, 0)
# action.click_and_hold()
# action.move_by_offset(100, 0)
# action.release()
# action.perform()
# action.key_down(Keys.COMMAND).send_keys("c").key_up(Keys.COMMAND).perform()
# c1 = {u"name":u"uid_tt",u"value": u"8e7aaaca3ea51397034b5aeb5dd4e301",u"domain":u".toutiao.com"}
# c2 = {u"name":u"ccid",u"value": u"3caa67295422191672595c00c082f815",u"domain":u".toutiao.com"}
# c4 = {u"name":u"sid_tt",u"value":u"c7aec0b2d374767f67dffba040b89989",u"domain":u".toutiao.com"}
# driver.add_cookie(c1)
# driver.add_cookie(c2)
# driver.add_cookie(c4)
# driver.get("https://mp.toutiao.com/profile_v3/graphic/publish")
# time.sleep(3)
# publishTextareaElement = driver.find_element_by_class_name("ql-editor")
# publishTextareaElement.click()
# time.sleep(2)
# action = ActionChains(driver)
# action.key_down(Keys.CONTROL).send_keys("t").key_up(Keys.CONTROL).perform()
|
# Original Author: Codec04
# Re-built by Itz-fork
# Project: Gofile2
import os
from re import findall
from setuptools import setup, find_packages
# Run from the directory that contains this setup.py regardless of CWD.
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# Requirements
if os.path.isfile('requirements.txt'):
    with open('requirements.txt') as req:
        reques = req.read().splitlines()
else:
    # Fallback when requirements.txt is absent (e.g. an sdist without it).
    reques = [
        'requests',
        'aiohttp'
    ]
# Readme
if os.path.isfile('README.md'):
    with open(('README.md'), encoding='utf-8') as readmeh:
        big_description = readmeh.read()
else:
    big_description = "Gofile2 is an API wrapper for Gofile API"
# Version (https://github.com/pyrogram/pyrogram/blob/97b6c32c7ff707fd2721338581e7dad5072f745e/setup.py#L30)
# Parse __version__ out of the package rather than importing it.
with open("gofile2/__init__.py", encoding="utf-8") as f:
    v = findall(r"__version__ = \"(.+)\"", f.read())[0]
setup(name='gofile2',
      version=v,
      description='An API wrapper for Gofile API',
      url='https://github.com/Itz-fork/Gofile2',
      author='Itz-fork, Codec04',
      author_email='itz-fork@users.noreply.github.com',
      license='MIT',
      packages=find_packages(),
      download_url=f"https://github.com/Itz-fork/Gofile2/releases/tag/Gofile2-{v}",
      keywords=['Gofile', 'Api-wrapper', 'Gofile2'],
      long_description=big_description,
      long_description_content_type='text/markdown',
      install_requires=reques,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Topic :: Education',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.9',
      ],
      )
|
import sys
import os
from datetime import datetime
demo_dir = os.path.dirname(os.path.abspath(__file__))
taichi_elements_path = os.path.dirname(demo_dir)
sys.path.append(taichi_elements_path)
def create_output_folder(prefix):
    """Create (in the CWD) and return a directory named '<prefix>_YYYY-MM-DD_HH-MM-SS'."""
    stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    name = '{}_{}'.format(prefix, stamp)
    os.mkdir(name)
    return name
|
#! /usr/bin/env python
"""
This file publishes a square trajectory for the kuka
"""
from scara_square_trajectory_publisher import SquareTrajectoryPublisher
import rospy
import rospkg
rospack = rospkg.RosPack()
import sys
sys.path.insert(0, rospack.get_path('first_assignment')+"/scripts")
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Pose, PoseArray
from IK_function import scara_IK
import numpy as np
def main():
    """Publish a square trajectory for the KUKA LWR arm at 10 Hz until shutdown."""
    rospy.init_node('trajectory_publisher')
    topic_name = rospy.get_param('topic_name', 'controller/joint_states')
    rate = rospy.Rate(10)
    trajectory_publisher = SquareTrajectoryPublisher()
    trajectory_publisher.set_topic_name(topic_name)
    # Joint names for the 7-DOF LWR arm.
    trajectory_publisher._joint_names = ['lwr_a1_joint', 'lwr_a2_joint', 'lwr_e1_joint', 'lwr_a3_joint',
                                         'lwr_a4_joint', 'lwr_a5_joint', 'lwr_a6_joint']
    trajectory_publisher._path.header.frame_id = 'lwr_base_link'
    while not rospy.is_shutdown():
        trajectory_publisher.publish_path()
        trajectory_publisher.execute_step()
        rate.sleep()


if __name__ == '__main__':
    main()
import unittest
from queue import Queue, Empty
from unittest.mock import Mock
from cltl.combot.infra.event import Event
from cltl.combot.infra.event.memory import SynchronousEventBus
from cltl.combot.event.emissor import TextSignalEvent
from cltl.eliza.api import Eliza
from cltl_service.eliza.service import ElizaService
class ElizaServiceTest(unittest.TestCase):
    """Tests for ElizaService wired to an in-memory synchronous event bus."""

    def setUp(self) -> None:
        # Mock Eliza so responses are deterministic: "response 0", "response 1", ...
        eliza_mock = unittest.mock.MagicMock(Eliza)
        eliza_mock.respond.side_effect = [f"response {i}" for i in range(10)]
        self.event_bus = SynchronousEventBus()
        self.service = ElizaService("inputTopic", "outputTopic", eliza_mock, self.event_bus, None)

    def tearDown(self) -> None:
        if self.service:
            self.service.stop()

    def test_service_all_utterances(self):
        """The service emits one response on start, then one per input event."""
        events = Queue()

        def handler(ev):
            events.put(ev)

        self.event_bus.subscribe("outputTopic", handler)
        self.service.start()
        # First response is produced on start, before any input is published.
        event = events.get(timeout=1)
        self.assertEqual("TextSignalEvent", event.payload.type)
        self.assertEqual("response 0", event.payload.text)
        # No further output until an input event arrives.
        self.assertRaises(Empty, lambda: events.get(timeout=0.01))
        self.event_bus.publish("inputTopic", Event.for_payload(TextSignalEvent.create("signal id", 1, "bla", [])))
        event = events.get(timeout=1)
        self.assertEqual("TextSignalEvent", event.payload.type)
        self.assertEqual("response 1", event.payload.text)
        self.assertRaises(Empty, lambda: events.get(timeout=0.01))
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
"""---------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------"""
import numpy, re, scipy.special
import bubbleblow
def norm_const(alpha,l):
    """Returns the normalization constant for a Cartesian GTO of given alpha and i,j,k"""
    # returns (2*x-1)!!
    # fact = lambda x: reduce(lambda y,z: y*z,range(1,max(2,2*x),2))
    # NOTE(review): the double-factorial above is disabled and fact is
    # hard-wired to 1, so functions with l > 0 are NOT fully normalized —
    # confirm this is intentional before re-enabling.
    fact = lambda x: 1
    return 2.0**(sum(l)+0.75)* alpha**(0.5*sum(l)+0.75) / numpy.pi**0.75 /numpy.sqrt( fact(l[0]) * fact(l[1]) * fact(l[2]))
# Slater-Bragg covalent radii for the elements used here (H and C).
sb_rad = {1: 0.35, 6: 0.70}


def a_factor(z1, z2):
    """Becke-partitioning 'a' parameter from the radius ratio of atoms z1, z2."""
    chi = sb_rad[z1] / sb_rad[z2]
    u = (chi - 1.0) / (chi + 1.0)
    return u / (u ** 2 - 1.0)
class Gaussian:
    """A contracted Cartesian Gaussian basis function."""

    def __init__(self, coeffs, expos, l):
        self.expos = numpy.array(expos)
        norms = numpy.array([norm_const(alpha, l) for alpha in self.expos])
        self.coeffs = numpy.array(coeffs) * norms
        self.l = numpy.array(l)  # Cartesian powers (l_x, l_y, l_z)

    def __call__(self, pos, ext=False):
        """Evaluate C_i * x**l_x * y**l_y * z**l_z * exp(-alpha*r**2) at *pos*.

        With ext=True the value is damped near the origin by 1 - exp(-4 r^2).
        """
        r_sq = numpy.dot(pos, pos)
        factor = 1. - numpy.exp(-4. * r_sq) if ext else 1.
        radial = sum(self.coeffs * numpy.exp(-self.expos * r_sq))
        return (pos ** self.l).prod() * radial * factor

    def __repr__(self):
        return str(self.expos) + " " + str(self.coeffs) + " " + str(self.l) + "\n"
class Atom:
    """An atom: element name, nuclear charge, coordinates, basis functions and MO coefficients."""

    def __init__(self, name, z, coords):
        self.name = name
        self.z = z
        self.coords = numpy.array(coords)
        self.basis = []  # Gaussian basis functions centred on this atom
        self.mo = []     # per-MO coefficient lists for this atom's basis

    def orbital(self, i, pos):
        """Evaluate basis function *i* at *pos* (shifted to this atom's centre)."""
        return self.basis[i](numpy.array(pos) - self.coords)

    def __str__(self):
        return self.name + " " + repr(self.z) + " " + repr(self.coords)
class WF:
    """A wavefunction read from a molden-format file: atoms, basis set and MOs.

    BUG FIXES relative to the original:
    * ``bubble`` iterated ``for r in enumerate(r_array)`` (so ``r`` was a
      tuple), indexed an undefined ``j`` and scaled a Python list by a float;
    * ``s_f``, ``plot_mo_sq_bub`` and ``bubble`` referenced the module-level
      ``wf`` instance instead of ``self``;
    * the MO-reading loop in ``__init__`` could spin forever at EOF.
    """

    def __init__(self, filename='molden.input'):
        """Initialize a WF from a molden format file."""
        moldenfile = open(filename)
        line = ""
        # Skip the header until the [Atoms] section.
        while not re.match(r"\[Atoms\]", line):
            line = moldenfile.readline()
        # Read atom names, charges and coordinates until [GTO].
        self.atoms = []
        while True:
            line = moldenfile.readline()
            if re.match(r"\[GTO\]", line):
                break
            vals = line.split()
            self.atoms.append(Atom(vals[0], int(vals[2]),
                                   [float(i) for i in vals[3:]]))
        # basis_table[k] = index of the atom basis function k is centred on.
        self.basis_table = []
        # Read basis sets until [MO].
        while True:
            line = moldenfile.readline()
            if re.match(r"\[MO\]", line):
                break
            # Which atom do the following shells belong to (1-based in file)?
            curr_at = int(line.split()[0]) - 1
            while True:
                # Shell type and primitive count; a blank line ends the atom.
                line = moldenfile.readline()
                if re.match(r"\s*$", line):
                    break
                vals = line.split()
                type, nprim = vals[0], int(vals[1])
                expos = []
                coeffs = []
                for i in range(nprim):
                    vals = moldenfile.readline().split()
                    expos.append(float(vals[0]))
                    coeffs.append(float(vals[1]))
                # One Cartesian Gaussian per (l_x,l_y,l_z) triple of the shell.
                for i in expos_from_type(type):
                    self.basis_table.append(curr_at)
                    self.atoms[curr_at].basis.append(Gaussian(coeffs, expos, i))
        self.basis_size = sum([len(i.basis) for i in self.atoms])
        # Read MO coefficients; stop at the first unoccupied orbital.
        self.occs = []
        i = -1
        while True:
            line = moldenfile.readline()
            if not line:
                break  # EOF safeguard: original looped forever here
            match = re.match(" Occup=(.*)", line)
            if not match:
                continue
            i += 1
            occ = float(match.group(1))
            if occ == 0.0:
                break
            self.occs.append(occ)
            for atom in self.atoms:
                atom.mo.append([])
            # The next basis_size lines hold this MO's coefficients.
            for j in range(self.basis_size):
                vals = moldenfile.readline().split()
                self.atoms[self.basis_table[j]].mo[i].append(float(vals[1]))

    def plot_mo(self, i, pos):
        """Return phi_i(r): MO *i* evaluated at *pos*."""
        mo_val = 0.
        for atom in self.atoms:
            x = numpy.array(pos) - atom.coords
            bas = [basis(x) for basis in atom.basis]
            mo_val += sum(numpy.array(atom.mo[i]) * numpy.array(bas))
        return mo_val

    def plot_mo_sq_bub(self, mo_id, atom_id, pos):
        """Return the s-type 'bubble' contribution of MO mo_id on atom atom_id
        at each point in *pos*: bubble^2 plus the 2*bubble*rest cross term,
        where 'rest' is every other basis function evaluated at the bubble centre.
        """
        # BUG FIX: originally referenced the module-level 'wf' instead of self.
        bub = numpy.zeros(len(pos))
        rest = 0.
        for i, atom in enumerate(self.atoms):
            for j, basis in enumerate(atom.basis):
                if i == atom_id:
                    if sum(basis.l) == 0:
                        for k, point in enumerate(pos):
                            x = numpy.array(point)
                            bub[k] += basis(x) * atom.mo[mo_id][j]
                else:
                    x = self.atoms[atom_id].coords - atom.coords
                    rest += basis(x) * atom.mo[mo_id][j]
        return bub * (bub + 2. * rest)

    def dens(self, pos):
        """Return the total electron density |Psi(r)|^2 at *pos*."""
        mo_vals = numpy.zeros(len(self.occs))
        for atom in self.atoms:
            x = numpy.array(pos) - atom.coords
            bas = [basis(x) for basis in atom.basis]
            mo_vals += [sum(numpy.array(j) * bas) for j in atom.mo]
        mo_vals *= mo_vals
        return sum(numpy.array(self.occs) * mo_vals)

    def dens_bub2(self, atom_id, pos):
        """Density at *pos* with atom_id's non-s (l > 0) functions zeroed out."""
        mo_vals = numpy.zeros(len(self.occs))
        for i, atom in enumerate(self.atoms):
            x = numpy.array(pos) - atom.coords
            bas = []
            for basis in atom.basis:
                if (atom == self.atoms[atom_id] and sum(basis.l) > 0):
                    bas.append(0.0)
                else:
                    bas.append(basis(x))
            mo_vals += [sum(numpy.array(j) * bas) for j in atom.mo]
        mo_vals *= mo_vals
        return sum(numpy.array(self.occs) * mo_vals)

    def dens_bub3(self, atom_id, pos):
        """Occupation-weighted sum of plot_mo_sq_bub over all occupied MOs."""
        return sum(numpy.array([self.plot_mo_sq_bub(j, atom_id, pos) * k
                                for j, k in enumerate(self.occs)]))

    def dens_bub(self, atom_id, pos):
        """Return |Psi(r)|^2 using only the basis functions centred at atom_id."""
        mo_vals = numpy.zeros(len(self.occs))
        atom = self.atoms[atom_id]
        x = numpy.array(pos) - atom.coords
        bas = [basis(x) for basis in atom.basis]
        mo_vals += [sum(numpy.array(j) * bas) for j in atom.mo]
        mo_vals *= mo_vals
        return sum(numpy.array(self.occs) * mo_vals)

    def s_f(self, atom_id, pos):
        """Fuzzy weight of atom atom_id at *pos* (erfc cutoff at the Slater radius)."""
        # BUG FIX: originally referenced the module-level 'wf' instead of self.
        a = sb_rad[self.atoms[atom_id].z] / 0.52917726
        return 0.5 * scipy.special.erfc(
            4.0 * (numpy.linalg.norm(self.atoms[atom_id].coords - pos) - a))
        # NOTE(review): everything below is unreachable — it is the older
        # Becke fuzzy-cell weighting, superseded by the erfc cutoff above.
        f = lambda x: 0.5 * x * (3. - x ** 2)
        s = []
        for atom1 in self.atoms:
            refpos = atom1.coords
            s.append(1.0)
            for atom2 in self.atoms:
                if atom2 != atom1:
                    r1 = numpy.linalg.norm(pos - refpos)
                    r2 = numpy.linalg.norm(pos - atom2.coords)
                    r12 = numpy.linalg.norm(refpos - atom2.coords)
                    mu = (r1 - r2) / r12
                    mu = mu + a_factor(atom1.z, atom2.z) * (1. - mu ** 2)
                    for j in range(5):
                        mu = f(mu)
                    s[-1] *= 0.5 * (1. - mu)
        return s[atom_id] / sum(s)

    def bubble(self, atom_id, r_array):
        """Return a density bubble: rho * fuzzy weight, averaged over a small
        spherical-shell quadrature at each radius in *r_array*.

        BUG FIX: the original iterated ``for r in enumerate(r_array)`` (tuple),
        indexed an undefined ``j`` and multiplied a Python list by a float.
        """
        bub = numpy.zeros(len(r_array))
        pts = numpy.array(sampling_points(1))
        n_pts = len(pts)
        for j, r in enumerate(r_array):
            for point in r * pts:
                pos = point + self.atoms[atom_id].coords
                bub[j] += self.dens(pos) * self.s_f(atom_id, pos) / n_pts
        return bub
def sampling_points(i):
"""Return an array of points for integration over the spherical surface
for a sphere of r=1"""
if(i==1):
s=1.0
pts=[[ s,0.0,0.0],[-s,0.0,0.0],[0.0, s,0.0],[0.0,-s,0.0],[0.0,0.0, s],[0.0,0.0,-s]]
else if (i==2):
s=1.0/numpy.sqrt(3.0)
pts= [[0,s,s],[0,s,-s],[0,-s,s],[0,-s,-s],\
[s,0,s],[s,0,-s],[-s,0,s],[-s,0,-s],\
[s,s,0],[s,-s,0],[-s,s,0],[-s,-s,0]]
else if (i==3):
s=1.0/numpy.sqrt(3.0)
pts=[[s,s,s],[s,s,-s],[s,-s,s],[s,-s,-s],[-s,s,s],[-s,s,-s],[-s,-s,s],[-s,-s,-s]]
return pts
def expos_from_type(type):
    """Return the Cartesian power triples (l_x, l_y, l_z) for a shell type.

    s -> 1 function, p -> 3, d -> 6, f -> 10.
    BUG FIX: the original silently returned None for unknown types; raise
    ValueError instead so a bad basis file fails loudly.
    """
    if type == "s":
        return [[0, 0, 0]]
    elif type == "p":
        return [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    elif type == "d":
        return [[2, 0, 0], [0, 2, 0], [0, 0, 2],
                [1, 1, 0], [1, 0, 1], [0, 1, 1]]
    elif type == "f":
        return [[3, 0, 0], [0, 3, 0], [0, 0, 3],
                [1, 2, 0], [2, 1, 0], [2, 0, 1],
                [1, 0, 2], [0, 1, 2], [0, 2, 1],
                [1, 1, 1]]
    raise ValueError("unknown shell type: %r" % (type,))
if __name__ == "__main__":
    # Subtract the atomic bubbles from the total density along the z axis and
    # print: z, rho(z), each bubble's value, and the remainder.
    # BUG FIX: the original used the Python-2 print statement
    # (`print i,d,...`), a SyntaxError on Python 3, and shadowed the grid
    # step `d` with the density value inside the loop. The large blocks of
    # commented-out experiments (orbital dumps, bublib generation, fuzzy
    # bubble averaging) were removed; see version control history.
    wf = WF()
    n = 1000
    maxr = 8.
    step = maxr / n
    z_grid = [-maxr + 2 * maxr / (n - 1) * i for i in range(n)]
    for z in z_grid:
        pos = numpy.array([0.0, 0.0, z])
        rho = wf.dens(pos)
        b = [wf.bubble(j, [numpy.linalg.norm(pos - wf.atoms[j].coords)])[0]
             for j in range(len(wf.atoms))]
        # Assumes a 4-atom molecule, as in the original output columns.
        print(z, rho, b[0], b[1], b[2], b[3], rho - sum(b))
|
"""
Tests for fm/negfc*.py (3D ADI cube)
"""
import copy
from .helpers import aarc, np, parametrize, fixture
from vip_hci.fm import (confidence, firstguess, mcmc_negfc_sampling,
nested_negfc_sampling, nested_sampling_results,
speckle_noise_uncertainty, cube_planet_free,
show_walk_plot, show_corner_plot)
from vip_hci.psfsub import median_sub, pca, pca_annular, pca_annulus
# ====== utility function for injection
@fixture(scope="module")
def injected_cube_position(example_dataset_adi):
    """
    Inject a fake companion into an example cube.

    Parameters
    ----------
    example_dataset_adi : fixture
        Taken automatically from ``conftest.py``.

    Returns
    -------
    dsi : VIP Dataset
    injected_position_yx : tuple(y, x)
    gt : tuple
        Ground-truth parameters of the injection: (radial distance,
        theta, flux) as passed to ``inject_companions``.
    """
    print("injecting fake planet...")
    dsi = copy.copy(example_dataset_adi)
    # we chose a shallow copy, as we will not use any in-place operations
    # (like +=). Using `deepcopy` would be safer, but consume more memory.
    gt = (30, 0, 300)  # (rad_dist, theta, flux) of the fake companion
    dsi.inject_companions(gt[2], rad_dists=gt[0], theta=gt[1])
    return dsi, dsi.injections_yx[0], gt
# ====== Actual negfc tests for different parameters
@parametrize("pca_algo, negfc_algo, ncomp, mu_sigma, fm, force_rpa, conv_test",
             [
                 (pca_annular, firstguess, 3, False, 'stddev', False, None),
                 (pca, firstguess, 3, True, None, False, None),
                 (median_sub, firstguess, None, False, 'sum', False, None),
                 (pca_annulus, mcmc_negfc_sampling, 3, False, 'stddev', False, 'gb'),
                 (pca_annulus, mcmc_negfc_sampling, 5, True, None, True, 'ac'),
                 (pca_annulus, nested_negfc_sampling, 3, False, 'sum', False, None)
             ])
def test_algos(injected_cube_position, pca_algo, negfc_algo, ncomp, mu_sigma,
               fm, force_rpa, conv_test):
    """
    Recover the injected companion's (r, theta, flux) with the given NEGFC
    algorithm and PSF-subtraction algorithm, and check the retrieved values
    against the ground truth within 3 sigma.
    """
    ds, yx, gt = injected_cube_position
    # run firstguess with simplex only if followed by mcmc or nested sampling
    if pca_algo == median_sub:
        algo_options = {'imlib': 'opencv', 'verbose': False}
    else:
        algo_options = {'imlib': 'opencv'}
    res0 = firstguess(ds.cube, ds.angles, ds.psf, ncomp=ncomp,
                      planets_xy_coord=np.array([[yx[1], yx[0]]]), fwhm=ds.fwhm,
                      simplex=negfc_algo == firstguess, algo=pca_algo, fmerit=fm,
                      mu_sigma=mu_sigma, force_rPA=force_rpa,
                      aperture_radius=2, annulus_width=4*ds.fwhm,
                      algo_options=algo_options)
    # firstguess returns per-planet arrays; keep the single planet's values
    res = (res0[0][0], res0[1][0], res0[2][0])
    init = np.array(res)
    if negfc_algo == firstguess:
        # use injection of 180 companions in empty cube to estimate error bars
        cube_emp = cube_planet_free(res, ds.cube, ds.angles, ds.psf,
                                    imlib='opencv')
        algo_options = {'imlib': 'opencv'}
        if pca_algo != median_sub:
            algo_options['ncomp'] = ncomp
        if pca_algo == pca_annular:
            algo_options['radius_int'] = res0[0][0]-2*ds.fwhm
            algo_options['asize'] = 4*ds.fwhm
            algo_options['delta_rot'] = 1
        if pca_algo == pca:
            # just test it once because very slow
            sp_unc = speckle_noise_uncertainty(cube_emp, res,
                                               np.arange(0, 360, 3), ds.angles,
                                               algo=pca_algo, psfn=ds.psf,
                                               fwhm=ds.fwhm, aperture_radius=2,
                                               fmerit=fm, mu_sigma=mu_sigma,
                                               verbose=True, full_output=False,
                                               algo_options=algo_options,
                                               nproc=1)
        else:
            # fallback: fixed, generous uncertainties (2 px, 2 deg, 10% flux)
            sp_unc = (2, 2, 0.1*gt[2])
        # compare results
        for i in range(3):
            aarc(res[i], gt[i], rtol=1e-1, atol=3*sp_unc[i])
    elif negfc_algo == mcmc_negfc_sampling:
        # define fake unit transmission (to test that branch of the algo)
        trans = np.zeros([2, 10])
        trans[0] = np.linspace(0, ds.cube.shape[-1], 10, endpoint=True)
        trans[1, :] = 1
        # run MCMC (more iterations when only flux is fitted)
        if force_rpa:
            niteration_limit = 400
        else:
            niteration_limit = 210
        res = negfc_algo(ds.cube, ds.angles, ds.psf, initial_state=init,
                         algo=pca_algo, ncomp=ncomp, annulus_width=4*ds.fwhm,
                         aperture_radius=2, fwhm=ds.fwhm, mu_sigma=mu_sigma,
                         sigma='spe+pho', fmerit=fm, imlib='opencv',
                         nwalkers=100, niteration_min=200,
                         niteration_limit=niteration_limit, conv_test=conv_test,
                         nproc=1, save=True, transmission=trans,
                         force_rPA=force_rpa, verbosity=2)
        burnin = 0.3
        # discard the burn-in fraction of the chain before inference
        if force_rpa:
            labels = ['f']
            isamples = res[:, int(res.shape[1]//(1/burnin)):, :].reshape((-1, 1))
        else:
            labels = ['r', 'theta', 'f']
            isamples = res[:, int(res.shape[1]//(1/burnin)):, :].reshape((-1, 3))
        show_walk_plot(res, save=True, labels=labels)
        show_corner_plot(res, burnin=burnin, save=True, labels=labels)
        # infer most likely values + confidence intervals
        val_max, ci = confidence(isamples, cfd=68.27, gaussian_fit=False,
                                 verbose=True, save=True, labels=labels)
        # infer mu and sigma from gaussian fit
        mu, sigma = confidence(isamples, cfd=68.27, bins=100, gaussian_fit=True,
                               verbose=True, save=True, labels=labels)
        # make sure it is between 0 and 360 for theta for both mu and gt
        if not force_rpa:
            if val_max['theta']-gt[1] > 180:
                val_max['theta'] -= 360
            elif val_max['theta']-gt[1] < -180:
                val_max['theta'] += 360
            if mu[1]-gt[1] > 180:
                mu[1] -= 360
            elif mu[1]-gt[1] < -180:
                mu[1] += 360
        # compare results for each param
        for i, lab in enumerate(labels):
            if force_rpa:
                # only flux was fitted; ground truth flux is gt[2]
                j = i+2
            else:
                j = i
            ci_max = np.amax(np.abs(ci[lab]))
            aarc(val_max[lab], gt[j], atol=3*ci_max)  # diff within 3 sigma
            aarc(mu[i], gt[j], atol=3*sigma[i])  # diff within 3 sigma
    else:
        # run nested sampling
        res = negfc_algo(init, ds.cube, ds.angles, ds.psf, ds.fwhm,
                         mu_sigma=mu_sigma, sigma='spe', fmerit=fm,
                         annulus_width=4*ds.fwhm, aperture_radius=2,
                         ncomp=ncomp, algo=pca_algo, w=(5, 5, 200),
                         method='single', npoints=100, dlogz=0.1,
                         decline_factor=None, rstate=None, verbose=True,
                         algo_options={'imlib': 'opencv'})
        # infer mu, sigma from nested sampling result
        mu_sig = nested_sampling_results(res, burnin=0.3, bins=None, save=False)
        # compare results for each param
        for i in range(3):
            # diff within 3 sigma
            aarc(mu_sig[i, 0], gt[i], atol=3*mu_sig[i, 1])
|
'''FLAsk support for OIDC Access Tokens -- FLAAT. A set of decorators for authorising
access to OIDC authenticated REST APIs.'''
# This code is distributed under the MIT License
# pylint
# vim: tw=100 foldmethod=indent
# pylint: disable=invalid-name, superfluous-parens
# pylint: disable=logging-not-lazy, logging-format-interpolation, logging-fstring-interpolation
# pylint: disable=wrong-import-position, no-self-use, line-too-long
from functools import wraps
import json
import os
import sys
from itertools import count
is_py2 = sys.version[0] == '2'
if is_py2:
# pylint: disable=import-error
from Queue import Queue, Empty
else:
from queue import Queue, Empty
from threading import Thread
import logging
# Gracefully load modules:
available_web_frameworks = ['flask', 'aiohttp', 'fastapi']
try:
from flask import request
except ModuleNotFoundError:
available_web_frameworks.remove('flask')
try:
from aiohttp import web
except ModuleNotFoundError:
available_web_frameworks.remove('aiohttp')
try:
import asyncio
from fastapi.responses import JSONResponse
except ModuleNotFoundError:
available_web_frameworks.remove('fastapi')
from aarc_g002_entitlement import Aarc_g002_entitlement
from aarc_g002_entitlement import Aarc_g002_entitlement_Error
from aarc_g002_entitlement import Aarc_g002_entitlement_ParseError
from . import tokentools
from . import issuertools
from . import flaat_exceptions
from .caches import Issuer_config_cache
logger = logging.getLogger(__name__)
name = "flaat"
#defaults; May be overwritten per initialisation of flaat
verbose = 0
verify_tls = True
def ensure_is_list(item):
    '''Wrap a bare string into a one-element list; pass anything else through.'''
    return [item] if isinstance(item, str) else item
def check_environment_for_override(env_key):
    '''Override the actual group membership, if environment is set.

    Looks up the environment variable *env_key*; if set, its value is
    interpreted as a JSON list of group/entitlement entries and returned.
    Returns None when the variable is unset or cannot be decoded.
    '''
    # BUGFIX: initialise env_val so the except handlers can reference it
    # safely. Previously, if os.getenv itself raised (e.g. TypeError for a
    # non-string key), env_val was unbound and the logger call raised a
    # NameError that masked the original error.
    env_val = None
    try:
        env_val = os.getenv(env_key)
        if env_val is not None:
            avail_entitlement_entries = json.loads(env_val)
            return avail_entitlement_entries
    except (TypeError, json.JSONDecodeError) as e:
        logger.error(F"Cannot decode JSON group list from the environment:"
                     F"{env_val}\n{e}")
    return None
def formatted_entitlements(entitlements):
    '''Return a human-readable, multi-line rendering of AARC-G002 entitlements.'''
    def render_one(ent):
        """Render one entitlement object as an indented text block."""
        template = (
            ' namespace_id: {namespace_id}'
            '\n delegated_namespace: {delegated_namespace}'
            '\n subnamespaces: {subnamespaces}'
            '\n group: {group}'
            '\n subgroups: {subgroups}'
            '\n role_in_subgroup {role}'
            '\n group_authority: {group_authority}'
        )
        return template.format(
            namespace_id=ent.namespace_id,
            delegated_namespace=ent.delegated_namespace,
            group=ent.group,
            group_authority=ent.group_authority,
            subnamespaces=','.join(['{}'.format(ns) for ns in ent.subnamespaces]),
            subgroups=','.join(['{}'.format(grp) for grp in ent.subgroups]),
            role='{}'.format(ent.role) if ent.role else 'n/a',
        )
    # One block per entitlement, blocks separated by a blank line,
    # the whole result framed by leading and trailing newlines.
    blocks = [render_one(ent) for ent in entitlements]
    return '\n' + '\n\n'.join(blocks) + '\n'
class Flaat():
'''FLAsk support for OIDC Access Tokens.
Provide decorators and configuration for OIDC'''
# pylint: disable=too-many-instance-attributes
def __init__(self):
    """Initialise a Flaat instance with default configuration.

    All attributes can be adjusted afterwards via the set_* methods.
    """
    self.trusted_op_list = None       # list of trusted OP issuer URLs
    self.iss = None                   # single trusted issuer URL
    self.op_hint = None               # regex hint to narrow provider search
    self.trusted_op_file = None       # path to oidc-agent issuer.config
    self.verbose = verbose            # module-level default verbosity
    self.verify_tls = True
    self.client_id = None             # for token introspection only
    self.client_secret = None
    self.last_error = ''
    self.issuer_config_cache = Issuer_config_cache() # maps issuer to issuer configs # formerly issuer_configs
    self.accesstoken_issuer_cache = {} # maps accesstoken to issuer
    self.num_request_workers = 10
    self.client_connect_timeout = 1.2 # seconds
    # No leading slash ('/') in ops_that_support_jwt !!!
    # OPs known to embed the issuer in a JWT access token; these are
    # excluded from brute-force userinfo polling.
    self.ops_that_support_jwt = \
        [ 'https://iam-test.indigo-datacloud.eu',
          'https://iam.deep-hybrid-datacloud.eu',
          'https://iam.extreme-datacloud.eu',
          'https://wlcg.cloud.cnaf.infn.it',
          'https://aai.egi.eu/oidc',
          'https://aai-dev.egi.eu/oidc',
          'https://oidc.scc.kit.edu/auth/realms/kit',
          'https://unity.helmholtz-data-federation.de/oauth2',
          'https://login.helmholtz-data-federation.de/oauth2',
          'https://login-dev.helmholtz.de/oauth2',
          'https://login.helmholtz.de/oauth2',
          'https://b2access.eudat.eu/oauth2',
          'https://b2access-integration.fz-juelich.de/oauth2',
          'https://services.humanbrainproject.eu/oidc',
          'https://login.elixir-czech.org/oidc',
        ]
    # where to look for claims, in order
    self.claim_search_precedence = ['userinfo', 'access_token']
    self.request_id = "unset"
    self.supported_web_frameworks = available_web_frameworks
    # pick the first available framework, in order of preference
    if 'flask' in available_web_frameworks:
        self.web_framework = 'flask'
    elif 'aiohttp' in available_web_frameworks:
        self.web_framework = 'aiohttp'
    elif 'fastapi' in available_web_frameworks:
        self.web_framework = 'fastapi'
    self.raise_error_on_return = True # else just return an error
def get_request_id(self, request_object):
    '''Return a string identifying the request.

    The string combines the client address and the requested URL; its exact
    shape depends on the configured web framework. Returns an empty (or
    partial) string if the request object lacks the expected attributes.
    '''
    # request_object = self._find_request_based_on_web_framework(request, args, kwargs)
    the_id = ""
    try:
        if self.web_framework == "flask":
            the_id = F"{str(request_object.remote_addr)}--" \
                     + str(request_object.base_url)
        elif self.web_framework == "aiohttp":
            the_id = str(request_object.remote) + "--" \
                     + str(request_object.url)
        elif self.web_framework == "fastapi":
            the_id = F"{str(request_object.client.host)}:{str(request_object.client.port)}--" \
                     + str(request_object.url)
    except AttributeError as e:
        # best effort: log and return whatever was assembled so far
        logger.error(F"Cannot identify the request: {e}\n{the_id}")
    return(the_id)
def set_cache_lifetime(self, lifetime):
    '''Set cache lifetime of requests_cache in seconds, default: 300s'''
    issuertools.cache_options.set_lifetime(lifetime)
def set_cache_allowable_codes(self, allowable_codes):
    '''Set the http status codes that will be cached.'''
    issuertools.cache_options.set_allowable_codes(allowable_codes)
def set_cache_backend(self, backend):
    '''Set the cache backend (forwarded to requests_cache).'''
    issuertools.cache_options.backend = backend
def set_trusted_OP(self, iss):
    '''Define OIDC Provider. Must be a valid URL. E.g. 'https://aai.egi.eu/oidc/'
    This should not be required for OPs that put their address into the AT (e.g. keycloak, mitre,
    shibboleth)'''
    # trailing slash is stripped so comparisons with AT issuers are uniform
    self.iss = iss.rstrip('/')
def set_trusted_OP_list(self, trusted_op_list):
    '''Define a list of OIDC provider URLs.
    E.g. ['https://iam.deep-hybrid-datacloud.eu/', 'https://login.helmholtz.de/oauth2/', 'https://aai.egi.eu/oidc/'] '''
    self.trusted_op_list = []
    for issuer in trusted_op_list:
        self.trusted_op_list.append(issuer.rstrip('/'))
    # iss_config = issuertools.find_issuer_config_in_list(self.trusted_op_list, self.op_hint,
    #         exclude_list = [])
    # self.issuer_config_cache.add_list(iss_config)
def set_trusted_OP_file(self, filename='/etc/oidc-agent/issuer.config', hint=None):
    '''Set filename of oidc-agent's issuer.config. Requires oidc-agent to be installed.'''
    self.trusted_op_file = filename
    self.op_hint = hint
def set_OP_hint(self, hint):
    '''String to specify the hint. This is used for regex searching in lists of providers for
    possible matching ones.'''
    self.op_hint = hint
def set_verbosity(self, level):
    '''Verbosity level of flaat:
    0: No output
    1: Errors
    2: More info, including token info
    3: Max'''
    # propagate to the helper modules so their logging matches
    self.verbose = level
    tokentools.verbose = level
    issuertools.verbose = level
def set_verify_tls(self, param_verify_tls=True):
    '''Whether to verify tls connections. Only use for development and debugging'''
    self.verify_tls = param_verify_tls
    issuertools.verify_tls = param_verify_tls
def set_client_id(self, client_id):
    '''Client id. At the moment this one is sent to all matching providers. This is only
    required if you need to access the token introspection endpoint. I don't have a use case for
    that right now.'''
    # FIXME: consider client_id/client_secret per OP.
    self.client_id = client_id
def set_client_secret(self, client_secret):
    '''Client Secret. At the moment this one is sent to all matching providers.'''
    self.client_secret = client_secret
def set_last_error(self, error):
    '''Store an error message'''
    self.last_error = error
def extend_last_error(self, error):
    '''Append *error* to the stored error message (newline-separated).'''
    if self.last_error == '':
        self.last_error = error
    else:
        self.last_error = F"{self.last_error}\n{error}"
def get_last_error(self):
    '''Retrieve the error message.

    NOTE(review): despite the original docstring ("retrieve and clear"),
    clearing is commented out — the message persists until
    self_clear_last_error() is called.
    '''
    retval = self.last_error
    # self.last_error = ''
    return retval
def self_clear_last_error(self):
    '''Clear last error message'''
    self.last_error = ''
def set_num_request_workers(self, num):
    '''Set number of request workers (threads polling userinfo endpoints).'''
    self.num_request_workers = num
    issuertools.num_request_workers = num
def get_num_request_workers(self):
    '''Get number of request workers.'''
    return (self.num_request_workers)
def set_client_connect_timeout(self, num):
    '''Set timeout for flaat connecting to OPs (seconds).'''
    self.client_connect_timeout = num
def get_client_connect_timeout(self):
    '''Get timeout for flaat connecting to OPs (seconds).'''
    return (self.client_connect_timeout)
def set_iss_config_timeout(self, num):
    '''Set timeout for connections to get config from OP (seconds).'''
    issuertools.timeout = num
def get_iss_config_timeout(self):
    '''Get timeout for connections to get config from OP (seconds).'''
    return (issuertools.timeout)
def set_timeout(self, num):
    '''Set global timeouts for http connections (both of the above).'''
    self.set_iss_config_timeout(num)
    self.set_client_connect_timeout(num)
def get_timeout(self):
    '''Get global timeouts for https connections as a (config, connect) tuple.'''
    return ((self.get_iss_config_timeout(), self.get_client_connect_timeout()))
def set_claim_search_precedence(self, a_list):
    '''Set order in which to search for a specific claim.'''
    self.claim_search_precedence = a_list
def get_claim_search_precedence(self):
    '''Get order in which to search for a specific claim.'''
    return (self.claim_search_precedence)
def set_web_framework(self, framework_name):
    '''Specify the web framework. Must be one of the frameworks that could
    be imported at load time ('flask', 'aiohttp', 'fastapi').'''
    if framework_name in self.supported_web_frameworks:
        self.web_framework = framework_name
    else:
        # NOTE(review): exiting the whole process on a bad framework name
        # is drastic — consider raising instead.
        logger.error("Specified Web Framework '%s' is not supported" % framework_name)
        sys.exit (42)
def _find_issuer_config_everywhere(self, access_token):
    '''Use many places to find issuer configs.

    Search order:
      0. the accesstoken -> issuer cache
      1. the issuer embedded in a JWT access token (must be trusted)
      2. the single issuer set via set_trusted_OP
      3. the issuer list set via set_trusted_OP_list
      4. oidc-agent's issuer config file
    Returns a list of issuer configs, or None (last_error is set then).
    '''
    # 0: Use accesstoken_issuer cache to find issuerconfig:
    if self.verbose > 0:
        logger.info('0: Trying to find issuer in cache')
    try:
        issuer = self.accesstoken_issuer_cache[access_token]
        iss_config = self.issuer_config_cache.get(issuer)
        if self.verbose > 1:
            logger.info(F" 0: returning {iss_config['issuer']}")
        return [iss_config]
    except KeyError:
        # issuer not found in cache
        pass
    # 1: find info in the AT
    if self.verbose > 0:
        logger.info('1: Trying to find issuer in access_token')
    at_iss = tokentools.get_issuer_from_accesstoken_info(access_token)
    if at_iss is not None:
        # BUGFIX: copy the trusted list instead of aliasing it. The previous
        # code assigned trusted_op_list_buf = self.trusted_op_list and then
        # appended self.iss to it, permanently growing the instance's
        # trusted_op_list on every call.
        trusted_op_list_buf = []
        if self.trusted_op_list is not None:
            if len(self.trusted_op_list) > 0:
                trusted_op_list_buf = list(self.trusted_op_list)
        if self.iss is not None:
            trusted_op_list_buf.append(self.iss)
        if at_iss.rstrip('/') not in trusted_op_list_buf:
            logger.warning(F'The issuer {at_iss} of the received access_token is not trusted')
            self.set_last_error(F'The issuer {at_iss} of the received access_token is not trusted')
            return None
        iss_config = issuertools.find_issuer_config_in_at(access_token)
        if iss_config is not None:
            return [iss_config]
    # 2: use a provided string
    if self.verbose > 0:
        logger.info('2: Trying to find issuer from "set_iss"')
    iss_config = issuertools.find_issuer_config_in_string(self.iss)
    if iss_config is not None:
        return [iss_config]
    # 3: Try the provided list of providers:
    if self.verbose > 0:
        logger.info('3: Trying to find issuer from trusted_op_list')
    iss_config = issuertools.find_issuer_config_in_list(self.trusted_op_list, self.op_hint,
                                                        exclude_list = self.ops_that_support_jwt)
    if iss_config is not None:
        return iss_config
    # 4: Try oidc-agent's issuer config file
    if self.verbose > 0:
        logger.info('Trying to find issuer from "set_OIDC_provider_file"')
    iss_config = issuertools.find_issuer_config_in_file(self.trusted_op_file, self.op_hint,
                                                        exclude_list = self.ops_that_support_jwt)
    if iss_config is not None:
        return iss_config
    self.set_last_error("Issuer config not found")
    return None
# def verify_at_is_from_truested_iss(self, access_token):
# '''verify that the AT is issued by a trusted issuer'''
def get_info_thats_in_at(self, access_token):
    # FIXME: Add here parameter verify=True, then go and verify the token
    '''Return the information contained inside the access_token itself.

    Returns None for an empty/None token; otherwise whatever
    tokentools.get_accesstoken_info extracts from it.
    '''
    accesstoken_info = None
    if access_token:
        accesstoken_info = tokentools.get_accesstoken_info(access_token)
    # at_head=None
    # at_body=None
    # if accesstoken_info is not None and not {}:
    #     at_head = accesstoken_info['header']
    #     at_body = accesstoken_info['body']
    # return (at_head, at_body)
    return (accesstoken_info)
def get_issuer_from_accesstoken(self, access_token):
    '''Get the issuer that issued the accesstoken.

    Tries the cache first; on a miss, polls the userinfo endpoints (which
    populates the cache as a side effect) and retries. Returns None if the
    issuer still cannot be determined.
    '''
    try:
        issuer = self.accesstoken_issuer_cache[access_token]
        return(issuer)
    except KeyError:
        # update the accesstoken_issuer_cache:
        self.get_info_from_userinfo_endpoints(access_token)
        try:
            issuer = self.accesstoken_issuer_cache[access_token]
            return(issuer)
        except KeyError:
            return None
def get_info_from_userinfo_endpoints(self, access_token):
    '''Traverse all reasonable configured userinfo endpoints and query them with the
    access_token. Note: For OPs that include the iss inside the AT, they will be directly
    queried, and are not included in the search (because that makes no sense).
    Returns user_info object or None. If None is returned self.last_error is set with a
    meaningful message.
    Also updates
    - accesstoken_issuer_cache
    - issuer_config_cache
    '''
    # user_info = "" # return value
    user_info = None # return value
    # get a sensible issuer config. In case we don't have a jwt AT, we poll more OPs
    issuer_config_list = self._find_issuer_config_everywhere(access_token)
    self.issuer_config_cache.add_list(issuer_config_list)
    # If there is no issuer in the cache by now, we're dead
    if len(self.issuer_config_cache) == 0 :
        logger.warning('No issuer config found, or issuer not supported')
        return None
    # get userinfo: fan out one request per issuer config over a thread pool
    param_q  = Queue(self.num_request_workers*2)
    result_q = Queue(self.num_request_workers*2)
    def thread_worker_get_userinfo():
        '''Thread worker: pull (token, issuer_config) jobs until the queue
        stays empty for 5s, then exit.'''
        def safe_get(q):
            try:
                return q.get(timeout=5)
            except Empty:
                return None
        while True:
            item = safe_get(param_q)
            if item is None:
                break
            result = issuertools.get_user_info(item['access_token'], item['issuer_config'])
            result_q.put(result)
            param_q.task_done()
            # NOTE(review): task_done() on result_q right after put() looks
            # unusual — it makes result_q.join() below return immediately
            # rather than waiting for consumption. Verify intent.
            result_q.task_done()
    for i in range (self.num_request_workers):
        t = Thread(target=thread_worker_get_userinfo)
        t.daemon = True
        t.start()
    if self.verbose > 0:
        logger.debug (F"len of issuer_config_cache: {len(self.issuer_config_cache)}")
    for issuer_config in self.issuer_config_cache:
        # logger.info(F"tyring to get userinfo from {issuer_config['issuer']}")
        # user_info = issuertools.get_user_info(access_token, issuer_config)
        params = {}
        params['access_token'] = access_token
        params['issuer_config'] = issuer_config
        param_q.put(params)
    # Collect results from threadpool
    param_q.join()
    result_q.join()
    try:
        while not result_q.empty():
            retval = result_q.get(block=False, timeout=self.client_connect_timeout)
            if retval is not None:
                # first successful endpoint wins; remember its issuer
                (user_info, issuer_config) = retval
                issuer = issuer_config['issuer']
                if self.verbose > 1:
                    logger.debug(F"got issuer: {issuer}")
                self.issuer_config_cache.add_config(issuer, issuer_config)
                # logger.info(F"storing in accesstoken cache: {issuer} -=> {access_token}")
                self.accesstoken_issuer_cache[access_token] = issuer
                return (user_info)
    except Empty:
        logger.info("EMPTY result in thead join")
        # pass
    except Exception as e:
        logger.error("Error: Uncaught Exception: {}".format(str(e)))
    if user_info is None:
        self.set_last_error ("User Info not found or not accessible. Something may be wrong with the Access Token.")
    return(user_info)
def get_info_from_introspection_endpoints(self, access_token):
    '''If there's a client_id and client_secret defined, we access the token introspection
    endpoint and return the info obtained from there.

    Tries each cached issuer config in turn and returns the first non-None
    introspection result, or None.
    '''
    # get introspection_token
    introspection_info = None
    issuer_config_list = self._find_issuer_config_everywhere(access_token)
    self.issuer_config_cache.add_list(issuer_config_list)
    if len(self.issuer_config_cache) == 0 :
        logger.info("Issuer Configs yielded None")
        self.set_last_error("Issuer of Access Token is not supported")
        return None
    for issuer_config in self.issuer_config_cache:
        introspection_info = issuertools.get_introspected_token_info(access_token, issuer_config,
                self.client_id, self.client_secret)
        if introspection_info is not None:
            break
    return(introspection_info)
def get_all_info_by_at(self, access_token):
    '''Collect all possible user info and return them as one json
    object.

    Merges info found inside the AT itself, from userinfo endpoints, and
    from introspection endpoints. Returns None (with last_error set) if the
    token is missing, expired, or no userinfo could be obtained.
    '''
    if access_token is None:
        self.set_last_error('No access token found')
        return None
    accesstoken_info   = self.get_info_thats_in_at(access_token)
    user_info          = self.get_info_from_userinfo_endpoints(access_token)
    introspection_info = self.get_info_from_introspection_endpoints(access_token)
    # FIXME: We have to verify the accesstoken
    # And verify that it comes from a trusted issuer!!
    if accesstoken_info is not None:
        timeleft = tokentools.get_timeleft(accesstoken_info)
        if timeleft < 0:
            self.set_last_error('Token expired for %d seconds' % abs(timeleft))
            return None
    if user_info is None:
        return None
    # return tokentools.merge_tokens ([accesstoken_info['header'], accesstoken_info['body'], user_info, introspection_info])
    return tokentools.merge_tokens ([accesstoken_info, user_info, introspection_info])
def _find_request_based_on_web_framework(self, request, args, kwargs):
'''use configured web_framework and return the actual request object'''
if self.web_framework == 'flask':
return request
if self.web_framework == 'aiohttp':
return args[0]
if self.web_framework == 'fastapi':
return kwargs["request"]
return None
def _return_formatter_wf(self, return_value, status=200):
    '''Return the object appropriate for the chosen web framework.

    If raise_error_on_return is set, errors are raised as the framework's
    flaat exception type instead of being returned as a response.
    '''
    if status != 200:
        logger.error(F'Incoming request [{self.request_id}] http status: {status} - {self.get_last_error()}')
    if self.raise_error_on_return:
        if self.web_framework == 'flask':
            raise flaat_exceptions.FlaatExceptionFlask(reason=return_value, status_code=status)
        if self.web_framework == 'aiohttp':
            raise flaat_exceptions.FlaatExceptionAio(reason=return_value, status_code=status)
        if self.web_framework == 'fastapi':
            raise flaat_exceptions.FlaatExceptionFastapi(reason=return_value, status_code=status)
    else:
        if self.web_framework == 'flask':
            return (return_value, status)
        if self.web_framework == 'aiohttp':
            return web.Response(text=return_value, status=status)
        if self.web_framework == 'fastapi':
            return JSONResponse(content=return_value, status_code=status)
    #return return_value
    # unreachable for supported frameworks; defensive fallback
    return None
def _get_all_info_from_request(self, param_request):
    '''Gather all info about the user that we can find.
    Returns a "supertoken" json structure, or None (last_error set).'''
    access_token = tokentools.get_access_token_from_request(param_request)
    if access_token is None:
        self.set_last_error("No Access Token Found.")
        return None
    # logger.info (F"access_token: {access_token}")
    return self.get_all_info_by_at(access_token)
def _wrap_async_call(self, func, *args, **kwargs):
    '''Wrap function call so that it is awaited when necessary,
    depending on the web framework used.
    '''
    def get_or_create_eventloop():
        # NOTE(review): implicitly returns None if the RuntimeError message
        # does not match — verify this can never happen in practice.
        try:
            return asyncio.get_event_loop()
        except RuntimeError as ex:
            if "There is no current event loop in thread" in str(ex):
                loop = asyncio.new_event_loop()
                asyncio.set_event_loop(loop)
                return asyncio.get_event_loop()
    if self.web_framework == 'fastapi':
        # FastAPI views may be coroutines; run them to completion here
        if (asyncio.iscoroutine(func) or asyncio.iscoroutinefunction(func)):
            return get_or_create_eventloop().run_until_complete(func(*args, **kwargs))
    logger.info(F'Incoming request [{self.request_id}] Success')
    return func(*args, **kwargs)
def login_required(self, on_failure=None):
    '''Decorator to enforce a valid login.
    Optional on_failure is a function that will be invoked if there was no valid user detected.
    Useful for redirecting to some login page'''
    def wrapper(view_func):
        @wraps(view_func)
        def decorated(*args, **kwargs):
            # test shortcut: skip authentication entirely if requested
            try:
                if os.environ['DISABLE_AUTHENTICATION_AND_ASSUME_AUTHENTICATED_USER'].lower() == 'yes':
                    return self._wrap_async_call(view_func, *args, **kwargs)
            except KeyError: # i.e. the environment variable was not set
                pass
            request_object = self._find_request_based_on_web_framework(request, args, kwargs)
            self.request_id = self.get_request_id(request_object)
            all_info = self._get_all_info_from_request(request_object)
            if all_info is None:
                # authentication failed
                if self.verbose > 0:
                    self.extend_last_error(F"No information about user found in {str(self.get_claim_search_precedence())}")
                logger.warning(self.get_last_error())
                # BUGFIX: on_failure used to be checked AFTER this branch
                # returned, i.e. on the *success* path, where it returned a
                # 401 for every authenticated request when on_failure was
                # set. It must only run when no valid user was found
                # (matching group_required's structure).
                if on_failure:
                    return self._return_formatter_wf(on_failure(self.get_last_error()), 401)
                return self._return_formatter_wf(\
                        ('No valid authentication found: %s' % self.get_last_error()), 401)
            return self._wrap_async_call(view_func, *args, **kwargs)
        return decorated
    return wrapper
def _determine_number_of_required_matches(self, match, req_group_list):
    '''Determine the number of required matches from parameters.

    match may be 'all' (every entry of req_group_list), 'one' (any single
    entry), or an integer (capped at len(req_group_list)). Returns None for
    any other value, which callers treat as a configuration error.
    '''
    # How many matches do we need?
    required_matches = None
    if match == 'all':
        required_matches = len(req_group_list)
    if match == 'one':
        required_matches = 1
    if isinstance (match, int):
        required_matches = match
        if required_matches > len(req_group_list):
            required_matches = len(req_group_list)
    if self.verbose > 1:
        logger.info('    required matches: {}'.format(required_matches))
    return required_matches
def _get_entitlements_from_claim(self, all_info, claim):
    '''Extract groups / entitlements from the given claim.

    Searches the locations in self.claim_search_precedence (userinfo and/or
    access_token body) and returns a tuple (entries, error_message) where
    exactly one element is None. A non-list claim value is wrapped into a
    one-element list (with a warning).
    '''
    # search group / entitlement entries in specified claim (in userinfo or access_token)
    for location in self.claim_search_precedence:
        avail_group_entries = None
        if location == "userinfo":
            avail_group_entries = all_info.get(claim)
        if location == "access_token":
            avail_group_entries = all_info['body'].get(claim)
        if avail_group_entries is not None:
            break
    if avail_group_entries is None:
        self.set_last_error('Not authorised (claim does not exist: "%s")' % claim)
        if self.verbose:
            logger.warning('Claim does not exist: "%s".' % claim)
            logger.debug(json.dumps(all_info, sort_keys=True, indent=4, separators=(',', ': ')))
        return (None, self.get_last_error())
    if not isinstance(avail_group_entries, list):
        self.set_last_error('Not authorised (claim does not point to a list: "%s")' % avail_group_entries)
        if self.verbose:
            logger.debug('Claim does not point to a list: "%s".' % avail_group_entries)
            logger.debug(json.dumps(all_info, sort_keys=True, indent=4, separators=(',', ': ')))
        # tolerate scalar claims by wrapping them
        avail_group_entries = [avail_group_entries]
    return (avail_group_entries, None)
def group_required(self, group=None, claim=None, on_failure=None, match='all'):
    '''Decorator to enforce membership in a given group.
    group is the name (or list) of the group to match
    claim is the claim (in userinfo or access_token) holding the groups
    match specifies how many of the given groups must be matched. Valid values for match are
    'all', 'one', or an integer
    on_failure is a function that will be invoked if there was no valid user detected.
    Useful for redirecting to some login page'''
    def wrapper(view_func):
        @wraps(view_func)
        def decorated(*args, **kwargs):
            # test shortcut: skip group checking entirely if requested
            try:
                if os.environ['DISABLE_AUTHENTICATION_AND_ASSUME_VALID_GROUPS'].lower() == 'yes':
                    return self._wrap_async_call(view_func, *args, **kwargs)
            except KeyError: # i.e. the environment variable was not set
                pass
            user_message = 'Not enough required group memberships found.'
            request_object = self._find_request_based_on_web_framework(request, args, kwargs)
            self.request_id = self.get_request_id(request_object)
            all_info = self._get_all_info_from_request(request_object)
            if all_info is None:
                # authentication failed -> 401
                if on_failure:
                    return self._return_formatter_wf(on_failure(self.get_last_error()), 401)
                return self._return_formatter_wf('No valid authentication found. %s' % self.get_last_error(), 401)
            req_group_list = ensure_is_list (group)
            required_matches = self._determine_number_of_required_matches(match, req_group_list)
            if not required_matches:
                logger.error('Error interpreting the "match" parameter')
                return self._return_formatter_wf('Error interpreting the "match" parameter', 403)
            if self.verbose > 1:
                logger.debug(json.dumps(all_info, sort_keys=True, indent=4, separators=(',', ': ')))
            # copy entries from incoming claim
            (avail_group_entries, user_message) = self._get_entitlements_from_claim(all_info, claim)
            # test override: environment may replace the actual memberships
            override_group_entries = check_environment_for_override('DISABLE_AUTHENTICATION_AND_ASSUME_GROUPS')
            if override_group_entries is not None:
                avail_group_entries = override_group_entries
            if not avail_group_entries:
                return self._return_formatter_wf(user_message, 403)
            # now we do the actual checking: exact string equality
            matches_found = 0
            for entry in avail_group_entries:
                for g in req_group_list:
                    if entry == g:
                        matches_found += 1
            if self.verbose > 0:
                logger.info('found %d of %d matches' % (matches_found, required_matches))
            if self.verbose > 1:
                logger.info(F'Available Groups: {str(avail_group_entries)}')
                logger.info(F'Required Groups: {str(req_group_list)}')
            if matches_found >= required_matches:
                return self._wrap_async_call(view_func, *args, **kwargs)
            user_message = 'You are not authorised'
            # Either we returned above or there was no matching group
            if on_failure:
                return self._return_formatter_wf(on_failure(user_message), 403)
            return self._return_formatter_wf(user_message+ self.get_last_error(), 403)
        return decorated
    return wrapper
def aarc_g002_entitlement_required(self, entitlement=None, claim=None, on_failure=None, match='all'):
    '''Decorator to enforce membership in a given group defined according to AARC-G002.
    entitlement is the name (or list) of the entitlement to match
    match specifies how many of the given groups must be matched. Valid values for match are
    'all', 'one', or an integer
    on_failure is a function that will be invoked if there was no valid user detected.
    Useful for redirecting to some login page'''
    # thin alias: delegates entirely to aarc_g002_group_required
    return self.aarc_g002_group_required(entitlement, claim, on_failure, match)
def aarc_g002_group_required(self, group=None, claim=None, on_failure=None, match='all'):
    '''Decorator to enforce membership in a given group defined according to AARC-G002.

    group is the name (or list) of the entitlement to match
    claim is the claim in the user info that carries the entitlements
    match specifies how many of the given groups must be matched. Valid values for match are
    'all', 'one', or an integer
    on_failure is a function that will be invoked if there was no valid user detected.
    Useful for redirecting to some login page'''
    # rename for clarity, don't use group below
    entitlement = group
    del(group)

    def wrapper(view_func):
        @wraps(view_func)
        def decorated(*args, **kwargs):
            # Global bypass switch, e.g. for local development.
            try:
                if os.environ['DISABLE_AUTHENTICATION_AND_ASSUME_AUTHENTICATED_USER'].lower() == 'yes':
                    return self._wrap_async_call(view_func, *args, **kwargs)
            except KeyError:  # i.e. the environment variable was not set
                pass
            user_message = 'Not enough required entitlements found.'
            request_object = self._find_request_based_on_web_framework(request, args, kwargs)
            self.request_id = self.get_request_id(request_object)
            all_info = self._get_all_info_from_request(request_object)
            if all_info is None:
                if on_failure:
                    return self._return_formatter_wf(on_failure(self.get_last_error()), 401)
                return self._return_formatter_wf('No valid authentication found. %s' % self.get_last_error(), 401)
            req_entitlement_list = ensure_is_list(entitlement)
            required_matches = self._determine_number_of_required_matches(match, req_entitlement_list)
            if not required_matches:
                logger.error('Error interpreting the "match" parameter')
                return self._return_formatter_wf('Error interpreting the "match" parameter', 403)
            if self.verbose > 1:
                logger.debug(json.dumps(all_info, sort_keys=True, indent=4, separators=(',', ': ')))
            # copy entries from incoming claim
            (avail_entitlement_entries, user_message) = self._get_entitlements_from_claim(all_info, claim)
            override_entitlement_entries = check_environment_for_override('DISABLE_AUTHENTICATION_AND_ASSUME_ENTITLEMENTS')
            if override_entitlement_entries is not None:
                avail_entitlement_entries = override_entitlement_entries
            if not avail_entitlement_entries:
                return self._return_formatter_wf(user_message, 403)
            if self.verbose > 1:
                logger.info(F'Available Entitlements: {str(avail_entitlement_entries)}')
                logger.info(F'Required Entitlements: {str(req_entitlement_list)}')

            # generate entitlement objects from input strings
            def e_expander(es):
                """Helper function to catch exceptions in list comprehension"""
                try:
                    return Aarc_g002_entitlement(es, strict=False)
                except ValueError:
                    return None
                except Aarc_g002_entitlement_ParseError:
                    return None
                except Aarc_g002_entitlement_Error:
                    return None

            # BUGFIX: e_expander used to be invoked twice per entry (once in the
            # filter, once for the value); parse each entry exactly once.
            try:
                avail_entitlements = [e for e in (e_expander(es) for es in avail_entitlement_entries) if e is not None]
            except ValueError as e:
                logger.error(F"Failed to parse available entitlements: {e}")
                logger.error(F"  available entitlement_entries: {avail_entitlement_entries}")
            try:
                req_entitlements = [e for e in (e_expander(es) for es in req_entitlement_list) if e is not None]
            except ValueError as e:
                logger.error(F"Failed to parse required entitlement(s): {e}")
                logger.error(F"  required entitlement_list: {req_entitlement_list}")
            if self.verbose > 1:
                logger.info(F'Available Entitlements: {formatted_entitlements(avail_entitlements)}')
                logger.info(F'Required Entitlements: {formatted_entitlements(req_entitlements)}')
            # now we do the actual checking
            matches_found = 0
            for required in req_entitlements:
                for avail in avail_entitlements:
                    if required.is_contained_in(avail):
                        matches_found += 1
            if self.verbose > 0:
                logger.info('found %d of %d matches' % (matches_found, required_matches))
            if matches_found >= required_matches:
                return self._wrap_async_call(view_func, *args, **kwargs)
            user_message = 'You are not authorised'
            # Either we returned above or there was no matching entitlement
            if on_failure:
                return self._return_formatter_wf(on_failure(user_message), 403)
            return self._return_formatter_wf(user_message, 403)
        return decorated
    return wrapper
|
# Simulation settings used when an RL agent controls the traffic signals.
sim_setting_control = {
    "interval": 1.0,  # seconds of each step
    "threadNum": 1,  # this .so is single thread version, this parameter is useless
    "saveReplay": True,  # set to True if you want to replay the traffic in GUI
    "rlTrafficLight": True,  # set to True to control the signal
    "changeLane": False,  # set to False if changing lane is not considered
}
# Baseline settings: fixed-time signal plan, RL control disabled.
sim_setting_default = {
    "interval": 1.0,  # seconds of each step
    "threadNum": 1,  # this .so is single thread version, this parameter is useless
    "saveReplay": True,  # set to True if you want to replay the traffic in GUI
    "rlTrafficLight": False,  # set to False to control the signal by default
    "changeLane": False,  # set to False if changing lane is not considered
    "plan": [5, 30, 30, 30, 30, 30, 30, 30, 30]  # fixed phase durations - presumably seconds per phase, TODO confirm
}
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2021/7/24 21:45
# @author : Mo
# @function: Layer and Loss
from torch import nn
import numpy as np
import torch
# Public API of this losses/layers module.
__all__ = ["PriorMultiLabelSoftMarginLoss",
           "LabelSmoothingCrossEntropyV2",
           "LabelSmoothingCrossEntropy",
           "MultiLabelCircleLoss",
           "FocalLoss",
           "DiceLoss",
           "FCLayer",
           "Swish",
           "Mish",
           ]
class PriorMultiLabelSoftMarginLoss(nn.Module):
    def __init__(self, prior=None, num_labels=None, reduction="mean", eps=1e-9, tau=1.0):
        """Multi-label soft-margin loss with a class-prior logit adjustment.

        categorical-crossentropy-with-prior
        urls: [mutual-information view of class imbalance](https://spaces.ac.cn/archives/7615)
        args:
            prior: List<float> or np.ndarray, prior probability of each label. eg. [0.6, 0.2, 0.1, 0.1]
            num_labels: int, number of labels; used to build a uniform prior
                when ``prior`` is not given. eg. 10
            reduction: str, reduction applied to the output:
                ``'none'`` | ``'mean'`` | ``'sum'``.
            eps: float, numerical floor to avoid log(0). eg. 1e-9
            tau: float, weight of the prior term in the loss. eg. ``1.0``
        returns:
            Tensor of loss.
        examples:
            >>> loss = PriorMultiLabelSoftMarginLoss(prior)(logits, label)
        """
        super(PriorMultiLabelSoftMarginLoss, self).__init__()
        self.loss_mlsm = torch.nn.MultiLabelSoftMarginLoss(reduction=reduction)
        # BUGFIX: ``if not prior:`` raises "truth value of an array is
        # ambiguous" for a multi-element numpy prior; test against None.
        if prior is None:
            prior = np.array([1 / num_labels for _ in range(num_labels)])  # uniform fallback
        if isinstance(prior, list):
            prior = np.array(prior)
        # log-prior is broadcast over the batch dimension in forward().
        self.log_prior = torch.tensor(np.log(prior + eps)).unsqueeze(0)
        self.eps = eps
        self.tau = tau

    def forward(self, logits, labels):
        # Shift logits by the (weighted) log-prior, on the labels' device.
        logits = logits + self.tau * self.log_prior.to(labels.device)
        loss = self.loss_mlsm(logits, labels)
        return loss
class LabelSmoothingCrossEntropyV2(nn.Module):
    """Label-smoothed cross entropy (autograd version).

    This is the autograd version; see LabelSmoothSoftmaxCEV2 for the
    derived-gradient variant.
    url: https://github.com/CoinCheung/pytorch-loss
    examples:
        >>> criteria = LabelSmoothingCrossEntropyV2()
        >>> logits = torch.randn(8, 19, 384, 384)  # nchw, float/half
        >>> lbs = torch.randint(0, 19, (8, 384, 384))  # nhw, int64_t
        >>> loss = criteria(logits, lbs)
    """

    def __init__(self, lb_smooth=0.1, reduction="mean", ignore_index=-100):
        super(LabelSmoothingCrossEntropyV2, self).__init__()
        self.log_softmax = nn.LogSoftmax(dim=1)
        self.lb_ignore = ignore_index
        self.lb_smooth = lb_smooth
        self.reduction = reduction

    def forward(self, logits, label):
        logits = logits.float()  # force fp32 to avoid NaNs with half inputs
        with torch.no_grad():
            n_classes = logits.size(1)
            target = label.clone().detach()
            ignore_mask = target.eq(self.lb_ignore)
            n_valid = ignore_mask.eq(0).sum()
            # Temporarily map ignored positions to class 0 so scatter_ is valid.
            target[ignore_mask] = 0
            on_value = 1. - self.lb_smooth
            off_value = self.lb_smooth / n_classes
            smooth_target = torch.empty_like(logits).fill_(off_value)
            smooth_target = smooth_target.scatter_(1, target.unsqueeze(1), on_value).detach()
        per_elem = -torch.sum(self.log_softmax(logits) * smooth_target, dim=1)
        per_elem[ignore_mask] = 0  # ignored positions contribute nothing
        if self.reduction == "mean":
            return per_elem.sum() / n_valid  # average over valid positions only
        if self.reduction == "sum":
            return per_elem.sum()
        return per_elem
class LabelSmoothingCrossEntropyV1(nn.Module):
    def __init__(self, eps=0.1, reduction="mean", ignore_index=-100):
        """Label-smoothed sigmoid cross entropy applied directly to raw logits.

        (Smoothing the logits themselves tends to work poorly; kept for reference.)
        urls: [pytorch | labelSmooth](https://zhuanlan.zhihu.com/p/265704145)
        args:
            ignore_index: (int, optional): stored but currently unused by forward().
            reduction: str, reduction applied to the output:
                ``'none'`` | ``'mean'`` | ``'sum'``.
            eps: float, smoothing strength in [0, 1]. eg. 0.1
        returns:
            Tensor of loss.
        examples:
            >>> loss = LabelSmoothingCrossEntropyV1()(logits, label)
        """
        super(LabelSmoothingCrossEntropyV1, self).__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.eps = eps

    def forward(self, logits, labels):  # logits --- logistic unit
        # V = number of "other" classes the smoothing mass is spread over.
        V = max(logits.size()[-1] - 1, 1)
        logits_smooth = (1 - self.eps) * logits + self.eps / V
        log_pos = torch.nn.functional.logsigmoid(logits_smooth)
        # BUGFIX: the negative term previously reused logsigmoid(logits_smooth),
        # making the loss independent of ``labels``; it must use the
        # complementary log-probability logsigmoid(-logits_smooth).
        log_neg = torch.nn.functional.logsigmoid(-logits_smooth)
        loss = -(labels * log_pos + (1 - labels) * log_neg)
        loss = loss.sum(dim=1)  # one loss value per sample
        if "mean" == self.reduction:
            loss = loss.mean()
        elif "sum" == self.reduction:
            loss = loss.sum()
        # BUGFIX: the former ``else: _`` raised NameError for reduction="none";
        # any other reduction now returns the per-sample losses unchanged.
        return loss
class LabelSmoothingCrossEntropy(nn.Module):
    def __init__(self, eps=0.1, reduction="mean", ignore_index=-100):
        """Label-smoothed sigmoid cross entropy over logits.

        Smooths the loss term (post log-sigmoid), not the logits themselves.
        args:
            ignore_index: (int, optional): stored but currently unused by forward().
            reduction: str, reduction applied to the output:
                ``'none'`` | ``'mean'`` | ``'sum'``.
            eps: float, smoothing strength in [0, 1]. eg. 0.1
        returns:
            Tensor of loss.
        examples:
            >>> loss = LabelSmoothingCrossEntropy()(logits, label)
        """
        super(LabelSmoothingCrossEntropy, self).__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.eps = eps

    def forward(self, logits, labels):
        # V = number of "other" classes the smoothing mass is spread over.
        V = max(logits.size()[-1] - 1, 1)
        # Binary cross entropy per class, down-weighted by (1-eps) plus a
        # constant smoothing term eps/V.
        loss = (1 - self.eps) * (-(labels * torch.nn.functional.logsigmoid(logits) +
                                   (1 - labels) * torch.nn.functional.logsigmoid(-logits))) + self.eps / V
        loss = loss.sum(dim=1) / logits.size(1)  # per-sample mean over classes
        if "mean" == self.reduction:
            loss = loss.mean()
        elif "sum" == self.reduction:
            loss = loss.sum()
        # BUGFIX: the former ``else: _`` raised NameError for reduction="none";
        # any other reduction now returns the per-sample losses unchanged.
        return loss
class MultiLabelCircleLoss(nn.Module):
    def __init__(self, reduction="mean", inf=1e12):
        """Circle loss for multi-label classification (N-choose-K setting).

        A multi-label generalisation of softmax+cross-entropy: pushes every
        target-class score above every non-target-class score with a margin
        anchored at 0 (the appended "threshold" column). The gradient of the
        LSE function is exactly softmax.
        urls: [softmax+CE generalised to multi-label](https://spaces.ac.cn/archives/7359)
        args:
            reduction: str, reduction applied to the output:
                ``'none'`` | ``'mean'`` | ``'sum'``.
            inf: float, large constant used to mask out entries. eg. 1e12
        returns:
            Tensor of loss.
        examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).float(), torch.tensor(logits).float()
            >>> loss = MultiLabelCircleLoss()(logits, label)
        """
        super(MultiLabelCircleLoss, self).__init__()
        self.reduction = reduction
        self.inf = inf  # effectively -inf after subtraction, masks entries out

    def forward(self, logits, labels):
        # Flip the sign of target-class scores so a single logsumexp handles
        # "max non-target score" and "-min target score" symmetrically.
        signed = (1 - 2 * labels) * logits
        neg_scores = signed - labels * self.inf          # mask out target classes
        pos_scores = signed - (1 - labels) * self.inf    # mask out non-target classes
        pad = torch.zeros_like(signed[..., :1])          # the zero "threshold" column
        neg_term = torch.logsumexp(torch.cat([neg_scores, pad], dim=-1), dim=-1)
        pos_term = torch.logsumexp(torch.cat([pos_scores, pad], dim=-1), dim=-1)
        total = neg_term + pos_term  # pos_term > 0 > neg_term at optimum
        if "mean" == self.reduction:
            return total.mean()
        return total.sum()
class FocalLoss(nn.Module):
    def __init__(self, alpha=0.5, gamma=2, reduction="mean"):
        """Focal loss over raw logits; alpha=0.5 is a reasonable default when unsure.

        url: https://github.com/CoinCheung/pytorch-loss
        Usage is same as nn.BCEWithLogits:
            >>> loss = criteria(logits, lbs)
        """
        super(FocalLoss, self).__init__()
        self.reduction = reduction
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, logits, labels):
        probs = torch.sigmoid(logits)
        # Modulating factor: -(|y - p| ** gamma) down-weights easy examples.
        modulator = torch.abs(labels - probs).pow(self.gamma).neg()
        # Numerically stable log-sigmoid via softplus with beta=-1:
        # softplus(x, -1, 50) == -log(1 + exp(-x)) == log(sigmoid(x)).
        log_p1 = torch.where(logits >= 0,
                             nn.functional.softplus(logits, -1, 50),
                             logits - nn.functional.softplus(logits, 1, 50))
        log_p0 = torch.where(logits >= 0,
                             -logits + nn.functional.softplus(logits, -1, 50),
                             -nn.functional.softplus(logits, 1, 50))
        loss = (labels * self.alpha * log_p1 + (1. - labels) * (1. - self.alpha) * log_p0) * modulator
        if self.reduction == "mean":
            return loss.mean()
        if self.reduction == "sum":
            return loss.sum()
        return loss
class DiceLossV1(nn.Module):
    def __init__(self, reduction="mean", epsilon=1e-9):
        """Dice loss for imbalanced data (known not to converge well; kept as reference).

        paper: Dice Loss for Data-imbalanced NLP Tasks
        url: https://arxiv.org/pdf/1911.02855.pdf
        args:
            reduction: str, reduction applied to the output:
                ``'none'`` | ``'mean'`` | ``'sum'``.
            epsilon: float, small smoothing constant. eg. 1e-9
        returns:
            Tensor of loss.
        examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).float(), torch.tensor(logits).float()
            >>> loss = DiceLoss()(logits, label)
        """
        super(DiceLossV1, self).__init__()
        self.reduction = reduction
        self.epsilon = epsilon

    def forward(self, logits, labels):
        probs = torch.sigmoid(logits)  # [N, C]
        # Gather the probability assigned at each label index per row.
        gather_idx = labels.unsqueeze(1).view(probs.size(0), -1)
        picked = torch.gather(probs, dim=1, index=gather_idx)
        numer = (1 - picked) * picked + self.epsilon
        denom = (1 - picked) * picked + 1 + self.epsilon
        dsc = 1 - numer / denom
        return dsc.mean() if "mean" == self.reduction else dsc.sum()
class DiceLoss(nn.Module):
    def __init__(self, epsilon=1e-9):
        """Soft Dice loss for imbalanced data (convergence can be unstable).

        Uses predict * labels as the soft intersection.
        paper: Dice Loss for Data-imbalanced NLP Tasks
        url: https://arxiv.org/pdf/1911.02855.pdf
        args:
            epsilon: float, small smoothing constant. eg. 1e-9
        returns:
            Tensor of loss (scalar).
        examples:
            >>> label, logits = [[1, 1, 1, 1], [0, 0, 0, 1]], [[0, 1, 1, 0], [1, 0, 0, 1],]
            >>> label, logits = torch.tensor(label).long(), torch.tensor(logits).float()
            >>> loss = DiceLoss()(logits, label)
        """
        super(DiceLoss, self).__init__()
        self.epsilon = epsilon

    def forward(self, logits, labels):
        probs = torch.sigmoid(logits)
        intersection = probs * labels + self.epsilon
        union = probs + labels + self.epsilon
        return 1 - 2 * intersection.sum() / union.sum()
class FCLayer(nn.Module):
    def __init__(self, input_dim, output_dim, dropout_rate=0.1, is_active=True,
                 is_dropout=True, active_type="mish"):
        """Fully-connected output layer with optional dropout and activation.

        args:
            input_dim: input dimension. eg. 768
            output_dim: output dimension. eg. 32
            dropout_rate: dropout probability. eg. 0.1
            is_dropout: whether to apply dropout before the linear map. eg. True
            is_active: whether to apply an activation after the linear map. eg. True
            active_type: activation name, eg. "mish", "swish", "tanh", "gelu", "relu"
        Returns:
            Tensor of batch.
        """
        super(FCLayer, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)
        self.dropout = nn.Dropout(dropout_rate)  # probability of an element to be zeroed
        self.is_dropout = is_dropout
        self.active_type = active_type
        self.is_active = is_active
        self.softmax = nn.Softmax(1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)
        self.tanh = nn.Tanh()
        self.gelu = nn.GELU()

    def forward(self, x):
        if self.is_dropout:
            x = self.dropout(x)
        x = self.linear(x)
        if not self.is_active:
            return x
        kind = self.active_type.upper()
        if kind == "MISH":
            return x * torch.tanh(nn.functional.softplus(x))
        if kind == "SWISH":
            return x * torch.sigmoid(x)
        if kind == "TANH":
            return self.tanh(x)
        if kind == "GELU":
            return self.gelu(x)
        # "RELU" and any unrecognised type fall back to ReLU.
        return self.relu(x)
class Swish(nn.Module):
    """Swish activation: f(x) = x * sigmoid(x).

    A smooth interpolation between the linear function and ReLU.
    paper: Searching for Activation Functions, https://arxiv.org/abs/1710.05941 (2017)
    """

    def __init__(self):
        super(Swish, self).__init__()

    def forward(self, x):
        return torch.sigmoid(x) * x
class Mish(nn.Module):
    """Mish activation applied element-wise.

    mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + exp(x)))
    """

    def __init__(self):
        # BUGFIX: this method was misspelled ``__index__``, so the intended
        # constructor never ran.
        super().__init__()

    def forward(self, x):
        # BUGFIX: this method was misspelled ``forword``, so calling the module
        # raised instead of applying mish.
        return x * torch.tanh(nn.functional.softplus(x))
if __name__ == '__main__':
    # Smoke-test the loss implementations on a tiny fixed example.
    label = torch.tensor([[1, 1, 1, 1], [0, 0, 0, 1]]).long()
    logits = torch.tensor([[0, 1, 1, 0], [1, 0, 0, 1]]).float()
    criteria = (DiceLoss(), DiceLossV1(), LabelSmoothingCrossEntropy(), LabelSmoothingCrossEntropyV1())
    for criterion in criteria:
        print(criterion(logits, label))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created at 2015-12-07T20:41:54.110455 by corral 0.0.1
# =============================================================================
# DOCS
# =============================================================================
"""carpyncho main loader
"""
# =============================================================================
# IMPORTS
# =============================================================================
import os
import glob
from corral import run
from corral.conf import settings
from carpyncho.models import Tile, Pawprint, PawprintXTile
# =============================================================================
# LOADER
# =============================================================================
class Loader(run.Loader):
    """Scan the pending directory and register the tiles and pawprint
    found and also store the files in a more convenient way
    """

    def only_dirs(self, path):
        """Yield (name, absolute_path) for every sub-directory of ``path``."""
        for dname in os.listdir(path):
            dpath = os.path.abspath(os.path.join(path, dname))
            if os.path.isdir(dpath):
                yield dname, dpath

    def retrieve_master_path(self, path):
        """Return the first ``*.dat`` master file inside ``path``, or None."""
        pattern = os.path.join(path, "*.dat")
        files = glob.glob(pattern)
        return files[0] if files else None

    def list_pawprints(self, path):
        """Return every ``pawprints/*.fits`` file inside ``path``."""
        pattern = os.path.join(path, "pawprints", "*.fits")
        return glob.glob(pattern)

    def setup(self):
        """Create the storage directories and index the pending input data."""
        self.stored_tiles_dir = os.path.join(settings.DATA_PATH, "tiles")
        if not os.path.isdir(self.stored_tiles_dir):
            # BUGFIX: this previously referenced the non-existent attribute
            # ``self.stored_tiles``, raising AttributeError whenever the
            # tiles directory was missing.
            os.makedirs(self.stored_tiles_dir)
        self.stored_pawprints_dir = os.path.join(
            settings.DATA_PATH, "pawprints")
        if not os.path.isdir(self.stored_pawprints_dir):
            os.makedirs(self.stored_pawprints_dir)
        # Index: tile name -> master file path, and tile name -> pawprint files.
        self.tiles, self.pawprints = {}, {}
        for tile_name, dpath in self.only_dirs(settings.INPUT_PATH):
            master_path = self.retrieve_master_path(dpath)
            if master_path:
                self.tiles[tile_name] = master_path
                pawprints = self.list_pawprints(dpath)
                if pawprints:
                    self.pawprints[tile_name] = pawprints

    def generate(self):
        """Yield Tile/Pawprint/PawprintXTile models; commits and removes each
        input file once it has been stored."""
        for tile_name, tile_path in self.tiles.items():
            tile = Tile(name=tile_name)
            tile.store_file(tile_path)
            yield tile
            # Commit before deleting so the source file is only removed once
            # the record is durable.
            self.session.commit()
            os.remove(tile_path)
        for tile_name, pawprints in self.pawprints.items():
            tile = self.session.query(Tile).filter_by(name=tile_name).first()
            for pwp_path in pawprints:
                name = os.path.splitext(os.path.basename(pwp_path))[0]
                pwp = self.session.query(Pawprint).filter_by(name=name).first()
                if pwp is None:
                    pwp = Pawprint(name=name)
                    pwp.store_file(pwp_path)
                    yield pwp
                # Link the pawprint to its tile if not already linked.
                pxt = self.session.query(PawprintXTile).filter_by(
                    pawprint=pwp, tile=tile).first()
                if pxt is None:
                    pxt = PawprintXTile(pawprint=pwp, tile=tile)
                    yield pxt
                self.session.commit()
                os.remove(pwp_path)
|
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import login, get_user_model
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import get_list_or_404, get_object_or_404, render, redirect
from django.views.generic import CreateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, FormView, UpdateView
from django.views.generic.list import ListView
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import View
from decimal import Decimal
from .forms import PriceListForm, PriceListCurrencyForm, PriceListProductForm, PriceListProductEditForm
from .models import PriceList, PriceListCurrency, PriceListCurrencyRate, PriceListProduct, PriceListProductPrice
User = get_user_model()  # resolves the active user model (supports custom AUTH_USER_MODEL); appears unused in this module - TODO confirm
@method_decorator(login_required, name='dispatch')
class ListCurrencyView(ListView):
    """List all pricelist currencies, ordered alphabetically by title."""
    template_name = 'currency/list.html'
    model = PriceListCurrency

    def get_queryset(self):
        base_qs = super(ListCurrencyView, self).get_queryset()
        return base_qs.order_by('title')
@method_decorator(login_required, name='dispatch')
class CreateCurrencyView(CreateView):
    """Create a PriceListCurrency inside a given PriceList (pk from the URL)."""
    template_name = 'currency/create.html'
    model = PriceListCurrency
    form_class = PriceListCurrencyForm
    success_url = reverse_lazy('list_currency')  # superseded by get_success_url below
    pricelist = None  # cached parent PriceList, populated by get_initial

    # Set the success url to be the current url
    def get_success_url(self):
        return self.request.path

    # Set the pricelist form field from the product pk and raise a 404 if not owner
    def get_initial(self):
        self.pricelist = get_object_or_404(PriceList, pk=self.kwargs.get('pk'))
        if not self.pricelist.is_owner(self.request.user) and not self.request.user.is_staff:
            raise Http404
        return {
            'pricelist':self.pricelist,
        }

    # Add pricelist data to the context
    def get_context_data(self, *args, **kwargs):
        context = super(CreateCurrencyView, self).get_context_data(*args, **kwargs)
        context['pricelist'] = self.pricelist
        return context

    # On save, record the initial exchange rate alongside the currency.
    # NOTE(review): form.save() here plus super().form_valid(form) saves the
    # form twice - presumably harmless (same instance), but confirm.
    def form_valid(self, form):
        currency = form.save()
        PriceListCurrencyRate.objects.create(currency=currency,
            rate=form.cleaned_data['baserate'])
        return super(CreateCurrencyView, self).form_valid(form)
@method_decorator(login_required, name='dispatch')
class EditCurrencyView(UpdateView):
    """Edit a PriceListCurrency; records a new rate entry when it changes."""
    template_name = 'currency/edit.html'
    model = PriceListCurrency
    form_class = PriceListCurrencyForm
    success_url = reverse_lazy('create_currency')  # re-pointed with the pricelist pk in form_valid
    currencyrate = None  # latest effective rate, cached by get_initial

    # Pre-fill the form with the most recent rate for this currency.
    def get_initial(self):
        self.currencyrate = PriceListCurrencyRate.objects.order_by('date_effective').filter(currency=self.get_object()).last()
        return { 'baserate':self.currencyrate.rate, }

    def form_valid(self, form):
        # Save the main form
        currency = form.save()
        # Set the success_url with the pricelist pk
        # (BUGFIX: removed a leftover ``print(currency)`` debug statement.)
        self.success_url = reverse_lazy('create_currency', kwargs={'pk': currency.pricelist.pk})
        # Create a new rate entry only if it changed and this is not the base currency
        if self.currencyrate.rate != form.cleaned_data['baserate'] and not currency.base:
            PriceListCurrencyRate.objects.create(currency=currency,
                rate=form.cleaned_data['baserate'])
        return super(EditCurrencyView, self).form_valid(form)
@method_decorator(login_required, name='dispatch')
class DeleteCurrencyView(View):
    """Delete a currency from a pricelist; owner/staff only, base currency protected.

    NOTE(review): deletion happens on GET, which is unsafe (no CSRF protection,
    link prefetchers can trigger it) - consider POST or Django's DeleteView.
    """
    success_url = reverse_lazy('create_currency')  # re-pointed with the pricelist pk below

    def get(self, request, **kwargs):
        currency = get_object_or_404(PriceListCurrency, pk=self.kwargs['pk'])
        self.success_url = reverse_lazy('create_currency', kwargs={'pk': currency.pricelist.pk})
        # Check request user is the owner or a staff member
        if not currency.pricelist.is_owner(self.request.user) and not self.request.user.is_staff:
            messages.error(request, 'You do not have the authority to delete this currency.')
            return redirect(self.success_url)
        # The base currency anchors all conversion rates and must never be removed.
        if currency.base:
            messages.error(request, 'Base currency cannot be deleted.')
            return redirect(self.success_url)
        # Compose the message before deleting, while the instance is still usable.
        messages.success(request, 'Currency "{}" has been successfully deleted from pricelist "{}"'.format(currency.title, currency.pricelist))
        currency.delete()
        return redirect(self.success_url)
@method_decorator(login_required, name='dispatch')
class ListPriceListView(ListView):
    """List pricelists; non-staff users only see the lists they created."""
    template_name = 'pricelist/list.html'
    model = PriceList

    def get_queryset(self):
        qs = super(ListPriceListView, self).get_queryset()
        if self.request.user.is_staff:
            return qs
        return qs.filter(creator=self.request.user)
@method_decorator(login_required, name='dispatch')
class CreatePriceListView(CreateView):
    """Create a PriceList and seed it with a default BASE currency at rate 1.00."""
    template_name = 'pricelist/create.html'
    model = PriceList
    form_class = PriceListForm
    success_url = reverse_lazy('list_pricelist')

    # On save: attach the requesting user, then create the base currency/rate.
    # NOTE(review): this sets ``owner`` while queries elsewhere filter on
    # ``creator`` - confirm the PriceList model really defines both fields.
    def form_valid(self, form):
        pricelist = form.save(commit=False)
        pricelist.owner = self.request.user
        pricelist.save()
        currency = PriceListCurrency.objects.create(pricelist=pricelist,
            title="BASE", code='BAS', symbol='$', base=True)
        PriceListCurrencyRate.objects.create(currency=currency,
            rate=Decimal('1.00'))
        return super(CreatePriceListView, self).form_valid(form)
@method_decorator(login_required, name='dispatch')
class DetailedPriceListView(DetailView):
    """Read-only detail page for a single PriceList.

    NOTE(review): unlike the edit/delete views there is no owner check here -
    confirm whether any logged-in user may view any pricelist.
    """
    template_name = 'pricelist/detail.html'
    model = PriceList
@method_decorator(login_required, name='dispatch')
class EditPriceListView(UpdateView):
    """Edit a PriceList; only its owner or a staff member may access it."""
    template_name = 'pricelist/edit.html'
    model = PriceList
    form_class = PriceListForm
    success_url = reverse_lazy('list_pricelist')

    def dispatch(self, request, *args, **kwargs):
        # Guard: only the owner or staff may proceed; everyone else gets a 404.
        allowed = self.get_object().is_owner(self.request.user) or self.request.user.is_staff
        if not allowed:
            raise Http404()
        return super(EditPriceListView, self).dispatch(request, *args, **kwargs)
@method_decorator(login_required, name='dispatch')
class DeletePriceListView(View):
    """Delete a pricelist; owner or staff only.

    NOTE(review): deletion happens on GET, which is unsafe (no CSRF
    protection) - consider POST or Django's DeleteView.
    """
    success_url = reverse_lazy('list_pricelist')

    def get(self, request, **kwargs):
        pricelist = get_object_or_404(PriceList, pk=self.kwargs['pk'])
        # Check request user is the owner or a staff member
        if not pricelist.is_owner(self.request.user) and not self.request.user.is_staff:
            messages.error(request, 'You do not have the authority to delete this pricelist.')
            return redirect(self.success_url)
        # Compose the message before deleting, while the instance is still usable.
        messages.success(request, 'Pricelist "{}" has been successfully deleted.'.format(pricelist.title))
        pricelist.delete()
        return redirect(self.success_url)
@method_decorator(login_required, name='dispatch')
class CreatePriceListProductView(CreateView):
    """Add a product to a pricelist (pk from the URL) with an initial base price."""
    template_name = 'pricelistproduct/create.html'
    model = PriceListProduct
    form_class = PriceListProductForm
    pricelist = None  # cached parent PriceList, populated by get_initial

    # Set the success url to be the current url
    def get_success_url(self):
        return self.request.path

    # Set the pricelist form field from the product pk and raise a 404 if not owner
    def get_initial(self):
        self.pricelist = get_object_or_404(PriceList, pk=self.kwargs.get('pk'))
        if not self.pricelist.is_owner(self.request.user) and not self.request.user.is_staff:
            raise Http404
        return {
            'pricelist':self.pricelist,
            'user':self.request.user,
        }

    # Add pricelist data to the context
    def get_context_data(self, *args, **kwargs):
        context = super(CreatePriceListProductView, self).get_context_data(*args, **kwargs)
        context['pricelist'] = self.pricelist
        return context

    # On save, record the initial base price alongside the product.
    # NOTE(review): form.save() here plus super().form_valid(form) saves the
    # form twice - presumably harmless (same instance), but confirm.
    def form_valid(self, form):
        product = form.save()
        PriceListProductPrice.objects.create(listproduct=product,
            price=form.cleaned_data['baseprice'])
        return super(CreatePriceListProductView, self).form_valid(form)
@method_decorator(login_required, name='dispatch')
class EditPriceListProductView(UpdateView):
    """Edit a pricelist product; records a new price entry when it changes."""
    template_name = 'pricelistproduct/edit.html'
    model = PriceListProduct
    form_class = PriceListProductEditForm
    success_url = reverse_lazy('create_pricelistproduct')  # re-pointed in form_valid
    productprice = None  # latest effective price, cached by get_initial

    # Make sure the pricelist belongs to the user (staff can edit anything).
    def get_queryset(self):
        queryset = super(EditPriceListProductView, self).get_queryset()
        if not self.request.user.is_staff:
            queryset = queryset.filter(pricelist__creator=self.request.user)
        return queryset

    # Pre-fill the form with the most recent price for this product.
    def get_initial(self):
        self.productprice = PriceListProductPrice.objects.order_by('date_effective').filter(listproduct=self.get_object()).last()
        return {
            'baseprice':self.productprice.price,
        }

    def form_valid(self, form):
        product = form.save()
        self.success_url = reverse_lazy('create_pricelistproduct', kwargs={'pk': product.pricelist.pk})
        # Record a new price entry only if the value actually changed.
        # (Cleanup: dropped the unused ``productprice = ...`` local binding.)
        if self.productprice.price != form.cleaned_data['baseprice']:
            PriceListProductPrice.objects.create(
                listproduct=product,
                price=form.cleaned_data['baseprice'])
        return super(EditPriceListProductView, self).form_valid(form)
@method_decorator(login_required, name='dispatch')
class DeletePriceListProductView(View):
    """Delete a product from a pricelist; owner or staff only.

    NOTE(review): deletion happens on GET, which is unsafe (no CSRF
    protection) - consider POST or Django's DeleteView.
    """
    success_url = reverse_lazy('create_pricelistproduct')  # re-pointed with the pricelist pk below

    def get(self, request, **kwargs):
        product = get_object_or_404(PriceListProduct, pk=self.kwargs['pk'])
        self.success_url = reverse_lazy('create_pricelistproduct', kwargs={'pk': product.pricelist.pk})
        # Check request user is the owner or a staff member
        if not product.pricelist.is_owner(self.request.user) and not self.request.user.is_staff:
            messages.error(request, 'You do not have the authority to delete this product.')
            return redirect(self.success_url)
        # Compose the message before deleting, while the instance is still usable.
        messages.success(request, 'Product "{}" has been successfully deleted from pricelist "{}"'.format(product.product.title, product.pricelist))
        product.delete()
        return redirect(self.success_url)
@method_decorator(login_required, name='dispatch')
class ListRateView(ListView):
    """Placeholder: currency-rate listing view, not implemented yet."""
    pass
@method_decorator(login_required, name='dispatch')
class CreateRateView(CreateView):
    """Placeholder: currency-rate creation view, not implemented yet."""
    pass
@method_decorator(login_required, name='dispatch')
class EditRateView(UpdateView):
    """Placeholder: currency-rate editing view, not implemented yet."""
    pass
|
@annot('void -> void')
def test():
    """No-op body; presumably exists to exercise the @annot decorator - TODO confirm."""
    pass
def main():
    """Run the annotated test function and return exit code 0."""
    test()
    return 0
|
import gzip
import os
import subprocess
def get_chrom(cfile):
    """Read a gzipped chromosome file: skip the first (header) line, then
    return the remaining sequence upper-cased with newlines stripped."""
    with gzip.open(cfile, 'rb') as handle:
        next(handle)  # discard the header line (e.g. a FASTA '>' record)
        body = handle.read().decode('utf-8')
    return body.replace('\n', '').upper()
def itoseq(seqint, kmer):
    """Decode an integer (2 bits per base: A=0, C=1, G=2, T=3) into a DNA
    string, left-padded with 'A' up to ``kmer`` characters. Inverse of seqtoi.

    (Cleanup: removed the unused ``binrep`` local.)
    """
    nucleotides = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}
    seq = ""
    while seqint > 0:
        seq = nucleotides[seqint & 3] + seq
        seqint >>= 2
    # 'A' encodes to 0, so leading A's vanish in integer form; restore them.
    while len(seq) < kmer:
        seq = 'A' + seq
    return seq
def seqtoi(seq):
    '''
    Encode a DNA string as an integer, 2 bits per base (A=0, C=1, G=2, T=3).
    Does not append a leading 1 bit - suitable for direct integer indexing.
    '''
    encoding = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    value = 0
    for base in seq:
        value = (value << 2) | encoding[base]
    return value
def is_dna(sequence, length=0):
    """Return True if ``sequence`` contains only A/C/G/T (case-insensitive);
    when ``length`` > 0 the sequence must additionally be exactly that long."""
    valid = set(sequence.upper()) <= set('ACGT')
    if valid and length > 0:
        valid = len(sequence) == length
    return valid
def isbound_escore(seq, etable, kmer=8, bsite_cutoff=0.4, nbsite_cutoff=0.35):
    """Classify ``seq`` as "bound", "ambiguous" or "unbound" from E-scores.

    Slides a ``kmer``-wide window over ``seq``, looks each window up in
    ``etable`` (indexed by the 2-bit encoding of the window), then:
      - "unbound" if no window reaches ``nbsite_cutoff``;
      - "bound" if two consecutive windows exceed ``bsite_cutoff``;
      - "ambiguous" otherwise.
    NOTE(review): the default bsite/nbsite cutoffs (0.4/0.35) look swapped
    relative to the caller isbound_escore_18mer (0.35/0.4) - confirm intent.
    """
    nucleotides = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    # BUGFIX: the rolling-window mask was hard-coded for kmer=8
    # ((2 << (8*2-1)) - 1); derive it from ``kmer`` so other widths work.
    mask = (2 << (kmer * 2 - 1)) - 1
    binrep = 0
    for base in seq[0:kmer]:  # encode the first window
        binrep = (binrep << 2) | nucleotides[base]
    elist = [etable[binrep]]
    for i in range(kmer, len(seq)):  # roll the window one base at a time
        binrep = ((binrep << 2) | nucleotides[seq[i]]) & mask
        elist.append(etable[binrep])
    if max(elist) < nbsite_cutoff:
        return "unbound"
    isbound = False
    for score in elist:
        if score > bsite_cutoff:
            if isbound:  # second consecutive high-scoring window
                return "bound"
            isbound = True
        else:
            isbound = False
    return "ambiguous"
"""
return: "<boundness of wild-type> > <boundness of mutant>"
"""
def isbound_escore_18mer(seq18mer,pbm_name,escore_dir,spec_ecutoff=0.35,nonspec_ecutoff=0.4):
    # Locate the per-PBM short E-score table and the short->long index map.
    eshort_path = "%s/%s_escore.txt" % (escore_dir,pbm_name)
    # TODO: avoid IO, maybe using global var?
    short2long_map = "%s/index_short_to_long.csv" % (escore_dir)
    # -- this definitely needs to go to a database
    with open(eshort_path) as f:
        eshort = [float(line) for line in f]
    with open(short2long_map) as f:
        next(f)  # skip the CSV header row
        emap = [int(line.split(",")[1])-1 for line in f]
    # Expand the short score table into the full-length table via the map.
    elong = [eshort[idx] for idx in emap]
    # Wild-type = first 17 bases; the 18th character presumably encodes the
    # mutant base substituted at position 8 - TODO confirm data format.
    wild = seq18mer[:-1]
    mut = seq18mer[:8] + seq18mer[-1] + seq18mer[9:-1]
    return "%s>%s" % (isbound_escore(wild,elong,bsite_cutoff=spec_ecutoff,nbsite_cutoff=nonspec_ecutoff),
        isbound_escore(mut,elong,bsite_cutoff=spec_ecutoff,nbsite_cutoff=nonspec_ecutoff))
def delete_file(filename):
    '''
    Remove ``filename`` if it exists (used to purge user files after
    USER_DATA_EXPIRY seconds); logs to stdout what happened either way.
    '''
    if not os.path.exists(filename):
        print("%s doesn't exist for deletion" % filename)
        return
    os.remove(filename)
    print("Deleted: %s" % filename)
def line_count(file_path):
    """Return the number of newline-terminated lines in ``file_path``.

    Counts b'\\n' directly in Python instead of shelling out to ``wc -l``:
    identical semantics (wc counts newline characters) without spawning a
    process or requiring a Unix toolchain.
    """
    count = 0
    with open(file_path, 'rb') as handle:
        # Read in 1 MiB chunks so arbitrarily large files stay cheap.
        for chunk in iter(lambda: handle.read(1 << 20), b''):
            count += chunk.count(b'\n')
    return count
# https://stackoverflow.com/questions/2130016/splitting-a-list-into-n-parts-of-approximately-equal-length
def chunkify(lst, n):
    """Split ``lst`` into ``n`` interleaved parts of approximately equal length."""
    parts = []
    for offset in range(n):
        parts.append(lst[offset::n])
    return parts
|
from absl import app
from absl import flags
from absl import logging
import numpy as np
import os.path as path
from sklearn.model_selection import train_test_split
import transformations.reader.generic as generic_reader
import transformations.tfhub_module as tfhub_module
import transformations.torchhub_model as torchhub_model
import transformations.pca as pca
import transformations.nca as nca
import transformations.random_proj as random_proj
# Command-line interface (absl flags).
FLAGS = flags.FLAGS
# Data source for the pipeline; required (enforced in __main__ below).
flags.DEFINE_enum("variant", None, ["matrix", "textfile", "folder", "mnist_data", "cifar_data", "tfds", "torchvision"], "Input for running the tool")
# Ordered transformation names; the first one is also responsible for loading the data.
flags.DEFINE_list("transformations", [], "List of transformations (can be empty, which exports the raw features) to be applied in that order (starting from the second only using a matrix as a input")
# Optional stratified subsampling applied after all transformations.
flags.DEFINE_integer("subsamples", None, "Number of subsamples to export")
flags.DEFINE_string("export_path", ".", "Path to folder (should exist) where the features and labels matrices should be stored")
flags.DEFINE_string("export_features", None, "Features export file name")
flags.DEFINE_string("export_labels", None, "Labels export file name")
def _get_transform_fns():
    """Resolve FLAGS.transformations into a list of callables.

    The first transformation uses its ``load_and_apply`` variant (it must
    load the raw data itself); all later ones use ``apply`` and receive the
    previous stage's output.  Raises app.UsageError for unknown names.
    """
    dispatch = {
        "tfhub_module": (tfhub_module.load_and_apply, tfhub_module.apply),
        "pca": (pca.load_and_apply, pca.apply),
        "nca": (nca.load_and_apply, nca.apply),
        "random_proj": (random_proj.load_and_apply, random_proj.apply),
        "torchhub_model": (torchhub_model.load_and_apply, torchhub_model.apply),
    }
    fns = []
    for t in FLAGS.transformations:
        key = t.strip().lower()
        if key not in dispatch:
            raise app.UsageError("Transformation '{}' is not valid!".format(t))
        loader, applier = dispatch[key]
        fns.append(loader if not fns else applier)
    return fns
def main(argv):
    """Load data, run the configured transformation chain, and export
    the resulting features/labels matrices as .npy files."""
    if not path.exists(FLAGS.export_path):
        raise app.UsageError("Path to the export folder '{}' needs to exist!".format(FLAGS.export_path))
    if FLAGS.variant == "matrix" and len(FLAGS.transformations) == 0:
        raise app.UsageError("Loading and rexporting the labels and features matrix without transformation is stupid! Use the command line and 'cp'!")
    # Apply the transformation chain; the first stage loads the data itself,
    # each later stage consumes the previous stage's (features, dim, samples,
    # labels) tuple.  With no transformations, fall back to the generic reader.
    transform_fns = _get_transform_fns()
    if transform_fns:
        data = transform_fns[0]()
        for fn in transform_fns[1:]:
            data = fn(*data)
        features, dim, samples, labels = data
    else:
        features, dim, samples, labels = generic_reader.read()
    # Optional stratified subsampling (only when it actually shrinks the set).
    if FLAGS.subsamples is not None and 0 < FLAGS.subsamples < samples:
        logging.log(logging.INFO, "Subsampling {} sampels".format(FLAGS.subsamples))
        features, _, labels, _ = train_test_split(features,
                                                  labels,
                                                  test_size = None,
                                                  train_size = FLAGS.subsamples,
                                                  stratify = labels)
        samples = FLAGS.subsamples
    # Export data
    features_path = path.join(FLAGS.export_path, FLAGS.export_features)
    logging.log(logging.INFO, "Saving features with shape {} to '{}'".format(np.shape(features), features_path))
    np.save(features_path, features)
    labels_path = path.join(FLAGS.export_path, FLAGS.export_labels)
    logging.log(logging.INFO, "Saving labels with shape {} to '{}'".format(np.shape(labels), labels_path))
    np.save(labels_path, labels)
if __name__ == "__main__":
flags.mark_flag_as_required("variant")
flags.mark_flag_as_required("export_features")
flags.mark_flag_as_required("export_labels")
app.run(main)
|
import argparse
import matplotlib.pyplot as plt
import os
import collections
from tqdm import tqdm
import numpy as np
import math
import torch
import torch.nn as nn
import torch.optim as optim
from .data import Dataset, DataLoader
#from .data.tickers import tickers_df, ticker_feature_count
from .data.tickers import TickerStream
from .model import Oracle
from .validation import validate
class Trainer:
    """Owns the chronological train/validation split, the optimization loop,
    per-epoch checkpointing, and the diagnostic matplotlib plots for an
    Oracle model.  Interface unchanged: construct with the same positional
    arguments and call ``train(model)``.
    """
    def __init__(self, df, window, stride, products,
                 savedir, epochs, batch_size, num_workers, lr):
        self.savedir = savedir
        self.epochs = epochs
        self.lr = lr
        # 80/20 chronological split — no shuffling of the time series.
        edge = math.ceil(0.8 * len(df))
        train = Dataset(df[:edge], window, stride)
        val = Dataset(df[edge:], window, stride)
        self.dl_kws = dict(num_workers=num_workers,
                           batch_size=batch_size,
                           products=products)
        self.train_dl = DataLoader(train, resample=False, **self.dl_kws)
        self.val_dl = DataLoader(val, **self.dl_kws)
    def train(self, model):
        """Run the full training loop.

        Checkpoints the whole model to ``savedir/oracle.pt`` every epoch and
        writes a normalized loss/accuracy plot on completion.
        """
        train_losses, val_losses, accuracies = [], [], []
        #optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.8)
        optimizer = optim.Adam(model.parameters(), lr=self.lr)
        # Baseline validation loss before any training step.
        val_metrics = validate(model, self.val_dl, self.dl_kws['products'])
        val_losses.append(np.mean(val_metrics['loss']))
        for e in range(self.epochs):
            train_metrics = self.train_epoch(e, model, self.train_dl, optimizer)
            train_losses.extend(train_metrics['loss'])
            val_metrics = validate(model, self.val_dl, self.dl_kws['products'])
            val_losses.append(np.mean(val_metrics['loss']))
            accuracies.append(val_metrics['mean_accuracy'])
            model_path = os.path.join(self.savedir, 'oracle.pt')
            torch.save(model, model_path)
        f, ax = plt.subplots(1, 1, figsize=(10, 4))
        for y in (train_losses, val_losses, accuracies):
            # Normalize each curve to [0, 1] so all three share one axis.
            y = np.array(y) / max(y)
            x = np.linspace(0, self.epochs + 1, len(y))
            ax.plot(x, y)
        plt.savefig(os.path.join(self.savedir, 'loss_accuracy.png'))
        plt.close(f)  # fix: release the figure so repeated runs don't leak memory
    def train_epoch(self, epoch, model, dl, optimizer):
        """Train *model* for one epoch over *dl*; return per-batch metric lists."""
        model.train()
        metrics = collections.defaultdict(list)
        criterion = nn.NLLLoss()
        f, ax = plt.subplots(1, 1, figsize=(10, 4))
        with tqdm(total=len(dl)) as pbar:
            for batch, targets in dl:
                metrics['positives'].append(targets.sum().item())
                optimizer.zero_grad()
                hypothesis = model(batch)
                # One NLL term per product head, summed into a single loss.
                losses = [criterion(prediction, target)
                          for prediction, target in zip(hypothesis, targets)]
                loss = torch.sum(torch.stack(losses))
                loss.backward()
                self.plot_grad_flow(ax, model.named_parameters())
                optimizer.step()
                metrics['loss'].append(loss.item())
                pbar.update(1)
                desc = f'loss: {loss.item():.6f}'
                pbar.set_description(desc)
        plt.savefig(os.path.join(self.savedir, f'grads.{epoch}.png'))
        plt.close(f)  # fix: one figure per epoch was created and never closed
        return metrics
    @staticmethod
    def plot_grad_flow(ax, named_parameters):
        '''Usage: Plug this function in Trainer class after loss.backwards() as
        "plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
        ave_grads = []
        max_grads = []
        layers = []
        for n, p in named_parameters:
            if p.requires_grad and ("bias" not in n):
                if p.grad is None:
                    # fix: parameters that received no gradient this step would
                    # previously crash on p.grad.abs(); skip them instead.
                    continue
                layers.append(n)
                ave_grads.append(p.grad.abs().mean())
                max_grads.append(p.grad.abs().max())
        ax.bar(np.arange(len(max_grads)) + 0.5, max_grads, alpha=0.1, lw=1, color="c")
        ax.bar(np.arange(len(max_grads)) + 0.5, ave_grads, alpha=0.1, lw=1, color="b")
        ax.set_xticks(np.arange(len(ave_grads)) + 0.5)
        ax.set_xticklabels(layers, rotation="vertical")
        ax.set_xlim(( 0, len(ave_grads)))
        ax.set_ylim((-0.001, max(max_grads)))
        ax.set_xlabel("Layers")
        ax.set_ylabel("average gradient")
        ax.set_title("Gradient flow")
if __name__ == '__main__':
    # CLI entry point: parse training options, build the ticker DataFrame,
    # construct the model and run the trainer.
    parser = argparse.ArgumentParser(description='Train predictive models')
    parser.add_argument('--savedir', type=str, default='./',
                        help='Path to store training related files')
    parser.add_argument('--inputs', nargs='+', default=[],
                        help='One or more ticker logs to use for training')
    parser.add_argument('--epochs', type=int, default=5,
                        help='Number of training epochs')
    parser.add_argument('--products', default=None,
                        help='Path to list of targeted products')
    parser.add_argument('--window', type=int, default=9,
                        help='Number of input timesteps for predictive model')
    parser.add_argument('--stride', type=int, default=9,
                        help='Number of timesteps being predicted')
    parser.add_argument('--stream_window', type=int, default=900,
                        help='Number of seconds per averaged timestep')
    parser.add_argument('--d_hidden', type=int, default=100,
                        help='LSTM hidden layer dimension')
    parser.add_argument('--n_layers', type=int, default=1,
                        help='Number of LSTMS to stack')
    parser.add_argument('--lr', type=float, default=0.01,
                        help='Initial learning rate')
    parser.add_argument('--batch_size', type=int, default=4,
                        help='Number of samples per batch')
    parser.add_argument('--num_workers', type=int, default=4,
                        help='Number of worker processes for loading data')
    args = parser.parse_args()
    if args.products is None:
        # fix: `stream_window` was a bare name (NameError); use the parsed arg.
        # NOTE(review): `streamable` is not defined or imported in this module,
        # so this branch still fails at runtime — confirm the intended helper.
        products = streamable(*args.inputs, window=args.stream_window)
    else:
        # Read one product per line, skipping comments and blank lines.
        with open(args.products, 'r') as f:
            products = [l.strip() for l in f.readlines() if not l.startswith('#')]
            products = [l for l in products if l]
    stream = TickerStream(products, args.inputs, args.stream_window)
    df = stream.tickers_df()
    #df = tickers_df(*args.inputs, products=products, window=args.stream_window)
    n_products = len(products)
    n_features = stream.ticker_feature_count * n_products
    n_classes = 2
    model = Oracle(n_features, args.d_hidden, args.n_layers, n_products, n_classes)
    trainer = Trainer(df, args.window, args.stride, products,
                      args.savedir, args.epochs, args.batch_size, args.num_workers,
                      args.lr)
    trainer.train(model)
|
import os, inspect
import matlab_web_desktop_proxy
import jupyter_matlab_proxy
from pathlib import Path
from matlab_web_desktop_proxy import mwi_environment_variables as mwi_env
def test_get_env():
    """_get_env should build the MATLAB web desktop environment mapping."""
    app_port = 10000
    prefix = "foo/"
    expected = {
        mwi_env.get_env_name_app_port(): str(app_port),
        mwi_env.get_env_name_base_url(): f"{prefix}matlab",
        mwi_env.get_env_name_app_host(): "127.0.0.1",
        mwi_env.get_env_name_mhlm_context(): "MATLAB_JUPYTER",
    }
    assert jupyter_matlab_proxy._get_env(app_port, prefix) == expected
def test_setup_matlab():
    """Tests for a valid Server Process Configuration Dictionary.

    Checks that the jupyter proxy returns the expected server process
    configuration for the MATLAB process, and that the advertised launcher
    icon actually exists on disk.
    """
    package_dir = Path(inspect.getfile(matlab_web_desktop_proxy)).parent
    icon_path = str(package_dir / "icons" / "matlab.svg")
    expected = {
        "command": ["matlab-web-desktop-app", "--config", "jupyter_config"],
        "timeout": 100,
        "environment": jupyter_matlab_proxy._get_env,
        "absolute_url": True,
        "launcher_entry": {
            "title": "MATLAB",
            "icon_path": icon_path,
        },
    }
    actual = jupyter_matlab_proxy.setup_matlab()
    assert actual == expected
    assert os.path.isfile(actual["launcher_entry"]["icon_path"])
|
# Tag filter lists used by distanceBetweenDictionaries below.  Both default to
# empty; the commented code shows how they were once loaded from help files.
blackList = []
ignoredList = []
# with open( 'webapp/helpFiles/tagsBlackList.txt' ) as file:
#     blackList = file.read().split("\n")
# with open( 'webapp/helpFiles/tagsIgnoredList.txt' ) as file:
#     ignoredList = file.read().split("\n")
def distanceBetweenDictionaries(query, gallery):
    """Sum per-key distances between two tag dictionaries.

    Only keys present in both dicts and not on ``blackList`` contribute.
    Keys on ``ignoredList`` are weighted by 80.
    NOTE(review): despite its name, ignoredList *boosts* a key's weight
    rather than ignoring it — confirm intended.
    """
    distance = 0
    for key in query:
        # The original also tested `key in query`, which is always true when
        # iterating query's own keys; only blacklist membership and presence
        # in gallery actually matter.
        if key not in blackList and key in gallery:
            multiplier = 80 if key in ignoredList else 1
            distance += distaceBetweenPair(query[key], gallery[key], key) * multiplier
    return distance
def distaceBetweenPair(p, q, key):
    """Return 1 when the stringified values differ, else 0.

    *key* is accepted for interface compatibility but not used here.
    """
    return 1 if str(p) != str(q) else 0
|
class Board:
    '''
    A 3x3 board for TicTacToe.

    Cells hold their position digit ('1'..'9') until a player's marker
    replaces them; positions are passed around as strings.
    '''
    # Internal state (name-mangled); __print_board is refreshed by make_printable().
    __board = ''
    __print_board = ''
    def __init__(self):
        self.__board = [['1', '2', '3'], ['4', '5', '6'], ['7', '8', '9']]
        self.make_printable()
    def __str__(self):
        self.make_printable()
        return self.__print_board
    def get_board(self):
        """Return the underlying 3x3 list of cell strings."""
        return self.__board
    def make_printable(self):
        """Rebuild the cached ASCII rendering of the board."""
        c_gap = ' ----------- '
        r_gap = ' | '
        self.__print_board = c_gap + '\n'
        for row in self.__board:
            self.__print_board += r_gap
            for cell in row:
                self.__print_board += cell
                self.__print_board += r_gap
            self.__print_board += '\n' + c_gap + '\n'
    def is_space_available(self, pos: str):
        """Return True when *pos* ('1'..'9') names a cell still holding its digit."""
        if not pos.isdigit():
            return False
        n = int(pos)
        if not 1 <= n <= 9:
            return False
        # Map the 1-based position to (row, column).
        row, col = divmod(n - 1, 3)
        return self.__board[row][col] == pos
    def is_full(self):
        """Return True when no cell still holds a digit (board exhausted)."""
        for row in self.__board:
            for cell in row:
                if cell.isdigit():
                    return False
        return True
    def place(self, marker: str, pos: str):
        """Put *marker* at *pos* if that cell is free; return success as bool.

        Fix: a non-numeric *pos* previously raised ValueError from int();
        it now returns False like any other invalid position.
        """
        if not pos.isdigit() or not 1 <= int(pos) <= 9:
            return False
        for row in self.__board:
            if pos in row:
                row[row.index(pos)] = marker
                return True
        return False
|
try:
from urllib import quote_plus #python 2
except:
pass
try:
from urllib.parse import quote_plus #python 3
except:
pass
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from comments.forms import CommentForm
from comments.models import Comment
from .forms import PostForm,NewWritingForm
from .models import Post, Genre
from django.contrib.auth.decorators import login_required
import random
@login_required(login_url="/login/")
def post_detail(request, slug=None):
instance = get_object_or_404(Post, slug=slug)
if instance.publish > timezone.now().date() or instance.draft == 'YES':
if not request.user.is_staff or not request.user.is_superuser:
raise Http404
genre = Genre.objects.all()
initial_data = {
"content_type": instance.get_content_type,
"object_id": instance.id
}
context = {
"genre":genre,
"title": instance.title,
"instance": instance,
}
return render(request, "post.html", context)
def post_list(request):
    """List active posts with free-text search, pagination and a random
    "favourites" selection.  Staff additionally see drafts/future posts.

    Legacy Python 2 / Django view (note the print statements).
    """
    today = timezone.now().date()
    queryset_list = Post.objects.active() #.order_by("-timestamp")
    # Up to five random favourite (non-draft) posts for the sidebar.
    favourites_list = Post.objects.filter(favs=True).filter(draft="NO")
    print favourites_list
    if favourites_list.count()< 5:
        favourites_queryset_list = favourites_list
    else:
        favourites_queryset_list = random.sample(favourites_list,5)
    genre = Genre.objects.all()
    # Staff bypass the "active" filter and see everything.
    if request.user.is_staff or request.user.is_superuser:
        queryset_list = Post.objects.all()
    # ?q= free-text search over title, content and author name.
    query = request.GET.get("q")
    if query:
        queryset_list = queryset_list.filter(
                Q(title__icontains=query)|
                Q(content__icontains=query)|
                Q(user__first_name__icontains=query) |
                Q(user__last_name__icontains=query)
                ).distinct()
    paginator = Paginator(queryset_list, 3) # 3 posts per page
    page_request_var = "page"
    page = request.GET.get(page_request_var)
    try:
        queryset = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        queryset = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        queryset = paginator.page(paginator.num_pages)
    context = {
        "genre":genre,
        "object_list": queryset,
        "favourites" : favourites_queryset_list,
        "title": "List",
        "page_request_var": page_request_var,
        "today": today,
    }
    print context['favourites']
    return render(request, "index.html", context)
def post_list_genre(request, genre):
    """Like post_list, restricted to posts of one genre (matched by title).

    NOTE(review): if the Genre lookup below raises (unknown genre title), the
    bare except leaves genre_id and queryset_list undefined, and the staff
    branch / pagination then raise NameError — confirm intended behaviour
    for bad genre URLs.
    """
    today = timezone.now().date()
    genre = genre
    print(genre)
    try:
        # Resolve the genre title to its id, then rebind `genre` to the full
        # genre list used by the template sidebar.
        genre_id = Genre.objects.filter(title=genre)[0].id
        queryset_list = Post.objects.active().filter(genre=genre_id)
        print queryset_list
        genre = Genre.objects.all()
    except:
        pass
    # queryset_list = Post.objects.active()
    # Up to five random favourite (non-draft) posts for the sidebar.
    favourites_list = Post.objects.filter(favs=True).filter(draft="NO")
    print favourites_list
    if favourites_list.count()< 5:
        favourites_queryset_list = favourites_list
    else:
        favourites_queryset_list = random.sample(favourites_list,5)
    # Staff also see drafts/future posts of this genre.
    if request.user.is_staff or request.user.is_superuser:
        queryset_list = Post.objects.filter(genre=genre_id)
    query = request.GET.get("q")
    if query:
        queryset_list = queryset_list.filter(
                Q(title__icontains=query)|
                Q(content__icontains=query)|
                Q(user__first_name__icontains=query) |
                Q(user__last_name__icontains=query)
                ).distinct()
    paginator = Paginator(queryset_list, 3) # 3 posts per page
    page_request_var = "page"
    page = request.GET.get(page_request_var)
    try:
        queryset = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        queryset = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        queryset = paginator.page(paginator.num_pages)
    context = {
        "genre":genre,
        "object_list": queryset,
        "favourites" : favourites_queryset_list,
        "title": "List",
        "page_request_var": page_request_var,
        "today": today,
    }
    return render(request, "index.html", context)
def post_update(request, slug=None):
    """Staff-only edit form for an existing post; redirect to it on success."""
    if not request.user.is_staff or not request.user.is_superuser:
        raise Http404
    instance = get_object_or_404(Post, slug=slug)
    # Bound form on POST, unbound on GET (the `or None` idiom).
    form = PostForm(request.POST or None, request.FILES or None, instance=instance)
    if form.is_valid():
        instance = form.save(commit=False)
        instance.save()
        messages.success(request, "<a href='#'>Item</a> Saved", extra_tags='html_safe')
        return HttpResponseRedirect(instance.get_absolute_url())
    context = {
        "title": instance.title,
        "instance": instance,
        "form":form,
    }
    return render(request, "create.html", context)
@login_required(login_url="/login/")
def post_create(request):
if request.user.is_staff or request.user.is_superuser:
form = PostForm(request.POST or None, request.FILES or None)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
# message success
messages.success(request, "Successfully Created")
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"form": form,
}
return render(request, "create.html", context)
else:
form = NewWritingForm(request.POST or None, request.FILES or None)
if form.is_valid():
instance = form.save(commit=False)
instance.name = request.user.first_name
instance.email = request.user.email
instance.save()
return HttpResponseRedirect('/')
context = {
"form": form,
"genre":Genre.objects.all()
}
return render(request, "create.html", context)
def post_delete(request, slug=None):
    """Staff-only: delete the post identified by *slug*, then back to the list.

    NOTE(review): deletes on any request method (including GET) with no
    confirmation step — confirm that is acceptable.
    """
    if not request.user.is_staff or not request.user.is_superuser:
        raise Http404
    instance = get_object_or_404(Post, slug=slug)
    instance.delete()
    messages.success(request, "Successfully deleted")
    return redirect("posts:list")
|
#!/usr/bin/env python
'''Test that the window caption can be set.
Expected behaviour:
Two windows will be opened, one with the caption "Window caption 1"
counting up every second; the other with a Unicode string including
some non-ASCII characters.
Press escape or close either window to finished the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import time
import unittest
from pyglet import window
class WINDOW_CAPTION(unittest.TestCase):
    # Interactive test: requires a display, and a human to close a window
    # (or press escape) to finish.
    def test_caption(self):
        """Open two windows and exercise set_caption with ASCII and non-ASCII text."""
        w1 = window.Window(400, 200, resizable=True)
        w2 = window.Window(400, 200, resizable=True)
        count = 1
        w1.set_caption('Window caption %d' % count)
        # Unicode caption: "¿Habla español?"
        w2.set_caption(u'\u00bfHabla espa\u00f1ol?')
        last_time = time.time()
        # Pump both windows' events, bumping w1's caption once per second,
        # until either window is closed.
        while not (w1.has_exit or w2.has_exit):
            if time.time() - last_time > 1:
                count += 1
                w1.set_caption('Window caption %d' % count)
                last_time = time.time()
            w1.dispatch_events()
            w2.dispatch_events()
        w1.close()
        w2.close()
if __name__ == '__main__':
    # Run the interactive caption test via unittest's CLI runner.
    unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import cv2
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
from refinenet.code.linemod_tools import LabelInfo
import time
import os
import json
import numpy as np
import glob
from evaluation.evaluation import *
from plyfile import PlyData
from visualization import *
def main(val_path):
    """Run keypoint R-CNN inference over Occluded-LINEMOD test images and save
    visualizations of predicted vs. ground-truth 3D bounding boxes.

    val_path : str
        dataset root containing test_data/*.png, models/ and blender_poses/.
    Output images are written to <this_dir>/Occ-LINEMOD/. Returns True.
    """
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
    parser.add_argument(
        "--config-file",
        default="../configs/caffe2/keypoints_R_101_FPN.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.7,
        help="Minimum score for the prediction to be shown",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        default=224,
        help="Smallest size of the image to feed to the model. "
            "Model was trained with 800, which gives best results",
    )
    parser.add_argument(
        "--show-mask-heatmaps",
        dest="show_mask_heatmaps",
        help="Show a heatmap probability for the top masks-per-dim masks",
        action="store_true",
    )
    parser.add_argument(
        "--masks-per-dim",
        type=int,
        default=2,
        help="Number of heatmaps per dimension to show",
    )
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    # load config from file and command-line arguments
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    # prepare object that handles inference plus adds predictions on top of image
    coco_demo = COCODemo(
        cfg,
        confidence_threshold=args.confidence_threshold,
        show_mask_heatmaps=args.show_mask_heatmaps,
        masks_per_dim=args.masks_per_dim,
        min_image_size=args.min_image_size,
    )
    print("testing ...")
    # val_path='./../datasets/linemod/ape_train'
    # val_list=glob.glob(val_path+'/*.png')
    # val_label=os.path.join(val_path,'val.txt')
    # metric
    val_img_list = glob.glob(val_path + '/test_data/*.png')
    # LINEMOD camera intrinsics matrix.
    K = np.array([[572.4114, 0., 325.2611],
                  [0., 573.57043, 242.04899],
                  [0., 0., 1.]])
    # add_v = 0
    # rep_v = 0
    # length = 0
    ab_path=os.path.dirname(os.path.abspath(__file__))
    if not os.path.exists(os.path.join(ab_path,'Occ-LINEMOD')):
        os.mkdir(os.path.join(ab_path,'Occ-LINEMOD'))
    for imgpath in val_img_list[:]:
        per_dict = {}
        img = cv2.imread(imgpath)
        imgname = imgpath.split('/')[-1]
        # NOTE(review): the image is read twice; the second imread is redundant.
        img = cv2.imread(imgpath)
        try:
            # print(imgname)
            labels, box, score, kpts = coco_demo.run_on_opencv_image(img)
            labels_np = labels.cpu().numpy()
            np_kpts = kpts
            bb2d_pred=[]
            bb2d_gt=[]
            print(imgname)
            # Occluded-LINEMOD has 7 object classes (ids 1..7).
            for obj_id in range(1,8):
                ind = np.where(labels_np == obj_id)
                try:
                    obj_kpts = np_kpts[ind[0][0]] # [8,3]
                except:
                    # Object not detected in this image; skip it.
                    continue
                # NOTE(review): get_data returns K and overwrites the intrinsics
                # defined above — confirm the two are identical.
                pose_gt,model,K,pose_pred = get_data(val_path, obj_id, imgname, K, obj_kpts)
                box=get_obb(model)
                b2d_pred=project_K(box,pose_pred,K)
                b2d_gt=project_K(box,pose_gt,K)
                bb2d_pred.append(b2d_pred)
                bb2d_gt.append(b2d_gt)
            #vis
            bb82d_pred=np.array(bb2d_pred)
            bb82d_gt=np.array(bb2d_gt)
            draw_multi_bbox(img,bb82d_pred[:,None,...],bb82d_gt[:,None,...],os.path.join(ab_path,'Occ-LINEMOD',imgname))
        except:
            # NOTE(review): bare except hides all per-image failures — consider
            # logging the exception before skipping.
            continue
    return True
def get_data(root_path, obj_id, imgname, K, kpts, num_kpts=12):
    """Fetch ground truth for one object/image and estimate its pose via PnP.

    root_path : dataset root; obj_id : 1..7 (Occluded-LINEMOD class id);
    imgname : test image file name; K : 3x3 camera intrinsics; kpts : [N,3]
    predicted keypoints (only x,y are used); num_kpts : which FPS keypoint
    set to use (8, 12 or 16).  Returns (pose_gt, model, K, pose_pred).
    """
    obj_names = {1: "ape",
                 2: "can",
                 3: "cat",
                 4: "driller",
                 5: "duck",
                 6: "glue",
                 7: "holepuncher"}
    obj_name = obj_names[obj_id]
    # Ground-truth pose, mesh model and farthest-point-sampled 3D keypoints.
    pose_gt, model, fps = get_info(root_path, obj_name, imgname)
    # Solve PnP from the 2D predictions against the chosen 3D keypoint set.
    kpts_3d = fps[num_kpts]
    kpts_2d = kpts[:, :2]
    pose_pred = pnp(kpts_3d, kpts_2d, K)
    return pose_gt, model, K, pose_pred
def readply(filepath):
    """Load the vertex coordinates of a PLY mesh as an (N, 3) array."""
    vertex_data = PlyData.read(filepath).elements[0].data
    coords = [vertex_data[axis] for axis in ('x', 'y', 'z')]
    return np.stack(coords, axis=-1)
def get_info(root_path, obj_name, imgname):
    """Load GT pose, mesh model and FPS keypoint sets for one test image.

    The frame index is parsed from the image file name ("*_<idx>.png").
    Returns (pose_gt, model, fps) where fps maps 8/12/16 to keypoint arrays.
    """
    model_dir = os.path.join(root_path, 'models', obj_name)
    frame_idx = int(imgname.split("_")[-1].split('.')[0])
    # Ground-truth pose rendered by blender for this frame.
    pose_dir = os.path.join(root_path, "blender_poses", obj_name)
    pose_gt = np.load(os.path.join(pose_dir, "pose%d.npy" % frame_idx))
    # Mesh vertices.
    model = readply(os.path.join(model_dir, obj_name + '.ply'))
    # Farthest-point-sampled 3D keypoints at three densities.
    fps = {n: np.loadtxt(os.path.join(model_dir, 'fps_%d.txt' % n))
           for n in (8, 12, 16)}
    return pose_gt, model, fps
# Run the evaluation once per object id (1..7).
# NOTE(review): `i` is never passed to main(), so the same full evaluation is
# repeated 7 times with an identical argument; the commented call below
# suggests main() once accepted an object id — confirm intended.
for i in range(1,8):
    print("{}:".format(i))
    main('/home/whs/pose_estimation/maskrcnn-benchmark-master/datasets/occluded_linemod/data',)
    # main('/home/whs/pose_estimation/maskrcnn-benchmark-master/datasets/occluded_linemod/data', 6)
|
import sys
import re
import pandas as pd
import numpy as np
from tcrdist import repertoire_db
import warnings
def mixcr_to_tcrdist2(chain:str,
                      organism:str,
                      seqs_fn:str = None,
                      clones_fn:str = None):
    """
    Converts .clns.txt or .result.txt outputs from mixcr to tcrdist2
    formatted input.

    Parameters
    ----------
    chain : str
        'alpha', 'beta', 'gamma', or 'delta'
    organism : str
        'human' or 'mouse'
    seqs_fn : str or None
        path to mixcr parsed sequences file which can contain duplicates
    clones_fn : str or None
        path to mixcr parsed clones file (.clns.txt), clones have a clone_id and count

    Returns
    -------
    df : pd.DataFrame
        DataFrame with column names specified in notes.

    Example
    -------
    .. code-block:: python

        import os
        from tcrdist.repertoire import TCRrep
        from tcrdist import mixcr
        clones_fn = os.path.join('tcrdist',
                                 'test_files_compact',
                                 'SRR5130260.1.test.fastq.output.clns.txt')
        df = mixcr.mixcr_to_tcrdist2(chain = "delta",
                                     organism = "human",
                                     clones_fn = clones_fn)
        df = mixcr.remove_entries_with_invalid_vgene(df,
                                                     chain = "delta",
                                                     organism = "human")

    Notes
    -----
    A seqs_fn or clones_fn may be passed as input but not both.

    Columns of output `df` are:
    "v_[abgd]_gene", "d_[abgd]_gene", "j_[abgd]_gene",
    "cdr3_[abgd]_nucseq", "cdr3_[abgd]_aa" where [abgd] matches the
    chain argument.

    If clones_fn is specified, the df returned will also contain
    "clone_id" and "count" columns.
    """
    if seqs_fn is not None and clones_fn is not None:
        raise ValueError ("one of seq_fn or clones_fn must be left blank")
    if seqs_fn is None and clones_fn is None:
        raise ValueError ("one of seq_fn or clones_fn must be provided")
    gene_names = { 'alpha': ['v_a_gene','d_a_gene','j_a_gene',"cdr3_a_nucseq","cdr3_a_aa"],
                   'beta' : ['v_b_gene','d_b_gene','j_b_gene',"cdr3_b_nucseq","cdr3_b_aa"],
                   'gamma': ['v_g_gene','d_g_gene','j_g_gene',"cdr3_g_nucseq","cdr3_g_aa"],
                   'delta': ['v_d_gene','d_d_gene','j_d_gene',"cdr3_d_nucseq","cdr3_d_aa"]}
    if chain not in gene_names:
        raise KeyError ("chain must be 'alpha','beta','gamma', or 'delta'")
    hit_cols = ['allVHitsWithScore', 'allDHitsWithScore', 'allJHitsWithScore']
    if seqs_fn is not None:
        # fix: pass sep as a keyword — the positional form is rejected by
        # pandas >= 2.0 (read_csv arguments became keyword-only).
        seqs_df = pd.read_csv(seqs_fn, sep="\t")
        seqs_df = seqs_df[hit_cols + ['nSeqCDR3', 'aaSeqCDR3']].copy()
        for k in hit_cols:
            # cleanup (see helper definitions above): take only the top hit,
            # convert allele *00 to *01, reconcile TRAVxDVy to TRAVx/DVy
            seqs_df[k] = seqs_df[k].apply(_take_top_mixcr_gene_hit).\
                apply(_allele_00_to_01).\
                apply(_change_TRAVDV_to_TRAVdashDV)
        seqs_df = seqs_df.rename(columns = { 'allVHitsWithScore' : gene_names[chain][0],
                                             'allDHitsWithScore' : gene_names[chain][1],
                                             'allJHitsWithScore' : gene_names[chain][2],
                                             'nSeqCDR3'          : gene_names[chain][3],
                                             'aaSeqCDR3'         : gene_names[chain][4]})
        df = seqs_df.copy()
    elif clones_fn is not None:
        # fix: keyword sep (see above)
        clones_df = pd.read_csv(clones_fn, sep="\t")
        clones_df = clones_df[['cloneId', 'cloneCount'] + hit_cols + ['nSeqCDR3', 'aaSeqCDR3']].copy()
        for k in hit_cols:
            # cleanup (see helper definitions above): take only the top hit,
            # convert allele *00 to *01, reconcile TRAVxDVy to TRAVx/DVy
            clones_df[k] = clones_df[k].apply(_take_top_mixcr_gene_hit).\
                apply(_allele_00_to_01).\
                apply(_change_TRAVDV_to_TRAVdashDV)
        clones_df = clones_df.rename(columns = { 'cloneId'           : "clone_id",
                                                 'cloneCount'        : "count",
                                                 'allVHitsWithScore' : gene_names[chain][0],
                                                 'allDHitsWithScore' : gene_names[chain][1],
                                                 'allJHitsWithScore' : gene_names[chain][2],
                                                 'nSeqCDR3'          : gene_names[chain][3],
                                                 'aaSeqCDR3'         : gene_names[chain][4]})
        df = clones_df.copy()
    return df
def remove_entries_with_invalid_vgene(df, chain:str,organism:str):
    """
    Drop rows whose v-gene name is not in the tcrdist reference database.

    Validation is delegated to _validate_gene_names, which checks names
    against repertoire_db.RefGeneSet("gammadelta_db.tsv" or
    "alphabeta_db.tsv").all_genes and the specified chain.  Any dropped
    names are reported to stderr.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame produced by mixcr.mixcr_to_tcrdist2
    chain : str
        'alpha', 'beta', 'gamma', or 'delta'
    organism : str
        'human' or 'mouse'

    Returns
    -------
    df : pd.DataFrame
        a copied subset of the original dataframe containing only the rows
        with valid v gene names
    """
    v_gene_column = {'alpha': 'v_a_gene',
                     'beta' : 'v_b_gene',
                     'gamma': 'v_g_gene',
                     'delta': 'v_d_gene'}[chain]
    is_valid = _validate_gene_names(series = df[v_gene_column],
                                    chain = chain,
                                    organism = organism)
    dropped = df[is_valid == False]
    n_dropped = dropped.shape[0]
    if n_dropped > 0:
        sys.stderr.write(f"Because of invalid v_gene names, dropping {n_dropped} with names:\n")
        for bad_name in dropped[v_gene_column].unique():
            sys.stderr.write(f"{bad_name}\n")
    return df[is_valid].copy()
def _valid_cdr3(cdr3):
"""
Examples
--------
>>> _valid_cdr3("AAAA")
True
>>> _valid_cdr3("AA.A")
False
"""
amino_acids = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
valid = np.all([aa in amino_acids for aa in cdr3])
return valid
def remove_entries_with_invalid_cdr3(df, chain:str):
    """
    Drop rows whose CDR3 amino-acid string contains invalid characters.

    Parameters
    ----------
    df : pd.DataFrame
        DataFrame with a cdr3_[abgd]_aa column matching *chain*
    chain : str
        'alpha', 'beta', 'gamma', or 'delta'

    Returns
    -------
    pd.DataFrame
        a copied subset of df containing only rows with valid CDR3 strings
    """
    chain_names = { 'alpha': 'cdr3_a_aa',
                    'beta' : 'cdr3_b_aa',
                    'gamma': 'cdr3_g_aa',
                    'delta': 'cdr3_d_aa',}
    cdr3_x_aa = chain_names[chain]
    # fix: removed leftover debug print of the column name
    v = df[cdr3_x_aa].apply(_valid_cdr3)
    n_invalid_cdr3 = df[v == False].shape[0]
    invalid_names = df[v == False][cdr3_x_aa].unique()
    if n_invalid_cdr3 > 0:
        # fix: the message previously hard-coded "cdr3a" for every chain and
        # warned even when nothing was dropped
        warnings.warn(f"Because of invalid {cdr3_x_aa} values, dropping {n_invalid_cdr3}: {invalid_names}\n")
    return df[v].copy()
def _change_TRAVDV_to_TRAVdashDV(s:str):
"""
Reconciles mixcr name like TRAV29/DV5*01 to tcrdist2 name TRAV29DV5*01
Parameters
----------
s : str
Examples
--------
>>> _change_TRAVDV_to_TRAVdashDV('TRAV29DV5*01')
'TRAV29/DV5*01'
>>> _change_TRAVDV_to_TRAVdashDV('TRAV38-2DV8*01')
'TRAV38-2/DV8*01'
>>> _change_TRAVDV_to_TRAVdashDV('TRDV*01')
'TRDV*01'
Notes
-----
This reconciles such gene names to match the tcrdist2 reference db.
see database for more details: repertoire_db.RefGeneSet(db_file = "gammadelta_db.tsv").all_genes
"""
if isinstance(s, str):
m = re.match(pattern = "(TRAV[0-9]+)(DV.*)", string = s)
m2 = re.match(pattern = "(TRAV[0-9]+-[1-2])(DV.*)", string = s)
if m:
new_s = "/".join(m.groups())
return(new_s)
elif m2:
new_s = "/".join(m2.groups())
return(new_s)
else:
return(s)
else:
return(np.NaN)
def _allele_00_to_01(s:str):
"""
Converts gene names from X*00 to X*01
Parameters
----------
s : str
Example
-------
>>> _allele_00_to_01('TRDD3*00')
'TRDD3*01'
"""
if isinstance(s, str):
allele01 = s.replace("*00","*01")
else:
allele01 = np.NaN
return(allele01)
def _take_top_mixcr_gene_hit(s):
"""
Parameters
----------
s : str
Examples
--------
>> _take_top_mixcr_gene_hit('TRDD3*00(45),TRDD2*00(40)')
'TRDD3*00'
>> _take_top_mixcr_gene_hit('TRDD3*00(45)')
'TRDD3*00'
>> _take_top_mixcr_gene_hit(None)
None
Tests
-----
assert _take_top_mixcr_gene_hit('TRDD3*00(45),TRDD2*00(40)') == 'TRDD3*00'
assert _take_top_mixcr_gene_hit('TRDD3*00(45)') == 'TRDD3*00'
assert isinstance(_take_top_mixcr_gene_hit(np.NaN),float)
assert _take_top_mixcr_gene_hit(np.NaN) is np.NaN
"""
if isinstance(s, str):
top_hit = s.split(",")[0].split("(")[0]
else:
top_hit = np.NaN
return(top_hit)
def _validate_gene_names(series, chain:str, organism:str):
    """
    For efficiency reasons define the set of valid genes based on organism and chain,
    then test an entire series of gene names against it.

    Parameters
    ----------
    series : pd.Series
        series containing gene names to be validated
    chain : str
        'alpha','beta','gamma', or 'delta'
    organism : str
        'human' or 'mouse'

    Returns
    -------
    valid : pd.Series
        series of booleans where True means name is valid and in tcrdist database

    Raises
    ------
    KeyError
        If organism or chain is not one of the supported values.

    Example
    -------
    >>> df = pd.DataFrame({'v_d_gene':['TRDV3*01','TRDV1*01', 'TRAV29/DV5*01', 'TRAV38-2/DV8*01', "TRBV1*01"]})
    >>> v = mixcr._validate_gene_names( series = df['v_d_gene'], chain = 'delta', organism = 'human')
    >>> assert np.all(v == pd.Series([True,True,True,True,False]))
    True
    """
    # Check inputs
    if organism not in ['human', 'mouse']:
        # BUG FIX: error message previously had a mismatched quote ("...'mouse").
        raise KeyError("organism must be 'human' or 'mouse'")
    if chain not in ['alpha', 'beta', 'gamma', 'delta']:
        raise KeyError("chain must be 'alpha','beta','gamma', or 'delta'")
    # Pick the reference db by chain family. Within each db the first locus
    # (alpha or gamma) is stored under chain code 'A' and the second
    # (beta or delta) under 'B', mirroring the original per-chain branches.
    if chain in ['gamma', 'delta']:
        db_file = "gammadelta_db.tsv"
    else:
        db_file = "alphabeta_db.tsv"
    all_genes = repertoire_db.RefGeneSet(db_file=db_file).all_genes
    # Lookup appropriate organism
    all_genes = all_genes[organism]
    chain_code = 'A' if chain in ['alpha', 'gamma'] else 'B'
    # Use a set for O(1) membership tests across the whole series.
    valid_names = {x for x in all_genes if all_genes[x].chain == chain_code}
    valid = series.apply(lambda x: x in valid_names)
    return valid
|
# MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from bitcoinetl.enumeration.chain import Chain
from bitcoinetl.mappers.transaction_mapper import BtcTransactionMapper
from bitcoinetl.service.btc_service import BtcService
from blockchainetl.executors.batch_work_executor import BatchWorkExecutor
from blockchainetl.jobs.base_job import BaseJob
from blockchainetl.utils import dynamic_batch_iterator
# Add required_signatures, type, addresses, and value to transaction inputs
class EnrichTransactionsJob(BaseJob):
    """Enriches transaction inputs with required_signatures, type,
    addresses, and value copied from the outputs they spend."""

    def __init__(
            self,
            transactions_iterable,
            batch_size,
            bitcoin_rpc,
            max_workers,
            item_exporter,
            chain=Chain.BITCOIN):
        self.transactions_iterable = transactions_iterable
        self.btc_service = BtcService(bitcoin_rpc, chain)
        self.batch_size = batch_size
        self.batch_work_executor = BatchWorkExecutor(batch_size, max_workers, exponential_backoff=False)
        self.item_exporter = item_exporter
        self.transaction_mapper = BtcTransactionMapper()

    def _start(self):
        self.item_exporter.open()

    def _export(self):
        self.batch_work_executor.execute(self.transactions_iterable, self._enrich_transactions)

    def _enrich_transactions(self, transactions):
        """Copy output metadata onto every spending input, then export."""
        txs = [self.transaction_mapper.dict_to_transaction(tx_dict) for tx_dict in transactions]
        # Flatten inputs across all transactions so we can fetch the spent
        # transactions in RPC batches rather than one tx at a time.
        flat_inputs = [tx_input for tx in txs for tx_input in tx.inputs]
        for input_batch in dynamic_batch_iterator(flat_inputs, lambda: self.batch_size):
            spent_tx_map = self._get_input_transactions_as_map(input_batch)
            for tx_input in input_batch:
                if tx_input.spent_transaction_hash is None:
                    continue  # coinbase-style input: nothing to enrich
                output = self._get_output_for_input(tx_input, spent_tx_map)
                if output is None:
                    continue
                tx_input.required_signatures = output.required_signatures
                tx_input.type = output.type
                tx_input.addresses = output.addresses
                tx_input.value = output.value
        for tx in txs:
            self.item_exporter.export_item(self.transaction_mapper.transaction_to_dict(tx))

    def _get_input_transactions_as_map(self, transaction_inputs):
        """Fetch every distinct spent transaction and key it by hash."""
        hashes = {tx_input.spent_transaction_hash for tx_input in transaction_inputs
                  if tx_input.spent_transaction_hash is not None}
        if not hashes:
            return {}
        fetched = self.btc_service.get_transactions_by_hashes(hashes)
        return {tx.hash: tx for tx in fetched}

    def _get_output_for_input(self, transaction_input, input_transactions_map):
        """Resolve the output that *transaction_input* spends, or raise."""
        spent_hash = transaction_input.spent_transaction_hash
        spent_tx = input_transactions_map.get(spent_hash)
        if spent_tx is None:
            raise ValueError('Input transaction with hash {} not found'.format(spent_hash))
        output_index = transaction_input.spent_output_index
        outputs = spent_tx.outputs
        if outputs is None or len(outputs) < (output_index + 1):
            raise ValueError(
                'There is no output with index {} in transaction with hash {}'.format(
                    output_index, spent_hash))
        return outputs[output_index]

    def _end(self):
        self.batch_work_executor.shutdown()
        self.item_exporter.close()
|
from PhysicsTools.Heppy.analyzers.gen.GeneratorAnalyzer import GeneratorAnalyzer
from PhysicsTools.Heppy.analyzers.gen.GenHeavyFlavourAnalyzer import GenHeavyFlavourAnalyzer
from PhysicsTools.Heppy.analyzers.gen.HiggsDecayModeAnalyzer import HiggsDecayModeAnalyzer
from PhysicsTools.Heppy.analyzers.gen.PDFWeightsAnalyzer import PDFWeightsAnalyzer
from PhysicsTools.Heppy.analyzers.gen.LHEWeightAnalyzer import LHEWeightAnalyzer
|
from django.http import HttpResponse
from mii_sorter.tasks import sort
def start_sort(request):
    """Kick off the asynchronous sort task and acknowledge immediately.

    The task runs out-of-process (``.delay()`` queues it); the response
    does not wait for it to finish.
    """
    sort.delay()
    return HttpResponse('OK, sort started')
"""
Contains all classes and functions that provide utility of some kind, such as
debug printing capability.
"""
class DebugConsole:
    """
    A simple mechanism for creating and writing colored output messages
    to the console.

    Attributes:
        colors (dict): Per-category message prefixes — ANSI-colored
            markers when color is allowed, plain-text labels otherwise.
        is_verbose (bool): Whether or not non-fatal messages should be shown.
    """

    def __init__(self, use_color, is_verbose):
        if use_color:
            self.colors = {
                "complete": "*\033[92m*\033[0m* ",
                "error": " \033[91m*\033[0m ",
                "fatal": "*\033[91m*\033[0m* ",
                "success": " \033[92m*\033[0m ",
                "warn": " \033[93m*\033[0m ",
            }
        else:
            self.colors = {
                "complete": "Complete: ",
                "error": "Error: ",
                "fatal": "Fatal Error: ",
                "success": "Success: ",
                "warn": "Warning: ",
            }
        self.is_verbose = is_verbose

    def _emit(self, category, msg, objs):
        """Print the category prefix followed by the formatted message."""
        print(self.colors[category] + msg.format(*objs))

    def complete(self, msg, *objs):
        """
        Writes the specified completion message to the console if verbose
        messaging is allowed.

        :param msg: The message to write.
        :param objs: The collection of objects to format.
        """
        if self.is_verbose:
            self._emit("complete", msg, objs)

    def error(self, msg, *objs):
        """
        Writes the specified error message to the console if verbose
        messaging is allowed.

        :param msg: The message to write.
        :param objs: The collection of objects to format.
        """
        if self.is_verbose:
            self._emit("error", msg, objs)

    def fatal(self, msg, *objs):
        """
        Writes the specified fatal error message to the console.

        Fatal error messages are always shown (regardless of verbosity)
        and are expected to be followed by immediate program termination.

        :param msg: The message to write.
        :param objs: The collection of objects to format.
        """
        self._emit("fatal", msg, objs)

    def info(self, msg, *objs):
        """
        Writes the specified information message (no prefix) to the
        console if verbose messaging is allowed.

        :param msg: The message to write.
        :param objs: The collection of objects to format.
        """
        if self.is_verbose:
            print(msg.format(*objs))

    def success(self, msg, *objs):
        """
        Writes the specified success message to the console if verbose
        messaging is allowed.

        :param msg: The message to write.
        :param objs: The collection of objects to format.
        """
        if self.is_verbose:
            self._emit("success", msg, objs)

    def warn(self, msg, *objs):
        """
        Writes the specified warning message to the console if verbose
        messaging is allowed.

        :param msg: The message to write.
        :param objs: The collection of objects to format.
        """
        if self.is_verbose:
            self._emit("warn", msg, objs)
|
import sys
from clcrypto import generate_salt, check_password
from models import User, make_connection
import argparse
def configure_parser():
    """Build the command-line parser for the user-messaging script.

    Returns
    -------
    argparse.ArgumentParser
        Parser accepting -u/-p credentials, -l (list messages),
        -s (message body, stored as ``message``) and -t (recipient).
    """
    parser = argparse.ArgumentParser(prog='python manage_user.py')
    parser.add_argument('-u', '--username')
    parser.add_argument('-p', '--password')
    # Help texts are user-facing strings and intentionally stay in Polish.
    parser.add_argument(
        '-l', '--list', action='store_true',
        help='żądanie wylistowania wszystkich wiadomości')
    parser.add_argument(
        '-s', '--send', dest='message', help='treść wiadomości')
    parser.add_argument(
        '-t', '--to', dest='to', help='adresat awiadomości')
    return parser
def print_all_message(args, cursor):
    """Placeholder: list all messages for the authenticated user.

    Intended behaviour (per the dispatch in __main__): verify the -u/-p
    credentials, then fetch every message addressed to the user from the
    database and print them newest-first. Not implemented yet.
    """
    pass
def send_message_to_user(args, cursor):
    """Placeholder: send a message to another user.

    Intended behaviour (per the dispatch in __main__): verify the -u/-p
    credentials, check that the -t recipient exists, then store the -s
    message in the database. Not implemented yet.
    """
    pass
if __name__ == '__main__':
    parser = configure_parser()
    args = parser.parse_args(sys.argv[1:])
    cnx = make_connection()
    cursor = cnx.cursor()
    try:
        has_credentials = args.username and args.password
        if has_credentials and args.list:
            # -l: check the login/password from -u and -p, then fetch all
            # messages addressed to this user and show them newest-first.
            print_all_message(args, cursor)
        elif has_credentials and args.message and args.to:
            # -s: check the login/password from -u and -p, verify that the
            # -t recipient was given and exists, then store the -s message
            # in the database.
            send_message_to_user(args, cursor)
        else:
            # Any other combination of options: show the usage help.
            parser.print_help()
    finally:
        # Always release the cursor and connection, even on error.
        cursor.close()
        cnx.close()
|
import numpy as np
from psopy import init_feasible
from psopy import minimize
from scipy.optimize import rosen
class TestQuick:
    """Quick simple tests for early validation."""

    def test_unconstrained(self):
        """Test against the Rosenbrock function."""
        swarm = np.random.uniform(0, 2, (1000, 5))
        expected = np.ones(5)
        result = minimize(rosen, swarm)
        assert result.success, result.message
        np.testing.assert_array_almost_equal(expected, result.x, 3)

    def test_constrained(self):
        """Test against the following function::
            y = (x0 - 1)^2 + (x1 - 2.5)^2
        under the constraints::
            x0 - 2.x1 + 2 >= 0
            -x0 - 2.x1 + 6 >= 0
            -x0 + 2.x1 + 2 >= 0
            x0, x1 >= 0
        """
        def objective(x):
            return (x[0] - 1) ** 2 + (x[1] - 2.5) ** 2

        constraint_funs = [
            lambda x: x[0] - 2 * x[1] + 2,
            lambda x: -x[0] - 2 * x[1] + 6,
            lambda x: -x[0] + 2 * x[1] + 2,
            lambda x: x[0],
            lambda x: x[1],
        ]
        cons = tuple({'type': 'ineq', 'fun': fun} for fun in constraint_funs)
        swarm = init_feasible(cons, low=0, high=2, shape=(1000, 2))
        options = {'g_rate': 1., 'l_rate': 1., 'max_velocity': 4.,
                   'stable_iter': 50}
        expected = np.array([1.4, 1.7])
        result = minimize(objective, swarm, constraints=cons, options=options)
        assert result.success, result.message
        np.testing.assert_array_almost_equal(expected, result.x, 3)
|
"""
Operational Sutras
"""
from sanskrit_parser.base.sanskrit_base import SanskritImmutableString, SLP1
from decimal import Decimal
from copy import deepcopy
from sanskrit_parser.generator.paninian_object import PaninianObject
import logging
logger = logging.getLogger(__name__)
# Global Domains
class GlobalDomains(object):
    """Tracks which grammatical domain is currently active.

    Exactly one domain flag is True at a time; "saMjYA" starts active.
    """

    def __init__(self):
        names = ["saMjYA", "upadeSa", "prakfti", "pratyaya",
                 "aNga", "pada", "saMhitA", "standard"]
        # Only saMjYA is active initially.
        self.domains = {name: (name == "saMjYA") for name in names}

    def isdomain(self, d):
        """Return True if domain *d* is currently active."""
        return self.domains[d]

    def set_domain(self, d):
        """Activate domain *d* and deactivate every other domain."""
        for name in self.domains:
            self.domains[name] = (name == d)

    def active_domain(self):
        """Return the list of currently active domain names."""
        return [name for name, active in self.domains.items() if active]
# Base class
class Sutra(object):
    """
    Base class for Paninian sutras.

    Parameters
    ----------
    name : str or SanskritImmutableString
        Sutra name; plain strings are wrapped in SanskritImmutableString.
    aps : str or tuple
        Adhyaya.pada.sutra reference, either a dotted string such as
        "6.1.77" (optionally with a 4th sub-sutra/vartikam component)
        or a tuple of ints.
    optional : bool
        Whether application of this sutra is optional.
    overrides : optional
        Sutras overridden by this one.
    """

    def __init__(self, name, aps, optional=False, overrides=None):
        if isinstance(name, str):
            self.name = SanskritImmutableString(name)
        else:
            self.name = name
        # Sub-sutra/vartikam fraction; nonzero only for 4-part string refs.
        # BUG FIX: aps_sub was previously only assigned on the str branch,
        # so passing aps as a tuple raised NameError at the _aps_num line.
        aps_sub = 0
        if isinstance(aps, str):
            self.aps = aps  # Adhaya.pada.sutra
            aps_t = [int(_x) for _x in aps.split(".")]
            if len(aps_t) > 3:  # Subsutra/Vartikam
                aps_sub = Decimal("0." + str(aps_t[-1]))
            self._aps_tuple = aps_t
        elif isinstance(aps, tuple):
            aps_t = aps
            self._aps_tuple = aps_t
            self.aps = '.'.join([str(x) for x in list(aps_t)])
        # Numeric ordering key: adhyaya*10000 + pada*1000 + sutra (+ fraction).
        self._aps_num = aps_t[2] + aps_t[1] * 1000 + aps_t[0] * 10000 + aps_sub
        self.overrides = overrides
        self.optional = optional
        logger.info(f"Initialized {self}: {self._aps_num} Optional:{self.optional}")

    def __str__(self):
        # Optional sutras are marked with a trailing asterisk.
        if self.optional:
            _o = "*"
        else:
            _o = ""
        return f"{self.aps:7}: {str(self.name)} {_o}"
class LRSutra(Sutra):
    def __init__(self, name, aps, cond, xform, insert=None, domain=None,
                 update=None, optional=False, bahiranga=1, overrides=None):
        '''
        Sutra Class that expects a left and right input

        Parameters
        ----------
        name, aps, optional, overrides :
            Passed through to Sutra.
        cond : callable or None
            Predicate over the execution environment (see _env); when
            None the condition is treated as always-true.
        xform : callable or None
            Maps the environment to a pair of replacement strings for the
            left and right objects (applied in operate()).
        insert : callable or None
            Maps the environment to a mapping of {index: object-or-string}
            to be spliced into the result (applied in insert()).
        domain : callable or None
            Predicate over GlobalDomains; defaults to requiring the
            "standard" domain.
        update : callable or None
            Hook run over the environment in update(); may replace the
            output objects via env["olp"] / env["orp"].
        bahiranga : int
            Bahiranga score; smaller wins.
        '''
        super().__init__(name, aps, optional, overrides)
        self.domain = domain
        self.cond = cond
        self.xform = xform
        self.update_f = update
        self.insertx = insert
        self.bahiranga = bahiranga  # Bahiranga score. Smaller wins

    def inAdhikara(self, context):
        # NOTE(review): self.adhikara is not defined in this class or in
        # Sutra — presumably supplied by a subclass or assigned externally;
        # confirm before relying on this method.
        return self.adhikara(context)

    def isTriggered(self, s1, s2, domains):
        """Return True when this sutra applies to the (s1, s2) view:
        both the domain predicate and the condition predicate hold."""
        logger.debug(f"Checking {self} View: {s1} {s2}")
        env = _env(s1, s2)
        if self.domain is not None:
            t = self.domain(domains)
        else:
            # No explicit domain predicate: require the standard domain.
            t = domains.isdomain("standard")
        if self.cond is not None:
            c = self.cond(env)
        else:
            c = True
        logger.debug(f"Check Result {c and t} for {self}")
        return c and t

    def update(self, s1, s2, o1, o2, domains):
        """Run the update hook (if any) and return the possibly-replaced
        left/right output objects (o1, o2)."""
        env = _env(s1, s2)
        # Expose the output objects so update_f can replace them in place.
        env["olp"] = o1
        env["orp"] = o2
        if self.update_f is not None:
            self.update_f(env, domains)
        return env["olp"], env["orp"]

    def operate(self, s1, s2):
        """Apply the xform to deep copies of s1/s2 and return the copies;
        the inputs themselves are left untouched."""
        # We take the string tuple returned, and update s1, s2
        rs1 = deepcopy(s1)
        rs2 = deepcopy(s2)
        if self.xform is not None:
            env = _env(s1, s2)
            ret = self.xform(env)
            rs1.update(ret[0], SLP1)
            rs2.update(ret[1], SLP1)
        return rs1, rs2

    def insert(self, s1, s2):
        """Return [s1, s2] with any objects produced by the insert hook
        spliced in at their requested indices; plain strings are wrapped
        as PaninianObject first. Without an insert hook, returns (s1, s2)
        unchanged."""
        if self.insertx is not None:
            env = _env(s1, s2)
            itx = self.insertx(env)
            r = [s1, s2]
            # NOTE: insertion order follows the mapping's iteration order,
            # and each insert shifts later indices — callers control both.
            for i in itx:
                if not isinstance(itx[i], PaninianObject):
                    assert isinstance(itx[i], str)
                    itx[i] = PaninianObject(itx[i])
                r.insert(i, itx[i])
            logger.debug(f"After insertion {r}")
            return r
        else:
            return(s1, s2)
def _env(s1, s2):
    """Build the execution environment dict for sutra predicates and
    transforms.

    Keys
    ----
    lp / rp : the left and right objects themselves
    l / r   : last char of left / first char of right
    ll / lc : second-to-last char of left / left minus its last char
    rr / rc : second char of right / right minus its first char
    Missing components are represented as empty SanskritImmutableStrings.
    """
    left = s1.canonical()
    right = s2.canonical()
    env = {"lp": s1, "rp": s2}
    if left == "":
        env["l"] = SanskritImmutableString("")
    else:
        env["l"] = SanskritImmutableString(left[-1], SLP1)
    if right == "":
        env["r"] = SanskritImmutableString("")
    else:
        env["r"] = SanskritImmutableString(right[0], SLP1)
    if len(left) > 1:
        env["ll"] = SanskritImmutableString(left[-2], SLP1)
        env["lc"] = SanskritImmutableString(left[:-1], SLP1)
    else:
        env["ll"] = SanskritImmutableString("")
        env["lc"] = SanskritImmutableString("")
    if len(right) > 1:
        env["rr"] = SanskritImmutableString(right[1], SLP1)
        env["rc"] = SanskritImmutableString(right[1:], SLP1)
    else:
        env["rr"] = SanskritImmutableString("", SLP1)
        env["rc"] = SanskritImmutableString("", SLP1)
    return env
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.