from django.apps import AppConfig
from django.contrib.auth.signals import user_logged_in
from .config import access_and_compliance_group_name
class AccessAndComplianceConfig(AppConfig):
name = 'django_access_and_compliance'
    verbose_name = 'Access and Compliance'
def ready(self):
from .signals import ensure_compliant
user_logged_in.connect(ensure_compliant)
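
# Illustrative sketch (not part of the original app): `user_logged_in` receivers
# follow Django's documented signal signature below. The group-sync body and the
# name `ensure_compliant_sketch` are assumptions for illustration only; the real
# handler is imported from .signals as `ensure_compliant` in ready() above.
def ensure_compliant_sketch(sender, request, user, **kwargs):
    from django.contrib.auth.models import Group
    group, _ = Group.objects.get_or_create(name=access_and_compliance_group_name)
    user.groups.add(group)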
|
from . import APP_NAME, PROJECT_NAME, APP_BASE
def application_routes(config):
config.add_route(APP_NAME + '.home', '/')
config.add_static_view('static', 'static', cache_max_age=3600)
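
# Illustrative wiring sketch (not part of the original package): a Pyramid app
# factory typically pulls these routes in via config.include(), which invokes
# application_routes(config) defined above. `make_app` is a hypothetical name.
def make_app(global_config, **settings):
    from pyramid.config import Configurator
    config = Configurator(settings=settings)
    config.include(application_routes)
    config.scan()
    return config.make_wsgi_app()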
|
import attr
@attr.dataclass
class Pembelajaran:
rombongan_belajar_id: str
status_di_kurikulum_str: str
mata_pelajaran_id: int
nama_mata_pelajaran: str
sk_mengajar: str
ptk_terdaftar_id: str
tanggal_sk_mengajar: str
jam_mengajar_per_minggu: int
induk_pembelajaran_id: str = ""
status_di_kurikulum: int = 9
semester_id: str = "20201"
pembelajaran_id: str = "Admin.model.PembelajaranNew-1"
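
# Example construction (illustrative only; the field values below are made-up
# placeholders, not from the original source). Fields without defaults must be
# supplied; semester_id, status_di_kurikulum, etc. fall back to the defaults above.
contoh = Pembelajaran(
    rombongan_belajar_id="rb-001",
    status_di_kurikulum_str="Aktif",
    mata_pelajaran_id=101,
    nama_mata_pelajaran="Matematika",
    sk_mengajar="SK/2020/001",
    ptk_terdaftar_id="ptk-123",
    tanggal_sk_mengajar="2020-07-13",
    jam_mengajar_per_minggu=4,
)
assert contoh.semester_id == "20201"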
|
import cmatrix as cmat
import qsim
import qgates
# demonstration of basic entangled state simulation (bell state)
# initial state |00>
state = qsim.create_state([0, 0])
# create state 1/sqrt(2)*(|00> + |10>)
state = qsim.apply_gate(state, qgates.H, 0)
# apply cnot to combined state, producing 1/sqrt(2)*(|00> + |11>)
state = qsim.apply_gate(state, qgates.cnot, 0)
# measure state
print("measurement: ", qsim.measure_all_standard(state))
# subsequent measurements must be the same since measuring changes the state
print("measurement: ", qsim.measure_all_standard(state))
print("measurement: ", qsim.measure_all_standard(state))
print("measurement: ", qsim.measure_all_standard(state))
# measurement confirms entangled state!
|
from collections import namedtuple
from marshmallow import Schema, fields, post_load, pre_dump
from werkzeug.exceptions import Conflict
from .exceptions import FlumpUnprocessableEntity
EntityData = namedtuple('EntityData', ('id', 'type', 'attributes', 'meta'))
ResponseData = namedtuple('ResponseData', ('data', 'links'))
EntityMetaData = namedtuple('EntityMetaData', ('etag',))
ManyResponseData = namedtuple('ManyResponseData', ('data', 'links', 'meta'))
class EntityMetaSchema(Schema):
etag = fields.Str(dump_only=True)
def make_data_schema(
resource_schema, only=None, partial=False, id_required=False
):
"""
Constructs a Schema describing the main jsonapi format for the
current `resource_schema`.
:param resource_schema: The schema describing the resource. Should be
an instance of :class:`marshmallow.Schema`
:param only: A list or tuple of fields to serialize on the
`resource_schema`, if None, all fields will be
serialized.
:param partial: If True, ignore missing fields on the
`resource_schema` when deserializing.
    :param id_required: Whether or not the `id` field of the returned
                        `JsonApiSchema` is required.
:returns: :class:`make_data_schema.JsonApiSchema`
"""
class JsonApiSchema(Schema):
id = fields.Str(required=id_required)
type = fields.Str(required=True)
attributes = fields.Nested(resource_schema,
required=True, only=only, partial=partial)
meta = fields.Nested(EntityMetaSchema, dump_only=True)
@post_load
def to_entity_data(self, data):
"""
Automagically load the current data to the `EntityData`
namedtuple format. When loading we do not have an ID so this
will be None.
"""
return EntityData(data.get('id'), data['type'],
data['attributes'], None)
return JsonApiSchema
def make_response_schema(resource_schema, only=None, many=False):
"""
    Constructs a Schema describing the format of a response according to jsonapi.
:param resource_schema: The schema describing the resource. Should be
an instance of :class:`marshmallow.Schema`
:param only: A list or tuple of fields to serialize on the
`resource_schema`, if None, all fields will be
serialized.
:param many: Should be set to True if we are returning multiple
entities.
:returns: :class:`make_response_schema.JsonApiResponseSchema`
"""
data_schema = make_data_schema(resource_schema, only=only)
class LinkSchema(Schema):
self = fields.Str()
first = fields.Str()
last = fields.Str()
next = fields.Str()
prev = fields.Str()
class MetaSchema(Schema):
total_count = fields.Integer()
# This may contain extra data depending on the context. For instance
# the PageSizePagination mixin makes use of this field to include the
# current page and size in the response.
extra = fields.Dict()
class JsonApiResponseSchema(Schema):
data = fields.Nested(data_schema, many=many)
links = fields.Nested(LinkSchema)
meta = fields.Nested(MetaSchema)
return JsonApiResponseSchema
def make_entity_schema(resource_schema, resource_name, data_schema):
"""
Constructs a schema describing the format of POST/PATCH requests for
jsonapi. Provides automatic error checking for the data format.
:param resource_schema: The schema describing the resource. Should be
an instance of :class:`marshmallow.Schema`
:param resource_name: The name of the resource type defined for the API.
    :param data_schema: An instance of
                        :class:`make_data_schema.JsonApiSchema`.
:returns: :class:`make_entity_schema.JsonApiPostSchema`
"""
class JsonApiPostSchema(Schema):
data = fields.Nested(data_schema)
@post_load
def check_for_errors(self, loaded_data):
"""
            Checks for errors with the ID or resource type, raising the
errors specified in jsonapi if found.
"""
resource = loaded_data.get('data')
if not resource:
raise FlumpUnprocessableEntity
if resource.type != resource_name:
err_msg = (
'Url specified the creation of "{}" but type '
'specified "{}".'
).format(resource_name, resource.type)
raise Conflict(err_msg)
return resource
return JsonApiPostSchema
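
# Usage sketch (illustrative, not part of the original module): `ArticleSchema`
# below is a hypothetical resource schema; the factories above accept any
# marshmallow Schema subclass. Note the single-argument @post_load hooks above
# target marshmallow 2.x semantics.
class ArticleSchema(Schema):
    title = fields.Str(required=True)


article_data_schema = make_data_schema(ArticleSchema, id_required=False)
article_response_schema = make_response_schema(ArticleSchema, many=True)
article_post_schema = make_entity_schema(ArticleSchema, 'article', article_data_schema)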
|
from flask import Blueprint, render_template
from . import db
from flask_login import login_required, current_user
import os
import pandas as pd
import plotly.express as px
main = Blueprint('main', __name__)
# filepath = os.path.join(os.path.dirname(__file__),'results.csv')
# open_read = open(filepath,'r')
# page =''
# while True:
# read_data = open_read.readline()
# page += '<p>%s</p>' % read_data
# if open_read.readline() == '':
# break
# @main.route("/data")
# def data():
# return page
@main.route("/data")
df = pd.read_csv('results.csv')
df.head()
return render_template('dataset.html')
@main.route('/')
def index():
return render_template('index.html')
@main.route('/profile')
@login_required
def profile():
    return render_template('profile.html', name=current_user.name)
|
# coding: utf-8
print('Já passoooou!!!')  # Portuguese: "It's already passed!!!"
|
from django.urls import path
from . import views
urlpatterns = [
path('district', views.add_district, name='district'),
path('Region', views.add_Region, name='Region'),
path('Ministry', views.add_Ministry, name='Ministry'),
]
|
import hashlib
import magic
import os
import re
import struct
import subprocess
from pixaxe.steg import ImageInfo, NotSupported
from assemblyline_v4_service.common.base import ServiceBase
from assemblyline_v4_service.common.result import Result, ResultSection, BODY_FORMAT
from assemblyline.common.str_utils import safe_str
from assemblyline_v4_service.common.utils import TimeoutException, set_death_signal
from functools import reduce
class Pixaxe(ServiceBase):
PAT_FILEMARKERS = {
# Header, Trailer, additional methods
'bmp': ['\x42\x4D', None, 'bmp_dump'],
'gif': ['\x47\x49\x46\x38.\x61.{19,}\x2C.{9,}', '\x00\x3B', None],
'jpeg': ['\xFF\xD8.{18}\xFF\xDB.{3,}\xFF\xDA.{13,}', '\xFF\xD9', None],
'jpeg2000': ['\x00\x00\x00\x0C\x6A\x50\x20\x20\x0D\x0A', None, 'jp2_dump'],
'png': ['\x89\x50\x4E\x47', '\x49\x45\x4E\x44.{4}', None],
}
XMP_TAGGED_VALUES = {
'DOCUMENT ID': 'XMP_DOCUMENT_ID',
'DERIVED FROM DOCUMENT ID': 'XMP_DERIVED_DOCUMENT_ID',
'INSTANCE ID': 'XMP_INSTANCE_ID',
'XMP TOOLKIT': 'XMP_TOOLKIT',
'CREATOR TOOL': 'XMP_CREATOR_TOOL'
}
def __init__(self, config=None):
super(Pixaxe, self).__init__(config)
self.sha = None
def start(self):
self.log.debug("Pixaxe service started")
    @staticmethod
    def getfromdict(data, mapList):
        """Fetches a value from a nested dictionary by walking the keys in mapList.

        Returns the value at that key path, or None if a key is missing.
        """
        try:
            match = reduce(lambda d, k: d[k], mapList, data)
        except KeyError:
            match = None
        return match
def setindict(self, data, mapList, value):
"""Sets value in a nested dictionary using getfromdict method.
Args:
data: Dictionary to input data.
mapList: List of dictionary keys to iterate through.
value: Value of final key to place in dictionary.
Returns:
            None; data is modified in place (a missing intermediate key raises TypeError).
"""
self.getfromdict(data, mapList[:-1])[mapList[-1]] = value
@staticmethod
def mimetype(f, t):
"""Determine if Magic-MIME file type of data matches desired file type.
Args:
f: Raw data to evaluate.
t: File type to compare (string).
Returns:
True if file type matches t, or False.
"""
is_t = False
m = magic.Magic(mime=True)
ftype = m.from_buffer(f)
if t in ftype:
is_t = True
return is_t
def bmp_dump(self, data):
"""BMP embedded file extraction. Looks for traits of known BMP file structure to find embedded BMP data.
Args:
data: Raw data to search.
Returns:
BMP data if discovered, or original data.
"""
# noinspection PyBroadException
try:
# Byte offset to start of image
soi = struct.unpack('<I', data[10:14])[0]
# Size of image data, including padding -- potentially unreliable
sizei = struct.unpack('<I', data[34:38])[0]
# Width in pixels
# wi = struct.unpack('<I', data[18:22])[0]
# Height in pixels
# hi = struct.unpack('<I', data[22:26])[0]
# Depth
# di = struct.unpack('<H', data[26:28])[0]
# Bits per pixel
# bpi = struct.unpack('<H', data[28:30])[0]
# Image bytes
# sizei = (wi*hi*di*bpi)/8
bmp_data = data[0:(soi + sizei)]
verify_bmp = self.mimetype(bmp_data, 'image')
if not verify_bmp:
return data
return bmp_data
except:
return data
    def jp2_dump(self, data):
"""Looks for traits of known JPEG 2000 file structure to confirm that data is likely JPEG 2000 data.
Args:
data: Raw data to search.
Returns:
JPEG 2000 data if discovered, or original data.
"""
ftyps = {
'\x6a\x70\x32\x20': 'jp2',
'\x6a\x70\x78\x20': 'jpf',
'\x6a\x70\x6d\x20': 'jpm',
'\x6d\x6a\x70\x32': 'mj2',
'\xFF\x4F\xFF\x51': 'j2c'
}
trailer = '\xFF\xD9'
cdata = data
end = 0
try:
jtype = data[20:24]
if jtype in ftyps:
file_type = ftyps[jtype]
self.log.debug(file_type)
else:
return
while True:
findend = cdata.find(trailer)
if findend == -1:
return
else:
end += findend + 2
# Another jp2 codestream
if cdata[findend + 6:findend + 10] == 'jp2c':
cdata = cdata[findend + 2:]
# Possible .mov file types
elif file_type == 'mj2' and cdata[findend + 6:findend + 10] in \
['free', 'mdat', 'moov', 'pnot', 'skip', 'wide']:
msize = struct.unpack('>I', cdata[findend + 2:findend + 6])[0]
jp2_data = data[0:end + msize]
break
else:
jp2_data = data[0:end]
break
return jp2_data
except:
return
def find_additional_content(self, alfile):
"""Looks for appended file content attached to an image.
Args:
alfile: AL submission file path.
Returns:
Embedded file data if found, or None.
"""
with open(alfile, 'rb') as f:
data = f.read()
for ftype, tinfo in iter(self.PAT_FILEMARKERS.items()):
# Build up the regex
if tinfo[1] is not None:
embed_regex = re.compile(tinfo[0] + '.+' + tinfo[1], re.DOTALL)
else:
embed_regex = re.compile(tinfo[0] + '.+', re.DOTALL)
# Find the pattern that should match the image.
img_match = re.match(embed_regex, str(data))
if img_match:
img_data = img_match.group()
# Go to extraction module if there is one
if tinfo[2] is not None:
img_data = getattr(self, tinfo[2])(img_data)
# Otherwise extract data as-is (regex is considered good enough)
leftovers = data.replace(img_data, b"")
# Remove trailing NULL bytes
leftovers = re.sub(b'[\x00]*$', b'', leftovers)
if len(leftovers) > 15:
return leftovers
return
def tesseract_call(self, infile, outfile):
"""Runs command-line tool Tesseract. Arguments:
Args:
infile: File path.
outfile: File path of output data.
Returns:
Standard output and error output of command.
"""
cmd = ['tesseract', infile, outfile]
# Process the command and save the csv result in the result object
return self.process(command=cmd, name="tesseract")
def convert_img(self, infile, outfile):
"""Runs command-line tool convert. Arguments:
# -resize 200% Enlarge image by 200%
Args:
infile: File path.
outfile: File path of output data.
Returns:
Standard output and error output of command.
"""
cmd = ['convert', '-resize', '200%', infile, outfile]
return self.process(command=cmd, name="convert")
def process(self, command, name):
"""Runs command-line tool argument using the subprocess module.
Args:
command: List of command-line arguments.
name: Name of application being run (for logger output).
Returns:
Standard output and error output of command.
"""
try:
process = subprocess.run(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=set_death_signal(),
timeout=self.config['command_timeout'])
stdout, stderr = process.stdout, process.stderr
            # Normalize empty stderr output to None
            if stderr is not None and len(stderr) == 0:
                stderr = None
        except (TimeoutException, subprocess.TimeoutExpired) as e:
self.log.debug("Timeout exception for file {}, with process {}:".format(self.sha, name) + str(e))
stdout = None
stderr = None
except Exception as e:
self.log.warning("{} failed to run on sample {}. Reason: ".format(name, self.sha) + str(e))
stdout = None
stderr = None
return stdout, stderr
def assess_output(self, output, req):
"""Filters and writes output produced by OCR engine Tesseract.
Args:
output: Path to CSV file containing Tesseract output.
req: AL request object (to submit extracted file).
Returns:
Filtered output string, or NULL string if no usable output found.
"""
ocr_strings = ""
output = "{}.txt".format(output)
if os.path.getsize(output) == 0:
            return None
filtered_lines = set()
filtered_output = os.path.join(self.working_directory, "filtered_output.txt")
with open(output, 'r') as f:
lines = f.readlines()
for l in lines:
safe_l = safe_str(l)
# Test number of unique characters
uniq_char = ''.join(set(safe_l))
if len(uniq_char) > 5:
filtered_lines.add(safe_l + "\n")
if len(filtered_lines) == 0:
return None
with open(filtered_output, 'w') as f:
f.writelines(filtered_lines)
for fl in filtered_lines:
ocr_strings += fl
        req.add_extracted(filtered_output, "output.txt", "Filtered strings extracted via OCR")
return ocr_strings
def execute(self, request):
"""Main Module. See README for details."""
global imginfo
result = Result()
request.result = result
self.sha = request.sha256
infile = request.file_path
run_steg = request.get_param('run_steg')
# Run image-specific modules
supported_images = re.compile('image/(bmp|gif|jpeg|jpg|png)')
if re.match(supported_images, request.file_type):
# Extract img info using Pillow (already available in steg.py) and determine if steg modules should be run
if self.config['run_steg_auto'] or run_steg:
decloak = True
else:
decloak = False
try:
imginfo = ImageInfo(infile, request, result, self.working_directory, self.log)
except NotSupported:
decloak = False
# Run Tesseract on sample
# Process the command and save the csv result in the result object
usable_out = None
orig_outfile = os.path.join(self.working_directory, 'outfile')
stdout, stderr = self.tesseract_call(infile, orig_outfile)
if stdout or stderr:
# Assess Tesseract warnings
if b"pix too small" in stderr:
# Make the image larger with convert command
c_outfile = os.path.join(self.working_directory, 'enlrg_img')
c_stdout, c_stderr = self.convert_img(infile, c_outfile)
if c_stdout:
c_outfile = os.path.join(self.working_directory, 'c_outfile')
enlrg_infile = os.path.join(self.working_directory, 'enlrg')
if not c_stderr:
stdout, stderr = self.tesseract_call(enlrg_infile, c_outfile)
if stdout:
if not stderr:
outfile = c_outfile
else:
outfile = orig_outfile
else:
outfile = orig_outfile
else:
outfile = orig_outfile
else:
outfile = orig_outfile
else:
outfile = orig_outfile
self.log.debug("Tesseract errored/warned on sample {}. Error:{}".format(self.sha, stderr))
usable_out = self.assess_output(outfile, request)
if usable_out:
ores = ResultSection("OCR Engine detected strings in image",
body_format=BODY_FORMAT.MEMORY_DUMP)
ores.add_line("Text preview (up to 500 bytes):\n")
ores.add_line("{}".format(usable_out[0:500]))
result.add_section(ores)
# Find attached data
additional_content = self.find_additional_content(infile)
if additional_content:
ares = (ResultSection("Possible Appended Content Found",
body_format=BODY_FORMAT.MEMORY_DUMP))
ares.add_line("{} Bytes of content found at end of image file".format(len(additional_content)))
ares.add_line("Text preview (up to 500 bytes):\n")
ares.add_line("{}".format(safe_str(additional_content)[0:500]))
ares.set_heuristic(2)
result.add_section(ares)
file_name = "{}_appended_img_content".format(hashlib.sha256(additional_content).hexdigest()[0:10])
file_path = os.path.join(self.working_directory, file_name)
request.add_extracted(file_path, file_name, "Carved content found at end of image.")
with open(file_path, 'wb') as unibu_file:
unibu_file.write(additional_content)
# Steganography modules
if decloak:
if request.deep_scan:
imginfo.decloak()
|
import json
from tiledb import TileDBError
from tiledb.cloud import rest_api
class TileDBCloudError(TileDBError):
pass
def check_exc(exc):
internal_err_msg = (
"[InternalError: failed to parse or message missing from ApiException]"
)
    # Make sure exc.status and exc.body exist before dereferencing them.
if not isinstance(exc, rest_api.ApiException):
raise Exception(internal_err_msg)
if exc.status == 404 and len(exc.body) == 0:
return TileDBCloudError("Array or Namespace Not found")
try:
body = json.loads(exc.body)
new_exc = TileDBCloudError(
"{} - Code: {}".format(body["message"], body["code"])
)
    except Exception:
raise Exception(internal_err_msg) from exc
return new_exc
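
# Usage sketch (illustrative, not part of the original module): `call_api` is a
# hypothetical helper showing the intended pattern -- catch the generated
# client's ApiException and re-raise the translated TileDBCloudError.
def call_api(call, *args, **kwargs):
    try:
        return call(*args, **kwargs)
    except rest_api.ApiException as exc:
        raise check_exc(exc) from exc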
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
# Licence APL2.0
#
###########################################################
# standard libraries
import pytest
import unittest.mock as mock
# external packages
from PyQt5.QtCore import QThreadPool, QObject, pyqtSignal
import requests
from skyfield.api import wgs84
from mountcontrol.mount import Mount
# local import
from logic.environment.onlineWeather import OnlineWeather
from base.loggerMW import setupLogging
setupLogging()
@pytest.fixture(autouse=True, scope='function')
def module_setup_teardown():
class Test(QObject):
threadPool = QThreadPool()
message = pyqtSignal(str, int)
update10s = pyqtSignal()
mount = Mount(host='localhost', MAC='00:00:00:00:00:00', verbose=False,
pathToData='tests/workDir/data')
mount.obsSite.location = wgs84.latlon(latitude_degrees=20,
longitude_degrees=10,
elevation_m=500)
global app
class Test1:
status_code = 200
@staticmethod
def json():
return 'test'
with mock.patch.object(requests,
'get',
return_value=Test1()):
app = OnlineWeather(app=Test())
yield
app.threadPool.waitForDone(1000)
def test_properties():
with mock.patch.object(app,
'updateOpenWeatherMapData'):
app.keyAPI = 'test'
assert app.keyAPI == 'test'
app.online = True
assert app.online
def test_startCommunication_1():
app.running = False
with mock.patch.object(app,
'updateOpenWeatherMapData'):
suc = app.startCommunication()
assert not suc
assert not app.running
def test_startCommunication_2():
app.running = False
app.apiKey = 'test'
with mock.patch.object(app,
'updateOpenWeatherMapData'):
suc = app.startCommunication()
assert suc
assert app.running
def test_stopCommunication_1():
app.running = True
suc = app.stopCommunication()
assert suc
assert not app.running
def test_getDewPoint_1():
val = app.getDewPoint(-100, 10)
assert not val
def test_getDewPoint_2():
val = app.getDewPoint(100, 10)
assert not val
def test_getDewPoint_3():
val = app.getDewPoint(10, -10)
assert not val
def test_getDewPoint_4():
val = app.getDewPoint(10, 110)
assert not val
def test_getDewPoint_5():
val = app.getDewPoint(10, 10)
assert val == -20.216642415771897
def test_updateOpenWeatherMapDataWorker_1():
suc = app.updateOpenWeatherMapDataWorker()
assert not suc
def test_updateOpenWeatherMapDataWorker_2():
data = {'test': {}}
suc = app.updateOpenWeatherMapDataWorker(data=data)
assert not suc
def test_updateOpenWeatherMapDataWorker_3():
entry = {'main': {'temp': 290,
'grnd_level': 1000,
'humidity': 50},
'clouds': {'all': 100},
'wind': {'speed': 10,
'deg': 260},
'rain': {'3h': 10}
}
data = {'list': [entry]}
suc = app.updateOpenWeatherMapDataWorker(data=data)
assert suc
def test_updateOpenWeatherMapDataWorker_4():
data = {'list': []}
suc = app.updateOpenWeatherMapDataWorker(data=data)
assert not suc
def test_getOpenWeatherMapDataWorker_1():
val = app.getOpenWeatherMapDataWorker()
assert val is None
def test_getOpenWeatherMapDataWorker_2():
class Test:
status_code = 300
with mock.patch.object(requests,
'get',
return_value=Test()):
val = app.getOpenWeatherMapDataWorker('http://localhost')
assert val is None
def test_getOpenWeatherMapDataWorker_3():
class Test:
status_code = 300
with mock.patch.object(requests,
'get',
side_effect=Exception(),
return_value=Test()):
val = app.getOpenWeatherMapDataWorker('http://localhost')
assert val is None
def test_getOpenWeatherMapDataWorker_4():
class Test:
status_code = 300
with mock.patch.object(requests,
'get',
side_effect=TimeoutError(),
return_value=Test()):
val = app.getOpenWeatherMapDataWorker('http://localhost')
assert val is None
def test_getOpenWeatherMapDataWorker_5():
class Test:
status_code = 200
@staticmethod
def json():
return 'test'
with mock.patch.object(requests,
'get',
return_value=Test()):
val = app.getOpenWeatherMapDataWorker('http://localhost')
assert val == 'test'
def test_updateOpenWeatherMapData_1():
suc = app.updateOpenWeatherMapData()
assert not suc
def test_updateOpenWeatherMapData_2():
app.online = True
suc = app.updateOpenWeatherMapData()
assert not suc
def test_updateOpenWeatherMapData_3():
app.online = True
app.running = True
suc = app.updateOpenWeatherMapData()
assert suc
|
from abc import ABC, abstractmethod
import re
from commiter.src.types import Status
from commiter.src.utils import parse_printer_format_range
from commiter.src.command.observers import *
from commiter.src.backend.tasks import AbstractTask
from commiter.src.command.selectors import *
class AbstractAction(TaskObservable):
def __init__(self, backend: Backend, callbacks: List[AbstractTaskCallback] = None):
super(AbstractAction, self).__init__(callbacks)
self.backend = backend
@abstractmethod
def can_parse(self, string: str) -> bool:
pass
@abstractmethod
def parse(self, string: str):
pass
@abstractmethod
def perform(self, *args, **kwargs):
pass
class AddTask(AbstractAction):
def __init__(self, backend: Backend, task_class, callbacks: List[AbstractTaskCallback] = None):
super(AddTask, self).__init__(backend, callbacks)
self.global_regex = r"^!a (.+)"
self.task_class = task_class
def can_parse(self, string: str) -> bool:
return re.match(self.global_regex, string) is not None
def parse(self, string: str):
tasks_descr = re.match(self.global_regex, string).group(1).split("!,")
self.on_before_action()
tasks = []
for t_d in tasks_descr:
tasks.append(self.task_class.parse_string(t_d))
self.backend.add_task(tasks)
self.on_after_action()
class DeleteTask(AbstractAction):
def __init__(self, backend: Backend, selector: AbstractSelector, callbacks: List[AbstractTaskCallback] = None):
super(DeleteTask, self).__init__(backend, callbacks)
self.selector = selector
self.global_regex = r"^!d (.*)"
def can_parse(self, string: str) -> bool:
match = re.match(self.global_regex, string)
if match is not None:
return self.selector.can_use(match.group(1))
else:
return False
def parse(self, string: str):
range_str = re.match(self.global_regex, string).group(1)
self.on_before_action()
selection = [i for i in self.selector.parse(range_str)]
for i in selection:
self.backend.delete_task(i)
self.on_after_action()
class ModifyProperty(AbstractAction):
    def __init__(self, backend: Backend, selector: AbstractSelector, callbacks: List[AbstractTaskCallback] = None):
        super(ModifyProperty, self).__init__(backend, callbacks)
        self.selector = selector
        self.global_regex = r"^!m_([A-Za-z.0-9]+) (.*) (.*)"
def can_parse(self, string: str) -> bool:
        match = re.match(self.global_regex, string)
if match is None:
return False
return self.selector.can_use(match.group(2))
def parse(self, string: str):
        match = re.match(self.global_regex, string)
tasks = self.selector.parse(match.group(2))
for t in tasks:
t.modify(match.group(1), match.group(3))
class Issue(AbstractAction):
def __init__(self, backend: Backend, task_class: AbstractTask, selector: AbstractSelector, callbacks: List[AbstractTaskCallback] = None):
super(Issue, self).__init__(backend, callbacks)
self.task_class = task_class
self.global_regex = r"^!bug (.*)"
self.selector = selector
    def can_parse(self, string: str) -> bool:
        match = re.match(self.global_regex, string)
        return match is not None
def parse(self, string: str):
match = re.match(self.global_regex, string)
content = match.group(1)
        # if nothing is specified, we assume that everything is bugged
if content == "":
for t in self.backend.get_tasks():
t.set_status(self.task_class.get_bugged_status())
return
        # else a selector is expected to specify the potentially affected functionalities
if not self.selector.can_use(content):
raise Exception(f"Malformatted selector {content} in {string}")
for t in self.selector.select(content):
t.set_status(self.task_class.get_bugged_status())
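
# Illustrative note (not part of the original source): the AddTask grammar above
# is "!a <task> [!, <task> ...]" -- tasks are separated by the literal "!,".
# A quick, standalone check of that split using only the regex defined above:
if __name__ == "__main__":
    demo = re.match(r"^!a (.+)", "!a write docs !, fix CI")
    assert [part.strip() for part in demo.group(1).split("!,")] == ["write docs", "fix CI"]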
|
"""\
Self-installs IronPkg into the current IronPython environment.
egg: ironpkg-1.0.0-1.egg
md5: 41787f5a12384e482e8e9f1d90f8bcdc
Options:
-h, --help show this help message and exit
--install self install
"""
import os
import sys
import base64
import hashlib
import tempfile
import zipfile
from os.path import dirname, isdir, join
b64eggdata = 'UEsDBBQAAAAIAAmCST4qnflHLwYAAEARAAASAAAAZWdnaW5zdC9zY3JpcHRzLnB5pVjbjts2EH3XVzDeGpQQr9IACQoY8FORBEHRblG0SIvNQqAlymYjkwJJx3a/vjO8SJQvSS9+WFvi8MyFM2eGK3a90pYokwn/y5yGn5pnrVY7WCx7ZrckvGZrg48LsmaGS7bjC/KnEnJBhGmExq9WdBy/OyE/ZR6DbzZCGlvurehMRNK7SrdZln3meq0MJyvylnWGZ/zI671l6w5fgUHl+CLbMrNdM7mpwAZY1bys1a4Hhbmmd8/K59/QBb78sQDchrfkoIXlFQDkjbHFMiPw8RYdedUwy6IxdScIMwRfZU7KWed2uUerT343flTPJS4tCD2saVE6LTnu9cL8WPPekvcPb7RWetx3Rz5suSRrpayxmvW9kJsFsVtOMGZDlMA2CB8RkuwhLEw2pGYSxTRvFRzLCCeVJWsOHqMFlsMpHLai3uLuhx+IEbIGJGtIrSSsWnzPugM7GafUwOmVA1rPjHEPcOD1dqca7+G3371+HaNZa84gnL1Wx1NudA1JIGQFxx4iO5vN3LeXI4w4SaJaAsLoTxAn+QchG3UwRMnuVEz2ipaEhBjj1msBxs++R1gIWYBN8sSqJZnrGZmjIrcNnOGYz7ouuWzMQdhtTjGytPDnC85VmL6QRTGT0aUiGhHXS2OZtgGA9809LUbDEpD48/HV8mnQAO+xOPLg92KQ8mqm2TnaZWoteutBH5f3r57Ic0Lv/duyP9FpggZxD9kq2BbzM6xgmtK4HJKVUvoxu3s2m+f9yW6VLMwsuyO/biFDfHQPUA3+HBuyPsXc9MUD2QjPBGNCTM9rwTqCq3pfW6Gkye6u8InZrwG55pBlGabDisxdwDU8uiIXNh9lypp1Xf4IAug8CjC9+fz4cvlUFBkYD0fdiNoG61djLizwzFcIPHhcd5BNuX/U3O61JC61xwhd5rfgJgcfw1mHyLR76RzEOkL7IDTwkOZyzB4sTEeICFJOqyTU2I594vDSTCSyYLMmwJ4cSwZX8bepMPQV07VLVfrm3bv79z+9fXiBgX+B/GEqqyp8AstKe7RpooZtC8K8AyuHX5q+g6gXg9i14ksKMKCs5tphOqiVr7v8TEPwJIBGrStCf/7l4fc/6BSeCzBo9IhOFnG7x05LkYtiioEfn1ahQ+Wu8jB8O26ZL78A9NhBfSDCU1FMDenOPf8CbA90LI4D6BnUrVAm4Yzg59w1WANK3MFCcQB9N/kl/6a5M+q/dMO1upUT/6sElCae19RoT1m3PXXHOLb+AeQ/+g7qou/wcyJ61n7jZ0Jvof2eCVx042QpZYLLKENDxih7Lk7GB88RuR97oI3qU4WkChs9l0+b3wfc4ygSJUmv0N/A6FYRRCn/YcejQ8fz+yFYBpkPMbK0zUWjYBraS5vTJS2w2F46GWjle+RFZC/MgSgLfC36vAgkgHucuGdULMjZ3MxQXTJ/JZ0HjTjvOT5EN7vN2GxirwlxudFsDjgtBUaDKCyhr9wB7DwfQu9aTUsq91RVjmGqasegJKpAMkkncjZhA5vnPijFMIzOcwwP4DmhoSHF1/nQdToFzO+er+TUMDr5QFzOTmj5AsexNmTN/+4VPoKVPfU4hVCANqqLOWtoVJKKOYXSWVEa7lt2nij0PXJsRZ7XY94MW+FgdyZPgBOINkxF+DV2Ahy63V0ijEWJN5A1czfpY4ynxDROSXF/wnTnxXsp4k15vppOUMnYe8Oe9poVN6hgIERnj9P/BRu9fT4tgFdT0DMueSuOHEZ2nOQ3MOZi7mGJwN3nKo34O5cHInhwLrNQdwp+ccQiLV9vd+gXrfDNIiS7GJI9aqRDrVLMC9yWXnd8jQPPAIX5Gw3eO+DKtsPbiCvvSVyLUCYmwXA3lTDHGuWvQQgmFZEcggGcCkEEwgVqAT3lVR934Ex6d4SKsvU2aRKhDvOdu29Rz1POp1250WqPRNmpA9dQ+dfD+EXeRAHJD1WI68QUGHpzuLxSmHM9BsS871gNtPnxI9xn4S98F4uLVnr+QXAkF2gBq5ejW4Pa1dn5BOuD3K1b129943pQ7NSu+bh0+CrtR9X/livHokiHcKQi/58IOdbUaC644IoiHRKv8qlP5WnZgeKvtJGRhWjdiXCT/BtQSwMEFAAAAAgALINIPuvnEdjxEQAAmLYAABMAAABlZ2dpbnN0L2V4ZV9kYXRhLnB57Rxtm9M28nt/RaClhSvd2omTta/QpiwLpWzp0oXe3iG62LIMaXeTPIkDCwf97acZvViSFe9LobScHphnbb3NaGY0mpGcoYeT3vXeZz/8hxxnETmOAAbyr4LEfq8qAeq9SJ32Foy7Kv98SN0yRo5jPpkiF884oYTU5JiWF+Tc4h18e/BssuzNF7Oni/yoR/PpdFb3CtZbrKa9ybR388e93tGsZBtkwf9NP+kiY3dbPe3A8ByGoiAbkeOSs7+k3588FaaeKymzQg7WlwKJ3U6uoPo2X+K+XW/373VItoXJHMlRHySxQ73a8GRdhynHXBgjuorr0t0i0ynz1Z+mbo2Um5eSrqMuwN8PNmp2XNuFaCcqu8zVmJYGnaj46wBVevfJRpnXuVNHvW0NcNf4uYnA5c+faLSxsAlJWxuC29klYvTHiBhvFMulW7XvbftuIFUIODMmNjPaS3+3Ww3izu30dBJ5dzMNECBAgAAB/rrwkG/JGY+QeFiV8r2XUbGtptljHrrCQwERWMrjhrKA4hUvplAMnXlpNVIv13lQZgQsNHqxxYvS0ixaPOIOIMSOTmRDN/nfBCJBa0sHrH3A+gCq+O7PeHUBbgJrmgHZFQaf9Sv+NgQ8tWiyZlgRlJf8b1E0QboCYEeJM9dzVbwARlH+QvtEOiMw6cScIYTD/RffQ/nAKefB8tkGh4MGPpAKeeVAj3hRbHGVD0WH5mwTh4kopKzWTNxfy8FfDQ7m+doxGw4yzsEsvy4C8oYkUVHGHTIQBwZ2WZrd1ERedogcinlA7JAmz22ZgV7kA7JomrPs0G7S0FE43KEOYjlZP4MixSBFyU+nQmMwLPtmXQ+HxUXmY9COQWeZ2FO+dEpaTmI9HvJYI4sxrGXCVbDwRCNYTslnaw3NnrI0n5gURhD1ybgDRTBsiwCMkyIwhnojhMOyyCSabm5LqbUo3FZLDqYMwWaOzOiNVU8tkfHY4g5VvRNdlKpjPacp0nlkEoRIUt1MLLMSFZc3V8jBmMLkYnrfaAlmEPVtqIh1tDOHU65anNA5iDQ9UG4MW4wjIROcK0s/N4n1kCHRcKIvnwHHroWDP2xamlXAWxadHWAssDGmEiiiDW5mQsAsnZLaZhnj1FZYdcs2TjAyjdsjGxNNlRT8DDD1Rk+dW+nIWC9QQwcCG9pOatfFVDKMh/ymXNYtu/USEBywhIB7lodxTT+Wje31nvlcBWBEs1ocvYReOGh031jYsFgqZo4rJ5V8utZkrEPYPw3C7GwIuULq7gL7Y7nHxNCv/tRo+Mnahra2rnOy0Leip
lZJd6tDngNYgWNLnju2MNEcOayhqG2ajn/9vGfwlEp/JJJuSLUpfBPYgFzvqFC85ghyJtqWyJmXatNRhLd2mPIm3/rghMc2CNRV/m2xMCmq+UW5VPo35I7oKH5RuGVAdfYG0ETXOOsYITC3wbfCXvB3VXGRl8cDaYqz3/EqY4S8IzfuqGJyvFmJaeo6aXiGfoLATOMKTSQyFMeNLSCreA33I2fEeBEebjT8RB2plC7gSn70y8FjS8CGeOHccr1QbO/DqATRIOkGf5XNksOl7V4e/TWM2dP2cOgMy3nEyaPHHjWNYR47xPKPGn0tf5EUSLbDCWmawI5ZS+mCtuZfrsQAdLSFekoMQQ22xFTB84au6FGzUpgJ3H1TsSCaEEE5DlaL0m0h/AWjBWu1SKVHUTo8U6uDd5ka047eiKYRJ7EWcyvK1uBc4RfNIA8cX5mzNnP09m1NNGvTAqIkHTNtbKBhE4q+NWvQk+o8NgnEHGeCnjT6SliC0V35UovK9RIClUAdfQH8xNWIQZ0xQJRAgxziEb0wfBdgMvw6t0JI5lLoVNg0ysV1x2WZ9iBAyS1MEPQk1opqGRBrh4hVoKp2iLNL445lhBKXOcAapPZjexbK0gFOPlZRtTmLfj7z+flKtdDG+WRSPGyGj3Np1uMtaRqYZ7MqR5YbOZTGojLtvjAnQFSl7Ekt/VI5LHX9MR4wtYatb4MA3qi5rb4VFVUFLdXmMCLk1o9AQKYLeNtbcm6ROqnpK4ehb80aFKsvrQqrhFXBfQ35DZtAwbq4wfepddyA9SHGy9UqR2e7uNvgE/vybXmIMNIVuC8XcgvcdPcsqIwt1ggKs6QJy0EOVCkQ+vOjbTVFYRuq6iR2GlwhMtZ/Ymu6wKQEDaSMboF9dQyu3t7a27SxwV0lltNmbLGRs8VKU8BalljY2cQJq01g6c1WGKQ8wHMbWWlGK2BaNVgKjyONvt6oJd/kNt/o5z1oGS2lrvGWK2k+dQsxzWLoneY5HNQ7iikc2BdLukAOE3HEkEzm9UZllqTzl0CNeJ1aEvHcAquN9hqx4kjlLanTo6paCTPQHMUZgmoOPy7ZezDs12j3HB3QzDEse4fWaJzWCobOkWkeYyEFa/9AFdl0sIMGbprYb6NgllLmEbj63O5NxfO0VsL+hixWYkiU9mb3hGDiseZNkZF20EeF7uTybDDblEeo8NwHM4CeXX1JcDttIfn4Agpb1VGzrn42mzZ1YMCaug12zCSzoC6xCTe9KjxpaPtMidEib7seTbBetVcAOhDUKS+N6fnRgZ319Szk1tCgrYjjCDZsQBsPJ9AnD3fT1fM4afT8tu3KgJ6zVnSy1mVQgUY08DF57PIwclRY8hC+DLN5dQ17bDtulpxpu3nDuFIudToQ1LbvOb6SOm5R+kac2XXyMkGP2J4R+CuuJxhZJNzy4nutTjLWKYtjHeDIbz0O7zQxNHsjnWvLD/FNBKjpRnJLjPhaWBzUJc8XYugQCNcbTB3g80u4CbKu+Q/mXttaO2pU9o1NKp4n5F3q8UC59O6gvr35W4cQ8Hir5KTYHgP1oTuH5nAxHdl1zh5hrSkZMev7tubYUwts35miHbv3m+hHHX/ZWB6K8yk3RHJmVhLniFZe+w1Ndglb755Z5H0/KzCoylPXLpROa+OaAW1sIbUDo+bSJFo5xtAbdA1kn/WI5YuKQlIbpX14A986x0uYlTD+ouHF+mt3ILS36h7UHm0oTyzwb71Duq9kPJETxiZZfa1zJqclACYCfCo+IfqY1DvwRd/AiTj48g/bdK2rjlmeWgBi3WKBReLgmtm7f83qFrXlRjtXs1BdU2VN5aogbijl6VmRm1FvhNfXiSRs86E6C8BFSfWdeHOq0ixXK6asHJ9C2Qxc6Y/F/i7OhR05iTvd017FrDmlFxdE+h5mkMqJxZuCnFgZnKHVrryhLM9P4gFvx5p6vAtbifVI9dLuvj7Ys1kPlIEQ48q82UIHvb6iXBxlONQhIji3I7L2dlPOKRFGwhj97kp01IoCe0Sc6R3llUIN8G9x/qnupAfX+VDUIBS7FGQ6lqyjglqrjbZa4JGCJGGDPsdlLV47RGPrgkMxB/UTScU19Nh/owySa0bAVTzoVJx9KHhD5HcRb5R2PybyvlVKe6g21xXRX06UI3HkWajGhaos1ID6+wopTgxvcC9yvgR2DuKodRCHhwkZsQ+jzTB6SIyPA5od1PJbwckTSjd2bJ760CAxjSSxzzRAwNHqnnYqkD4zupOB275D5VvCXAuDKHjT3INTicEoUvdZZWJyULSyN+Jf/QzFm1v3Qw4P4iJ1vlg+MwHjDgIKIUW5gr9rglEnJCny3yK9XHZaxhmPD8GDMHzXc39EoUi8bCyQPO41y1WvDaX+pl4OxcdIFvHgq6d6bs55A44ZC6vbtEqz6w3GAW60GGQVqdkKMbK2L6h7FL9/DhweWkPDIh+tLkhDB/4gjVwLB1tJKj8gMCtphsagwItuVdFx7SuRmNtLhEMfE+0I8r3hiWGDYhF8KYKXd06zRaTFHmncajPEsW61W5GuCJrEu94nOeStQnmyi998oYCM83PQePzWwb0KMCcC52Ksi1Edkzul6R0ciwkLHwrmUe5L+1E+PxGLy3j1OR2MXPxXrXmDz+noVKxTB8UoxVR+SNVSV4xc6uWLA8NgpkQb+8wJqsTVROuHJsqFK33UAfdYekVaZbAXSV/RR09H3+RFz6RPHb0pZaPSw7OUrnVX/7DBR1Ondyq2ADMORCup95HeSjg6a5UamrOB2TwfSnm1VD2TvFAsgL65uvHdPLnHJVmq0Vek8WMQpbjtML6ysa8ATdzZK3s3WvsxikXlyEelClFM0aqTS2HUGzwLm641tN2zW5znsyrHNeMLKaMA+8r46XhG/LRKOLxYn8F1AKzGPLPaNOvD/rlN3OxIuYr3jMatHcrjYqq+6GGPiP4QAL1i59ZPSLlTGdDqD8hCO60g1EodKEdmV2/rpN0aIfa2Tte07vtax9L4Mbd10m7dkwyJ/ajHsnrYE8fzsYHG/dGv+4vlVDP9ulvx/GpbRk1ztbcee+cxVlfxJi1KAMaKOte3go/0zmMOjt7MD86SaqGPLfTodnZGwCXaNYnwZNLuy56xNLAYM19vJC5wLwzxZ24l9sQ7vS/aVWhmsytiZNVO4pR/qTqbxY1T+sFo0R1qqwp2nWTX8Kd0MUv9xf7WVeRvTb3FcXrfVxz5im/6m1JP8WVv08pPQ5T4B448xdc8ZT1P2VVP2XeeMh9q39zHXhJjX/dnnrIHvu7EU7jvKSs9ZU88ZXNP2WG7zAbbeTq5PkA3tPlnWDzYda3zpgABAgQIECDA3xm2RCKm2ZxNe5eW4sbx4wtObTWZlr3dl/DhU49xl3xV58Uh0+23ZqvDsgcNoRKKeeHPk0W9yg/vr9jiZe9yXpaLq71Pi6u95eQVm1WXiytXoNXGxpf8/2I1rSdH7Mv5kq3K2RcLdjijG60TsgABAgQI8OHA7u5pW97ca95Yors9O/UA5DiPjDEqavSkraxSHUCpHubB/XfF
mGNj5LRvvlDjJUvMF9pBTT4yKgvzhZpjlCYqZr5UkTl6/yfjhRov8ch8YfrlH/op0U9j/XRPP+3rp0I/Heqn58bgaWS+5CZBJ0CWGI1zcyqFWUPNMUtmvFQm6ohpdejtrUMaxPlBiZPI70MP6OFsaSSkEClAD8ANfq5K/4lF4GAjk/BtwXL8bBTTlB4s5/mLKbbH26mDg6esPson03zxFF3pG6J0fnBwwIu0X7xlFWt8P+hiNn0+Wcz0R4Q/6ooKMq2qYiqKl6w+yOfzg/rl3MywgVcLB/lyyRbynkCksDmg7HjS/GoIkMQHk5n+sGmMBUf5b2ye189E6SZ+QHIwmzZdIZ0ML+LITZJkPllePj+c1M0AXLNAWfPaxA1C6tP8kMcMqug3KKrmi8m0lkfBKdT1J0vOasoM1P0jdkThpx5aS0CC/eXk6TQ/NNQlwdJ6MdWNM8S2zQnZXcwoUwkUGcr1Nqu3ZkdH+bTcmUzZt4r7MS//YVauDtmtySG7lx+JKoD7VvV3vOdhUykviqF+r855XDW/M61mshpuzKP+HqsfTp9ht3L7mLJ5PZlNOZaayVviGH+rJ6MyTnLNqGbhBaMKAzZT13f/yMvR8jld1BvlocHMONn9P39pc+Xy7tt9urv9073tnUHf5vxfEz776OkqZNH+C2XRpu8ri3Yekb9fFm2Cvypl1RrEPtJDIu0AbwFCIm0AtAARCYm0I7WxjElIpB0gQIAAAQK8P/D8DDYk0g6JtEMi7ZBI2xyZhETa6tfCIZF2SKStEkWERNodP8bE1BIwaEikvcbJCom0UxISaZ+EMSTSDom01bRDIu0z2aSQSDsk0iYhkXZIpB0SactNPSTSXpNI2yxI5y9fWGLwXP2q3XVMrODxvNmzr5A/L3s26plpMH3Zs9GkhezZ7yt7NhwK/enZs38l7zx79l3yJ2bPTsiZsmffIX/57NmMvPPs2Zg8qBPH28ieDavnfWbPfklOnT1b7NDvMIH2Njl3Au3MnUZzqIjJ4vwJtN2DmdMk0IZQ6o9k0HbhYcioXYeM2q2BExIyajfda9HmD2bULkjIqO1a0JBRu20ZQkZtdy8KGbVDRu2QUTtk1D4m2hEMGbU7FfMMpjdk1A4ZtUNGbRIyaoeM2iGjtloS6k4+ZNQOGbWd4pBRu10WMmp31Afohjb/DIsXMmoHCBAgQIAAHxSEjNoBAgQIEOA9QMiovQZCCuYPKgVzEOcHJU4SMmqHjNrwN2TU7n4JGbV9LyGjtgmfffQ/UEsDBBQAAAAIACyDSD62qR6LkwEAAA0DAAATAAAAZWdnaW5zdC9fX2luaXRfXy5weX2Sz27CMAzG734KixNMrA+AtAMHhjhMQog7Cq3bZuRP5aRlffs5LR2gaYt6aGv7y/ezPZvN4FjrgPLEmnC932HpGamqtAsR3v4+AB+eCSN9RayJKcsyANiMhZgbFQK8Ph6AndwjQeWiVpEKPPeoHCquWksu4vqwxXkgwjMZf11IrEhx30TtnTLYEZ99ILhQf/Vc3AtTYq1GgtIbKdauQkux9kXAeQJq2rPRObaBFiuAwYUxc3lHObvxcxQQ9CU27DtdjBbF1lJ8Rz+E85Y53bnvRd0huU6zd8kGAJP1HU2qh+ErEdyuE7lG5RdVUYZ4TN0WYKt64R0r5LyMEK0xkhtr9OVkCq+1zmu8CqdQFFi0nChT9Kb/rJFKnLJ0/yttCNo2ph/igvhJeRxzAN5bl6c+hxU8jqyiePqxP5FtyRGrKHpMsWUXUGHwnEZqtAxfTEv+b+yQDeUbJRxkaJjdbfFKbSg5+QdYBnBTTBmD0tROmMkel+zttLiZVVr6bhtxhbKTacJLfIJZDuQnWaog2KfSwTdQSwMEFAAAAAgALINIPh46PgWvCgAAEiMAAA8AAABlZ2dpbnN0L21haW4ucHnNWl+P27gRf/enILxYSG5sJSmSK7qoDy2uSRCg6B16hwLtxhBkibKZyKRKUrvrHO67d4akJFKifUnbh+5DbEszw+HMj/OPuSF/6vRRyDvyvik4+bE8cto05A9M2W9/pBxed4ejzkpx+naxXC4/LJgUvP10IEyRgih2ahtKtBANqYUkjCtdNA3jB1LwinTce0APB5UR8tPR0i9AwF4WjJOKFhVwEg1vFOUK3h8LTZgmlaCKcKFJWUhKWG1IUA6+dKLVAt/h4viyFFLSUpO2KTQ8O62BMlGwREt5RXnJQOBBDMy0WoOiAjjloi3KT8WBEk5ppUBFsqfjBmhFWslwEWHWcY/XZptKEMHN1gQqX6hPVilJVSu4YnvWMI0rixps1q9zKjh8yDWh2SEjFI0KMv4hOvKxU5oc2AMsoxdmu7iKt+Xng14KtTll6JoF+EJITYTqv6nz8PUza2vW0P7nd4LX7PBDIRVsvJbiBFxZW+gjcQTFXuHPNdkX4JLiRNekYtJ++SgYB7sqeIAfRq4VArqiXlmnGWjmRLVgOK7zmudFqZkAVnkC1hy3Lk+5rNfk2IEx8v1ZUxUI6kWoUrJWq8ViUdGaoBb5A5UKhIHYtOaruwWBPzQCfr4D01k4oW5IHlgelqW6kxwBrDvAb2r35USuskAYoK7mGaBHPTJ9TJMMdEvcgvhXc7KFf+7vNq92PUOySRDQNR/J7JJuPRCoWsBECoRr8nK1MmS0UXTGUIO9kgR2XjaFUuTN4fAeDJOK/UeAOahh6NEqec4403meKtqASWvrPtjTHkC5fVuAcE9rJMoMDWqPn+Gr0lhtO7N1D4fU8KxW9y92WSMeqUxXi0HCDZwEON3MgBNxA6oKeSaPR1Ye4YhyDcfevnzz7t3m/V/ffj9Q9cekaTxx3iG0LlShtieqixwkgMIIzhSAn7WS1uwJjNcvAZYet7aKCNBPehDgSwUZee4QmecZkCUT9j3jbvlx5ZBCMU3zXvmYmjL5C9t/QLJNT5Z4JrX+AjQj8/0ufO6cDG/ctxEVkja5XcLhwrhtxAFgio7HPTVvMzC21Bbuo4qrKTKR9h6Ol09DnpGXd7vFuL7znFncWxbOCIZ1E0NCY3tE+Adh6VR8ovBCTegmtvkMu3dBLvsna9/CZzqCfOKvQhoQoC0tc4Y/GwYHayqXPmkJcSt4bqNRVkpaoFeleALU2i1OubmW57wFZ2uVTpT4nJUN+Mp/7ASDJXP33UkNGGXH06QVAMY+3rbnKSIfJQIOjYWaD+4I9Jn4BJK0sYgxgxFinuQYkHNnsXQ4Tc99UeZMrAPH/eofO3AhaU5PrT678LTy8eHUQbSHkLDwGw3tgtilc+upOTu8tQBGAaVBaqNl8hi8tFZMkw88yYx4oxOCHB8FlL0rh2cQ5WoQ7ifazP/huR1JM8BSlYZQBSOkCbxUoqE9HkxeMQyKmmwKsCJCBgZKDh27Rj85YrCMH0XuZm40+ZskBu5YyPWiZ4Q9fsd49JPs6CJGYs+Oi0mon49SD7wTjI4OC8L2Rc/dDJUEklaFLqaOc4RAlruUdyuBhtwOlY8fRmKsNvR5jLGYOZIPuSxX7LNZjzk2XCZ8G+PHiD4kggu7IaDL2pM6yQK
D3YL9SILh2ss0IRK+SHodtdDu6mmxaaqoog6vYJc/T2yW3JHNy18GEvpEy3oI9yMkqnBzxruwv/vB11AQOOfht8ka8GSwdDKJQArW0Fo69Lrq+B4/d5MwPDhqlu/r1Wjy6t5fy0+fkQhsF3W/rsbcIMLiSfwSamj0ThAzlWGYZ2y37gCUPpNGQ7RvftzJwPXZhjvHvLLFsNlrOglOhm1rPqAsgdjhxU2nlr9N0yhZni2UzfNYhsUn45OgNArpdz/I8Wuh5CZZfaHEM6ONFeAnX1dKTCBedlg4YhfxYgSPDQ2qO6XOYAcKq9QitQZDqNj44UM78IiHRICd0pXodH8gb3+v4BzA+fUar9QEnPiZueJqTp4NRdQFHWfZxmwOtjs3piywBH8ZPA/bopCybkShU74iz0lsJST6DfnmFfl2i2aeS5mbJktW16jqplPHdE6CTnw21dyrxPrTa3uPMEjMMkKwlZiKsKn0m9cbWNQUI2F8jWg7YhBclFeuvhuiSJBgJTGQN1m5grgGZSa2Nhg5g815paCNac8hZpqEv/YUcXlwfYnV1QPIG7B6XdVl5mQ9PJ0xD33ClNtjuswdtGsTEbtZ+eTMGPRN+HVlwgjaMYJf20T1jZfJD87Wg19sd2Uk3e28oykLBtXVm6eStljSpcs/s4onGgJMi7MvLdDN5EglXa6mVVU0jYRxvt/OOPN4nmCRGdtnknW8U7SaRsZJBnA1urFtD8F+7THX87WdprhZlJuRhEUx1nBj2xakkVi3nBUtTv7mlfXYflb8Ss8JL0ez47BqImneQexjFRlqHa2ArqModlBWxhXhSKo9zxwAmyiPJ1E5vV787vVrv97qeD8n8nd/pZWqecR8puyaTBQi3jcjPdXtoVMuqRoGg6U/5MEf6T2GDKznOl3sG2qnFWDUzWao08agspuUM+VjtXUzytQV6+N+T8MEwVO1H/Hh39/oSTxQYqsIfxjVT5ZtKRcUyLBPCMGKPdDmHBWKvHaqimw6HdSzxV8bipv3ZRagEQMbI7spqkOkV0vjTv7jiYvt9pZvpMSLge8KDCw1gziG9Pb4ge53S3+adrU/dz1CX+B7fW+k8OECu+amb/CsXcgN4d1pTyVOBs2jr65tIr1VdKqCheThYMfrdqwSOvNah8TnFcCsSoH9/Z8VKS6qzcrqNgwwiU1m/ZnHsqOMlcM3Dn/jbQzegVQ4NwApZQTFZnknb+ISe2qnEXgyB7xmpq8vlPoiaQBLOr1coJyCywAM4y2CgnBGKzM5c8PryMjaXiu8KcojlLQU24x+Qu7fUrgbLjcrfywUwfSKOd1JRAojyckNbyuwt8Ua/Nen4dYOQVjwmT3X+gfa9KzcnAKz5xTyDG57xu7xT4frPt3aXnBcnq+HuWYYXITAm/VgZmwQGRFEpwO2Wau8wcDOAcHeXM2gUJ9wL8nt5rcvFLl1gzAbN/HVLVSZP0iBFzSknzP83V6huI1Z2lcvAJ7JNpkYNoq+6QqROzCn86kAE/eKmpu9Vrc4cuzT7vemaHR3f1Yd2I3/NO0UAGu7NB935Bby9oHcC0OhduQeMPQjybJst7wwUaioLVeAfpvnlSjz3AXSNiuqKrei0mTTgG2Wmw0iKCbLXhhul0pjc66hSo9RHWnTbpfm8MVP3jK+uLSL23j13y7vop5bMoWmAc7NvzooJNRwpjGBeLgAoguaPVjN3BwVUm1Ek1WMkVvGSp43kM++gnHjsJRcYDE8QK2wZzjgSKvNDKpy/NmP8uCwCjcARmFeW2kudZ1jZI/EfEBwPgH58r0EMH469HeyWG7EqP3Q1C+OQJj0M4dJpobchuVNukTPWPgRawqiofLH/3OAXJ2ZBS1XoXL+4bwYI/uqL1ybMjBcf4Vra1vhDcy9DgGromEAPWteDJNF3GRjk7v21ERXQwkZOJkleSMI0JIDWr5wtEWZW9m/7TAzGsj9EICKrjGJrWgei7PqT+MXaDn+V5H/lZ795SOGRoY35CZq5mY2mOcYKPPczQht1Fz8G1BLAwQUAAAACABvilc+3WFHXsECAAB4BgAAEAAAAGVnZ2luc3QvdXRpbHMucHmVVE2P0zAQvedXjCpFTZZu2FbAYcVekEBCCIG4cEAoysekNY3taOzsNv+e8UezzaJFEKmq7Rk/z3tvbCEHTRa0SUQYmWkeUqVaLefAYbSin2eWhNonHWnJm4uhsgeIoboyqCqJGxCmFeT+OtHz9JcWKkmS5lCRgbsIUfRoLfLCi/NCK/bCGk5ssQN5LK0cSsbJ8tsE+ItTBnB4GddbDISdOG1gXbigIK2G436dh3Sawj73BQ4FSUuIWUQKeXhqcLCQfRdM+sG8J9Jc+scvfpA/QgyVMX4iOlDaBo4z1GMeiyKPlyEfIbQjqTOHyHEYmLctO1VWjRVaZZ3aQBhGwNVq5f+/8n47gc8He0BohROtHl0uONEhc1q7UQ5sH/8i0sbnP4jWHjyU7vyCHu0wWmg0EZpBq9aA1TGVLY1ZA+k9xw17SzAabKGeXMAjdaPyJ0CNvX4oFgWHSlfp9es3BtLdjVlBCp7f+kdqfq55FolGLUiyLiVS1j7l/g2lvkdAObACnISN1STQsKbNSEbcYz8tD+80BU2Ecnb0rJUzZEYOdjLL2Eotl+WVm6PscfDXpV3s8laeK/Wx5LInnjuM1/0utzrzLanzEBu4R6q1wbsPVW8wbvMVOEuflsCBmL8sKyru1eLbdAsphZ7InfQOZE5fXI1Y36h6oY6R03n9X+/G8npeXt2lcq4x72C9Lrzs4ZkpmoMWDWb+dci9dydn3InDe8y2N3n+tFbyfR7F81DxPO6u1JSpce3l4TfzoxSY5REM++cc/i95z904/anx8sWJreKdP4yyUmU9WTTZn70engm+emqUNZK7iD4VlBNFasIAwN1ftVXdo1NMLvvfdSO8he3N7tUjifgCrVMB75w+KnTukQ3JFFzDNoeXfgs/yNszzPFvMJ88zjFJFuvFroPPPpJ1va4sc2TgbHd1tXNW/gZQSwMEFAAAAAgALINIPp/TiQaBBAAAzwoAABMAAABlbnN0YWxsZXIvY29uZmlnLnB5fVZta+M4EP7uXzG4BDt3Xue+LYTtcaW010LfaAPL0RajOhNbG1sykpw0LHe/fUfyS+zkWn+JrHl/5plxTuBcVjvFs9zA2w4uhMllTS8RXIs09k7grKYbNYfrggl4SnOBRQHfuG5Of2FnEKey/NPzeFlJZUDvdHesCmZWUpXeSskSpI4rZnJohVyveIER/JBceI0GCm1YUaDqdJJkg0pzKZKk0agNL3QnZW86wfeKiWWtUXmed35/d3n9d3J5B6fgx1xJUa0z37u6v71IWtnD2eKKpGPT0P9v5sPv0NtPvad/nhYXtwdWNtWQ6osrhSv+Hg0NPG+JK8jQJLbIcDr3gB7f993vI5paCTA52tCyqA2CA8NIkLWCVIoVz8AiEo8M+aoFKjysoo1gH9V4P9TwDjwc13Ts41in8dKK76RAKvXxPFncPtxYmH3/hahyTVg/rLO2jloxQ01z5ZDw9M
PHOyHxIufaqVprw7jQDieCk9UFkYila5YhZVBJzY1UHHUE1LlxsIg8US+XlspdNpOwpc+UAkjl3DrZjpgriG4by5GSiDx3mQDsm0u1TWyvk+Z1qg5y3TINqUJmmpBccMOJuztQtRBcZC4YWgJSnmVJ+cYeObiThqdEgkJL0mAGpCis9RLfydG4Rh5jDFeLxQMsucK0uYZtztOcPLVgAWvyCZyPL0uskEKZdxNAKPDdWIa5VLJMTyNIaZTfEAqubeI5KrSTfkngFDJlxUEG1tD5bWJQ7bKyYLMitpwusXxD1dVBfr6TstxqC6PBsunjG/VPFyzNKXfejEBXzq6ZAYGUCqWJOmUVLiPAOIupI6ACG3Y+m53PX176zHYvLwFQwkPhWEwK3nUD6aO9pl4+E4uD3JiKDLbbbTxaXjNrPHMLY2ZxCiLv1bMT6GZ6q7jBTwa6IyrtLppquaJLAqpS8gcVGeiPZ3vPLkpwzzwna4lLguEOdKKVpFtJbT5aCREE22DaKsVN3t2oTpoG63DaK6SF1FSYe60UFwb870oa/J8pnvvR8XrZ2339Cr9BcBq0W5DGYvkJYGmtFOF/EIbmiREzUscvtWtG3GKjZYmwYUVNBKI75y3jmx54fbQxc6aZMSq0eRAmqSVfcLzrrDh2wgbYJeH68193JOqkbmfuF3oEy2m7Cjsz0rcJh73jkz6n/sqhfnr05Qhu7s/Pbr48XjzcB9PIaXd9UbC2g/IcDCkckIlzFbzu66BShTSwjKngZI27cD0o0j52R3BRY3+5sSk/r1+HLta0oDvnY/N9pWRy/NXcTHttLDR+brsZfUcahrR0cRxKGjZ0tGn52G3ydgQsDYfzMNDcg2uVDqap1en+j1iN7hx3h/EgMJXmND4pZYsj9aEgnD7/0UCZrjJHEyp0z5ihv8ESsO46/Y6xvT1tDvuNPeSq13ZzRQFa7HrnH4QBjYaan839Q14d8ag1tseJXZYT5dO+CNeRC0n9G5Vij0NqDgLYPWpjOLMxf4/CBd3rRAUUzZoSHwiLJBGsxCRxtEySkj5ySdIys93F+3R62ni/AFBLAwQUAAAACAAsg0g+r9j6XoUPAADYLgAAEwAAAGVuc3RhbGxlci92ZXJsaWIucHnNWm1z20aS/q5fMUutC8CZhEhKtmKVZSWbOLuu25Vdtm8vVZKWHgJDEhEIMBhAshKtf/s93TN4I0DJud0Pq8oLOOjp7unXZ2awLz6uIi3WaVjESkTrTazWKsm1ePf6nTj85vnevpiOJ+PR+Gg0HZ+I1VIEcZooscrzzcnBwTzK50VwrXI/zZYHuczU9UEY6bzIo1jfqExHaXKwtzcYDPYG72WOXzIeCPtChGoRJRGNCpmEYiMxnCzFIs3EDyWTvxvaH6PlKt8D66DQPFfm4t3d93iYjscvfJawB/XTLBf6TpePmdrbC2KptXiTZVa+5fg6y9LMff05UBsa9072BP7Ahw2Cf2QiompSqbNPgohwA6Yl778US/U3+XOaWdbnxdpw7xdai/quT4SYq0AWWol8pcSa2FZvkmI9Vxkpt4JI5uKmi1zVc6S4UzITmBPKXIlbqQWGQ8/fY+oPSolPipSYpcmMmMxYwgycP4mULSGiRHw6T7O1jKNfVWg1/8RuCVUu4RSfmbGhloXMQhFIUkHAP3Ieq1DM74RWeU7ezFdwleX8o4y18svVN+y4L77DUrNrLI7UJRVo9VoFKQKDgiNfRRmHCGIzXfDbT/zrk8gLRO2Q9AMfaypt5IZp4uRiJW/ImqkmjkuO76HIU6EpRDZZulFZfOcLeEN9lpQC4AMZFStow6RpFkJB11khGJXOHQHv5/DmPtYx8cfzidj+c93JcOwNMWfuDCf8sHCGnldO8UN1c3j0rH8KUQ4x4IDIGQrQ1RM7knontmVtUp1Pnz1vyOyfIhyixP9BjB890plVW+9HWIEYPxaO5fHY3z86D/UrZgB7ipfEUIz6/g4q8vsOg/uv0uBRBmwXq8NLu84+ZQ56WP4+DXYxaGrQa4WdfweY/xYplIkYWYowF7dpEYdIl6GYFzlzpHR3UJ9l7FDBuY6QhEiKJAqQwT++Of/ur7O/fff+v1+/F6fW6Xt7f3/9/sObt+ez968xmCk/SNebKFZu5jgOZ/s/TM06e/fSJtery/DppY//ePXq9sUabWFdrIVz7p871RT1Oc9kOc89OzHz/svjKTK5K8sjtGRS4WB2lfGWjam9JctNpjIVv7qQ8+DqPgu8SgNHOqcy3qzkkELtdI66h6fAOQW5kigkqHhhRDV2Tzz+B34Z5gpUVBRksuxuNrVaDRM1VsuE3lllFQo8BMIr95KzshyiWZ53hlG8pEGiMWN28h/hlCF5CV7709sPr72ynXVqv5vOf1ZB3uhbotMXTX/5c5qGtYUn/rRhAvVLEd3IGL6g2jvAS388aNL64+YvOWm9O5TT9u/51vtg67d/VP3++PaHtydiEcUxtRH0jyI36v5JNrVtOYxCEPEk8tvUxpVuaVdTlq6M1Y2KxbrQtuHI6o1WWSTjLfVbXRBISMxmBIZmM1ereDEUaFE7GvXpx6xQXq04mHyfKWr3sus7NC+dyyRQYpGla1CUWELnGdqz9Rv9fYt2KtdCi9/w6p/o7qpDu0W6Qz/x2zxN43+K/10prjHwNtq4jqh5yqSVL3IDThQRhF2QtzV8WRTwFmMYqXcioTavxCjkix/UQhZxLshOvnibqBIxpHO44kYRjqCyh9YOVu/u3r0BeEFWtnOZJkRJngEaB4xewKQPFxHGaqulRRxdK461k055YLw6RhM97Ht1NH7e/+IZ5rTGG+iL8CAKBkMa6Jss4zuhkiAtMrk0MHIjg2t6lgVgEHmkxYqBIyB1TDYhk9vlDBuwsozlUG3i9I4hGsSR7bacECJu/aWPXnKLyENcrsVfkUmfKx3WMsH/MsT3LWUk2R3yIGCd3nQ9gFqmo3kUR/kdOaDgHYLcRsRsb5SU8UC4ka/8FptbY5wqhmoUHSvNQDExaJHCuEXj+c0kq54pQf0ZbVeUuztNPZNa33JRXSMV0rDKdcrFGcNXN4jBojK4qfxk6dNmjx32dRlUdKr0LcJGWchUXmQJ9mssw2dpszydwSmu2yNwWDL0PKs6VyW7zN9fk97RRGyjbPWoXMaBw9r4TaOuZR6sABtqDOFr1IJg5WqvIooAQNLc0LZTK5MRonPHLk97dZlbZmmx0RDETHz+GUZB7tZSzC7jVFxc1dPQEST2AXYR1fA8ToNrkDZCgswRpvCskXTh2DnOFRuQ90DA1rW4Jq4BKzPNX6rcdZqvnJYZWpPIJlDOpYZ+ju2517aNUfJpv5ZNRheTE1JyyxI+qrRKQpf3WS4z87ymZeoYqify0NZqzGB7HZYQyUiLIOX7dIcvDOHV1y+sK7kyJDnia3BbaYRw9qvKUj0DeFnmq9OJt9d+v9NIlYvh85Pdk1oZ3LKsAXdNg7WWZV47W/6m4W3T0/akrTYmbhExqw4nQ3fRtjx5jqTs9Ftjto8Qo1VeNJd5Mb6qd4coCS5XniuvwwSSS
INI7xDSFNRjTsjxtjW3/L5ecbsHJjXxtK1lj/8tg/9vBJQkHNZlNbKc6Ve7EuxAYXRqUjOAHfCPeCUmL74Z95XOnYdYg0ZbbLfdoXiCfwcd8w1uVxFq+ZrO7IQBEIsCzUjRYcs8Vmt9gpkD8US42wpS9el0nyqlqy5E6HAGZaiZoW9l6WaGKgaUkCxNqnI/2pnjnYwe97Qu3n2aHeQvBeCU0sOyp0oADgQ/8AhCQu/G0IaD7/uDigmBtLnis04Vbs9rLqsBwxkLt7G4QfPGzoQzWyvFtltpX/wPnaTRZpMjxE7UHRTfY70eBE9UoqQSTNWSyfoQxjImgmQy0RYW3xa97QfxG+xp1mwHINkukjADHQO2IWJLGgAlpjDDIUMFBadpmd3Vaoz7ER0CTLeLHNmNj0K1rzdAn+7AH2yVWUiAlm7iUVZxuiWUYqenwhk73aLyIEIZBDKhesQ7R7TSsLQyqdDNMNZ/CwPbg44T4TzRDk2zKZa008VrFy9aeVmEqL4lXqu49ARHe2XwTqyM/dgCeLgYTdgK464NWNom3TTAlmHAhsRLT7zsRMXJToXHNRubmfS2UT5owXZH3UXGde0pYXE98hB4b01h+G6mtErIx0wmGkFEdqlyV30GENHlATfVc4OGIxR4k54NHYkU/uRoaSHlffHTTz+Z/V+YKk0n3MFKAQhRzEbJDfanoeFepyiB1xLsDxttnTWv+w1GHN/xf06jxKXV3XjM9Ib0JRYPQLdmB2t7TBNCM8TIj+6rnRLtHIDStlyrPd8Z2edHlajnlTm66MnRhmEsMYS3+eDduCcDIgQuBXHZ+bu8oUGEfJzuSI2mOTov+QWZp1Qr6kNKRDTpRHkzHSiqdubD4Il2qXZ4dWeezTgBZjM8JXKtZrMhp02zRZvKNaOzXpmVO8WU+kZTBBe/j3cb1a53dpZ4YsrHE92tddAlxzyjdEMPHjRyqtEWdpipX6rTtG197EYy0uXxmOE07B6gbXnSmGVrzYb77vpCLmca87OpY5z/p+j4cpeKidqpYll1oaaNF7a5ldXgsdy9zAaPMujYKIaOzgK7rFu840f16zJosG0Ia2n8r3FdNrkSR10sl0rns6Ry3sy2BVfXJ+sfDBV6Rk1XtY8gpitM9AvCWcvoRiX9p7lvFuIuLcpT6C3gyCddEXeN6hCfz8y6YWWsYVsMH7FFuUfCE+Zv8D3ALN35klrYRPIpbH3Q78IgrLUJvM5Vt0WNqN6LIuFTVruEqruRJaprHc0fKFSmYW66bRCzyqGYyxJf08ktn/vK8hS3utgNioxOn+M76jZ8BG2Og33xZ2YmRVisN5jCjMwNcvXxQsHmrD9CGIqj6TfH9qh5bTw6EtPDyVS4zw79F4dPPHvI1T2kJ+CaFtTHZWIv9kcGzdLKZJGnmBkFZRCVvhmJw6PjI+F+M/HHRxX7W3KQOShl09aThEUyBtu09yr93yI0T/6/NXGvRff6pzI7/E1baobfAd0pUuiEKlfZOqJz+GTr+j/P7uq86l486W7BEnzbF2fAyHeVHkyl+FuOHQi7lmK+OGCWhHe0H6e3KnNt8u8zJDJehO14DRwaa3mtcgJ1TEYYJc2iJW0JNzEfvrnOiG8M6dZbOnwVPqJ7Q4evEPl36/2uzanrbM3KAofvHg1LcyWLgQdZjICdylkPUNljO8sOI35nZITeg/x+VOJTovDNnFnjWTSYf53uDapm0SVnZdone0vqhZX1K88h5nRKIU6hj/2BNilEgA7ugkWG4hbZFIZI7HEdAZnydTF3swEo/jgAT3oY04MNvzYV2BkqPJRUtQIQwHW3VsTcTcxH0wFpMZA+HlCCKbGVuVMvN7X79T2ArdKAoXNY/06YC2h7I63ywNLHKne0vTphjqHUK/76J817VHfpovs+C668i8vRpX/l0oWwWczl5HLaXgxqyghLHGWHxxMxelV9rnI8sQRjf2IIjl/Qe/yk98cvegRbcQSMzeNZdtYQ7nelf48wTE7EFEKlfzjkh/lkCCkv/PEXe9m7JcT/cnXGS7zyLv0zu6xetjdj4nkzsVfO8BsiC4EO5EMh4zqtM1UberwXaPKpNurY7/NVor3+MyVu31w7Fwm2eHA0+jhdgY7UYqECFJjE3JMNxQDN49CnC74XA8udryShTq5PxPGxcL9MUdzn/KlGz7ov5+OnbEz37A+Xobd73UIezA8CkxRJWl3di9d0X0e3Z3JAjuSn8cAXH1ByN3maxvQJ1KK8Ad/ncwm/R9Xp0TPhHvvPnj+kLWlqfGQDb/ly8morjyiSHQ6t8/NzR+RyyR/kcSbjR58N/DOXJ9zjv5d+RgHQDDAa3YqwgE1CokZly4SPJMlB/kNWHK2j/jWMXHk/vw+8B/NHCtP10CWpVJjsD+kjgUH5vZxp9/YulZW0X0RsZ49/OeLsuef5jUWNH5H5+wQhgIwsrypyjwjhWm2WZvpEJdIUpbBPCk+6N/S8lkFbAJ3/OZljv/vD84hjAJ7JlLl2KG+jadROEigNR3LiZ5Py4ziEsh2jOZNxg/CFf3g4mhxPp9OaEGOGkIa3abOSeBdtNxyz+9H9qBOINKu31vGKaaeHmkJNwXzdVWi5tPcGtTZPe7QBdVtxJDGNTQ8b1rAlHINNMn8Z5RXdA2TzX7PHyNADJIn1LaHRD2NtwmbREF8mz8YocUcPljiT3vfQ9B5qfEV2W5v6hIOE29hM8ZbD4CNsXBCiXsvqQY/Np8Sl+jzIrGkaTJoUo6D+fKhLAEuBw02kbo1drPWCfot8mU6EO/afP1z0YQHwvLd870etahSgqk5flRYp0yqIDz5eU5nTYrD5PGAQi/yKsnZSmQ9xdR9A6kRyq808AuazLprP/k1wvsGSth97e/8HUEsDBBQAAAAIACyDSD6n0vFHGAAAABYAAAAVAAAAZW5zdGFsbGVyL19faW5pdF9fLnB5i48vSy0qzszPi49XsFVQMtQz0DNQ4gIAUEsDBBQAAAAIANKFXD7VcbgopQ8AAHs0AAARAAAAZW5zdGFsbGVyL21haW4ucHm9W1+P27gRf/en4DlYSLqztcm16cOiPvRwvdwtUOSCNGhR+LaCVqJtXWTJIeV1FkW/e2eGQ4qUZGeDHJqHtSWTw5nhb/6SeSa+P3a7Vt2I2zpvxN+LXSPrWvy50ubbX2QDPx+3uy4t2v13s/l8/utMNrrL61oqUWmRi33e5Nuq2YqubWuxaZWQW3jW3fI+17IUlRmuFyJvStHtpPjhb7cwdVYgkVLI5vB+K067qtgJfKVFe+yAmCWDX8uWJuZFd8xrSzGdmakFcJ4XhdRalJXuVHV/7Kq20WKj2r2oWyBKS//87t0boeSh1VXXqkrqFOWZVftDqzrRavtNSftNP7qXSLfZ2qeu2ssZkW91esi7neAfUOQm38sFsKLMl63s9jh+ATLDS/zYVDU8/tZWDRM5dIdcaWmp/HJAAd7gKzWza7I6zAx+SEFSUBiPOByAxy7bNBkoCgi4qUXbbKqtmRnMANW1TQUKWogCuYW58K3dAzf5fS2zB6k0EqKZVVPKj7LM
UIWWQPzDLq9gzlv5Afa3LDP4orOuzfRBFguBf7Ncw5QPi5k4+4+Ez8q8yxe0hRnwAtpOZrPZM7Gt23vYwfZgNvUhVxXypmeleszUsREr8bpt5Ax4vW9Bh/w4m5Vyg8rPGC7AedVs2pgETW6IHdx//Hwru6NqEM1lRarL1aM4VbivMEXtc3wn8ntCJgDxkBfv860k+apNBSi+f8QfiJhTqsCVwCKODVoBoik9KLmpPi4EWAlyKapNQA8Mqmk7MyUNOIQNJ/YzgBCIiNiJfYLRjz/9tLx9/eqXKKHxQBgpEeRifzJLjv8USW3Uhc972eVZ97Gz9P1pDBBYKMsYfFmWwuDheojt2FIaLUaPJSzwn/8asT7KIpgBADAEmTncD2IEV1+V68h+j+7GiCI7W+GftMC/sbW9nqFkYhb+BiKu2GS9wYwiK7CS+/ZBxuB0WLANos/aPL2n10NDjNGsIpoNsO41xgieVJKsgLQ18x+321v47FeQVcrMDHlktMdo88aamFc024Xh2LOxVB/qqsvwTUyDjQDvtxm5NUYCEltH5EpB70AEVj0vKLPwJFEvyko8OIGtZEwTgAmzzhk48ATMJCyOJwoOWUd2z1EaRDQGkgDPRLw9yCbGyTDo5H5IT6rqZDwnP7gSV+rXZi6uSL9uSFGDK3J7Y7R0wQ99QhinRRoHTgJNtlclkRfzK239h6OBfBEFo8NaT8xy1pVoccp1P1m0zY24ismAEo2kcHkWifyC8clxwc6BhWHCPxg3mN7MFx4P/ONtE0TimzlD9ANoAUKILzZmFCbkNKJIaVYvBOikSFFt4KKLndQUnGIMN2Y/bgJrN4tH+PVKR7xnPVsGjGgFwIUhS3bRM9TDGd77WP6gYYqWXaxSdvrAtOUYImJvX8lwA4D6sQJjhlRLg75J0REALjU+HqKsLFEinVijM/O+f8irGiOh4Cg9MbtIazR1OyCQ4gIoYY9RM2h+IJaSmPwd0Eur6Bp/uI7X//5VX999c82fyTVYB8JC75Dfxm6D24E90TEkUy1zVezi3mBAoftRWNqnW9UeD/GLZAK8PAZJICTqvJBxtOu6w831NYofJf3rNEhiza/Thom2vkLbsu59j/JHV8tvn2vBf3XkbQIOuBJx9Ea1v8mio4CPC/zDKBy/vrUwf4w8vYs/PRdfi2gVOYiDYwa4WCcYeII4CQCPu4L5LFq6p85Ns75Z/vFuAHnw3F3VHKV7WRO6ERaxXYz8JWMk8Juf62cveilePM0P4FXLOIr6OeHekpI+32P7DJgshKJHSJcMvE8//H8uFTEOv0xGIzzuA5xDVoKf0V0SzhmLdUEHPqC64wH4qDEaG5yednmns0ae4mIEzZcIzRcvz0HzNUPSbR4+5NZ3DDD5tZiv5jMOSQQU9GlnQMnuSLewNTsI9sgfzHiVg9gO1ja4ILhxboBkRLCfi1Q6e8jrqsQMkzIqF5o+AeqqySDxgL8P9Pd+MscZEXXTp10+Eg2BTeMmgT3iKH/IHogkVU1rnHm3jtjIojufakxjv1oR/2jZk5XSuCqjeYn4buonJJVMRj/Gha8xohMMHe7pOwWi+Xl+MGCUUzStwHnMjGg3INWjl1h45Y5D4pyhzt4McoqBM7aV0BtaA6umsNp3lBBqOae7VSpTeJg/VApbB3OTSezz0uwUciYKLGHxa8wVbmJyk7AAMwY3Dw1u/vRYoC8Eg5cvMRgsI6NinGjTCSww11FhCh4yKHxjEgviNoVUVIHRHKWOkz5hsibH6QORXIj38nFlGhlp3Z6kelJceYr92QSDEB+mHKGlMXxcxnKZbJ8PkjzfQOoWwV87e718cXfJiGF64tGibN13C5SQDCqf0X5y1etSKrt4shAT2Y512ErmZVZK9PIZBhUdP6XjsIe4gJ20voNw8MCksROGuEcIcKrKUyGFRgDjjyMr0/+3RgIHVUXo5QcEY0mpDRgUIuMCGT/oh72HEstKFBv3wag1ClOOsxF/U/mFXOjmsOMEv26wmM7LOBlMdFWc/5rsb+W1rGL8E44ZNsLIjMMhoKR1YNx3aPF2Y116qxlPDKVTrhosjMGWq23TKufwVxR1Bxj7Z44IQ1ActVTcvrKoQDDdS3R+zRbAEpOnND2FEttTxwPIBb5wYVHQAiHVTzccQY3oN7DSmVsYcAzoVtJYE9A87aQZq0zBUxon1YYdMK7jDGbfIZ4DObkNCKuD2wMjBkYq9OH2ZxMKKw1fMbjU2pR3pdS0IpS3J1nXC243V13fcVPExnJpVMDrLISumkIS70Qo92OX1RaQKVtpCAH/wBnTt1zRWCO0EexnCVQx/DINbJDGCXAAIzfHhqwaZ3GL3HQiww3HkBxato0bnuVhexwWQcT0VkHj1i57x54Sgg8+OfGDsELl2siFObo2CI1DUBBRwugF42npTzh9U+9/oOiF87FA1vO7cFaFLvaDqbVNx2OK9iR9nh4PgaUm0y6BUSS1g84lZxSq3ChI5YyOLYt3JgONIcQna4hZE0UJZ05sOVjJs4VpNDFT18deO924FOxyfOhjjoMSvg19wTuKEQzuHsXU2O/dv9hWD9JkT64pIdp7DEHWIgcIbZv6Ed1Lec6EQoyW06WkVVQQcspRXvmGub5SvcFpKffsyxzR1PTjDM1BlDJpF9gF5AJ+N5mKZ7u8t5s9Ew9ZvhD3WW5sJdjSmz8EGf0ADDARPSpJeX+s6pLSrP6JUv8uBtKTKXs85xRS7CtN/pHgUfmtOsgYUBms+SuNGphAKhq7wcwwGvXNeS/SrGH43SjUoOdJbC4x7It75y6YVGlM5LkdVxzVKAnSbf0gtR8WEHIhZJAMl1IkbtaqUqq4p7oytP3enD7bJX3dBnUDH8+w03GYj6CYtih6SpYbApgZmc5ymQ9bFhg/3QMbQqbt4zmql87OLv6bSFt7DtHs5UfA8IvE1XZ8iDbS2l+DUotredgLQXvB7Vv8R/keDqBdONt+BcZKP9Oh0ba1DOHEgCeTH4uaknNIePD7scSQNUDRLYyHVAV80QNmOcBUMIOOpgFfLSYLpx0eEWI4MxUS5AiNkHWPOkhH+oVSIf5VybrUo3JzUBui0NQbGMk9Om+xa08VHrAB3AZ0LHwiWD4ie0SflbeHWtZG4QMs7R8lx0cNIFvN6QNABsXFVqz5YPVOrClUiTVD5W5+BnaQTBWqolmrLCvbIstsZzrFzJdPBebLpTl3nqJjjotWc92hZ+nAo0yN2sn6sJqDdIc6p/NVPsk+Kj6QBctFBM+n1o+WG0AZcAG7U0yS/xwmiIhDPbKCmnaGOz9jofOYspMusuEcEr8KS3EIW4JZA4LJpATzfsDvyz62PeDD5fLnuaeqAFJgtiNfhmmeo2VltI5x/Uu5hvr6NHEC79KXMxzUhgP00F/KAdLw6xNIyY9KgbOo/X4WF0HgUDu5P8NUY5gq1eNSHZvfRTPYFRan9ggeYJdD2L+XkLuV7amp27yU5TXXJ9f9ceA0a68Na027hP3VX8paIyuqFC0jkKUphzwfQOcwb9j+UjaGqe4Z2bWR3fS6vnhXiAq3YbC
KqmzFe2hFTH3GBI3v04aHns36PZd2uA5nekaYByMMB/H5Yor56ZnLB3di9cQ5sA6dSiwbefrdPHxrC/NRE4v7EaalcF5trpvMLAOzePFMbTFpO6Sma4OPcZ/zQDyO8RW20p9TTMFZqQk1XiaUSqVaFc9NSBVd/p5KEKR+pNPbeU+TKBin6whaX36WpLYRgeb0zt+mA0LmoJt2B/mO2xI6oXU3eLCupbXIb4Hd0YOBZUJUUdCg4xYe8OLP6+d3mFintwNxbEE0rXv775kQDk19FkR31vp7g+byWOYOHbNhvnmrIGmBIs2uCVieGu1fIrFs8r5d5tKwyffi7DvzmJpDYfMQJ+fW4sFY6GBjMU7GFcczUSgJoOWxlHZSsxL7RbqJOkxedDdc31wvYeXjO6w3zE+mTXlRKhxppvLFOb55s3DdqLNTtex4kjaVl7tgR4rlR/rJ0loF0Ag3AjH4qW3gjah9NUwcyk/vAvZh6f4h30+6Ncc3dMiCx8TsBxO3UNWA6skNh5waA3kSZMzQvobyT6vOYoUWwTzmieqgezb2TeChvlqJF4OiauCWuIrFzox0rskcY3ihB5M3t8DgKo89fLDOILkslzubvijcM+HFC1+20CFdlGjga+14/3D8HKe9Blcr8XzCA3u3cFhZ7LfxfKW2eE+wBaMhE54nY8rfiW8nCH8f9BXoxrQjnzejFeb29qM9+YpsBU+LDFyySXKeBFwz1NNP0Cyc1JvtvIybOWdWMVX8NBOmU4PVEHHetNhb1izPMzoyOHtUYZKC/sTBJL4VRMZ7aQ8qxs2rC1V3MuhT3Dk+3oRrYrbhddls7m824ak3JJ7hQYaWRMzVErtgjXuJ5RXH+dLES/POkRunFkEuYXMEw9DUnalwBNINOg893dAceVoKiipyVcaXmxnmUNYJ/kp2uHN+98jK0Z8qBrdLvQ5oq9M9JFkwRA/GjLoul3tGHs0i3SBLPcMLEV5unYZvsZPF+2xfvlx5mZ3Nr+xmnOuXmLi54k+nm7emQnEoj+k4CP2ADNtrY3GNpm9uQNdf2moKL+UYzvhWetAnjQ1YGylLPey3J+awDbOTxyEpZzxlixkPl2VVF4wb9bXozAjzHu+OWdBj3DRkaNMSYHpmCAQUaMbE2cugjW0H2n265b2wG+WM3xxguKswo7tPnw3OaaJ0+WbI68Q17+CCzgSpqVa4cZ6wq/jfZ3L1uOCTheNh2bVL/G3UBZ+8N9r3v2cz4CCjQ5Usww2IsgxbZVkW8VVQ6lDO/gdQSwMEFAAAAAgACYJJPqKWffycBQAArA0AABIAAABlbnN0YWxsZXIvdXRpbHMucHmVVm1v2zYQ/u5fwXkIJG2uYidN2gZLgWJttwBdWwTp9qEpXFqiLDYSqZJUXPfX7zlKsiQnDTYDiWXd3cN7ee6Osqy0ccxu7UQ2jzm3eSFX3c/aFBU3Vgx+Q3o0yYwumbZxxV3OWhlfWfo5Y+JbxVVaW2EmjaJYr6WyLq6dLGynntclV8vV1gk7Y6ZcmqxVhiYvCmE6xeXyVhgrtVou9zRiCOBOp/hWm5IX8rtI/24MZuzCGO7wxIv21StjNNyapCIjh5e9ryE5H51NGD5GuNqoLqJwXylqARKutJIJL0LbGk6nU/992QC4XPRKQK2MsEI575JlGi6wyugvInFM8VLEI4zWCRsXeoOTd4eS5jJTYabG3vbeZCq2VSFdGDwKoo/zTztTXaKafFWILqVh+73n/h/yViB81kqZdUaqNQtFvI5ZsIiP43mcituj48dBNNtlSzG98rFscpnkHghl0huLg40hQeOAtFrF7LU2YAovq0I0h9PnHg/9cYt5ELHnPxQ/hZT9zK5MLTzUJhdGcNvjdiDAaA1I/zUvrBgF7sy2NwJgLi1aIrlhJb8RoK5jlbZWwgHmNAMjWGCSgMmm1Lcd7fz5AxyvVNbWsZVgmaakiJSttkixRWKBlsq1dPHOBEjsvMOLQZyCJyIkmBkLKPXP6BNEO4O2BndagArcaIlviajcDzpiGPRFhjo6LhWVnMK6HBt0Vc44dXOY9XVEaGYANEgJC47m82dfiC0W3VpsO4dJp2EXJHWS4wV3Awzeca+nDhGd8ljyVMT7CWjPawlfpifLTBZi2NvjFrXeA+jZuqSGpF9kwcJCJ9yhStyxz2T+OWLcwp8cwaYikcizx2n9g20h1Brj8Pho3MeZRCl1JVTYjMfArNrC5RC0AzdWYhMG8KMVoYXgBBF60Bx5rW5gkkkwgqfh6cnJ8WlPAZkxpV2j1RvRZwX1m92bPK6rFKGFXjNqfYyTQlsRRsOJkscIFswU1u3GDwWyxBoI8beX0XeVHxrtjgDI1xqWM4So0oJy9OfV1XvGa+RYOQwqYtUIwCa5KMWMKeGQ/Rlr8kUdX2JHAM1sZywzfI0kdJsp7h68Q83QsZjUzifD2zQva8LKNXrwvHOxmZJ+rjdHIsgmfO/5QPGyedOf0arEPE2XOYoBiOADgB69WCM46tILo9X7m/XhgQ3YwXCJjVLcHYBvT5EWt8v2xkgnlqgWX9Lq84nP9IzMZsTa87daCeqp78I/3uE4TxlZM784idywxKROG2QaYjvG+zJRD8+aAd4NLI9EzlF7ezvUMqZyN2MBzYy6YUYS0l8vTyD6BxVuWhuO0XT0a94DYaBi5d3KVKSzZv2tsRQtW3FDslSiJnyLvqttN39SvVGF5ulhoqvtuLfAeTqipzuuMrF1qa5d7CMMg4Nnln2kEgwuHCEZRdF9VllR2zzsRQosSGqaxvNJdyRyCHVunN1IXA8CSt/Z4WEQ9X74i5Hnz8cnZ592r388C0RxD3DuXLUHPNpQI8hdV+7E7cjvOPbh8o0f9rihjSEMl1bcUQun1+rAXqvfuaJW8gyA7Oxakc2BnSKnofBcjLoY7AC5QX3lfaBNNL1Qt9hNKRmcsQND9t7dh2bhsMqet/7hN7Y4XsyfHPWHreos86JzdnRyeo83A4XF6fHTx5P/PGY7y/8/aTPdknAwaVt7RDe2vG8qD84as9y7qTFEVS0GTP31nHbQPQgZ2seFKmKHTf5+YaeP2XNP7DHo3faJB5eMPY39VvFeoVXgxmJUuIfa89O1Ch5sxP391AEjg54QoyXFfjofp7YFFMa0B05fv7h68Ya9urx8d3nGXtJsxNVU4MKb+iF5ra7VdBTS6DOlf9QVD2vJ5s5bV7hBYB5iKuJ2U1pWSltyl+QxzDv6d0Zgy3AHd96Lb7jIL6LJv1BLAwQUAAAACAAsg0g+dL4k8M0OAABSMQAAHwAAAGVuc3RhbGxlci9pbmRleGVkX3JlcG8vY2hhaW4ucHm1Wm1v3DYS/u5fwbMRSGrXSpygOGBxWyBomyDAXQK4wR1wrqFqJe6atVZURCqOc7j/fjNDUiQl7ab+cAvE2ZXI4XBennkhxaGTvWZSnQnzTT2OX7+Kbicafrbr5YFVv+petPt3H5h9636b11LlXanv3MttqXhbHviK7blW4it8EQqJ4f+16FfsDynaM7fSgeuyLnXpftdC6QLmwwKGfM8/DaLnB95qt8
Q1/7RiZV0X8EUVWhaq49WKwSKa94VZnOiUqoDphg5vlS6bhvf5oEWjHK20koeu7Mttw4vPvFdCtit2qH8oiOUzdurTH4p+t2IPvdC8wE0UuFIx9E1m19zvBSwbr9h1ID1d7NqirDQsd3Z2VjWlUuynu1K0qdz+wSudrc9o7ZrvWFGIVuiiSBVvYLmed1Jtbm5XDPjdSsU3b8pGcZjh+MJxuX3JNm7Y2fj+gh3KTpGIerEdkAnFtGQoRhVTEW3NvwCN//w3nE6cMrkzvAgte8HVinGh73jPGlmVDZM9vD1IzWOCNAMI3tyOz3c0tJMMaNLrdST2C/bxjsNGyp6jivn6+XOWyg65Bn0+MmKR1xmueKd1R+/dw4gSMYB2g6uk+CfzuxK7SG7r+UyjN+IwhYmjfsLnODDQBL1iyXUgpnUSbxx37SUTL2un/6af9Ql7xvpg0XEb3ihWRhRgWZuEvl3WvONtnesvOgmYOj8/H7+/rmtWGumDAYD6WIW6BVI5z+F5WdNDYwYofdQ6PgEj994BDgQKEpqVbc2GDlyB+2n54sInxW32fQ7MAQx4I3tcn5udeq0R55sQNfKq4WU7dCQdxIKpor2w87JD+cwsgaQIGwTCRP/7UbCRuYzjckCWXqsHMP80sSYaCtyNJxhMx2k3f13fTkbh54K9ti6ELCDRQPyz0SflOJHnb3onB9AQkTtf+Q1Ek3izRGbOlRz0KcY8fBTgpgSnykp6OrTneujbP+mK417QOEHf62gj3rfKAXHLharUr7oA1ymODshkE1vAwUAMR+UQ1T6XzcADivS8aoDVNDCjlj8UDj5dlMvJUwrjl+Zt6pfIImRANEZwGOnkwHdPawP+xEKZBsMU/wS8ID30ERMblynDv8OMsNfijXUFR+YWtoWEAkwC0RSHUlcA1zE2fTJuewSDrkn9BBiKawSYODI93InqjhFhGhRmBBRmS+/bwJDYCV4HoLGMP5ZP3ATXaTaTlJeSF8FxMYldBEG4eIEPUvyTsY0FEoRH4D63ixstzX3NvsZQZQgEcEeisgOWRe+l/v+SNzz08hK90oG0UWJjGLGkKtlq+KVYqRmAM4yXLTcLLCsnzAiOxcZQgTBiZnve6qaassPmcp9I1z2+YK00DzEaxbKipMSCKqj30YSnCUFjYbGybK5ZbAfR1FZlpOtTOoP1mB46CCJjqkrzM5JYGfFG6SGOgbgTeMh1aRKnf9qXJt1kwAHHCY3YZjmDjAsmAWjdyRqnDwo8assb+RD7CQd/AINHsaDOw+VN4MKnTdnvOejcsYNOQFx7pBwOW8jLFzduRRgFeJ+xG6zzGIXDbqeiJkd8glNMvOGb+7D8e9FYH5n7xtQdwF6cry8y1cD0DWsc/6GRo31nWTgyV1BdpPf8cTOOjYwsCGo7sGiNc2IfsLJ+D945lT+Mvbm8ug0k22O4CUT7TeMNASeEFGOByiWWofRPmsRU7TcJRsAk5BGGAOkC4xyy641AneT3V5qmMD6GnMIko0B0elQKOEUJZRQFc95WkNxb9BspRR6R4K9kRYaDJFAHZYNpzIia4WosQUYTn2UPLaRQau5pdpJiSqLLojceJNDHIZU+JkM1NHox+oUViVPxRFj4ubCxt2kc54qlD3zc0V35GaKAbFCUWJ2Z6jySjiED1hZ4mYLUAnI8SaiEeUZsoeol5mG+0E+NMvscf8yQHoYvZbKCLK03JWUkcoLRlvEvsF+UY8jIvBmAvEMBCmGa0nuNUBlIfjaepPuSytwlxhzTL3Mzv0Vt/wh78z+XJ3mFUsrQv4wFcSyhlyiEB6EgI/xjAKShfUDiY2QDe4GwF8V+ieoTaoGUk9Mjn+865GyWyZiXs7p24qsUyxv+mTebqyM++waQQMXKBEBBF0pwfjUAEsL8R+N9dU1+dMCid+pmxscxaIKbUYem7BGnu84Uo8EKMNtHVhOY7VLglsTvahIUifbosBOXzxZ35jLS0S2nUSDWbezCUwRcxD73QeMzTZhPRywNQ5doh3m1h1NueqwJ0mDf81IvaMGM3PRWt1BcXE1jd2wK30yOFhSGADXx8YHS8NEkvAblQhoTa0wcwdMLo0spdeQyqcV1SB0G0jAQwN2YBpyWbDSXgJIRxgtjqeCE7SXhEdKLWEu3vCohNQvNCFKSxwlTbm0TYMGud0NLUqIFKAhBHkJgENH3yiuV4j1tLEQmKGpekVYCn/6krrBZCF/WLH2xYsn1hw8fkyxqHiLMLCilX5Rg3LWJDOfKmMTT2gbXwaLA4qU1Vr+t7PyIN0FyxWuz8ILr+E7d5atXPXv2SuA/bNqlYN6XNA384+bF7SoC9Owskt5L02oNV8fQY9xZpybMeaaWuLlAvzku4IWQGndjo11PIy3InALtUjgxzTRXoVBEXxrG2HcWKlu+h2LkM2czzAzls0whOhUglzoycCFTQlQQC3Fq7Aem6QmFEV6Nr69us0CFlgwl4jHnMKfGdqLCLPrmah3Lm1Rv4LMODcKGSHgZwKIwBxmF7Gvexy0WEOPmYz/wb+fhmL4eq/xbDjAC2LTlbi1ee9T7SGCDhGAUkQFI2YMaWzSYMRkGREH+bCP5WE6sWAXpVriQyYSBJE0fG40IUV1Z3Zd7T2jLSZPjzC3fYfIF5hDn5Huplzby1G70z0G1a8lZJtFdnvXn2KEP0JBK1hGWgzYFZdQEXcHy8WjYP1Zhx8uzaS1Hul8cfhNRvo3wxnXOzTlZGo3MniKcaOY3cHcdyCoS1tKxELpNmPc49M+O9+IoV5qJe1Yb4OM5PhEfDgeWU5jFHMji/y/X1x+u16CiGHdMygEbwr2bk5wpN+wvGxstjwYXrzcX6YGcQnKxyNWjyiFj0OlVFsbdeWujkj3kNJ1s6ZjFZj9R1DhgXQDuaaJfQCxMPcA/u2EhvUAndiUGGenYL4LiqopV7zAzWGI8o2SXP076BjVLbcWRUTzyJQmmgbMQ6rJnWmniJK0y3QNbCk8j7FIdHHH5WrFGYpoJ6MlN0UslOMhOKAVfIJqJQweFRyPlvauh8D2BbUDJIiHkOLy2sBkU114ntMaWA97SUFdwh6V97EgkoQIha1bvAw8Qqhvegk+pjP2NvpKYJk7VYh/KDpulCifkix/rbCZheWphccGwMwkkgoymdqEAZd7zI3IIloe56VbKJqUY5eVh+qb0cDSEhdzGSvU0LMSSpnJ33qGqcERyuzzbptko5pDFzRHJw75GxcGYdinZAq+jftADmCdJaZ71lOidv3ypOJ2op+d/l7Jz7aggmu/7srs7X6rfg9QE0wHXdHRl2yRfDKOuyRpcVor+XTrNYjaj+4GKOgM+Y8ri6KPuRlJ2GvsdKf0+b8kvh/2RlPOLSXCEqJXGjaXwYG5yMLR8Mid2f+bExzFizAbG3CT2ERrLrPNpahE3acWw6Tu/xxLW0jsO60/btavxMcRZ2Fllr5KsGPBa3ReH+gf7YMle6/6xAA3Nbp+EAn6LpcjkWIKQrZIdZoislg9tI+0tgyhmQsjzBykjn3l0plnxWe+j4p4m0Md1/JRxW/G0hzveRocZC
BnmtgPmwsomw44ud5QFAHVEiOhDApoo9o+ff3DnKAI0BHZbSShy8PS1kYBaDLZpmoU9bwSqLaKkkSXqyhFNd8KCPF3ipSqW2lZ081A+QpjEAzU6bsjyiM57qWe9v+8MW+YKDYST6KtDDNhyLU1bNuRgZgiwT+O4Tjq5KRFAyNRDM/wRCZAbSZquAmDvdZ59eQkDru3LvqaOt8ssDJuTHeJ+3u2s+WLxswoHIw9mW4zvdpAwrKLUZflKhTLUdgNeL/I8genuW0nlI8LK2CQmQ0kpAWiaGbXStPlATtmRk2iQ1GZ2moGpa5rAu2RFGb+HIFL/sQn4cpzhXaWd3JFBlhHYgqzG52OUJ+AlvTSEh6AfRB1042vUJdZ0QLhykToV2ShcIyUFzwT1vgz3IjxcxaH8S0fHBDgOBvrbXStIA+8h/xx6ozBrVuEcL1VcG7kxrGF3y165qTGO4m97JdE8gKBJ3MCLJXwjUiNioMu7a4HjdHiQzfH+9H0cWxC9t3yanjIJegXFwXh8YuSGdRIstlDXeeVO7xOmu3YBryH3ACiE1cA6EmfT+POWzCe6wmRv0SXZbVyiGsBfqjKf1oT7yXDiCkFcf2kYfddyHAajAvAHG5UdZqtgrt+zBG/W4OFa8rBNvnHXR7roB+pbkQmE8X28yDPuEG95ksrHR1JBTUBNscnqNMpHXfAc8rLJRb3AZIS5Q/lnbpCNGVcn8eZY3mOw7NI++e15MsvLTJMg6BnR/TW0XcOGA4CTF3PocmDLhtbeppwEcntPhI6OQqulvMtcFgvYGg/BbZ1prkZhg/zAIQ4+4rUDjFbwstSzS4e+Pw483OPNUjpBh1Lzrmz3x9o6NqN2m0WXddeTU/cw88KISlJ/+W/2+gl3F8GjtbQX7pype/b6yrDFkl/evr189/7Nh+eY/D03ebe/KPoVhtgr2fm/RfcG9UgATWxEVkbq9FvKIg92C7bUEWNfqdil6wUTW/s68wJS4rRWsDzhLm2QtStsyGUXqxz3Sb0J2klhYU059vzeHHxNv+Zoa6mfdJzpI9fixkUmIXR+o216f3HRh2cuA5Xq7G71aNA2THqHhMi0lZ+5t2DvBa0MnQDiHuKZxpqJGkz3y1YP9lDQjXybJMztY+ogdDE/dfOy1Ugiqnt2VCgD9qHFRBNmIZCicJuDEVtMy/l+PwU0/CzW/ZZAmLAIVUB9JeoC6Bj3bY+ewpz/6/X1+3fv3659viZamo0X8qnWxEuj5EBuC3GCc5K74GIr6Q/Crb1n9j9QSwMEFAAAAAgALINIPmF4fq9fCAAARhgAACIAAABlbnN0YWxsZXIvaW5kZXhlZF9yZXBvL21ldGFkYXRhLnB5nVhtj9u4Ef7uX8FusJDcOLqmRb4E9QEBegmCojkgd0WBbhYG16JtXvR2JLW73qL/vc8MSYmS7c1d90Mik8PhcOaZZ4bUddcaJ1q70P7LHodPo4ZBZ3Szj7+edLfTlVrsTFuL7U889/FHEWbj7zDdVpXaOt02NgqUaif7ypV667xMa4tOukOcv5NWNbJWK6Et7bMSv7S6WYm9crXTNI4vq5/Uwi8vtXUbLMCuUYW2m3tZ6XKj9ntS5QWN+rXXRtWqcVHws/o1aFGNdRK2mqJ3uhqMrcs3Gz7sYgG7RSeNVRvdlOoxL6WTy7cLgb+rqyv+/4O+V41wByVoUrQ7IRvB0sKfxPbbg5DWj70qVaeasnCPbgXjXG8aIVkP+QYuk+Yoatl1dDJWqikQdz3NCTqXFa7lmW3bODoWdsRP1rFtjVG2a5uSllsfhGJibinWaTTyCvqXPGNVt0FMMG9UsW3rDtbnJluvv/9i/5h/+enlEv//db3OvPiuNaLSjcK5+OSF7SrtaMTmwUX0V0Nf0FzU0m0POYksh3m9E/UozYobLKmLvWn7Ln+9nMzRmXXTq2GwvNk1twX8Baey5sKQv7p8uVywEPwBdf/572DzrmGLC+2U+aqOE2MhTPqwIPvSZAVhMOcdlkEXxwtSARp07g1BaWM7tc3pn7PokIKmkhDH2FtMUSx5muBCQJHiQVUVGVurMuShD+K7ylLstRW7vmFdgD3kjdopo5qt4hPWyslXDMZ7ZSwJvS5eT6yCzs2nFrFb0+dKuGOnchoYQ3sv/Tg562ZwUJ4RBLMVLVuu8DPskIzc9boq8Vs3brlKFkqzPXgx3pplu0o6OuZ8vLUE+8looqk7ukPcMtElt1/lXlmMM6Zvx7BKaxUzhKaEh5s4Ujc44i2fEetpIMXkeVk64xSsBK5BYDITNoUAJIoAytlGE7k/AHVZIhAnSHvw6q34fi3+FGR8xntKoBxz+Rg90N7GA4fi51UMDko8Y7ASfJiP8su51wzbvnXIa0vbvx4E/N6FLMvcFPTt174Af30FpHujRNOKzrS/gIgIqBQXILpuMeMOoMkW/k3PWqkm91qX5Db6Obc9pHVliahuAOgvC8I7Z2KEOxwJxGcL0oQf1zkbZxbj/HUevjHKruUx/lrqxYKwyiP0AZGIUx6LPzDuccqj/pNkGZ1ekj+XBlaKa47CrbceALscExwtEtowT2fNxtCgiAzAS3QM8wECHHtUNFXmtGIlwHfrwCdV+6DmYE52vhLi2qzIbOhZnrXtNhikKqu+bT1JL+YC2YRXR86tqCal1ZfCy8V35cvo+r3ErhcrcVIapfAVF+x4HGl2rL1n6i4rIzXIaS3vqqHkaqLFqgfm/VE+7kKhB7Z/Nj200qp//O0NMhS9ChoBgJ3aFyGBeEnkTZZJlKmSYhPLdtu7rneJJQV0c8YgR5QxiCZErVdTa2tDXtNuuuGlVICnFE9HHQqfeqTyQ0JGAb9gteyLAdkgApEgTjlnnleefji1UgqClzaRhW6Q/d7Uukdi3CmkP341bsDHqVZYEYpKUk1iGRnXhfqR1oyxTow1YZ4OOsRoROgL8S42aOwkjssBdZcNH8SGYxXq0RFa4ZDyDe1Asc1uT4hyXjJYPBQNhsJIZzzDFPeXP39Tjd/OF9S0RGUMrSzGb5rKo/Ux17z0VAO6vFq736wiiF/qrJhuoWpYOG2saOg2shb/ON9UhYznrP09bffl/poza+iqlQS5E274lsC53s5lYuZPOu4k/VNLqHac3BF+U7v5gujgnoLu2YZ0C76ZoCsE6kLvJ3Lf4o/0tRxNnnbCgymeMGkoMiYR1IRuy+BvIx9Ise9jyS85LnvUrx9mLv8cWlYyBYtCT+tJuVBFyro09Z0Phe9o0y426kNGhwqd/fDhw6uPn97/+F2yMGOhJ0yHy2fxb929p0vJYF/M8KipaR15+4m7Eeo2Um8/FduqtSofU8BIDab6uzr+QBybXwU162szaNLdK27Lry3VwjxIrMRggtfGGIShoFdZRik/Nd02+h7ywf2/3/ezu0REzGBremWcejzsnkDkmeDHCuyz/rKJF8O42YSlm/8jlBfD6GkncBG1WQZtA2Iz833suM+0KEFDdjk8Ixlx7mzCTfpbwZHxyj27iIcLewzQSjwcNDhoi+qOCunZVZUDFYXqdDZ0Od3HgUaBi7g/d3w6
GY0TLyc8zgWEHKVpAVaEp5TLC1CeEsfG55Bn5LnHGVfEV5uLKy5Daq6aFE6HnkGseMkLInD7DhAfWFkbFloRK2+VbyBXdEO+Q/jP9pP/5PWnDzex9YJKhLs1vq/UO63KYtIXpmtkRfA8ohODyWjFtbMDYeYBD743tKyjbkto3EqGE/sXTUHd4WKEtrC3aCABlS10OsXG+MOWXB68FT/TO0HAGPxE5azEyviwxG4QbXfKEjCXXYWI+heQwXXZ/FjZkLfBkWOWdSB8J67Yidjz7dVqUDxcgoINgW755S+PQknKx6wa29lJPo/TaRluYeOozJNDfBR6gf6cXkv2qlGGzWOXNOrBM3ktj8LRJVZSqnLGwmHaoKV9MJo83opa4R57DOqovYOGJk6Tf/k1Z4ey/yBNGXqGnewfYWd8L83nfUK4rbW2INqD4wfnL5+9vcGX5MJdUyAw9kG7Q54Var/PZje8k/ezsHD+eJrvmtnKEM5/vfv86eOnD2+F3jdteF/glQIruVuiQO+a53c9jy7smdoVPOKDO3t18ZebpI4FsfMNT6KUVhbgp6EfpiZ84KsZ7uIfRa3gyJ7Sb2r0GfnEroGd5uIn7knWT+tP0nGE85ykHbvnSA9OJa6V0epitm0isat6e8iXi+cTeUDvUC7/B1BLAwQUAAAACAAsg0g+t4b5KKMEAADMCwAAJQAAAGVuc3RhbGxlci9pbmRleGVkX3JlcG8vZGlzdF9uYW1pbmcucHmVVm2P4zQQ/p5fMVo4NWVDwoEEqNJKrHQrcR84nWCBD+1SuYnbmE3iyHY2rcSPZ2bsZJO2Ot31Q+OX8TMvfmbGqm61cWBktDe6Bm3TVrgSlF9WtlAm8luysU5UlTRp51RlBxGxs1t5bEVTdFaaBHJdt8KIXSW3L9JYpZsoit69/+Nx+/H+Ee5QU0oiqpKxWcR7/K6yLP1mvdlkT/+VzrU0vc2W8fofWrpdfr1YRlEh92DbSrltoayL6W+5igB/Nzc3/OVdCwJoz6gdGqmbBGR6SGERcPu+T+VR1G3FRmRGtjrba53Kw2GRgGqcZixaB/QIyLxG1BLiT0Lg2cUAs0wjxrifGQI0bA4JdLbDIJ6AUAtYkAxpRsOrXpysV307Kk4Z67EUdBcJWAw6HhZFgWDgSgmu1wHbQq+qCg7qRfIOQad0FqfS1JaRRofIO5LwEyPRMLQHIyBNXormIPEGT/648QJ5Z4xsXHViINZdGon/p1Za0Hs23a6iUQ9Gi8d3/hcFV5R30iqnzYlP43yPVAGn+fqMzHkLoyag0rmoGM6erJN1QpYzUi1QxPCn0Q52Eq0v5FEW6SRgfSkRpfFbDEOx3usO3VckwlDsO2kvCnZsap/mFT6fgMax6ZVFQqg9qp3gMpA8YkjtMhkvYLh/yyFEfK9ohITdCQe2RZfDjTKOyF03uB34FPh3GdF7NLfWbmo1+a3yEurOOkzIxglFsaS8M9L6ix7sRniG+SBqsiBw266misafF/11oITVSJ4XUanC85bIdMaArLMm40v0qTLde1htMGPsZuof51fjSt0dSveaYQ8f32Ukm/2NluveZseff8ym56j4kPpVK6zttSl++SIkHwKKokPqUKI1uYReDmlZihd/oVzBZinqLzu4xJsMxolVCma1RLb1CsuqQOqZXpgCbCVsCfFO5M9+iGwPFi2Z4yMXgoODJYhlr4Kls3JYo6FD1U1r4fLSF01PL+QAlu6acoFy54Nu0A3aH8tf8loq7qBOD0Z3bfwWTRvG3y+DrOtMc3Yk8gWbFi/qdThwXszX3z2FUwPIZ598SyfxSrfYuS4azHrTp9hEvn0dbIrb5YZK9dhYlN0yibe4SJrj8J3r3WldxUFNiOggN2tQ10HoPq4fnt1IAmF5qvl6+LlfxcP8BzKCrcgrKZqu3VL02RAanDXL38QzZm9HacwNw1/g2KRUKlPc8VOg6qT2JxqKaZFhJGSQLwBE2WCv8DZgmelaCI8AahCUJx5zzlXlqZLi+8I4S9we2+0iGB7EiKwsSlngBWciY/O+vQPc8Wktq2sKQsJOT2Pr2fLrx+f4+qfV01T5sD2DuVD/FfzZqONrKkz3PtODcy+GNVlZea4s1Iwv0bfZfFIhbk/ixi/AeP7I84yaYIR7v2DeGGQsl1chBkVTv4yg/vpwzGVLnTO+ed9MWoznzwremBt4w0sBZFKIhkR4fYkShWP6O8uD3/kQvRpdh10vDmRNYNepqlhSgb14UQ6EpjebBxG0Iaq/Bqbv/sV2Hp5e2G+7yvErZRceWQxKOi29n5/laZ4NQzAv3tHswHoRZosnrAB+hY1dPEX/A1BLAwQUAAAACAAsg0g+Lx+rf7QAAABHAQAAIgAAAGVuc3RhbGxlci9pbmRleGVkX3JlcG8vX19pbml0X18ucHl9kE0KwjAQhfc5xdCVQk8guBBRcCGIuHAXQvNqAiZpkxF6fJOGdqdDFvP33hemaRrxMDZRfmxA1mtM0DJiCHS4XcT+Twhxvjyvpx25EEGMickgQogmu/YxOOqMsp6sG0JkOpai9iPGj41w8LxMN3eMLaUBnVQp83PR2ze8clgagn6FtolXmdJaZrMkOcjit61MB1ZasVqAM6pMZFG3NKiYIMtG3Z89M9761yJZP1QV5UhzKr5QSwMEFAAAAAgALINIPlx45FVDBAAAvw0AACUAAABlbnN0YWxsZXIvaW5kZXhlZF9yZXBvL3JlcXVpcmVtZW50LnB5nVdNb9s4EL3rV7ABAkmoasTJLYhS9LLHPQQF9lAEAi3RNlt9haSz8P76nRmSEikrm3QFBJHNN49vhjPD8V4NHWukNlXPO9kfmOzGQRmmx1aaShwO8LUo2F62At8qhCbJHq1Erw1vW6E2JyNb7S1r3g+9rHmbJEndcq3Zk3jJht1PUZv8PmHwXF1d0f9vTImXk1SiE71hFsIkMPUSmOU/omG7M+MRShsFMjfsm4GX3ckIbTlR3T0zRzELYKMaiBLXCPQqlJZDb3EtuMKGPRsHreWuFX5V+/0assENa9MLrZ2ZeBUt2s0LhMPnBoUM5oiB7LgxQFgw3p+nb+qjmNFb+Bv69ky0qJF1J5BEsAl063wDmsYrXMPdOVwxgdBgd5Jts4T78Ddiz6oKg11VmRbtvkDPKxthd1T47AfFajgUlj48lkU6L+ADByzw1NFvxMwMIdtk0oKSMljYUKJlebLgg3TLAJuzh5LdrjKh4M18BsB6E69R1Er77mPiPtqwlOzPoReTkdyjuti7kGjKKxT24+Y5v0RGcrYRs/enBH/iPUw3Aho5t8+XlLNywLlopV/SHAS8s/8t+8x2w9AiHE8G7GPJIOvCqoRMiuVN5D5osjfZQssWgjGllEtzl1F6FHWQSz738HkS5qSg3L6rk0AxWAYIx+Li1JSowinhLSdBgm4wUdFm+Yax70eOLaRYdIJFdRVUHAhx4Z2ICLMTFK+gR/iOMPWIzapHLnnRiR9pJwxvuOGVs0mf2WPJ0u1mm9qwhPmxchA38TkoihYFKzKkvWp0EDb4VM4pu2r+B2+
1eGfj7W9sPPv2KS61/7377fu7+zBfGt8lCzurklIXNJZh/c8ZW2FvcT0wyNXfOZY0DXbWLDiGC7o3AqTZZ8gOdq3hLwJeEJD6dfMv13Iyt04udAIw9FuJccXxMKWdHQ+KCY2EhvLjVJuDrVx7ff8XR4qDwLXKSaFRdtdQTd2N000Et6hQgSbHAZBssi2IxiIjoiPXx0u3HAUuzhxgl6ARb5oK9OnKDBUmTRY0rmleaWzTQEfgmH6Jc8H+Psr6CF1lhOY0QJi0CBtGMNjogjgsCLtcA0kF8ePqvIl2iSs6vHfsil3IAyzJeaakMxkGWed0aeMk5SAjr3/xg9BoaB3G7yuuIQVeiLgIBprybuH5k8+ClYGNhkHrU+yIK9Mt3uFBDT34Mp0vdZQe+Jb4bJ+NHqPGEFiGRRN3pHWWuzdZfO2EHSMJ8gYDG8xHLozTZOxC6T9/JJyYSx6/cunhHSRUp32F8Yurz55AHPVoCCyYv7WjgX5SmYcOhhnRDpBwOstDN7zL9HPB4fD9o65+3LnIMXel37O/8CVEHeQr9D2I3ZL9ayTBuffWSdFvGnJk1VuAgdCKwgZmMFpQ4GIvqQYWw4lmKeLTwlHA+qAFFab/YWLzfU0sVrIirHJTtcZcVm4YLa2I5F9QSwMEFAAAAAgA6oVcPprQawluAAAAjgAAABQAAABFR0ctSU5GTy9zcGVjL2RlcGVuZFXMQQqDMBAF0P2cwt3sxBygV+gFSiljMupgkpFkLHh7YwuF7v7jf35io0BGrzeXKpq7W4eudwiZEl+QonlbZ4S/wdAPCOMuMTQ7ACp+aemumWGLZJOWdC19FAStQar96sOWz81X5FeauTY/nnACUEsDBBQAAAAIAOqFXD7dgvgQOgAAAEwAAAAZAAAARUdHLUlORk8vZW50cnlfcG9pbnRzLnR4dItOzs8rzs9JjS9OLsosKCmO5cosys9LTU9XsFUAkpl5xSV6uYmZeVYgAixXkA2WA0ok5uSkFiHJAgBQSwECFAMUAAAACAAJgkk+Kp35Ry8GAABAEQAAEgAAAAAAAAAAAAAApIEAAAAAZWdnaW5zdC9zY3JpcHRzLnB5UEsBAhQDFAAAAAgALINIPuvnEdjxEQAAmLYAABMAAAAAAAAAAAAAAKSBXwYAAGVnZ2luc3QvZXhlX2RhdGEucHlQSwECFAMUAAAACAAsg0g+tqkei5MBAAANAwAAEwAAAAAAAAAAAAAApIGBGAAAZWdnaW5zdC9fX2luaXRfXy5weVBLAQIUAxQAAAAIACyDSD4eOj4FrwoAABIjAAAPAAAAAAAAAAAAAACkgUUaAABlZ2dpbnN0L21haW4ucHlQSwECFAMUAAAACABvilc+3WFHXsECAAB4BgAAEAAAAAAAAAAAAAAA7YEhJQAAZWdnaW5zdC91dGlscy5weVBLAQIUAxQAAAAIACyDSD6f04kGgQQAAM8KAAATAAAAAAAAAAAAAACkgRAoAABlbnN0YWxsZXIvY29uZmlnLnB5UEsBAhQDFAAAAAgALINIPq/Y+l6FDwAA2C4AABMAAAAAAAAAAAAAAKSBwiwAAGVuc3RhbGxlci92ZXJsaWIucHlQSwECFAMUAAAACAAsg0g+p9LxRxgAAAAWAAAAFQAAAAAAAAAAAAAApIF4PAAAZW5zdGFsbGVyL19faW5pdF9fLnB5UEsBAhQDFAAAAAgA0oVcPtVxuCilDwAAezQAABEAAAAAAAAAAAAAAKSBwzwAAGVuc3RhbGxlci9tYWluLnB5UEsBAhQDFAAAAAgACYJJPqKWffycBQAArA0AABIAAAAAAAAAAAAAAKSBl0wAAGVuc3RhbGxlci91dGlscy5weVBLAQIUAxQAAAAIACyDSD50viTwzQ4AAFIxAAAfAAAAAAAAAAAAAACkgWNSAABlbnN0YWxsZXIvaW5kZXhlZF9yZXBvL2NoYWluLnB5UEsBAhQDFAAAAAgALINIPmF4fq9fCAAARhgAACIAAAAAAAAAAAAAAKSBbWEAAGVuc3RhbGxlci9pbmRleGVkX3JlcG8vbWV0YWRhdGEucHlQSwECFAMUAAAACAAsg0g+t4b5KKMEAADMCwAAJQAAAAAAAAAAAAAApIEMagAAZW5zdGFsbGVyL2luZGV4ZWRfcmVwby9kaXN0X25hbWluZy5weVBLAQIUAxQAAAAIACyDSD4vH6t/tAAAAEcBAAAiAAAAAAAAAAAAAACkgfJuAABlbnN0YWxsZXIvaW5kZXhlZF9yZXBvL19faW5pdF9fLnB5UEsBAhQDFAAAAAgALINIPlx45FVDBAAAvw0AACUAAAAAAAAAAAAAAKSB5m8AAGVuc3RhbGxlci9pbmRleGVkX3JlcG8vcmVxdWlyZW1lbnQucHlQSwECFAMUAAAACADqhVw+mtBrCW4AAACOAAAAFAAAAAAAAAAAAAAAgAFsdAAARUdHLUlORk8vc3BlYy9kZXBlbmRQSwECFAMUAAAACADqhVw+3YL4EDoAAABMAAAAGQAAAAAAAAAAAAAAgAEMdQAARUdHLUlORk8vZW50cnlfcG9pbnRzLnR4dFBLBQYAAAAAEQARAJ0EAAB9dQAAAAA='
def unzip(zip_file, dir_path):
"""Unzip the zip_file into the directory dir_path."""
z = zipfile.ZipFile(zip_file)
for name in z.namelist():
if name.endswith('/'):
continue
path = join(dir_path, *name.split('/'))
if not isdir(dirname(path)):
os.makedirs(dirname(path))
fo = open(path, 'wb')
fo.write(z.read(name))
fo.close()
z.close()
def self_install():
tmp_dir = tempfile.mkdtemp()
egg_path = join(tmp_dir, 'ironpkg-1.0.0-1.egg')
data = base64.b64decode(b64eggdata)
assert hashlib.md5(data).hexdigest() == '41787f5a12384e482e8e9f1d90f8bcdc'
fo = open(egg_path, 'wb')
fo.write(data)
fo.close()
unzip(egg_path, tmp_dir)
sys.path.insert(0, tmp_dir)
import egginst
print "Bootstrapping:", egg_path
ei = egginst.EggInst(egg_path)
ei.install()
if __name__ == '__main__':
if '--install' in sys.argv:
self_install()
else:
print __doc__
|
import time
import curses
HIGH = 1
LOW = 0
COL_WIDTH = 23
GAME_SECONDS = 30
class Game(object):
time = 0
score = 0
playing = False
def __init__(self, stdscr, machine_id, sensor_pin,
button1_pin, button2_pin, timer_display, score_display):
self.stdscr = stdscr
self.machine_id = machine_id
self.sensor_pin = sensor_pin
self.button1_pin = button1_pin
self.button2_pin = button2_pin
self.timer_display = timer_display
self.score_display = score_display
self.debug(2, "Press '%s' to start" % self.button1_pin)
def start(self):
self.stdscr.clear()
self.time = 0
self.score = 0
self.playing = True
self.start_time = time.time()
def stop(self):
self.stop_time = self.time
self.playing = False
self.debug(5, "Game Over!")
def update(self, current_time):
if self.playing:
self.check_sensor()
self.update_timer(current_time)
else:
self.check_buttons()
def get_input(self):
try:
return self.stdscr.getkey()
except Exception:
return None
def check_sensor(self):
key = self.get_input()
if self.sensor_pin == key:
self.increment_score()
def check_buttons(self):
key = self.get_input()
if self.button1_pin == key:
self.start()
if self.button2_pin == key:
# multi player
pass
def update_timer(self, current_time):
elapsed_time = int(current_time - self.start_time)
if (elapsed_time - self.time) >= 1:
# Increment clock every second
self.time = elapsed_time
# update timer display
msg = "Time: %s" % self.time
self.debug(3, msg)
if (elapsed_time >= GAME_SECONDS):
self.stop()
def increment_score(self):
self.score += 2
# update score display
msg = "Score: %s" % self.score
self.debug(4, msg)
def debug(self, row, msg):
# print("Machine:", self.machine_id, msg)
col = self.machine_id * COL_WIDTH
self.stdscr.addstr(0, col, "Machine: %s" % self.machine_id)
self.stdscr.addstr(row, col, msg)
def main(stdscr):
stdscr.clear()
machines = [
Game(stdscr, 1, 'q', '1', '7', 4, 5),
Game(stdscr, 2, 'w', '2', '8', 4, 5),
Game(stdscr, 3, 'e', '3', '9', 4, 5),
]
curses.noecho()
curses.cbreak()
curses.curs_set(False)
stdscr.nodelay(True)
while True:
current_time = time.time()
for machine in machines:
machine.update(current_time)
stdscr.refresh()
if __name__ == '__main__':
curses.wrapper(main)
|
import os, shutil, zipfile, random
# Qt imports
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
# Local imports
from alfred import alfred_globals as ag
from . import RequestThread
from .module_info import ModuleInfo
from alfred.logger import Logger
import tarfile
class ModuleManager(QObject):
_instance = None
module_data = None
data_fetched = pyqtSignal(list)
conn_err = pyqtSignal()
update_started = pyqtSignal(int)
installation_started = pyqtSignal(int)
installation_finished = pyqtSignal(int)
uninstallation_finished = pyqtSignal(int)
signal_train = pyqtSignal()
update_flag = False
modules_count = len(ModuleInfo.all())
@classmethod
def instance(cls):
if cls._instance is None:
cls._instance = cls()
cls._instance.update_flag = False
return cls._instance
def create_thread(self):
if not hasattr(self, 'rthread'):
self.rthread = RequestThread()
Logger().info("Created request thread")
self.rthread.signal_finished.connect(self.thread_finished)
Logger().info("Connected request thread")
@pyqtSlot()
def thread_finished(self):
if self.rthread.conn_err:
self.conn_err.emit()
return
if self.rthread.purpose == "list":
self.data_fetched.emit(self.rthread.data)
else:
self.install(self.module_data)
@pyqtSlot(dict)
def download(self, module):
self.installation_started.emit(int(module['id']))
Logger().info(
"Downloading {} v{}".format(
module["name"], module["latest_version"]["number"]
)
)
self.create_thread()
self.rthread.purpose = "download"
self.module_data = module
self.rthread.url = ag.modules_download_url.format(
id=module["id"],
version_id=module["latest_version"]["id"]
)
self.module_zip_path = self.rthread.zip_path = os.path.join(
ag.user_folder_path, module["name"]
)
self.rthread.start()
@pyqtSlot()
def fetch_data(self):
self.create_thread()
self.rthread.purpose = "list"
self.rthread.start()
@pyqtSlot(dict)
def install(self, mod_data):
# TODO fetch missing module info
source = 'alfredhub'
mod_id = mod_data['id']
name = mod_data['name']
username = mod_data['user']['username']
version = mod_data['latest_version']['number']
install_dir = os.path.join(ag.user_folder_path, 'modules',
source, username, name)
Logger().info("Installing module {}".format(name))
Logger().info("Installation dir is {}".format(install_dir))
for directory in [install_dir]:
if os.path.exists(directory):
shutil.rmtree(directory)
os.makedirs(directory)
path = self.module_zip_path
if zipfile.is_zipfile(path):
module_file = zipfile.ZipFile(path, 'r')
elif tarfile.is_tarfile(path):
module_file = tarfile.open(path)
else:
msg = '{path} is not a valid zip or tar file'.format(path=path)
Logger().err(msg)
raise IOError(msg)
module_file.extractall(install_dir)
module_file.close()
os.remove(self.module_zip_path)
info = ModuleInfo(mod_id, name, source, username, version)
info.create()
if self.update_flag:
shutil.copytree(self.data_backup_path, os.path.join(install_dir, "data"))
shutil.rmtree(self.data_backup_path)
self.update_flag = False
else:
shutil.copytree(
os.path.join(os.path.dirname(__file__), "default_data_folder"),
os.path.join(install_dir, "data")
)
self.installation_finished.emit(int(self.module_data['id']))
self.modules_count += 1
self.signal_train.emit()
@pyqtSlot(int)
def uninstall(self, mod_id, retrain=True):
info = ModuleInfo.find_by_id(mod_id)
Logger().info("Uninstalling module {} v{}".format(
info.name, info.version
))
module_folder_path = info.root()
try:
shutil.rmtree(module_folder_path)
except Exception as ex:
Logger().err(ex)
info.destroy()
self.uninstallation_finished.emit(info.id)
Logger().info("Unistalled module successfully")
self.modules_count -= 1
if retrain:
self.signal_train.emit()
@pyqtSlot(dict)
def update(self, mod_data):
# backing up data folder
try:
info = ModuleInfo.find_by_id(mod_data["id"])
random_folder_name = "tmp_{}".format(random.randint(1, 1000000000000))
self.data_backup_path = os.path.join(ag.tmp_folder_path, random_folder_name)
data_path = os.path.join(info.root(), "data")
shutil.copytree(data_path, self.data_backup_path)
# updating
self.update_started.emit(int(mod_data['id']))
self.update_flag = True
self.uninstall(mod_data["id"], False)
self.download(mod_data)
self.update_flag = False
except Exception as ex:
Logger().err(ex)
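# Hedged usage sketch (assumes a running Qt event loop and a configured
# `alfred_globals`; the callbacks below are illustrative, not part of this module):
#
#     manager = ModuleManager.instance()
#     manager.data_fetched.connect(lambda modules: print(len(modules), "modules listed"))
#     manager.conn_err.connect(lambda: print("could not reach the module index"))
#     manager.fetch_data()        # starts the RequestThread with purpose "list"
#     # later, for one of the fetched module dicts:
#     # manager.download(module)  # emits installation_started, then install() once downloaded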
|
import numpy as np
from astropy.coordinates.distances import Distance
from astropy import units as u
import scipy.constants as c
from astropy.cosmology import FlatLambdaCDM
cosmo=FlatLambdaCDM(H0=70,Om0=0.3)
#Flat cosmology, might want to update this
import pandas as pd
from os.path import expanduser
home = expanduser("~")
from ctc_observ import sdss_mag_to_jy
from ctc_observ import flux_to_nulnu
from ctc_observ import ab_to_jy
from ctc_stat import dmod
dict_wav={'u':0.3543,'g':0.4770,'r':0.6231,'i':0.7625,'z':0.9134,
'U':0.36,'B':0.44,'V':0.55,'R':0.64,'I':0.79}
mabs_sun = {'u':6.41, 'g':5.15, 'r':4.67, 'i':4.56, 'z':4.53}
storez09=pd.HDFStore(home+'/work_scripts/dwarf/zibetti2009.h5')
#store=pd.HDFStore(home+'/work_scripts/data_prepare_misc/mtar_b03z09.h5')
store=storez09
def z09_mstar_lsun(mags,band,color,color_str,redshift,ubv=None,ld=None,close=None):
'''
Use the Zibetti 2009 table B1 values
to estimate stellar mass quickly
input:
1. magnitude(s) : array of magnitudes
if it's ubvri, the mags should be in AB
2. band(s) : array of strings, in the order of blue->red, don't use u/U band
3. colors for sdss:u-g~z, g-r~z,r-i,r-z
4. redshift :
set ld if luminosity distance is passed instead of redshift
'''
wav=np.zeros(len(band),dtype=float)
lband=np.zeros(len(band),dtype=float)
mag_dmod = dmod(redshift)
for i in range(len(band)):
wav[i]=dict_wav[band[i]]
lband[i] = (mabs_sun[band[i]] - (mags[i]-mag_dmod))/2.5
def get_z09pars(band,color_str,ubv=None):
if not ubv:
z09=storez09['z09_sdss']
pars=z09.loc[color_str,band].values
else:
z09=storez09['z09_ubv']
pars=z09.loc[color_str,band].values
return pars
mass_band=np.zeros((len(band),len(color_str)),dtype=np.float64)
for i in range(len(band)):
for j in range(len(color_str)):
pars=get_z09pars(band[i],color_str[j],ubv=ubv)
mass_band[i,j]=lband[i]+pars[0]+pars[1]*color[j]
if close:storez09.close()
return mass_band
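# Hedged usage sketch for z09_mstar_lsun (values are illustrative only; the
# zibetti2009.h5 table opened above must exist on disk):
#
#     mags      = [17.8, 16.8]      # g and i apparent magnitudes
#     bands     = ['g', 'i']        # blue -> red, no u/U band
#     colors    = [1.0]             # g - i
#     color_str = ['g-i']
#     mgrid = z09_mstar_lsun(mags, bands, colors, color_str, redshift=0.05)
#     # mgrid[i, j] is log10(M*/Msun) estimated from band i with colour j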
def z09_mstar(mags,band,color,color_str,redshift,ubv=None,ld=None,close=None):
'''
Use the Zibetti 2009 table B1 values
to estimate stellar mass quickly
input:
1. magnitude(s) : array of magnitudes
if it's ubvri, the mags should be in AB
2. band(s) : array of strings, in the order of blue->red, don't use u/U band
3. colors for sdss:u-g~z, g-r~z,r-i,r-z
4. redshift :
set ld if luminosity distance is passed instead of redshift
'''
wav=np.zeros(len(band),dtype=np.float64)
for i in range(len(band)):
wav[i]=dict_wav[band[i]]
def get_z09pars(band,color_str,ubv=None):
if not ubv:
z09=storez09['z09_sdss']
pars=z09.loc[color_str,band].values
else:
z09=storez09['z09_ubv']
pars=z09.loc[color_str,band].values
return pars
def get_lum(mag,redshift,ubv=ubv,ld=ld):
#calculate fluxes in jansky
flux=[]
if not ubv:
if len(band)==5:
flux=sdss_mag_to_jy(mag)
else:
for i in range(len(band)):
flux.append(sdss_mag_to_jy(mag[i],band=band[i]))
else:
for i in range(len(band)):
flux.append(ab_to_jy(mag[i],band=band[i]))
#now calculate luminosity distances using z and flux
flux=np.asarray(flux)
lband=[]
lband=flux_to_nulnu(flux,redshift,wav,lsun=True,cosmo=cosmo,ld=ld)
#in log lsun unit
return lband
#Using lband, pars and mag_band to calculate mass_band
lband=get_lum(mags,redshift)
#print lband
mass_band=np.zeros((len(band),len(color_str)),dtype=np.float64)
for i in range(len(band)):
for j in range(len(color_str)):
pars=get_z09pars(band[i],color_str[j],ubv=ubv)
mass_band[i,j]=lband[i]+pars[0]+pars[1]*color[j]
if close:storez09.close()
return mass_band
def mtl_mstar(mags,band,color,color_str,redshift,ld=None,close=None, method='z09'):
'''
Use the Zibetti 2009 table B1 values
or Bell 2003, default is z09
to estimate stellar mass quickly
input:
1. magnitude(s) : array of magnitudes
if it's ubvri, the mags should be in AB
2. band(s) : array of strings, in the order of blue->red, don't use u/U band
3. colors for sdss:u-g~z, g-r~z,r-i,r-z
4. redshift :
set ld if luminosity distance is passed instead of redshift
'''
def get_lum(mag,redshift,ld=ld):
#calculate fluxes in jansky
flux=[]
if len(band)==5:
flux=sdss_mag_to_jy(mag)
else:
for i in range(len(band)):
flux.append(sdss_mag_to_jy(mag[i],band=band[i]))
#now calculate luminosity distances using z and flux
flux=np.asarray(flux)
lband=[]
lband=flux_to_nulnu(flux,redshift,wav,lsun=True,cosmo=cosmo,ld=ld)
#in log lsun unit
return lband
def get_pars(band,color_str):
if method == 'z09':
mtl_sdss=store['z09_sdss']
#mtl_bri=store['z09_bri']
elif method == 'b03':
mtl_bri=store['b03_bri']
mtl_sdss = store['b03_sdss']
#if ubv is True:
# pars=mtl_bri.loc[color_str,band].values
else:
print('method can only be z09 or b03')
pars = mtl_sdss.loc[color_str,band].values
return pars
wav=np.zeros(len(band),dtype=np.float64)
for i in range(len(band)):
wav[i]=dict_wav[band[i]]
#Using lband, pars and mag_band to calculate mass_band
lband=get_lum(mags,redshift)
#print lband
mass_band=np.zeros((len(band),len(color_str)),dtype=np.float64)
for i in range(len(band)):
for j in range(len(color_str)):
pars = get_pars(band[i],color_str[j])
mass_band[i,j]=lband[i]+pars[0]+pars[1]*color[j]
if close:store.close()
return mass_band
def sdss_mass(sdssmags, z=None,color = None, band=None, method='z09'):
'''
call z09_mstar to calculate the stellar mass
using the input sdss magnitudes
default: band = 'i', color = 'g-i'
'''
if color is None:
color = 'g-i'
if band is None:
band = 'i'
if z is None:
z=0.000000001
umag, gmag, rmag, imag, zmag = sdssmags
color_str = ['u-g','u-r','u-i','u-z','g-r','g-i','g-z','r-i','r-z']
sdsscolor = [umag-gmag, umag-rmag,umag-imag,umag-zmag,gmag-rmag,gmag-imag,gmag-zmag,rmag-imag,rmag-zmag]
colors=pd.DataFrame({'bands':color_str,'color':sdsscolor})
mags = [gmag, rmag, imag, zmag]
bands = ['g','r','i','z']
if method == 'z09':
zmstar = mtl_mstar(mags,bands,sdsscolor,color_str,z,method=method)
mstar = pd.DataFrame(zmstar,index=bands,columns=color_str)
elif method == 'b03':
bmstar = mtl_mstar(mags,bands,sdsscolor,color_str,z,method=method)
mstar = pd.DataFrame(bmstar,index=bands,columns=color_str)
else:
print('method can only be z09 or b03')
return mstar.loc[band,color]
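# Hedged example call for sdss_mass (magnitudes are made up; requires the
# z09/b03 HDF5 tables referenced above):
#
#     u, g, r, i, z_ = 19.1, 17.8, 17.1, 16.8, 16.6
#     logmstar = sdss_mass([u, g, r, i, z_], z=0.04)   # defaults: band='i', color='g-i'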
def sdss_z09_izi(abs_i,col_gi):
'''
calculate the stellar mass using i-band and g-i
and solar abs. magnitude 4.56
input : rest-frame g-i and rest-frame i-band abs mag
log M = 1.032(col_gi) - 0.963 + log L
'''
abs_i_sun = 4.56
loglsun = (abs_i_sun-abs_i)/2.5
mstar = 1.032*col_gi-0.963+loglsun
return mstar
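# Worked example of the relation above (pure arithmetic, no data files needed):
#   abs_i = -20.0, col_gi = 1.0
#   log L_i = (4.56 - (-20.0)) / 2.5 = 9.824
#   log M*  = 1.032 * 1.0 - 0.963 + 9.824 = 9.893
# so sdss_z09_izi(-20.0, 1.0) returns roughly 9.89.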
def mstar_k(kmag,z):
'''
Use redshift and observed ks-band luminosity to estimate stellar mass
'''
dm = dmod(z)
kmag_abs = kmag-dm
lk_sun = (3.28-kmag_abs)/2.5
mass_k = np.log10(0.31)+lk_sun
return mass_k
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from skimage.util import view_as_windows
import warnings, logging
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn_grad, math_grad
from collections import OrderedDict
from .utils import make_batches, slice_arrays, to_list, unpack_singleton, placeholder_from_data
from time import time
tf.compat.v1.disable_v2_behavior()
SUPPORTED_ACTIVATIONS = [
'Relu', 'Elu', 'Sigmoid', 'Tanh', 'Softplus', 'MaxPool'
]
UNSUPPORTED_ACTIVATIONS = [
'CRelu', 'Relu6', 'Softsign'
]
_ENABLED_METHOD_CLASS = None
_GRAD_OVERRIDE_CHECKFLAG = 0
# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS
# -----------------------------------------------------------------------------
def activation(type):
"""
Returns Tensorflow's activation op, given its type
:param type: string
:return: op
"""
if type not in SUPPORTED_ACTIVATIONS:
warnings.warn('Activation function (%s) not supported' % type)
f = getattr(tf.nn, type.lower())
return f
def original_grad(op, grad):
"""
Return original Tensorflow gradient for an op
:param op: op
:param grad: Tensor
:return: Tensor
"""
if op.type not in SUPPORTED_ACTIVATIONS:
warnings.warn('Activation function (%s) not supported' % op.type)
opname = '_%sGrad' % op.type
if hasattr(nn_grad, opname):
f = getattr(nn_grad, opname)
else:
f = getattr(math_grad, opname)
return f(op, grad)
# -----------------------------------------------------------------------------
# ATTRIBUTION METHODS BASE CLASSES
# -----------------------------------------------------------------------------
class AttributionMethod(object):
"""
Attribution method base class
"""
def __init__(self, T, X, session, keras_learning_phase=None):
self.T = T # target Tensor
self.X = X # input Tensor
self.Y_shape = [None,] + T.get_shape().as_list()[1:]
# Most often T contains multiple output units. In this case, it is often necessary to select
# a single unit to compute contributions for. This can be achieved by passing 'ys' as a weight for the output Tensor.
self.Y = tf.placeholder(tf.float32, self.Y_shape)
# placeholder_from_data(ys) if ys is not None else 1.0 # Tensor that represents weights for T
self.T = self.T * self.Y
self.symbolic_attribution = None
self.session = session
self.keras_learning_phase = keras_learning_phase
self.has_multiple_inputs = type(self.X) is list or type(self.X) is tuple
logging.info('Model with multiple inputs: %s' % self.has_multiple_inputs)
# Set baseline
# TODO: this currently sets a baseline even for methods that do not require one
self._set_check_baseline()
# References
self._init_references()
# Create symbolic explanation once during construction (affects only gradient-based methods)
self.explain_symbolic()
def explain_symbolic(self):
return None
def run(self, xs, ys=None, batch_size=None):
pass
def _init_references(self):
pass
def _check_input_compatibility(self, xs, ys=None, batch_size=None):
if ys is not None and len(ys) != len(xs):
raise RuntimeError('When provided, the number of elements in ys must equal the number of elements in xs')
if batch_size is not None and batch_size > 0:
if self.T.shape[0].value is not None and self.T.shape[0].value != batch_size:
raise RuntimeError('When using batch evaluation, the first dimension of the target tensor '
'must be compatible with the batch size. Found %s instead' % self.T.shape[0].value)
if isinstance(self.X, list):
for x in self.X:
if x.shape[0].value is not None and x.shape[0].value != batch_size:
raise RuntimeError('When using batch evaluation, the first dimension of the input tensor '
'must be compatible with the batch size. Found %s instead' % x.shape[
0].value)
else:
if self.X.shape[0].value is not None and self.X.shape[0].value != batch_size:
raise RuntimeError('When using batch evaluation, the first dimension of the input tensor '
'must be compatible with the batch size. Found %s instead' % self.X.shape[0].value)
def _session_run_batch(self, T, xs, ys=None):
feed_dict = {}
if self.has_multiple_inputs:
for k, v in zip(self.X, xs):
feed_dict[k] = v
else:
feed_dict[self.X] = xs
# If ys is not passed, produce a vector of ones that will be broadcasted to all batch samples
feed_dict[self.Y] = ys if ys is not None else np.ones([1,] + self.Y_shape[1:])
if self.keras_learning_phase is not None:
feed_dict[self.keras_learning_phase] = 0
return self.session.run(T, feed_dict)
def _session_run(self, T, xs, ys=None, batch_size=None):
num_samples = len(xs)
if self.has_multiple_inputs:
num_samples = len(xs[0])
if len(xs) != len(self.X):
raise RuntimeError('List of input tensors and input data have different lengths (%s and %s)'
% (str(len(xs)), str(len(self.X))))
if batch_size is not None:
for xi in xs:
if len(xi) != num_samples:
raise RuntimeError('Evaluation in batches requires all inputs to have '
'the same number of samples')
if batch_size is None or batch_size <= 0 or num_samples <= batch_size:
return self._session_run_batch(T, xs, ys)
else:
outs = []
batches = make_batches(num_samples, batch_size)
for batch_index, (batch_start, batch_end) in enumerate(batches):
# Get a batch from data
xs_batch = slice_arrays(xs, batch_start, batch_end)
# If the target tensor has one entry for each sample, we need to batch it as well
ys_batch = None
if ys is not None:
ys_batch = slice_arrays(ys, batch_start, batch_end)
batch_outs = self._session_run_batch(T, xs_batch, ys_batch)
batch_outs = to_list(batch_outs)
if batch_index == 0:
# Pre-allocate the results arrays.
for batch_out in batch_outs:
shape = (num_samples,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=batch_out.dtype))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
return unpack_singleton(outs)
def _set_check_baseline(self):
# Do nothing for those methods that have no baseline required
if not hasattr(self, "baseline"):
return
if self.baseline is None:
if self.has_multiple_inputs:
self.baseline = [np.zeros([1,] + xi.get_shape().as_list()[1:]) for xi in self.X]
else:
self.baseline = np.zeros([1,] + self.X.get_shape().as_list()[1:])
else:
if self.has_multiple_inputs:
for i, xi in enumerate(self.X):
if list(self.baseline[i].shape) == xi.get_shape().as_list()[1:]:
self.baseline[i] = np.expand_dims(self.baseline[i], 0)
else:
raise RuntimeError('Baseline shape %s does not match expected shape %s'
% (self.baseline[i].shape, self.X.get_shape().as_list()[1:]))
else:
if list(self.baseline.shape) == self.X.get_shape().as_list()[1:]:
self.baseline = np.expand_dims(self.baseline, 0)
else:
raise RuntimeError('Baseline shape %s does not match expected shape %s'
% (self.baseline.shape, self.X.get_shape().as_list()[1:]))
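# Note on the `ys` weighting used by run(): whatever is passed as `ys` is fed
# into self.Y and multiplied into the target tensor T, so a one-hot `ys`
# selects a single output unit. A hedged sketch for a 10-class model
# (`method`, `xs` and `class_idx` are illustrative names, not defined here):
#
#     ys = np.zeros((len(xs), 10))
#     ys[:, class_idx] = 1.0
#     attributions = method.run(xs, ys=ys, batch_size=32)
#
# If `ys` is omitted, a broadcast vector of ones is used, i.e. attributions
# are computed with respect to the sum of all output units.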
class GradientBasedMethod(AttributionMethod):
"""
Base class for gradient-based attribution methods
"""
def get_symbolic_attribution(self):
return tf.gradients(self.T, self.X)
def explain_symbolic(self):
if self.symbolic_attribution is None:
self.symbolic_attribution = self.get_symbolic_attribution()
return self.symbolic_attribution
def run(self, xs, ys=None, batch_size=None):
self._check_input_compatibility(xs, ys, batch_size)
print(xs.shape)
results = self._session_run(self.explain_symbolic(), xs, ys, batch_size)
print(np.array(results).shape)
return results[0] if not self.has_multiple_inputs else results
@classmethod
def nonlinearity_grad_override(cls, op, grad):
return original_grad(op, grad)
class PerturbationBasedMethod(AttributionMethod):
"""
Base class for perturbation-based attribution methods
"""
def __init__(self, T, X, session, keras_learning_phase):
super(PerturbationBasedMethod, self).__init__(T, X, session, keras_learning_phase)
self.base_activation = None
# -----------------------------------------------------------------------------
# ATTRIBUTION METHODS
# -----------------------------------------------------------------------------
"""
Returns zero attributions. For testing only.
"""
class DummyZero(GradientBasedMethod):
def get_symbolic_attribution(self,):
return tf.gradients(self.T, self.X)
@classmethod
def nonlinearity_grad_override(cls, op, grad):
input = op.inputs[0]
return tf.zeros_like(input)
"""
Saliency maps
https://arxiv.org/abs/1312.6034
"""
class Saliency(GradientBasedMethod):
def get_symbolic_attribution(self):
return [tf.abs(g) for g in tf.gradients(self.T, self.X)]
"""
Gradient * Input
https://arxiv.org/pdf/1704.02685.pdf - https://arxiv.org/abs/1611.07270
"""
class GradientXInput(GradientBasedMethod):
def get_symbolic_attribution(self):
return [g * x for g, x in zip(
tf.gradients(self.T, self.X),
self.X if self.has_multiple_inputs else [self.X])]
"""
Integrated Gradients
https://arxiv.org/pdf/1703.01365.pdf
"""
class IntegratedGradients(GradientBasedMethod):
def __init__(self, T, X, session, keras_learning_phase, steps=100, baseline=None):
self.steps = steps
self.baseline = baseline
super(IntegratedGradients, self).__init__(T, X, session, keras_learning_phase)
def run(self, xs, ys=None, batch_size=None):
self._check_input_compatibility(xs, ys, batch_size)
session_run_time = 0
gradient = None
for alpha in list(np.linspace(1. / self.steps, 1.0, self.steps)):
xs_mod = [b + (x - b) * alpha for x, b in zip(xs, self.baseline)] if self.has_multiple_inputs \
else self.baseline + (xs - self.baseline) * alpha
start = time()
_attr = self._session_run(self.explain_symbolic(), xs_mod, ys, batch_size)
end = time()
session_run_time += end - start
if gradient is None: gradient = _attr
else: gradient = [g + a for g, a in zip(gradient, _attr)]
print('Int Grads session run time', session_run_time)
results = [g * (x - b) / self.steps for g, x, b in zip(
gradient,
xs if self.has_multiple_inputs else [xs],
self.baseline if self.has_multiple_inputs else [self.baseline])]
return results[0] if not self.has_multiple_inputs else results
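# The loop above is a Riemann-sum approximation of the integrated-gradients
# integral: IG_i(x) = (x_i - b_i) * (1/m) * sum_{k=1..m} dT/dx_i evaluated at
# b + (k/m)(x - b), with m = self.steps and b = self.baseline; the final
# `g * (x - b) / self.steps` term applies the (x - b)/m factor.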
# "perpendicular" implementation of IG, iterating over each sample instead of each step
class IntegratedGradients_new(GradientBasedMethod):
def __init__(self, T, X, session, keras_learning_phase, steps=100, baseline=None):
self.steps = steps
self.baseline = baseline
super(IntegratedGradients_new, self).__init__(T, X, session, keras_learning_phase)
def run(self, xs, ys=None, batch_size=None):
self._check_input_compatibility(xs, ys, batch_size)
results = []
for x in xs:
interpolation = [self.baseline[0] + i/self.steps*(x - self.baseline[0]) for i in range(self.steps+1)]
attribs = self._session_run(self.explain_symbolic(), interpolation, ys, batch_size)[0]
gradient = np.sum(np.array(attribs[1:]), axis=0) #ignore 1st step which is baseline
results.append(gradient * (x - self.baseline[0])/self.steps)
return results
"""
Layer-wise Relevance Propagation with epsilon rule
http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0130140
"""
class EpsilonLRP(GradientBasedMethod):
eps = None
def __init__(self, T, X, session, keras_learning_phase, epsilon=1e-4):
assert epsilon > 0.0, 'LRP epsilon must be greater than zero'
global eps
eps = epsilon
super(EpsilonLRP, self).__init__(T, X, session, keras_learning_phase)
def get_symbolic_attribution(self):
return [g * x for g, x in zip(
tf.gradients(self.T, self.X),
self.X if self.has_multiple_inputs else [self.X])]
@classmethod
def nonlinearity_grad_override(cls, op, grad):
output = op.outputs[0]
input = op.inputs[0]
return grad * output / (input + eps *
tf.where(input >= 0, tf.ones_like(input), -1 * tf.ones_like(input)))
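# In the override above, relevance flowing through each nonlinearity is scaled
# by output / (input + eps * sign(input)); the eps term stabilises the division
# when activations are close to zero (the epsilon rule of LRP).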
"""
DeepLIFT
This reformulation only considers the "Rescale" rule
https://arxiv.org/abs/1704.02685
"""
class DeepLIFTRescale(GradientBasedMethod):
_deeplift_ref = {}
_deeplift_ref_out = {}
def __init__(self, T, X, session, keras_learning_phase, baseline=None):
self.baseline = baseline
super(DeepLIFTRescale, self).__init__(T, X, session, keras_learning_phase)
# _ = tf.gradients(self.T, self.X)
# print('still in init')
def get_symbolic_attribution(self):
print('get symbolic')
return [g * (x - b) for g, x, b in zip(
tf.gradients(self.T, self.X),
self.X if self.has_multiple_inputs else [self.X],
self.baseline if self.has_multiple_inputs else [self.baseline])]
# return 0
#
# # Testing gradient override in instance method
# def run(self, xs, ys=None, batch_size=None):
# self._check_input_compatibility(xs, ys, batch_size)
# grads = tf.gradients(self.T, self.X)
# attribs = [g * (x - b) for g, x, b in zip(
# grads,
# self.X if self.has_multiple_inputs else [self.X],
# self.baseline if self.has_multiple_inputs else [self.baseline])]
# results = self._session_run(attribs, xs, ys, batch_size)
# return results[0] if not self.has_multiple_inputs else results
#
@classmethod
def nonlinearity_grad_override(cls, op, grad):
if op.type == 'MaxPool':
# print('in maxpool')
xout = op.outputs[0]
input = op.inputs[0]
ref_input = cls._deeplift_ref[op.name]
rout = cls._deeplift_ref_out[op.name]#
delta_in = input - ref_input
cross_max = tf.maximum(xout, rout)
diff1 = cross_max - rout
diff2 = xout - cross_max
xmax_pos = original_grad(op, grad*diff1)
# rmax_pos = original_grad(op, grad*diff2)
# replace: "maxpoolgrad"(op, )
# orig_input=ref_input, orig_output=ref_output, grad=grad *diff2
# extract k_size, stride, padding from op
# getting arguments from op
node_def = op.node_def
padding = node_def.attr["padding"].s.decode("ASCII")
strides = node_def.attr["strides"].list.i
strides = list(strides)
ksize = node_def.attr["ksize"].list.i
ksize = list(ksize)
# prepare tiling
batch_size = tf.shape(grad)[0:1]
batch_size = tf.expand_dims(batch_size, 0)
# print("batch_size")
# print(batch_size)
ones = tf.constant([1]*(len(ref_input.shape)-1),dtype=tf.dtypes.int32)
ones = tf.expand_dims(ones, 0)
# print("ones")
# print(ones)
multiples = tf.concat([batch_size, ones], axis=1)
multiples = multiples[0]
# print("multiples")
# print(multiples)
correct_input_shape = tf.shape(input)
tf_ref_input = tf.constant(ref_input)
tiled_ref_input = tf.tile(tf_ref_input, multiples)
tiled_ref_input = tf.reshape(tiled_ref_input, correct_input_shape)
correct_output_shape = tf.shape(xout)
ones = tf.constant([1]*(len(rout.shape)-1),dtype=tf.dtypes.int32)
ones = tf.expand_dims(ones, 0)
# print("ones")
# print(ones)
multiples = tf.concat([batch_size, ones], axis=1)
multiples = multiples[0]
# print("multiples")
# print(multiples)
tiled_ref_output = tf.tile(rout, multiples)
tiled_ref_output = tf.reshape(tiled_ref_output, correct_output_shape)
# print("tiled_ref_input")
# print(tiled_ref_input)
rmax_pos = nn_grad.gen_nn_ops.max_pool_grad(
orig_input= tiled_ref_input,#tf.ones((tf.shape(grad)[0], 1)) * ref_input,
orig_output= tiled_ref_output, # tf.ones((tf.shape(grad)[0], 1)) * rout,
grad=grad*diff2,
#ksize=[1]+list(self.pool_size)+[1],
ksize=ksize,#[1,2,2,1],#[1]+op.ksize+[1],
strides=strides,#[1,2,2,1],#[1]+list(op.strides)+[1],
padding=padding)#"VALID")#op.padding)
t = False
if t:
print("grad")
print(grad)
print("input")
print(input)
print("diff1")
print(diff1)
print("diff2")
print(diff2)
print("rmax_pos")
print(rmax_pos)
print("xmax_pos")
print(xmax_pos)
print("ref_input")
print(ref_input.shape)
testing1 = False
if testing1:
print('printed')
# p1 = tf.print(('xout', xout))
# p2 = tf.print(('input',input))
# p3 = tf.print(('ref_input',ref_input))
# p4 = tf.print(('rout',rout))
# p6 = tf.print(('delta_in',delta_in))
# print("printing python id of ref_input")
# print(id(ref_input))
# to_print = tf.print((('output', xout), ('input',input),('ref_input',ref_input), ('ref_output',rout),('cross_max', cross_max), ('diff1', diff1), ('delta_in',delta_in), ('xmax_pos', xmax_pos), ('rmax_pos', rmax_pos)))
to_print = tf.print(( (tf.shape(grad)[0]), ('grad', grad), ('ref_input', ref_input), ('rout', rout), ('xmax_pos', xmax_pos), ('rmax_pos', rmax_pos), ('diff1', diff1),('diff2', diff2) ))
with tf.control_dependencies([to_print]):
result = tf.where(
tf.abs(delta_in) < 1e-5,
tf.zeros_like(delta_in),
(xmax_pos + rmax_pos) / delta_in
)
return result
# return
# return tf.where(
# tf.abs(delta_in) < 1e-5, # 1e-5
# tf.zeros_like(delta_in),
# (xmax_pos + rmax_pos) / delta_in
# )
# instant_grad = activation(op.type)(0.5 * (ref_input + input))
return tf.where(
tf.abs(delta_in) < 1e-7, # 1e-5
original_grad(op, grad),
(xmax_pos + rmax_pos) / delta_in
)
output = op.outputs[0]
input = op.inputs[0]
ref_input = cls._deeplift_ref[op.name]
ref_output = activation(op.type)(ref_input)
delta_out = output - ref_output
delta_in = input - ref_input
instant_grad = activation(op.type)(0.5 * (ref_input + input))
# print("IN GRAD OVERRIDE")
testing = False
if testing:
print('GRAD')
p1 = tf.print(('output', output))
p2 = tf.print(('input',input))
p3 = tf.print(('ref_input',ref_input))
p4 = tf.print(('ref_output',ref_output))
p5 = tf.print(('delta_out',delta_out))
p6 = tf.print(('delta_in',delta_in))
tf.print(('delta_in',delta_in))
print("printing python id of ref_input")
print(id(ref_input))
to_print = tf.print((('output', output), ('input',input),('ref_input',ref_input), ('ref_output',ref_output),('delta_out',delta_out), ('delta_in',delta_in)))
with tf.control_dependencies([to_print]):
result = tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in, original_grad(instant_grad.op, grad))
else:
result = tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in, original_grad(instant_grad.op, grad))
# result = tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in, original_grad(instant_grad.op, grad))
return result
#return tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in,
# original_grad(instant_grad.op, grad))
def _init_references(self):
print ('DeepLIFT: computing references...')
sys.stdout.flush()
self._deeplift_ref.clear()
self._deeplift_ref_out.clear()
ops = []
g = tf.get_default_graph()
for op in g.get_operations():
if len(op.inputs) > 0 and not op.name.startswith('gradients'):
if op.type in SUPPORTED_ACTIVATIONS:
ops.append(op)
YR = self._session_run([o.inputs[0] for o in ops], self.baseline)
activs = self._session_run([o.outputs[0] for o in ops], self.baseline)
for (r, op) in zip(YR, ops):
self._deeplift_ref[op.name] = r
for (r, op) in zip(activs, ops):
self._deeplift_ref_out[op.name] = r
# print('DeepLIFT: references ready')
sys.stdout.flush()
class IntegratedDeepLIFT_v2(GradientBasedMethod):
_deeplift_ref = {}
def __init__(self, T, X, session, keras_learning_phase, steps=100, baseline=None):
self.baseline = baseline
self.steps = steps
super(IntegratedDeepLIFT_v2, self).__init__(T, X, session, keras_learning_phase)
def run(self, xs, ys=None, batch_size=None):
self._check_input_compatibility(xs, ys, batch_size)
gradient = None
for alpha in list(np.linspace(1. / self.steps, 1.0, self.steps)):
xs_mod = [b + (x - b) * alpha for x, b in zip(xs, self.baseline)] if self.has_multiple_inputs \
else self.baseline + (xs - self.baseline) * alpha
_attr = self._session_run(self.explain_symbolic(), xs_mod, ys, batch_size)
if gradient is None: gradient = _attr
else: gradient = [g + a for g, a in zip(gradient, _attr)]
results = [g * (x - b) / self.steps for g, x, b in zip(
gradient,
xs if self.has_multiple_inputs else [xs],
self.baseline if self.has_multiple_inputs else [self.baseline])]
return results[0] if not self.has_multiple_inputs else results
@classmethod
def nonlinearity_grad_override(cls, op, grad):
output = op.outputs[0]
input = op.inputs[0]
ref_input = cls._deeplift_ref[op.name]
ref_output = activation(op.type)(ref_input)
delta_out = output - ref_output
delta_in = input - ref_input
instant_grad = activation(op.type)(0.5 * (ref_input + input))
#compute before you update ref
ans = tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in,
original_grad(instant_grad.op, grad))
#based on interpolation, curr input is next ref_input
cls._deeplift_ref[op.name] = input
return ans
def _init_references(self):
# print ('DeepLIFT: computing references...')
sys.stdout.flush()
self._deeplift_ref.clear()
ops = []
g = tf.get_default_graph()
for op in g.get_operations():
if len(op.inputs) > 0 and not op.name.startswith('gradients'):
if op.type in SUPPORTED_ACTIVATIONS:
ops.append(op)
YR = self._session_run([o.inputs[0] for o in ops], self.baseline)
for (r, op) in zip(YR, ops):
self._deeplift_ref[op.name] = r
# print('DeepLIFT: references ready')
sys.stdout.flush()
class IntegratedDeepLIFT_batch(GradientBasedMethod): #shapes of self.Y and ys do not match
_deeplift_ref = {}
def __init__(self, T, X, session, keras_learning_phase, steps=100, baseline=None):
self.baseline = baseline
self.steps = steps
super(IntegratedDeepLIFT_batch, self).__init__(T, X, session, keras_learning_phase)
def run(self, xs, ys=None, batch_size=None):
self._check_input_compatibility(xs, ys, batch_size)
results = []
interpolation_time = 0
session_run_time = 0
gradient_time = 0
n = xs.shape[0]
print(n)
if batch_size is None:
batch_size = n
for i in range(batch_size):
x = xs[i*n//batch_size : (i+1)*n//batch_size]
# for x in xs: #for each example:
self.baseline = np.zeros(xs.shape[1:])
start = time()
# interpolation = [self.baseline + i/self.steps*(x - self.baseline) for i in range(self.steps+1)] #evenly-spaced steps ranging from baseline to example's input values
alpha_values = np.linspace(0, 1.0, self.steps+1)
x_minus_baseline = x - self.baseline
correct_shape = (self.steps+1,) + (1,) * (len(xs.shape) - 1)
# interpolation = self.baseline[None, :] + alpha_values[:, None] * x_minus_baseline[None, :]
interpolation = self.baseline[None, :] + alpha_values.reshape(correct_shape) * x_minus_baseline[None, :]
end = time()
interpolation_time += end - start
start = time()
attribs = self._session_run(self.explain_symbolic(), interpolation, ys, batch_size)[0] #run attributions with all steps of interpolation at once
end = time()
session_run_time += end - start
start = time()
gradient = np.sum(np.array(attribs[1:]), axis=0) #ignore 1st step, which uses baseline as input
end = time()
gradient_time += end - start
results.append(gradient * (x - self.baseline)/self.steps) #result is sum of all (gradient * change in step)
# print('interpolation', interpolation_time)
# print('session run', session_run_time)
# print('gradient', gradient_time)
# print()
return results
@classmethod
def nonlinearity_grad_override(cls, op, grad): #custom gradient for DeepLIFT multiplier
if op.type == 'MaxPool':
output = op.outputs[0]
input = op.inputs[0]
ref_input = tf.concat([input[0:1], input[0:-1]], axis=0) #input[0:1] is 1st component; input[0:-1] ranges from 1st to second-to-last component
ref_output = tf.concat([output[0:1], output[0:-1]], axis=0) #same logic
# ref_input = cls._deeplift_ref[op.name]
# rout = cls._deeplift_ref_out[op.name]#
delta_in = input - ref_input
# dup0 = [2] + [1 for i in delta_in.shape[1:]]
cross_max = tf.maximum(output, ref_output)
# diffs = tf.concat([cross_max - rout, xout - cross_max], 0)
# xmax_pos,rmax_pos = tf.split(original_grad(op, grad * diffs), 2) #
diff1 = cross_max - ref_output
diff2 = output - cross_max
xmax_pos = original_grad(op, grad*diff1)#*diff1#*diffs) #* diffs
# getting arguments from op
node_def = op.node_def
padding = node_def.attr["padding"].s.decode("ASCII")
strides = node_def.attr["strides"].list.i
strides = list(strides)
ksize = node_def.attr["ksize"].list.i
ksize = list(ksize)
# # prepare tiling
# batch_size = tf.shape(grad)[0:1]
# batch_size = tf.expand_dims(batch_size, 0)
# # print("batch_size")
# # print(batch_size)
#
# ones = tf.constant([1]*(len(ref_input.shape)-1),dtype=tf.dtypes.int32)
# ones = tf.expand_dims(ones, 0)
# # print("ones")
# # print(ones)
#
# multiples = tf.concat([batch_size, ones], axis=1)
# multiples = multiples[0]
# # print("multiples")
# # print(multiples)
#
# correct_input_shape = tf.shape(input)
# tf_ref_input = tf.constant(ref_input)
# tiled_ref_input = tf.tile(tf_ref_input, multiples)
# tiled_ref_input = tf.reshape(tiled_ref_input, correct_input_shape)
#
# correct_output_shape = tf.shape(xout)
#
# ones = tf.constant([1]*(len(rout.shape)-1),dtype=tf.dtypes.int32)
# ones = tf.expand_dims(ones, 0)
# # print("ones")
# # print(ones)
#
# multiples = tf.concat([batch_size, ones], axis=1)
# multiples = multiples[0]
# # print("multiples")
# # print(multiples)
#
# tiled_ref_output = tf.tile(rout, multiples)
# tiled_ref_output = tf.reshape(tiled_ref_output, correct_output_shape)
# print("tiled_ref_input")
# print(tiled_ref_input)
rmax_pos = nn_grad.gen_nn_ops.max_pool_grad(
orig_input= ref_input,#tf.ones((tf.shape(grad)[0], 1)) * ref_input,
orig_output= ref_output, # tf.ones((tf.shape(grad)[0], 1)) * rout,
grad=grad*diff2,
#ksize=[1]+list(self.pool_size)+[1],
ksize=ksize,#[1,2,2,1],#[1]+op.ksize+[1],
strides=strides,#[1,2,2,1],#[1]+list(op.strides)+[1],
padding=padding)#"VALID")#op.padding)
t = False
print('in maxpool')
if t:
print("grad")
print(grad)
print("input")
print(input)
print("diff1")
print(diff1)
print("diff2")
print(diff2)
print("rmax_pos")
print(rmax_pos)
print("xmax_pos")
print(xmax_pos)
print("ref_input")
print(ref_input.shape)
testing1 = False
if testing1:
print('printed')
p1 = tf.print(('xout', output))
p2 = tf.print(('input',input))
p3 = tf.print(('ref_input',ref_input))
p4 = tf.print(('rout',ref_output))
p6 = tf.print(('delta_in',delta_in))
# print("printing python id of ref_input")
# print(id(ref_input))
# to_print = tf.print((('output', xout), ('input',input),('ref_input',ref_input), ('ref_output',rout),('cross_max', cross_max), ('diff1', diff1), ('delta_in',delta_in), ('xmax_pos', xmax_pos), ('rmax_pos', rmax_pos)))
to_print = tf.print((('grad.shape', grad.shape), ('xmax_pos', xmax_pos), ('rmax_pos', rmax_pos), ('diff1.shape', diff1.shape),('xmax_pos.shape', xmax_pos.shape) ))
with tf.control_dependencies([to_print]):
result = tf.where(
tf.abs(delta_in) < 1e-5,
tf.zeros_like(delta_in),
(xmax_pos + rmax_pos) / delta_in
)
return result
# return
return tf.where(
tf.abs(delta_in) < 1e-7, # before 1e-5
tf.zeros_like(delta_in),
#original_grad(op, grad),
(xmax_pos + rmax_pos) / delta_in
)
input = op.inputs[0] #inputs to `op`, using all steps in the interpolation as model input, for a single example
output = op.outputs[0] #outputs from op ''
#`ref_input` (below) is the list of reference values, for the corresponding input values in `input` (with steps in interpolation as model input).
#Because for each step in the interpolation, its reference value is equal to the previous step's value,
#the reference for a step in `input` is equal to the previous step's value.
#So we define ref_input to be `input`, shifted one index to the right, so that each step in `input` can be matched with its "previous step" in `ref_input`.
#To keep dimensions correct, the 1st step in `ref_input` (which is now empty) will be set to the 1st step in `input` (e.g. if `input` is [1,2,3,4], `ref_input` is [1,1,2,3]).
#We will be ignoring the 1st index of the attributions anyway, because it corresponds to the baseline as input.
ref_input = tf.concat([input[0:1], input[0:-1]], axis=0) #input[0:1] is 1st component; input[0:-1] ranges from 1st to second-to-last component
ref_output = tf.concat([output[0:1], output[0:-1]], axis=0) #same logic for reference output
delta_out = output - ref_output
delta_in = input - ref_input
instant_grad = activation(op.type)(0.5 * (ref_input + input))
return tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in,
original_grad(instant_grad.op, grad))
class IntegratedDeepLIFT(GradientBasedMethod): #shapes of self.Y and ys do not match
_deeplift_ref = {}
def __init__(self, T, X, session, keras_learning_phase, steps=100, baseline=None):
self.baseline = baseline
self.steps = steps
super(IntegratedDeepLIFT, self).__init__(T, X, session, keras_learning_phase)
def run(self, xs, ys=None, batch_size=None):
self._check_input_compatibility(xs, ys, batch_size)
results = []
interpolation_time = 0
session_run_time = 0
gradient_time = 0
for x in xs: #for each example:
self.baseline = np.zeros(xs.shape[1:])
start = time()
# interpolation = [self.baseline + i/self.steps*(x - self.baseline) for i in range(self.steps+1)] #evenly-spaced steps ranging from baseline to example's input values
alpha_values = np.linspace(0, 1.0, self.steps+1)
x_minus_baseline = x - self.baseline
correct_shape = (self.steps+1,) + (1,) * (len(xs.shape) - 1)
# interpolation = self.baseline[None, :] + alpha_values[:, None] * x_minus_baseline[None, :]
interpolation = self.baseline[None, :] + alpha_values.reshape(correct_shape) * x_minus_baseline[None, :]
end = time()
interpolation_time += end - start
start = time()
attribs = self._session_run(self.explain_symbolic(), interpolation, ys, batch_size)[0] #run attributions with all steps of interpolation at once
end = time()
session_run_time += end - start
start = time()
gradient = np.sum(np.array(attribs[1:]), axis=0) #ignore 1st step, which uses baseline as input
end = time()
gradient_time += end - start
results.append(gradient * (x - self.baseline)/self.steps) #result is sum of all (gradient * change in step)
# print('interpolation', interpolation_time)
# print('session run', session_run_time)
# print('gradient', gradient_time)
# print()
return results
@classmethod
def nonlinearity_grad_override(cls, op, grad): #custom gradient for DeepLIFT multiplier
if op.type == 'MaxPool':
output = op.outputs[0]
input = op.inputs[0]
ref_input = tf.concat([input[0:1], input[0:-1]], axis=0) #input[0:1] is 1st component; input[0:-1] ranges from 1st to second-to-last component
ref_output = tf.concat([output[0:1], output[0:-1]], axis=0) #same logic
# ref_input = cls._deeplift_ref[op.name]
# rout = cls._deeplift_ref_out[op.name]#
delta_in = input - ref_input
# dup0 = [2] + [1 for i in delta_in.shape[1:]]
cross_max = tf.maximum(output, ref_output)
# diffs = tf.concat([cross_max - rout, xout - cross_max], 0)
# xmax_pos,rmax_pos = tf.split(original_grad(op, grad * diffs), 2) #
diff1 = cross_max - ref_output
diff2 = output - cross_max
xmax_pos = original_grad(op, grad*diff1)#*diff1#*diffs) #* diffs
# getting arguments from op
node_def = op.node_def
padding = node_def.attr["padding"].s.decode("ASCII")
strides = node_def.attr["strides"].list.i
strides = list(strides)
ksize = node_def.attr["ksize"].list.i
ksize = list(ksize)
# # prepare tiling
# batch_size = tf.shape(grad)[0:1]
# batch_size = tf.expand_dims(batch_size, 0)
# # print("batch_size")
# # print(batch_size)
#
# ones = tf.constant([1]*(len(ref_input.shape)-1),dtype=tf.dtypes.int32)
# ones = tf.expand_dims(ones, 0)
# # print("ones")
# # print(ones)
#
# multiples = tf.concat([batch_size, ones], axis=1)
# multiples = multiples[0]
# # print("multiples")
# # print(multiples)
#
# correct_input_shape = tf.shape(input)
# tf_ref_input = tf.constant(ref_input)
# tiled_ref_input = tf.tile(tf_ref_input, multiples)
# tiled_ref_input = tf.reshape(tiled_ref_input, correct_input_shape)
#
# correct_output_shape = tf.shape(xout)
#
# ones = tf.constant([1]*(len(rout.shape)-1),dtype=tf.dtypes.int32)
# ones = tf.expand_dims(ones, 0)
# # print("ones")
# # print(ones)
#
# multiples = tf.concat([batch_size, ones], axis=1)
# multiples = multiples[0]
# # print("multiples")
# # print(multiples)
#
# tiled_ref_output = tf.tile(rout, multiples)
# tiled_ref_output = tf.reshape(tiled_ref_output, correct_output_shape)
# print("tiled_ref_input")
# print(tiled_ref_input)
rmax_pos = nn_grad.gen_nn_ops.max_pool_grad(
orig_input= ref_input,#tf.ones((tf.shape(grad)[0], 1)) * ref_input,
orig_output= ref_output, # tf.ones((tf.shape(grad)[0], 1)) * rout,
grad=grad*diff2,
#ksize=[1]+list(self.pool_size)+[1],
ksize=ksize,#[1,2,2,1],#[1]+op.ksize+[1],
strides=strides,#[1,2,2,1],#[1]+list(op.strides)+[1],
padding=padding)#"VALID")#op.padding)
t = False
print('in maxpool')
if t:
print("grad")
print(grad)
print("input")
print(input)
print("diff1")
print(diff1)
print("diff2")
print(diff2)
print("rmax_pos")
print(rmax_pos)
print("xmax_pos")
print(xmax_pos)
print("ref_input")
print(ref_input.shape)
testing1 = False
if testing1:
print('printed')
p1 = tf.print(('xout', output))
p2 = tf.print(('input',input))
p3 = tf.print(('ref_input',ref_input))
p4 = tf.print(('rout',ref_output))
p6 = tf.print(('delta_in',delta_in))
# print("printing python id of ref_input")
# print(id(ref_input))
# to_print = tf.print((('output', xout), ('input',input),('ref_input',ref_input), ('ref_output',rout),('cross_max', cross_max), ('diff1', diff1), ('delta_in',delta_in), ('xmax_pos', xmax_pos), ('rmax_pos', rmax_pos)))
to_print = tf.print((('grad.shape', grad.shape), ('xmax_pos', xmax_pos), ('rmax_pos', rmax_pos), ('diff1.shape', diff1.shape),('xmax_pos.shape', xmax_pos.shape) ))
with tf.control_dependencies([to_print]):
result = tf.where(
tf.abs(delta_in) < 1e-5,
tf.zeros_like(delta_in),
(xmax_pos + rmax_pos) / delta_in
)
return result
# return
return tf.where(
tf.abs(delta_in) < 1e-7, # before 1e-5
tf.zeros_like(delta_in),
#original_grad(op, grad),
(xmax_pos + rmax_pos) / delta_in
)
input = op.inputs[0] #inputs to `op`, using all steps in the interpolation as model input, for a single example
output = op.outputs[0] #outputs from op ''
#`ref_input` (below) is the list of reference values, for the corresponding input values in `input` (with steps in interpolation as model input).
#Because for each step in the interpolation, its reference value is equal to the previous step's value,
#the reference for a step in `input` is equal to the previous step's value.
#So we define ref_input to be `input`, shifted one index to the right, so that each step in `input` can be matched with its "previous step" in `ref_input`.
#To keep dimensions correct, the 1st step in `ref_input` (which is now empty) will be set to the 1st step in `input` (e.g. if `input` is [1,2,3,4], `ref_input` is [1,1,2,3]).
#We will be ignoring the 1st index of the attributions anyway, because it corresponds to the baseline as input.
ref_input = tf.concat([input[0:1], input[0:-1]], axis=0) #input[0:1] is 1st component; input[0:-1] ranges from 1st to second-to-last component
ref_output = tf.concat([output[0:1], output[0:-1]], axis=0) #same logic for reference output
delta_out = output - ref_output
delta_in = input - ref_input
instant_grad = activation(op.type)(0.5 * (ref_input + input))
return tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in,
original_grad(instant_grad.op, grad))
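# --- Hedged illustration (not part of the override above) ---------------------------
# The non-MaxPool branch returns the rescale rule grad * delta_out / delta_in, falling
# back to the instantaneous gradient where delta_in is numerically zero. Below is a
# minimal NumPy sketch of that rule, assuming a pointwise nonlinearity `f`; the constant
# 0.5 merely stands in for the instantaneous-gradient fallback used above.
import numpy as np

def _rescale_rule_sketch(grad, x, x_ref, f, eps=1e-5):
    delta_in = x - x_ref
    delta_out = f(x) - f(x_ref)
    # mask before dividing so we never divide by a (near-)zero delta_in
    safe_delta_in = np.where(np.abs(delta_in) > eps, delta_in, 1.0)
    multiplier = np.where(np.abs(delta_in) > eps, delta_out / safe_delta_in, 0.5)
    return grad * multiplier

# Example: for ReLU with a zero reference the multiplier is 0 on the negative side,
# the fallback at (numerically) zero inputs, and 1 on the positive side:
# _rescale_rule_sketch(np.ones(3), np.array([-1., 0., 2.]), np.zeros(3),
#                      lambda v: np.maximum(v, 0.))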
# Potential problem: self.gradients is cached and will not reload, so old values of _deeplift_ref may be cached there (i.e. the gradient's constant values would be preserved).
class IntegratedDeepLIFT_true(GradientBasedMethod):
_deeplift_ref = {}
_deeplift_ref_out = {}
def __init__(self, T, X, session, keras_learning_phase, steps=100, baseline=None):
#baseline, init refs, explain symbolic should all be done
self.baseline = baseline
self.expanded_baseline = None
self.steps = steps
self.T = T # target Tensor
self.X = X # input Tensor
self.Y_shape = [None,] + T.get_shape().as_list()[1:]
# Most often T contains multiple output units. In this case, it is often necessary to select
# a single unit to compute contributions for. This can be achieved by passing 'ys' as a weight for the output Tensor.
self.Y = tf.placeholder(tf.float32, self.Y_shape)
# placeholder_from_data(ys) if ys is not None else 1.0 # Tensor that represents weights for T
self.T = self.T * self.Y
self.symbolic_attribution = None
self.session = session
self.keras_learning_phase = keras_learning_phase
self.has_multiple_inputs = type(self.X) is list or type(self.X) is tuple
logging.info('Model with multiple inputs: %s' % self.has_multiple_inputs)
# Set baseline
# TODO: currently this sets a baseline also for those methods that do not require it
self._set_check_baseline()
# References
# self._init_references()
# Create symbolic explanation once during construction (affects only gradient-based methods)
# self.explain_symbolic()
# super(IntegratedDeepLIFT_true, self).__init__(T, X, session, keras_learning_phase)
def run(self, xs, ys=None, batch_size=None):
self._check_input_compatibility(xs, ys, batch_size)
# baseline to match n x input size
self.expanded_baseline = np.broadcast_to(self.baseline[0], xs.shape) # np.broadcast_to(self.baseline[0], (xs.shape[0],) + self.baseline.shape[1:])
# print(self.expanded_baseline.shape)
# do this after size of input is known
self._init_references()
self.explain_symbolic()
self.session.run(tf.initialize_variables(self._deeplift_ref.values()))
self.session.run(tf.initialize_variables(self._deeplift_ref_out.values()))
gradient = None
for alpha in list(np.linspace(1. / self.steps, 1.0, self.steps)):
xs_mod = [b + (x - b) * alpha for x, b in zip(xs, self.baseline)] if self.has_multiple_inputs \
else self.baseline + (xs - self.baseline) * alpha
_attr = self._session_run(self.explain_symbolic(), xs_mod, ys, batch_size)
if gradient is None: gradient = _attr
else: gradient = [g + a for g, a in zip(gradient, _attr)]
#update references
self._init_references_input(xs_mod)
results = [g * (x - b) / self.steps for g, x, b in zip(
gradient,
xs if self.has_multiple_inputs else [xs],
self.baseline if self.has_multiple_inputs else [self.baseline])]
# print("results")
# print(results[0])
return results[0] if not self.has_multiple_inputs else results
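# Note on the loop above (illustrative only): it is a Riemann-sum approximation of an
# integrated attribution. Conceptually, with a plain NumPy "gradient" function it would be
#
#     total = sum(grad_fn(baseline + (x - baseline) * alpha)
#                 for alpha in np.linspace(1. / steps, 1., steps))
#     attribution = total * (x - baseline) / steps
#
# i.e. gradients are averaged along the straight path from the baseline to the input and
# scaled by the input difference; here grad_fn corresponds to one evaluation of
# explain_symbolic(), and the references are refreshed after every step.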
@classmethod
def nonlinearity_grad_override(cls, op, grad):
# print(op.type)
if op.type == 'MaxPool':
print('hello')
xout = op.outputs[0]
input = op.inputs[0]
ref_input = cls._deeplift_ref[op.name]
rout = cls._deeplift_ref_out[op.name]#
delta_in = input - ref_input
# dup0 = [2] + [1 for i in delta_in.shape[1:]]
cross_max = tf.maximum(xout, rout)
# diffs = tf.concat([cross_max - rout, xout - cross_max], 0)
# xmax_pos,rmax_pos = tf.split(original_grad(op, grad * diffs), 2) #
diff1 = cross_max - rout
diff2 = xout - cross_max
xmax_pos = original_grad(op, grad*diff1)#*diff1#*diffs) #* diffs
rmax_pos = original_grad(op, grad*diff2)#*diff2#*diffs) #* diffs
testing1 = False
if testing1:
print('printed')
p1 = tf.print(('xout', xout))
p2 = tf.print(('input',input))
p3 = tf.print(('ref_input',ref_input))
p4 = tf.print(('rout',rout))
p6 = tf.print(('delta_in',delta_in))
# print("printing python id of ref_input")
# print(id(ref_input))
# to_print = tf.print((('output', xout), ('input',input),('ref_input',ref_input), ('ref_output',rout),('cross_max', cross_max), ('diff1', diff1), ('delta_in',delta_in), ('xmax_pos', xmax_pos), ('rmax_pos', rmax_pos)))
to_print = tf.print((('grad.shape', grad.shape), ('xmax_pos', xmax_pos), ('rmax_pos', rmax_pos), ('diff1.shape', diff1.shape),('xmax_pos.shape', xmax_pos.shape) ))
with tf.control_dependencies([to_print]):
result = tf.where(
tf.abs(delta_in) < 1e-5,
tf.zeros_like(delta_in),
(xmax_pos + rmax_pos) / delta_in
)
return result
# return
return tf.where(
tf.abs(delta_in) < 1e-5,
tf.zeros_like(delta_in),
(xmax_pos + rmax_pos) / delta_in
)
output = op.outputs[0]
input = op.inputs[0]
ref_input = cls._deeplift_ref[op.name]
ref_output = activation(op.type)(ref_input)
delta_out = output - ref_output
delta_in = input - ref_input
instant_grad = activation(op.type)(0.5 * (ref_input + input))
testing = False
if testing:
print('GRAD')
p1 = tf.print(('output', output))
p2 = tf.print(('input',input))
p3 = tf.print(('ref_input',ref_input))
p4 = tf.print(('ref_output',ref_output))
p5 = tf.print(('delta_out',delta_out))
p6 = tf.print(('delta_in',delta_in))
print("printing python id of ref_input")
print(id(ref_input))
to_print = tf.print((('output', output), ('input',input),('ref_input',ref_input), ('ref_output',ref_output),('delta_out',delta_out), ('delta_in',delta_in)))
with tf.control_dependencies([to_print]):
result = tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in, original_grad(instant_grad.op, grad))
else:
result = tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in, original_grad(instant_grad.op, grad))
return result
def _init_references(self):
# print ('DeepLIFT: computing references...')
sys.stdout.flush()
self._deeplift_ref.clear()
self._deeplift_ref_out.clear()
ops = []
g = tf.get_default_graph()
for op in g.get_operations():
if len(op.inputs) > 0 and not op.name.startswith('gradients'):
if op.type in SUPPORTED_ACTIVATIONS:
ops.append(op)
YR = self._session_run([o.inputs[0] for o in ops], self.expanded_baseline)
activs = self._session_run([o.outputs[0] for o in ops], self.expanded_baseline)
for (r, op) in zip(YR, ops):
# print(op.name)
# print("value of reference to set:", r)
self._deeplift_ref[op.name] = tf.Variable(r) # create Variable
#self._deeplift_ref[op.name].load(r, self.session)
for (r, op) in zip(activs, ops):
# print(op.name)
# print("value of reference to set:", r)
self._deeplift_ref_out[op.name] = tf.Variable(r) # create Variable
# self.session.run(tf.global_variables_initializer())
# print('DeepLIFT: references ready')
# print(self._deeplift_ref)
sys.stdout.flush()
def _init_references_input(self, xs):
# print ('DeepLIFT: computing references...')
sys.stdout.flush()
# self._deeplift_ref.clear()
ops = []
g = tf.get_default_graph()
for op in g.get_operations():
if len(op.inputs) > 0 and not op.name.startswith('gradients'):
if op.type in SUPPORTED_ACTIVATIONS:
ops.append(op)
YR = self._session_run([o.inputs[0] for o in ops], xs)
activs = self._session_run([o.outputs[0] for o in ops], xs)
# print(YR)
# print('OPS')
for (r, op) in zip(YR, ops):
# print(op.name)
# print("value of reference to set:", r)
if op.name in self._deeplift_ref: # check if op already present
#print('ALREADY CONTAINED')
# print("setting", op.name)
# print( self.session.run(self._deeplift_ref[op.name].assign(r)) )#, self.session)
# print("checking if value updated")
# print( self.session.run(self._deeplift_ref[op.name] ) )
# print("printing python id of _deeplift_ref op")
# print(id(self._deeplift_ref[op.name]))
self._deeplift_ref[op.name].load(r, self.session)
else:
print('NOT PRESENT')
self._deeplift_ref[op.name] = tf.Variable(r)
for (r, op) in zip(activs, ops):
# print(op.name)
# print("value of reference to set:", r)
if op.name in self._deeplift_ref_out: # check if op already present
#print('ALREADY CONTAINED')
# print("setting", op.name)
# print( self.session.run(self._deeplift_ref[op.name].assign(r)) )#, self.session)
# print("checking if value updated")
# print( self.session.run(self._deeplift_ref[op.name] ) )
# print("printing python id of _deeplift_ref op")
# print(id(self._deeplift_ref[op.name]))
self._deeplift_ref_out[op.name].load(r, self.session)
else:
print('NOT PRESENT')
self._deeplift_ref_out[op.name] = tf.Variable(r)
# print('DeepLIFT: references ready')
# print(self._deeplift_ref)
sys.stdout.flush()
class IntegratedDeepLIFT_old_but_works(GradientBasedMethod):
_deeplift_ref = {}
def __init__(self, T, X, xs, session, keras_learning_phase, steps=100, baseline=None):
super(IntegratedDeepLIFT_old_but_works, self).__init__(T, X, xs, session, keras_learning_phase)
self.steps = steps
self.baseline = baseline
@classmethod
def nonlinearity_grad_override(cls, op, grad):
output = op.outputs[0]
input = op.inputs[0]
ref_input = cls._deeplift_ref[op.name]
ref_output = activation(op.type)(ref_input)
delta_out = output - ref_output
delta_in = input - ref_input
instant_grad = activation(op.type)(0.5 * (ref_input + input))
result = tf.where(tf.abs(delta_in) > 1e-5, grad * delta_out / delta_in,
original_grad(instant_grad.op, grad))
testing = True
if testing:
print('GRAD')
print('output', output)
print('input', input)
print('ref_input', ref_input)
print('ref_output', ref_output)
print('delta_out', delta_out)
print('delta_in', delta_in)
print('result', result)
return result
def _set_check_baseline(self):
if self.baseline is None:
if self.has_multiple_inputs:
self.baseline = [np.zeros(xi.shape) for xi in self.xs]
else:
self.baseline = np.zeros(self.xs.shape) #DEFAULT CASE; assume shape of xs including # of samples
else:
if self.has_multiple_inputs:
for i, xi in enumerate(self.xs):
if self.baseline[i].shape == self.xs[i].shape[1:]:
self.baseline[i] = np.expand_dims(self.baseline[i], 0)
else:
raise RuntimeError('Baseline shape %s does not match expected shape %s'
% (self.baseline[i].shape, self.xs[i].shape[1:]))
else:
if self.baseline.shape == self.xs.shape[1:]:
self.baseline = np.expand_dims(self.baseline, 0)
else:
raise RuntimeError('Baseline shape %s does not match expected shape %s'
% (self.baseline.shape, self.xs.shape[1:]))
def run(self):
# Check user baseline or set default one
self._set_check_baseline()
# print ('DeepLIFT: computing references...')
sys.stdout.flush()
self._deeplift_ref.clear()
ops = []
g = tf.get_default_graph()
for op in g.get_operations():
if len(op.inputs) > 0 and not op.name.startswith('gradients'):
if op.type in SUPPORTED_ACTIVATIONS:
ops.append(op)
initial_reference_values = self.session_run([o.inputs[0] for o in ops], self.baseline)
for op,ref_val in zip(ops,initial_reference_values):
print('op name and val')
print(op.name)
print(ref_val)
self._deeplift_ref[op.name] = tf.Variable(ref_val, name=op.name+"_reference")
print('first ref:')
print(self._deeplift_ref)
attributions = self.get_symbolic_attribution()
gradient = None
for alpha in list(np.linspace(0, 1.0, num=self.steps, endpoint=False)):
xs_mod = [b + (xs - b) * alpha for xs, b in zip(self.xs, self.baseline)] if self.has_multiple_inputs \
else self.baseline + (self.xs - self.baseline) * alpha #"baseline" xs
xs_mod1 = [b + (xs - b) * (alpha + 1.0/self.steps) for xs, b in zip(self.xs, self.baseline)] if self.has_multiple_inputs \
else self.baseline + (self.xs - self.baseline) * (alpha + 1.0/self.steps) #"actual" xs
# Init references with a forward pass
ref_values = self.session_run([o.inputs[0] for o in ops], xs_mod)
print(alpha, ' ref:')
for op,ref_val in zip(ops,ref_values):
print('op name and val')
print(op.name)
print(ref_val)
self._deeplift_ref[op.name].load(ref_val, self.session)
print(self._deeplift_ref)
print('xs_mod')
print(xs_mod1)
_attr = self.session_run(attributions, xs_mod1)
print('attr')
print(_attr)
if gradient is None: gradient = _attr
else: gradient = [g + a for g, a in zip(gradient, _attr)]
print('grads')
print(gradient)
results = [g * (x - b) / self.steps for g, x, b in zip(
gradient,
self.xs if self.has_multiple_inputs else [self.xs],
self.baseline if self.has_multiple_inputs else [self.baseline])]
return results[0] if not self.has_multiple_inputs else results
"""
Occlusion method
Generalization of the grey-box method presented in https://arxiv.org/pdf/1311.2901.pdf
This method performs a systematic perturbation of contiguous hyperpatches in the input,
replacing each patch with a user-defined value (by default 0).
window_shape : integer or tuple of length xs_ndim
Defines the shape of the elementary n-dimensional orthotope of the rolling window view.
If an integer is given, the shape will be a hypercube of sidelength given by its value.
step : integer or tuple of length xs_ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
"""
class Occlusion(PerturbationBasedMethod):
def __init__(self, T, X, session, keras_learning_phase, window_shape=None, step=None):
super(Occlusion, self).__init__(T, X, session, keras_learning_phase)
if self.has_multiple_inputs:
raise RuntimeError('Multiple inputs not yet supported for perturbation methods')
input_shape = X[0].get_shape().as_list()
if window_shape is not None:
assert len(window_shape) == len(input_shape), \
'window_shape must have length of input (%d)' % len(input_shape)
self.window_shape = tuple(window_shape)
else:
self.window_shape = (1,) * len(input_shape)
if step is not None:
assert isinstance(step, int) or len(step) == len(input_shape), \
'step must be integer or tuple with the length of input (%d)' % len(input_shape)
self.step = step
else:
self.step = 1
self.replace_value = 0.0
logging.info('Input shape: %s; window_shape %s; step %s' % (input_shape, self.window_shape, self.step))
def run(self, xs, ys=None, batch_size=None):
self._check_input_compatibility(xs, ys, batch_size)
input_shape = xs.shape[1:]
batch_size = xs.shape[0]
total_dim = np.asscalar(np.prod(input_shape))
# Create mask
index_matrix = np.arange(total_dim).reshape(input_shape)
idx_patches = view_as_windows(index_matrix, self.window_shape, self.step).reshape((-1,) + self.window_shape)
heatmap = np.zeros_like(xs, dtype=np.float32).reshape((-1), total_dim)
w = np.zeros_like(heatmap)
# Compute original output
eval0 = self._session_run(self.T, xs, ys, batch_size)
# Start perturbation loop
for i, p in enumerate(idx_patches):
mask = np.ones(input_shape).flatten()
mask[p.flatten()] = self.replace_value
masked_xs = mask.reshape((1,) + input_shape) * xs
delta = eval0 - self._session_run(self.T, masked_xs, ys, batch_size)
delta_aggregated = np.sum(delta.reshape((batch_size, -1)), -1, keepdims=True)
heatmap[:, p.flatten()] += delta_aggregated
w[:, p.flatten()] += p.size
attribution = np.reshape(heatmap / w, xs.shape)
if np.isnan(attribution).any():
warnings.warn('Attributions generated by Occlusion method contain nans, '
'probably because window_shape and step do not allow the windows to cover the whole input.')
return attribution
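# Hedged sketch of the occlusion idea implemented in run() above, using a plain callable
# `f` in place of the TensorFlow target and reusing the module's `np` import. A patch of
# the input is zeroed and the resulting drop in the output is credited to the occluded
# positions; the real method additionally normalises by how often each position is covered.
def _occlusion_sketch(f, x, patch=2):
    baseline_out = f(x)
    heatmap = np.zeros_like(x, dtype=float)
    for start in range(0, x.size - patch + 1):
        masked = x.copy()
        masked[start:start + patch] = 0.0
        heatmap[start:start + patch] += baseline_out - f(masked)
    return heatmap

# e.g. _occlusion_sketch(lambda v: v.sum(), np.array([1., 2., 3., 4.])) -> [3., 8., 12., 7.]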
"""
Shapley Value sampling
Computes approximate Shapley Values using "Polynomial calculation of the Shapley value based on sampling",
Castro et al, 2009 (https://www.sciencedirect.com/science/article/pii/S0305054808000804)
samples : integer (default 5)
Defines the number of samples for each input feature.
Notice that evaluating a model samples * n_input_feature times might take a while.
sampling_dims : list of dimension indexes to run sampling on (feature dimensions).
By default, all dimensions except the batch dimension will be sampled.
For example, with a 4-D tensor that contains color images, single color channels are sampled.
To sample pixels, instead, use sampling_dims=[1,2]
"""
class ShapleySampling(PerturbationBasedMethod):
def __init__(self, T, X, session, keras_learning_phase, samples=5, sampling_dims=None):
super(ShapleySampling, self).__init__(T, X, session, keras_learning_phase)
if self.has_multiple_inputs:
raise RuntimeError('Multiple inputs not yet supported for perturbation methods')
dims = len(X.shape)
if sampling_dims is not None:
if not 0 < len(sampling_dims) <= (dims - 1):
raise RuntimeError('sampling_dims must be a list containing 1 to %d elements' % (dims-1))
if 0 in sampling_dims:
raise RuntimeError('Cannot sample batch dimension: remove 0 from sampling_dims')
if any([x < 1 or x > dims-1 for x in sampling_dims]):
raise RuntimeError('Invalid value in sampling_dims')
else:
sampling_dims = list(range(1, dims))
self.samples = samples
self.sampling_dims = sampling_dims
def run(self, xs, ys=None, batch_size=None):
xs_shape = list(xs.shape)
batch_size = xs.shape[0]
n_features = int(np.asscalar(np.prod([xs.shape[i] for i in self.sampling_dims])))
result = np.zeros((xs_shape[0], n_features))
run_shape = list(xs_shape) # a copy
run_shape = np.delete(run_shape, self.sampling_dims).tolist()
run_shape.insert(1, -1)
reconstruction_shape = [xs_shape[0]]
for j in self.sampling_dims:
reconstruction_shape.append(xs_shape[j])
for r in range(self.samples):
p = np.random.permutation(n_features)
x = xs.copy().reshape(run_shape)
y = None
for i in p:
if y is None:
y = self._session_run(self.T, x.reshape(xs_shape), ys, batch_size)
x[:, i] = 0
y0 = self._session_run(self.T, x.reshape(xs_shape), ys, batch_size)
delta = y - y0
delta_aggregated = np.sum(delta.reshape((batch_size, -1)), -1, keepdims=False)
result[:, i] += delta_aggregated
y = y0
shapley = result / self.samples
return shapley.reshape(reconstruction_shape)
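# Hedged sketch of the sampling scheme implemented in run() above, with a plain callable
# `f` in place of the TensorFlow target and zero as the "feature removed" value (both are
# assumptions). Features are removed one by one in a random order, the drop in the output
# is the sampled marginal contribution of the removed feature, and averaging over
# permutations approximates the Shapley values.
def _shapley_sampling_sketch(f, x, samples=50):
    result = np.zeros_like(x, dtype=float)
    for _ in range(samples):
        perm = np.random.permutation(x.size)
        current = x.astype(float).copy()
        y_prev = f(current)
        for i in perm:
            current[i] = 0.0
            y_new = f(current)
            result[i] += y_prev - y_new
            y_prev = y_new
    return result / samples

# For an additive model such as f(v) = (2 * v).sum() the estimate is simply 2 * x.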
# -----------------------------------------------------------------------------
# END ATTRIBUTION METHODS
# -----------------------------------------------------------------------------
attribution_methods = OrderedDict({
'zero': (DummyZero, 0),
'saliency': (Saliency, 1),
'grad*input': (GradientXInput, 2),
'intgrad': (IntegratedGradients, 3),
'elrp': (EpsilonLRP, 4),
'deeplift': (DeepLIFTRescale, 5),
'occlusion': (Occlusion, 6),
'shapley_sampling': (ShapleySampling, 7),
'integdeeplift': (IntegratedDeepLIFT_v2, 8),
'idl': (IntegratedDeepLIFT, 9),
'idl_true': (IntegratedDeepLIFT_true, 10),
'intgrad_new': (IntegratedGradients_new, 11),
'idl_batch': (IntegratedDeepLIFT_batch, 12)
})
@ops.RegisterGradient("DeepExplainGrad")
def deepexplain_grad(op, grad):
global _ENABLED_METHOD_CLASS, _GRAD_OVERRIDE_CHECKFLAG
_GRAD_OVERRIDE_CHECKFLAG = 1
if _ENABLED_METHOD_CLASS is not None \
and issubclass(_ENABLED_METHOD_CLASS, GradientBasedMethod):
return _ENABLED_METHOD_CLASS.nonlinearity_grad_override(op, grad)
else:
return original_grad(op, grad)
class DeepExplain(object):
def __init__(self, graph=None, session=None):
self.method = None
self.batch_size = None
# Resolve the session at call time; a default argument of tf.get_default_session()
# would be evaluated only once, when this module is first imported.
self.session = session if session is not None else tf.get_default_session()
if self.session is None:
raise RuntimeError('DeepExplain: could not retrieve a session. Use DeepExplain(session=your_session).')
self.graph = self.session.graph if graph is None else graph
self.graph_context = self.graph.as_default()
self.override_context = self.graph.gradient_override_map(self.get_override_map())
self.keras_phase_placeholder = None
self.context_on = False
def __enter__(self):
# Override gradient of all ops created in context
self.graph_context.__enter__()
self.override_context.__enter__()
self.context_on = True
return self
def __exit__(self, type, value, traceback):
self.graph_context.__exit__(type, value, traceback)
self.override_context.__exit__(type, value, traceback)
self.context_on = False
def get_explainer(self, method, T, X, **kwargs):
if not self.context_on:
raise RuntimeError('Explain can be called only within a DeepExplain context.')
global _ENABLED_METHOD_CLASS, _GRAD_OVERRIDE_CHECKFLAG
self.method = method
if self.method in attribution_methods:
method_class, method_flag = attribution_methods[self.method]
else:
raise RuntimeError('Method must be in %s' % list(attribution_methods.keys()))
if isinstance(X, list):
for x in X:
if 'tensor' not in str(type(x)).lower():
raise RuntimeError('If a list, X must contain only Tensorflow Tensor objects')
else:
if 'tensor' not in str(type(X)).lower():
raise RuntimeError('X must be a Tensorflow Tensor object or a list of them')
if 'tensor' not in str(type(T)).lower():
raise RuntimeError('T must be a Tensorflow Tensor object')
logging.info('DeepExplain: running "%s" explanation method (%d)' % (self.method, method_flag))
self._check_ops()
_GRAD_OVERRIDE_CHECKFLAG = 0 # modified?
_ENABLED_METHOD_CLASS = method_class
method = _ENABLED_METHOD_CLASS(T, X,
self.session,
keras_learning_phase=self.keras_phase_placeholder,
**kwargs)
if issubclass(_ENABLED_METHOD_CLASS, GradientBasedMethod) and _GRAD_OVERRIDE_CHECKFLAG == 0:
warnings.warn('DeepExplain detected you are trying to use an attribution method that requires '
'gradient override but the original gradient was used instead. You might have forgotten to '
'(re)create your graph within the DeepExplain context. Results are not reliable!')
# intentionally left commented out: resetting _ENABLED_METHOD_CLASS here would turn the gradient override off right after the method object is constructed
# _ENABLED_METHOD_CLASS = None
_GRAD_OVERRIDE_CHECKFLAG = 0
self.keras_phase_placeholder = None
return method
def explain(self, method, T, X, xs, ys=None, batch_size=None, **kwargs):
explainer = self.get_explainer(method, T, X, **kwargs)
result = explainer.run(xs, ys, batch_size)
return result
@staticmethod
def get_override_map():
return dict((a, 'DeepExplainGrad') for a in SUPPORTED_ACTIVATIONS)
def _check_ops(self):
"""
Heuristically check if any op is in the list of unsupported activation functions.
This does not cover all cases where explanation methods would fail, and must be improved in the future.
Also, check if the placeholder named 'keras_learning_phase' exists in the graph. This is used by Keras
and needs to be passed in feed_dict.
:return:
"""
g = tf.get_default_graph()
for op in g.get_operations():
if len(op.inputs) > 0 and not op.name.startswith('gradients'):
if op.type in UNSUPPORTED_ACTIVATIONS:
warnings.warn('Detected unsupported activation (%s). '
'This might lead to unexpected or wrong results.' % op.type)
elif 'keras_learning_phase' in op.name:
self.keras_phase_placeholder = op.outputs[0]
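# Hedged usage sketch for the context manager above (names such as `build_model`, `xs`
# and `ys` are placeholders, not part of this module). The model graph must be created
# inside the context so that the gradient override registered here is actually applied:
#
#     with DeepExplain(session=session) as de:
#         model = build_model()                 # (re)create the graph inside the context
#         x_in, logits = model.inputs[0], model.outputs[0]
#         attributions = de.explain('deeplift', logits, x_in, xs, ys=ys)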
|
from pybpodgui_api.models.user.user_io import UserIO
class User(UserIO):
pass
|
from django.db import connection
def print_queries():
retval = list()
for i, query in enumerate(connection.queries):
q = query['sql']
if q.startswith('SELECT "django_session"."session_key"'): continue
if q.startswith('SELECT "auth_user"."id", "auth_user"."password"'): continue
print(q)
|
# This Python file uses the following encoding: utf-8
# ___________________________________________________________________
# directories.py
# rosevomit.core.directories
# ___________________________________________________________________
"""This file contains the 'get_dir' function, which should theoretically be able to find any subdirectory of the Rosevomit directory from anywhere within the 'rosevomit' module or the associated repository. Theoretically."""
import glob
import os
import pathlib
from core import logs
from core.customerrors import RealityError
_DIRECTORIESLOGGER = logs.BaseLogger (__name__)
def get_cwd_name_only() -> str:
"""Returns only the folder name of the current working directory, not the full path.
Returns
-------
str
The name of the current working directory.
"""
_cwd = pathlib.Path.cwd()
_path_split = os.path.split(_cwd)
return _path_split[-1]
def get_dir(ARG_dirname: str) -> pathlib.Path:
"""Returns the path of subdirectory of the 'rosevomit' directory. This function should work anywhere within the 'rosevomit' module or the associated repository.
Parameters
----------
ARG_dirname : str
The name of the Rosevomit directory you're looking for.
Returns
-------
_dirpath : pathlib.Path
The path of the Rosevomit directory you're looking for.
"""
# Let's assume we don't know where this function is being called from, and we don't know
# the exact directory structure we're navigating through. Generally, all we know is that:
# (1) our program Rosevomit is contained in a directory named 'rosevomit';
# (2) the 'rosevomit' directory, plus whatever directories we've created for
#     documentation/testing/general development purposes, should be contained in a
#     directory called 'rosevomitrepo'; and
# (3) the name of the directory (ARG_dirname) we're looking for, which should be
#     somewhere within the 'rosevomit' directory.
#
# We begin by finding out where we are.
_cwd = pathlib.Path.cwd()
# We need a known starting place to begin navigating. This can be either our 'rosevomit' directory or our 'rosevomitrepo' directory.
_path_parts = pathlib.PurePath (_cwd).parts
if "rosevomit" in _path_parts:
_path_partslist = list(_path_parts)
while _path_partslist[-1] != "rosevomit":
_path_partslist.pop()
_path = os.path.join (*_path_partslist) # The '*' is a "splat" operator
rosevomit_directory = pathlib.PurePath (_path)
os.chdir ("..")
repository_directory = pathlib.PurePath (pathlib.Path.cwd())
# If the 'rosevomit' directory isn't in _path_parts, then that's a problem. We'll attempt to work around it by looking for the 'rosevomitrepo' directory. At the end of the day, we need *some* sort of place to begin navigating around the filesystem.
elif "rosevomitrepo" in _path_parts:
_path_partslist = list(_path_parts)
while _path_partslist[-1] != "rosevomitrepo":
_path_partslist.pop()
_path = os.path.join (*_path_partslist) # The '*' is a "splat" operator
repository_directory = pathlib.PurePath (_path)
else:
# If neither 'rosevomit' nor 'rosevomitrepo' show up in our path, we're well and truly lost. Let's raise an exception (a Python error).
raise FileNotFoundError
# TODO: Honestly, we should split the part of the function above this comment into its own function. Otherwise, this function exceeds a single responsibility.
# Now that we've established which parts of the expected Rosevomit filesystem we can find, we navigate to the best available location to look for the subdirectory.
# TODO: There's a *lot* of advice online about how checking for variable existence in Python is not a good way to handle flow control... so this next part probably needs refactoring.
if "rosevomit_directory" in locals(): # exists
os.chdir (rosevomit_directory)
os.chdir (f"{ARG_dirname}")
elif "repository_directory" in locals(): # exists
os.chdir (repository_directory)
possible_paths = glob.glob (f"*/{ARG_dirname}", recursive=True)
num_possible_paths = len (possible_paths)
if num_possible_paths == 1:
os.chdir (possible_paths[0])
elif num_possible_paths == 0:
raise FileNotFoundError
else:
# If multiple paths are returned, something has gone wrong and we need to stop.
raise ValueError
else:
raise RealityError("This should definitely 100% never happen.")
_dirpath = pathlib.Path.cwd()
# Finally, return to where we started
os.chdir (_cwd)
return _dirpath
|
from __future__ import print_function
import unittest
import numpy as np
from SimPEG import EM
from scipy.constants import mu_0
from SimPEG.EM.Utils.testingUtils import getFDEMProblem
testJ = True
testH = True
verbose = False
TOL = 1e-5
FLR = 1e-20 # "zero", so if residual below this --> pass regardless of order
CONDUCTIVITY = 1e1
MU = mu_0
freq = 1e-1
addrandoms = True
SrcList = ['RawVec', 'MagDipole'] # or 'MagDipole_Bfield', 'CircularLoop', 'RawVec'
def adjointTest(fdemType, comp):
prb = getFDEMProblem(fdemType, comp, SrcList, freq)
# prb.solverOpts = dict(check_accuracy=True)
print('Adjoint {0!s} formulation - {1!s}'.format(fdemType, comp))
m = np.log(np.ones(prb.sigmaMap.nP)*CONDUCTIVITY)
mu = np.ones(prb.mesh.nC)*MU
if addrandoms is True:
m = m + np.random.randn(prb.sigmaMap.nP)*np.log(CONDUCTIVITY)*1e-1
mu = mu + np.random.randn(prb.mesh.nC)*MU*1e-1
survey = prb.survey
u = prb.fields(m)
v = np.random.rand(survey.nD)
w = np.random.rand(prb.mesh.nC)
vJw = v.dot(prb.Jvec(m, w, u))
wJtv = w.dot(prb.Jtvec(m, v, u))
tol = np.max([TOL*(10**int(np.log10(np.abs(vJw)))),FLR])
print(vJw, wJtv, vJw - wJtv, tol, np.abs(vJw - wJtv) < tol)
return np.abs(vJw - wJtv) < tol
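# Illustration only (not used by the tests below): the identity that adjointTest verifies,
# v . (J w) == w . (J^T v), shown with an explicit dense matrix in place of the SimPEG
# operators so the check |vJw - wJtv| < tol is easy to see.
def adjoint_identity_sketch():
    J = np.random.randn(5, 3)
    v, w = np.random.randn(5), np.random.randn(3)
    return np.abs(v.dot(J.dot(w)) - w.dot(J.T.dot(v))) < 1e-10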
class FDEM_AdjointTests(unittest.TestCase):
if testJ:
def test_Jtvec_adjointTest_jxr_Jform(self):
self.assertTrue(adjointTest('j', 'jxr'))
def test_Jtvec_adjointTest_jyr_Jform(self):
self.assertTrue(adjointTest('j', 'jyr'))
def test_Jtvec_adjointTest_jzr_Jform(self):
self.assertTrue(adjointTest('j', 'jzr'))
def test_Jtvec_adjointTest_jxi_Jform(self):
self.assertTrue(adjointTest('j', 'jxi'))
def test_Jtvec_adjointTest_jyi_Jform(self):
self.assertTrue(adjointTest('j', 'jyi'))
def test_Jtvec_adjointTest_jzi_Jform(self):
self.assertTrue(adjointTest('j', 'jzi'))
def test_Jtvec_adjointTest_hxr_Jform(self):
self.assertTrue(adjointTest('j', 'hxr'))
def test_Jtvec_adjointTest_hyr_Jform(self):
self.assertTrue(adjointTest('j', 'hyr'))
def test_Jtvec_adjointTest_hzr_Jform(self):
self.assertTrue(adjointTest('j', 'hzr'))
def test_Jtvec_adjointTest_hxi_Jform(self):
self.assertTrue(adjointTest('j', 'hxi'))
def test_Jtvec_adjointTest_hyi_Jform(self):
self.assertTrue(adjointTest('j', 'hyi'))
def test_Jtvec_adjointTest_hzi_Jform(self):
self.assertTrue(adjointTest('j', 'hzi'))
def test_Jtvec_adjointTest_exr_Jform(self):
self.assertTrue(adjointTest('j', 'exr'))
def test_Jtvec_adjointTest_eyr_Jform(self):
self.assertTrue(adjointTest('j', 'eyr'))
def test_Jtvec_adjointTest_ezr_Jform(self):
self.assertTrue(adjointTest('j', 'ezr'))
def test_Jtvec_adjointTest_exi_Jform(self):
self.assertTrue(adjointTest('j', 'exi'))
def test_Jtvec_adjointTest_eyi_Jform(self):
self.assertTrue(adjointTest('j', 'eyi'))
def test_Jtvec_adjointTest_ezi_Jform(self):
self.assertTrue(adjointTest('j', 'ezi'))
def test_Jtvec_adjointTest_bxr_Jform(self):
self.assertTrue(adjointTest('j', 'bxr'))
def test_Jtvec_adjointTest_byr_Jform(self):
self.assertTrue(adjointTest('j', 'byr'))
def test_Jtvec_adjointTest_bzr_Jform(self):
self.assertTrue(adjointTest('j', 'bzr'))
def test_Jtvec_adjointTest_bxi_Jform(self):
self.assertTrue(adjointTest('j', 'bxi'))
def test_Jtvec_adjointTest_byi_Jform(self):
self.assertTrue(adjointTest('j', 'byi'))
def test_Jtvec_adjointTest_bzi_Jform(self):
self.assertTrue(adjointTest('j', 'bzi'))
if testH:
def test_Jtvec_adjointTest_hxr_Hform(self):
self.assertTrue(adjointTest('h', 'hxr'))
def test_Jtvec_adjointTest_hyr_Hform(self):
self.assertTrue(adjointTest('h', 'hyr'))
def test_Jtvec_adjointTest_hzr_Hform(self):
self.assertTrue(adjointTest('h', 'hzr'))
def test_Jtvec_adjointTest_hxi_Hform(self):
self.assertTrue(adjointTest('h', 'hxi'))
def test_Jtvec_adjointTest_hyi_Hform(self):
self.assertTrue(adjointTest('h', 'hyi'))
def test_Jtvec_adjointTest_hzi_Hform(self):
self.assertTrue(adjointTest('h', 'hzi'))
def test_Jtvec_adjointTest_jxr_Hform(self):
self.assertTrue(adjointTest('h', 'jxr'))
def test_Jtvec_adjointTest_jyr_Hform(self):
self.assertTrue(adjointTest('h', 'jyr'))
def test_Jtvec_adjointTest_jzr_Hform(self):
self.assertTrue(adjointTest('h', 'jzr'))
def test_Jtvec_adjointTest_jxi_Hform(self):
self.assertTrue(adjointTest('h', 'jxi'))
def test_Jtvec_adjointTest_jyi_Hform(self):
self.assertTrue(adjointTest('h', 'jyi'))
def test_Jtvec_adjointTest_jzi_Hform(self):
self.assertTrue(adjointTest('h', 'jzi'))
def test_Jtvec_adjointTest_exr_Hform(self):
self.assertTrue(adjointTest('h', 'exr'))
def test_Jtvec_adjointTest_eyr_Hform(self):
self.assertTrue(adjointTest('h', 'eyr'))
def test_Jtvec_adjointTest_ezr_Hform(self):
self.assertTrue(adjointTest('h', 'ezr'))
def test_Jtvec_adjointTest_exi_Hform(self):
self.assertTrue(adjointTest('h', 'exi'))
def test_Jtvec_adjointTest_eyi_Hform(self):
self.assertTrue(adjointTest('h', 'eyi'))
def test_Jtvec_adjointTest_ezi_Hform(self):
self.assertTrue(adjointTest('h', 'ezi'))
def test_Jtvec_adjointTest_bxr_Hform(self):
self.assertTrue(adjointTest('h', 'bxr'))
def test_Jtvec_adjointTest_byr_Hform(self):
self.assertTrue(adjointTest('h', 'byr'))
def test_Jtvec_adjointTest_bzr_Hform(self):
self.assertTrue(adjointTest('h', 'bzr'))
def test_Jtvec_adjointTest_bxi_Hform(self):
self.assertTrue(adjointTest('h', 'bxi'))
def test_Jtvec_adjointTest_byi_Hform(self):
self.assertTrue(adjointTest('h', 'byi'))
def test_Jtvec_adjointTest_bzi_Hform(self):
self.assertTrue(adjointTest('h', 'bzi'))
if __name__ == '__main__':
unittest.main()
|
from django.contrib import admin
from bmh_lims.database import models
from simple_history.admin import SimpleHistoryAdmin
# Register your models here.
admin.site.register(models.Sample, SimpleHistoryAdmin)
admin.site.register(models.WorkflowDefinition, SimpleHistoryAdmin)
admin.site.register(models.WorkflowBatch, SimpleHistoryAdmin)
admin.site.register(models.Project, SimpleHistoryAdmin)
admin.site.register(models.Lab, SimpleHistoryAdmin)
admin.site.register(models.DNAExtractionResults, SimpleHistoryAdmin)
|
import util
import events
import calabi_yau
import macro
'''
Uncomment to enable code reloading in development environments:
```
reload(util)
reload(calabi_yau)
```
Example:
`RunPythonScript` `~/lib/macro/build.py`
`RunPythonScript` `~/lib/macro/export.py`
'''
|
'''
This script finds the top third-party domains (TPs) setting cookies on the news websites.
It also finds the top 10 TPs and plots a bar plot.
'''
import sqlite3
import matplotlib.pyplot as plt
import operator
import numpy as np
import pandas as pd
import seaborn as sns
# LEFT_TO_LEFTCENTRE
left = ["ndtv24x7","indianexpress","hindustantimes","punjabkesari","thehindu","anandabazar","scoopwhoop","ndtv","sandesh","dinamalar","mathrubhumi",
"lokmat","greaterkashmir","scroll","siasat","andhrajyothy","kashmirlife","outlookindia","eenadu","huffingtonpost","thenewsminute","thewire",
"youthkiawaaz","thehindubusinessline","risingkashmir","milligazette","mumbaimirror","thestatesman","telegraphindia","deshabhimani","newslaundry",
"bangaloremirror","altnews","kashmirreader","countercurrents","ahmedabadmirror","punemirror","timesheadline","greatandhra"]
# RIGHT_TO_RIGHTCENTRE
right = ["aajtak","intoday","jagran","bhaskar","zeenews","indiatvnews","abplive","amarujala","indiatimes","navbharattimes","patrika","news18","timesnownews","newsonair", "nic",
"newindianexpress","india","firstpost","republicworld","newslivetv","deccanchronicle","fakingnews","dnaindia","gujaratsamachar","dailyhunt",
"newdelhitimes","webdunia","moneycontrol","newsnation","newsnationtv","swarajyamag","aninews","dailyo","forbesindia","dailyexcelsior","oneindia","opindia",
"starofmysore","dailypioneer","ians"]
# CENTRE_AND_LEASTBIASED
centre = ["timesofindia","indiatoday","news24","news24online","bbc","thequint","jansatta","economictimes","mid-day","dailythanthi","manoramaonline",
"livehindustan","financialexpress","cnbctv18","businesstoday","livemint","catchnews","businessinsider","deccanherald","theprint","wionews",
"ptinews","business-standard","tribuneindia","headlinesoftoday","nagpurtoday","asianage","freepressjournal"]
# UNKNOWN
unknown = ["rvcj","thelogicalindian","brutindia","brut","thebetterindia","pinkvilla","topyaps","telanganatoday","wisden","statetimes","sentinelassam","assamtribune",
"socialsamosa","newstracklive","leagueofindia","prabhatkhabar","thesangaiexpress","news4masses","sinceindependence","5abnow","factchecker"]
def find_website_leaning(host, sites_list):
domain_sites_count[host] = [0, 0, 0, 0]
for url in sites_list:
url = url.split('/')[2]
url_list = url.split('.')
if len(url_list) == 2:
url = url_list[0]
elif len(url_list) >= 3:
url = url_list[1]
if url in left:
domain_sites_count[host][0] += 1
elif url in right:
domain_sites_count[host][1] += 1
elif url in centre:
domain_sites_count[host][2] += 1
elif url in unknown:
domain_sites_count[host][3] += 1
else:
print('%s is not present in any leaning.' % url)
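# Example of the hostname handling above (illustrative URLs):
#   'https://www.ndtv.com/india'.split('/')[2]  -> 'www.ndtv.com' -> parts ['www','ndtv','com'] -> 'ndtv'
#   'https://scroll.in/latest'.split('/')[2]    -> 'scroll.in'    -> parts ['scroll','in']      -> 'scroll'
# so the extracted token is what gets matched against the left/right/centre/unknown lists.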
homepages = list(pd.read_csv('news_homepage_urls.csv')['news_url'])
#print(homepages)
conn = sqlite3.connect('./topics_url_crawls/topics_url_crawl4/crawl-data_all_topics4.sqlite')
c = conn.cursor()
domain_sites_dict = {}
domain_sites_dict_multiple = {}
total_cookies = 0
homepage_visit_ids = {}
for row in c.execute('SELECT visit_id, site_url FROM site_visits'):
if row[1] in homepages:
#homepage_visit_ids.append(row[0])
#homepage_visit_ids[row[0]] = homepages.index(row[1]) + 1
homepage_visit_ids[row[0]] = row[1]
#print(homepage_visit_ids)
for row in c.execute('SELECT visit_id, host FROM javascript_cookies'):
if row[0] in homepage_visit_ids:
if row[1].startswith('.'):
host = row[1][1:]
else:
host = row[1]
if host in domain_sites_dict_multiple:
domain_sites_dict_multiple[host].append(homepage_visit_ids[row[0]])
else:
domain_sites_dict_multiple[host] = [homepage_visit_ids[row[0]]]
total_cookies += 1
conn.close()
domain_sites_count = {}
for host in domain_sites_dict_multiple:
#domain_sites_count[host] = len(set(domain_sites_dict[host]))
# For calculating Prominence.
# domain_sites_dict[host] = list(set(domain_sites_dict[host]))
# for site_id in domain_sites_dict[host]:
# if host not in domain_sites_count:
# domain_sites_count[host] = 1.0 / homepage_visit_ids[site_id]
# else:
# domain_sites_count[host] += 1.0 / homepage_visit_ids[site_id]
# For calculating leaning-wise % websites.
if host == 'economictimes.indiatimes.com':
domain_sites_dict['economictimes.com'] = list(set(domain_sites_dict_multiple[host]))
host = 'economictimes.com'
else:
domain_sites_dict[host] = list(set(domain_sites_dict_multiple[host]))
find_website_leaning(host, domain_sites_dict[host])
# Sort dict in descending order of #cookies from each unique domain.
sorted_dom_dict = sorted(domain_sites_count.items(), key=lambda e: sum(e[1]), reverse=True)
print('Total cookies:', total_cookies)
print('Total unique third-party domains:', len(sorted_dom_dict))
count = 0
cookies_by_top10 = 0
top_15 = {}
# 40 37 26
for k, v in sorted_dom_dict:
cookies_by_top10 += len(domain_sites_dict_multiple[k])
top_15[k] = [ (v[0]/39.0)*100, (v[1]/38.0)*100, (v[2]/28.0)*100 ]
count += 1
if count == 10:
break
print('Total cookies by top 10 third-parties:', cookies_by_top10)
indx = np.arange(10)
keys_list = list(top_15.keys())
values = list(top_15.values())
values[0][1] = values[0][1] - (2/38.0)*100
left_vals = [x[0] for x in values]
right_vals = [x[1] for x in values]
center_vals = [x[2] for x in values]
general_web = [21, 8.2, 3.9, 1.3, 1.3, 3.6, 2.7, 0.4, 2, 2.3]
bar1 = plt.bar(indx-0.3, left_vals, align='center', color='red', edgecolor='black', width=0.2)
bar2 = plt.bar(indx-0.1, center_vals, align='center', color='yellow', edgecolor='black', width=0.2)
bar3 = plt.bar(indx+0.1, right_vals, align='center', color='blue', edgecolor='black', width=0.2)
bar4 = plt.bar(indx+0.3, general_web, align='center', color='black', edgecolor='black', width=0.2)
plt.xticks(indx, keys_list, rotation=90, size=12)
plt.xlabel('Top 10 Third-Party Domains (setting cookies)', size=12)
plt.ylabel('% of Websites', size=12)
plt.legend((bar1[0], bar2[0], bar3[0], bar4[0]), ('Left', 'Centre', 'Right', 'General Web'))
plt.show()
|
"""Register a number of portable filters (with a Coffin library object)
that require a compatibility layer to function correctly in both engines.
"""
from jinja2 import Markup
from django.utils.safestring import mark_safe, mark_for_escaping
def needing_autoescape(value, autoescape=None):
return str(autoescape)
needing_autoescape.needs_autoescape = True
def jinja_safe_output(value):
return Markup(value)
def django_safe_output(value):
return mark_safe(value)
def unsafe_output(value):
return unicode(value)
def django_raw_output(value):
return value
def django_escape_output(value):
# Make sure the value is converted to unicode first, because otherwise,
# if it is already SafeData (for example, when coming from the template
# code), then mark_for_escaping would do nothing. We want to guarantee
# an EscapeData return value in this filter though.
return mark_for_escaping(unicode(value))
from coffin.template import Library
register = Library()
register.filter('needing_autoescape', needing_autoescape)
register.filter('jinja_safe_output', jinja_safe_output)
register.filter('django_safe_output', django_safe_output)
register.filter('django_raw_output', django_raw_output)
register.filter('unsafe_output', unsafe_output)
register.filter('django_escape_output', django_escape_output) |
from collections import namedtuple
import re
import spacy
from spacytextblob.spacytextblob import SpacyTextBlob
class AnalyticsCreator:
"""Adds desired datapoints to each hansard element in the inputted named_tuple. This class acts a bit like a
library of different methods that can be incorporateed as and when they are desired."""
def __init__(self, combined_dict):
self.combined_dict = combined_dict
self.current_tup = None
self.CountAddedTuple = None
self.new_field_name = None
self.analytics_to_add_dict = {}
self.nlp = None
def create_named_tuple_with_additional_analytic(self):
for d in self.combined_dict.items():
print(d)
first_key = list(self.combined_dict.keys())[0]
a_current_named_tuple = self.combined_dict[first_key]
print(a_current_named_tuple._fields)
return namedtuple("NewTuple", a_current_named_tuple._fields + (self.new_field_name,))
def get_word_count(self):
word_count = 0
if self.current_tup.hansard_text:
word_list = self.current_tup.hansard_text.split(" ")
word_count = len(word_list)
return word_count
def get_whether_interrupted(self):
is_interrupted = 0
interjection = self.current_tup.interjection
if interjection:
if re.fullmatch(r".*Interruption.*", interjection):
is_interrupted = 1
return is_interrupted
def preprocessing_spacy(self):
"""This is loaded first to avoid re-loading on every iteration."""
if not self.nlp:
self.nlp = spacy.load("en_core_web_md")
spacy_text_blob = SpacyTextBlob()
self.nlp.add_pipe(spacy_text_blob)
def get_sentiment_subjectivity(self):
text = self.current_tup.hansard_text
doc = self.nlp(text)
subjectivity = doc._.sentiment.subjectivity
print(subjectivity)
return subjectivity
def get_sentiment_polarity(self):
text = self.current_tup.hansard_text
doc = self.nlp(text)
polarity = doc._.sentiment.polarity
print(polarity)
return polarity
def add_datapoint_to_named_tuple(self, func_to_add, preprocessing_func=None):
if preprocessing_func:
preprocessing_func()
NewTuple = self.create_named_tuple_with_additional_analytic()
for k, tup in self.combined_dict.items():
self.current_tup = tup
new_value = func_to_add()
new_tuple = NewTuple(*tup, new_value)
self.combined_dict[k] = new_tuple
def add_word_count(self):
self.new_field_name = "word_count"
self.add_datapoint_to_named_tuple(self.get_word_count)
def compile_analytics_to_add_dict(self):
self.analytics_to_add_dict = {
"word_count": [self.get_word_count],
"interruptions_count": [self.get_whether_interrupted],
"subjectivity": [self.get_sentiment_subjectivity, self.preprocessing_spacy],
"polarity": [self.get_sentiment_subjectivity, self.preprocessing_spacy]
}
def add_to_tuple(self):
self.compile_analytics_to_add_dict()
for field_name, functions_list in self.analytics_to_add_dict.items():
get_function = functions_list[0]
preprocessor = None
if len(functions_list) == 2:
preprocessor = functions_list[1]
self.new_field_name = field_name
self.add_datapoint_to_named_tuple(get_function, preprocessing_func=preprocessor)
for d in self.combined_dict.items():
print(d)
return self.combined_dict
class ProportionCalculator:
"""In order to extract meaning from e.g. no. of words spoken by gender, we need to know the underlying
distribution of MLAs."""
def __init__(self, mla_param_dict, desired_identifiers):
self.mla_param_dict = mla_param_dict
self.desired_identifiers = desired_identifiers
self.proportions_dict = {}
self.current_tup = None
def get_proportions(self, identifier):
tuple_list = [v for v in self.mla_param_dict.values()]
dict_list = [v._asdict() for v in tuple_list]
identifier_count = {}
for d in dict_list:
id_output = d[identifier]
count_value = identifier_count.get(id_output)
if count_value:
identifier_count[id_output] += 1
else:
identifier_count[id_output] = 1
total_count = sum([v for v in identifier_count.values()])
identifier_as_proportion = {k: v / total_count for k, v in identifier_count.items()}
return identifier_as_proportion
def get_all_proportions(self):
sample_named_tuple = [v for v in self.mla_param_dict.values()][0]
all_identifiers = [i for i in self.desired_identifiers if i in sample_named_tuple._fields]
all_identifier_counts = list(map(self.get_proportions, all_identifiers))
identifier_counts_dict = dict(zip(all_identifiers, all_identifier_counts))
return identifier_counts_dict
class DiscreteAnalyticsCreator:
"""Groups analytics at the hansard component level into chosen identifiers with a meaningfully limited number of
discrete groups"""
get_mean_metrics = {"subjectivity", "polarity"}
get_proportional = {"word_count", "interruptions_count"}
def __init__(self, combined_analytics_dict, proportions_dict):
self.combined_analytics_dict = combined_analytics_dict
self.proportions_dict = proportions_dict
self.desired_identifiers = None
self.desired_metrics = None
self.current_identifier = None
self.current_metric = None
self.current_identifier_count = {}
self.totalizer_dicts = {}
def get_tuples_as_dict(self):
tuple_list = [v for v in self.combined_analytics_dict.values()]
dict_list = [v._asdict() for v in tuple_list]
return dict_list
def totalize_metric_for_identifier(self):
dict_list = self.get_tuples_as_dict()
self.current_identifier_count = {}
for d in dict_list:
id_output = d[self.current_identifier]
identifier_grouping = self.current_identifier_count.get(id_output)
identifier_value = d[self.current_metric]
if identifier_grouping:
self.current_identifier_count[id_output] += identifier_value
else:
self.current_identifier_count[id_output] = identifier_value
def calculate_average(self):
identifier_average = {k: v / len(self.combined_analytics_dict) for k, v in
self.current_identifier_count.items()}
return identifier_average
def calculate_proportion(self):
total_count_for_metric = sum([v for v in self.current_identifier_count.values()])
proportion_of_total_count_by_grouping = {k: v / total_count_for_metric for k, v in
self.current_identifier_count.items()}
expected_proportions_dict = self.proportions_dict[self.current_identifier]
proportion_diff_dict = {}
for k, v in proportion_of_total_count_by_grouping.items():
expected_proportion = expected_proportions_dict[k]
proportion_found = v
proportion_diff = proportion_found - expected_proportion
proportion_diff_as_pcnt = proportion_diff * 100
proportion_diff_as_pcnt_1dp = round(proportion_diff_as_pcnt, 1)
proportion_diff_dict[k] = proportion_diff_as_pcnt_1dp
return proportion_diff_dict
def run_calculation(self):
if self.current_metric in self.get_mean_metrics:
analytic_output_dict = self.calculate_average()
else:
analytic_output_dict = self.calculate_proportion()
return analytic_output_dict
def get_all_desired_metrics_for_all_desired_identifiers(self):
overall_analytics_summary = {identifier: {} for identifier in self.desired_identifiers}
for identifier in self.desired_identifiers:
self.current_identifier = identifier
for metric in self.desired_metrics:
self.current_metric = metric
self.totalize_metric_for_identifier()
calc_output_dict = self.run_calculation()
overall_analytics_summary[identifier][metric] = calc_output_dict
return overall_analytics_summary
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 28 09:39:44 2021
@author: ike
"""
import os.path as op
import torch
from .endonet import EndoNet
from .resunet import ResUNet, ResUNetClassifier
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MODELS = dict(R=ResUNet, E=EndoNet, RC=ResUNetClassifier)
def getModel(params):
model = MODELS[params("model")](params).double().to(DEVICE)
checkpoint = None
if op.isfile(params("mSave")):
checkpoint = torch.load(params("mSave"), map_location=DEVICE)
model.load_state_dict(checkpoint["Model"])
return model, checkpoint |
# -*- coding: utf-8 -*-
"""Disaggregation related utilities"""
import logging
import numpy as np
def mains_to_batches(mains, num_seq_per_batch, seq_length, pad=True, stride=1):
"""
In the disaggregation step this is used to convert mains into batches.
E.g. [0,1,2,3,4,5] with num_seq_per_batch=4, stride=1, seq_length=3
will be converted into [[[[0],[1],[2]],[[1],[2],[3]],[[2],[3],[4]],[[3],[4],[5]]]]
:param mains: 1D np.ndarray. It is highly advisable to pad `mains` with `seq_length` elements
at both ends so the net can slide over the very start and end.
:param num_seq_per_batch: int, number of sequences in a batch
:param seq_length: int, sequence length
:param pad: boolean, whether to zero-pad the mains at both ends
:param stride: int, optional, step between consecutive windows
:return: batches: list of 3D (num_sequences, seq_length, 1) np.ndarray
"""
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
assert mains.ndim == 1
if pad:
mains = np.pad(np.copy(mains), seq_length - 1, 'constant')
n_mains_samples = len(mains)
# Divide mains data into batches
n_batches = ((float(n_mains_samples) / stride) / num_seq_per_batch)
n_batches = np.ceil(n_batches).astype(int)
logging.debug("Number of batches {}".format(n_batches))
batches = []
seq_starts = []
seq_indexes = list(range(0, n_mains_samples - seq_length, stride))
for batch_i in range(n_batches):
selected_indexes = seq_indexes[batch_i*num_seq_per_batch:(batch_i+1)*num_seq_per_batch]
batch = []
for inx in selected_indexes:
seq = mains[inx:inx+seq_length]
if len(seq) == seq_length:
batch.append(seq)
seq_starts.append(inx)
batch = np.reshape(np.array(batch), (len(batch), seq_length, 1)).astype(np.float32)
batches.append(batch)
return batches
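# Hedged usage sketch for mains_to_batches: each returned element is a
# (n_sequences, seq_length, 1) array of overlapping windows, e.g.
#
#     batches = mains_to_batches(np.arange(6, dtype=np.float32),
#                                num_seq_per_batch=4, seq_length=3, pad=False)
#     # rows of batches[0] are windows such as [[0.], [1.], [2.]] and [[1.], [2.], [3.]]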
def disaggregate(model, mains, model_name, num_seq_per_batch, seq_len,
appliance, target_scale, stride=1):
"""
Disaggregation function to predict all results for whole time series mains.
:param model: tf model object
:param mains: numpy.ndarray, shape(-1,)
:param model_name: name of the used model
:param num_seq_per_batch: int, number of sequences to have in the batch
:param seq_len: int, length of the sequence
:param appliance: str, name of the appliance
:param target_scale: int, scaling factor of predicted value
:param stride: int, stride of moving window
:return:
p: np.ndarray, shape(-1,), disaggregated power of the appliance
metrics = dict containing the metrics
"""
# Converting mains array into batches for prediction
mains = mains.reshape(-1,)
agg_batches = mains_to_batches(mains, num_seq_per_batch, seq_len, stride=stride, pad=True)
if (appliance == 'fridge') or (appliance == 'Refrigerator') or (appliance == 'REFRIGERATOR'):
if target_scale:
target_max = target_scale
else:
target_max = 313
target_min = 0
input_max = 7879
input_min = 80
elif (appliance == 'washing machine') or (appliance == 'Washing_Machine') or (appliance == 'WASHING_MACHINE'):
if target_scale:
target_max = target_scale
else:
target_max = 3999
target_min = 0
input_max = 7879
input_min = 80
elif (appliance == 'dishwasher') or (appliance == 'Dishwasher') or (appliance == 'DISHWASHER'):
if target_scale:
target_max = target_scale
else:
target_max = 500
target_min = 0
input_max = 7879
input_min = 80
elif (appliance == 'Electric_Vehicle') or (appliance == 'electric vehicle') or (appliance=='ELECTRIC_VEHICLE'):
if target_scale:
target_max = target_scale
else:
target_max = 6000
target_min = 0
input_max = 7879
input_min = 80
elif (appliance == 'DRYER'):
if target_scale:
target_max = target_scale
else:
target_max = 2500
target_min = 0
input_max = 7879
input_min = 80
# list to store predictions
y_net = []
for id, batch in enumerate(agg_batches):
X_pred = np.copy(batch.reshape(-1, seq_len, 1))
X_pred /= (input_max-input_min)
X_pred = X_pred * 10
y_net.append(model.predict(X_pred))
# converting the predictions to rectangles
rectangles = pred_to_rectangles(y_net, num_seq_per_batch, seq_len, stride)
return rectangles
def ensemble_rectangles(rectangles_dict, target_scale, seq_len, stride, probability_threshold, sample_period_seconds, mains):
"""
Ensemble the predicted activities into time series data.
:param rectangles_dict: dict, activity dict from the prediction
:param target_scale: int, scaling factor to convert predicted value to real value
:param seq_len: int
:param stride: int
:param probability_threshold: float
:param sample_period_seconds: str, sampling period in 'xs'.
:param mains: 1D np.ndarray
:return: disaggregated_power: np.ndarray, time series data
"""
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
# Scaling the predictions
for model, _ in rectangles_dict.items():
rectangles_dict[model][:, 2] *= target_scale # scaling the y power estimates
# Merging the rectangles
rectangles = np.array([rect for rectangles in rectangles_dict.values() for rect in rectangles])
# thresholding predictions
num_samples = len(mains) + 2 * (seq_len - 1)
matrix = np.ones(shape=(num_samples, 2), dtype=np.float32)
# TODO: Remove those rectangles below a minimum power threshold
# TODO: This assumes padding always
for rect in rectangles: # getting one rectangle
start_point = int(min(rect[0], num_samples))
end_point = int(min(rect[1], num_samples))
matrix[start_point:end_point, 0] += 1
matrix[start_point:end_point, 1] += rect[2]
matrix[:, 1] = (matrix[:, 1] - 1) / matrix[:, 0]
# the number of rectangles covering a time point, relative to the maximum overlap, must exceed probability_threshold
prob = (matrix[:, 0] - 1) / matrix[:, 0].max()
matrix[prob < probability_threshold] = 0
matrix[matrix[:, 0] < 2] = 0
power_vector = matrix[:, 1]
# remove the padding at the start and trim the result to the length of `mains` at the end
disaggregated_power = power_vector[seq_len - 1:seq_len + mains.shape[0] - 1]
return disaggregated_power
def pred_to_rectangles(pred, num_seq_per_batch, seq_len, stride):
"""
Convert prediction result to rectangle values for displaying in charts
:param pred: list, list of predictions from network
:param num_seq_per_batch: int, number of seqs per batch
:param seq_len: int
:param stride: int
:return: np.ndarray with batches of [start, end, height] as rectangles
"""
rectangles = []
for id, batch in enumerate(pred):
start_batch = id * num_seq_per_batch * stride
if batch.shape[0]:
for i in range(batch.shape[0]):
start_seq = start_batch + stride * i
start_rec = start_seq + seq_len * batch[i][0]
end_rec = start_seq + seq_len * batch[i][1]
height_rec = batch[i][2]
if ~np.isnan(start_rec) and ~np.isnan(end_rec):
start_rec = int(round(start_rec))
end_rec = int(round(end_rec))
else:
# prediction contained NaN; fall back to an empty rectangle
start_rec = 0
end_rec = 0
height_rec = 0
rectangles.append([start_rec, end_rec, height_rec])
return np.array(rectangles)
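# Worked example of the index arithmetic above (illustrative numbers): with
# num_seq_per_batch=16, seq_len=100 and stride=1, the 3rd sequence (i=2) of the 2nd batch
# (id=1) starts at 1*16*1 + 1*2 = 18, so a prediction [0.25, 0.75, 310.0] becomes the
# rectangle [18 + 100*0.25, 18 + 100*0.75, 310.0] = [43, 93, 310.0].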
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
IKA overhead stirrer.
"""
import argparse
import logging
from serial import Serial
from instruments.instrument import Instrument
from lib import helper_functions
__author__ = "Brent Maranzano"
__license__ = "MIT"
logger = logging.getLogger("instrument.Bronkhorst")
class Ika(Instrument):
"""Ika overhead stirrer interface.
"""
def __init__(self, port):
"""Sets an object attribute with defining the device port.
Arguments
port (str): Device port
"""
super().__init__(port)
def connect(self):
"""Connect to a serial port.
Arguments
port (str): Device port
"""
self._ser = Serial(port=self._port, baudrate=9600, bytesize=7,
parity="E", stopbits=1, rtscts=0, timeout=0.5)
def start(self, callback=None):
"""Start stirrer.
"""
command = "START_4 \r \n"
self._queue_request(command=command, callback=callback)
def stop(self, callback=None):
"""Stop stirrer.
"""
command = "STOP_4 \r \n"
self._queue_request(command=command, callback=callback)
def set_rate(self, rate, callback=None):
"""Set the stir rate (rev/min)
"""
command = "OUT_SP_4 {:.2f} \r \n".format(rate)
self._queue_request(command=command, callback=callback)
def get_rate_SP(self, callback=None):
"""Get the stir rate set point (rev/min)
"""
command = "IN_SP_4 \r \n"
self._queue_request(command=command, callback=callback)
def get_rate_PV(self, callback=None):
"""Get the stir rate present value (rev/min)
"""
command = "IN_PV_4 \r \n"
self._queue_request(command=command, callback=callback)
def main(self):
"""Start the instrument communication.
"""
super()._start_threads()
self.connect()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Bronkhorst mass flow.")
parser.add_argument(
"--port",
help="Device port instrument is connected",
type=str,
default="/dev/ttyUSB0"
)
parser.add_argument(
"--debug_level",
help="debugger level (e.g. INFO, WARN, DEBUG, ...)",
type=str,
default="INFO"
)
args = parser.parse_args()
instrument = Ika(args.port)
instrument.connect()
instrument.run()
|
# coding: utf-8
"""
Description:
Contains helper functions to assist you when working with database translations
Functions:
all_instances_as_translated_dict: Applies 'instance_as_translated_dict' to the iterable of instances
get_current_language: Returns the current active language. Will set a default language if none is found.
get_translation: Returns a translated text using an Item id and a Language instance
    instance_as_translated_dict: Turns a model instance into a dict containing all of its fields
set_default_language: Sets the default language if none is chosen
    update_user_language: Updates the user's current language following Django guidelines
"""
# --------------------------------------------------------------------------------
# > Imports
# --------------------------------------------------------------------------------
# Built-in
# Django
from django.db import models
from django.db.models.fields.files import ImageFieldFile, FieldFile
from django.utils.translation import activate, LANGUAGE_SESSION_KEY
# Third-party
# Local
from .models import Item, Language, Translation
# --------------------------------------------------------------------------------
# > Functions
# --------------------------------------------------------------------------------
def all_instances_as_translated_dict(instances, depth=True, language=None, request=None):
"""
Description:
Applies 'instance_as_translated_dict' to the iterable of instances
Returns a list of dicts which contains the fields of all your instances
Check the 'instance_as_translated_dict' for more info
Args:
instances (iterable): An iterable of your model instances
depth (bool, optional): Determines if FK will also be transformed into dicts. Defaults to True.
language (Language, optional): A Language instance from this app. Defaults to None.
        request (HttpRequest, optional): HttpRequest from Django. Defaults to None.
Returns:
list: A list of dicts, where each dict contains the fields/values of the initial instances
"""
# Checking arguments
if language is None and request is None:
raise TypeError("You must provide either 'language' or 'request'")
# Get the language from the session
if language is None:
language = get_current_language(request)
# Loop over instances
results = []
for instance in instances:
result = instance_as_translated_dict(instance, depth=depth, language=language)
results.append(result)
return results
def get_current_language(request, set_default=True, default_id=1):
"""
Description:
Returns the current active language. Will set a default language if none is found.
Args:
request (HttpRequest): HttpRequest from Django
        set_default (bool, optional): Indicates if a default language must be activated (if none currently is). Defaults to True.
        default_id (int, optional): The PK for the default Language instance. Defaults to 1.
Returns:
Language: The currently used language from our app's Language model
"""
# Base variables
language = None
language_name = request.session.get(LANGUAGE_SESSION_KEY, False)
# Get the language
if language_name:
try:
language = Language.objects.get(django_language_name=language_name)
except Language.DoesNotExist:
pass
# Set a default language if necessary
if language is None and set_default:
language = set_default_language(request, default_id)
# Always return the active language
return language
def get_translation(language, item_id):
"""
Description:
Returns a translated text using an Item id and a Language instance
Args:
language (Language): Language instance from this app
item_id (int): Key contained in the 'translated field'
Returns:
str: The translated text
"""
translation = ""
try:
entry = Translation.objects.get(language=language, item_id=item_id)
translation = entry.text
except Translation.DoesNotExist:
pass
return translation
def instance_as_translated_dict(instance, depth=True, language=None, request=None):
"""
Description:
        Turns a model instance into a dict containing all of its fields
        Language can be given as an argument, or guessed through the use of "request"
        With "depth" set to True, ForeignKey fields will also be transformed into sub-dicts
Files and images are replaced by a subdict with 'path', 'url', and 'name' keys
Meaning you will be able to manipulate the dict in an HTML template much like an instance
Args:
instance (Model): An instance from any of your models
depth (bool, optional): Determines if FK will also be transformed into dicts. Defaults to True.
language (Language, optional): A Language instance from this app. Defaults to None.
        request (HttpRequest, optional): HttpRequest from Django. Defaults to None.
Returns:
dict: A dict with all of the instance's fields and values
"""
# Checking arguments
if language is None and request is None:
raise TypeError("You must provide either 'language' or 'request'")
# Get the language from the session
if language is None:
language = get_current_language(request)
# Loop over fields
translated_dict = {}
fields = instance._meta.get_fields()
for field in fields:
value = getattr(instance, field.name, None)
if value is not None:
value_type = type(value)
# Case 1: Get the translation
if value_type == Item:
new_value = Translation.objects.get(item=value, language=language).text
# Case 2: Go to the linked model and repeat the process (unless depth=False)
elif issubclass(value_type, models.Model):
if depth:
new_value = instance_as_translated_dict(value, depth=True, language=language)
else:
new_value = value
            # Case 3: Replace file/image fields with a dict of 'name', 'url', and 'path'
elif value_type in {ImageFieldFile, FieldFile}:
if value:
new_value = {
"name": getattr(value, "name", ""),
"url": getattr(value, "url", ""),
"path": getattr(value, "path", ""),
}
else:
new_value = ""
# Case 4: Keep the value as it is
else:
new_value = value
translated_dict[field.name] = new_value
return translated_dict
def set_default_language(request, pk=1):
"""Sets the default language if none is chosen"""
language = Language.objects.get(id=pk)
update_user_language(request, language=language)
return language
def update_user_language(request, language=None, language_id=None):
"""
Description:
        Updates the user's current language following Django guidelines
This will allow for both "Django" frontend translations and "our app" database translation
The new language must be passed either through a Language instance or an ID
Args:
request (HttpRequest): Request object from Django, used to get to the session
language (Language, optional): A Language instance from this app. Defaults to None.
language_id (id, optional): ID of the language in our database. Defaults to None.
"""
# Checking arguments
if language is None and language_id is None:
raise TypeError("You must provide either 'language' or 'language_id'")
# Get the language from the session
if language is None:
language = Language.objects.get(id=language_id)
# Update the user's language
activate(language.django_language_name)
request.session[LANGUAGE_SESSION_KEY] = language.django_language_name
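# --------------------------------------------------------------------------------
# > Usage sketch (added for illustration; not part of the original module)
# --------------------------------------------------------------------------------
# A minimal view combining these helpers. "Book" is a hypothetical model whose
# translatable fields store Item ids; any name below not defined in this module
# is an assumption used purely for illustration.
#
#   from django.shortcuts import render
#
#   def book_list(request):
#       language = get_current_language(request)   # falls back to the default Language
#       books = Book.objects.all()                 # hypothetical queryset
#       context = {"books": all_instances_as_translated_dict(books, depth=True, language=language)}
#       return render(request, "books/list.html", context)
#
#   # Switching the active language (e.g. from a "change language" view):
#   #   update_user_language(request, language_id=2)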
|
from setuptools import setup
setup(name='dmiyakawabadlogging',
version='0.1',
author='Daisuke Miyakawa',
author_email='d.miyakawa+badlogging@gmail.com',
description='Demonstrates bad logging strategy',
long_description='Demonstrates bad logging strategy',
packages=['dmiyakawabadlogging'],
package_data={'dmiyakawabadlogging': ['README.rst']},
include_package_data=True,
license='Apache License 2.0',
url='https://github.com/dmiyakawa/python_bad_logging',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3'])
|
# coding: utf-8
#
import functools
import inspect
import shlex
from typing import Union
import six
from ._proto import Direction
from .exceptions import SessionBrokenError, UiObjectNotFoundError
def U(x):
if six.PY3:
return x
return x.decode('utf-8') if type(x) is str else x
def E(x):
if six.PY3:
return x
return x.encode('utf-8') if type(x) is unicode else x # noqa: F821
def check_alive(fn):
@functools.wraps(fn)
def inner(self, *args, **kwargs):
if not self.running():
raise SessionBrokenError(self._pkg_name)
return fn(self, *args, **kwargs)
return inner
_cached_values = {}
def cache_return(fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
key = (fn, args, frozenset(kwargs.items()))
value = _cached_values.get(key)
if value is not None:
return value
_cached_values[key] = ret = fn(*args, **kwargs)
return ret
return inner
def hooks_wrap(fn):
@functools.wraps(fn)
def inner(self, *args, **kwargs):
name = fn.__name__.lstrip('_')
self.server.hooks_apply("before", name, args, kwargs, None)
        ret = fn(self, *args, **kwargs)
        self.server.hooks_apply("after", name, args, kwargs, ret)
        return ret
    return inner
# Will be removed in the future
def wrap_wait_exists(fn):
@functools.wraps(fn)
def inner(self, *args, **kwargs):
timeout = kwargs.pop('timeout', self.wait_timeout)
if not self.wait(timeout=timeout):
raise UiObjectNotFoundError({
'code': -32002,
'message': E(self.selector.__str__())
})
return fn(self, *args, **kwargs)
return inner
def intersect(rect1, rect2):
top = rect1["top"] if rect1["top"] > rect2["top"] else rect2["top"]
bottom = rect1["bottom"] if rect1["bottom"] < rect2["bottom"] else rect2[
"bottom"]
left = rect1["left"] if rect1["left"] > rect2["left"] else rect2["left"]
right = rect1["right"] if rect1["right"] < rect2["right"] else rect2[
"right"]
return left, top, right, bottom
class Exists(object):
"""Exists object with magic methods."""
def __init__(self, uiobject):
self.uiobject = uiobject
def __nonzero__(self):
"""Magic method for bool(self) python2 """
return self.uiobject.jsonrpc.exist(self.uiobject.selector)
def __bool__(self):
""" Magic method for bool(self) python3 """
return self.__nonzero__()
def __call__(self, timeout=0):
"""Magic method for self(args).
Args:
timeout (float): exists in seconds
"""
if timeout:
return self.uiobject.wait(timeout=timeout)
return bool(self)
def __repr__(self):
return str(bool(self))
def list2cmdline(args: Union[list, tuple]):
return ' '.join(list(map(shlex.quote, args)))
def inject_call(fn, *args, **kwargs):
"""
Call function without known all the arguments
Args:
fn: function
args: arguments
kwargs: key-values
Returns:
as the fn returns
"""
assert callable(fn), "first argument must be callable"
st = inspect.signature(fn)
fn_kwargs = {
key: kwargs[key]
for key in st.parameters.keys() if key in kwargs
}
ba = st.bind(*args, **fn_kwargs)
ba.apply_defaults()
return fn(*ba.args, **ba.kwargs)
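# Hedged example (added; not part of the original file): inject_call forwards only the
# keyword arguments the target function actually declares, so extra keys are dropped.
def _inject_call_demo():
    def _greet(name, punctuation="!"):
        return "hello " + name + punctuation
    # 'unused' is silently ignored because _greet has no such parameter
    return inject_call(_greet, name="world", unused=123)   # -> "hello world!"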
class ProgressReader:
    """Placeholder for a file-like reader that reports read progress (not implemented)."""
    def __init__(self, rd):
        pass
    def read(self, size=-1):
        pass
def natualsize(size: int):
_KB = 1 << 10
_MB = 1 << 20
_GB = 1 << 30
if size >= _GB:
return '{:.1f} GB'.format(size / _GB)
elif size >= _MB:
return '{:.1f} MB'.format(size / _MB)
else:
return '{:.1f} KB'.format(size / _KB)
def swipe_in_bounds(d: "uiautomator2.Device",
bounds: list,
direction: Union[Direction, str],
scale: float = 0.6):
"""
Args:
d: Device object
bounds: list of [lx, ly, rx, ry]
direction: one of ["left", "right", "up", "down"]
scale: percent of swipe, range (0, 1.0)
Raises:
AssertionError, ValueError
"""
def _swipe(_from, _to):
print("SWIPE", _from, _to)
d.swipe(_from[0], _from[1], _to[0], _to[1])
assert 0 < scale <= 1.0
assert len(bounds) == 4
lx, ly, rx, ry = bounds
width, height = rx - lx, ry - ly
h_offset = int(width * (1 - scale)) // 2
v_offset = int(height * (1 - scale)) // 2
left = lx + h_offset, ly + height // 2
up = lx + width // 2, ly + v_offset
right = rx - h_offset, ly + height // 2
bottom = lx + width // 2, ry - v_offset
if direction == Direction.LEFT:
_swipe(right, left)
elif direction == Direction.RIGHT:
_swipe(left, right)
elif direction == Direction.UP:
_swipe(bottom, up)
elif direction == Direction.DOWN:
_swipe(up, bottom)
else:
raise ValueError("Unknown direction:", direction)
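# Usage sketch (added for illustration; not part of the original file): swiping left
# inside an element's bounds on a connected device. The element selector below is an
# assumption used purely for illustration.
#
#   import uiautomator2
#   d = uiautomator2.connect()                       # assumes one device is attached
#   info = d(resourceId="android:id/list").info      # hypothetical on-screen element
#   b = info["bounds"]
#   swipe_in_bounds(d, [b["left"], b["top"], b["right"], b["bottom"]],
#                   Direction.LEFT, scale=0.6)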
if __name__ == "__main__":
for n in (1, 10000, 10000000, 10000000000):
print(n, natualsize(n))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2018-09-22
# @Filename: mangadb.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from peewee import (BooleanField, FloatField, ForeignKeyField,
IntegerField, PrimaryKeyField, TextField)
from . import OperationsDBModel, database # noqa
from .platedb import Exposure as PlatedbExposure
from .platedb import Plate as PlatedbPlate
class UnknownField(object):
def __init__(self, *_, **__):
pass
class CurrentStatus(OperationsDBModel):
camera = TextField(null=True)
exposure_no = IntegerField(null=True)
flavor = TextField(null=True)
mjd = IntegerField(null=True)
pk = PrimaryKeyField()
unpluggedifu = BooleanField(null=True)
class Meta:
db_table = 'current_status'
schema = 'mangadb'
class Plate(OperationsDBModel):
all_sky_plate = BooleanField(null=True)
comment = TextField(null=True)
commissioning_plate = BooleanField(null=True)
manga_tileid = IntegerField(null=True)
neverobserve = BooleanField()
pk = PrimaryKeyField()
platedb_plate = ForeignKeyField(column_name='platedb_plate_pk',
null=False,
model=PlatedbPlate,
field='pk',
unique=True)
special_plate = BooleanField(null=True)
ha_min = FloatField(null=True)
ha_max = FloatField(null=True)
field_name = TextField(null=True)
completion_factor = FloatField(null=True)
class Meta:
db_table = 'plate'
schema = 'mangadb'
class DataCube(OperationsDBModel):
b1_sn2 = FloatField(null=True)
b2_sn2 = FloatField(null=True)
pk = PrimaryKeyField()
platedb_plate = ForeignKeyField(column_name='plate_pk',
null=True,
model=PlatedbPlate,
field='pk')
r1_sn2 = FloatField(null=True)
r2_sn2 = FloatField(null=True)
class Meta:
db_table = 'data_cube'
schema = 'mangadb'
class ExposureStatus(OperationsDBModel):
label = TextField(null=True)
pk = PrimaryKeyField()
class Meta:
db_table = 'exposure_status'
schema = 'mangadb'
class SetStatus(OperationsDBModel):
label = TextField(null=True)
pk = PrimaryKeyField()
class Meta:
db_table = 'set_status'
schema = 'mangadb'
class Set(OperationsDBModel):
comment = TextField(null=True)
name = TextField(null=True)
pk = PrimaryKeyField()
status = ForeignKeyField(column_name='set_status_pk',
null=True, model=SetStatus,
field='pk',
backref='sets')
class Meta:
db_table = 'set'
schema = 'mangadb'
class Exposure(OperationsDBModel):
comment = TextField(null=True)
data_cube = ForeignKeyField(column_name='data_cube_pk',
null=True,
model=DataCube,
backref='exposures',
field='pk')
dither_dec = FloatField(null=True)
dither_position = UnknownField(null=True) # ARRAY
dither_ra = FloatField(null=True)
status = ForeignKeyField(column_name='exposure_status_pk',
null=True,
model=ExposureStatus,
field='pk',
backref='exposures')
ha = FloatField(null=True)
pk = PrimaryKeyField()
platedb_exposure = ForeignKeyField(column_name='platedb_exposure_pk',
null=True,
model=PlatedbExposure,
field='pk',
backref='mangadb_exposure')
seeing = FloatField(null=True)
set = ForeignKeyField(column_name='set_pk',
null=True,
model=Set,
field='pk',
backref='exposures')
transparency = FloatField(null=True)
class Meta:
db_table = 'exposure'
schema = 'mangadb'
class ExposureToDataCube(OperationsDBModel):
data_cube = ForeignKeyField(column_name='data_cube_pk',
null=True,
model=DataCube,
field='pk')
exposure = ForeignKeyField(column_name='exposure_pk',
null=True,
model=Exposure,
field='pk')
pk = PrimaryKeyField()
class Meta:
db_table = 'exposure_to_data_cube'
schema = 'mangadb'
class Filelist(OperationsDBModel):
name = TextField(null=True)
path = TextField(null=True)
pk = PrimaryKeyField()
class Meta:
db_table = 'filelist'
schema = 'mangadb'
class Sn2Values(OperationsDBModel):
b1_sn2 = FloatField(null=True)
b2_sn2 = FloatField(null=True)
exposure = ForeignKeyField(column_name='exposure_pk',
null=True,
model=Exposure,
backref='sn2_values',
field='pk')
pipeline_info_pk = IntegerField(null=True)
pk = PrimaryKeyField()
r1_sn2 = FloatField(null=True)
r2_sn2 = FloatField(null=True)
class Meta:
db_table = 'sn2_values'
schema = 'mangadb'
class Spectrum(OperationsDBModel):
data_cube = ForeignKeyField(column_name='data_cube_pk',
null=True,
model=DataCube,
backref='spectrums',
field='pk')
exposure = ForeignKeyField(column_name='exposure_pk',
null=True,
model=Exposure,
backref='spectrums',
field='pk')
fiber = IntegerField(null=True)
ifu_no = IntegerField(null=True)
pk = PrimaryKeyField()
class Meta:
db_table = 'spectrum'
schema = 'mangadb'
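# Usage sketch (added for illustration; not part of the original module). With the
# peewee models above, related rows can be traversed through the declared foreign
# keys and backrefs; the filter value 'Good' is illustrative only.
#
#   good_exposures = (Exposure
#                     .select(Exposure, ExposureStatus)
#                     .join(ExposureStatus)
#                     .where(ExposureStatus.label == 'Good'))
#   for exp in good_exposures:
#       print(exp.pk, exp.status.label, exp.seeing)
#       for sn2 in exp.sn2_values:          # backref declared on Sn2Values.exposure
#           print(sn2.b1_sn2, sn2.r1_sn2)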
|
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
from os.path import abspath, dirname
import webbrowser
from fs.opener import open_fs
from ...context import Context
from ...command import SubCommand
from ...wsgi import WSGIApplication
from ...compat import urlencode, quote
from ...compat import socketserver
from ...compat import PY2
if PY2:
from thread import interrupt_main
else:
from _thread import interrupt_main
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler, make_server
log = logging.getLogger("moya.runtime")
class RequestHandler(WSGIRequestHandler):
# Disable simple_server's logging to stdout
def log_message(self, format, *args):
pass
class ThreadedWSGIServer(socketserver.ThreadingMixIn, WSGIServer):
daemon_threads = True
class Showform(SubCommand):
"""Show a form from the project"""
help = "show a form from the project"
def add_arguments(self, parser):
parser.add_argument(
dest="formelement", metavar="ELEMENTREF", help="form element reference"
)
parser.add_argument(
"-l",
"--location",
dest="location",
default=None,
metavar="PATH",
help="location of the Moya server code",
)
parser.add_argument(
"-i",
"--ini",
dest="settings",
default=None,
metavar="SETTINGSPATH",
help="path to project settings file",
)
parser.add_argument(
"-s",
"--server",
dest="server",
default="main",
metavar="SERVERREF",
help="server element to use",
)
parser.add_argument(
"-H",
"--host",
dest="host",
default="127.0.0.1",
help="IP address to bind to",
)
parser.add_argument(
"-p", "--port", dest="port", default="8001", help="port to listen on"
)
parser.add_argument(
"-d",
"--develop",
dest="develop",
action="store_true",
default=False,
help="enable develop mode for debugging Moya server",
)
return parser
@classmethod
def _post_build(cls, application):
lib = application.archive.load_library_from_module(
"moya.libs.showform", priority=100, template_priority=100
)
application.archive.build_libs()
context = Context()
application.archive.call(
"moya.showform#install", context, "__showform__", server=application.server
)
def run(self):
super(Showform, self).run()
args = self.args
application = WSGIApplication(
self.location,
self.get_settings(),
args.server,
validate_db=False,
develop=self.args.develop,
post_build_hook=self._post_build,
)
form_app, form = application.archive.get_element(args.formelement)
log.info("testing {form} in {form_app}".format(form=form, form_app=form_app))
server = make_server(
args.host,
int(args.port),
application,
server_class=ThreadedWSGIServer,
handler_class=RequestHandler,
)
def handle_error(request, client_address):
_type, value, tb = sys.exc_info()
if isinstance(value, KeyboardInterrupt):
interrupt_main()
# Allow keyboard interrupts in threads to propagate
server.handle_error = handle_error
formelement_quoted = quote(args.formelement)
url = "http://{}:{}/moya-show-form/form/{}/".format(
args.host, args.port, formelement_quoted
)
log.info("opening %s", url)
webbrowser.open(url)
try:
server.serve_forever()
finally:
log.debug("user exit")
application.close()
|
# -*- encoding: utf-8 -*-
#Written by: Karim shoair - D4Vinci ( Cr3dOv3r )
import sys,os
from . import updater
from .websites import *
from .color import *
def getinput(text):
    # Return the suitable input function according to the Python version
    if sys.version_info[0] == 3:
        return input(text)
    else:
        return raw_input(text)  # noqa: F821
def banner():
banner = open(os.path.join("Data","banners.txt")).read()
os.system('clear')
banner_to_print = G + banner.format(Name=R+"Cr3d0v3r By "+Bold+B+"D4Vinci - Edited by HeroS3c" + G,
Description=C+"Know the dangers of email credentials reuse attacks."+G,
        MassiveVersion=R+"Cr3d0v3r Mass:"+G+ " The Cr3d0v3r version for checking an entire list of emails"+B+" ("+C+"Created by"+B+" HeroS3c)"+G) + end
print(banner_to_print)
all_websites =list(websites.keys()) + list(custom_websites.keys()) + list(req_websites.keys())
|
import _pytest.mark
import pytest
unit: _pytest.mark.MarkDecorator = pytest.mark.unit
param: _pytest.mark.MarkDecorator = pytest.mark.parametrize
web: _pytest.mark.MarkDecorator = pytest.mark.web
api: _pytest.mark.MarkDecorator = pytest.mark.api
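# Usage sketch (added for illustration; not part of the original file): the aliases
# above are applied to tests like any other pytest mark, e.g.
#
#   @unit
#   @param("value,expected", [(1, 1), (2, 4)])
#   def test_square(value, expected):
#       assert value ** 2 == expected
#
# The custom marks ("unit", "web", "api") would normally also be registered under
# `markers` in pytest.ini / pyproject.toml to avoid unknown-mark warnings.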
|
importCommon()  # assumed to set up the author's analysis environment (e.g. GAP, Structure, General, Colors)
import numpy
import numpy as np
import pandas as pd
import DicerMiRNA
#########################
## Functions: About SHAPE and Structure
#########################
class TransInfo:
def __init__(self, name):
self.full_shape = []
self.full_seq = ""
self.stem_loop = [0,0,0,0]
self.full_dot = ""
self.name = name
self.enrich_score = 0
self.cleavage_score = 0
self.stemloop_seq = ""
self.stemloop_shape = []
self.stemloop_dot = []
self.ago2 = 0
self.ago3 = 0
self.ago2_normed = 0
self.ago3_normed = 0
self.rnatype = ""
self.cluster_id = -1
def stemLength(self):
return ((self.stem_loop[1]-self.stem_loop[0])+(self.stem_loop[3]-self.stem_loop[2]))/2
def PredictStructure(dataframe, Sequence, SHAPE):
TransList = []
for tid in dataframe.index:
if tid not in SHAPE:
continue
print("Process ", tid, "...")
trans = TransInfo(tid)
seq = Sequence[tid]
shape = SHAPE[tid]
if len(seq)>250:
continue
if tid not in dataframe.index:
continue
enrich_score = round(dataframe.loc[tid, 'RIP/INPUT'],3)
cleavage_score = round(dataframe.loc[tid, 'OEMut/OEWT'],3)
ago2 = round(dataframe.loc[tid, 'AGO2/INPUT'],3)
ago3 = round(dataframe.loc[tid, 'AGO3/INPUT'],3)
dot = Structure.predict_structure(seq, shape)
#stem_loops = Structure.find_stem_loop(dot, max_loop_len=15, max_stem_gap=5, min_stem_len=5)
if enrich_score>1:
stem_loops = Structure.find_stem_loop(dot, max_loop_len=15, max_stem_gap=5, min_stem_len=10)
else:
stem_loops = Structure.find_stem_loop(dot, max_loop_len=10, max_stem_gap=3, min_stem_len=4)
if len(stem_loops) == 0:
continue
max_sl = []
if enrich_score>1:
aucs = []
for stemloop in stem_loops:
subdot = dot[stemloop[0]-1:stemloop[3]]
subshape = shape[stemloop[0]-1:stemloop[3]]
if len(subshape)-subshape.count('NULL')<10:
continue
auc = General.calc_AUC_v2(subdot, subshape)
aucs.append([stemloop,auc])
aucs.sort(key=lambda x: x[1],reverse=True)
if len(aucs)==0:
continue
max_sl,sl_auc = aucs[0]
else:
mid = int( len(stem_loops)/2 )
max_sl = stem_loops[mid]
trans.full_shape = shape
trans.full_seq = seq
trans.stem_loop = max_sl
trans.full_dot = dot
trans.enrich_score = enrich_score
trans.cleavage_score = cleavage_score
trans.ago2_normed = ago2
trans.ago3_normed = ago3
TransList.append(trans)
print ("Raw transcripts:", len(SHAPE))
print ("Transcripts with stemloop:", len(TransList))
return TransList
def RNAType_To_Colors(RNAType_list):
RNAColor = {'mRNAExon': Colors.RGB['amber'],\
'mRNAIntron': Colors.RGB['amber'],\
'intergenic': Colors.RGB['amber'],\
'mRNA-intergenic-lncRNA': Colors.RGB['amber'],\
'SNORA': Colors.RGB['light_green'],\
'SNORD': Colors.RGB['blue'],\
'miRNA': Colors.RGB['green'],\
'tRNA': Colors.RGB['deep_orange'],\
'snRNA': Colors.RGB['brown']}
return [ RNAColor.get(rt,'#f1f1f1') for rt in RNAType_list ]
def is_valid_RNA(RNA_id):
if RNA_id.startswith('mRNA') or \
RNA_id.startswith('intergenic') or \
RNA_id.startswith('lncRNA') or \
RNA_id.startswith('snoRNA') or \
RNA_id.startswith('miRNA') or \
RNA_id.startswith('tRNA') or \
RNA_id.startswith('miscRNA') or \
RNA_id.startswith('snRNA'):
return True
return False
def read_depth(inFn):
Depth = {}
for line in open(inFn):
tid,pos,cov = line.strip().split()
if tid not in Depth:
Depth[tid] = []
Depth[tid].append(int(cov))
return Depth
def stemLoopGetSHAPE(TransList, SHAPE):
TransWithStemLoop = []
for trans in TransList:
tid = trans.name
sl = trans.stem_loop
dot = trans.full_dot
seq = trans.full_seq
shape = trans.full_shape
ls,le,rs,re = sl
sl_mid = int((le+rs)/2)
pseudo_shape = ['NULL']*60+[min(max(float(it),0),1) if it!='NULL' else 'NULL' for it in shape]+['NULL']*60
pseudo_dot = '.'*60+dot+'.'*60
pseudo_seq = 'N'*60+seq+'N'*60
start = sl_mid+60-30
end = sl_mid+60+30
seg_seq = pseudo_seq[start:end]
seg_shape = pseudo_shape[start:end]
seg_dot = pseudo_dot[start:end]
if seg_shape.count('NULL')>5:
continue
if not is_valid_RNA(trans.name):
print("Warning: filter ", trans.name, " Not a valid RNA")
continue
trans.stemloop_seq = seg_seq
trans.stemloop_shape = seg_shape
trans.stemloop_dot = seg_dot
TransWithStemLoop.append(trans)
print("Raw number of transcript:",len(TransList))
print("Number of transcripts with structures:",len(TransWithStemLoop))
return TransWithStemLoop
def stemLoopGetAgo(TransList, AGO2, AGO3):
for trans in TransList:
tid = trans.name
sl = trans.stem_loop
left,right = sl[0],sl[3]
if tid in AGO2:
trans.ago2 = max(AGO2[tid][left:right])
if tid in AGO3:
trans.ago3 = max(AGO3[tid][left:right])
def stemLoopGetEnrich(TransList, normed_df_detected):
for trans in TransList:
tid = trans.name
if tid in normed_df_detected.index:
trans.enrich_score = round(normed_df_detected.loc[tid,'RIP/INPUT'],3)
trans.cleavage_score = round(normed_df_detected.loc[tid,'OEMut/OEWT'],3)
else:
trans.enrich_score = trans.cleavage_score = np.nan
def renameTrans(trans_list, Gaper):
for trans in trans_list:
name_items = trans.name.split('_')
if name_items[1].startswith('ENST'):
gene_name = Gaper.getTransFeature(name_items[1])['gene_name']
trans.name = name_items[0]+"_"+name_items[1]+"_"+gene_name
trans.rnatype = trans.name.split('_')[0]
if 'SNORA' in trans.name:
trans.rnatype = 'SNORA'
if 'SNORD' in trans.name:
trans.rnatype = 'SNORD'
if trans.rnatype in ('mRNAExon','mRNAIntron','intergenic','lncRNA','miscRNA'):
trans.rnatype = 'mRNA-intergenic-lncRNA'
#########################
## Read shape and sequence
#########################
shape_1000, seq_1000 = DicerMiRNA.read_shape("/Share2/home/zhangqf7/lipan/precursor_SHAPEMAP/Final-Dicer-Run/RIP_NAIN3_repX_20190514/shape_files_1000")
Gaper = GAP.init("/150T/zhangqf/GenomeAnnotation/Gencode/hg38.genomeCoor.bed")
AGO2 = read_depth("/Share2/home/zhangqf7/lipan/precursor_SHAPEMAP/Cell_2014/3.mapBWA/AGO2.depth")
AGO3 = read_depth("/Share2/home/zhangqf7/lipan/precursor_SHAPEMAP/Cell_2014/3.mapBWA/AGO3.depth")
#########################
## Read RNA-Seq data & Get SHAPE & Add Ago
#########################
def get_normed_rnaseq_table():
#########################
## Read data and combine
#########################
genecountFn = "/Share2/home/zhangqf7/lipan/precursor_SHAPEMAP/which_rna_enriched/3.organized_table/RNA-Seq-genecount.txt"
df = pd.read_csv(genecountFn,sep="\t",index_col=0)
AGO2 = read_depth("/Share2/home/zhangqf7/lipan/precursor_SHAPEMAP/Cell_2014/3.mapBWA/AGO2.depth")
AGO3 = read_depth("/Share2/home/zhangqf7/lipan/precursor_SHAPEMAP/Cell_2014/3.mapBWA/AGO3.depth")
df['AGO2'] = [0] * df.shape[0]
df['AGO3'] = [0] * df.shape[0]
for tid in df.index:
if tid in AGO2:
df.loc[tid, 'AGO2'] = sum(AGO2[tid])
for tid in df.index:
if tid in AGO3:
df.loc[tid, 'AGO3'] = sum(AGO3[tid])
FSS = df['FSS_rep1']+df['FSS_rep2']+df['FSS_rep3']+df['FSS_rep4']
OEWT = df['OEWTDicer_rep1']+df['OEWTDicer_rep2']+df['OEWTDicer_rep3']+df['OEWTDicer_rep4']
OEMut = df['OEMutDicer_rep1']+df['OEMutDicer_rep2']+df['OEMutDicer_rep3']+df['OEMutDicer_rep4']
INPUT = df['INPUT_rep1_20190514']+df['INPUT_rep2_20190514']+df['INPUT_rep3_20190514']
RIP = df['RIP_DMSO_rep1_20190514']+df['RIP_DMSO_rep2_20190514']
AGO2 = df['AGO2']
AGO3 = df['AGO3']
concat_df = pd.concat([INPUT, RIP, FSS, OEWT, OEMut, AGO2, AGO3],axis=1)
concat_df.columns = ['INPUT', 'RIP', 'FSS', 'OEWT', 'OEMut', 'AGO2', 'AGO3']
RNASeq_high = concat_df.loc[:,('FSS','OEWT','OEMut')].sum(axis=1)>30
#########################
## Normalize Data
#########################
libsize = concat_df.sum()
normed_df = concat_df/libsize
normed_df = normed_df.loc[(concat_df['RIP']>10)&(RNASeq_high),:]
normed_df['RIP/INPUT'] = np.log2(normed_df.RIP/(normed_df.INPUT+1e-15))
normed_df['FSS/OEWT'] = np.log2(normed_df.FSS/(normed_df.OEWT+1e-15))
normed_df['OEMut/OEWT'] = np.log2(normed_df.OEMut/(normed_df.OEWT+1e-15))
normed_df['AGO2/INPUT'] = np.log2(normed_df.AGO2/(normed_df.INPUT+1e-15))
normed_df['AGO3/INPUT'] = np.log2(normed_df.AGO3/(normed_df.INPUT+1e-15))
normed_df = normed_df.sort_values(by='RIP/INPUT', ascending=False)
normed_df.loc[normed_df['FSS/OEWT']==numpy.Infinity,'FSS/OEWT'] = numpy.nan
normed_df.loc[normed_df['OEMut/OEWT']==numpy.Infinity,'OEMut/OEWT'] = numpy.nan
normed_df.loc[normed_df['FSS/OEWT']==-numpy.Infinity,'FSS/OEWT'] = numpy.nan
normed_df.loc[normed_df['OEMut/OEWT']==-numpy.Infinity,'OEMut/OEWT'] = numpy.nan
normed_df.loc[normed_df['AGO2/INPUT']==-numpy.Infinity,'AGO2/INPUT'] = numpy.nan
normed_df.loc[normed_df['AGO3/INPUT']==-numpy.Infinity,'AGO3/INPUT'] = numpy.nan
return normed_df
normed_df_detected = get_normed_rnaseq_table(); print(normed_df_detected.shape)
normed_df_detected = normed_df_detected.loc[ set(shape_1000.keys())&set(normed_df_detected.index) ,:].sort_values(by='RIP/INPUT', ascending=False)
print(normed_df_detected.shape)
TransList = PredictStructure(normed_df_detected, seq_1000, shape_1000)
renameTrans(TransList, Gaper)
TransWithStemLoop = stemLoopGetSHAPE(TransList, shape_1000)
stemLoopGetAgo(TransWithStemLoop, AGO2, AGO3)
print(len(normed_df_detected))
print(len(TransList))
print(len(TransWithStemLoop))
trans_dict = { trans.name:trans for trans in TransWithStemLoop }
#########################
## Prepare the data
#########################
def show_rna_ratio(tid_list):
Len = len(tid_list)
tRNA = len([ name for name in tid_list if 'tRNA' in name ]) / Len * 100
SNORA = len([ name for name in tid_list if 'SNORA' in name ]) / Len * 100
SNORD = len([ name for name in tid_list if 'SNORD' in name ]) / Len * 100
miRNA = len([ name for name in tid_list if 'miRNA' in name ]) / Len * 100
snRNA = len([ name for name in tid_list if 'snRNA' in name ]) / Len * 100
other = 100-tRNA-SNORA-SNORD-miRNA-snRNA
print("tRNA: %.2f%%; SNORA: %.2f%%; SNORD: %.2f%%; miRNA: %.2f%%; snRNA: %.2f%%; other: %.2f%%; Number: %d" % (tRNA, SNORA, SNORD, miRNA, snRNA, other, Len))
def dfshape(trans_list, leave=['tRNA','miRNA','SNORA','SNORD']):
shape_list = []
tid_list = []
type_list = []
enrich_list = []
cleavage_list = []
for trans in trans_list:
mytype = trans.rnatype
if mytype not in ('tRNA','miRNA','SNORA','SNORD','mRNA-intergenic-lncRNA'):
mytype = 'other'
type_list.append(mytype)
shape_array = trans.stemloop_shape[:]
tid_list.append(trans.name)
shape_list.append( [ it if it!='NULL' else np.nan for it in shape_array ] )
enrich_list.append(trans.enrich_score)
cleavage_list.append(trans.cleavage_score)
df = pd.DataFrame(shape_list, index=tid_list)
for i in range(df.shape[1]):
df.loc[df.iloc[:,i].isna(),i] = df.mean()[i]
    Filter = [it in leave for it in type_list]
    df = df.loc[Filter, :]
    # Filter the parallel lists with the same mask, before type_list is overwritten
    enrich_list = [e for e, keep in zip(enrich_list, Filter) if keep]
    cleavage_list = [c for c, keep in zip(cleavage_list, Filter) if keep]
    type_list = [it for it in type_list if it in leave]
return df, type_list, enrich_list, cleavage_list
df, type_list, enrich_list, cleavage_list = dfshape(TransWithStemLoop, leave=['tRNA','miRNA','SNORA','SNORD','mRNA-intergenic-lncRNA'])
|
from tkinter import *
import tkinter.messagebox
from random import *
import datetime
now = datetime.datetime.now()
def dayandnight():
if now.strftime("%X") > '16:45:39':
return 'black'
elif now.strftime("%X") < '15:40:00':
return 'white'
def dayand():
if now.strftime("%X") > '16:45:39':
return 'white'
elif now.strftime("%X") < '15:40:00':
return 'black'
def newgui():
src = Tk()
src.title('')
src.geometry('400x360+100+200')
isi = open("times.txt","a")
isi.write("%s %s\n" % (now.strftime("%X"), str(hello.get())))
stms = open("dailytask","a")
time = "%b-%d/%m/%Y"
stms.write("%s %s\n" % (now.strftime(time), str(hello.get())))
Label(src, text = str(hello.get())).pack()
src.mainloop()
def shutdown():
try:
        if tkinter.messagebox.askokcancel(title = "info", message = "are you sure you want to quit?"):
            testy.destroy()
    except Exception as ex:
        tkinter.messagebox.showinfo("Error", "can't find %s" % ex)
# create a app
testy = Tk()
# app title name
testy.title('Sticky Notes')
# label name
Label(testy, text = "hello").pack()
hello = Entry(testy)
hello.pack()
# button
Button(testy, text = "noteit", command = newgui).pack()
source = open("dailytask","r")
for each_item in source:
som = Tk()
(a,sdd) = each_item.split(' ',1)
som.title(str(a))
source = str(randint(700,1000))
soe = str(randint(400,600))
som.geometry(str('250x250+' + source + '+'+ soe))
#dayandnight.pack(side = 'left', padx = 10, pady = 10)
soap = Label(som, text = str(sdd),width = "10", height = "6", bg = dayandnight(), fg = dayand())
soap.pack(side = 'left', padx = 10, pady = 10)
x = datetime.datetime.now()
if x.strftime("%X") > '16:30:39':
som['bg']='black'
elif x.strftime("%X") < '15:40:00':
som['bg']='snow'
def kk():
source = str(randint(700,1000))
soe = str(randint(400,600))
som.geometry(str('250x250+' + source + '+'+ soe))
def ioto(file):
    # Check whether a task has already been written today in the given file
    smtsss = str(now.strftime("%b-%d/%m/%Y"))
    popss = open(file)
    for kk in popss:
        (s, sm) = kk.split(' ', 1)
        if s.find(smtsss) == 0:
            return ("you have already written some task")
    return ("write some task & be active")
#load for particular date
Label(testy, text = ioto("dailytask")).pack()
sou = StringVar()
sou.set(None)
testy.protocol("WM_DELETE_WINDOW",shutdown)
# full app
som.mainloop()
testy.mainloop()
|
import os
import json
import yaml
from pathlib import Path
def first_time_setup(credential_location="."):
settings_path = os.path.join(credential_location, "settings.yaml")
client_secrets_path = os.path.join(credential_location, "client_secrets.json")
credentials_json_path = os.path.join(credential_location, "credentials.json")
if not os.path.exists(settings_path):
settings_dict = {
"client_config_backend": "settings",
"client_config": {
"client_id": None,
"client_secret": None,
},
"save_credentials": True,
"save_credentials_backend": "file",
"save_credentials_file": credentials_json_path,
"get_refresh_token": True
}
with open(settings_path, "w") as settings_file:
yaml.dump(settings_dict, settings_file, default_flow_style=False)
else:
with open(settings_path, "r") as settings_file:
settings_dict = yaml.safe_load(settings_file)
print("""
Instructions (based on: https://pythonhosted.org/PyDrive/quickstart.html#authentication)
1. Go here https://console.developers.google.com/iam-admin/projects
2. Search for "Google Drive API", select the entry
3. Create a project, select it in the top dropdown, and click "Enable".
4. Select "Credentials" from the left menu. Select configure OAuth Consent Screen.
5. Set it to Internal and fill in Application name (e.g. PyDrive Access)
6. Select `Credentials` again from the left menu. Click "+Create Credentials", select "OAuth client ID".
You can choose now to log in with a web browser (log in with Google) or in the command line.
For web browser login:
Select "Application type" to be Web application.
Enter an appropriate name (e.g. gdrive-access)
Input http://localhost:8080 for "Authorized JavaScript origins".
Input http://localhost:8080/ for "Authorized redirect URIs".
For command line login:
Select "Application type" to be Desktop.
Enter an appropriate name (e.g. gdrive-access)
7. Click "Save".
8. Click "Download JSON" on the right side of Client ID to download client_secret_<really long ID>.json.
The downloaded file has all authentication information of your application. Rename the file to "client_secrets.json" and place it in {}.
""".format("your working directory" if credential_location == "." else credential_location))
while not os.path.exists(client_secrets_path):
input("Follow the above instructions to get Google Drive client credentials.\n"
"Hit enter when you have saved {} (Ctrl-C to abort)\n".format(client_secrets_path))
with open(client_secrets_path, "r") as json_file:
data = json.load(json_file)
if "web" in data:
settings_dict["client_config"]["client_id"] = data["web"]["client_id"]
settings_dict["client_config"]["client_secret"] = data["web"]["client_secret"]
elif "installed" in data:
settings_dict["client_config"]["client_id"] = data["installed"]["client_id"]
settings_dict["client_config"]["client_secret"] = data["installed"]["client_secret"]
else:
raise Exception("Unexpected key found in client_secrets.json. Expected web or installed, but"
" got {}. Maybe you didn't choose Application Type 'Web Application' or "
"'Desktop'?".format(list(data.keys())))
with open(settings_path, "w") as settings_file:
yaml.dump(settings_dict, settings_file, default_flow_style=False)
Path(credentials_json_path).touch()
def run():
import argparse
from .access import get_auth
parser = argparse.ArgumentParser(description="Setup Google Drive API credentials")
parser.add_argument("--dir", type=str, default=".", help="Directory to store credentials")
args = parser.parse_args()
first_time_setup(credential_location=args.dir)
settings_path = os.path.join(args.dir, "settings.yaml")
with open(settings_path, "r") as settings_file:
settings_dict = yaml.safe_load(settings_file)
auth = get_auth(settings_path, webauth="web" in settings_dict)
print("\nCongrats you are authenticated!\n")
if __name__ == "__main__":
run()
|
"""
Stack - List
LIFO
"""
class Stack:
def __init__(self):
self.array = []
def __str__(self):
return str(self.__dict__)
def print_length(self):
print(len(self.array))
def peek(self):
return self.array[-1]
def push(self, data):
self.array.append(data)
return self.array
def pop(self):
self.array.pop()
return self.array
print("# Stack - List")
s = Stack()
s.push("google")
s.push("udemy")
s.push("discord")
print("## Info")
print(s)
print(s.peek())
s.print_length()
print("## pop")
s.pop()
print(s)
print()
"""
Stack - LinkedList
LIFO
"""
class Node:
def __init__(self, data):
self.data = data
self.next = None
class Stack:
def __init__(self) -> None:
self.top = None
self.bottom = None
self.length = 0
def __repr__(self):
output = []
node = self.top
while node:
output.append(str(node.data))
node = node.next
output.append("None")
return " -> ".join(output)
def print_length(self):
print(self.length)
def peek(self):
return self.top
def push(self, data):
new_node = Node(data)
new_node.next = self.top
self.top = new_node
if self.length == 0:
self.bottom = self.top
self.length += 1
def pop(self):
if self.length == 0:
return "Empty queue, pop nothing"
else:
output = self.top.data
self.top = self.top.next
self.length -= 1
return output
print("# Stack - LinkedList")
s = Stack()
s.push("google")
s.push("udemy")
s.push("discord")
print("## Info")
print(s)
print(s.peek())
# print(s.length)
s.print_length()
print(s.top.data)
print(s.bottom.data)
# print(s.top.data)
# print(s.top.next.data)
# print(s.top.next.next.data)
print("## pop")
s.pop()
print(s)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Web hook implementation."""
import logging
import string
from six.moves import urllib
from tradefed_cluster.services import app_manager
from multitest_transport.plugins import base
class WebHook(base.TestRunHook):
"""Hook which makes HTTP requests during a test run."""
name = 'Web'
def __init__(self, url=None, http_method=None, data=None, **_):
self.url = url
self.http_method = http_method or 'GET'
self.data = data
def Execute(self, context):
"""Make HTTP request according to test run context."""
test_run_context = context.test_run.GetContext()
url = string.Template(self.url).safe_substitute(test_run_context)
data = None
if self.data:
data = string.Template(self.data).safe_substitute(test_run_context)
hostname = app_manager.GetInfo('default').hostname
logging.info('Invoking a webhook: url=%s, method=%s, data=%s',
url, self.http_method, data)
request = urllib.request.Request(
url=url, data=data, headers={'X-MTT-HOST': hostname})
request.get_method = lambda: self.http_method
response = urllib.request.urlopen(request)
logging.info('Response: %s', response.read())
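def _template_demo():
  """Illustrative sketch (added; not part of the original module) of the string.Template
  substitution used by Execute(): placeholders in `url`/`data` are filled from the test
  run context, and unknown placeholders are left untouched by safe_substitute().
  The context key below is an assumption used purely for illustration.
  """
  context = {'MTT_TEST_RUN_ID': '1234'}
  template = string.Template('http://example.com/runs/${MTT_TEST_RUN_ID}/${UNKNOWN}')
  return template.safe_substitute(context)  # -> 'http://example.com/runs/1234/${UNKNOWN}'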
|
#!/usr/bin/python
import sys
from collections import defaultdict
from collections import OrderedDict
import re
import os
import argparse
import pysam
import glob
def usage():
test="name"
message='''
RelocaTE2: improved version of RelocaTE for calling transposable element insertions
'''
print message
#Retro1 ACGTC not.give Chr4 14199..14203 - T:7 R:4 L:3 ST:21 SR:9 SL:12
def txt2gff(infile, outfile):
#print infile, outfile
ins_type = ''
ofile = open(outfile, 'a')
count = 0
r_pos = re.compile(r'(\d+)\.\.(\d+)')
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
#print line
if len(line) > 2:
unit = re.split(r'\t',line)
count += 1
chro, start, end = ['', 0, 0]
chro = unit[3]
strand = unit[5]
ins_type = unit[1]
m = r_pos.search(unit[4])
if m:
start = m.groups(0)[0]
end = m.groups(0)[1]
r_count = re.sub(r'\D+', '', unit[7])
l_count = re.sub(r'\D+', '', unit[8])
r_supp = re.sub(r'\D+', '', unit[10])
l_supp = re.sub(r'\D+', '', unit[11])
r_id = 'repeat_%s_%s_%s' %(chro, start, end)
print >> ofile, '%s\t%s\t%s\t%s\t%s\t.\t%s\t.\tID=%s;TSD=%s;Note=%s;Right_junction_reads:%s;Left_junction_reads:%s;Right_support_reads:%s;Left_support_reads:%s;' %(chro, 'RelocaTE2', unit[2], start, end,strand, r_id, unit[1], ins_type, r_count, l_count, r_supp, l_supp)
ofile.close()
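# Illustrative example (added; not part of the original script): the sample record in
# the comment above,
#   Retro1  ACGTC  not.give  Chr4  14199..14203  -  T:7  R:4  L:3  ST:21  SR:9  SL:12
# is converted by txt2gff into one GFF-style line of the form:
#   Chr4  RelocaTE2  not.give  14199  14203  .  -  .  ID=repeat_Chr4_14199_14203;TSD=ACGTC;Note=ACGTC;Right_junction_reads:4;Left_junction_reads:3;Right_support_reads:9;Left_support_reads:12;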
#[name, seq, start, strand]
def Supporting_count(event, tsd_start, teSupportingReads):
total = 0
right = 0
left = 0
read1 = []
read2 = []
#print 'event: %s' %(event)
if teSupportingReads.has_key(event):
#print 'event: %s' %(event)
for read in teSupportingReads[event]:
name = read[0]
seq = read[1]
start = read[2]
strand = read[3]
#print name, seq, start, strand
if int(start) + len(seq) <= int(tsd_start) and strand == '+':
total += 1
left += 1
read1.append(name)
elif int(start) >= int(tsd_start) and strand == '-':
total += 1
right += 1
read2.append(name)
#del teSupportingReads[event]
return (total, left, right, ','.join(read1), ','.join(read2))
else:
return (0,0,0,'','')
def read_repeat_name(infiles):
data = defaultdict(list)
for infile in infiles:
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r'\t',line)
data[unit[0]] = [unit[1], unit[2]]
return data
#s1= re.compile(r'(\S+)\.[rf]')
#s2= re.compile(r'(\S+)\/[12]')
## define insertion as repeat family according reads <-> repeats relation from blat or bam files
def insertion_family_supporting(reads, read_repeat):
repeat_family = defaultdict(lambda : int())
for read in re.split(r',', reads):
read_name = read
read_name1 = '%s/1' %(read)
read_name2 = '%s/2' %(read)
read_name3 = '%s.f' %(read)
read_name4 = '%s.r' %(read)
if read_repeat.has_key(read_name):
repeat_family[read_repeat[read_name][0]] += 1
elif read_repeat.has_key(read_name1):
repeat_family[read_repeat[read_name1][0]] += 1
#print '%s,%s,%s,' %(read_name, read_name1, read_repeat[read_name1])
elif read_repeat.has_key(read_name2):
repeat_family[read_repeat[read_name2][0]] += 1
#print '%s,%s,%s,' %(read_name, read_name2, read_repeat[read_name2])
elif read_repeat.has_key(read_name3):
repeat_family[read_repeat[read_name3][0]] += 1
elif read_repeat.has_key(read_name4):
repeat_family[read_repeat[read_name4][0]] += 1
if len(repeat_family.keys()) == 1:
#return first element if only have one repeat
return repeat_family.keys()[0]
elif len(repeat_family.keys()) > 1:
#return the one have largest value
sorted_by_value = OrderedDict(sorted(repeat_family.items(), key=lambda x: x[1]))
return sorted_by_value.keys()[-1]
else:
return 'NA'
## define insertion as repeat family according reads <-> repeats relation from blat or bam files
def insertion_family(reads, read_repeat):
repeat_family = defaultdict(lambda : int())
r = re.compile(r'(.*):(start|end):(5|3)')
for read in re.split(r',', reads):
m = r.search(read)
read_name = m.groups(0)[0] if m else 'NA'
if read_name != 'NA' and read_repeat.has_key(read_name):
repeat_family[read_repeat[read_name][0]] += 1
if len(repeat_family.keys()) == 1:
#return first element if only have one repeat
return repeat_family.keys()[0]
elif len(repeat_family.keys()) > 1:
#return the one have largest value
sorted_by_value = OrderedDict(sorted(repeat_family.items(), key=lambda x: x[1]))
return sorted_by_value.keys()[-1]
else:
return ''
#mPing GAA not.give Chr4 3386246..3386248 - T:3 R:1 L:2 ST:0 SR:0 SL:0
def write_output(top_dir, result, read_repeat, usr_target, exper, TE, required_reads, required_left_reads, required_right_reads, teInsertions, teInsertions_reads, teSupportingReads, existingTE_inf, existingTE_found, teReadClusters, bedtools, lib_size, existingTE_intact_id):
ref = '%s/%s.%s.all_ref_insert.txt' %(result, usr_target, TE)
ref_gff = '%s/%s.%s.all_ref_insert.gff' %(result, usr_target, TE)
REF = open ('%s/%s.%s.all_ref_insert.txt' %(result, usr_target, TE), 'w')
#REFGFF = open ('%s/%s.%s.all_ref_insert.gff' %(result, usr_target, TE), 'w')
#READS = open ('%s/%s.%s.all_ref_reads.list' %(result, usr_target, TE), 'w')
r = re.compile(r'(\w+):(\d+)-(\d+):(.*)')
te_id_temp = []
for te_id in sorted(existingTE_intact_id.keys()):
if existingTE_found.has_key(te_id):
te_id_temp.append(te_id)
else:
repeat_junction = existingTE_intact_id[te_id]
strand, chro, start, end = ['', '', '', '']
if r.search(te_id):
strand = r.search(te_id).groups(0)[3]
chro = r.search(te_id).groups(0)[0]
start = r.search(te_id).groups(0)[1]
end = r.search(te_id).groups(0)[2]
print >> REF, '%s\t%s\t%s\t%s\t%s..%s\t%s\tT:0\tR:0\tL:0\tST:0\tSR:0\tSL:0' %(repeat_junction, 'Reference_Only', exper, chro, start, end, strand)
for te_id in te_id_temp:
#print 'Found: %s' %(te_id)
##junction reads
strand, chro, start, end = ['', '', '', '']
if r.search(te_id):
strand = r.search(te_id).groups(0)[3]
chro = r.search(te_id).groups(0)[0]
start = r.search(te_id).groups(0)[1]
end = r.search(te_id).groups(0)[2]
#print '%s\t%s\t%s\t%s' %(strand, chro, start, end)
l_count, r_count = [0, 0]
if strand == '+':
l_count = len(existingTE_found[te_id]['start'].keys())
r_count = len(existingTE_found[te_id]['end'].keys())
else:
l_count = len(existingTE_found[te_id]['end'].keys())
r_count = len(existingTE_found[te_id]['start'].keys())
#print '%s\t%s' %(l_count, r_count)
##reads and events
total_supporting_l, left_supporting_l, right_supporting_l, left_reads_l, right_reads_l = [0, 0, 0, '', '']
total_supporting_r, left_supporting_r, right_supporting_r, left_reads_r, right_reads_r = [0, 0, 0, '', '']
reads = defaultdict(lambda : int())
event_l = defaultdict(lambda : int())
event_r = defaultdict(lambda : int())
if len(existingTE_found[te_id]['start'].keys()) > 0:
for rd in existingTE_found[te_id]['start'].keys():
reads[rd] = 0
event_l[existingTE_found[te_id]['start'][rd]] += 1
event_l_sorted = OrderedDict(sorted(event_l.items(), key=lambda x:x[1]))
event_l_top = event_l_sorted.keys()[-1]
total_supporting_l, left_supporting_l, right_supporting_l, left_reads_l, right_reads_l = Supporting_count(event_l_top, start, teSupportingReads)
if len(existingTE_found[te_id]['end'].keys()) > 0:
for rd in existingTE_found[te_id]['end'].keys():
reads[rd] = 1
event_r[existingTE_found[te_id]['end'][rd]] += 1
event_r_sorted = OrderedDict(sorted(event_r.items(), key=lambda x:x[1]))
event_r_top = event_r_sorted.keys()[-1]
total_supporting_r, left_supporting_r, right_supporting_r, left_reads_r, right_reads_r = Supporting_count(event_r_top, str(int(end)+1), teSupportingReads)
#event_l_sorted = OrderedDict(sorted(event_l.items(), key=lambda x:x[1]))
#event_r_sorted = OrderedDict(sorted(event_r.items(), key=lambda x:x[1]))
#event_l_top = event_l_sorted.keys()[-1]
#event_r_top = event_r_sorted.keys()[-1]
##repeat family
repeat_junction = insertion_family(','.join(reads.keys()), read_repeat)
#print 'repeat_junction: %s' %(repeat_junction)
##supporting reads
#total_supporting_l, left_supporting_l, right_supporting_l, left_reads_l, right_reads_l = Supporting_count(event_l_top, start, teSupportingReads)
#total_supporting_r, left_supporting_r, right_supporting_r, left_reads_r, right_reads_r = Supporting_count(event_r_top, str(int(end)+1), teSupportingReads)
#print 'left: t:%s\tl:%s\tr:%s\tl:%s\tr:%s' %(total_supporting_l, left_supporting_l, right_supporting_l, left_reads_l, right_reads_l)
#print 'right: t:%s\tl:%s\tr:%s\tl:%s\tr:%s' %(total_supporting_r, left_supporting_r, right_supporting_r, left_reads_r, right_reads_r)
##output
if l_count > 0 and r_count > 0:
print >> REF, '%s\t%s\t%s\t%s\t%s..%s\t%s\tT:%s\tR:%s\tL:%s\tST:%s\tSR:%s\tSL:%s' %(repeat_junction, 'Shared', exper, chro, start, end, strand, l_count+r_count, r_count, l_count, left_supporting_l+right_supporting_r, right_supporting_r, left_supporting_l)
elif l_count == 0 and r_count == 0:
print >> REF, '%s\t%s\t%s\t%s\t%s..%s\t%s\tT:%s\tR:%s\tL:%s\tST:%s\tSR:%s\tSL:%s' %(repeat_junction, 'Reference_Only', exper, chro, start, end, strand, l_count+r_count, r_count, l_count, left_supporting_l+right_supporting_r, right_supporting_r, left_supporting_l)
else:
print >> REF, '%s\t%s\t%s\t%s\t%s..%s\t%s\tT:%s\tR:%s\tL:%s\tST:%s\tSR:%s\tSL:%s' %(repeat_junction, 'insufficient_data', exper, chro, start, end, strand, l_count+r_count, r_count, l_count, left_supporting_l+right_supporting_r, right_supporting_r, left_supporting_l)
REF.close()
txt2gff(ref, ref_gff)
def read_direction(strands):
plus = 0
minus = 0
for s in strands:
if s == '+':
plus += 1
else:
minus += 1
if plus > 0 and plus > minus:
return 'left'
elif minus > 0 and minus > plus:
return 'right'
def get_boundary(reads_list, direction):
read_starts = list()
read_ends = list()
for read_inf in reads_list:
read_starts.append(int(read_inf[2]))
read_ends.append(int(read_inf[2])+len(read_inf[1]))
#print '%s\t%s\t%s\t%s' %(read_inf[0], read_inf[1], read_inf[2], read_inf[3])
if direction == 'right':
#print 'right: %s' %(sorted(read_starts, key=int)[0])
return sorted(read_starts, key=int)[0]
elif direction == 'left':
#print 'left: %s' %(sorted(read_ends, key=int)[-1])
return sorted(read_ends, key=int)[-1]
def read_to_repeat(read_name, read_repeat):
read_name1 = '%s/1' %(read_name)
read_name2 = '%s/2' %(read_name)
read_name3 = '%s.f' %(read_name)
read_name4 = '%s.r' %(read_name)
repeat_family = []
if read_repeat.has_key(read_name):
repeat_family = read_repeat[read_name]
elif read_repeat.has_key(read_name1):
repeat_family = read_repeat[read_name1]
elif read_repeat.has_key(read_name2):
repeat_family = read_repeat[read_name2]
elif read_repeat.has_key(read_name3):
repeat_family = read_repeat[read_name3]
elif read_repeat.has_key(read_name4):
repeat_family = read_repeat[read_name4]
return repeat_family
def TSD_from_read_depth(r, read_repeat, teReadClusters, teReadClusters_count, teReadClusters_depth, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found):
#determine TSD from read depth at insertions site
#count depth to find TSD in
#if there are 5 reads (2 right, 3 left) they
#should only be a depth of 5 at the TSD
#teReadCluster = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : str()))))
#teReadClusters[event]['read_inf'][name]['strand']= strand
#teReadClusters_count[event]['read_count'] += 1
#teReadClusters_depth[event]['read_inf']['depth'][i] += 1
#check how many insertion in each cluster by find start/end of junction reads
#split cluster into subcluster, then find tsd using depth method
#reestimate supporting reads for each cluster
r5 = re.compile(r'start:[53]$')
r3 = re.compile(r'end:[53]$')
for cluster in sorted(teReadClusters.keys(), key=int):
chro = teReadClusters[cluster]['read_inf']['seq']['chr']
##check insertion number
left_reads = defaultdict(lambda : list())
right_reads= defaultdict(lambda : list())
teReadClusters_sub = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : str()))))
teReadClusters_sub_count = defaultdict(lambda : defaultdict(lambda : int()))
teReadClusters_sub_depth = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : int()))))
teReadClusters_sub_type = defaultdict(lambda : int())
#print 'tsdfiner%s' %(cluster)
for name in teReadClusters[cluster]['read_inf'].keys():
if name == 'seq':
#skip empty line
continue
seq = teReadClusters[cluster]['read_inf'][name]['seq']
start = teReadClusters[cluster]['read_inf'][name]['start']
strand = teReadClusters[cluster]['read_inf'][name]['strand']
#print name, start, seq, strand
end = int(start) + len(seq)
if strand == '+':
if r5.search(name):
pos = 'right'
right_reads[start].append(name)
elif r3.search(name):
pos = 'left'
left_reads[end].append(name)
elif strand == '-':
if r5.search(name):
pos = 'left'
left_reads[end].append(name)
elif r3.search(name):
pos = 'right'
right_reads[start].append(name)
#print len(left_reads.keys()), len(right_reads.keys())
if (len(left_reads.keys()) > 1 and len(right_reads.keys()) >= 1) or (len(left_reads.keys()) >= 1 and len(right_reads.keys()) > 1):
##more than two junction
count_tsd = 0
pairs_tsd = defaultdict(lambda : int())
##find pairs for one insertions
for start1 in left_reads.keys():
min_dist = 0
min_pair = ''
for start2 in right_reads.keys():
if min_dist == 0:
min_dist = abs(int(start2) - int(start1))
min_pair = start2
elif min_dist > abs(int(start2) - int(start1)):
min_dist = abs(int(start2) - int(start1))
min_pair = start2
if min_dist <= 100:
##find pairs
count_tsd += 1
pairs_tsd[start1] = 1
pairs_tsd[min_pair] = 1
teReadClusters_sub_type['%s-%s' %(cluster, count_tsd)] = 2
for read in left_reads[start1]:
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-%s' %(cluster, count_tsd), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
for read in right_reads[min_pair]:
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-%s' %(cluster, count_tsd), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
else:
##do not find pairs
count_tsd += 1
pairs_tsd[start1] = 1
teReadClusters_sub_type['%s-%s' %(cluster, count_tsd)] = 1
for read in left_reads[start1]:
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-%s' %(cluster, count_tsd), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
##set unpaired
for start2 in right_reads.keys():
if not pairs_tsd.has_key(start2):
#not paired junction
count_tsd += 1
teReadClusters_sub_type['%s-%s' %(cluster, count_tsd)] = 1
for read in right_reads[start2]:
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-%s' %(cluster, count_tsd), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
elif len(left_reads.keys()) > 1:
count_tsd = 0
for start1 in left_reads.keys():
count_tsd += 1
teReadClusters_sub_type['%s-%s' %(cluster, count_tsd)] = 1
for read in left_reads[start1]:
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-%s' %(cluster, count_tsd), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
elif len(right_reads.keys()) > 1:
count_tsd = 0
for start2 in right_reads.keys():
count_tsd += 1
teReadClusters_sub_type['%s-%s' %(cluster, count_tsd)] = 1
for read in right_reads[start2]:
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-%s' %(cluster, count_tsd)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-%s' %(cluster, count_tsd), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
elif len(left_reads.keys()) == 1 and len(right_reads.keys()) == 1:
##one right and one left junction
#print left_reads.keys()[0], right_reads.keys()[0]
if abs(int(left_reads.keys()[0]) - int(right_reads.keys()[0])) > 100:
##too far apart; these may be single ends of two different insertions
teReadClusters_sub_type['%s-1' %(cluster)] = 1
teReadClusters_sub_type['%s-2' %(cluster)] = 1
start1 = left_reads.keys()[0]
for read in left_reads[start1]:
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-1' %(cluster), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
start2 = right_reads.keys()[0]
for read in right_reads[start2]:
teReadClusters_sub['%s-2' %(cluster)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-2' %(cluster)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-2' %(cluster)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-2' %(cluster), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
else:
##one junction
teReadClusters_sub_type['%s-1' %(cluster)] = 2
start1 = left_reads.keys()[0]
for read in left_reads[start1]:
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-1' %(cluster), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
start2 = right_reads.keys()[0]
for read in right_reads[start2]:
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-1' %(cluster), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
else:
##one junction with one end support
if len(left_reads.keys()) > 0:
teReadClusters_sub_type['%s-1' %(cluster)] = 1
start1 = left_reads.keys()[0]
for read in left_reads[start1]:
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-1' %(cluster), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
elif len(right_reads.keys()) > 0:
    ##mirror the left-junction branch above: record the sub-cluster type for this single right junction
    teReadClusters_sub_type['%s-1' %(cluster)] = 1
    start2 = right_reads.keys()[0]
for read in right_reads[start2]:
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['seq'] = teReadClusters[cluster]['read_inf'][read]['seq']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['start'] = teReadClusters[cluster]['read_inf'][read]['start']
teReadClusters_sub['%s-1' %(cluster)]['read_inf'][read]['strand'] = teReadClusters[cluster]['read_inf'][read]['strand']
calculate_cluster_depth('%s-1' %(cluster), teReadClusters[cluster]['read_inf'][read]['seq'], teReadClusters[cluster]['read_inf'][read]['start'], read, teReadClusters[cluster]['read_inf'][read]['strand'], teReadClusters_sub, teReadClusters_sub_count, teReadClusters_sub_depth)
#print 'TSD finder: %s' %(cluster)
###teReadClusters_sub_depth was populated above
###now process each sub-cluster; results are still stored under the parent cluster in teInsertions, so TSD_check/TSD_check_single receive the parent cluster id
for sub_cluster in teReadClusters_sub_depth.keys():
#print sub_cluster
#TSD_len = 0
#read_total = teReadClusters_sub_count[sub_cluster]['read_count']
#for chrs_pos in sorted(teReadClusters_sub_depth[sub_cluster]['read_inf']['depth'].keys(), key=int):
# depth = teReadClusters_sub_depth[sub_cluster]['read_inf']['depth'][chrs_pos]
# if float(depth) >= 0.6*float(read_total):
# TSD_len += 1
if teReadClusters_sub_type[sub_cluster] == 1:
TSD = 'UKN'
for name in teReadClusters_sub[sub_cluster]['read_inf'].keys():
real_name = r.search(name).groups(0)[0] if r.search(name) else ''
seq = teReadClusters_sub[sub_cluster]['read_inf'][name]['seq']
start = teReadClusters_sub[sub_cluster]['read_inf'][name]['start']
strand = teReadClusters_sub[sub_cluster]['read_inf'][name]['strand']
TSD_check_single(cluster, seq, chro, start, real_name, read_repeat, name, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found)
else:
TSD_len = 0
if tsd_finder(sub_cluster, 1, teReadClusters_sub_count, teReadClusters_sub_depth):
TSD_len = tsd_finder(sub_cluster, 1, teReadClusters_sub_count, teReadClusters_sub_depth)
elif tsd_finder(sub_cluster, 0.8, teReadClusters_sub_count, teReadClusters_sub_depth):
TSD_len = tsd_finder(sub_cluster, 0.8, teReadClusters_sub_count, teReadClusters_sub_depth)
elif tsd_finder(sub_cluster, 0.6, teReadClusters_sub_count, teReadClusters_sub_depth):
TSD_len = tsd_finder(sub_cluster, 0.6, teReadClusters_sub_count, teReadClusters_sub_depth)
if TSD_len > 0:
#print TSD_len
TSD = '.'*TSD_len
for name1 in teReadClusters_sub[sub_cluster]['read_inf'].keys():
real_name = r.search(name1).groups(0)[0] if r.search(name1) else ''
seq = teReadClusters_sub[sub_cluster]['read_inf'][name1]['seq']
start = teReadClusters_sub[sub_cluster]['read_inf'][name1]['start']
strand = teReadClusters_sub[sub_cluster]['read_inf'][name1]['strand']
#print name1, seq, start, strand, chro
TSD_check(cluster, seq, chro, start, real_name, read_repeat, name1, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found)
#print 'after tsd_check'
else:
#no TSD found; these reads could still support an insertion
TSD = 'UKN'
#print TSD
for name in teReadClusters_sub[sub_cluster]['read_inf'].keys():
real_name = r.search(name).groups(0)[0] if r.search(name) else ''
seq = teReadClusters_sub[sub_cluster]['read_inf'][name]['seq']
start = teReadClusters_sub[sub_cluster]['read_inf'][name]['start']
strand = teReadClusters_sub[sub_cluster]['read_inf'][name]['strand']
TSD_check(cluster, seq, chro, start, real_name, read_repeat, name, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found)
#print 'End of cycle'
def tsd_finder(sub_cluster, tsd_depth, teReadClusters_sub_count, teReadClusters_sub_depth):
TSD_len = 0
read_total = teReadClusters_sub_count[sub_cluster]['read_count']
for chrs_pos in sorted(teReadClusters_sub_depth[sub_cluster]['read_inf']['depth'].keys(), key=int):
depth = teReadClusters_sub_depth[sub_cluster]['read_inf']['depth'][chrs_pos]
if float(depth) >= float(tsd_depth)*float(read_total):
TSD_len += 1
return TSD_len
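# Illustrative example (assumed numbers): if a sub-cluster has read_count = 10
# and tsd_depth = 0.6, then every reference position covered by at least 6 of
# those reads adds 1 to TSD_len, so 5 qualifying positions give TSD_len = 5.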
def align_process(bin_ins, read_repeat, record, r, r_tsd, count, seq, chro, start, end, name, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found, teReadClusters, teReadClusters_count, teReadClusters_depth, teSupportingReads):
range_allowance = 1000
padded_start = bin_ins[0] - range_allowance
padded_end = bin_ins[-1] + range_allowance
#insertions
#print 'insertions: %s' %(name)
if (int(start) >= padded_start and int(start) <= padded_end) or (int(end) >= padded_start and int(end) <= padded_end):
bin_ins.extend([int(start), int(end)])
bin_ins = sorted(bin_ins, key=int)
if r.search(name):
real_name = r.search(name).groups(0)[0]
if not r_tsd.search(TSD):
TSD_check(count, seq, chro, start, real_name, read_repeat, name, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found)
else:
calculate_cluster_depth(count, seq, start, name, strand, teReadClusters, teReadClusters_count, teReadClusters_depth)
elif not r.search(name) and not record.is_paired:
#reads not matched to repeat and not mates of junctions
#reads are mates of reads matched to middle of repeat
#supporting reads
teSupportingReads[count].append([name, seq, start, strand])
else:
#if start and end do not fall within last start and end
#we now have a different insertion event
count += 1
if r.search(name):
real_name = r.search(name).groups(0)[0]
if not r_tsd.search(TSD):
TSD_check(count, seq, chro, start, real_name, read_repeat, name, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found)
else:
calculate_cluster_depth(count, seq, start, name, strand, teReadClusters, teReadClusters_count, teReadClusters_depth)
elif not r.search(name) and not record.is_paired:
#reads not matched to repeat and not mates of junctions
#reads are mates of reads matched to middle of repeat
#supporting reads
teSupportingReads[count].append([name, seq, start, strand])
#initial insertion site boundary
bin_ins = [int(start), int(end)]
#print '%s\t%s' %(count, bin_ins)
return (bin_ins, count)
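# Worked sketch of the windowing above (assumed coordinates): with
# range_allowance = 1000 and bin_ins = [1200, 1300], the padded window is
# [200, 2300]; a read starting at 2100 joins the current cluster `count` and
# extends bin_ins, while a read starting at 5000 falls outside the window,
# so `count` is incremented and bin_ins is reset to that read's [start, end].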
#existing_TE_bed_reader
#Chr4 1072 1479 Simple_repeat:1072-1479 1 +
#Chr4 1573 1779 Simple_repeat:1573-1779 0 +
def existing_TE_bed_reader(infile, existingTE_intact, chro, existingTE_intact_id):
#print 'Reading existing TE bed'
with open(infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2 and not line.startswith(r'#'):
unit = re.split(r'\t', line)
##intact repeat
#print line
if int(unit[4]) == 1 and unit[0] == chro:
te_id = '%s:%s-%s:%s' %(unit[0], unit[1], unit[2], unit[5])
existingTE_intact[unit[0]]['start'][int(unit[1])] = te_id
existingTE_intact[unit[0]]['end'][int(unit[2])] = te_id
existingTE_intact_id[te_id] = re.split(r':', unit[3])[0]
#print '%s\t%s\t%s' %(unit[0], unit[1], 'start')
#print '%s\t%s\t%s' %(unit[0], unit[2], 'end')
#if unit[5] == '+':
# existingTE_intact[unit[0]]['start'][unit[1]] = te_id
# existingTE_intact[unit[0]]['end'][unit[2]] = te_id
#else:
# existingTE_intact[unit[0]]['end'][unit[1]] = te_id
# existingTE_intact[unit[0]]['start'][unit[2]] = te_id
def existingTE_RM_ALL(top_dir, infile, existingTE_inf):
ofile_RM = open('%s/existingTE.bed' %(top_dir), 'w')
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r'\s+',line)
#print line
#print unit[5], unit[9], unit[12], unit[13], unit[14]
if unit[9] == '+':
for i in range(int(unit[6])-2, int(unit[6])+3):
existingTE_inf[unit[5]]['start'][int(i)] = 1
#print >> ofile_RM, '%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], str(int(unit[6])-2), str(int(unit[6])+2), unit[11],unit[6],unit[7], '1', '+')
#print unit[10], 'start', unit[6]
for i in range(int(unit[7])-2, int(unit[7])+3):
existingTE_inf[unit[5]]['end'][int(i)] = 1
#print >> ofile_RM, '%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], str(int(unit[7])-2), str(int(unit[7])+2), unit[11],unit[6],unit[7], '1', '+')
#print unit[10], 'end', unit[7]
print >> ofile_RM, '%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], str(int(unit[6])), str(int(unit[7])), unit[11],unit[6],unit[7], '1', '+')
elif unit[9] == 'C':
for i in range(int(unit[6])-2, int(unit[6])+3):
existingTE_inf[unit[5]]['start'][int(i)] = 1
#print >> ofile_RM, '%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], str(int(unit[6])-2), str(int(unit[6])+2), unit[11],unit[6],unit[7],'1', '-')
#print unit[10], 'start', unit[6]
for i in range(int(unit[7])-2, int(unit[7])+3):
existingTE_inf[unit[5]]['end'][int(i)] = 1
#print >> ofile_RM, '%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], str(int(unit[7])-2), str(int(unit[7])+2), unit[11],unit[6],unit[7], '1', '-')
#print unit[10], 'end', unit[7]
print >> ofile_RM, '%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], str(int(unit[6])), str(int(unit[7])), unit[11],unit[6],unit[7], '1', '-')
ofile_RM.close()
def existingTE_RM(top_dir, infile, existingTE_inf):
r_end = re.compile(r'\((\d+)\)')
ofile_RM = open('%s/existingTE.bed' %(top_dir), 'w')
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r'\s+',line)
#print line
#print unit[5], unit[9], unit[12], unit[13], unit[14]
if unit[9] == '+':
if int(unit[12]) == 1:
for i in range(int(unit[6])-2, int(unit[6])+3):
existingTE_inf[unit[5]]['start'][int(i)] = 1
print >> ofile_RM, '%s\t%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], 'start', str(int(unit[6])-2), str(int(unit[6])+2), unit[11],unit[6],unit[7], '1', '+')
#print unit[10], 'start', unit[6]
if len(unit[14]) == 3:
unit[14] =re.sub(r'\(|\)', '', unit[14])
if int(unit[14]) == 0:
for i in range(int(unit[7])-2, int(unit[7])+3):
existingTE_inf[unit[5]]['end'][int(i)] = 1
print >> ofile_RM, '%s\t%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], 'end', str(int(unit[7])-2), str(int(unit[7])+2), unit[11],unit[6],unit[7], '1', '+')
#print unit[10], 'end', unit[7]
elif unit[9] == 'C':
if len(unit[12]) == 3:
unit[12] =re.sub(r'\(|\)', '', unit[12])
if int(unit[12]) == 0:
for i in range(int(unit[6])-2, int(unit[6])+3):
existingTE_inf[unit[5]]['start'][int(i)] = 1
print >> ofile_RM, '%s\t%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], 'start', str(int(unit[6])-2), str(int(unit[6])+2), unit[11],unit[6],unit[7],'1', '-')
#print unit[10], 'start', unit[6]
if int(unit[14]) == 1:
for i in range(int(unit[7])-2, int(unit[7])+3):
existingTE_inf[unit[5]]['end'][int(i)] = 1
print >> ofile_RM, '%s\t%s\t%s\t%s\t%s:%s-%s\t%s\t%s' %(unit[5], 'end', str(int(unit[7])-2), str(int(unit[7])+2), unit[11],unit[6],unit[7], '1', '-')
#print unit[10], 'end', unit[7]
ofile_RM.close()
def complement(seq):
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
bases = list(seq)
for i in range(len(bases)):
bases[i] = complement[bases[i]] if complement.has_key(bases[i]) else bases[i]
return ''.join(bases)
def reverse_complement(seq):
return complement(seq[::-1])
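# For example, complement('ACCT') returns 'TGGA' and reverse_complement('ACCT')
# returns 'AGGT'; characters outside A/C/G/T are left unchanged.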
def calculate_cluster_depth(event, seq, start, name, strand, teReadClusters, teReadClusters_count, teReadClusters_depth):
teReadClusters_count[event]['read_count'] += 1
teReadClusters[event]['read_inf'][name]['seq'] = seq
teReadClusters[event]['read_inf'][name]['start'] = start
teReadClusters[event]['read_inf'][name]['strand']= strand
for i in range(int(start), int(start)+len(seq)):
teReadClusters_depth[event]['read_inf']['depth'][i] += 1
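# For example (assumed values), a read of length 75 starting at position 1001
# increments the per-position depth counter of its cluster for positions
# 1001..1075, and the cluster's read_count goes up by one.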
def TSD_check_single(event, seq, chro, start, real_name, read_repeat, name, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found):
##set the TSD position for a single junction end: use the end of the read if the read lies on the left of the junction, or the start of the read if it lies on the right
##seq is the entire trimmed read, not just the TSD portion of the read
##start is the first position at which the entire read matches the reference
repeat = read_repeat[real_name] # works for any TE; the repeat name is looked up from the read name
rev_com = reverse_complement(seq)
result = 0
pos = ''
TE_orient = 0
TSD_start = 0
TSD_seq = ''
r5 = re.compile(r'start:[53]$')
r3 = re.compile(r'end:[53]$')
#r5_tsd = re.compile(r'^(%s)' %(TSD))
#r3_tsd = re.compile(r'(%s)$' %(TSD))
#print '%s\t%s\t%s\t%s\t%s\t%s\t%s' %(event, name, TSD_seq, TSD_start, TE_orient, pos, repeat)
##start means that the TE was removed from the start of the read
##5 means the trimmed end maps to the 5prime end of the TE
##3 means the trimmed end maps to the 3prime end of the TE
if strand == '+':
if r5.search(name):
result = 1
TSD_seq = 'UNK'
pos = 'right'
TE_orient = '-' if name[-1] == '5' else '+'
TSD_start = int(start)
elif r3.search(name):
result = 1
TSD_seq = 'UNK'
pos = 'left'
TE_orient = '+' if name[-1] == '5' else '-'
TSD_start = int(start) + len(seq)
elif strand == '-':
if r5.search(name):
result = 1
TSD_seq = 'UNK'
pos = 'left'
TE_orient = '+' if name[-1] == '5' else '-'
TSD_start = int(start) + len(seq)
elif r3.search(name):
result = 1
TSD_seq = 'UNK'
pos = 'right'
TE_orient = '-' if name[-1] == '5' else '+'
TSD_start = int(start)
#print '%s\t%s\t%s\t%s\t%s\t%s\t%s' %(event, name, TSD_seq, TSD_start, TE_orient, pos, repeat)
if result and TE_orient:
tir1_end, tir2_end = [0, 0]
#if 0:
# continue
if pos == 'left':
tir1_end = int(TSD_start)
#print 'tir1: %s' %(tir1_end)
elif pos == 'right':
tir2_end = int(TSD_start) - 1
#print 'tir2: %s' %(tir2_end)
if tir1_end > 0 and existingTE_inf[chro]['start'].has_key(tir1_end):
te_id = existingTE_inf[chro]['start'][tir1_end]
existingTE_found[te_id]['start'][name] = event
#print 'tir1'
elif tir2_end > 0 and existingTE_inf[chro]['end'].has_key(tir2_end):
te_id = existingTE_inf[chro]['end'][tir2_end]
existingTE_found[te_id]['end'][name] = event
#print 'tir2'
else:
#print 'not match'
##non reference insertions
teInsertions[event][TSD_start][TSD_seq]['count'] += 1 ## total junction reads
teInsertions[event][TSD_start][TSD_seq][pos] += 1 ## right/left junction reads
teInsertions[event][TSD_start][TSD_seq][TE_orient] += 1 ## plus/reverse insertions
#read_name = re.sub(r':start|:end', '', name)
teInsertions_reads[event][TSD_start][TSD_seq]['read'].append(name)
#print '1: %s\t 2: %s' %(read_name, teInsertions_reads[event][TSD_seq][TSD_start]['read'])
#print 'C: %s\t%s\t%s\t%s\t%s' %(event, name, TSD_seq, TSD_start, TE_orient)
def TSD_check(event, seq, chro, start, real_name, read_repeat, name, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found):
##TSD already specified by the user (not unknown)
##seq is the entire trimmed read, not just the TSD portion of the read
##start is the first position at which the entire read matches the reference
repeat = read_repeat[real_name] # works for any TE; the repeat name is looked up from the read name
rev_com = reverse_complement(seq)
result = 0
pos = ''
TE_orient = 0
TSD_start = 0
TSD_seq = ''
r5 = re.compile(r'start:[53]$')
r3 = re.compile(r'end:[53]$')
r5_tsd = re.compile(r'^(%s)' %(TSD))
r3_tsd = re.compile(r'(%s)$' %(TSD))
#print '%s\t%s\t%s\t%s\t%s\t%s\t%s' %(event, name, TSD_seq, TSD_start, TE_orient, pos, repeat)
##start means that the TE was removed from the start of the read
##5 means the trimmed end maps to the 5prime end of the TE
##3 means the trimmed end maps to the 3prime end of the TE
if strand == '+':
if r5.search(name) and (r5_tsd.search(seq) or r3_tsd.search(rev_com)):
result = 1
TSD_seq = r5_tsd.search(seq).groups(0)[0] if r5_tsd.search(seq) else 'UNK'
pos = 'right'
TE_orient = '-' if name[-1] == '5' else '+'
TSD_start = int(start)
elif r3.search(name) and (r5_tsd.search(rev_com) or r3_tsd.search(seq)):
result = 1
TSD_seq = r3_tsd.search(seq).groups(0)[0] if r3_tsd.search(seq) else 'UNK'
pos = 'left'
TE_orient = '+' if name[-1] == '5' else '-'
TSD_start = int(start) + (len(seq)-len(TSD))
elif strand == '-':
if r5.search(name) and (r5_tsd.search(rev_com) or r3_tsd.search(seq)):
result = 1
TSD_seq = r3_tsd.search(seq).groups(0)[0] if r3_tsd.search(seq) else 'UNK'
pos = 'left'
TE_orient = '+' if name[-1] == '5' else '-'
TSD_start = int(start) + (len(seq)-len(TSD))
elif r3.search(name) and (r5_tsd.search(seq) or r3_tsd.search(rev_com)):
result = 1
TSD_seq = r5_tsd.search(seq).groups(0)[0] if r5_tsd.search(seq) else 'UNK'
pos = 'right'
TE_orient = '-' if name[-1] == '5' else '+'
TSD_start = int(start)
#print '%s\t%s\t%s\t%s\t%s\t%s\t%s' %(event, name, TSD_seq, TSD_start, TE_orient, pos, repeat)
if result and TE_orient:
tir1_end, tir2_end = [0, 0]
#if 0:
# continue
if pos == 'left':
tir1_end = int(start) + len(seq)
#print 'tir1: %s' %(tir1_end)
elif pos == 'right':
tir2_end = int(start) - 1
#print 'tir2: %s' %(tir2_end)
if tir1_end > 0 and existingTE_inf[chro]['start'].has_key(tir1_end):
te_id = existingTE_inf[chro]['start'][tir1_end]
existingTE_found[te_id]['start'][name] = event
#print 'tir1'
elif tir2_end > 0 and existingTE_inf[chro]['end'].has_key(tir2_end):
te_id = existingTE_inf[chro]['end'][tir2_end]
existingTE_found[te_id]['end'][name] = event
#print 'tir2'
else:
#print 'not match'
##non reference insertions
teInsertions[event][TSD_start][TSD_seq]['count'] += 1 ## total junction reads
teInsertions[event][TSD_start][TSD_seq][pos] += 1 ## right/left junction reads
teInsertions[event][TSD_start][TSD_seq][TE_orient] += 1 ## plus/reverse insertions
#read_name = re.sub(r':start|:end', '', name)
teInsertions_reads[event][TSD_start][TSD_seq]['read'].append(name)
#print '1: %s\t 2: %s' %(read_name, teInsertions_reads[event][TSD_seq][TSD_start]['read'])
#print 'C: %s\t%s\t%s\t%s\t%s' %(event, name, TSD_seq, TSD_start, TE_orient)
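# Worked example of the naming convention above (assumed read name): a read
# called 'read1:start:5' aligned on the '+' strand whose sequence begins with
# the TSD is recorded as a right-junction read (pos='right'), the TE
# orientation is set to '-' (the name ends in '5'), and TSD_start is the
# read's alignment start.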
def convert_tag(tag):
tags = {}
for t in tag:
tags[t[0]] = t[1]
return tags
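# For example, convert_tag([('XT', 'U'), ('XO', 0), ('X1', 0)]) returns
# {'XT': 'U', 'XO': 0, 'X1': 0}; this is how the BAM optional tags are looked
# up below when filtering unpaired reads.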
def find_insertion_cluster_bam(align_file, read_repeat, target, TSD, teInsertions, teInsertions_reads, teReadClusters, teReadClusters_count, teReadClusters_depth, existingTE_inf, existingTE_found, teSupportingReads):
r = re.compile(r'(.*):(start|end):(5|3)')
r_tsd = re.compile(r'UNK|UKN|unknown', re.IGNORECASE)
r_cg = re.compile(r'[SID]')
bin_ins = [0]
count = 0
TSD_len = len(TSD)
ref = 'None' if target == 'ALL' else target
fsam = pysam.AlignmentFile(align_file, 'rb')
rnames = fsam.references
for record in fsam.fetch(reference=ref, until_eof = True):
if not record.is_unmapped:
name = record.query_name
flag = record.flag
start = int(record.reference_start) + 1
MAPQ = record.mapping_quality
cigar = record.cigarstring
seq = record.query_sequence
tag = record.tags if record.tags else []
length = len(seq)
chro = rnames[record.reference_id]
end = int(start) + int(length) - 1 #not valid if the alignment contains indels or softclips; such reads are skipped below
strand = ''
# flag 0 means the read is unpaired and mapped to the plus strand
if int(flag) == 0:
strand = '+'
else:
strand = '-' if record.is_reverse else '+'
if r_cg.search(cigar):
continue
tags = convert_tag(tag)
#print '%s\t%s\t%s' %(name, start, length)
# filter low-quality mappings:
# 1. paired-end reads: at least one mate must be uniquely mapped (MAPQ is set to 0 for both mates when both map to repeats, otherwise at least one mate has MAPQ > 0)
# 2. unpaired reads: must be uniquely mapped, with no gaps, at most 3 mismatches and no suboptimal alignment
#print 'before: %s\t%s\t%s' %(name, count, bin_ins)
#if record.is_proper_pair and (int(MAPQ) >= 29 or tags['XT'] == 'U'):
if record.is_proper_pair and int(MAPQ) > 0:
bin_ins, count = align_process(bin_ins, read_repeat, record, r, r_tsd, count, seq, chro, start, end, name, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found, teReadClusters, teReadClusters_count, teReadClusters_depth, teSupportingReads)
elif not record.is_paired:
#if tags['XT'] == 'U' and int(tags['XO']) == 0 and int(tags['XM']) <= 3 and int(tags['X1']) == 0:
#if tags['XT'] == 'U' and int(tags['XO']) == 0 and (int(tags['XM']) <= 3 or int(tags['X1']) == 0):
if tags['XT'] == 'U' and int(tags['XO']) == 0 and int(tags['X1']) <= 3:
#if tags['XT'] == 'U':
bin_ins, count = align_process(bin_ins, read_repeat, record, r, r_tsd, count, seq, chro, start, end, name, TSD, strand, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found, teReadClusters, teReadClusters_count, teReadClusters_depth, teSupportingReads)
teReadClusters[count]['read_inf']['seq']['chr'] = chro
#print 'after: %s\t%s\t%s' %(name, count, bin_ins)
###TSD not given by the user; infer it from read depth
if r_tsd.search(TSD):
TSD_from_read_depth(r, read_repeat, teReadClusters, teReadClusters_count, teReadClusters_depth, teInsertions, teInsertions_reads, existingTE_inf, existingTE_found)
def createdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def parse_regex(infile):
data = []
with open (infile, 'r') as filehd:
for line in filehd:
line = line.rstrip()
if len(line) > 2:
unit = re.split(r'\s+',line)
unit[0] = re.sub(r'.(fq|fastq)','',unit[0])
unit[1] = re.sub(r'.(fq|fastq)','',unit[1])
unit[2] = re.sub(r'.(fq|fastq)','',unit[2])
data = unit
return data
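# Illustrative example (assumed file contents): a whitespace-separated line such as
#   p1.fq  p2.fq  unpaired.fq  TTA
# yields data = ['p1', 'p2', 'unpaired', 'TTA']; the '.fq'/'.fastq' suffixes are
# stripped from the first three fields, and the fourth field is used as the TSD
# pattern in main() below.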
def main():
required_reads = 1 ## rightReads + leftReads must be greater than this value
required_left_reads = 1 ## must be >= this value
required_right_reads = 1 ## must be >= this value
align_file = sys.argv[1] ## combined bowtie or bwa results, sam format only
usr_target = sys.argv[2] ## chromosome to analyze: ALL or Chr1..N
genome_path = sys.argv[3] ## genome sequence
TE = sys.argv[4] ## repeat to analyze: ALL or mPing/other te name
regex_file = sys.argv[5] ## regex.txt
exper = sys.argv[6] ## prefix for output, title: HEG4
flank_len = sys.argv[7] ## length of seq flanking insertions to be returned: 100
existing_TE = sys.argv[8] ## existingTE.blatout
mm_allow = sys.argv[9] ## mismatches allowed: 0, 1, 2, 3
bowtie2 = sys.argv[10] ## use bowtie2 or not: 1 or 0
lib_size = sys.argv[11] ## insert size of library
bedtools = sys.argv[12]
#relax_reference = sys.argv[11]## relax mode for existing TE: 1 or 0
#relax_align = sys.argv[12]## relax mode for insertion: 1 or 0
bowtie_sam = 1 ## change to shift or remove in V2
existingTE_inf = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : str)))
existingTE_found = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : str)))
bwa = 0
teInsertions = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : int()))))
teInsertions_reads = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : list()))))
teReadClusters = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : str()))))
teReadClusters_count = defaultdict(lambda : defaultdict(lambda : int()))
teReadClusters_depth = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : int()))))
teSupportingReads = defaultdict(lambda : list())
top_dir = re.split(r'/', os.path.dirname(os.path.abspath(align_file)))[:-1]
#read existing TE from file
#existing_TE_intact = defaultdict(lambda : defaultdict(lambda : int()))
existingTE_intact = defaultdict(lambda : defaultdict(lambda : defaultdict(lambda : str)))
existingTE_intact_id = defaultdict(lambda: str())
existingTE_bed = '%s/existingTE.bed' %('/'.join(top_dir))
existingTE_bed_chr = '%s/existingTE.%s.bed' %('/'.join(top_dir), usr_target)
#print existingTE_bed
if os.path.isfile(existingTE_bed_chr) and os.path.getsize(existingTE_bed_chr) > 0:
existing_TE_bed_reader(existingTE_bed_chr, existingTE_intact, usr_target, existingTE_intact_id)
else:
os.system('grep -P \"%s\\t\" %s > %s' %(usr_target, existingTE_bed, existingTE_bed_chr))
existing_TE_bed_reader(existingTE_bed_chr, existingTE_intact, usr_target, existingTE_intact_id)
os.system('rm %s' %(existingTE_bed_chr))
#print 'Existing TE file does not exists or zero size'
#bedtools = ''
#try:
# subprocess.check_output('which bedtools', shell=True)
# bedtools = subprocess.check_output('which bedtools', shell=True)
# bedtools = re.sub(r'\n', '', bedtools)
#except:
# bedtools = '~/BigData/software/bedtools2-2.19.0/bin/bedtools'
##get the regular expression patterns for mates and for the TE;
##when these are passed on the command line as arguments, special regex
##characters are lost even inside single quotes, so they are read from a file
s = re.compile(r'[\[.*+?]')
mate_file = parse_regex(regex_file)
TSD = mate_file[3]
TSDpattern= 1 if s.search(TSD) else 0
##read -> repeat relation
#top_dir = re.split(r'/', os.path.dirname(os.path.abspath(align_file)))[:-1]
result = '%s/results' %('/'.join(top_dir))
read_repeat_files = []
if usr_target == 'ALL':
read_repeat_files = glob.glob('%s/te_containing_fq/*.read_repeat_name.split.txt' %('/'.join(top_dir)))
else:
read_repeat_files = glob.glob('%s/te_containing_fq/%s.read_repeat_name.split.txt' %('/'.join(top_dir), usr_target))
#read_repeat_files = glob.glob('%s/te_containing_fq/*.read_repeat_name.txt' %('/'.join(top_dir)))
read_repeat = read_repeat_name(read_repeat_files)
##cluster reads around insertions
find_insertion_cluster_bam(align_file, read_repeat, usr_target, TSD, teInsertions, teInsertions_reads, teReadClusters, teReadClusters_count, teReadClusters_depth, existingTE_intact, existingTE_found, teSupportingReads)
##output absence
write_output(top_dir, result, read_repeat, usr_target, exper, TE, required_reads, required_left_reads, required_right_reads, teInsertions, teInsertions_reads, teSupportingReads, existingTE_intact, existingTE_found, teReadClusters, bedtools, lib_size, existingTE_intact_id)
if __name__ == '__main__':
main()
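# Example invocation, for illustration only (the script name and paths are
# placeholders; the twelve positional arguments follow the comments in main()):
#   python te_insertion_finder.py aligned.bam Chr1 genome.fa mPing regex.txt HEG4 \
#       100 existingTE.blatout 3 0 500 /usr/bin/bedtools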
|
import time
import os
import random
import httplib, urllib
import nosuch.midiutil
from subprocess import call,Popen
from nosuch.oscutil import *
from time import sleep
from vsthost import VstHost,VstPlugin
from nosuch.midiutil import MidiVstHostHardware
V = VstHost()
v1 = V.getVstInstance(0)
v2 = V.getVstInstance(1)
chanA = 1
chanB = 2
v1.setchannel(chanA)
v2.setchannel(chanB)
# v1.setparameter(2,0.25)
m = MidiVstHostHardware()
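# Play 10 random notes, alternating between chanA and chanB; each note is held
# for 0.2 s before the matching note-off is sent.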
for n in range(10):
pitch = random.randint(1,127)
if (n%2) == 0:
chan = chanA
else:
chan = chanB
V.noteon(chan,pitch,100)
sleep(0.2)
V.noteoff(chan,pitch,100)
|
'''
Exercise 1
Write a program that reads two integers as input (use the input function), corresponding to the width
and the height of a rectangle, respectively. Using nested loops, the program must print a string of
'#' characters that draws the given rectangle. For example:
enter the width: 10
enter the height: 3
##########
##########
##########
enter the width: 2
enter the height: 2
##
##
Hint: remember that the print function can take an 'end' parameter, which replaces the character printed
at the end of the string, making it possible to suppress the line break (see the video lessons).
'''
largura = int(input("Enter the width: "))
altura = int(input("Enter the height: "))
c_a = 0
while c_a < altura:
c_l = 0
while c_l < largura:
print("#", end = "")
c_l = c_l + 1
print()
c_a = c_a + 1
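# An equivalent sketch using for-loops (illustrative only; left commented out so
# the program output above is unchanged):
#
#   for _ in range(altura):
#       for _ in range(largura):
#           print("#", end="")
#       print()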
|
import logging
import pprint
import copy
import types
import six
from mixbox.entities import EntityList
from cybox.core import Object
from cybox.common import ObjectProperties
from stix.core import STIXPackage
import certau.util.stix.helpers as stix_helpers
class StixTransform(object):
"""Base class for transforming a STIX package to an alternate format.
This class provides helper functions for processing
:py:class:`STIXPackage<stix.core.stix_package.STIXPackage>` elements.
This class should be extended by other classes that
transform STIX packages into alternate formats.
The default constructor processes a STIX package to initialise
self.observables, a :py:class:`dict` keyed by object type.
Each entry contains a :py:class:`list` of :py:class:`dict` objects
with three keys: 'id', 'observable', and 'fields', containing the
observable ID, the :py:class:`Observable<cybox.core.observable.Observable>`
object itself, and extracted fields, respectively.
Args:
package: the STIX package to transform
Attributes:
OBJECT_FIELDS: a :py:class:`dict` of supported Cybox object types
and fields ('properties'). The dictionary is keyed by Cybox object
type string (see :py:func:`_observable_object_type`) with each
entry containing a list of field names from that object that will
be utilised during the transformation.
Field names may reference sub-objects using dot notation.
For example the Cybox EmailMessage class contains a `header` field
referring to an EmailHeader object which contains a `to` field.
This field can be referenced using the notation `header.to`.
If OBJECT_FIELDS evaluates to False (e.g. empty dict()), it is
assumed all object types are supported.
OBJECT_CONSTRAINTS: a :py:class:`dict` of constraints on the
supported object types based on 'categories' associated with that
type. For example, the Cybox Address object uses the field
`category` to distinguish between IPv4, IPv6 and even email
addresses. Like OBJECT_FIELDS, the dictionary is keyed by object
type. Each entry contains a dictionary keyed by field name,
containing a list of values, or categories, (for that field name)
that are supported by the transform.
Note. Does not support the expression of more complex constraints,
for example combining different categories.
STRING_CONDITION_CONSTRAINT: a :py:class:`list` of string condition
values supported by the transform. For example, some transforms
may not support 'FitsPattern' or 'StartsWith' string condition
values. Use this to list the supported values. Note the values
are strings, even 'None'.
"""
# Class constants - see descriptions above
OBJECT_FIELDS = dict()
OBJECT_CONSTRAINTS = dict()
STRING_CONDITION_CONSTRAINT = list()
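# Illustrative example only (field and category names are assumptions based on
# the docstring above, not defaults of this class): a subclass extracting IPv4
# addresses and e-mail recipients might declare
#
#   OBJECT_FIELDS = {
#       'Address': ['address_value'],
#       'EmailMessage': ['header.to'],
#   }
#   OBJECT_CONSTRAINTS = {
#       'Address': {'category': ['ipv4-addr']},
#   }
#   STRING_CONDITION_CONSTRAINT = ['None', 'Equals']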
def __init__(self, package, default_title=None, default_description=None,
default_tlp='AMBER'):
self.package = package
self.observables = self._observables_for_package(package)
self.default_title = default_title
self.default_description = default_description
self.default_tlp = default_tlp
# Initialise the logger
self._logger = logging.getLogger()
self._logger.debug('%s object created', self.__class__.__name__)
# ##### Properties
@property
def package(self):
return self._package
@package.setter
def package(self, package):
if not isinstance(package, STIXPackage):
raise TypeError('expected STIXPackage object')
self._package = package
@property
def default_title(self):
return self._default_title
@default_title.setter
def default_title(self, title):
self._default_title = '' if title is None else str(title)
@property
def default_description(self):
return self._default_description
@default_description.setter
def default_description(self, description):
if description is None:
self._default_description = ''
else:
self._default_description = str(description)
@property
def default_tlp(self):
return self._default_tlp
@default_tlp.setter
def default_tlp(self, tlp):
if str(tlp) not in stix_helpers.TLP_COLOURS:
raise TypeError('invalid TLP colour')
self._default_tlp = str(tlp)
@property
def observables(self):
return self._observables
@observables.setter
def observables(self, observables):
self._observables = observables
# ##### Helpers for extracting various STIX package elements. #####
def package_title(self):
"""Retrieves the STIX package title (str) from the header."""
title = stix_helpers.package_title(self.package)
return title or self.default_title
def package_description(self):
"""Retrieves the STIX package description (str) from the header."""
description = stix_helpers.package_description(self.package)
return description or self.default_description
def package_tlp(self):
"""Retrieves the STIX package TLP (str) from the header."""
tlp = stix_helpers.package_tlp(self.package)
return tlp or self.default_tlp
# ### Internal methods for processing observables, objects and properties.
@staticmethod
def _observable_properties(observable):
"""Retrieves an observable's object's properties.
Args:
observable: a :py:class:`cybox.Observable` object
Returns:
:py:class:`cybox.ObjectProperties`: the properties from the
observable's object (if they exist), otherwise None.
"""
if (isinstance(observable.object_, Object) and
isinstance(observable.object_.properties, ObjectProperties)):
return observable.object_.properties
else:
return None
@staticmethod
def _observable_object_type(observable):
"""Determine the object type of an observable's object.
Observable object's properties are Cybox object types which extend
the ObjectProperties class. The class name for these objects is
used to represent the object type.
Args:
observable: a :py:class:`cybox.Observable` object
Returns:
str: a string representation of the observable's object properties
type, or None if observable contains no properties.
"""
properties = StixTransform._observable_properties(observable)
return properties.__class__.__name__ if properties else None
@staticmethod
def _condition_key_for_field(field):
"""Dictionary key used for storing the string condition of a field."""
return field + '_condition'
@classmethod
def _observables_for_package(cls, package):
"""Extract observables from a STIX package.
Collects observables from a STIX package and groups them by object
type. Only observables with an ID and containing a Cybox object are
returned. Results are returned in a dictionary keyed by object
type - see :py:func:`_observable_object_type`.
If OBJECT_FIELDS are specified only observables containing the
object types listed will be returned, and only those with at
least one of the listed fields containing a non-trivial value.
OBJECT_CONSTRAINTS and STRING_CONDITION_CONSTRAINT are also applied.
If no OBJECT_FIELDS are specified no constraints are applied and all
identified observables are returned.
Observables are sought from the following locations:
- the root of the STIX package
- within Indicator objects (where the indicators are in the package
root)
- within ObservableComposition objects found in either of the two
previous locations
Args:
package: a :py:class:`stix:STIXPackage` object
Returns:
dict: a dictionary of valid observables, keyed by object type
(See description above). May be empty.
"""
def _add_observables(new_observables):
for observable in new_observables:
if observable.observable_composition is not None:
_add_observables(
observable.observable_composition.observables
)
else:
object_type = cls._observable_object_type(observable)
if (observable.id_ is not None and
observable.id_ not in observable_ids and
object_type is not None):
if object_type in cls.OBJECT_FIELDS.keys():
fields = cls._field_values_for_observable(
observable
)
if not fields:
continue
elif not cls.OBJECT_FIELDS:
fields = None
else:
continue
if object_type not in observables:
observables[object_type] = []
new_observable = dict(
id=observable.id_,
observable=observable,
fields=fields,
)
observables[object_type].append(new_observable)
observable_ids.append(observable.id_)
# Look for observables in the package root and in indicators
observable_ids = []
observables = dict()
if package.observables:
_add_observables(package.observables)
if package.indicators:
for i in package.indicators:
if i.observables:
_add_observables(i.observables)
return observables
@classmethod
def _field_values_for_observable(cls, observable):
"""Collects property field values for an observable."""
object_type = cls._observable_object_type(observable)
fields = list(cls.OBJECT_FIELDS[object_type])
# Add any fields required for constraint checking
if object_type in cls.OBJECT_CONSTRAINTS.keys():
for field in cls.OBJECT_CONSTRAINTS[object_type]:
if field not in fields:
fields.append(field)
# Get field values
values = []
properties = cls._observable_properties(observable)
cls._field_values_for_entity(values, properties, fields)
# Check constraints
if object_type in cls.OBJECT_CONSTRAINTS.keys():
for field in cls.OBJECT_CONSTRAINTS[object_type]:
for value in values:
# Multiple constraints are combined with an implied 'AND'
# (i.e. all of the constraints must be satisfied)
if (field not in value or value[field] not in
cls.OBJECT_CONSTRAINTS[object_type][field]):
values.remove(value)
break
# Remove the constraint field if not needed
if field not in cls.OBJECT_FIELDS[object_type]:
del value[field]
return values
@classmethod
def _field_values_for_entity(cls, values, entity, fields, first_part=''):
"""Returns requested field values from a cybox.Entity object."""
def _first_parts(fields):
"""Get the bits on the left of the first dot in the field names.
"""
first_parts = set()
for field in fields:
parts = field.split('.')
first_parts.add(parts[0])
return first_parts
def _next_parts(fields, field):
"""Get the next parts for this field."""
next_parts = set()
first_part = field + '.'
for field in fields:
if field.startswith(first_part):
next_parts.add(field[len(first_part):])
return next_parts
def _convert_to_str(value):
if six.PY2:
if isinstance(value, basestring):
return value.encode('utf-8')
else:
return pprint.pformat(value)
else:
return str(value)
def _get_value_condition(value):
"""Set the condition value to '-' if the field doesn't have a
condition attribute to allow us to differentiate it from a value
that does contain a condition attribute, but its value is None.
"""
condition = getattr(value, 'condition', '-')
value = getattr(value, 'value', value)
return (_convert_to_str(value), _convert_to_str(condition))
def _add_value_to_dict(dict_, value, field):
value, condition = _get_value_condition(value)
if value and (not cls.STRING_CONDITION_CONSTRAINT or
condition in cls.STRING_CONDITION_CONSTRAINT or
condition == '-'):
dict_[field] = value
if condition != '-':
c_field = cls._condition_key_for_field(field)
dict_[c_field] = condition
def _add_value_to_values(values, value, field):
"""Add value and condition (if present) to results."""
if values:
for dict_ in values:
_add_value_to_dict(dict_, value, field)
else:
# First entry
dict_ = dict()
_add_value_to_dict(dict_, value, field)
if dict_:
values.append(dict_)
for field in _first_parts(fields):
full_first_part = first_part + '.' + field if first_part else field
next_parts = _next_parts(fields, field)
value = getattr(entity, field, None)
# Test whether value is iterable but not a string
iterable = False
if not isinstance(value, six.string_types):
try:
iter(value)
iterable = True
except TypeError:
pass
if iterable:
values_copy = copy.deepcopy(values)
first = True
for item in value:
v_list = values if first else copy.deepcopy(values_copy)
if next_parts:
cls._field_values_for_entity(v_list, item, next_parts,
full_first_part)
else:
_add_value_to_values(v_list, item, full_first_part)
if not first:
values.extend(v_list)
else:
first = False
elif value:
if next_parts:
cls._field_values_for_entity(values, value, next_parts,
full_first_part)
else:
_add_value_to_values(values, value, full_first_part)
|
from django.utils.translation import ugettext_lazy as _
from mayan.apps.smart_settings.classes import SettingNamespace
from .literals import (
DEFAULT_CONVERTER_ASSET_STORAGE_BACKEND,
DEFAULT_CONVERTER_ASSET_STORAGE_BACKEND_ARGUMENTS,
DEFAULT_CONVERTER_GRAPHICS_BACKEND,
DEFAULT_CONVERTER_GRAPHICS_BACKEND_ARGUMENTS
)
from .setting_migrations import ConvertSettingMigration
namespace = SettingNamespace(
label=_('Converter'), migration_class=ConvertSettingMigration,
name='converter', version='0002'
)
setting_graphics_backend = namespace.add_setting(
default=DEFAULT_CONVERTER_GRAPHICS_BACKEND,
global_name='CONVERTER_GRAPHICS_BACKEND', help_text=_(
'Graphics conversion backend to use.'
)
)
setting_graphics_backend_arguments = namespace.add_setting(
default=DEFAULT_CONVERTER_GRAPHICS_BACKEND_ARGUMENTS,
global_name='CONVERTER_GRAPHICS_BACKEND_ARGUMENTS', help_text=_(
'Configuration options for the graphics conversion backend.'
)
)
setting_storage_backend = namespace.add_setting(
default=DEFAULT_CONVERTER_ASSET_STORAGE_BACKEND,
global_name='CONVERTER_ASSET_STORAGE_BACKEND', help_text=_(
'Path to the Storage subclass to use when storing assets.'
)
)
setting_storage_backend_arguments = namespace.add_setting(
default=DEFAULT_CONVERTER_ASSET_STORAGE_BACKEND_ARGUMENTS,
global_name='CONVERTER_ASSET_STORAGE_BACKEND_ARGUMENTS', help_text=_(
'Arguments to pass to the CONVERTER_ASSET_STORAGE_BACKEND.'
)
)
|
# coding: utf-8
"""
Inventory API
The Inventory API is used to create and manage inventory, and then to publish and manage this inventory on an eBay marketplace. There are also methods in this API that will convert eligible, active eBay listings into the Inventory API model. # noqa: E501
OpenAPI spec version: 1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PricingSummary(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'auction_reserve_price': 'Amount',
'auction_start_price': 'Amount',
'minimum_advertised_price': 'Amount',
'originally_sold_for_retail_price_on': 'str',
'original_retail_price': 'Amount',
'price': 'Amount',
'pricing_visibility': 'str'
}
attribute_map = {
'auction_reserve_price': 'auctionReservePrice',
'auction_start_price': 'auctionStartPrice',
'minimum_advertised_price': 'minimumAdvertisedPrice',
'originally_sold_for_retail_price_on': 'originallySoldForRetailPriceOn',
'original_retail_price': 'originalRetailPrice',
'price': 'price',
'pricing_visibility': 'pricingVisibility'
}
def __init__(self, auction_reserve_price=None, auction_start_price=None, minimum_advertised_price=None, originally_sold_for_retail_price_on=None, original_retail_price=None, price=None, pricing_visibility=None): # noqa: E501
"""PricingSummary - a model defined in Swagger""" # noqa: E501
self._auction_reserve_price = None
self._auction_start_price = None
self._minimum_advertised_price = None
self._originally_sold_for_retail_price_on = None
self._original_retail_price = None
self._price = None
self._pricing_visibility = None
self.discriminator = None
if auction_reserve_price is not None:
self.auction_reserve_price = auction_reserve_price
if auction_start_price is not None:
self.auction_start_price = auction_start_price
if minimum_advertised_price is not None:
self.minimum_advertised_price = minimum_advertised_price
if originally_sold_for_retail_price_on is not None:
self.originally_sold_for_retail_price_on = originally_sold_for_retail_price_on
if original_retail_price is not None:
self.original_retail_price = original_retail_price
if price is not None:
self.price = price
if pricing_visibility is not None:
self.pricing_visibility = pricing_visibility
@property
def auction_reserve_price(self):
"""Gets the auction_reserve_price of this PricingSummary. # noqa: E501
:return: The auction_reserve_price of this PricingSummary. # noqa: E501
:rtype: Amount
"""
return self._auction_reserve_price
@auction_reserve_price.setter
def auction_reserve_price(self, auction_reserve_price):
"""Sets the auction_reserve_price of this PricingSummary.
:param auction_reserve_price: The auction_reserve_price of this PricingSummary. # noqa: E501
:type: Amount
"""
self._auction_reserve_price = auction_reserve_price
@property
def auction_start_price(self):
"""Gets the auction_start_price of this PricingSummary. # noqa: E501
:return: The auction_start_price of this PricingSummary. # noqa: E501
:rtype: Amount
"""
return self._auction_start_price
@auction_start_price.setter
def auction_start_price(self, auction_start_price):
"""Sets the auction_start_price of this PricingSummary.
:param auction_start_price: The auction_start_price of this PricingSummary. # noqa: E501
:type: Amount
"""
self._auction_start_price = auction_start_price
@property
def minimum_advertised_price(self):
"""Gets the minimum_advertised_price of this PricingSummary. # noqa: E501
:return: The minimum_advertised_price of this PricingSummary. # noqa: E501
:rtype: Amount
"""
return self._minimum_advertised_price
@minimum_advertised_price.setter
def minimum_advertised_price(self, minimum_advertised_price):
"""Sets the minimum_advertised_price of this PricingSummary.
:param minimum_advertised_price: The minimum_advertised_price of this PricingSummary. # noqa: E501
:type: Amount
"""
self._minimum_advertised_price = minimum_advertised_price
@property
def originally_sold_for_retail_price_on(self):
"""Gets the originally_sold_for_retail_price_on of this PricingSummary. # noqa: E501
This field is needed if the Strikethrough Pricing (STP) feature will be used in the offer. This field indicates that the product was sold for the price in the <strong>originalRetailPrice</strong> field on an eBay site, or sold for that price by a third-party retailer. When using the <strong>createOffer</strong> or <strong>updateOffer</strong> calls, the seller will pass in a value of <code>ON_EBAY</code> to indicate that the product was sold for the <strong>originalRetailPrice</strong> on an eBay site, or the seller will pass in a value of <code>OFF_EBAY</code> to indicate that the product was sold for the <strong>originalRetailPrice</strong> through a third-party retailer. This field and the <strong>originalRetailPrice</strong> field are only applicable if the seller and listing are eligible to use the Strikethrough Pricing feature, a feature which is limited to the US (core site and Motors), UK, Germany, Canada (English and French versions), France, Italy, and Spain sites.<br/><br/>This field will be returned if set for the offer. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/inventory/types/slr:SoldOnEnum'>eBay API documentation</a> # noqa: E501
:return: The originally_sold_for_retail_price_on of this PricingSummary. # noqa: E501
:rtype: str
"""
return self._originally_sold_for_retail_price_on
@originally_sold_for_retail_price_on.setter
def originally_sold_for_retail_price_on(self, originally_sold_for_retail_price_on):
"""Sets the originally_sold_for_retail_price_on of this PricingSummary.
This field is needed if the Strikethrough Pricing (STP) feature will be used in the offer. This field indicates that the product was sold for the price in the <strong>originalRetailPrice</strong> field on an eBay site, or sold for that price by a third-party retailer. When using the <strong>createOffer</strong> or <strong>updateOffer</strong> calls, the seller will pass in a value of <code>ON_EBAY</code> to indicate that the product was sold for the <strong>originalRetailPrice</strong> on an eBay site, or the seller will pass in a value of <code>OFF_EBAY</code> to indicate that the product was sold for the <strong>originalRetailPrice</strong> through a third-party retailer. This field and the <strong>originalRetailPrice</strong> field are only applicable if the seller and listing are eligible to use the Strikethrough Pricing feature, a feature which is limited to the US (core site and Motors), UK, Germany, Canada (English and French versions), France, Italy, and Spain sites.<br/><br/>This field will be returned if set for the offer. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/inventory/types/slr:SoldOnEnum'>eBay API documentation</a> # noqa: E501
:param originally_sold_for_retail_price_on: The originally_sold_for_retail_price_on of this PricingSummary. # noqa: E501
:type: str
"""
self._originally_sold_for_retail_price_on = originally_sold_for_retail_price_on
@property
def original_retail_price(self):
"""Gets the original_retail_price of this PricingSummary. # noqa: E501
:return: The original_retail_price of this PricingSummary. # noqa: E501
:rtype: Amount
"""
return self._original_retail_price
@original_retail_price.setter
def original_retail_price(self, original_retail_price):
"""Sets the original_retail_price of this PricingSummary.
:param original_retail_price: The original_retail_price of this PricingSummary. # noqa: E501
:type: Amount
"""
self._original_retail_price = original_retail_price
@property
def price(self):
"""Gets the price of this PricingSummary. # noqa: E501
:return: The price of this PricingSummary. # noqa: E501
:rtype: Amount
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this PricingSummary.
:param price: The price of this PricingSummary. # noqa: E501
:type: Amount
"""
self._price = price
@property
def pricing_visibility(self):
"""Gets the pricing_visibility of this PricingSummary. # noqa: E501
This field is needed if the Minimum Advertised Price (MAP) feature will be used in the offer. This field is only applicable if an eligible US seller is using the Minimum Advertised Price (MAP) feature and a <strong>minimumAdvertisedPrice</strong> has been specified. The value set in this field will determine whether the MAP price is shown to a prospective buyer prior to checkout through a pop-up window accessed from the View Item page, or if the MAP price is not shown until the checkout flow after the buyer has already committed to buying the item. To show the MAP price prior to checkout, the seller will set this value to <code>PRE_CHECKOUT</code>. To show the MAP price after the buyer already commits to buy the item, the seller will set this value to <code>DURING_CHECKOUT</code>. This field will be ignored if the seller and/or the listing is not eligible for the MAP feature.<br/><br/>This field will be returned if set for the offer. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/inventory/types/slr:MinimumAdvertisedPriceHandlingEnum'>eBay API documentation</a> # noqa: E501
:return: The pricing_visibility of this PricingSummary. # noqa: E501
:rtype: str
"""
return self._pricing_visibility
@pricing_visibility.setter
def pricing_visibility(self, pricing_visibility):
"""Sets the pricing_visibility of this PricingSummary.
This field is needed if the Minimum Advertised Price (MAP) feature will be used in the offer. This field is only applicable if an eligible US seller is using the Minimum Advertised Price (MAP) feature and a <strong>minimumAdvertisedPrice</strong> has been specified. The value set in this field will determine whether the MAP price is shown to a prospective buyer prior to checkout through a pop-up window accessed from the View Item page, or if the MAP price is not shown until the checkout flow after the buyer has already committed to buying the item. To show the MAP price prior to checkout, the seller will set this value to <code>PRE_CHECKOUT</code>. To show the MAP price after the buyer already commits to buy the item, the seller will set this value to <code>DURING_CHECKOUT</code>. This field will be ignored if the seller and/or the listing is not eligible for the MAP feature.<br/><br/>This field will be returned if set for the offer. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/inventory/types/slr:MinimumAdvertisedPriceHandlingEnum'>eBay API documentation</a> # noqa: E501
:param pricing_visibility: The pricing_visibility of this PricingSummary. # noqa: E501
:type: str
"""
self._pricing_visibility = pricing_visibility
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PricingSummary, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PricingSummary):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
a = 123
print(a)
b = a
print(b)
c = 123.4
print(c)
d = c
print(d)
e = 'こんにちは'
print(e)
f = e
print(f)
g = True
print(g)
g = False
print(g) |
import logging
from typing import Optional
from networkx import shortest_path
import pulp
from vrpy.masterproblem import _MasterProblemBase
from vrpy.restricted_master_heuristics import _DivingHeuristic
logger = logging.getLogger(__name__)
class _MasterSolvePulp(_MasterProblemBase):
"""
Solves the master problem for the column generation procedure.
Inherits problem parameters from MasterProblemBase
"""
def __init__(self, *args):
super(_MasterSolvePulp, self).__init__(*args)
# create problem
self.prob = pulp.LpProblem("MasterProblem", pulp.LpMinimize)
# objective
self.objective = pulp.LpConstraintVar("objective")
# variables
self.y = {} # route selection variable
self.drop = {} # dropping variable
self.dummy = {} # dummy variable
self.dummy_bound = {} # dummy variable for vehicle bound cost
self.makespan = {} # maximum global span
# constrs
self.set_covering_constrs = {}
self.vehicle_bound_constrs = {}
self.drop_penalty_constrs = {}
self.makespan_constr = {}
# Restricted master heuristic
self.diving_heuristic = _DivingHeuristic()
# Parameter when minimizing global span
self._n_columns = 1000
self._formulate()
def solve(self, relax, time_limit):
self._solve(relax, time_limit)
logger.debug("master problem relax %s" % relax)
logger.debug("Status: %s" % pulp.LpStatus[self.prob.status])
logger.debug("Objective: %s" % pulp.value(self.prob.objective))
if pulp.LpStatus[self.prob.status] != "Optimal":
raise Exception("problem " + str(pulp.LpStatus[self.prob.status]))
if relax:
for r in self.routes:
val = pulp.value(self.y[r.graph["name"]])
if val > 0.1:
logger.debug("route %s selected %s" % (r.graph["name"], val))
duals = self.get_duals()
logger.debug("duals : %s" % duals)
return duals, self.prob.objective.value()
def solve_and_dive(self, time_limit):
self._solve(relax=True, time_limit=time_limit)
self.diving_heuristic.run_dive(self.prob)
self.prob.resolve()
return self.get_duals(), self.prob.objective.value()
def update(self, new_route):
"""Add new column."""
if self.minimize_global_span:
self._add_route_selection_variable_for_global_span(new_route)
else:
self._add_route_selection_variable(new_route)
def get_duals(self, relax: pulp.LpProblem = None):
"""Gets the dual values of each constraint of the master problem.
If an input LpProblem is given (assuming it is a copy),
it is used instead of self.prob.
Returns:
dict: Duals with constraint names as keys and dual variables as values
"""
duals = {}
# set covering duals
for node in self.G.nodes():
if (
node not in ["Source", "Sink"]
and "depot_from" not in self.G.nodes[node]
and "depot_to" not in self.G.nodes[node]
):
constr_name = "visit_node_%s" % node
if not relax:
duals[node] = self.prob.constraints[constr_name].pi
else:
duals[node] = relax.constraints[constr_name].pi
# num vehicles dual
if self.num_vehicles and not self.periodic:
duals["upper_bound_vehicles"] = {}
for k in range(len(self.num_vehicles)):
if not relax:
duals["upper_bound_vehicles"][k] = self.prob.constraints[
"upper_bound_vehicles_%s" % k
].pi
else:
duals["upper_bound_vehicles"][k] = relax.constraints[
"upper_bound_vehicles_%s" % k
].pi
# global span duals
if self.minimize_global_span:
for route in self.routes:
if not relax:
duals["makespan_%s" % route.graph["name"]] = self.prob.constraints[
"makespan_%s" % route.graph["name"]
].pi
else:
duals["makespan_%s" % route.graph["name"]] = relax.constraints[
"makespan_%s" % route.graph["name"]
].pi
return duals
def get_total_cost_and_routes(self, relax: bool):
best_routes = []
for r in self.routes:
val = pulp.value(self.y[r.graph["name"]])
if val is not None and val > 0:
logger.debug(
"%s cost %s load %s"
% (
shortest_path(r, "Source", "Sink"),
r.graph["cost"],
sum(self.G.nodes[v]["demand"] for v in r.nodes()),
)
)
best_routes.append(r)
if self.drop_penalty:
self.dropped_nodes = [
v for v in self.drop if pulp.value(self.drop[v]) > 0.5
]
self.prob.resolve()
total_cost = self.prob.objective.value()
if not relax and self.drop_penalty and len(self.dropped_nodes) > 0:
logger.info("dropped nodes : %s" % self.dropped_nodes)
logger.info("total cost = %s" % total_cost)
if not total_cost:
total_cost = 0
return total_cost, best_routes
def get_heuristic_distribution(self):
best_routes_heuristic = {
"BestPaths": 0,
"BestEdges1": 0,
"BestEdges2": 0,
"Exact": 0,
"Other": 0,
}
best_routes = []
for r in self.routes:
val = self.y[r.graph["name"]].value()
if val is not None and val > 0:
if "heuristic" not in r.graph:
r.graph["heuristic"] = "Other"
best_routes_heuristic[r.graph["heuristic"]] += 1
best_routes.append(r)
return best_routes, best_routes_heuristic
# Private methods to solve and output #
def _solve(self, relax: bool, time_limit: Optional[int]):
# Set variable types
if not relax:
# Set partitioning constraints and artificial variables to integer
# (for the periodic case)
for node in self.G.nodes():
if (
node not in ["Source", "Sink"]
and "depot_from" not in self.G.nodes[node]
and "depot_to" not in self.G.nodes[node]
):
for const in self.prob.constraints:
# Modify the self.prob object (the self.set_covering_constrs object cannot be modified (?))
if "visit_node" in const:
self.prob.constraints[const].sense = pulp.LpConstraintEQ
if (
self.periodic
and self.G.nodes[node]["frequency"] > 1
and node != "Source"
):
self.dummy[node].cat = pulp.LpInteger
# Set route variables to integer
for var in self.y.values():
var.cat = pulp.LpInteger
# Force vehicle bound artificial variable to 0
for var in self.dummy_bound.values():
if "artificial_bound_" in var.name:
var.upBound = 0
var.lowBound = 0
# self.prob.writeLP("master.lp")
# print(self.prob)
# Solve with appropriate solver
if self.solver == "cbc":
self.prob.solve(
pulp.PULP_CBC_CMD(
msg=False,
timeLimit=time_limit,
options=["startalg", "barrier", "crossover", "0"],
)
)
elif self.solver == "cplex":
self.prob.solve(
pulp.CPLEX_CMD(
msg=False,
timeLimit=time_limit,
options=["set lpmethod 4", "set barrier crossover -1"],
)
)
elif self.solver == "gurobi":
gurobi_options = [
("Method", 2), # 2 = barrier
("Crossover", 0),
]
# Only specify time limit if given (o.w. errors)
if time_limit is not None:
gurobi_options.append(
(
"TimeLimit",
time_limit,
)
)
self.prob.solve(pulp.GUROBI(msg=False, options=gurobi_options))
# Private methods for formulating and updating the problem #
def _formulate(self):
"""
Set covering formulation.
Variables are continuous when relaxed, otherwise binary.
"""
self._add_set_covering_constraints()
if self.num_vehicles and not self.periodic:
self._add_bound_vehicles()
if self.minimize_global_span:
self._add_maximum_makespan_constraints()
# Add variables #
# Route selection variables
if self.minimize_global_span:
for route in self.routes:
self._add_route_selection_variable_for_global_span(route)
for route in range(1, self._n_columns):
self.prob += self.makespan_constr[route]
else:
for route in self.routes:
self._add_route_selection_variable(route)
# if dropping nodes is allowed
if self.drop_penalty:
self._add_drop_variables()
# if frequencies, dummy variables are needed to find initial solution
if self.periodic:
self._add_artificial_variables()
# Add constraints to problem
for k in self.set_covering_constrs:
self.prob += self.set_covering_constrs[k]
for k in self.vehicle_bound_constrs:
self.prob += self.vehicle_bound_constrs[k]
# Add dummy_vehicle variables
self._add_vehicle_dummy_variables()
if self.minimize_global_span:
self._add_makespan_variable()
# Set objective function
self.prob.sense = pulp.LpMinimize
self.prob.setObjective(self.objective)
def _add_set_covering_constraints(self):
"""
All vertices must be visited exactly once, or periodically if
frequencies are given.
If dropping nodes is allowed, the drop variable is activated
(as well as a penalty in the cost function).
"""
for node in self.G.nodes():
if (
node not in ["Source", "Sink"]
and "depot_from" not in self.G.nodes[node]
and "depot_to" not in self.G.nodes[node]
):
# Set RHS
right_hand_term = (
self.G.nodes[node]["frequency"] if self.periodic else 1
)
# Set constraint sign (>= or =)
sign = pulp.LpConstraintGE # if self.relax else pulp.LpConstraintEQ
# Save set covering constraints
self.set_covering_constrs[node] = pulp.LpConstraintVar(
"visit_node_%s" % node, sign, right_hand_term
)
def _add_route_selection_variable(self, route):
self.y[route.graph["name"]] = pulp.LpVariable(
"y{}".format(route.graph["name"]),
lowBound=0,
upBound=1,
cat=pulp.LpContinuous,
e=(
pulp.lpSum(
self.set_covering_constrs[r]
for r in route.nodes()
if r not in ["Source", "Sink"]
)
+ pulp.lpSum(
self.vehicle_bound_constrs[k]
for k in range(len(self.num_vehicles))
if route.graph["vehicle_type"] == k
)
+ route.graph["cost"] * self.objective
),
)
def _add_route_selection_variable_for_global_span(self, route):
self.y[route.graph["name"]] = pulp.LpVariable(
"y{}".format(route.graph["name"]),
lowBound=0,
upBound=1,
cat=pulp.LpInteger,
e=(
pulp.lpSum(
self.set_covering_constrs[r]
for r in route.nodes()
if r not in ["Source", "Sink"]
)
+ pulp.lpSum(
self.vehicle_bound_constrs[k]
for k in range(len(self.num_vehicles))
if route.graph["vehicle_type"] == k
)
+ route.graph["cost"] * self.makespan_constr[route.graph["name"]]
),
)
def _add_vehicle_dummy_variables(self):
for key in range(len(self.num_vehicles)):
self.dummy_bound[key] = pulp.LpVariable(
"artificial_bound_%s" % key,
lowBound=0,
upBound=None,
cat=pulp.LpContinuous,
e=((-1) * self.vehicle_bound_constrs[key] + 1e10 * self.objective),
)
def _add_drop_variables(self):
"""
Boolean variable.
drop[v] takes value 1 if and only if node v is dropped.
"""
for node in self.G.nodes():
if self.G.nodes[node]["demand"] > 0 and node != "Source":
self.drop[node] = pulp.LpVariable(
"drop_%s" % node,
lowBound=0,
upBound=1,
cat=pulp.LpInteger,
e=(
self.drop_penalty * self.objective
+ self.set_covering_constrs[node]
),
)
def _add_artificial_variables(self):
"""Continuous variable used for finding initial feasible solution."""
for node in self.G.nodes():
if self.G.nodes[node]["frequency"] > 1 and node != "Source":
self.dummy[node] = pulp.LpVariable(
"periodic_%s" % node,
lowBound=0,
upBound=None,
cat=pulp.LpContinuous,
e=(1e10 * self.objective + self.set_covering_constrs[node]),
)
def _add_bound_vehicles(self):
"""Adds empty constraints and sets the right hand side"""
for k in range(len(self.num_vehicles)):
sign = pulp.LpConstraintEQ if self.use_all_vehicles else pulp.LpConstraintLE
self.vehicle_bound_constrs[k] = pulp.LpConstraintVar(
"upper_bound_vehicles_%s" % k, sign, self.num_vehicles[k]
)
def _add_makespan_variable(self, new_route=None):
"""Defines maximum makespan of each route"""
if new_route:
constraints = [self.makespan_constr[new_route.graph["name"]]]
cost = 0
else:
# constraints = [self.makespan_constr[r.graph["name"]] for r in self.routes]
constraints = [self.makespan_constr[x] for x in range(1, 75)]
cost = 1
self.makespan = pulp.LpVariable(
"makespan",
lowBound=0,
upBound=None,
cat=pulp.LpContinuous,
e=(cost * self.objective - pulp.lpSum(constraints)),
)
def _add_maximum_makespan_constraints(self):
"""Defines maximum makespan"""
for route in range(1, self._n_columns):
self.makespan_constr[route] = pulp.LpConstraintVar(
"makespan_%s" % route,
pulp.LpConstraintLE,
0,
)
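# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original vrpy source): a minimal,
# standalone sketch of the set covering pattern that _formulate builds above,
# reusing this module's pulp import. Pick a cheapest subset of candidate
# routes so that every customer node is covered at least once. The route
# names, costs and coverage sets are toy data invented for demonstration.
def _set_covering_sketch():
    routes = {"r1": (3, {"a", "b"}), "r2": (2, {"b", "c"}), "r3": (4, {"a", "c"})}
    nodes = {"a", "b", "c"}
    prob = pulp.LpProblem("toy_set_covering", pulp.LpMinimize)
    y = {r: pulp.LpVariable("y_%s" % r, cat=pulp.LpBinary) for r in routes}
    # objective: total cost of the selected routes
    prob += pulp.lpSum(cost * y[r] for r, (cost, _) in routes.items())
    # covering constraints: every node must be visited at least once
    for node in nodes:
        prob += (
            pulp.lpSum(y[r] for r, (_, cover) in routes.items() if node in cover) >= 1,
            "visit_node_%s" % node,
        )
    prob.solve(pulp.PULP_CBC_CMD(msg=False))
    # expected selection: r1 and r2 (total cost 5)
    return {r: y[r].value() for r in routes}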
|
from office365.sharepoint.base_entity import BaseEntity
class FieldLink(BaseEntity):
pass
|
'''Solution reverses each word with a helper function and then joins the
reversed words back into a string using the join method.'''
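# Worked example: Solution().reverseWords("Let's take LeetCode contest")
# returns "s'teL ekat edoCteeL tsetnoc".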
class Solution:
def rev(self, s:str) -> str:
s = s[::-1]
return s
def reverseWords(self, s: str) -> str:
ls = s.split()
for i in range(len(ls)):
ls[i] = self.rev(ls[i])
s = " ".join(ls)
return s |
#!/usr/bin/python
import os
import sys
def train():
os.system("python tools/train_nlu.py")
os.system("sleep 5")
os.system("python tools/train_core.py")
os.system("sleep 5")
def test():
os.system('python tools/test_core.py')
def main(todo=None):
if (todo == 't'):
train()
elif (todo == 'tt'):
train()
test()
elif (todo == 'test'):
test()
if __name__ == '__main__':
try:
main(sys.argv[1])
except IndexError:
print("""Usage: {} <options>\n\toptions
-\n\t\tt\ttrain\n\t\ttt\ttrain and
test""".format(sys.argv[0]))
|
#!/usr/bin/python
# Define a stack to host a Consul cluster.
#
# Template Parameters (provided at template creation time):
# - name
# Name of the stack, of course. Required.
# - description
# Description of the stack. Please provide one.
# - region
# The AWS region in which this template will be executed
# - bucket
# The S3 bucket where configuration and deployment files are located
# - vpc_id
# ID of the VPC in which this stack is built
# - vpc_cidr
# CIDR block of the vpc
# - server_subnet_ids
# List of subnets in which the consul server cluster should be built.
# - ui_subnet_ids
# List of subnets in which the consul ui cluster should be built.
# - server_cluster_size
# How many instances should participate in the cluster? Should be at least 3.
# - server_instance_type
# Instance type of the server instances. Defaults to t2.micro
# - ui_instance_type
# Instance type of the ui instances. Defaults to t2.micro
#
# Stack Parameters (provided to the template at stack create/update time):
#
# - ConsulKey
# Name of the key pair to use to connect to Consul cluster server instances
#
# Stack Outputs:
#
# - Consul{N}ASG
# ID of the autoscaling group controlling the Nth cluster member
# - Consul{N}ENI
# ID of the elastic network interface attached to the Nth cluster member
import troposphere.ec2 as ec2
import troposphere.iam as iam
import troposphere.autoscaling as asg
import troposphere.cloudformation as cf
import troposphere.cloudwatch as cw
import troposphere.logs as logs
import troposphere as tp
from scaffold.cf.template import asgtag, TemplateBuilder, AMI_REGION_MAP_NAME, REF_STACK_NAME
from scaffold.cf import net
from . import ConsulSoftware
class ConsulTemplate(TemplateBuilder):
BUILD_PARM_NAMES = ['vpc_id', 'vpc_cidr', 'server_subnet_ids', 'ui_subnet_ids', 'server_cluster_size',
'server_instance_type', 'ui_instance_type']
CONSUL_KEY_PARAM_NAME = 'ConsulKey'
def __init__(self, name,
region,
bucket,
key_prefix,
vpc_id,
vpc_cidr,
server_subnet_ids,
ui_subnet_ids=(),
description='[REPLACEME]',
server_cluster_size=3,
server_instance_type='t2.micro',
ui_instance_type='t2.micro'):
super(ConsulTemplate, self).__init__(name, description, ConsulTemplate.BUILD_PARM_NAMES)
self.region = region
self.bucket = bucket
self.key_prefix = key_prefix
self.vpc_id = vpc_id
self.vpc_cidr = vpc_cidr
self.server_subnet_ids = list(server_subnet_ids)
self.ui_subnet_ids = list(ui_subnet_ids)
self.server_cluster_size = int(server_cluster_size)
self.server_instance_type = server_instance_type
self.ui_instance_type = ui_instance_type
def internal_build_template(self):
self.create_parameters()
self.create_logstreams()
self.create_consul_sg()
self.create_server_cluster()
if len(self.ui_subnet_ids) > 0:
self.create_ui_cluster()
def create_server_cluster(self):
server_sg = self.create_server_sg()
self.iam_profile = self.create_server_iam_profile()
server_subnet_len = len(self.server_subnet_ids)
self.server_enis = [
self.create_server_eni(i, self.server_subnet_ids[i % server_subnet_len])
for i in range(self.server_cluster_size)
]
self.server_asgs = [
self.create_server_asg(i, server_sg, self.iam_profile,
self.server_subnet_ids[i % server_subnet_len], self.server_enis[i])
for i in range(self.server_cluster_size)
]
self.server_asgs[0].Metadata = self._create_server_metadata()
def create_ui_cluster(self):
self.ui_sg = self.create_ui_sg()
self.ui_asg = self.create_ui_asg(self.ui_sg, self.iam_profile)
def create_parameters(self):
self.add_parameter(tp.Parameter(self.CONSUL_KEY_PARAM_NAME, Type='String'))
def create_server_eni(self, index, subnet_id):
eni = ec2.NetworkInterface('Consul{}ENI'.format(index),
Description='ENI for Consul cluster member {}'.format(index),
GroupSet=[tp.Ref(self.consul_sg)],
SourceDestCheck=True,
SubnetId=subnet_id,
Tags=self._rename('{} ENI-'+str(index)))
self.add_resource(eni)
self.output(eni)
return eni
def create_server_sg(self):
rules = [
net.sg_rule(self.vpc_cidr, net.SSH, net.TCP)
]
sg = ec2.SecurityGroup('ConsulServerSecurityGroup',
GroupDescription='Consul Server Instance Security Group',
SecurityGroupIngress=rules,
VpcId=self.vpc_id,
Tags=self.default_tags)
self.add_resource(sg)
return sg
def create_server_asg(self, index, instance_sg, iam_profile, subnet_id, eni):
lc_name = self._server_lc_name(index)
lc = asg.LaunchConfiguration(lc_name,
ImageId=tp.FindInMap(AMI_REGION_MAP_NAME, self.region, 'GENERAL'),
InstanceType=self.server_instance_type,
SecurityGroups=[tp.Ref(instance_sg)],
KeyName=tp.Ref(self.CONSUL_KEY_PARAM_NAME),
IamInstanceProfile=tp.Ref(iam_profile),
InstanceMonitoring=False,
AssociatePublicIpAddress=False,
UserData=self._create_server_userdata(eni, index))
group = asg.AutoScalingGroup(self._server_asg_name(index),
MinSize=1, MaxSize=1,
LaunchConfigurationName=tp.Ref(lc),
VPCZoneIdentifier=[subnet_id],
Tags=asgtag(self._rename('{} Server-'+str(index))))
self.add_resources(lc, group)
self.output(group)
return group
def _server_lc_name(self, index):
return 'Consul{}LC'.format(index)
def _server_asg_name(self, index):
return 'Consul{}ASG'.format(index)
def create_consul_sg(self):
# see https://www.consul.io/docs/agent/options.html#ports-used
ingress_rules = [
net.sg_rule(self.vpc_cidr, port, net.TCP) for port in (53, (8300, 8302), 8400, 8500, 8600)
] + [
net.sg_rule(self.vpc_cidr, port, net.UDP) for port in ((8301, 8302), 8600)
]
self.consul_sg = ec2.SecurityGroup('ConsulAgentSecurityGroup',
GroupDescription='Security group for Consul Agents',
SecurityGroupIngress=ingress_rules,
VpcId=self.vpc_id,
Tags=self.default_tags)
self.add_resource(self.consul_sg)
self.output_ref('ConsulAgentSG', self.consul_sg)
def create_ui_sg(self):
ingress_rules = [
net.sg_rule(net.CIDR_ANY, port, net.TCP) for port in [net.HTTP, net.HTTPS]
] + [
net.sg_rule(self.vpc_cidr, net.SSH, net.TCP)
]
# egress_rules = [] # TODO: egress should only talk to private subnets over specified ports,
# and to everyone over ephemeral ports
sg = ec2.SecurityGroup('ConsulUISecurityGroup',
GroupDescription='Consul UI Server Security Group',
SecurityGroupIngress=ingress_rules,
VpcId=self.vpc_id,
Tags=self.default_tags)
self.add_resource(sg)
return sg
def create_ui_asg(self, instance_sg, iam_profile):
lc = asg.LaunchConfiguration('ConsulUILC',
ImageId=tp.FindInMap(AMI_REGION_MAP_NAME, self.region, 'GENERAL'),
InstanceType=self.ui_instance_type,
SecurityGroups=[tp.Ref(self.consul_sg), tp.Ref(instance_sg)],
KeyName=tp.Ref(self.CONSUL_KEY_PARAM_NAME),
IamInstanceProfile=tp.Ref(iam_profile),
InstanceMonitoring=False,
AssociatePublicIpAddress=True, # TODO: Do we need this if we are behind an ELB?
UserData=self._create_ui_userdata())
group = asg.AutoScalingGroup('ConsulUIASG',
MinSize=1, MaxSize=2,
LaunchConfigurationName=tp.Ref(lc),
Cooldown=600,
HealthCheckGracePeriod=600,
HealthCheckType='EC2', # TODO: switch to ELB
TerminationPolicies=['OldestLaunchConfiguration', 'OldestInstance'],
# LoadBalancerNames=... # TODO
VPCZoneIdentifier=self.ui_subnet_ids,
Tags=asgtag(self._rename('{} UI')))
scale_out = asg.ScalingPolicy('ConsulUIScaleOutPolicy',
AutoScalingGroupName=tp.Ref(group),
AdjustmentType='ChangeInCapacity',
Cooldown=600,
PolicyType='SimpleScaling',
ScalingAdjustment=1)
scale_in = asg.ScalingPolicy('ConsulUIScaleInPolicy',
AutoScalingGroupName=tp.Ref(group),
AdjustmentType='ChangeInCapacity',
Cooldown=600,
PolicyType='SimpleScaling',
ScalingAdjustment=-1)
# TODO: better metrics, like response time or something
scale_out_alarm = cw.Alarm('ConsulUIScaleOutAlarm',
ActionsEnabled=True,
AlarmActions=[tp.Ref(scale_out)],
AlarmDescription='Scale out ConsulUIASG when instance CPU exceeds 50% for 15 minutes',
ComparisonOperator='GreaterThanThreshold',
Dimensions=[cw.MetricDimension(Name='AutoScalingGroupName', Value=tp.Ref(group))],
EvaluationPeriods=3,
MetricName='CPUUtilization',
Namespace='AWS/EC2',
Period=300,
Statistic='Average',
Threshold='50',
Unit='Percent')
scale_in_alarm = cw.Alarm('ConsulUIScaleInAlarm',
ActionsEnabled=True,
AlarmActions=[tp.Ref(scale_in)],
AlarmDescription='Scale in ConsulUIASG when instance CPU < 25% for 15 minutes',
ComparisonOperator='LessThanThreshold',
Dimensions=[cw.MetricDimension(Name='AutoScalingGroupName', Value=tp.Ref(group))],
EvaluationPeriods=3,
MetricName='CPUUtilization',
Namespace='AWS/EC2',
Period=300,
Statistic='Average',
Threshold='25',
Unit='Percent')
self.add_resources(lc, group, scale_out, scale_in, scale_out_alarm, scale_in_alarm)
self.output(group)
return group
def create_logstreams(self):
self.log_group = logs.LogGroup('ConsulLogGroup',
RetentionInDays=3)
self.add_resources(self.log_group)
def create_server_iam_profile(self):
# TODO: use common ec2_assumerolepolicydocument
role = iam.Role('ConsulInstanceRole',
AssumeRolePolicyDocument={
'Statement': [{
'Effect': 'Allow',
'Principal': {'Service': ['ec2.amazonaws.com']},
'Action': ['sts:AssumeRole']
}]
},
Policies=[
iam.Policy(
PolicyName='ConsulInstance',
PolicyDocument={
'Statement': [{
'Effect': 'Allow',
'Resource': ['*'],
'Action': ['ec2:Attach*', 'ec2:Describe*']
}, {
'Effect': 'Allow',
'Resource': 'arn:aws:s3:::{}/*'.format(self.bucket),
'Action': ['s3:Get*']
}, {
'Effect': 'Allow',
'Resource': 'arn:aws:logs:{}:*:*'.format(self.region),
'Action': [
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:PutLogEvents',
'logs:DescribeLogStreams'
]
}]
}
)
])
profile = iam.InstanceProfile('ConsulInstanceProfile',
Path='/',
Roles=[tp.Ref(role)])
self.add_resources(role, profile)
return profile
def _create_server_userdata(self, eni, cluster_index):
resource_name = self._server_asg_name(0)
startup = [
'#!/bin/bash\n',
'yum update -y && yum install -y yum-cron && chkconfig yum-cron on\n',
'REGION=', self.region, '\n',
'ENI_ID=', tp.Ref(eni), '\n',
'INS_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id)\n'
'aws ec2 attach-network-interface --instance-id $INS_ID --device-index 1 --network-interface-id $ENI_ID --region $REGION\n',
'mkdir -p -m 0755 /opt/consul\n'
'echo "', cluster_index, '" > /opt/consul/cluster_index\n',
'echo $ENI_ID > /opt/consul/eni_id\n',
'/opt/aws/bin/cfn-init -v ',
' --stack ', REF_STACK_NAME,
' --resource ', resource_name,
' --configsets install_server',
' --region $REGION\n',
]
return tp.Base64(tp.Join('', startup)) # TODO: There has GOT to be a better way to do userdata. UPDATE: there is, a bit. See ELK stack.
def _create_ui_userdata(self):
resource_name = self._server_asg_name(0)
startup = [
'#!/bin/bash\n',
'yum update -y && yum install -y yum-cron && chkconfig yum-cron on\n',
'REGION=', self.region, '\n',
'mkdir -p -m 0755 /opt/consul\n'
'/opt/aws/bin/cfn-init -v ',
' --stack ', REF_STACK_NAME,
' --resource ', resource_name,
' --configsets install_ui',
' --region $REGION\n',
]
return tp.Base64(tp.Join('', startup)) # TODO: There has GOT to be a better way to do userdata. UPDATE: there is, a bit. See ELK stack.
def _create_server_metadata(self):
return cf.Metadata(
cf.Init(
cf.InitConfigSets(
install_server=['install', 'config_server', 'startup'],
install_ui=['install', 'config_ui', 'startup']),
install=self._create_install_initconfig(),
startup=self._create_startup_initconfig(),
config_server=self._create_config_server_initconfig(),
config_ui=self._create_config_ui_initconfig()
))
def _get_consul_dir(self):
return '/opt/consul'
def _get_config_consul_py(self):
return '{}/config_consul.py'.format(self._get_consul_dir())
def _get_consul_config_file(self):
return '{}/config/config.json'.format(self._get_consul_dir())
def _get_consul_data_dir(self):
return '{}/data'.format(self._get_consul_dir())
def _get_consul_agent_dir(self):
return '{}/agent'.format(self._get_consul_dir())
def _get_consul_ui_dir(self):
return '{}/ui'.format(self._get_consul_dir())
def _get_consul_source_prefix(self):
return self._get_s3_url(self.key_prefix)
def _get_s3_url(self, key_prefix):
return 'http://{}.s3.amazonaws.com/{}'.format(self.bucket, key_prefix)
def _create_install_initconfig(self):
config_consul_py = self._get_config_consul_py()
consul_agent_dir = self._get_consul_agent_dir()
source_prefix = self._get_consul_source_prefix()
return cf.InitConfig(
packages={
'python': {
'botocore': ['1.4.60'], # Lesson learned, pin versions.
'boto3': ['1.4.1'] # boto3, why don't you declare botocore as a dependency?
}
},
# groups={}, # do we need a consul group?
# users={}, # do we need a consul user?
sources={
consul_agent_dir: self._get_s3_url(ConsulSoftware.linux_s3_key())
},
files={
config_consul_py: {
'source': '{}/config_consul.py'.format(source_prefix),
'mode': '000755',
'owner': 'root',
'group': 'root'
},
'/etc/init.d/consul': {
'source': '{}/consul.service'.format(source_prefix),
'mode': '000755',
'owner': 'root',
'group': 'root'
},
},
commands={
'20_mode': {
'command': 'chmod 755 {}/consul'.format(consul_agent_dir)
},
}
)
def _create_startup_initconfig(self):
config_consul_py = self._get_config_consul_py()
consul_config_file = self._get_consul_config_file()
consul_data_dir = self._get_consul_data_dir()
consul_agent_dir = self._get_consul_agent_dir()
return cf.InitConfig(
commands={
'10_dirs': {
'command': 'mkdir -m 0755 -p {}'.format(consul_data_dir)
},
'30_consul_config': {
'command': 'python {0} {1}'.format(config_consul_py, consul_config_file)
},
'50_chkconfig': {
'command': 'chkconfig --add consul'
}
},
services={
'sysvinit': {
'consul': {
'enabled': 'true',
'ensureRunning': 'true',
'files': [consul_config_file],
'sources': [consul_agent_dir],
'commands': 'config'
}
},
}
)
def _create_config_server_initconfig(self):
cwlogs_config_file = '/opt/cw-logs/cwlogs.cfg'
config_cwlogs_py = '/opt/cw-logs/cwlogs.py'
consul_config_file = self._get_consul_config_file()
consul_data_dir = self._get_consul_data_dir()
source_prefix = self._get_consul_source_prefix()
return cf.InitConfig(
files={
# See https://www.consul.io/docs/agent/options.html#configuration_files
consul_config_file: {
'content': {
'datacenter': self.region,
'data_dir': consul_data_dir,
'log_level': 'INFO',
'server': True,
'bootstrap_expect': self.server_cluster_size,
'bind_addr': 'REPLACE AT RUNTIME',
'retry_join': 'REPLACE AT RUNTIME',
'_eni_ids': [tp.Ref(e) for e in self.server_enis] # used for runtime resolution
},
'mode': '000755',
'owner': 'root', # could be consul user?
'group': 'root' # could be consul group?
},
'/opt/cw-logs/awslogs-agent-setup.py': {
'source': 'https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py',
'mode': '000755',
'owner': 'root',
'group': 'root'
},
cwlogs_config_file: {
'content': tp.Join('', [
'[general]\n',
'state_file = /var/awslogs/state/agent-state\n',
'\n',
'[consul_agent]\n',
'file = /var/log/consul\n',
'datetime_format = %b %d %H:%M:%S\n',
'log_group_name = ', tp.Ref(self.log_group), '\n',
'log_stream_name = REPLACE_AT_RUNTIME\n'
]),
'mode': '000755',
'owner': 'root',
'group': 'root'
},
config_cwlogs_py: {
'source': '{}/config_cwlogs.py'.format(source_prefix),
'mode': '000755',
'owner': 'root',
'group': 'root'
}
},
commands={
'31_cwlogs_config': {
'command': 'python {0} {1}'.format(config_cwlogs_py, cwlogs_config_file)
},
'40_wait': {
'command': 'while [ `ifconfig | grep "inet addr" | wc -l` -lt 3 ]; do echo "waiting for ip addr" >> /opt/consul/wait && sleep 2; done'
},
'60_cwlogs': {
'command': 'python /opt/cw-logs/awslogs-agent-setup.py -n -r {} -c {}'.format(self.region, cwlogs_config_file)
},
}
)
def _create_config_ui_initconfig(self):
ui_dir = self._get_consul_ui_dir()
consul_config_file = self._get_consul_config_file()
consul_data_dir = self._get_consul_data_dir()
return cf.InitConfig(
sources={
ui_dir: self._get_s3_url(ConsulSoftware.ui_s3_key())
},
files={
# See https://www.consul.io/docs/agent/options.html#configuration_files
consul_config_file: {
'content': {
'addresses': {
'http': '0.0.0.0'
},
'datacenter': self.region,
'data_dir': consul_data_dir,
'log_level': 'INFO',
'retry_join': 'REPLACE AT RUNTIME',
'ports': {
'http': 80
},
'ui': True,
'ui_dir': ui_dir,
'_eni_ids': [tp.Ref(e) for e in self.server_enis] # used for runtime resolution
},
'mode': '000755',
'owner': 'root', # could be consul user?
'group': 'root' # could be consul group?
}
}
)
if __name__ == '__main__':
import sys
name = sys.argv[1] if len(sys.argv) > 1 else 'Test'
region = sys.argv[2] if len(sys.argv) > 2 else 'us-west-2'
bucket = sys.argv[3] if len(sys.argv) > 3 else 'thousandleaves-us-west-2-laurel-deploy'
vpc_id = sys.argv[4] if len(sys.argv) > 4 else 'vpc-deadbeef'
vpc_cidr = sys.argv[5] if len(sys.argv) > 5 else '10.0.0.0/16'
server_subnet_ids = sys.argv[6:] if len(sys.argv) > 6 else ['subnet-deadbeef', 'subnet-cab4abba']
ui_subnet_ids = server_subnet_ids
key_prefix = 'scaffold/consul-YYYYMMDD-HHmmss'
template = ConsulTemplate(name, region, bucket, key_prefix, vpc_id, vpc_cidr, server_subnet_ids, ui_subnet_ids)
template.build_template()
print(template.to_json())
|
"""
Request and Response model for position book request
"""
"""
Request and Response model for trade book request
"""
from typing import Optional
from pydantic import BaseModel
from datetime import datetime
from ....common.enums import ResponseStatus
from ....utils.decoders import build_loader, datetime_decoder
__all__ = ['PositionBookRequestModel', 'PositionBookResponseModel']
class PositionBookRequestModel(BaseModel):
"""
The request model for position book request
"""
uid: str
"""Logged in User Id"""
actid: str
"""Account Id of logged in user"""
class PositionBookResponseModel(BaseModel):
"""
The response model for position book endpoint
"""
stat: ResponseStatus
"""The position book success or failure status"""
request_time: Optional[datetime]
"""It will be present only on successful response."""
exch: Optional[str]
"""Exchange Segment"""
tsym: Optional[str]
"""Trading symbol / contract on which order is placed."""
token: Optional[str]
"""Token"""
uid: Optional[str]
"""User Id"""
actid: Optional[str]
"""Account Id"""
prd: Optional[str]
"""Display product alias name, using prarr returned in user details."""
netqty: Optional[str]
"""Net Position quantity"""
netavgprc: Optional[str]
"""Net position average price"""
daybuyqty: Optional[str]
"""Day Buy Quantity"""
daysellqty: Optional[str]
"""Day Sell Quantity"""
daybuyavgprc: Optional[str]
"""Day Buy average price"""
daysellavgprc: Optional[str]
"""Day buy average price"""
daybuyamt: Optional[str]
"""Day Buy Amount"""
daysellamt: Optional[str]
"""Day Sell Amount"""
cfbuyqty: Optional[str]
"""Carry Forward Buy Quantity"""
cforgavgprc: Optional[str]
"""Original Avg Price"""
cfsellqty: Optional[str]
"""Carry Forward Sell Quantity"""
cfbuyavgprc: Optional[str]
"""Carry Forward Buy average price"""
cfsellavgprc: Optional[str]
"""Carry Forward Buy average price"""
cfbuyamt: Optional[str]
"""Carry Forward Buy Amount"""
cfsellamt: Optional[str]
"""Carry Forward Sell Amount"""
lp: Optional[str]
"""LTP"""
rpnl: Optional[str]
"""RealizedPNL"""
urmtom: Optional[str]
"""
UnrealizedMTOM.
(Can be recalculated on each LTP update as: netqty * (lp from web socket - netavgprc) * prcftr.)
bep: Break even price.
"""
openbuyqty: Optional[str]
opensellqty: Optional[str]
openbuyamt: Optional[str]
opensellamt: Optional[str]
openbuyavgprc: Optional[str]
opensellavgprc: Optional[str]
mult: Optional[str]
pp: Optional[str]
"""Price precision"""
ti: Optional[str]
"""Tick size"""
ls: Optional[str]
"""Lot size"""
prcftr: Optional[str]
"gn*pn/(gd*pd)"
emsg: Optional[str]
"""Error message if the request failed"""
class Config:
"""model configuration"""
json_loads = build_loader({
"request_time": datetime_decoder()
}) |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schedule corpus pruning tasks."""
from base import tasks
from base import utils
from build_management import build_manager
from datastore import data_types
from datastore import fuzz_target_utils
from handlers import base_handler
from libs import handler
from metrics import logs
def _get_latest_job_revision(job):
"""Return the latest release revision for a job."""
job_environment = job.get_environment()
release_build_bucket_path = job_environment.get('RELEASE_BUILD_BUCKET_PATH')
if not release_build_bucket_path:
logs.log_error('Failed to get release build url pattern for %s.' % job.name)
return None
revisions = build_manager.get_revisions_list(release_build_bucket_path)
if not revisions:
logs.log_error('Failed to get revisions list for %s.' % job.name)
return None
logs.log('Latest revision for %s is %d.' % (job.name, revisions[-1]))
return revisions[-1]
def get_tasks_to_schedule():
"""Return (task_target, job_name, queue_name) arguments to schedule a task."""
for job in data_types.Job.query():
if not utils.string_is_true(job.get_environment().get('CORPUS_PRUNE')):
continue
if utils.string_is_true(job.get_environment().get('CUSTOM_BINARY')):
# Custom binary jobs do not have revisions.
latest_revision = None
else:
latest_revision = _get_latest_job_revision(job)
if not latest_revision:
continue
queue_name = tasks.queue_for_job(job.name)
for target_job in fuzz_target_utils.get_fuzz_target_jobs(job=job.name):
task_target = target_job.fuzz_target_name
if latest_revision:
task_target += '@%s' % latest_revision
yield (task_target, job.name, queue_name)
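# Hedged example of what the generator above yields (names are invented for
# illustration; real values come from the Job and FuzzTargetJob entities):
#   ('libFuzzer_my_fuzzer@1234', 'libfuzzer_asan_my_project', 'jobs-linux')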
class Handler(base_handler.Handler):
"""Schedule corpus pruning tasks.."""
@handler.check_cron()
def get(self):
"""Schedule the corpus pruning tasks."""
for task_target, job_name, queue_name in get_tasks_to_schedule():
tasks.add_task('corpus_pruning', task_target, job_name, queue=queue_name)
|
import sys
from unittest.mock import MagicMock
import pytest
import pytorch_lightning as pl
import torch
from transformers import AutoTokenizer
from lightning_transformers.core.nlp import HFBackboneConfig
from lightning_transformers.task.nlp.translation import TranslationTransformer, WMT16TranslationDataModule
from lightning_transformers.task.nlp.translation.config import TranslationConfig, TranslationDataConfig
from lightning_transformers.task.nlp.translation.data import TranslationDataModule
def test_smoke_train(hf_cache_path):
class TestModel(TranslationTransformer):
def configure_optimizers(self):
return torch.optim.AdamW(self.parameters(), lr=1e-5)
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path='patrickvonplaten/t5-tiny-random')
model = TestModel(backbone=HFBackboneConfig(pretrained_model_name_or_path='patrickvonplaten/t5-tiny-random'))
dm = WMT16TranslationDataModule(
cfg=TranslationDataConfig(
batch_size=1,
dataset_name='wmt16',
dataset_config_name='ro-en',
source_language='en',
target_language='ro',
cache_dir=hf_cache_path,
limit_train_samples=16,
limit_val_samples=16,
limit_test_samples=16,
max_source_length=32,
max_target_length=32
),
tokenizer=tokenizer
)
trainer = pl.Trainer(fast_dev_run=True)
trainer.fit(model, dm)
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
def test_smoke_train_e2e(script_runner):
script_runner.hf_train(task='translation', dataset='wmt16', model='patrickvonplaten/t5-tiny-random')
def test_smoke_predict_e2e(script_runner):
y = script_runner.hf_predict(['+x="¡Hola Sean!"'], task='translation', model='patrickvonplaten/t5-tiny-random')
assert len(y) == 1
assert isinstance(y[0]['translation_text'], str)
def test_model_has_correct_cfg():
model = TranslationTransformer(HFBackboneConfig(pretrained_model_name_or_path='patrickvonplaten/t5-tiny-random'))
assert model.hparams.downstream_model_type == 'transformers.AutoModelForSeq2SeqLM'
assert type(model.cfg) is TranslationConfig
def test_datamodule_has_correct_cfg():
tokenizer = MagicMock()
dm = TranslationDataModule(tokenizer)
assert type(dm.cfg) is TranslationDataConfig
assert dm.tokenizer is tokenizer
|
import pandas as pd
import numpy as np
from joblib import load
model=load('app/assets/model.pkl')
"""
FEATURES USED FOR TRAINING:
['household_type', 'length_of_stay', 'case_members', 'race', 'gender',
'income', 'ethnicity', 'HIV_AIDs', 'drug_abuse', 'alcohol_abuse',
'mental_illness', 'chronic_health_issues', 'physical_disabilities',
'developmental_disabilities', 'enrolled_status']
The notebook used for training the model is located in this repo: notebooks/Model.ipynb
"""
#----- UTIL FUNCTIONS FOR PREPROCESSING MEMBERS DATA
def start_pipeline(df):
'''Creates a copy of original dataframe to use in pipeline'''
return df.copy()
def unpack_json_cols(df):
'''Unpack json columns into a dataframe and concatenate each resulting dataframe to the original dataframe.'''
demographics = pd.json_normalize(df['demographics'])
barriers = pd.json_normalize(df['barriers'])
schools = pd.json_normalize(df['schools'])
df = pd.concat([df, demographics, barriers, schools], axis=1)
return df
def delete_cols(df):
'''Deletes original json columns as well as columns that will not be used in training the model (because of leakage)'''
json_cols = ['barriers', 'demographics', 'schools']
not_used = ['id', 'predicted_exit_destination', 'exit_destination', 'family_id', 'date_of_exit',
'income_at_exit', 'date_of_enrollment', 'relationship']
df.drop(columns=[*json_cols, *not_used], inplace=True)
return df
def barriers(df):
'''Assigns a value of True if the person has the barrier, else False'''
has_barrier = ['Alcohol Abuse', 'Developmental Disability', 'Chronic Health', 'Drug Abuse', 'HIV/AIDS', 'Mental Illness', 'Physical Disability']
barrier_cols = ['HIV_AIDs', 'drug_abuse', 'alcohol_abuse', 'mental_illness', 'chronic_health_issues',
'physical_disabilities', 'developmental_disabilities']
for barrier in barrier_cols:
df[barrier] = df[barrier].apply(lambda x: True if x in has_barrier else False)
return df
def replace_values(df):
'''
Replace missing and unknown values in data to NaN.
Currently, missing values in the database are denoted as either -1.0 or "" (an empty string)
'''
replace_list = [-1.0, ""]
df.replace(replace_list, np.NaN, inplace=True)
return df
#------- PREDICTION FUNCTION
def predictor(df):
df_clean = (df
.pipe(start_pipeline)
.pipe(unpack_json_cols)
.pipe(delete_cols)
.pipe(barriers)
.pipe(replace_values)
)
prediction = model.predict(df_clean)[0]
return prediction
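# ---------------------------------------------------------------------------
# Hedged usage sketch (not in the original module): `predictor` expects a
# one-row DataFrame whose 'demographics', 'barriers' and 'schools' columns
# hold dicts that unpack_json_cols flattens via pd.json_normalize, plus the
# columns dropped in delete_cols. The keys and values below are assumptions
# for demonstration only; the real schema comes from the upstream database
# and notebooks/Model.ipynb.
# example_row = pd.DataFrame([{
#     'id': 1, 'family_id': 1, 'relationship': 'Self',
#     'date_of_enrollment': '2020-01-01', 'date_of_exit': None,
#     'income_at_exit': None, 'exit_destination': None,
#     'predicted_exit_destination': None,
#     'demographics': {'household_type': 'Single', 'length_of_stay': 30,
#                      'case_members': 1, 'race': 'Other', 'gender': 'F',
#                      'income': 0, 'ethnicity': 'Unknown'},
#     'barriers': {'HIV_AIDs': 'No', 'drug_abuse': 'No', 'alcohol_abuse': 'No',
#                  'mental_illness': 'No', 'chronic_health_issues': 'No',
#                  'physical_disabilities': 'No',
#                  'developmental_disabilities': 'No'},
#     'schools': {'enrolled_status': 'Not Enrolled'},
# }])
# print(predictor(example_row))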
|
import sys
# import libraries
import numpy as np
import pandas as pd
import re
from sqlalchemy import create_engine
import nltk
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report, fbeta_score, make_scorer, accuracy_score
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.multioutput import MultiOutputClassifier
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
import pickle
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
def load_data(database_filepath):
'''
Function for loading the database into pandas DataFrames
Args: database_filepath: the path of the database
Returns: X: features (messages)
y: categories (one-hot encoded)
An ordered list of categories
'''
# load data from database
engine = create_engine('sqlite:///' + str(database_filepath))
df = pd.read_sql('SELECT * FROM disaster_response', engine)
print(df.columns.values)
# Define feature and target variables X and Y
X = df['message']
Y = df.drop(['id', 'message', 'original', 'genre'], axis=1)
#Y['related'] = Y['related'].map(lambda x: 1 if x==2 else x)
categories = Y.columns.tolist()
return X, Y, categories
def tokenize(text):
'''
Function for tokenizing string
Args: Text string
Returns: List of tokens
'''
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
stop_words = stopwords.words("english")
tokens = word_tokenize(text)
lemmatizer = nltk.WordNetLemmatizer()
return [lemmatizer.lemmatize(token).lower().strip() for token in tokens if token not in stop_words]
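# Illustrative example (exact tokens depend on the downloaded NLTK data):
#   tokenize("Flooding reported in the city, we need water and food!")
#   -> roughly ['flooding', 'reported', 'city', 'need', 'water', 'food']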
def build_model():
'''
Function for building pipeline and GridSearch
Args: None
Returns: Model (GridSearchCV)
'''
pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators=100), n_jobs=-1))
])
# model parameters for GridSearchCV
parameters = {
'vect__max_df': [0.8, 1.0],
'clf__estimator__n_estimators': [10, 20],
'clf__estimator__min_samples_split': [2, 5]
}
ftwo_scorer = make_scorer(fbeta_score, beta=2)
# Initialize a gridsearchcv object that is parallelized
#cv = GridSearchCV(pipeline, param_grid=parameters, scoring='f1_micro', verbose=1)
#cv = GridSearchCV(pipeline, param_grid=parameters, scoring='f1', cv=3, verbose=10, n_jobs=-1)
cv = GridSearchCV(pipeline, param_grid=parameters, verbose=1, n_jobs=-1)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
'''
Function for evaluating the model on the test set
Args: Model, test features, test targets, ordered list of category names
Returns: Nothing. Prints a classification report per category
'''
# Generate predictions
Y_pred = model.predict(X_test)
# Print out the full classification report
print(classification_report(Y_test, Y_pred, target_names=category_names))
#for idx, cat in enumerate(Y_test.columns.values):
# print("{} -- {}".format(cat, accuracy_score(Y_test.values[:,idx], y_pred[:, idx])))
#print("accuracy = {}".format(accuracy_score(Y_test, y_pred)))
def save_model(model, model_filepath):
'''
Function for saving the model as picklefile
Args: Model, filepath
Returns: Nothing. Saves model to pickle file
'''
with open(model_filepath, 'wb') as file:
pickle.dump(model, file)
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() |
from ai.action import Action
from ability import EffectType, Effect
import random
class SingleTargetAttack(Action):
def __init__(self, name: str, cooldown: int, base_crit_chance: float, effects: [Effect]):
for effect in effects:
if effect.type not in [EffectType.damage_health, EffectType.burn, EffectType.bleed]:
raise Exception(f'SingleTargetAttack {name} has an unsupported effect type {effect.type}')
super().__init__()
self.name = name
self.cooldown = cooldown
self.base_crit_chance = base_crit_chance
self.effects = effects
self.targets_opponents = True
self.targets_allies = False
self.area = 0
self.area_modifiable = False
def do(self, user, target, fight):
out = f'{user.name} used {self.name} on {target.name}.'
crit = False
if random.random() <= self.base_crit_chance:
crit = True
out += f' CRITICAL HIT!'
out = self.deal_damage(crit, fight, out, [target], user)
out += self.handle_elements(fight)
self.check_cooldown()
return out
|
import datetime
import logging
from .tempsensor import TempReading
from boilerio import pid, pwm
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class TemperatureSetting(object):
def __init__(self, target, zone_width=0.6):
self._target = target
self._zone_width = zone_width
@property
def target(self):
return self._target
@property
def target_zone_min(self):
return self._target - self._zone_width / 2
@property
def target_zone_max(self):
return self._target + self._zone_width / 2
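# Worked example: TemperatureSetting(20.0) with the default zone_width of 0.6
# gives target_zone_min = 20.0 - 0.3 = 19.7 and target_zone_max = 20.3.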
class Thermostat(object):
"""A thermostat: turns boiler on/off based on temperature input."""
STALE_PERIOD = datetime.timedelta(0, 600)
# The period of one on-off cycle when maintaining/monitoring the average
# temperature.
PWM_PERIOD = datetime.timedelta(0, 600)
PID_KP = 2.8
PID_KI = 0.3
PID_KD = 1.8
MODE_ON = "On"
MODE_PWM = "PWM"
MODE_OFF = "Off"
MODE_STALE = "Stale"
def __init__(self, boiler, sensor, state_change_callback=None):
"""Initialise thermostat object.
boiler: an object with 'on' and 'off' methods"""
self._boiler = boiler
self._pid = pid.PID(None, self.PID_KP, self.PID_KI, self.PID_KD)
self._pwm_control = pwm.PWM(0, self.PWM_PERIOD, boiler)
self._state_change_callback = state_change_callback
self._measurement_begin = None
self._sensor = sensor
self._target = None
self._state = {'mode': self.MODE_STALE, 'dutycycle': 0}
def _update_state(self, mode, dutycycle):
"""Updates local state and notifies observers if there was a change."""
# Using word 'mode' here to avoid confusion
state = {'mode': mode, 'dutycycle': dutycycle}
if state != self._state:
logger.debug("%s: State change: %s -> %s",
str(self), self._state, state)
self._state = state
if self._state_change_callback is not None:
self._state_change_callback(state['mode'], state['dutycycle'])
def set_state_change_callback(self, state_change_callback):
self._state_change_callback = state_change_callback
@property
def target(self):
return self._target.target if self._target else None
@property
def is_heating(self):
"""True when we're heating up to a temperature (not maintaining/off)."""
return self._state['mode'] == self.MODE_ON
def set_target_temperature(self, target):
"""Set a target temperature.
target: a floating-point target temperature value."""
if (self._target is None or
(self._target and self._target.target != target)):
self._target = TemperatureSetting(target)
self._pid.reset(target)
def interval_elapsed(self, now):
"""Act on time interval passing.
now: the current datetime"""
if (self._sensor.temperature is None or self._target is None or
self._sensor.temperature.when < (now - self.STALE_PERIOD)):
# Reading is stale: turn off the boiler:
self._update_state(self.MODE_STALE, 0)
self._boiler.off()
elif self._sensor.temperature.reading < self._target.target_zone_min:
# Reading is valid and below target range:
self._update_state(self.MODE_ON, 1)
self._boiler.on()
elif (self._sensor.temperature.reading > self._target.target_zone_min and
self._sensor.temperature.reading <= self._target.target_zone_max):
# Reading is valid and within the target range:
# New measurement cycle?
if (self._measurement_begin is None or
self._measurement_begin + self.PWM_PERIOD < now):
self._measurement_begin = now
# Adjust duty cycle:
pid_output = self._pid.update(self._sensor.temperature.reading)
self._pwm_control.setDutyCycle(pid_output)
logger.debug("PID output: %f", pid_output)
logger.debug("PID internals: prop %f, int %f, diff %f",
self._pid.last_prop, self._pid.error_integral,
self._pid.last_diff)
logger.debug("New measurement cycle started")
self._update_state(self.MODE_PWM, self._pwm_control.dutycycle)
self._pwm_control.update(now)
elif self._sensor.temperature.reading > self._target.target_zone_max:
# Reading is valid and above the target range:
self._update_state(self.MODE_OFF, 0)
self._boiler.off()
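# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Thermostat only needs
# a boiler with on()/off() methods and a sensor whose `temperature` attribute
# exposes `.reading` and `.when`; the TempReading constructor arguments below
# are an assumption for illustration.
# class _FakeBoiler(object):
#     def on(self): print("boiler on")
#     def off(self): print("boiler off")
# class _FakeSensor(object):
#     def __init__(self):
#         self.temperature = TempReading(18.5, datetime.datetime.now())  # assumed signature
# t = Thermostat(_FakeBoiler(), _FakeSensor())
# t.set_target_temperature(20.0)
# t.interval_elapsed(datetime.datetime.now())  # 18.5 < 19.7 -> boiler turned on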
|
#src module
from src.utilities import Preprocessing
#sklearn
from sklearn.base import BaseEstimator
class PreprocessNgram(BaseEstimator):
'''Preprocesses data for ngram features.'''
def preprocess(self, text):
upre=Preprocessing()
text=upre.replace_emojis(text)
text=upre.remove_mention(text)
text=upre.remove_urls(text)
text=upre.remove_rt(text)
text=upre.remove_non_alnum(text)
text=upre.remove_space(text)
text=upre.lower_text(text)
text=upre.strip_text(text)
text=upre.compress_words(text)
tokens=upre.tokenize_tweettokenizer(text)
stems = [ upre.stem_porter(str(item)) for item in tokens]
return list(stems)
def fit(self, raw_docs, y=None):
return self
def transform(self, raw_docs):
return [self.preprocess(raw_doc) for raw_doc in raw_docs] |
"""
This contains functions that were not implemented (yet).
Consider them TODO items.
"""
def swatershed(f, g, B=None, LINEREG="LINES"):
"""
- Purpose
Detection of similarity-based watershed from markers.
- Synopsis
y = swatershed(f, g, B=None, LINEREG="LINES")
- Input
f: Gray-scale (uint8 or uint16) image.
g: Gray-scale (uint8 or uint16) or binary image. Marker
image. If binary, each connected component is an object
marker. If gray, it is assumed it is a labeled image.
B: Structuring Element Default: None (3x3 elementary
cross). (watershed connectivity)
LINEREG: String Default: "LINES". 'LINES' or ' REGIONS'.
- Output
y: Gray-scale (uint8 or uint16) or binary image.
- Description
swatershed creates the image y by detecting the domain of the
catchment basins of f indicated by g , according with the
connectivity defined by B . This watershed is a modified version
where each basin is defined by a similarity criterion between
pixels. The original watershed is normally applied to the
gradient of the image. In this case, the gradient is taken
internally. According to the flag LINEREG y will be a labeled
image of the catchment basins domain or just a binary image that
presents the watershed lines. The implementation of this
function is based on LotuFalc:00 .
- Examples
#
f = to_uint8([
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
m = to_uint8([
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 2, 0, 0, 0]])
print swatershed(f,m,secross(),'REGIONS')
"""
if B is None: B = secross()
print 'Not implemented yet'
return None
def vmax(f, v=1, Bc=None):
"""
y = vmax(f, v=1, Bc={3x3 cross})
Remove domes with volume less than v.
This operator removes connected domes with volume less
than `v`. This function is very similar to `hmax`, but instead
of using a gray scale criterion (contrast) for the dome, it uses
a volume criterion.
Parameters
----------
f : Gray-scale (uint8 or uint16) image.
v : Volume parameter (default: 1).
Bc : Structuring element (default: 3x3 cross).
Returns
-------
y : Gray-scale (uint8 or uint16) or binary image.
"""
if Bc is None: Bc = secross()
raise NotImplementedError, 'Not implemented yet'
def flood(fin, T, option, Bc=None):
"""
y = flood(fin, T, option, Bc=None)
Flooding filter h,v,a-basin and dynamics (depth, area, volume)
This is a flooding algorithm. It is the basis to implement many
topological functions. It is a connected filter that floods an
image following some topological criteria: area, volume, depth.
These filters are equivalent to area-close, volume-basin or
h-basin, respectively. This code may be difficult to understand
because of its many options. Basically, when t is negative, the
generalized dynamics: area, volume, h is computed. When the
flooding is computed, every time a new level in the flooding
happens, a test is made to verify if the criterion has reached.
This is used to set the value to that height. This value image
will be used later for sup-reconstruction (flooding) at that
particular level. This test happens in the raising of the water
and in the merging of basins.
Parameters
----------
fin : Gray-scale image (uint8 or uint16).
T : Criterion value. If T==-1, then the dynamics is
determined, not the flooding at this criterion. This was
selected just to use the same algoritm to compute two
completely distinct functions.
option : One of ('AREA', 'VOLUME', 'H').
Bc : Structuring element (default: 3x3 cross)
Returns
-------
y : Gray-scale image (same type as input).
"""
if Bc is None: Bc = secross()
raise NotImplementedError, 'pymorph.flood'
|
#!/usr/bin/env python3
"""nested functions"""
def addBy(val):
def func(inc):
return val + inc
return func
addFive = addBy(5)
print(addFive(4))
addThree = addBy(3)
print(addThree(7))
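# Expected output: 9 then 10 -- each returned closure keeps its own captured `val`.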
|
import numpy as np
import pandas as pd
import pickle
import os
from bokeh.layouts import row
from bokeh.plotting import figure, output_file, show, gridplot
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.palettes import Spectral6
from bokeh.transform import factor_cmap, dodge
##############################################################################
def quick_analysis(dataf):
print("Data Types:")
print(dataf.dtypes)
print("Rows and Columns:")
print(dataf.shape)
print("Column Names:")
print(dataf.columns)
print("Null Values:")
print(dataf.apply(lambda x: sum(x.isnull()) / len(dataf)))
##############################################################################
input_path = "/home/christian/dynasim/input/"
model_path = "/home/christian/dynasim/src/estimation/models/"
output_path = "/home/christian/dynasim/output/"
estimation_path = "/home/christian/dynasim/src/estimation/"
sim_path = "/home/christian/dynasim/src/sim/"
current_week = "51"
output_week = "/home/christian/dynasim/output/week" + str(current_week) + "/"
def make_cohort(dataf, birthyears):
dataf = dataf.copy()
birthyear = dataf["year"] - dataf["age_real"]
condition = [by in birthyears for by in birthyear]
dataf = dataf.loc[condition]
dataf = dataf[dataf["east_real"]==0]
return dataf
dataf = pd.read_pickle(input_path + "merged")
dataf1 = pd.read_pickle(output_week + "df_analysis_full")
# dataf2 = pd.read_pickle(output_week + "df_analysis_workingage")
palette = ["#c9d9d3", "#718dbf", "#e84d60", "#648450"]
cohorts = np.arange(1945, 1955)
df = make_cohort(dataf1, cohorts)
def plot_lifetime(df, type):
print(type)
ylist = []
list0 = []
list1 = []
list2 = []
list3 = []
interv = np.sort(df["age_real"].unique())
for a in interv:
df_rel = df[df["age_real"]==a]
n = len(df_rel)
status0 = sum(df_rel["employment_status_" + type] == 0)/n
status1 = sum(df_rel["employment_status_" + type] == 1)/n
status2 = sum(df_rel["employment_status_" + type] == 2)/n
status3 = sum(df_rel["employment_status_" + type] == 3)/n
ylist.append(str(a))
list0.append(status0)
list1.append(status1)
list2.append(status2)
list3.append(status3)
dici = {"age": ylist,
"0": list0,
"1": list1,
"2": list2,
"3": list3}
#alllist = ["0", "1", "2", "3"]
#labels = ["N.E.", "Rente", "Teilzeit", "Vollzeit"]
alllist = ["3", "2", "0", "1"]
labels = ["Vollzeit", "Teilzeit", "N.E.", "Rente"]
p = figure(x_range=ylist, plot_height=250, plot_width=1500, title="Employment Status by age: West Germany / type: " + type)
p.vbar_stack(alllist, x='age', width=0.9, color=palette, source=dici,
legend_label=labels)
p.y_range.start = 0
p.x_range.range_padding = 0.1
p.xgrid.grid_line_color = None
p.axis.minor_tick_line_color = None
p.outline_line_color = None
p.legend.location = "bottom_left"
p.legend.orientation = "horizontal"
show(p)
if __name__ == "__main__":
plot_lifetime(dataf1, "real")
plot_lifetime(dataf1, "ml")
plot_lifetime(dataf1, "ext")
|
import click
import geojson
import requests
@click.command()
@click.argument(
'files',
required=True,
type=click.Path(
exists=True, file_okay=True, dir_okay=False, resolve_path=True),
nargs=-1)
def main(files):
"""Get GeoJSON for grid cells
"""
# Load files
# Each file should be a text file where each line is like:
# https://api.weather.gov/gridpoints/HNX/100,101/forecast
lines = []
for file in files:
with open(file) as f:
lines.extend(f.readlines())
# Remove \n at end of each line
lines = [l.strip() for l in lines]
# For each url, send a request and keep the geometry and the elevation
features = []
for url in lines:
r = requests.get(url)
# Somewhat often the NWS returns 404
if r.status_code == 404:
continue
d = r.json()
geometry = d['geometry']
if geometry['type'] != 'GeometryCollection':
raise ValueError('not GeometryCollection')
box = [x for x in geometry['geometries'] if x['type'] == 'Polygon'][0]
props = {
'forecast_url': url,
'ele': d['properties']['elevation']['value']}
f = geojson.Feature(geometry=box, properties=props)
features.append(f)
fc = geojson.FeatureCollection(features)
click.echo(geojson.dumps(fc))
if __name__ == '__main__':
main()
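# Hedged usage example (script and file names are illustrative):
#   python grid_cells.py hnx_urls.txt > grid_cells.geojson
# where hnx_urls.txt contains one forecast URL per line, e.g.
#   https://api.weather.gov/gridpoints/HNX/100,101/forecast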
|
def findComplement(num):
"""
:type num: int
:rtype: int
"""
# Convert number into binary string
num_str = format(num,'b')
# List to hold individual bits
num_list = []
for i in num_str:
# Calculate the reverse
val = (int(i)+1)%2
num_list.append(str(val))
return int(''.join(num_list), 2)
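# Worked example: 5 -> '101' in binary; flipping each bit gives '010' -> 2.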
if __name__ == "__main__":
    number = 5
    print(findComplement(number)) |
from django.utils.functional import cached_property
from rest_framework import viewsets, mixins
from .models import (
Group,
BudgetAccountGroup,
TemplateAccountGroup,
BudgetSubAccountGroup,
TemplateSubAccountGroup
)
from .serializers import (
BudgetAccountGroupSerializer,
BudgetSubAccountGroupSerializer,
TemplateAccountGroupSerializer,
TemplateSubAccountGroupSerializer
)
class GroupViewSet(
mixins.UpdateModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet
):
"""
Viewset to handle requests to the following endpoints:
(1) PATCH /groups/<pk>/
(2) GET /groups/<pk>/
(3) DELETE /groups/<pk>/
"""
lookup_field = 'pk'
@cached_property
def instance_cls(self):
instance = self.get_object()
return type(instance)
def get_serializer_class(self):
mapping = {
BudgetAccountGroup: BudgetAccountGroupSerializer,
TemplateAccountGroup: TemplateAccountGroupSerializer,
BudgetSubAccountGroup: BudgetSubAccountGroupSerializer,
TemplateSubAccountGroup: TemplateSubAccountGroupSerializer
}
return mapping[self.instance_cls]
def get_queryset(self):
return Group.objects.filter(created_by=self.request.user)
def perform_update(self, serializer):
serializer.save(updated_by=self.request.user)
|
import pytest
from fastapi.testclient import TestClient
from main import app
@pytest.fixture
def client():
return TestClient(app)
def test_base_endpoint_get(client):
""" test base endpoint """
response = client.get('/')
assert response.status_code == 404
def test_base_endpoint_post(client):
response = client.post('/')
assert response.status_code == 404
def test_predict_endpoint_get(client):
""" test predict endpoint """
# test get
response = client.get('/predict')
assert response.status_code == 405
def test_predict_endpoint_post_nojson(client):
# test invalid post
response = client.post('/predict')
assert response.status_code == 422
def test_predict_endpoint_post_valid_json(client):
# test valid post
response = client.post('/predict', json={
"text": "it's a beautiful world"
})
assert response.status_code == 200
assert response.json()["output"] in ["positive", "negative"] |
from block import Block
class Blockchain:
def __init__(self):
"""
This function initializes our Blockchain
Inputs Desc:
chain: collection of objects of type Block
unconfirmed_transactions:mempool i.e. pool of transactions yet to be confirmed
and inserted in the Blockchain chain
genesis_block:the first block of the Blockchain
"""
self.chain = []
self.unconfirmed_transactions = []
self.genesis_block()
def genesis_block(self):
"""
Defines the first block of our Blockchain
This block will have no transactions and will have a previous_hash value of 0
"""
transactions = []
genesis_block = Block(transactions, "0")
genesis_block.generate_hash()
self.chain.append(genesis_block)
def add_block(self, transactions):
"""
Used to add blocks to the Blockchain
Input: Transactions taken from the mempool to be confirmed into the chain after verification by the participant
Output: Proof of work and added block
Every miner needs to generate a hash requiring some constraints called the proof_of_work
Miners compete to find this hash!
"""
previous_hash = (self.chain[len(self.chain)-1]).hash
new_block = Block(transactions, previous_hash)
new_block.generate_hash()
proof = self.proof_of_work(new_block)
self.chain.append(new_block)
return proof, new_block
def print_blocks(self):
"""
This is used to print the contents of the entire Blockchain
"""
for i in range(len(self.chain)):
current_block = self.chain[i]
print("Block {} {}".format(i, current_block))
current_block.print_contents()
def validate_chain(self):
"""
This function checks the validity of the Blockchain.
For the chain to be valid, the hash value for each block must be computed
correctly and the previous_hash of each block should be mapped correctly.
If we find a broken chain we return False.
"""
for i in range(1, len(self.chain)):
current = self.chain[i]
previous = self.chain[i-1]
if(current.hash != current.generate_hash()):
print("Finding Validity.. \nBlockchain invalid!")
return False
if(current.previous_hash != previous.generate_hash()):
print("Finding Validity.. \nBlockchain invalid!")
return False
print("Finding Validity.. \nBlockchain is valid")
return True
def proof_of_work(self,block, difficulty=4):
"""
This method defines the proof of work to be computed by the miners to add a block
into the chain. For this we increase the nonce value until a hash with the required
difficulty is generated.
For our chain we want a hash whose first 4 digits are 0000.
The difficulty and the constraints on the hash can be increased for larger chains.
"""
proof = block.generate_hash()
while proof[:difficulty] != '0'*difficulty:
block.nonce += 1
proof = block.generate_hash()
block.nonce = 0
return proof
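# Minimal usage sketch (assumes the Block class from block.py exposes the
# transactions, previous_hash, nonce, hash, generate_hash() and print_contents()
# members used above; the transaction payloads below are made up):
#   chain = Blockchain()
#   chain.add_block(["alice pays bob 5"])
#   chain.add_block(["bob pays carol 2"])
#   chain.validate_chain()
#   chain.print_blocks()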
|
import tensorflow as tf
a = tf.Variable(5)
b = tf.Variable(4.1151, name="float", dtype=tf.float16)
c = tf.Variable("english", dtype=tf.string)
d = tf.Variable(tf.ones([4, 4]))
init = tf.global_variables_initializer()
init_d = tf.variables_initializer([d])
sess = tf.Session()
sess.run(init)
sess.run(init_d)
print(a, b, c, d, sep="\n")
print(sess.run([a, b, c]))
print(sess.run(d))
sess.close() |
import numpy as np
from PIL import Image
from tempfile import mkdtemp
from redpil.bmp import imwrite, imread
from pathlib import Path
import imageio
import shutil
class BMPSuite8bpp:
params = ([(128, 128), (1024, 1024),
(2048, 4096)], # (2**5 * 1024, 2 ** 5 *1024)],
['redpil', 'pillow', 'imageio'])
param_names = ['shape', 'mode']
def setup(self, shape, mode):
self.img = np.random.randint(255, size=shape, dtype=np.uint8)
self.tmpdir = Path(mkdtemp())
self.filename = self.tmpdir / 'saved.bmp'
imwrite(self.filename, self.img)
def time_save(self, shape, mode):
if mode == 'pillow':
p = Image.fromarray(self.img)
filename = self.tmpdir / 'pillow.bmp'
p.save(filename)
elif mode == 'imageio':
filename = self.tmpdir / 'imageio.bmp'
imageio.imwrite(filename, self.img)
else:
filename = self.tmpdir / 'redpil.bmp'
imwrite(filename, self.img)
def time_load(self, shape, mode):
if mode == 'pillow':
try:
img = np.asarray(Image.open(self.filename).convert('L'))
except Image.DecompressionBombError:
pass
elif mode == 'imageio':
img = imageio.imread(self.filename)
else:
img = imread(self.filename)
assert np.array_equal(img, self.img)
def teardown(self, shape, mode):
shutil.rmtree(self.tmpdir)
class BMPSuite24bpp:
params = ([(32, 128, 3), (256, 1024, 3),
(2048, 1024, 3)], #, (2**5 * 1024, 2 ** 3 *1024, 3)],
['redpil', 'pillow', 'imageio'])
param_names = ['shape', 'mode']
def setup(self, shape, mode):
self.img = np.random.randint(255, size=shape, dtype=np.uint8)
self.tmpdir = Path(mkdtemp())
self.filename = self.tmpdir / 'saved.bmp'
imwrite(self.filename, self.img)
def time_save(self, shape, mode):
if mode == 'pillow':
p = Image.fromarray(self.img)
filename = self.tmpdir / 'pillow.bmp'
p.save(filename)
elif mode == 'imageio':
filename = self.tmpdir / 'imageio.bmp'
imageio.imwrite(filename, self.img)
else:
filename = self.tmpdir / 'redpil.bmp'
imwrite(filename, self.img)
def time_load(self, shape, mode):
if mode == 'pillow':
try:
img = np.asarray(Image.open(self.filename).convert('RGB'))
except Image.DecompressionBombError:
pass
elif mode == 'imageio':
img = imageio.imread(self.filename)
else:
img = imread(self.filename)
assert np.array_equal(img, self.img)
def teardown(self, shape, mode):
shutil.rmtree(self.tmpdir)
class BMPSuite32bpp:
params = ([(32, 128, 4), (256, 1024, 4),
(2048, 1024, 4)], #, (2**5 * 1024, 2 ** 3 *1024, 4)],
['redpil', 'pillow', 'imageio'])
param_names = ['shape', 'mode']
def setup(self, shape, mode):
self.img = np.random.randint(255, size=shape, dtype=np.uint8)
self.tmpdir = Path(mkdtemp())
self.filename = self.tmpdir / 'saved.bmp'
imwrite(self.filename, self.img, write_order='RGBA')
self.filename_pillow = self.tmpdir / 'saved_pillow.bmp'
# Pillow needs BGRA
imwrite(self.filename_pillow, self.img, write_order='BGRA')
def time_save(self, shape, mode):
if mode == 'pillow':
p = Image.fromarray(self.img)
filename = self.tmpdir / 'pillow.bmp'
p.save(filename)
elif mode == 'imageio':
filename = self.tmpdir / 'imageio.bmp'
imageio.imwrite(filename, self.img)
else:
filename = self.tmpdir / 'redpil.bmp'
imwrite(filename, self.img, write_order='RGBA')
def time_load(self, shape, mode):
if mode == 'pillow':
try:
img = np.asarray(
Image.open(self.filename_pillow).convert('RGBA'))
except Image.DecompressionBombError:
pass
elif mode == 'imageio':
img = imageio.imread(self.filename)
else:
img = imread(self.filename)
assert np.array_equal(img, self.img)
def teardown(self, shape, mode):
shutil.rmtree(self.tmpdir)
|
from __future__ import print_function
"""
Author:
Jack Duryea
Waterland Lab
Computational Epigenetics Section
Baylor College of Medicine
PReLIM: Precise Read Level Imputation of Methylation
PReLIM imputes missing CpG methylation
states in CpG matrices.
"""
# standard imports
from scipy import stats
import numpy as np
import warnings
import sys
from tqdm import tqdm
import copy
import time
from random import shuffle
from collections import defaultdict
import random
# sklearn imports
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
# Pickle
try:
import cPickle as p
except ModuleNotFoundError:
import pickle as p
# warnings suck, turn them off
if sys.version_info[0] < 3:
warnings.simplefilter("ignore", DeprecationWarning)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import md5, sha
# TODO: most of these fields are redundant in our application
class CpGBin():
"""
A class that contains information about a CpG Bin. Does not need
to be used directly, PReLIM will use this class internally.
"""
def __init__(self,
matrix,
#relative_positions
binStartInc=None,
binEndInc=None,
cpgPositions=None,
sequence="",
encoding=None,
missingToken= -1,
chromosome=None,
binSize=100,
species="MM10",
verbose=True,
tag1=None,
tag2=None):
"""
Constructor for a bin
Inputs:
matrix: numpy array, the bin's CpG matrix.
binStartInc: integer, the starting, inclusive, chromosomal index of the bin.
binEndInc: integer, the ending, inclusive, chromosomal index of the bin.
cpgPositions: array of integers, the chromosomal positions of the CpGs in the bin.
sequence: string, nucleotide sequence (A,C,G,T)
encoding: array, a reduced representation of the bin's CpG matrix
missingToken: integer, the token that represents missing data in the matrix.
chromosome: string, the chromosome this bin resides in.
binSize: integer, the number of base pairs this bin covers
species: string, the species this bin belongs to.
verbose: boolean, print warnings, set to "false" for no error checking and faster speed
tag1: anything, for custom use.
tag2: anything, for custom use.
"""
self.cpgDensity = matrix.shape[1]
self.readDepth = matrix.shape[0]
self.matrix = np.array(matrix, dtype=float)
self.binStartInc = binStartInc
self.binEndInc = binEndInc
self.cpgPositions = cpgPositions
self.sequence = sequence
self.missingToken = missingToken
self.chromosome = chromosome
self.binSize = binSize
self.species = species
self.tag1 = tag1
self.tag2 = tag2
class PReLIM():
"""
Class for a PReLIM model.
Example usage:

    from PReLIM import PReLIM
    import numpy as np

    # Collect methylation matrices: 1 is methylated, 0 is unmethylated, -1 is unknown.
    # Each column is a CpG site, each row is a read.
    bin1 = np.array([[1,0],[0,-1],[-1,1],[0,0]], dtype=float)
    bin2 = np.array([[1,0],[1,0],[-1,1],[0,0],[0,1],[1,1],[0,0]], dtype=float)
    bin3 = np.array([[-1,1],[0,-1],[-1,1],[0,0]], dtype=float)
    ...
    bin1000 = np.array([[1,-1],[0,1],[-1,1],[1,0]], dtype=float)
    bin1001 = np.array([[1,1],[0,0],[0,1],[1,1]], dtype=float)
    bin1002 = np.array([[1,1],[1,1],[0,1],[1,0]], dtype=float)
    bin1003 = np.array([[0,0],[1,0],[0,1],[1,1]], dtype=float)

    # Collection of bins
    bins = [bin1, bin2, bin3, ..., bin1000, bin1001, bin1002, bin1003]
    model = PReLIM(cpgDensity=2)

    # Train the model; model_file="no" means no model file is written to disk
    model.train(bins, model_file="no")

    # Use the model for imputation
    imputed_bin1 = model.impute(bin1)

    # Batch imputation imputes many bins at once
    imputed_bins = model.impute_many(bins)
"""
def __init__(self, cpgDensity=2):
"""
Constructor for a PReLIM model.
:param cpgDensity: the density of the bins that will be used
"""
self.model = None
self.cpgDensity = cpgDensity
self.METHYLATED = 1
self.UNMETHYLATED = 0
self.MISSING = -1
self.methylated = 1
self.unmethylated = 0
self.unknown = -1
# Train a model
def train(self, bin_matrices, model_file="no", verbose=False):
"""
Train a PReLIM model using cpg matrices.
:param bin_matrices: list of cpg matrices
:param model_file: The name of the file to save the model to. If None, then create a file name that includes a timestamp. If you don't want to save a file, set this to "no"
:param verbose: prints more info if true
"""
X,y = self.get_X_y(bin_matrices, verbose=verbose)
# Train the random forest model via grid search
self.fit(X,y, model_file=model_file, verbose=verbose)
def fit(self,
X_train,
y_train,
n_estimators = [10, 50, 100, 500, 1000],
cores = -1,
max_depths = [1, 5, 10, 20, 30],
model_file=None,
verbose=False
):
"""
Train a random forest model using grid search on a feature matrix (X) and class labels (y)
Usage:
model.fit(X_train, y_train)
:param X_train: numpy array, Contains feature vectors.
:param y_train: numpy array, Contains labels for training data.
:param n_estimators: list, the number of estimators to try during a grid search.
:param max_depths: list, the maximum depths of trees to try during a grid search.
:param cores: integer, the number of cores to use during training, helpful for grid search.
:param model_file: string,The name of the file to save the model to.
If None, then create a file name that includes a timestamp.
If you don't want to save a file, set this to "no"
:return: The trained sklearn model
"""
grid_param = {
"n_estimators": n_estimators,
"max_depth": max_depths,
}
# Note: let the grid search use a lot of cores, but only use 1 for each forest
# since dispatching can take a lot of time
rf = RandomForestClassifier(n_jobs=1)
self.model = GridSearchCV(rf, grid_param, n_jobs=cores, cv=5, verbose=verbose)
self.model.fit(X_train, y_train)
# save the model
if model_file == "no":
return self.model
if not model_file:
model_file = "PReLIM_model" + str(time.time())
p.dump(self.model, open(model_file,"wb"))
return self.model
# Feature collection directly from bins
def get_X_y(self, bin_matrices, verbose=False):
"""
:param bin_matrices: list of CpG matrices
:param verbose: prints more info if true
:return: feature matrix (X) and class labels (y)
"""
bins = []
# convert to bin objects for ease of use
for matrix in bin_matrices:
mybin = CpGBin( matrix=matrix )
bins.append( mybin )
# find bins with no missing data
complete_bins = _filter_missing_data( bins )
shuffle( complete_bins )
# apply masks
masked_bins = _apply_masks( complete_bins, bins )
# extract features
X, y = self._collectFeatures( masked_bins )
return X, y
# Return a vector of predicted classes
def predict_classes(self, X):
"""
Predict the classes of the samples in the given feature matrix
Usage:
y_pred = CpGNet.predict_classes(X)
:param X: numpy array, contains feature vectors
:param verbose: prints more info if true
:return: 1-d numpy array of predicted classes
"""
return self.model.predict(X)
# Return a vector of probabilities for methylation
def predict(self, X):
"""
Predict the probability of methylation for each sample in the given feature matrix
Usage:
y_pred = CpGNet.predict(X)
:param X: numpy array, contains feature vectors
:param verbose: prints more info if true
:return: 1-d numpy array of prediction values
"""
return self.model.predict_proba(X)[:,1]
def predict_proba(self, X):
    """
    Predict the probability of methylation for each sample in the given feature matrix.
    Same as predict, just a convenience to have in case of different styles
    Usage:
    y_pred = CpGNet.predict_proba(X)
    :param X: numpy array, contains feature vectors
    :return: 1-d numpy array of methylation probabilities
    """
    return self.model.predict_proba(X)[:, 1]
# Load a saved model
def loadWeights(self, model_file):
"""
self.model is loaded with the provided weights
:param model_file: string, name of file with a saved model
"""
self.model = p.load(open(model_file,"rb"))
# Imputes missing values in Bins
def impute(self, matrix):
"""
Impute the missing values in a CpG matrix. Values are filled with the
predicted probability of methylation.
:param matrix: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:return: A 2d numpy array with predicted probabilities of methylation
"""
X = self._get_imputation_features(matrix)
if len(X) == 0: # nothing to impute
return matrix
predictions = self.predict(X)
k = 0 # keep track of prediction index for missing states
predicted_matrix = np.copy(matrix)
for i in range(predicted_matrix.shape[0]):
for j in range(predicted_matrix.shape[1]):
if predicted_matrix[i, j] == -1:
predicted_matrix[i, j] = predictions[k]
k += 1
return predicted_matrix
# Extract all features for all matrices so we can predict in bulk, this is where the speedup comes from
def impute_many(self, matrices):
'''
Imputes a bunch of matrices at the same time to help speed up imputation time.
:param matrices: list of CpG matrices, where each matrix is a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:return: A List of 2d numpy arrays with predicted probabilities of methylation for unknown values.
'''
X = np.array([features for matrix_features in [self._get_imputation_features(matrix) for matrix in matrices] for features in matrix_features])
if len(X) == 0:
return matrices
predictions = self.predict(X)
predicted_matrices = []
k = 0 # keep track of prediction index for missing states, order is crucial!
for matrix in matrices:
predicted_matrix = np.copy(matrix)
for i in range(predicted_matrix.shape[0]):
for j in range(predicted_matrix.shape[1]):
if predicted_matrix[i, j] == -1:
predicted_matrix[i, j] = predictions[k]
k += 1
predicted_matrices.append(predicted_matrix)
return predicted_matrices
### Helper functions, for private use only ###
# get a feature matrix for the given cpg matrix
def _get_imputation_features(self,matrix):
'''
Returns a vector of features needed for the imputation of this matrix
Each sample is an individual CpG, and the features are
the row mean, the column mean, the position of the cpg in the matrix,
the row, and the relative proportions of each methylation pattern
:param matrix: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:return: A feature vector for the matrix
'''
X = []
numReads = matrix.shape[0]
density = matrix.shape[1]
nan_copy = np.copy(matrix)
nan_copy[nan_copy == -1] = np.nan
# get the column and row means
column_means = np.nanmean(nan_copy, axis=0)
row_means = np.nanmean(nan_copy, axis=1)
encoding = self._encode_input_matrix(matrix)[0]
# iterate over all values in the matrix
for i in range(numReads):
for j in range(density):
observed_state = matrix[i, j]
# only record missing values
if observed_state != -1:
continue
row_mean = row_means[i]
col_mean = column_means[j]
row = np.copy(matrix[i])
row[j] = -1
# features for a single sample
data = [row_mean] + [col_mean] + [i, j] + list(row) + list(encoding)
X.append(data)
# list to np array
X = np.array(X)
return X
# Returns a matrix encoding of a CpG matrix
def _encode_input_matrix(self, m):
"""
:param m: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:return: list of relative proportions of each type of methylation pattern, number of reads
"""
matrix = np.copy(m)
n_cpgs = matrix.shape[1]
matrix += 1 # deal with -1s
base_3_vec = np.power(3, np.arange(n_cpgs - 1, -1, -1))
encodings = np.dot(base_3_vec, matrix.T)
encoded_vector_dim = np.power(3, n_cpgs)
encoded_vector = np.zeros(encoded_vector_dim)
for x in encodings:
encoded_vector[int(x)] += 1
num_reads = encodings.shape[0]
# Now we normalize
encoded_vector_norm = normalize([encoded_vector], norm="l1")
return encoded_vector_norm[0], num_reads
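# Worked example (sketch): for a 2-CpG matrix with reads [1, 0] and [-1, 1],
# adding 1 gives [2, 1] and [0, 2]; with base-3 weights [3, 1] these encode to
# 2*3 + 1 = 7 and 0*3 + 2 = 2, so entries 7 and 2 of the length-9 count vector
# are incremented before the L1 normalization.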
# finds the majority class of the given column, discounting the current cpg
def _get_column_mean(self, matrix, col_i, current_cpg_state):
"""
:param matrix: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:param col_i: integer, the column index
:param current_cpg_state: the cpg to discount
:return: the mean value of column col_i, discounting current_cpg_state
"""
sub = matrix[:, col_i]
return self._get_mean(sub, current_cpg_state)
# finds the majority class of the given read, discounting the current cpg
def _get_read_mean(self, matrix, read_i, current_cpg_state):
"""
:param matrix: a 2d np array, dtype=float, representing a CpG matrix, 1=methylated, 0=unmethylated, -1=unknown
:param read_i: integer, the row index
:param current_cpg_state: the cpg to discount
:return: the mean value of row read_i, discounting current_cpg_state
"""
sub = matrix[read_i, :]
return self._get_mean(sub, current_cpg_state)
# Return the mean of sub matrix, discounting the current cpg methylation state
def _get_mean(self, sub_matrix, current_cpg_state):
'''
:param sub_matrix: a list of individual cpgs
:param current_cpg_state: the cpg to discount
:return: the mean value of the list, discounting current_cpg_state
'''
num_methy = np.count_nonzero(sub_matrix == self.METHYLATED)
num_unmethy = np.count_nonzero(sub_matrix == self.UNMETHYLATED)
if current_cpg_state == self.METHYLATED:
num_methy -= 1
num_methy = max(0, num_methy)
if current_cpg_state == self.UNMETHYLATED:
num_unmethy -= 1
num_unmethy = max(0, num_unmethy)
if float(num_methy + num_unmethy) == 0:
return -2
return float(num_methy) / float(num_methy + num_unmethy)
# Returns X, y
# note: y can contain the labels 1,0, -1
def _collectFeatures(self, bins):
"""
Given a list of cpg bins, collect features for each artificially masked CpG
and record the hidden value as the class label.
:param bins: list of CpG bins that contain CpG matrices
:return: feature matrix X and class labels y
"""
X = []
Y = []
for Bin in tqdm(bins):
observed_matrix = Bin.tag2["observed"]
truth_matrix = Bin.tag2["truth"]
encoding = self._encode_input_matrix(observed_matrix)[0]
numReads = observed_matrix.shape[0]
density = observed_matrix.shape[1]
#positions = Bin.cpgPositions
nan_copy = np.copy(observed_matrix)
nan_copy[nan_copy == -1] = np.nan
column_means = np.nanmean(nan_copy,axis=0)
row_means = np.nanmean(nan_copy,axis=1)
for i in range(numReads):
for j in range(density):
observed_state = observed_matrix[i,j]
if observed_state != -1:
continue
state = truth_matrix[i,j]
Y.append(state)
# row and column means
row_mean = row_means[i]
col_mean = column_means[j]
# j is the current index in the row
# encoding is the matrix encoding vector
# differences is the difference in positions of the cpgs
row = np.copy(observed_matrix[i])
row[j] = -1
data = [row_mean] + [col_mean] + [i, j] + list(row) + list(encoding)
X.append(data)
X = np.array(X)
Y = np.array(Y)
Y.astype(int)
return X, Y
#### Helper functions ####
# Returns a list of bins similar to the input
# but matrix rows with missing values are removed
def _filter_bad_reads(bins):
"""
Given a list of cpg bins, remove reads with missing values
so we can mask them.
:param bins: list of CpG bins that contain CpG matrices
:return: bins, but all reads with missing values have been removed
"""
filtered_bins = []
for Bin in bins:
newBin = copy.deepcopy(Bin)
matrix = newBin.matrix
# find rows with missing values
counts = np.count_nonzero(matrix == -1, axis=1)
idx = counts == 0
matrix_filtered = matrix[idx]
newBin.matrix = matrix_filtered
filtered_bins.append(newBin)
return filtered_bins
# Returns a mapping of dimensions to list of masks that can be used on data
# of that size. the missing pattern is in matrix form.
# -1 is missing, 2 is known
def _extract_masks( bins):
"""
Given a list of cpg bins, return a list matrices that
represent the patterns of missing values, or "masks"
:param bins: list of CpG bins that contain CpG matrices
:return: list of matrices that represent the patterns of missing values
"""
masks = defaultdict(lambda: [])
for Bin in tqdm(bins):
matrix = np.copy(Bin.matrix)
matrix[matrix >= 0] = 2
#min_missing = 10
min_missing = 1 # must have at least 1 missing value
if np.count_nonzero(matrix == -1) >= min_missing:
masks[matrix.shape].append(matrix)
return masks
# Extract masks from original matrices and apply them to the complete matrices
def _apply_masks( filtered_bins, all_bins ):
"""
Given a list of filtered cpg bins and a list of all the bins,
extract masks from the original bins and apply them to the filtered bins.
:param filtered_bins: bins with no reads with missing values.
:param all_bins: list of CpG bins that contain CpG matrices
:return: list of bins whose tag2 holds the truth matrix, the masked (observed) matrix, and the mask
"""
masks = _extract_masks( all_bins )
ready_bins = []
for Bin in filtered_bins:
truth_matrix = Bin.matrix
m_shape = truth_matrix.shape
if m_shape in masks:
if len( masks [ m_shape ] ) > 0:
mask = random.choice(masks[m_shape])
observed = np.minimum(truth_matrix, mask)
Bin.tag2 = {"truth":truth_matrix, "observed":observed, "mask":mask}
ready_bins.append(Bin)
return ready_bins
# Get a list of bins with no missing data
def _filter_missing_data( bins, min_read_depth=1 ):
"""
Given a list of filtered cpg bins and a list of all the bins,
extract masks from the original bins and apply them to the filtered bins.
:param bins: list of CpG bins that contain CpG matrices
:param min_read_depth: minimum number of reads needed for a bin to be complete.
:return: remove reads with missing values from bins
"""
cpg_bins_complete = _filter_bad_reads(bins)
# secondary depth filter
cpg_bins_complete_depth = [bin_ for bin_ in cpg_bins_complete if bin_.matrix.shape[0] >= min_read_depth]
return cpg_bins_complete_depth
|
from app import db
class Dataset(db.Model):
__tablename__ = 'dataset'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
user_id = db.Column(db.ForeignKey('user.id'), index=True)
creation_date = db.Column(db.DateTime)
modification_date = db.Column(db.DateTime)
rep_image = db.Column(db.String(300))
user = db.relationship(
'User', primaryjoin='Dataset.user_id == User.id', backref='datasets')
class ImageMetadata(db.Model):
__tablename__ = 'image_metadata'
id = db.Column(db.Integer, primary_key=True)
annotation = db.Column(db.JSON)
image_url = db.Column(db.String(300))
image_name = db.Column(db.String(50))
image_size = db.Column(db.String(30))
dataset_id = db.Column(db.ForeignKey(
'dataset.id', onupdate='CASCADE'), index=True)
size = db.Column(db.String(20))
creation_date = db.Column(db.DateTime)
modification_date = db.Column(db.DateTime)
dataset = db.relationship(
'Dataset', primaryjoin='ImageMetadata.dataset_id == Dataset.id', backref='image_metadata')
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30))
login_id = db.Column(db.String(30))
login_password = db.Column(db.String(30))
email = db.Column(db.String(30))
|
#!/usr/bin/env python
import os
import os.path as osp
import argparse
from dataclasses import dataclass
from datetime import datetime
import numpy as np
import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision.datasets import CocoDetection, CIFAR10, ImageNet # NOQA: F401
from torchvision import transforms
from yacs.config import CfgNode as CN
from efficient_net_v2.config import get_cfg
from efficient_net_v2.model import EfficientNetV2
from efficient_net_v2.logger import logger
def get_transforms() -> dict:
norm = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
return {
'train': transforms.Compose(
[
transforms.RandomResizedCrop((256, 256)),
transforms.Resize([224, 224]),
# transforms.Grayscale(num_output_channels=3),
# transforms.ColorJitter([0.5, 2], [0.5, 2], 0.5, 0.5),
transforms.RandomAffine(degrees=180, scale=(0.2, 2.0)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
norm,
]
),
'val': transforms.Compose(
[
# transforms.Resize(config.INPUT_SHAPE[1:]),
transforms.Resize([224, 224]),
transforms.ToTensor(),
norm,
]
),
}
class Coco2017Dataset(torch.utils.data.Dataset):
def __init__(
self, root: str, data_type='train', num_class: int = 91, **kwargs: dict
):
self.data_transforms = get_transforms()
file_name = 'train2017' if data_type == 'train' else 'val2017'
self.dataset = CocoDetection(
root=osp.join(root, file_name),
annFile=osp.join(root, f'annotations/instances_{file_name}.json'),
# transform=data_transforms[data_type],
)
"""
logger.info(f'Dataset DRY Run: {len(dataset)}')
self.dataset = []
for d in tqdm.tqdm(dataset):
if len(d[1]) == 0:
continue
self.dataset.append(d)
"""
self.data_type = data_type
self.num_class = num_class
np.random.seed(256)
def __getitem__(self, index: int, use_cropped: bool = True):
if use_cropped:
return self.get_cropped_instance(index)
image, targets = self.dataset.__getitem__(index)
labels = torch.zeros(self.num_class, dtype=torch.int64)
for target in targets:
cat_id = target['category_id']
labels[cat_id] = cat_id
return image, labels
def get_cropped_instance(self, index: int):
while True:
image, targets = self.dataset.__getitem__(index)
if len(targets) > 0:
break
index = np.random.randint(0, len(self.dataset))
selected = np.random.randint(0, len(targets))
target = targets[selected]
category_id = target['category_id']
bbox = np.array(target['bbox'], dtype=np.intp)
center = np.average([bbox[:2], bbox[:2] + bbox[2:]], axis=0)
new_size = np.array(
(
np.random.randint(bbox[2], 2 * np.maximum(bbox[2], 1)),
np.random.randint(bbox[3], 2 * np.maximum(bbox[3], 1)),
),
)
x1, y1 = np.maximum(center - new_size / 2, [0, 0]).astype(np.intp)
x2, y2 = np.minimum(center + new_size / 2, image.size).astype(np.intp)
image = image.crop([x1 + 1, y1 + 1, x2 - 1, y2 - 1])
return self.data_transforms[self.data_type](image), category_id
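# Cropping sketch: for a target bbox [x, y, w, h] = [10, 20, 30, 40] the crop is
# centred on (25, 40); its width is drawn from [30, 60) and its height from
# [40, 80), the window is clipped to the image bounds, shrunk by one pixel on
# each side, and only then does the train/val transform run on the crop.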
def __len__(self):
return len(self.dataset)
def get_name(prefix=''):
return prefix + '_' + datetime.now().strftime("%Y%m%d%H%M%S")
def create_logging_dir(output_dir):
if not osp.isdir(output_dir):
os.mkdir(output_dir)
log_dir = osp.join(output_dir, get_name('train'))
os.mkdir(log_dir)
return log_dir
@dataclass
class Optimizer(object):
cfg: CN
train_dl: DataLoader
val_dl: DataLoader = None
weight_path: str = None
def __post_init__(self):
assert self.cfg is not None
assert self.train_dl is not None
self.device = torch.device(
f'cuda:{0}' if torch.cuda.is_available() else 'cpu'
)
logger.info(f'Device: {self.device}')
self.model = EfficientNetV2(self.cfg)
self.model = self.model.to(self.device)
self.model = nn.DataParallel(self.model)
if self.weight_path is not None:
assert osp.isfile(self.weight_path), f'{self.weight_path} not found'
state_dict = torch.load(self.weight_path)['model_state_dict']
self.model.load_state_dict(state_dict, strict=True)
logger.info(f'Loaded Model Weights {self.weight_path}')
else:
logger.info('Using initialized weights')
self.criterion = nn.CrossEntropyLoss().to(self.device)
# self.criterion = nn.BCELoss()
self.optim = torch.optim.Adam(
params=self.model.parameters(),
lr=self.cfg.SOLVER.LR,
)
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer=self.optim,
step_size=self.cfg.SOLVER.LR_DECAY_STEP,
gamma=self.cfg.SOLVER.LR_DECAY,
)
total_params = 0
for parameter in self.model.parameters():
if parameter.requires_grad:
total_params += np.prod(parameter.size())
logger.info(f'total_params: {total_params}')
self.log_dir = None
def run(self):
self()
def __call__(self):
epochs = self.cfg.SOLVER.EPOCHS
lr = self.cfg.SOLVER.LR
batch = self.cfg.SOLVER.BATCH_SIZE
epoch_loss = ''
with tqdm.tqdm(total=epochs, desc='Epochs %s') as epoch_pbar:
for epoch in range(epochs):
epoch_pbar.set_description(
desc=f'Epoch {epoch + 1} | LOSS: {epoch_loss} '
+ f'| LR: {lr} | BATCH: {batch}',
refresh=True,
)
epoch_pbar.update(1)
epoch_loss = self.train_one_epoch(epoch=epoch)
if self.log_dir is None:
self.log_dir = create_logging_dir(self.cfg.OUTPUT_DIR)
logger.info(f'Output Directory: {self.log_dir}')
torch.save(
{
'model_state_dict': self.model.state_dict(),
},
osp.join(self.log_dir, 'checkpoint.pth.tar'),
)
def train_one_epoch(self, epoch):
self.model.train()
running_loss = 0.0
total_iter = len(self.train_dl)
with tqdm.tqdm(total=total_iter) as pbar:
for index, (images, targets) in enumerate(self.train_dl):
images = images.to(self.device)
targets = targets.to(self.device)
self.optim.zero_grad()
result = self.model(images)
loss = self.criterion(result, targets)
# loss_data = loss.data()
running_loss += loss.item()
assert not (torch.isnan(loss) or torch.isinf(loss))
loss.backward()
self.optim.step()
pbar.set_description(desc='Loss %3.8f' % loss)
pbar.update(1)
return running_loss / len(self.train_dl)
def validation(self):
self.model.eval()
with torch.no_grad():
pass
def load_state_dict(self, path: str, strict: bool = True):
state_dict = torch.load(path, map_location=self.device)
self.model.load_state_dict(state_dict, strict=strict)
def main(args):
cfg = get_cfg()
cfg.OUTPUT_DIR = args.output_dir
num_workers = cfg.DATASETS.NUM_WORKER
batch_size = cfg.SOLVER.BATCH_SIZE
transforms = get_transforms()
train_dl = DataLoader(
# Coco2017Dataset(root=args.root),
# CIFAR10(root=args.root, train=True, download=False, transform=transforms['train']), # NOQA
ImageNet(root=args.root, split='train', transform=transforms['train']),
batch_size=batch_size,
num_workers=num_workers,
shuffle=True,
)
val_dl = DataLoader(
# Coco2017Dataset(root=args.root, data_type='val'),
# CIFAR10(root=args.root, train=False, download=False, transform=transforms['val']), # NOQA
ImageNet(root=args.root, split='val', transform=transforms['val']),
batch_size=batch_size,
num_workers=num_workers,
shuffle=False,
)
assert val_dl
optimizer = Optimizer(
cfg=cfg,
weight_path=args.weights,
train_dl=train_dl,
# val_dl=val_dl
)
optimizer()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, required=True)
parser.add_argument('--output_dir', type=str, required=True)
parser.add_argument('--weights', type=str, required=False, default=None)
args = parser.parse_args()
main(args)
|
import os, time, sys
import matplotlib.pyplot as plt
import itertools
import pickle
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
# G(z)
class generator(nn.Module):
# initializers
# batch size = d
def __init__(self, d=128):
super(generator, self).__init__()
self.deconv1_1 = nn.ConvTranspose2d(100, d*4, 4, 1, 0)
self.deconv1_1_bn = nn.BatchNorm2d(d*4)
self.deconv1_2 = nn.ConvTranspose2d(2, d*4, 4, 1, 0)
self.deconv1_2_bn = nn.BatchNorm2d(d*4)
self.deconv2 = nn.ConvTranspose2d(d*8, d*4, 4, 2, 1)
self.deconv2_bn = nn.BatchNorm2d(d*4)
self.deconv3 = nn.ConvTranspose2d(d*4, d*2, 4, 2, 1)
self.deconv3_bn = nn.BatchNorm2d(d*2)
# self.deconv4 = nn.ConvTranspose2d(d, 3, 4, 2, 1)
self.deconv4 = nn.ConvTranspose2d(d*2, d, 4, 2, 1)
self.deconv4_bn = nn.BatchNorm2d(d)
self.deconv5 = nn.ConvTranspose2d(d, 3, 4, 2, 1)
# weight_init
def weight_init(self, mean, std):
for m in self._modules:
normal_init(self._modules[m], mean, std)
# forward method
# def forward(self, input):
def forward(self, input, label):
x = F.leaky_relu(self.deconv1_1_bn(self.deconv1_1(input)), 0.2)
y = F.leaky_relu(self.deconv1_2_bn(self.deconv1_2(label)), 0.2)
x = torch.cat([x, y], 1)
x = F.leaky_relu(self.deconv2_bn(self.deconv2(x)), 0.2)
x = F.leaky_relu(self.deconv3_bn(self.deconv3(x)), 0.2)
# x = F.tanh(self.deconv4(x))
x = F.leaky_relu(self.deconv4_bn(self.deconv4(x)), 0.2)
x = F.tanh(self.deconv5(x))
return x
class discriminator(nn.Module):
# initializers
# batch size = d
def __init__(self, d=128):
super(discriminator, self).__init__()
self.conv1_1 = nn.Conv2d(3, d//2, 4, 2, 1)
self.conv1_2 = nn.Conv2d(2, d//2, 4, 2, 1)
self.conv2 = nn.Conv2d(d, d*2, 4, 2, 1)
self.conv2_bn = nn.BatchNorm2d(d*2)
self.conv3 = nn.Conv2d(d*2, d*4, 4, 2, 1)
self.conv3_bn = nn.BatchNorm2d(d*4)
# self.conv4 = nn.Conv2d(d*4, 1, 4, 1, 0)
self.conv4 = nn.Conv2d(d*4, d*8, 4, 2, 1)
self.conv4_bn = nn.BatchNorm2d(d*8)
self.conv5 = nn.Conv2d(d*8, 1, 4, 1, 0)
# weight_init
def weight_init(self, mean, std):
for m in self._modules:
normal_init(self._modules[m], mean, std)
# forward method
# def forward(self, input):
def forward(self, input, label):
x = F.leaky_relu(self.conv1_1(input), 0.2)
y = F.leaky_relu(self.conv1_2(label), 0.2)
x = torch.cat([x, y], 1)
x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)
x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)
# x = F.sigmoid(self.conv4(x))
x = F.leaky_relu(self.conv4_bn(self.conv4(x)), 0.2)
x = F.sigmoid(self.conv5(x))
return x
def normal_init(m, mean, std):
if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):
m.weight.data.normal_(mean, std)
m.bias.data.zero_()
# label preprocess
img_size = 64
onehot = torch.zeros(2, 2)
onehot = onehot.scatter_(1, torch.LongTensor([0, 1]).view(2, 1), 1).view(2, 2, 1, 1)
fill = torch.zeros([2, 2, img_size, img_size])
for i in range(2):
fill[i, i, :, :] = 1
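# Label tensor sketch: onehot is a (2, 2, 1, 1) identity lookup used to condition
# the generator (onehot[k] has a 1 only in channel k), while fill is a
# (2, 2, 64, 64) lookup whose k-th entry has channel k set to all ones, used to
# condition the discriminator with full-resolution label planes.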
with open('data/resized_celebA/gender_label.pkl', 'rb') as fp:
y_gender_ = pickle.load(fp)
y_gender_ = torch.LongTensor(y_gender_).squeeze()
# fixed noise & label
temp_z0_ = torch.randn(4, 100)
temp_z0_ = torch.cat([temp_z0_, temp_z0_], 0)
temp_z1_ = torch.randn(4, 100)
temp_z1_ = torch.cat([temp_z1_, temp_z1_], 0)
fixed_z_ = torch.cat([temp_z0_, temp_z1_], 0)
fixed_y_ = torch.cat([torch.zeros(4), torch.ones(4), torch.zeros(4), torch.ones(4)], 0).type(torch.LongTensor).squeeze()
fixed_z_ = fixed_z_.view(-1, 100, 1, 1)
fixed_y_label_ = onehot[fixed_y_]
fixed_z_, fixed_y_label_ = Variable(fixed_z_.cuda(), volatile=True), Variable(fixed_y_label_.cuda(), volatile=True)
def show_result(num_epoch, show = False, save = False, path = 'result.png'):
G.eval()
test_images = G(fixed_z_, fixed_y_label_)
G.train()
size_figure_grid = 4
fig, ax = plt.subplots(size_figure_grid, size_figure_grid, figsize=(5, 5))
for i, j in itertools.product(range(size_figure_grid), range(size_figure_grid)):
ax[i, j].get_xaxis().set_visible(False)
ax[i, j].get_yaxis().set_visible(False)
for k in range(size_figure_grid*size_figure_grid):
i = k // size_figure_grid
j = k % size_figure_grid
ax[i, j].cla()
ax[i, j].imshow((test_images[k].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)
label = 'Epoch {0}'.format(num_epoch)
fig.text(0.5, 0.04, label, ha='center')
if save:
plt.savefig(path)
if show:
plt.show()
else:
plt.close()
def show_train_hist(hist, show = False, save = False, path = 'Train_hist.png'):
x = range(len(hist['D_losses']))
y1 = hist['D_losses']
y2 = hist['G_losses']
plt.plot(x, y1, label='D_loss')
plt.plot(x, y2, label='G_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc=4)
plt.grid(True)
plt.tight_layout()
if save:
plt.savefig(path)
if show:
plt.show()
else:
plt.close()
def show_noise_morp(show=False, save=False, path='result.png'):
source_z_ = torch.randn(10, 100)
z_ = torch.zeros(100, 100)
for i in range(5):
for j in range(10):
z_[i*20 + j] = (source_z_[i*2+1] - source_z_[i*2]) / 9 * (j+1) + source_z_[i*2]
for i in range(5):
z_[i*20+10:i*20+20] = z_[i*20:i*20+10]
y_ = torch.cat([torch.zeros(10, 1), torch.ones(10, 1)], 0).type(torch.LongTensor).squeeze()
y_ = torch.cat([y_, y_, y_, y_, y_], 0)
y_label_ = onehot[y_]
z_ = z_.view(-1, 100, 1, 1)
y_label_ = y_label_.view(-1, 2, 1, 1)
z_, y_label_ = Variable(z_.cuda(), volatile=True), Variable(y_label_.cuda(), volatile=True)
G.eval()
test_images = G(z_, y_label_)
G.train()
size_figure_grid = 10
fig, ax = plt.subplots(size_figure_grid, size_figure_grid, figsize=(img_size, img_size))
for i, j in itertools.product(range(size_figure_grid), range(size_figure_grid)):
ax[i, j].get_xaxis().set_visible(False)
ax[i, j].get_yaxis().set_visible(False)
for k in range(10 * 10):
i = k // 10
j = k % 10
ax[i, j].cla()
ax[i, j].imshow((test_images[k].cpu().data.numpy().transpose(1, 2, 0) + 1) / 2)
if save:
plt.savefig(path)
if show:
plt.show()
else:
plt.close()
# training parameters
batch_size = 128
lr = 0.0002
train_epoch = 20
# data_loader
isCrop = False
if isCrop:
transform = transforms.Compose([
transforms.Scale(108),
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])
data_dir = 'data/resized_celebA' # this path depends on your computer
dset = datasets.ImageFolder(data_dir, transform)
dset.imgs.sort()
train_loader = torch.utils.data.DataLoader(dset, batch_size=128, shuffle=False)
temp = plt.imread(train_loader.dataset.imgs[0][0])
if (temp.shape[0] != img_size) or (temp.shape[1] != img_size):
sys.stderr.write('Error! image size is not 64 x 64! run \"celebA_data_preprocess.py\" !!!')
sys.exit(1)
# network
G = generator(128)
D = discriminator(128)
G.weight_init(mean=0.0, std=0.02)
D.weight_init(mean=0.0, std=0.02)
G.cuda()
D.cuda()
# Binary Cross Entropy loss
BCE_loss = nn.BCELoss()
# Adam optimizer
G_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))
D_optimizer = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))
# results save folder
root = 'CelebA_cDCGAN_results/'
model = 'CelebA_cDCGAN_'
if not os.path.isdir(root):
os.mkdir(root)
if not os.path.isdir(root + 'Fixed_results'):
os.mkdir(root + 'Fixed_results')
train_hist = {}
train_hist['D_losses'] = []
train_hist['G_losses'] = []
train_hist['per_epoch_ptimes'] = []
train_hist['total_ptime'] = []
print('training start!')
start_time = time.time()
for epoch in range(train_epoch):
D_losses = []
G_losses = []
# learning rate decay
if (epoch+1) == 11:
G_optimizer.param_groups[0]['lr'] /= 10
D_optimizer.param_groups[0]['lr'] /= 10
print("learning rate change!")
if (epoch+1) == 16:
G_optimizer.param_groups[0]['lr'] /= 10
D_optimizer.param_groups[0]['lr'] /= 10
print("learning rate change!")
y_real_ = torch.ones(batch_size)
y_fake_ = torch.zeros(batch_size)
y_real_, y_fake_ = Variable(y_real_.cuda()), Variable(y_fake_.cuda())
epoch_start_time = time.time()
num_iter = 0
for x_, _ in train_loader:
# train discriminator D
D.zero_grad()
if isCrop:
x_ = x_[:, :, 22:86, 22:86]
mini_batch = x_.size()[0]
if mini_batch != batch_size:
y_real_ = torch.ones(mini_batch)
y_fake_ = torch.zeros(mini_batch)
y_real_, y_fake_ = Variable(y_real_.cuda()), Variable(y_fake_.cuda())
y_ = y_gender_[batch_size*num_iter:]
else:
y_ = y_gender_[batch_size*num_iter:batch_size*(num_iter+1)]
y_fill_ = fill[y_]
x_, y_fill_ = Variable(x_.cuda()), Variable(y_fill_.cuda())
D_result = D(x_, y_fill_).squeeze()
D_real_loss = BCE_loss(D_result, y_real_)
z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)
y_ = (torch.rand(mini_batch, 1) * 2).type(torch.LongTensor).squeeze()
y_label_ = onehot[y_]
y_fill_ = fill[y_]
z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda())
G_result = G(z_, y_label_)
D_result = D(G_result, y_fill_).squeeze()
D_fake_loss = BCE_loss(D_result, y_fake_)
D_fake_score = D_result.data.mean()
D_train_loss = D_real_loss + D_fake_loss
D_train_loss.backward()
D_optimizer.step()
D_losses.append(D_train_loss.data[0])
# train generator G
G.zero_grad()
z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)
y_ = (torch.rand(mini_batch, 1) * 2).type(torch.LongTensor).squeeze()
y_label_ = onehot[y_]
y_fill_ = fill[y_]
z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda())
G_result = G(z_, y_label_)
D_result = D(G_result, y_fill_).squeeze()
G_train_loss = BCE_loss(D_result, y_real_)
G_train_loss.backward()
G_optimizer.step()
G_losses.append(G_train_loss.data[0])
num_iter += 1
if (num_iter % 100) == 0:
print('%d - %d complete!' % ((epoch+1), num_iter))
epoch_end_time = time.time()
per_epoch_ptime = epoch_end_time - epoch_start_time
print('[%d/%d] - ptime: %.2f, loss_d: %.3f, loss_g: %.3f' % ((epoch + 1), train_epoch, per_epoch_ptime, torch.mean(torch.FloatTensor(D_losses)),
torch.mean(torch.FloatTensor(G_losses))))
fixed_p = root + 'Fixed_results/' + model + str(epoch + 1) + '.png'
show_result((epoch+1), save=True, path=fixed_p)
train_hist['D_losses'].append(torch.mean(torch.FloatTensor(D_losses)))
train_hist['G_losses'].append(torch.mean(torch.FloatTensor(G_losses)))
train_hist['per_epoch_ptimes'].append(per_epoch_ptime)
end_time = time.time()
total_ptime = end_time - start_time
train_hist['total_ptime'].append(total_ptime)
print("Avg one epoch ptime: %.2f, total %d epochs ptime: %.2f" % (torch.mean(torch.FloatTensor(train_hist['per_epoch_ptimes'])), train_epoch, total_ptime))
print("Training finish!... save training results")
torch.save(G.state_dict(), root + model + 'generator_param.pkl')
torch.save(D.state_dict(), root + model + 'discriminator_param.pkl')
with open(root + model + 'train_hist.pkl', 'wb') as f:
pickle.dump(train_hist, f)
show_train_hist(train_hist, save=True, path=root + model + 'train_hist.png')
images = []
for e in range(train_epoch):
img_name = root + 'Fixed_results/' + model + str(e + 1) + '.png'
images.append(imageio.imread(img_name))
imageio.mimsave(root + model + 'generation_animation.gif', images, fps=5)
show_noise_morp(save=True, path=root + model + 'warp.png') |
def parse_pointer_leak(leaked_data):
    if isinstance(leaked_data, bytes):
        leaked_data = leaked_data.decode()
    leaked_data = leaked_data.replace("(nil)", "0x0")
    # Tokenize on whitespace so each entry is a single hex address string
    parts = leaked_data.split()
    return list(map(lambda addr: int(addr, 16), parts))
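# Usage sketch (hypothetical leak output from a format-string read):
#   parse_pointer_leak(b"0x7ffe1234 (nil) 0x55d8abcd")  ->  [0x7ffe1234, 0x0, 0x55d8abcd]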
|
"""
Based on
https://github.com/chipsalliance/chisel3/blob/master/src/main/scala/chisel3/util/Decoupled.scala
(missing support for `flow` and `pipe` parameters)
"""
import magma as m
from mantle2.counter import CounterTo
class Queue(m.Generator2):
def __init__(self, entries: int, T: m.Kind):
assert entries >= 0
self.io = m.IO(
# Flipped since enq/deq is from perspective of the client
enq=m.DeqIO[T],
deq=m.EnqIO[T]
) + m.ClockIO()
# Data storage
ram = m.Memory(entries, T)()
# Read/write pointers (counts to entries - 1 since we include 0 as an
# index)
enq_ptr = CounterTo(entries - 1, has_enable=True)()
deq_ptr = CounterTo(entries - 1, has_enable=True)()
# Since the pointers can match when it's empty or full, we use an extra
# bit to track when it may be full (there's been a write without a
# read)
maybe_full = m.Register(init=False, has_enable=True)()
ptr_match = enq_ptr.O == deq_ptr.O
# Empty/full determined by pointers matching and maybe_full bit
empty = ptr_match & ~maybe_full.O
full = ptr_match & maybe_full.O
# deq data is valid when not empty
self.io.deq.valid @= ~empty
# enq is ready when not full
self.io.enq.ready @= ~full
# do enq/deq when ready/valid are high
do_enq = self.io.enq.fired()
do_deq = self.io.deq.fired()
# write enq data when do_enq
ram.write(self.io.enq.data, enq_ptr.O, m.enable(do_enq))
# Increment pointers on read/write
enq_ptr.CE @= m.enable(do_enq)
deq_ptr.CE @= m.enable(do_deq)
# Set maybe full when enq without deq
maybe_full.I @= m.enable(do_enq)
maybe_full.CE @= m.enable(do_enq != do_deq)
# Deq data from storage
self.io.deq.data @= ram[deq_ptr.O]
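# Pointer bookkeeping sketch: when enq_ptr == deq_ptr the queue is either empty
# (maybe_full == 0, no unmatched write) or full (maybe_full == 1, a write happened
# without a matching read); any other pointer combination means the queue is
# partially filled, so both empty and full are false.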
|
#! python3
import time
from colorama import Fore, Back
indent = 0
indent_increasing = True
while True:
print(" " * indent, end=" ")
print(f"{Back.RED + Fore.WHITE}*********\n\n")
time.sleep(0.1)
if indent_increasing:
indent = indent + 1
if indent == 15:
indent_increasing = False
else:
indent = indent - 1
if indent == 0:
indent_increasing = True
|
import torch
from typing import Optional, Tuple
from pytorch_lightning import LightningDataModule
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, Dataset, WeightedRandomSampler, Subset
import numpy as np
import pandas as pd
from collections import Counter
from src.utils import utils
import matplotlib.pyplot as plt
import os
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.bar import add_bar_trace
import plotly.express as px
from scripts.python.routines.plot.layout import add_layout
import plotly.graph_objects as go
log = utils.get_logger(__name__)
class EEGDataset(Dataset):
def __init__(
self,
data: pd.DataFrame,
output: pd.DataFrame,
outcome: str
):
self.data = data
self.output = output
self.outcome = outcome
self.num_subjects = self.data.shape[0]
self.num_features = self.data.shape[1]
self.ys = self.output.loc[:, self.outcome].values
def __getitem__(self, idx: int):
x = self.data.iloc[idx, :].to_numpy()
y = self.ys[idx]
return (x, y, idx)
def __len__(self):
return self.num_subjects
class EEGDataModuleSeparate(LightningDataModule):
def __init__(
self,
path: str = "",
features_fn: str = "",
classes_fn: str = "",
trn_val_fn: str = "",
tst_fn: str = "",
outcome: str = "",
trn_val_split: Tuple[float, float] = (0.8, 0.2),
batch_size: int = 64,
num_workers: int = 0,
pin_memory: bool = False,
seed: int = 1337,
weighted_sampler = False,
**kwargs,
):
super().__init__()
self.path = path
self.features_fn = features_fn
self.classes_fn = classes_fn
self.trn_val_fn = trn_val_fn
self.tst_fn = tst_fn
self.outcome = outcome
self.trn_val_split = trn_val_split
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
self.seed = seed
self.weighted_sampler = weighted_sampler
self.dataset_trn: Optional[Dataset] = None
self.dataset_val: Optional[Dataset] = None
self.dataset_tst: Optional[Dataset] = None
def prepare_data(self):
"""Download data if needed. This method is called only from a single GPU.
Do not use it to assign state (self.x = y)."""
pass
def setup(self, stage: Optional[str] = None):
self.trn_val = pd.read_excel(f"{self.path}/{self.trn_val_fn}", index_col="index")
self.tst = pd.read_excel(f"{self.path}/{self.tst_fn}", index_col="index")
features_df = pd.read_excel(self.features_fn)
self.features_names = features_df.loc[:, 'features'].values
classes_df = pd.read_excel(self.classes_fn)
self.classes_dict = {}
for cl_id, cl in enumerate(classes_df.loc[:, self.outcome].values):
self.classes_dict[cl] = cl_id
self.trn_val = self.trn_val.loc[self.trn_val[self.outcome].isin(self.classes_dict)]
self.trn_val[f'{self.outcome}_origin'] = self.trn_val[self.outcome]
self.trn_val[self.outcome].replace(self.classes_dict, inplace=True)
self.tst = self.tst.loc[self.tst[self.outcome].isin(self.classes_dict)]
self.tst[f'{self.outcome}_origin'] = self.tst[self.outcome]
self.tst[self.outcome].replace(self.classes_dict, inplace=True)
self.data_trn_val = self.trn_val.loc[:, self.features_names]
self.data_trn_val = self.data_trn_val.astype('float32')
self.output_trn_val = self.trn_val.loc[:, [self.outcome, f'{self.outcome}_origin']]
self.data_tst = self.tst.loc[:, self.features_names]
self.data_tst = self.data_tst.astype('float32')
self.output_tst = self.tst.loc[:, [self.outcome, f'{self.outcome}_origin']]
self.data = pd.concat([self.data_trn_val, self.data_tst])
self.output = pd.concat([self.output_trn_val, self.output_tst])
if not list(self.data.index.values) == list(self.output.index.values):
log.info(f"Error! Indexes have different order")
raise ValueError(f"Error! Indexes have different order")
self.ids_trn_val = np.arange(self.trn_val.shape[0])
self.ids_tst = np.arange(self.tst.shape[0]) + self.trn_val.shape[0]
# self.dims is returned when you call datamodule.size()
self.dims = (1, self.data.shape[1])
self.dataset = EEGDataset(self.data, self.output, self.outcome)
assert abs(1.0 - sum(self.trn_val_split)) < 1.0e-8, "Sum of trn_val_split must be 1"
self.ids_trn, self.ids_val = train_test_split(
self.ids_trn_val,
test_size=self.trn_val_split[1],
stratify=self.dataset.ys[self.ids_trn_val],
random_state=self.seed
)
dict_to_plot = {
"trn": self.ids_trn,
"val": self.ids_val,
"tst": self.ids_tst
}
for name, ids in dict_to_plot.items():
if not os.path.exists(f"{self.path}/figs"):
os.makedirs(f"{self.path}/figs")
classes_counts = pd.DataFrame(Counter(self.output[f'{self.outcome}_origin'].values[ids]), index=[0])
classes_counts = classes_counts.reindex(classes_df.loc[:, self.outcome].values, axis=1)
fig = go.Figure()
for st, st_id in self.classes_dict.items():
add_bar_trace(fig, x=[st], y=[classes_counts.at[0, st]], text=[classes_counts.at[0, st]], name=st)
add_layout(fig, f"", f"Count", "")
fig.update_layout({'colorway': px.colors.qualitative.Set1})
fig.update_xaxes(showticklabels=False)
save_figure(fig, f"bar_{name}")
self.output.loc[self.output.index[self.ids_trn], 'Part'] = "trn"
self.output.loc[self.output.index[self.ids_val], 'Part'] = "val"
self.output.loc[self.output.index[self.ids_tst], 'Part'] = "tst"
self.output.to_excel(f"output.xlsx", index=True)
self.dataset_trn = Subset(self.dataset, self.ids_trn)
self.dataset_val = Subset(self.dataset, self.ids_val)
self.dataset_tst = Subset(self.dataset, self.ids_tst)
log.info(f"total_count: {len(self.dataset)}")
log.info(f"trn_count: {len(self.dataset_trn)}")
log.info(f"val_count: {len(self.dataset_val)}")
log.info(f"tst_count: {len(self.dataset_tst)}")
def get_trn_val_dataset_and_labels(self):
return Subset(self.dataset, self.ids_trn_val), self.dataset.ys[self.ids_trn_val]
def get_weighted_sampler(self):
return self.weighted_sampler
def train_dataloader(self):
ys_trn = self.dataset.ys[self.ids_trn]
class_counter = Counter(ys_trn)
class_weights = {c: 1.0 / class_counter[c] for c in class_counter}
weights = torch.FloatTensor([class_weights[y] for y in ys_trn])
if self.weighted_sampler:
weighted_sampler = WeightedRandomSampler(
weights=weights,
num_samples=len(weights),
replacement=True
)
return DataLoader(
dataset=self.dataset_trn,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
sampler=weighted_sampler
)
else:
return DataLoader(
dataset=self.dataset_trn,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=True,
)
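# Weighting sketch: with ys_trn = [0, 0, 0, 1] the class_counter is {0: 3, 1: 1},
# so class_weights is {0: 1/3, 1: 1.0} and the per-sample weights become
# [1/3, 1/3, 1/3, 1.0]; WeightedRandomSampler then draws minority-class samples
# about as often as majority-class ones within an epoch.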
def val_dataloader(self):
return DataLoader(
dataset=self.dataset_val,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False,
)
def test_dataloader(self):
return DataLoader(
dataset=self.dataset_tst,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False,
)
def get_feature_names(self):
return self.data.columns.to_list()
def get_class_names(self):
return list(self.classes_dict.keys())
def get_raw_data(self):
data = pd.merge(self.output.loc[:, self.outcome], self.data, left_index=True, right_index=True)
train_data = data.iloc[self.ids_trn]
val_data = data.iloc[self.ids_val]
test_data = data.iloc[self.ids_tst]
raw_data = {}
raw_data['X_train'] = train_data.loc[:, self.data.columns.values].values
raw_data['y_train'] = train_data.loc[:, self.outcome].values
raw_data['X_val'] = val_data.loc[:, self.data.columns.values].values
raw_data['y_val'] = val_data.loc[:, self.outcome].values
raw_data['X_test'] = test_data.loc[:, self.data.columns.values].values
raw_data['y_test'] = test_data.loc[:, self.outcome].values
return raw_data
|
#!/usr/bin/env python
#
# system imports
#
import cPickle, Image
from scipy.misc import fromimage
import numpy as np
import sys
#
# user defined imports
#
from DefinitionsAndUtils import *
from ImageProcessing import thresholdNDArray
from GraphAndHistogramUtilities import timeToIdx, toProbs
removeNonresponders = True
removeSaturated = True
useLogscaleCounts = True
applyExpertThresholds = True
threshPath = "/home/mfenner/scipy_prep/data/input/"
threshFile = threshPath + "manual-thresholds-all-conditions.csv"
outputPath = "/home/mfenner/scipy_prep/data/output/pickle-probs/"
outputFile = outputPath + "expThresh-trimmedEnds-log-probs-by-OrgTime.pck"
threshDicts = readThresholdFileAsDictionaries(threshFile)
cndKeys = ["Organelle", "Stain", "Time"] # , "Series"]
#
# takes threshold file as "canonical" list of experiments and
# files
#
storedProbs = {}
#for thisStack in breakIntoStacks(threshDicts):
# for threshes in thisStack:
for thisCndSet in breakByConditions(threshDicts):
stackArray = np.empty((0,3), dtype=np.uint8)
print tuple(thisCndSet[0][k] for k in cndKeys), "...",
for threshes in thisCndSet:
# print threshes["Slice"],
# get file->image and thresholds together and apply them
exampleFilename = fileNameFormat[threshes["Organelle"]] % threshes
currentImage = Image.open(imageDataPath+exampleFilename)
asArray = fromimage(currentImage).reshape((numImagePoints, 3))
# port to thresholdNDArray
if applyExpertThresholds:
expertThresholds = dict(((c, threshes[c]) for c in colorNames))
thresholdNDArray(asArray, expertThresholds)
# asArray = fromimage(currentImage).reshape((numImagePoints, 3))
stackArray = np.concatenate((stackArray, asArray))
org = simplifyOrgStain(threshes["Organelle"], threshes["Stain"])
# r = threshes["Series"] - 1
t = timeToIdx(threshes["Time"])
print org, t
# convert image stack to counts and add to histograms
for c1, c2 in colorPairs:
probs = toProbs(stackArray[:,c1], stackArray[:,c2],
removeNonresponders = removeNonresponders,
removeSaturated = removeSaturated,
useLogscaleCounts = useLogscaleCounts)
cnd = (org, c1, c2, t)
storedProbs[cnd] = probs
with open(outputFile, "wb") as pickleFile:
    cPickle.dump(storedProbs, pickleFile, -1)
|
import properties
import numpy as np
from scipy.constants import mu_0
import warnings
from SimPEG.Utils import Zero
from .. import Survey, Problem, Utils
from .. import Utils as emutils
from ..Base import BaseEMSrc
class BaseFDEMSrc(BaseEMSrc):
"""
Base source class for FDEM Survey
"""
freq = properties.Float("frequency of the source", min=0, required=True)
_ePrimary = None
_bPrimary = None
_hPrimary = None
_jPrimary = None
def __init__(self, rxList, **kwargs):
super().__init__(rxList, **kwargs)
def bPrimary(self, prob):
"""
Primary magnetic flux density
:param BaseFDEMProblem prob: FDEM Problem
:rtype: numpy.ndarray
:return: primary magnetic flux density
"""
if self._bPrimary is None:
return Zero()
return self._bPrimary
def bPrimaryDeriv(self, prob, v, adjoint=False):
"""
Derivative of the primary magnetic flux density
:param BaseFDEMProblem prob: FDEM Problem
:param numpy.ndarray v: vector
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
        :return: derivative of the primary magnetic flux density
"""
return Zero()
def hPrimary(self, prob):
"""
Primary magnetic field
:param BaseFDEMProblem prob: FDEM Problem
:rtype: numpy.ndarray
:return: primary magnetic field
"""
if self._hPrimary is None:
return Zero()
return self._hPrimary
def hPrimaryDeriv(self, prob, v, adjoint=False):
"""
Derivative of the primary magnetic field
:param BaseFDEMProblem prob: FDEM Problem
:param numpy.ndarray v: vector
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
        :return: derivative of the primary magnetic field
"""
return Zero()
def ePrimary(self, prob):
"""
Primary electric field
:param BaseFDEMProblem prob: FDEM Problem
:rtype: numpy.ndarray
:return: primary electric field
"""
if self._ePrimary is None:
return Zero()
return self._ePrimary
def ePrimaryDeriv(self, prob, v, adjoint=False):
"""
Derivative of the primary electric field
:param BaseFDEMProblem prob: FDEM Problem
:param numpy.ndarray v: vector
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
        :return: derivative of the primary electric field
"""
return Zero()
def jPrimary(self, prob):
"""
Primary current density
:param BaseFDEMProblem prob: FDEM Problem
:rtype: numpy.ndarray
:return: primary current density
"""
if self._jPrimary is None:
return Zero()
return self._jPrimary
def jPrimaryDeriv(self, prob, v, adjoint=False):
"""
Derivative of the primary current density
:param BaseFDEMProblem prob: FDEM Problem
:param numpy.ndarray v: vector
:param bool adjoint: adjoint?
:rtype: numpy.ndarray
        :return: derivative of the primary current density
"""
return Zero()
class RawVec_e(BaseFDEMSrc):
"""
RawVec electric source. It is defined by the user provided vector s_e
:param list rxList: receiver list
:param float freq: frequency
:param numpy.array s_e: electric source term
:param bool integrate: Integrate the source term (multiply by Me) [False]
"""
def __init__(self, rxList, freq, s_e, **kwargs):
self._s_e = np.array(s_e, dtype=complex)
self.freq = freq
        super().__init__(rxList, **kwargs)
def s_e(self, prob):
"""
Electric source term
:param BaseFDEMProblem prob: FDEM Problem
:rtype: numpy.ndarray
:return: electric source term on mesh
"""
if prob._formulation == "EB" and self.integrate is True:
return prob.Me * self._s_e
return self._s_e
class RawVec_m(BaseFDEMSrc):
"""
RawVec magnetic source. It is defined by the user provided vector s_m
:param float freq: frequency
:param rxList: receiver list
:param numpy.array s_m: magnetic source term
:param bool integrate: Integrate the source term (multiply by Me) [False]
"""
def __init__(self, rxList, freq, s_m, **kwargs):
self._s_m = np.array(s_m, dtype=complex)
self.freq = freq
super().__init__(rxList, **kwargs)
def s_m(self, prob):
"""
Magnetic source term
:param BaseFDEMProblem prob: FDEM Problem
:rtype: numpy.ndarray
:return: magnetic source term on mesh
"""
if prob._formulation == "HJ" and self.integrate is True:
return prob.Me * self._s_m
return self._s_m
class RawVec(BaseFDEMSrc):
"""
RawVec source. It is defined by the user provided vectors s_m, s_e
:param rxList: receiver list
:param float freq: frequency
:param numpy.array s_m: magnetic source term
:param numpy.array s_e: electric source term
:param bool integrate: Integrate the source term (multiply by Me) [False]
"""
def __init__(self, rxList, freq, s_m, s_e, **kwargs):
self._s_m = np.array(s_m, dtype=complex)
self._s_e = np.array(s_e, dtype=complex)
self.freq = freq
super().__init__(rxList, **kwargs)
def s_m(self, prob):
"""
Magnetic source term
:param BaseFDEMProblem prob: FDEM Problem
:rtype: numpy.ndarray
:return: magnetic source term on mesh
"""
if prob._formulation == "HJ" and self.integrate is True:
return prob.Me * self._s_m
return self._s_m
def s_e(self, prob):
"""
Electric source term
:param BaseFDEMProblem prob: FDEM Problem
:rtype: numpy.ndarray
:return: electric source term on mesh
"""
if prob._formulation == "EB" and self.integrate is True:
return prob.Me * self._s_e
return self._s_e
class MagDipole(BaseFDEMSrc):
"""
Point magnetic dipole source calculated by taking the curl of a magnetic
vector potential. By taking the discrete curl, we ensure that the magnetic
flux density is divergence free (no magnetic monopoles!).
This approach uses a primary-secondary in frequency. Here we show the
derivation for E-B formulation noting that similar steps are followed for
the H-J formulation.
.. math::
\\mathbf{C} \\mathbf{e} + i \\omega \\mathbf{b} = \\mathbf{s_m} \\\\
{\\mathbf{C}^T \\mathbf{M_{\\mu^{-1}}^f} \\mathbf{b} -
\\mathbf{M_{\\sigma}^e} \\mathbf{e} = \\mathbf{s_e}}
We split up the fields and :math:`\\mu^{-1}` into primary
(:math:`\\mathbf{P}`) and secondary (:math:`\\mathbf{S}`) components
- :math:`\\mathbf{e} = \\mathbf{e^P} + \\mathbf{e^S}`
- :math:`\\mathbf{b} = \\mathbf{b^P} + \\mathbf{b^S}`
- :math:`\\boldsymbol{\\mu}^{\\mathbf{-1}} =
\\boldsymbol{\\mu}^{\\mathbf{-1}^\\mathbf{P}} +
\\boldsymbol{\\mu}^{\\mathbf{-1}^\\mathbf{S}}`
and define a zero-frequency primary problem, noting that the source is
generated by a divergence free electric current
.. math::
\\mathbf{C} \\mathbf{e^P} = \\mathbf{s_m^P} = 0 \\\\
{\\mathbf{C}^T \\mathbf{{M_{\\mu^{-1}}^f}^P} \\mathbf{b^P} -
\\mathbf{M_{\\sigma}^e} \\mathbf{e^P} = \\mathbf{M^e} \\mathbf{s_e^P}}
    Since :math:`\\mathbf{e^P}` is both curl-free and divergence-free, and we
    assume there is no constant background field, :math:`\\mathbf{e^P} = 0`, so
    our primary problem is
.. math::
\\mathbf{e^P} = 0 \\\\
{\\mathbf{C}^T \\mathbf{{M_{\\mu^{-1}}^f}^P} \\mathbf{b^P} =
\\mathbf{s_e^P}}
Our secondary problem is then
.. math::
\\mathbf{C} \\mathbf{e^S} + i \\omega \\mathbf{b^S} =
- i \\omega \\mathbf{b^P} \\\\
{\\mathbf{C}^T \\mathbf{M_{\\mu^{-1}}^f} \\mathbf{b^S} -
\\mathbf{M_{\\sigma}^e} \\mathbf{e^S} =
-\\mathbf{C}^T \\mathbf{{M_{\\mu^{-1}}^f}^S} \\mathbf{b^P}}
:param list rxList: receiver list
:param float freq: frequency
:param numpy.ndarray loc: source location
(ie: :code:`np.r_[xloc,yloc,zloc]`)
:param string orientation: 'X', 'Y', 'Z'
:param float moment: magnetic dipole moment
:param float mu: background magnetic permeability
"""
moment = properties.Float("dipole moment of the transmitter", default=1.0, min=0.0)
mu = properties.Float("permeability of the background", default=mu_0, min=0.0)
orientation = properties.Vector3(
"orientation of the source", default="Z", length=1.0, required=True
)
freq = properties.Float("frequency of the source (Hz)", required=True)
loc = properties.Vector3("location of the source", default=np.r_[0.0, 0.0, 0.0])
def __init__(self, rxList, freq, loc, **kwargs):
super().__init__(rxList, **kwargs)
self.freq = freq
self.loc = loc
@properties.validator("orientation")
def _warn_non_axis_aligned_sources(self, change):
value = change["value"]
axaligned = [
True
for vec in [
np.r_[1.0, 0.0, 0.0],
np.r_[0.0, 1.0, 0.0],
np.r_[0.0, 0.0, 1.0],
]
if np.all(value == vec)
]
if len(axaligned) != 1:
warnings.warn(
"non-axes aligned orientations {} are not rigorously"
" tested".format(value)
)
def _srcFct(self, obsLoc, component):
return emutils.MagneticDipoleVectorPotential(
self.loc,
obsLoc,
component,
mu=self.mu,
moment=self.moment,
orientation=self.orientation,
)
def bPrimary(self, prob):
"""
The primary magnetic flux density from a magnetic vector potential
:param BaseFDEMProblem prob: FDEM problem
:rtype: numpy.ndarray
        :return: primary magnetic flux density
"""
formulation = prob._formulation
if formulation == "EB":
gridX = prob.mesh.gridEx
gridY = prob.mesh.gridEy
gridZ = prob.mesh.gridEz
C = prob.mesh.edgeCurl
elif formulation == "HJ":
gridX = prob.mesh.gridFx
gridY = prob.mesh.gridFy
gridZ = prob.mesh.gridFz
C = prob.mesh.edgeCurl.T
if prob.mesh._meshType == "CYL":
if not prob.mesh.isSymmetric:
# TODO ?
raise NotImplementedError("Non-symmetric cyl mesh not implemented yet!")
assert np.linalg.norm(self.orientation - np.r_[0.0, 0.0, 1.0]) < 1e-6, (
"for cylindrical symmetry, the dipole must be "
"oriented in the Z direction"
)
a = self._srcFct(gridY, "y")
else:
ax = self._srcFct(gridX, "x")
ay = self._srcFct(gridY, "y")
az = self._srcFct(gridZ, "z")
a = np.concatenate((ax, ay, az))
return C * a
def hPrimary(self, prob):
"""
The primary magnetic field from a magnetic vector potential
:param BaseFDEMProblem prob: FDEM problem
:rtype: numpy.ndarray
:return: primary magnetic field
"""
b = self.bPrimary(prob)
return 1.0 / self.mu * b
def s_m(self, prob):
"""
The magnetic source term
:param BaseFDEMProblem prob: FDEM problem
:rtype: numpy.ndarray
        :return: magnetic source term
"""
b_p = self.bPrimary(prob)
if prob._formulation == "HJ":
b_p = prob.Me * b_p
return -1j * emutils.omega(self.freq) * b_p
def s_e(self, prob):
"""
The electric source term
:param BaseFDEMProblem prob: FDEM problem
:rtype: numpy.ndarray
        :return: electric source term
"""
if all(np.r_[self.mu] == np.r_[prob.mu]):
return Zero()
else:
formulation = prob._formulation
if formulation == "EB":
mui_s = prob.mui - 1.0 / self.mu
MMui_s = prob.mesh.getFaceInnerProduct(mui_s)
C = prob.mesh.edgeCurl
elif formulation == "HJ":
mu_s = prob.mu - self.mu
MMui_s = prob.mesh.getEdgeInnerProduct(mu_s, invMat=True)
C = prob.mesh.edgeCurl.T
return -C.T * (MMui_s * self.bPrimary(prob))
def s_eDeriv(self, prob, v, adjoint=False):
if not hasattr(prob, "muMap") or not hasattr(prob, "muiMap"):
return Zero()
else:
formulation = prob._formulation
if formulation == "EB":
mui_s = prob.mui - 1.0 / self.mu
MMui_sDeriv = (
prob.mesh.getFaceInnerProductDeriv(mui_s)(self.bPrimary(prob))
* prob.muiDeriv
)
C = prob.mesh.edgeCurl
if adjoint:
return -MMui_sDeriv.T * (C * v)
return -C.T * (MMui_sDeriv * v)
elif formulation == "HJ":
return Zero()
# raise NotImplementedError
mu_s = prob.mu - self.mu
MMui_s = prob.mesh.getEdgeInnerProduct(mu_s, invMat=True)
C = prob.mesh.edgeCurl.T
return -C.T * (MMui_s * self.bPrimary(prob))
class MagDipole_Bfield(MagDipole):
"""
Point magnetic dipole source calculated with the analytic solution for the
fields from a magnetic dipole. No discrete curl is taken, so the magnetic
flux density may not be strictly divergence free.
This approach uses a primary-secondary in frequency in the same fashion as
the MagDipole.
:param list rxList: receiver list
:param float freq: frequency
:param numpy.ndarray loc: source location (ie:
:code:`np.r_[xloc,yloc,zloc]`)
:param string orientation: 'X', 'Y', 'Z'
:param float moment: magnetic dipole moment
:param float mu: background magnetic permeability
"""
def __init__(self, rxList, freq, loc, **kwargs):
super().__init__(rxList, freq=freq, loc=loc, **kwargs)
def _srcFct(self, obsLoc, component):
return emutils.MagneticDipoleFields(
self.loc,
obsLoc,
component,
mu=self.mu,
moment=self.moment,
orientation=self.orientation,
)
def bPrimary(self, prob):
"""
The primary magnetic flux density from the analytic solution for
magnetic fields from a dipole
:param BaseFDEMProblem prob: FDEM problem
:rtype: numpy.ndarray
        :return: primary magnetic flux density
"""
formulation = prob._formulation
if formulation == "EB":
gridX = prob.mesh.gridFx
gridY = prob.mesh.gridFy
gridZ = prob.mesh.gridFz
elif formulation == "HJ":
gridX = prob.mesh.gridEx
gridY = prob.mesh.gridEy
gridZ = prob.mesh.gridEz
srcfct = emutils.MagneticDipoleFields
if prob.mesh._meshType == "CYL":
if not prob.mesh.isSymmetric:
# TODO ?
raise NotImplementedError("Non-symmetric cyl mesh not implemented yet!")
bx = srcfct(self.loc, gridX, "x", mu=self.mu, moment=self.moment)
bz = srcfct(self.loc, gridZ, "z", mu=self.mu, moment=self.moment)
b = np.concatenate((bx, bz))
else:
bx = srcfct(self.loc, gridX, "x", mu=self.mu, moment=self.moment)
by = srcfct(self.loc, gridY, "y", mu=self.mu, moment=self.moment)
bz = srcfct(self.loc, gridZ, "z", mu=self.mu, moment=self.moment)
b = np.concatenate((bx, by, bz))
return Utils.mkvc(b)
class CircularLoop(MagDipole):
"""
Circular loop magnetic source calculated by taking the curl of a magnetic
vector potential. By taking the discrete curl, we ensure that the magnetic
flux density is divergence free (no magnetic monopoles!).
This approach uses a primary-secondary in frequency in the same fashion as
the MagDipole.
:param list rxList: receiver list
:param float freq: frequency
:param numpy.ndarray loc: source location
(ie: :code:`np.r_[xloc,yloc,zloc]`)
:param string orientation: 'X', 'Y', 'Z'
:param float moment: magnetic dipole moment
:param float mu: background magnetic permeability
"""
radius = properties.Float("radius of the loop", default=1.0, min=0.0)
def __init__(self, rxList, freq, loc, **kwargs):
super().__init__(rxList, freq, loc, **kwargs)
def _srcFct(self, obsLoc, component):
return emutils.MagneticLoopVectorPotential(
self.loc,
obsLoc,
component,
mu=self.mu,
radius=self.radius,
orientation=self.orientation,
)
class PrimSecSigma(BaseFDEMSrc):
def __init__(self, rxList, freq, sigBack, ePrimary, **kwargs):
self.sigBack = sigBack
BaseFDEMSrc.__init__(self, rxList, freq=freq, _ePrimary=ePrimary, **kwargs)
def s_e(self, prob):
return (
prob.MeSigma - prob.mesh.getEdgeInnerProduct(self.sigBack)
) * self.ePrimary(prob)
def s_eDeriv(self, prob, v, adjoint=False):
if adjoint:
return prob.MeSigmaDeriv(self.ePrimary(prob)).T * v
return prob.MeSigmaDeriv(self.ePrimary(prob)) * v
class PrimSecMappedSigma(BaseFDEMSrc):
"""
Primary-Secondary Source in which a mapping is provided to put the current
model onto the primary mesh. This is solved on every model update.
There are a lot of layers to the derivatives here!
**Required**
:param list rxList: Receiver List
:param float freq: frequency
:param BaseFDEMProblem primaryProblem: FDEM primary problem
:param SurveyFDEM primarySurvey: FDEM primary survey
**Optional**
:param Mapping map2meshSecondary: mapping current model to act as primary
model on the secondary mesh
"""
def __init__(
self,
rxList,
freq,
primaryProblem,
primarySurvey,
map2meshSecondary=None,
**kwargs
):
self.primaryProblem = primaryProblem
self.primarySurvey = primarySurvey
if self.primaryProblem.ispaired is False:
self.primaryProblem.pair(self.primarySurvey)
self.map2meshSecondary = map2meshSecondary
BaseFDEMSrc.__init__(self, rxList, freq=freq, **kwargs)
def _ProjPrimary(self, prob, locType, locTypeTo):
# TODO: if meshes have not changed, store the projection
# if getattr(self, '__ProjPrimary', None) is None:
# TODO: implement for HJ formulation
if prob._formulation == "EB":
pass
else:
raise NotImplementedError(
"PrimSecMappedSigma Source has not been implemented for {} "
"formulation".format(prob._formulation)
)
        # TODO: only set up for tensor meshes (Tree meshes should be easy/done)
# but have not been tried or tested.
assert prob.mesh._meshType in [
"TENSOR"
], "PrimSecMappedSigma source has not been implemented for {}".format(
prob.mesh._meshType
)
# if EB formulation, interpolate E, elif HJ interpolate J
# if self.primaryProblem._formulation == 'EB':
# locType = 'E'
# elif self.primaryProblem._formulation == 'HJ':
# locType = 'F'
# get interpolation mat from primary mesh to secondary mesh
if self.primaryProblem.mesh._meshType == "CYL":
return self.primaryProblem.mesh.getInterpolationMatCartMesh(
prob.mesh, locType=locType, locTypeTo=locTypeTo
)
        return self.primaryProblem.mesh.getInterpolationMat(
prob.mesh, locType=locType, locTypeTo=locTypeTo
)
# return self.__ProjPrimary
def _primaryFields(self, prob, fieldType=None, f=None):
# TODO: cache and check if prob.curModel has changed
if f is None:
f = self.primaryProblem.fields(prob.model)
if fieldType is not None:
return f[:, fieldType]
return f
def _primaryFieldsDeriv(self, prob, v, adjoint=False, f=None):
# TODO: this should not be hard-coded for j
# jp = self._primaryFields(prob)[:,'j']
# TODO: pull apart Jvec so that don't have to copy paste this code in
# A = self.primaryProblem.getA(self.freq)
# Ainv = self.primaryProblem.Solver(A, **self.primaryProblem.solverOpts) # create the concept of Ainv (actually a solve)
        if f is None:
            # match ePrimary / bPrimary: primary fields come from the problem
            f = self._primaryFields(prob)
freq = self.freq
A = self.primaryProblem.getA(freq)
src = self.primarySurvey.srcList[0]
u_src = Utils.mkvc(f[src, self.primaryProblem._solutionType])
if adjoint is True:
Jtv = np.zeros(prob.sigmaMap.nP, dtype=complex)
ATinv = self.primaryProblem.Solver(A.T, **self.primaryProblem.solverOpts)
df_duTFun = getattr(
f,
"_{}Deriv".format(
"e" if self.primaryProblem._formulation == "EB" else "j"
),
None,
)
df_duT, df_dmT = df_duTFun(src, None, v, adjoint=True)
ATinvdf_duT = ATinv * df_duT
dA_dmT = self.primaryProblem.getADeriv(
freq, u_src, ATinvdf_duT, adjoint=True
)
dRHS_dmT = self.primaryProblem.getRHSDeriv(
freq, src, ATinvdf_duT, adjoint=True
)
du_dmT = -dA_dmT + dRHS_dmT
Jtv += df_dmT + du_dmT
ATinv.clean()
return Utils.mkvc(Jtv)
# create the concept of Ainv (actually a solve)
Ainv = self.primaryProblem.Solver(A, **self.primaryProblem.solverOpts)
# for src in self.survey.getSrcByFreq(freq):
dA_dm_v = self.primaryProblem.getADeriv(freq, u_src, v)
dRHS_dm_v = self.primaryProblem.getRHSDeriv(freq, src, v)
du_dm_v = Ainv * (-dA_dm_v + dRHS_dm_v)
# if self.primaryProblem._formulation == 'EB':
df_dmFun = getattr(
f,
"_{}Deriv".format("e" if self.primaryProblem._formulation == "EB" else "j"),
None,
)
# elif self.primaryProblem._formulation == 'HJ':
# df_dmFun = getattr(f, '_{0}Deriv'.format('j'), None)
df_dm_v = df_dmFun(src, du_dm_v, v, adjoint=False)
# Jv[src, rx] = rx.evalDeriv(src, self.mesh, f, df_dm_v)
Ainv.clean()
return df_dm_v
# return self.primaryProblem.Jvec(prob.curModel, v, f=f)
def ePrimary(self, prob, f=None):
if f is None:
f = self._primaryFields(prob)
if self.primaryProblem._formulation == "EB":
ep = self._ProjPrimary(prob, "E", "E") * f[:, "e"]
elif self.primaryProblem._formulation == "HJ":
ep = self._ProjPrimary(prob, "F", "E") * (
self.primaryProblem.MfI * (self.primaryProblem.MfRho * f[:, "j"])
)
return Utils.mkvc(ep)
def ePrimaryDeriv(self, prob, v, adjoint=False, f=None):
if f is None:
f = self._primaryFields(prob)
# if adjoint is True:
# raise NotImplementedError
if self.primaryProblem._formulation == "EB":
if adjoint is True:
epDeriv = self._primaryFieldsDeriv(
prob,
(self._ProjPrimary(prob, "E", "E").T * v),
f=f,
adjoint=adjoint,
)
else:
epDeriv = self._ProjPrimary(prob, "E", "E") * self._primaryFieldsDeriv(
prob, v, f=f
)
elif self.primaryProblem._formulation == "HJ":
if adjoint is True:
PTv = self.primaryProblem.MfI.T * (
self._ProjPrimary(prob, "F", "E").T * v
)
epDeriv = self.primaryProblem.MfRhoDeriv(
f[:, "j"]
).T * PTv + self._primaryFieldsDeriv(
prob, self.primaryProblem.MfRho.T * PTv, adjoint=adjoint, f=f
)
# epDeriv =(
# (self.primaryProblem.MfI.T * PTv)
# )
else:
epDeriv = self._ProjPrimary(prob, "F", "E") * (
self.primaryProblem.MfI
* (
(self.primaryProblem.MfRhoDeriv(f[:, "j"]) * v)
+ (
self.primaryProblem.MfRho
* self._primaryFieldsDeriv(prob, v, f=f)
)
)
)
return Utils.mkvc(epDeriv)
def bPrimary(self, prob, f=None):
if f is None:
f = self._primaryFields(prob)
if self.primaryProblem._formulation == "EB":
bp = self._ProjPrimary(prob, "F", "F") * f[:, "b"]
elif self.primaryProblem._formulation == "HJ":
bp = self._ProjPrimary(prob, "E", "F") * (
self.primaryProblem.MeI * (self.primaryProblem.MeMu * f[:, "h"])
)
return Utils.mkvc(bp)
def s_e(self, prob, f=None):
sigmaPrimary = self.map2meshSecondary * prob.model
return Utils.mkvc(
(prob.MeSigma - prob.mesh.getEdgeInnerProduct(sigmaPrimary))
* self.ePrimary(prob, f=f)
)
def s_eDeriv(self, prob, v, adjoint=False):
sigmaPrimary = self.map2meshSecondary * prob.model
sigmaPrimaryDeriv = self.map2meshSecondary.deriv(prob.model)
f = self._primaryFields(prob)
ePrimary = self.ePrimary(prob, f=f)
if adjoint is True:
return (
prob.MeSigmaDeriv(ePrimary).T * v
- (
sigmaPrimaryDeriv.T
* prob.mesh.getEdgeInnerProductDeriv(sigmaPrimary)(ePrimary).T
* v
)
+ self.ePrimaryDeriv(
prob,
(prob.MeSigma - prob.mesh.getEdgeInnerProduct(sigmaPrimary)).T * v,
adjoint=adjoint,
f=f,
)
)
return (
prob.MeSigmaDeriv(ePrimary) * v
- prob.mesh.getEdgeInnerProductDeriv(sigmaPrimary)(ePrimary)
* (sigmaPrimaryDeriv * v)
+ (prob.MeSigma - prob.mesh.getEdgeInnerProduct(sigmaPrimary))
* self.ePrimaryDeriv(prob, v, adjoint=adjoint, f=f)
)
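# --- Added usage sketch (illustrative only, not part of the original module) ---
# Constructing the sources defined above; the receiver list and the mesh/problem
# setup are assumed to come from the surrounding SimPEG FDEM code and are
# omitted here, so these lines are left as comments rather than executable code.
#
#   src_dipole = MagDipole(rxList, freq=1.0, loc=np.r_[0.0, 0.0, 0.0], orientation="Z")
#   src_loop = CircularLoop(rxList, freq=1.0, loc=np.r_[0.0, 0.0, 0.0], radius=25.0)
#   src_raw = RawVec(rxList, freq=1.0, s_m=s_m_vector, s_e=s_e_vector)
#
# Each source exposes s_m(prob) / s_e(prob) plus the *Primary methods, which the
# FDEM problem evaluates when assembling the right-hand side at that frequency.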
|
# -*- coding: utf-8 -*-
from ..libgnss.constants import *
import numpy as np
class Discriminator():
"""A channel component for performing discriminations on correlations."""
def __init__(self, flavor, channel=None):
"""Constructs Discriminator with specified flavor."""
if flavor in ['DLL']:
self.update = self._dll_update
elif flavor in ['PLL']:
self.update = self._pll_update
elif flavor in ['FLL']:
self.update = self._fll_update
else:
print('Warning: unknown discriminator flavor, discriminator update function is not set')
if channel is not None:
self.channel = channel
def _dll_update(self, iE, qE, iL, qL):
"""Outputs DLL discrimination.
"""
xp = xf = 0.0
# Use the Normalized Early Minus Late Envelope discriminator.
E = np.sqrt(iE**2.0 + qE**2.0)
L = np.sqrt(iL**2.0 + qL**2.0)
if (E+L) != 0:
xp = (E-L) / (2.0*(E+L))
# NOTE: 2.0 because self.offset=0.5 in correlator
else:
print("Warning: very low correlation values")
return xp, xf
def _pll_update(self, iP, qP):
"""Outputs PLL discrimination.
"""
xp = xf = 0.0
if iP != 0:
xp = np.arctan(qP/iP) / (2.0*PI)
else:
print("Warning: very low correlation values")
return xp, xf
def _fll_update(self,iP1,qP1,iP0,qP0,N):
assert (N>1), "Error: N has to be > 1 for FLL, N is "+str(N)
        assert (np.mod(N,2)==0), "Error: N has to be even, N is "+str(N)
xp = xf = 0.0
cross = iP0*qP1-iP1*qP0
dot = iP0*iP1+qP0*qP1
if dot > 0.0:
xf = np.arctan2(cross,dot)/(2.0*np.pi*T_CA* N)
else:
xf = np.arctan2(-1.0*cross,-1.0*dot)/(2.0*np.pi*T_CA*N)
return xp, xf
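

if __name__ == '__main__':
    # Added illustration (not part of the original module): the normalized
    # early-minus-late envelope discriminator on made-up correlator outputs.
    # Balanced early/late power gives zero code-phase error; excess early
    # power gives a positive error.
    disc = Discriminator('DLL')
    print(disc.update(0.7, 0.1, 0.7, 0.1))  # (0.0, 0.0)
    print(disc.update(0.9, 0.1, 0.5, 0.1))  # xp > 0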
|
import os
import json
import logging
import requests
import urllib.parse
from datetime import datetime
# logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Google Chat Webhook URL
WEBHOOK_URL = os.environ['GOOGLE_CHAT_WEBHOOK_URL']
CARD_IMAGE_URL = os.environ['CARD_IMAGE_URL']
# HTTP Header
message_headers = {'Content-Type': 'application/json; charset=UTF-8'}
# Cards Widgets detail Label
keyLabel = {
'stateMachineArn': 'StateMachine',
'name': 'ExecutionName',
'status': 'Status',
}
# Message template
message = {'text': '<users/all>', 'cards': [{'header':{},'sections': [{'widgets': []}]}]}
def lambda_handler(event, context):
"""
Google Chat Webhook にエラーイベント情報を送信する
Parameters
----------
event : json
トリガーイベント情報
context : json
実行環境情報
Returns
----------
response : json
POSTリクエスト実行結果
"""
try:
logger.info("Set Cards Info")
message['cards'][0]['header']['title'] = 'ERROR : Step Functions'
message['cards'][0]['header']['imageUrl'] = CARD_IMAGE_URL
logger.info("Setting Cards Widgets from event")
for key, val in keyLabel.items():
if str(key) == 'stateMachineArn':
contents = str(event['detail'][key]).split(':')[-1]
else:
contents = event['detail'][key]
append_widgets(message, val, contents)
logger.info("Load input")
input_str = urllib.parse.unquote_plus(event['detail']['input'])
        input = json.loads(input_str)  # json.loads() no longer accepts an encoding argument on Python 3.9+
if 'Records' in input.keys():
logger.info("Set Cards Widgets from input: S3")
bucket_name = input['Records'][0]['s3']['bucket']['name']
key = input['Records'][0]['s3']['object']['key']
file_name = key.split('/')[-1]
key_prefix = key.replace(file_name, '')
event_name = input['Records'][0]['eventName']
append_widgets(message, 'S3 BucketName', bucket_name)
append_widgets(message, 'KeyPrefix', key_prefix)
append_widgets(message, 'FileName', file_name)
append_widgets(message, 'EventName', event_name)
# https://github.com/tosh223/aws-glue-crawlflow
elif 'CrawlerName' in input.keys():
logger.info("Set Cards Widgets from input: Glue Crawler")
append_widgets(message, 'CrawlerName', input['CrawlerName'])
logger.info("Send Message to Google Chat Webhook")
response = requests.post(
WEBHOOK_URL,
headers=message_headers,
data=json.dumps(message)
)
return json.loads(response.text)
except Exception as e:
logger.exception(e, exc_info=False)
raise e
def append_widgets(message, top_label, content):
"""
メッセージにウィジェットを追加する
Parameters
----------
message : json
メッセージ
top_label : string
ラベル
content : string
ウィジェット内に表示する文字列
"""
keyVal = {'keyValue': {}}
keyVal['keyValue']['topLabel'] = top_label
keyVal['keyValue']['content'] = content
keyVal['keyValue']['contentMultiline'] = 'true'
message['cards'][0]['sections'][0]['widgets'].append(keyVal) |
#!/usr/bin/python3
# forms.py
########## imports ##########
from flask_wtf import FlaskForm
from wtforms import TextField, StringField, SubmitField, HiddenField, SelectField, FloatField
from wtforms.validators import DataRequired, Length, ValidationError, Email
#from app import db
from application.models import *
########## New user ##########
#type_of_clothing =['top', 'bottom', 'dress']
class NewClothing(FlaskForm):
clothing_id = HiddenField('id')
name = StringField('name', validators =[Length(min=2, max=200)])
# category = SelectField('category', choices=[(typ, typ) for typ in type_of_clothing])
# description = TextField('description', validators=[Length(min=2, max=500)])
|
# -*- coding: utf-8 -*-
import os
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import globalcache
import definitions
# sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
sns.set()
pd.options.mode.chained_assignment = 'raise'
import votesim
from votesim.benchmarks import simple
from votesim import plots
benchmark = simple.simple3way()
dirname = definitions.DIR_DATA_BENCHMARKS
dirname = os.path.join(dirname, benchmark.name)
@globalcache.cache_decorate('read')
def read():
return benchmark.read(dirname=dirname)
def categorize(df):
"""
Category Combinations
Labels
-------
- M = majority winner
- P = plurality winner
- C = condorcet winner
- U = utility winner
Categories
----------
- MU = Has majority utility winner
- M = Has majority winner that is not utility winner.
-
- CPU = Has condorcet, utility, plurality winner
- CU = Has condorcet, utility winner that is not plurality winner
- CP = Has condorcet, plurality winner that is not utility winner
- C = Has condorcet winner who is not plurality and utility winner
-
- NC = Has no Condorcet winner
-
"""
iM = df['output.candidate.winner_majority']
iP = df['output.candidate.winner_plurality']
iC = df['output.candidate.winner_condorcet']
iU = df['output.candidate.winner_utility']
df = df.copy()
df.loc[:, 'categories'] = 'No category'
maj = iM > -1
no_maj = ~maj
MU = (iM == iU)
M = maj & (iM != iU)
CPU = no_maj & (iC == iP) & (iC == iU)
CP = no_maj & (iC == iP) & (iC != iU)
CU = no_maj & (iC == iU) & (iC != iP)
C = (iC > -1) & (iC != iP) & (iC != iU)
PU = no_maj & (iP == iU) & (iP != iC)
NC = (iC == -1)
df.loc[MU, 'categories'] = 'MU'
df.loc[M, 'categories'] = 'M'
df.loc[CPU, 'categories'] = 'CPU'
df.loc[CP, 'categories'] = 'CP'
df.loc[CU, 'categories'] = 'CU'
df.loc[C, 'categories'] = 'C'
df.loc[PU, 'categories'] = 'PU'
df.loc[NC, 'categories'] = 'nc'
return df
# %% Read
g = globalcache.create(globals())
p = read()
df = p.post_data
###########################################
# %% Post
os.makedirs('images', exist_ok=True)
os.chdir('images')
yname = 'args.etype'
# otype = 'regret-efficiency'
# xname = 'output.winner.regret_efficiency_candidate'
otype = 'regret-voter'
xname = 'output.winner.regret_efficiency_candidate'
no_majority = df['output.candidate.winner_majority'] == -1
no_condorcet = df['output.candidate.winner_condorcet'] == -1
regret = 100* (1 - df[xname])
pratio = df['output.candidate.plurality_ratio'] * 100
df = df.reset_index()
df.loc[:, 'plurality_ratio'] = pratio
df.loc[:, 'no_majority'] = no_majority
df.loc[:, 'no_condorcet'] = no_condorcet
df.loc[:, 'regret'] = regret
### Categorize scenario parameters
arg_tol = df['args.user.voter_tolerance']
groupby = df.groupby(by='args.user.voter_tolerance')
keys = groupby.groups.keys()
dframes = (groupby.get_group(k) for k in keys)
# # %% Plot categories
# ### Plot election categories
df = groupby.get_group(list(keys)[0])
etype_num = len(df['args.etype'].unique())
sim_num = len(df) / etype_num
# plots.vset()
# plots.subplot_2row()
# plt.subplot(2, 1, 1)
# sns.distplot(pratio, bins=10, norm_hist=True, kde=False)
# plt.xlabel('% plurality winner ratio')
# plt.ylabel('Scenario probability density')
# plt.title('Probability of Plurality Ratio in Benchmark')
# plt.subplot(2, 1, 2)
df = categorize(df)
c = df['categories']
counts = c.value_counts() / len(c)*100
# # sns.barplot(x=counts.keys(), y=counts.values)
# ax = plots.bar(x=counts.keys(), y=counts.values, fmt='g')
# plt.ylabel('% Occurrence')
# # sns.countplot(x='categories', data=df,)
# plt.xlabel('Scenario Categories')
# plt.title('Majority/Condorcet/Utility/Plurality Occurrences')
# string = '''MU = majority-utility winner
# CU = condorcet-utility winner
# CPU = condorcet-plurality-utility winner
# M = majority winner is not utility winner
# PU = plurality-utility winner
# nc = No condorcet winner.
# CP = condorcet-plurality winner is not utility winner'''
# # place a text box in upper left in axes coords
# props = dict(facecolor='white', alpha=0.5)
# ax.text(0.4, 0.9, string, transform=ax.transAxes, fontsize=10,
# verticalalignment='top',
# horizontalalignment='left',
# bbox=props)
# plt.suptitle('3-Way Election, 1-Dimensional, %d simulations' % sim_num)
# plt.savefig('scenario-categories.png')
# %% Plot heatmaps
i = 0
for key, df in zip(keys, dframes):
# plt.figure(figsize=(12,8))
plots.subplot_2row()
plt.subplot(2, 1, 1)
bins = [0, 30, 40, 50, 60, 70, 80, 90, 100]
ax, dfp = plots.heatmap(x='plurality_ratio', y='args.etype', hue='regret',
data=df, xbin=bins, vmax=25)
plt.xlabel('% plurality winner ratio')
plt.ylabel('')
plt.title('% VSE vs Plurality Ratio')
# plt.hist(pratio, density=True, )
# hist, _ = np.histogram(pratio, bins=bins,) / len(pratio)
# plots.bar(x=bins, )
###############################################################################
df = categorize(df)
ysortkey = dfp.index.values
xsortkey = counts.index.values
plt.subplot(2, 1, 2)
ax, dfp = plots.heatmap(x='categories', y='args.etype', hue='regret',
data=df,
xsortkey=xsortkey,
ysortkey=ysortkey,
vmax=50)
plt.ylabel('')
#ax.set_yticklabels('')
plt.title('% VSE vs Category')
plt.xlabel('Scenario Categories')
plt.subplots_adjust(left=.185, wspace=.025)
plt.suptitle('3-Way Election, 1-Dimensional, voter tolerance=%s, '
'%d simulations' % (key, sim_num))
plt.savefig('vse-%d.png' % i)
i += 1
###############################################################################
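# --- Added illustration (not part of the original script) ---
# How categorize() labels a scenario, on a tiny hand-built frame (a winner
# index of -1 means "no such winner"):
#
#   toy = pd.DataFrame({
#       'output.candidate.winner_majority':  [0, -1, -1],
#       'output.candidate.winner_plurality': [0,  1,  2],
#       'output.candidate.winner_condorcet': [0,  1, -1],
#       'output.candidate.winner_utility':   [0,  1,  0],
#   })
#   categorize(toy)['categories'].tolist()  # ['MU', 'CPU', 'nc']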
|
# Given the root node of a binary search tree (BST) and a value, find the node
# in the BST whose value equals the given value and return the subtree rooted
# at that node. If such a node doesn't exist, return NULL (None).
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def searchBST(self, root: TreeNode, val: int) -> TreeNode:
if not root:
return None
while root:
if root.val == val:
return root
elif val < root.val:
root = root.left
else:
root = root.right
return None |
import cloupy as cl
import pytest
import matplotlib.pyplot as plt
import urllib.request
import urllib.error
def check_if_NOT_connected_to_the_internet(host='http://google.com'):
try:
urllib.request.urlopen(host)
return False
except urllib.error.URLError:
return True
@pytest.mark.skipif(check_if_NOT_connected_to_the_internet(), reason='internet connection required')
class TestDrawingWithDailyData:
def test_drawing_and_daily_data_downloading(self):
plotted_figures_before = plt.gcf().number
wl = cl.g_WalterLieth('POZNAŃ')
wl.d_imgw_data(interval='daily', stations_kind='synop', years_range=range(2010, 2011))
wl.draw()
plotted_figures_after = plt.gcf().number
assert plotted_figures_before < plotted_figures_after
def test_daily_data_downloading_and_drawing_from_global_df(self):
data = cl.d_imgw_data('daily', 'synop', range(2010, 2011))
cl.set_global_df(data)
plotted_figures_before = plt.gcf().number
assert TestDrawingWithDailyData.draw_from_global_df('WARSZAWA') > plotted_figures_before
plotted_figures_before = plt.gcf().number
assert TestDrawingWithDailyData.draw_from_global_df('KOŁOBRZEG') > plotted_figures_before
plotted_figures_before = plt.gcf().number
assert TestDrawingWithDailyData.draw_from_global_df('KOŁO') > plotted_figures_before
@staticmethod
def draw_from_global_df(station_name):
wl = cl.g_WalterLieth(station_name)
wl.import_global_df('imgw_daily')
wl.draw()
return plt.gcf().number
|
#!/usr/bin/python
import sys, os, getpass, subprocess, argparse, stat, logging, grp, pwd
logging.basicConfig(level = logging.INFO)
logger = logging.getLogger(__name__)
class IgnoreAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# do nothing
pass
class DefaultAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if not 'ordered_args' in namespace:
setattr(namespace, 'ordered_args', [])
previous = namespace.ordered_args
previous.append((self.dest, values))
setattr(namespace, 'ordered_args', previous)
def create_check_volume_action(uid):
class CheckVolume(DefaultAction):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
super(CheckVolume, self).__init__(option_strings, dest, **kwargs)
self.uid = uid
self.user = pwd.getpwuid(int(uid)).pw_name
def __call__(self, parser, namespace, values, option_string=None):
values=self.check_volume(values)
if (values is not None):
super(CheckVolume, self).__call__(parser, namespace, values, option_string)
def check_volume(self, volume):
vspec = volume.split(":")
volumepath = vspec[0]
volume = vspec[0] + ":" + vspec[1]
pp = (":" + vspec[2]) if len(vspec) > 2 else ""
st = os.stat(volumepath)
logger.debug('(uid=%s, volume_path=%s)/(st_uid=%s, st_gid=%s, st_mode=%s)', self.uid, volumepath, st.st_uid, st.st_gid, oct(st.st_mode))
if (os.path.islink(volumepath)):
volumepath = os.path.realpath(volumepath)
# if user is the owner then OK
if (int(st.st_uid) == int(self.uid)):
return volume + pp
            # if user is a member of the group then check RW permissions
            # (compare numeric gids; gr_name would never match st.st_gid)
            groups = [g.gr_gid for g in grp.getgrall() if self.user in g.gr_mem]
            if (st.st_gid in groups):
if (bool(st.st_mode & stat.S_IWGRP)):
return volume + pp
elif (bool(st.st_mode & stat.S_IRGRP)):
return volume + ":ro"
# check others' permissions
if (bool(st.st_mode & stat.S_IWOTH)):
return volume + pp
elif (bool(st.st_mode & stat.S_IROTH)):
return volume + ":ro"
return None
return CheckVolume
IMAGE_AND_COMMAND = 'IMAGE_AND_COMMAND'
CONTAINER = 'CONTAINER'
def get_argparser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
title='subcommands', description='valid subcommands', help='additional help')
parser_stop = subparsers.add_parser('stop')
parser_stop.add_argument('-t', '--time')
parser_stop.add_argument(CONTAINER)
parser_pause = subparsers.add_parser('pause')
parser_pause.add_argument(CONTAINER)
parser_unpause = subparsers.add_parser('unpause')
parser_unpause.add_argument(CONTAINER)
parser_ps = subparsers.add_parser('ps')
parser_ps.add_argument('-a', dest='-a', nargs=0, action=DefaultAction)
parser_images = subparsers.add_parser('images')
parser_rm = subparsers.add_parser('rm')
parser_rm.add_argument(CONTAINER)
parser_run = subparsers.add_parser('run')
parser_run.add_argument('-e', '--env', dest='-e', action=DefaultAction)
parser_run.add_argument('-u', '--user', dest='-u', action=IgnoreAction)
parser_run.add_argument('-w', '--workdir', dest='-w', action=DefaultAction)
parser_run.add_argument('-v', '--volume', dest='-v', action=create_check_volume_action(get_uid()))
parser_run.add_argument('--volumes-from', dest='--volumes-from', action=DefaultAction)
parser_run.add_argument('--rm', dest='--rm', nargs=0, action=DefaultAction)
parser_run.add_argument('-t', '--tty', dest='-t', nargs=0, action=DefaultAction)
parser_run.add_argument('-i', dest='-i', nargs=0, action=DefaultAction)
parser_run.add_argument('--read-only', dest='--read-only', nargs=0, action=DefaultAction)
parser_run.add_argument('-p', '--publish', dest='-p', action=DefaultAction)
parser_run.add_argument('-P', '--publish-all', nargs=0, dest='-P', action=DefaultAction)
parser_run.add_argument('--name', dest='--name', action=DefaultAction)
parser_run.add_argument('--hostname', dest='--hostname', action=DefaultAction)
parser_run.add_argument('-m', '--memory', dest='-m', action=DefaultAction)
parser_run.add_argument('--memory-swap', dest='--memory-swap', action=DefaultAction)
parser_run.add_argument('-d', '--detach', dest='--detach', nargs=0, action=DefaultAction)
parser_run.add_argument('--add-host', dest='--add-host', action=DefaultAction)
parser_run.add_argument('--entrypoint', dest='--entrypoint', action=DefaultAction)
parser_run.add_argument('--env-file', dest='--env-file', action=DefaultAction)
parser_run.add_argument('-l', '--label', dest='-l', action=DefaultAction)
parser_run.add_argument('--label-file', dest='--label-file', action=DefaultAction)
parser_run.add_argument('--link', dest='--link', action=DefaultAction)
parser_run.add_argument(IMAGE_AND_COMMAND, nargs=argparse.REMAINDER, action=DefaultAction)
return parser
def get_username():
username = os.environ.get('SUDO_USER')
return getpass.getuser() if username is None else username
def get_uid():
sudo_uid = os.environ.get('SUDO_UID')
return str(os.getuid()) if sudo_uid is None else sudo_uid
def get_gid():
sudo_gid = os.environ.get('SUDO_GID')
return str(os.getgid()) if sudo_gid is None else sudo_gid
def run_command(command):
try:
output = subprocess.Popen(command, stderr=subprocess.STDOUT, shell=True)
exit_ok(output)
except subprocess.CalledProcessError as e:
exit_err(e.output)
def exit_ok(msg):
print msg
sys.exit(0)
def exit_err(msg):
print >> sys.stderr, msg
sys.exit(1)
def main(argv):
logger.debug('(uid=%s, username=%s)', get_uid(), get_username())
if (get_uid() == 0):
exit_err("root doesn't suppose to run this script.")
logger.debug('(uid=%s, arguments=[ %s ])', get_uid(), " ".join(argv))
if (len(argv) < 1):
exit_err(" Usage: dickercmd <run|stop|pouse|unpouse> <options>")
args = get_argparser().parse_args(argv)
if hasattr(args, 'ordered_args'):
logger.debug('(uid=%s, filtered_arguments=%s)', get_uid(), args.ordered_args)
ordered_args = reduce(lambda x, y: x + [y[0]] + (y[1] if isinstance(y[1], list) else [y[1]]), args.ordered_args, [])
ordered_args = filter(lambda x: x not in [IMAGE_AND_COMMAND, CONTAINER], ordered_args)
ordered_args = ["-u", get_uid() + ":" + get_gid()] + ordered_args
command = ' '.join(['docker', argv[0]] + ordered_args)
logger.info('(uid=%s, command=[ %s ])', get_uid(), command)
else:
command = ' '.join(['docker'] + argv)
run_command(command)
if __name__ == "__main__":
main(sys.argv[1:]) |
"""
Assigns attributes to dictionary values for easier object navigation.
"""
from dataclasses import dataclass
from typing import Optional
from open_sea_v1.responses.abc import BaseResponse
from open_sea_v1.responses.collection import CollectionResponse
@dataclass
class _LastSale:
_last_sale: dict
def __str__(self) -> str:
return f"({_LastSale.__name__}, asset={self.asset}, date={self.event_timestamp}, quantity={self.quantity})"
def __post_init__(self):
self.asset: dict = self._last_sale['asset']
self.asset_bundle = self._last_sale['asset_bundle']
self.event_type = self._last_sale['event_type']
self.event_timestamp = self._last_sale['event_timestamp']
self.auction_type = self._last_sale['auction_type']
self.total_price = self._last_sale['total_price']
self.created_date = self._last_sale['created_date']
self.quantity = self._last_sale['quantity']
@property
def transaction(self) -> dict:
return self._last_sale['transaction']
@property
def payment_token(self) -> dict:
return self._last_sale['payment_token']
@dataclass
class _Traits:
_traits: dict
def __post_init__(self):
self.trait_type = self._traits['trait_type']
self.value = self._traits['value']
self.display_type = self._traits['display_type']
@dataclass
class _Owner:
_owner: dict
def __str__(self) -> str:
return f"({_Owner.__name__}, user={self.user['username']})"
def __post_init__(self):
self.address = self._owner['address']
self.config = self._owner['config']
self.profile_img_url = self._owner['profile_img_url']
self.user: dict = self._owner['user']
@dataclass
class _Contract:
_contract: dict
def __str__(self) -> str:
return f"({_Contract.__name__} - {self.name.title()}: {self.description})"
def __post_init__(self):
self.address = self._contract['address']
self.name = self._contract['name']
self.symbol = self._contract['symbol']
self.image_url = self._contract['image_url']
self.description = self._contract['description']
self.external_link = self._contract['external_link']
@dataclass
class OrderResponse(BaseResponse):
_json: dict
def __str__(self):
return f"{self.id=}" if self.id else f"{self.order_hash=}"
def __post_init__(self):
self._set_optional_attrs()
self._set_common_attrs()
@property
def asset(self) -> 'AssetResponse':
return AssetResponse(self._json['asset'])
def _set_optional_attrs(self):
"""Depending on the endpoint you use, the Order response object will contain optional attributes."""
self.id: Optional = self._json.get('id') # id is only provided if you use the OrdersEndpoint
self.asset_bundle = self._json.get('asset_bundle')
def _set_common_attrs(self):
self.order_hash = self._json.get('order_hash')
self.created_date = self._json['created_date']
self.closing_date = self._json['closing_date']
self.closing_extendable = self._json['closing_extendable']
self.expiration_time = self._json['expiration_time']
self.listing_time = self._json['listing_time']
self.order_hash = self._json['order_hash']
self.exchange = self._json['exchange']
self.current_price = self._json['current_price']
self.current_bounty = self._json['current_bounty']
self.bounty_multiple = self._json['bounty_multiple']
self.maker_relayer_fee = self._json['maker_relayer_fee']
self.taker_relayer_fee = self._json['taker_relayer_fee']
self.maker_protocol_fee = self._json['maker_protocol_fee']
self.taker_protocol_fee = self._json['taker_protocol_fee']
self.maker_referrer_fee = self._json['maker_referrer_fee']
self.fee_method = self._json['fee_method']
self.side = self._json['side']
self.sale_kind = self._json['sale_kind']
self.target = self._json['target']
self.how_to_call = self._json['how_to_call']
self.calldata = self._json['calldata']
self.replacement_pattern = self._json['replacement_pattern']
self.static_target = self._json['static_target']
self.static_extradata = self._json['static_extradata']
self.payment_token = self._json['payment_token']
self.base_price = self._json['base_price']
self.extra = self._json['extra']
self.quantity = self._json['quantity']
self.salt = self._json['salt']
self.v = self._json['v']
self.r = self._json['r']
self.s = self._json['s']
self.approved_on_chain = self._json['approved_on_chain']
self.cancelled = self._json['cancelled']
self.finalized = self._json['finalized']
self.marked_invalid = self._json['marked_invalid']
self.prefixed_hash = self._json['prefixed_hash']
self.metadata: dict = self._json['metadata']
self.maker: dict = self._json['maker']
self.taker: dict = self._json['taker']
self.fee_recipient: dict = self._json['fee_recipient']
self.payment_token_contract: dict = self._json['payment_token_contract']
@dataclass
class AssetResponse(BaseResponse):
_json: dict
def __str__(self):
id_str = f"token_id={self.token_id.zfill(5)}"
name = f"name={self.name}"
return " ".join([id_str, name])
def __post_init__(self):
self._set_common_attrs()
def _set_common_attrs(self):
"""
Depending on the EventType you request, some elements of the json response will be missing.
For that reason we use .get() on every element.
"""
self.token_id = str(self._json.get("token_id") or '')
self.num_sales = self._json.get("num_sales")
self.background_color = self._json.get("background_color")
self.image_url = self._json.get("image_url")
self.image_preview_url = self._json.get("image_preview_url")
self.image_thumbnail_url = self._json.get("image_thumbnail_url")
self.image_original_url = self._json.get("image_original_url")
self.animation_url = self._json.get("animation_url")
self.animation_original_url = self._json.get("animation_original_url")
self.name = self._json.get("name")
self.description = self._json.get("description")
self.external_link = self._json.get("external_link")
self.permalink = self._json.get("permalink")
self.decimals = self._json.get("decimals")
self.token_metadata = self._json.get("token_metadata")
self.id = str(self._json.get("id") or '')
self.transfer_fee = self._json.get("transfer_fee")
self.transfer_fee_payment_token = self._json.get("transfer_fee_payment_token")
self.is_presale = self._json.get("is_presale")
self.listing_date = self._json.get("listing_date")
self.top_bid = self._json.get("top_bid")
@property
def asset_contract(self) -> _Contract:
return _Contract(self._json['asset_contract'])
@property
def owner(self) -> _Owner:
return _Owner(self._json['owner'])
@property
def traits(self) -> Optional[list[_Traits]]:
traits = self._json.get('traits')
if traits:
return [_Traits(traits) for traits in self._json['traits']]
return None
@property
def last_sale(self) -> Optional[_LastSale]:
last_sale = self._json.get('last_sale')
if last_sale:
return _LastSale(self._json['last_sale'])
return None
@property
def collection(self):
return CollectionResponse(self._json['collection'])
@property
def sell_orders(self) -> Optional[list[OrderResponse]]:
if sell_orders := self._json.get('sell_orders'):
return [OrderResponse(order) for order in sell_orders]
return None
@property
def creator(self) -> Optional[dict]:
return self._json.get('creator')
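

if __name__ == "__main__":
    # Added illustration (not part of the original module): AssetResponse reads
    # every common attribute with .get(), so it tolerates sparse payloads. The
    # dict below is a made-up fragment, not a real OpenSea response.
    sample = {"token_id": "123", "name": "Sample Asset", "num_sales": 2}
    asset = AssetResponse(sample)
    print(asset)            # token_id=00123 name=Sample Asset
    print(asset.num_sales)  # 2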
|
import argparse
import os
import torch
from torch import nn, optim
from torchvision import models
from dataloader import get_dataloader
from trainer import Trainer
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--size', type=int, default=224)
parser.add_argument('--root', type=str, default='data')
parser.add_argument('--valid-ratio', type=float, default=0.2)
parser.add_argument('--epochs', type=int, default=20)
parser.add_argument('--num-workers', type=int, default=0)
parser.add_argument('--parallel', action='store_true')
config = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = models.resnet50(pretrained=True)
net.fc = nn.Linear(net.fc.in_features, 2)
if config.parallel and torch.cuda.device_count() > 1:
net = nn.DataParallel(net)
net = net.to(device)
    # When the model is wrapped in DataParallel, the classifier head is
    # reached through net.module.
    fc = net.module.fc if isinstance(net, nn.DataParallel) else net.fc
    optimizer = optim.Adam(fc.parameters(), lr=config.lr)
train_loader, valid_loader = get_dataloader(
config.size, config.root, config.batch_size, config.valid_ratio,
config.num_workers)
trainer = Trainer(net, optimizer, train_loader, valid_loader, device)
for epoch in range(config.epochs):
train_loss, train_acc = trainer.train()
valid_loss, valid_acc = trainer.validate()
print('Epoch: {}/{},'.format(epoch + 1, config.epochs), \
'train loss: {:.6f},'.format(train_loss),
'train acc: {:.6f}.'.format(train_acc),
'valid loss: {:.6f},'.format(valid_loss),
'valid acc: {:.6f}.'.format(valid_acc))
os.makedirs('models', exist_ok=True)
torch.save(net.state_dict(),
'models/model_{:03d}.pth'.format(epoch + 1))
if __name__ == '__main__':
main()
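# Added note (illustrative): a typical invocation, assuming the data layout
# expected by dataloader.get_dataloader() lives under --root:
#   python <this script> --root data --batch-size 32 --epochs 20 --lr 1e-3 --parallel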
|
import os, os.path
import sys
import ast
import megalex
from megalex import safelyAccessFirstString, safelyAccessStrings
import datetime
if 3 > len(sys.argv):
print("Must supply at least two parameters! Usage: \n\tbackend_KPML.py infile outfile\n")
sys.exit(1)
outfilePath = sys.argv[2]
infilePath = sys.argv[1]
def lemma2ClassName(lemma):
name = ""
first = True
for item in lemma:
if first:
first = False
else:
name += "-"
name += item
return name.replace(" ", "-")
def possessiveForm(spelling):
if 's' == spelling[-1]:
return spelling + "'"
return spelling + "'s"
def properNounMorphs(entry):
morphs = []
name = lemma2ClassName(entry[0])
singulars = safelyAccessStrings(entry[1], ('singular',))
plurals = safelyAccessStrings(entry[1], ('plural',))
for singular in singulars:
morphs.append("<entry pos=\"NP\" word=\"%s\" class=\"%s\" macros=\"@num.sg\"/>" % (singular, name))
morphs.append("<entry pos=\"NP.possessive\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@num.sg @case.gen\"/>" % (possessiveForm(singular), singular, name))
for plural in plurals:
if singulars:
morphs.append("<entry pos=\"NP\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@num.pl\"/>" % (plural, singulars[0], name))
morphs.append("<entry pos=\"NP.possessive\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@num.pl @case.gen\"/>" % (possessiveForm(plural), singulars[0], name))
else:
morphs.append("<entry pos=\"NP\" word=\"%s\" class=\"%s\" macros=\"@num.pl\"/>" % (plural, name))
morphs.append("<entry pos=\"NP.possessive\" word=\"%s\" class=\"%s\" macros=\"@num.pl @case.gen\"/>" % (possessiveForm(plural), name))
return morphs
def countableNounMorphs(entry):
morphs = []
name = lemma2ClassName(entry[0])
singulars = safelyAccessStrings(entry[1], ('singular',))
plurals = safelyAccessStrings(entry[1], ('plural',))
for singular in singulars:
morphs.append("<entry pos=\"N\" word=\"%s\" class=\"%s\" macros=\"@num.sg @pers.3rd\"/>" % (singular, name))
morphs.append("<entry pos=\"N.possessive\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@num.sg @pers.3rd @case.gen\"/>" % (possessiveForm(singular), singular, name))
for plural in plurals:
if singulars:
morphs.append("<entry pos=\"N\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@num.pl @pers.3rd\"/>" % (plural, singulars[0], name))
morphs.append("<entry pos=\"N.possessive\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@num.pl @pers.3rd @case.gen\"/>" % (possessiveForm(plural), singulars[0], name))
else:
morphs.append("<entry pos=\"N\" word=\"%s\" class=\"%s\" macros=\"@num.pl @pers.3rd\"/>" % (plural, name))
morphs.append("<entry pos=\"N.possessive\" word=\"%s\" class=\"%s\" macros=\"@num.pl @pers.3rd @case.gen\"/>" % (possessiveForm(plural), name))
return morphs
def uncountableNounMorphs(entry):
morphs = []
name = lemma2ClassName(entry[0])
singulars = safelyAccessStrings(entry[1], ('ietsial',))
plurals = safelyAccessStrings(entry[1], ('plural',))
for singular in singulars:
morphs.append("<entry pos=\"N\" word=\"%s\" class=\"%s\" macros=\"@num.sg @pers.3rd\"/>" % (singular, name))
morphs.append("<entry pos=\"N.possessive\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@num.sg @pers.3rd @case.gen\"/>" % (possessiveForm(singular), singular, name))
for plural in plurals:
if singulars:
morphs.append("<entry pos=\"N\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@num.pl @pers.3rd\"/>" % (plural, singulars[0], name))
morphs.append("<entry pos=\"N.possessive\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@num.pl @pers.3rd @case.gen\"/>" % (possessiveForm(plural), singulars[0], name))
else:
morphs.append("<entry pos=\"N\" word=\"%s\" class=\"%s\" macros=\"@num.pl @pers.3rd\"/>" % (plural, name))
morphs.append("<entry pos=\"N.possessive\" word=\"%s\" class=\"%s\" macros=\"@num.pl @pers.3rd @case.gen\"/>" % (possessiveForm(plural), name))
return morphs
def measureUnitMorphs(entry):
morphs = []
name = lemma2ClassName(entry[0])
singulars = safelyAccessStrings(entry[1], ('singular',))
plurals = safelyAccessStrings(entry[1], ('plural',))
for singular in singulars:
morphs.append("<entry pos=\"N.unit\" word=\"%s\" class=\"%s\" />" % (singular, name))
morphs.append("<entry pos=\"N.possessive\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@case.gen\"/>" % (possessiveForm(singular), singular, name))
for plural in plurals:
if singulars:
morphs.append("<entry pos=\"N.unit\" word=\"%s\" stem=\"%s\" class=\"%s\" />" % (plural, singulars[0], name))
morphs.append("<entry pos=\"N.possessive\" word=\"%s\" stem=\"%s\" class=\"%s\" macros=\"@case.gen\"/>" % (possessiveForm(plural), singulars[0], name))
else:
morphs.append("<entry pos=\"N.unit\" word=\"%s\" class=\"%s\" />" % (plural, name))
morphs.append("<entry pos=\"N.possessive\" word=\"%s\" class=\"%s\" macros=\"@case.gen\"/>" % (possessiveForm(plural), name))
return morphs
def noun2LEX(entry):
if "PRP" == entry[0][2]:
return properNounMorphs(entry)
if "CT" == entry[0][2]:
return countableNounMorphs(entry)
if "NCT" == entry[0][2]:
return uncountableNounMorphs(entry)
if "MCT" == entry[0][2]:
        return measureUnitMorphs(entry)
    # Unknown noun subtype: return an empty list so callers can iterate safely.
    return []
lines = open(infilePath).read().splitlines()
pretxt = open("./backend_CCG_pre.txt").read()
postxt = open("./backend_CCG_post.txt").read()
entries = []
for line in lines:
line = line.strip()
entries.append(ast.literal_eval(line))
with open(outfilePath, "w") as outfile:
outfile.write('%s\n' % pretxt)
for entry in entries:
# TODO: for now only output nouns
if "NOUN" == entry[0][1]:
morphs = noun2LEX(entry)
for morph in morphs:
outfile.write("%s\n" % (morph,))
outfile.write('%s\n' % postxt)
|
from metrics.Metric import Metric
from metrics.TNR import TNR
from metrics.TPR import TPR
class BCR(Metric):
def __init__(self):
Metric.__init__(self)
self.name = 'BCR'
def calc(self, actual, predicted, dict_of_sensitive_lists, single_sensitive_name,
unprotected_vals, positive_pred):
tnr = TNR()
tnr_val = tnr.calc(actual, predicted, dict_of_sensitive_lists, single_sensitive_name,
unprotected_vals, positive_pred)
tpr = TPR()
tpr_val = tpr.calc(actual, predicted, dict_of_sensitive_lists, single_sensitive_name,
unprotected_vals, positive_pred)
bcr = (tpr_val + tnr_val) / 2.0
return bcr
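# Added worked example (illustrative): with TPR = 0.90 and TNR = 0.70, the
# balanced classification rate is BCR = (0.90 + 0.70) / 2 = 0.80.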
|
from ..unit import Unit
from ..lin import Weights, Biases
from ..activations import Identity
from murmeltier.utils import assert_equal_or_none
class Layer(Unit):
'''
weights -> biases -> activation
'''
def __init__(self, in_specs, out_specs, activation_type = Identity, **kwargs):
self.config(in_specs = in_specs, out_specs = out_specs)
self.params['weights'] = Weights(in_specs = in_specs, out_specs = out_specs, **kwargs)
self.params['biases'] = Biases(in_specs = out_specs, out_specs = out_specs, **kwargs)
self.params['activation'] = activation_type(in_specs = out_specs, out_specs = out_specs, **kwargs)
def get_output(self, input):
pre_biases = self.params['weights'].get_output(input)
post_biases = self.params['biases'].get_output(pre_biases)
return self.params['activation'].get_output(post_biases)
|
from OpenGL.GL import *
import gcraft.utils.state_manager as sm
class RenderBuffer:
def __init__(self, buffer_id, texture_id):
self.texture_id = texture_id
self.buffer_id = buffer_id
def bind(self):
glBindFramebuffer(GL_FRAMEBUFFER, self.buffer_id)
def release(self):
glBindFramebuffer(GL_FRAMEBUFFER, 0)
sm.bind_2d_texture(0)
        # PyOpenGL expects a count plus a sequence of ids here.
        glDeleteFramebuffers(1, [self.buffer_id])
        glDeleteTextures([self.texture_id])
@staticmethod
def bind_screen():
glBindFramebuffer(GL_FRAMEBUFFER, 0)
@staticmethod
def create(texture_size: (int, int)):
frame_buffer_id = glGenFramebuffers(1)
texture_id = glGenTextures(1)
glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer_id)
sm.bind_2d_texture(texture_id)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texture_size[0], texture_size[1], 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, texture_id, 0)
buffer = RenderBuffer(frame_buffer_id, texture_id)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
sm.bind_2d_texture(0)
return buffer
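# A hedged usage sketch of the render-to-texture flow this class supports.
# draw_scene and draw_textured_quad are application callbacks, not gcraft
# APIs, and a current OpenGL context is required before any of this runs.
def _render_to_texture_example(draw_scene, draw_textured_quad, size=(512, 512)):
    buffer = RenderBuffer.create(size)
    buffer.bind()                   # subsequent draws render into the texture
    draw_scene()
    RenderBuffer.bind_screen()      # back to the default framebuffer
    sm.bind_2d_texture(buffer.texture_id)
    draw_textured_quad()            # sample the off-screen result
    buffer.release()                # unbind and free the FBO and its texture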
|
'''
DictUtils.py
Utility methods and classes that provide more efficient ways of handling python dictionaries
Copyright (C) 2013 Timothy Edmund Crosley
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from .MultiplePythonSupport import *
def missingKey(d1, d2):
"""
Returns a list of name value pairs for all the elements that are present in one dictionary and not the other
"""
l = []
l += [ {k:d1[k]} for k in d1 if k not in d2 ]
l += [ {k:d2[k]} for k in d2 if k not in d1 ]
return l
def dictCompare(d1, d2):
"""
Returns a list of name value pairs for all the elements that are different between the two dictionaries
"""
diffs = missingKey(d1, d2)
diffs += [ {k:str(d1[k]) + '->' + str(d2[k])} for k in d1 if k in d2 and d1[k] != d2[k]]
return diffs
def userInputStrip(uDict):
"""
Strip whitespace out of input provided by the user
"""
dictList = map(lambda x: (x[1] and type(x[1]) == type('')) and (x[0], x[1].strip()) or (x[0], x[1]), uDict.items())
return dict(dictList)
def setNestedValue(d, keyString, value):
"""
Sets the value in a nested dictionary where '.' is the delimiter
"""
keys = keyString.split('.')
currentValue = d
for key in keys:
previousValue = currentValue
currentValue = currentValue.setdefault(key, {})
previousValue[key] = value
def getNestedValue(dictionary, keyString, default=None):
"""
Returns the value from a nested dictionary where '.' is the delimiter
"""
keys = keyString.split('.')
currentValue = dictionary
for key in keys:
if not isinstance(currentValue, dict):
return default
currentValue = currentValue.get(key, None)
if currentValue is None:
return default
return currentValue
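# Illustrative example of the dotted-key helpers above (not part of the
# original module); kept inside a function so importing stays side-effect free.
def _nested_value_example():
    config = {}
    setNestedValue(config, 'database.host', 'localhost')
    setNestedValue(config, 'database.port', 5432)
    assert config == {'database': {'host': 'localhost', 'port': 5432}}
    assert getNestedValue(config, 'database.port') == 5432
    assert getNestedValue(config, 'database.missing', default='n/a') == 'n/a'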
def stringKeys(dictionary):
"""
Modifies the passed in dictionary to ensure all keys are string objects, converting them when necessary.
"""
for key, value in list(dictionary.items()):  # copy: keys are removed while iterating
if type(key) != str:
dictionary.pop(key)
dictionary[str(key)] = value
return dictionary
def iterateOver(dictionary, key):
"""
Returns an enumeration over the value associated with key in dictionary, wrapping a single value in a list.
"""
results = dictionary.get(key, [])
if type(results) != list:
results = [results]
return enumerate(results)
def twoWayDict(dictionary):
"""
Adds a reverse mapping (value -> key) for every pair, so lookups work in both directions.
"""
for key, value in list(dictionary.items()):  # copy: the dict grows while iterating
dictionary[value] = key
return dictionary
class OrderedDict(dict):
"""
Defines a dictionary which maintains order - only necessary in older versions of python.
"""
class ItemIterator(dict):
def __init__(self, orderedDict, includeIndex=False):
self.orderedDict = orderedDict
self.length = len(orderedDict)
self.index = 0
self.includeIndex = includeIndex
def next(self):
if self.index < self.length:
key = self.orderedDict.orderedKeys[self.index]
value = self.orderedDict[key]
to_return = (self.includeIndex and (self.index, key, value)) or (key, value)
self.index += 1
return to_return
else:
raise StopIteration
def __iter__(self):
return self
def __init__(self, tuplePairs=()):
self.orderedKeys = []
for key, value in tuplePairs:
self[key] = value
def __add__(self, value):
if isinstance(value, OrderedDict):
newDict = self.copy()
newDict.update(value)
return newDict
return NotImplemented  # dict defines no __add__; let Python raise TypeError
def copy(self):
newDict = OrderedDict()
newDict.update(self)
return newDict
def update(self, dictionary):
for key, value in iteritems(dictionary):
self[key] = value
def items(self):
items = []
for key in self.orderedKeys:
items.append((key, self[key]))
return items
def values(self):
values = []
for key in self.orderedKeys:
values.append(self[key])
return values
def keys(self):
return self.orderedKeys
def iterkeys(self):
return self.orderedKeys.__iter__()
def __iter__(self):
return self.iterkeys()
def iteritems(self):
return self.ItemIterator(self)
def iteritemsWithIndex(self):
return self.ItemIterator(self, includeIndex=True)
def __setitem__(self, keyString, value):
if not keyString in self.orderedKeys:
self.orderedKeys.append(keyString)
return dict.__setitem__(self, keyString, value)
def setdefault(self, keyString, value):
if not keyString in self.orderedKeys:
self.orderedKeys.append(keyString)
return dict.setdefault(self, keyString, value)
def getAllNestedKeys(dictionary, prefix=""):
"""
Returns all keys nested within nested dictionaries.
"""
keys = []
for key, value in iteritems(dictionary):
if isinstance(value, dict):
keys.extend(getAllNestedKeys(value, prefix=prefix + key + '.'))
continue
keys.append(prefix + key)
return keys
class NestedDict(dict):
"""
Defines a dictionary that enables easy safe retrieval of nested dict keys.
"""
def __init__(self, d=None):
if d:
self.update(d)
def setValue(self, keyString, value):
"""
Sets the value of a nested dict key.
"""
setNestedValue(self, keyString, value)
def allKeys(self):
"""
Returns all keys, including nested dict keys.
"""
return getAllNestedKeys(self)
def difference(self, otherDict):
"""
returns a list of tuples [(key, myValue, otherDictValue),]
allowing you to do:
for fieldName, oldValue, newValue in oldValues.difference(newValues)
"""
differences = []
for key in set(self.allKeys() + otherDict.allKeys()):
myValue = self.getValue(key, default=None)
otherDictValue = otherDict.getValue(key, default=None)
if myValue != otherDictValue:
differences.append((key, myValue, otherDictValue))
return differences
def getValue(self, keyString, **kwargs):
"""
Returns a nested value if it exists.
"""
keys = keyString.split('.')
currentNode = self
for key in keys:
if not key:
continue
currentNode = currentNode.get(key, None)
if not currentNode:
break
if currentNode:
return currentNode
elif 'default' in kwargs:
return kwargs.get('default')
else:
raise KeyError(keyString)
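# Illustrative example of NestedDict's dotted-key retrieval (not part of the
# original module).
def _nested_dict_example():
    settings = NestedDict({'server': {'host': 'localhost', 'port': 8080}})
    assert settings.getValue('server.port') == 8080
    assert settings.getValue('server.missing', default=None) is None
    settings.setValue('server.debug', True)
    assert 'server.debug' in settings.allKeys()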
def createDictFromString(string, itemSeparator, keyValueSeparator, ordered=False):
"""
Creates a new dictionary based on the passed in string, itemSeparator and keyValueSeparator.
"""
if ordered:
newDict = OrderedDict()
else:
newDict = {}
if not string:
return newDict
for item in string.split(itemSeparator):
key, value = item.split(keyValueSeparator)
oldValue = newDict.get(key, None)
if oldValue is not None:
if type(oldValue) == list:
newDict[key].append(value)
else:
newDict[key] = [oldValue, value]
else:
newDict[key] = value
return newDict
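# Illustrative example of createDictFromString (not part of the original
# module): repeated keys are collected into a list, and ordered=True preserves
# insertion order via the OrderedDict defined above.
def _create_dict_from_string_example():
    parsed = createDictFromString('a=1;b=2;a=3', ';', '=')
    assert parsed == {'a': ['1', '3'], 'b': '2'}
    ordered = createDictFromString('x=1;y=2', ';', '=', ordered=True)
    assert ordered.keys() == ['x', 'y']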
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django import forms
from django.utils.safestring import mark_safe
from django.forms.models import inlineformset_factory
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, Div, Submit, Hidden, HTML, Field
from crispy_forms.bootstrap import FormActions, AppendedText, InlineRadios
from wells.models import ActivitySubmission
class ActivitySubmissionFilterPackForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_tag = False
self.helper.disable_csrf = True
self.helper.layout = Layout(
Fieldset(
'Filter Pack',
Div(
Div(AppendedText('filter_pack_from', 'ft'), css_class='col-md-2'),
Div(AppendedText('filter_pack_to', 'ft'), css_class='col-md-2'),
Div(AppendedText('filter_pack_thickness', 'in'), css_class='col-md-2'),
css_class='row',
),
Div(
Div('filter_pack_material', css_class='col-md-3'),
Div('filter_pack_material_size', css_class='col-md-3'),
css_class='row',
),
)
)
super(ActivitySubmissionFilterPackForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(ActivitySubmissionFilterPackForm, self).clean()
return cleaned_data
class Meta:
model = ActivitySubmission
fields = ['filter_pack_from', 'filter_pack_to', 'filter_pack_thickness', 'filter_pack_material', 'filter_pack_material_size']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2019-10-03
# @Filename: events.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import enum
__all__ = ["CameraSystemEvent", "CameraEvent"]
class CameraSystemEvent(enum.Enum):
"""Enumeration of camera system events."""
CAMERA_ADDED = enum.auto()
CAMERA_REMOVED = enum.auto()
class CameraEvent(enum.Enum):
"""Enumeration of camera events."""
CAMERA_CONNECTED = "connected"
CAMERA_CONNECT_FAILED = "connect_failed"
CAMERA_DISCONNECTED = "disconnected"
CAMERA_DISCONNECT_FAILED = "disconnect_failed"
EXPOSURE_IDLE = "idle"
EXPOSURE_FLUSHING = "flushing"
EXPOSURE_INTEGRATING = "integrating"
EXPOSURE_READING = "reading"
EXPOSURE_READ = "read"
EXPOSURE_DONE = "done"
EXPOSURE_FAILED = "failed"
EXPOSURE_WRITING = "writing"
EXPOSURE_WRITTEN = "written"
EXPOSURE_POST_PROCESSING = "post_processing"
EXPOSURE_POST_PROCESS_DONE = "post_process_done"
EXPOSURE_POST_PROCESS_FAILED = "post_process_failed"
NEW_SET_POINT = "new_set_point"
SET_POINT_REACHED = "set_point_reached"
|
import MySQLdb
import random
import argparse
import textwrap
# MySQL config
MYSQL_USER = 'root'
MYSQL_PASSWD = 'linux'
MYSQL_HOST = '127.0.0.1'
MYSQL_OAI_DB = 'oai_db'
# APN config
APN_NAME = 'oai.ipv4'
# MME config
MME_HOST = 'labuser.111.111'
MME_REALM = '111.111'
# PGW config
PGW_IPV4_VAL = '10.0.0.2'
PGW_IPV6_VAL = '0'
help_str = """Example usage:
./oai_adduser.py --imsi=208930000000001 \
--msisdn=33638060010 --ki=8BAF473F2F8FD09487CCCBD7097C6862 \
--opc=8E27B6AF0E692E750F32667A3B14605D
"""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(help_str))
parser.add_argument('--imsi', metavar='SIM_IMSI', help='The IMSI of the sim card', required=True)
parser.add_argument('--msisdn', metavar='SIM_MSISDN', help='The MSISDN of the sim card', required=True)
parser.add_argument('--ki', metavar='SIM_KI', help='The KI used to program the sim card', required=True)
parser.add_argument('--opc', metavar='SIM_OPC', help='The OPC of the sim card', required=True)
args = parser.parse_args()
user_imsi = args.imsi
user_msisdn = args.msisdn
user_ki = args.ki
user_opc = args.opc
# Connect to db
con = MySQLdb.connect(user=MYSQL_USER, passwd=MYSQL_PASSWD,
host=MYSQL_HOST, db=MYSQL_OAI_DB);
c = con.cursor()
print("Updating MySQL tables")
# Insert apn - ignore if already exists
print("Inserting to apn table")
c.execute("""INSERT IGNORE INTO `apn` (`apn-name`, `pdn-type`)
VALUES (%s, 'IPv4');""", (APN_NAME,))
con.commit()
# Get idmmeidentity
c.execute("""SELECT `idmmeidentity` FROM `mmeidentity`
WHERE `mmehost`=%s AND `mmerealm`=%s""",
(MME_HOST, MME_REALM))
con.commit()
mmeid = -1
if c.rowcount > 0:
mmeid = c.fetchone()[0]
print("Found existing mmeidentity with id: %d" % mmeid)
else:
# Insert to mmeidentity with host information
print("Inserting to mmeidentity table")
c.execute("""INSERT INTO `mmeidentity`
(`mmehost`, `mmerealm`, `UE-Reachability`)
VALUES (%s, %s, 0);""", (MME_HOST, MME_REALM))
con.commit()
mmeid = c.lastrowid
print("New mmeidentity id: %d" % mmeid)
# Ensure pgw entry exists
c.execute("""SELECT `id` FROM `pgw`
WHERE `ipv4`=%s AND `ipv6`=%s""",
(PGW_IPV4_VAL, PGW_IPV6_VAL))
con.commit()
pgwid = -1
if c.rowcount > 0:
pgwid = c.fetchone()[0]
print("Found existing pgw with id: %d" % pgwid)
else:
# Insert to mmeidentity with host information
print("Inserting to pgw table")
c.execute("""INSERT INTO `pgw`
(`ipv4`, `ipv6`)
VALUES (%s, %s);""", (PGW_IPV4_VAL, PGW_IPV6_VAL))
con.commit()
pgwid = c.lastrowid
print("New pgw id: %d" % pgwid)
# Add pdn entry if not exists
c.execute("""SELECT `id` FROM `pdn`
WHERE `apn`=%s AND `pgw_id`=%s AND `users_imsi`=%s""",
(APN_NAME, pgwid, user_imsi))
con.commit()
if c.rowcount > 0:
pdnid = c.fetchone()[0]
print("Found existing pdn with id %d. Replacing..." % pdnid)
# Replace
c.execute("""UPDATE `pdn` SET
`apn`=%s, `pdn_type`='IPv4', `pdn_ipv4`='0.0.0.0',
`pdn_ipv6`='0:0:0:0:0:0:0:0',
`aggregate_ambr_ul`=50000000,
`aggregate_ambr_dl`=100000000,
`pgw_id`=%s, `users_imsi`=%s, `qci`=9, `priority_level`=15,
`pre_emp_cap`='DISABLED', `pre_emp_vul`='ENABLED',
`LIPA-Permissions`='LIPA-only'
WHERE `id`=%s;""",
(APN_NAME, pgwid, user_imsi, pdnid))
con.commit()
else:
print("Inserting to pdn table")
c.execute("""INSERT INTO `pdn`
(`apn`, `pdn_type`, `pdn_ipv4`, `pdn_ipv6`,
`aggregate_ambr_ul`, `aggregate_ambr_dl`,
`pgw_id`, `users_imsi`, `qci`, `priority_level`,
`pre_emp_cap`, `pre_emp_vul`, `LIPA-Permissions`)
VALUES
(%s, 'IPv4', '0.0.0.0', '0:0:0:0:0:0:0:0',
50000000, 100000000,
%s, %s, 9, 15,
'DISABLED', 'ENABLED', 'LIPA-only');""",
(APN_NAME, pgwid, user_imsi))
con.commit()
pdnid = c.lastrowid
print("New pdn id: %d" % pdnid)
# Add user
print("Inserting to users table")
c.execute("""REPLACE INTO `users`
(`imsi`, `msisdn`, `imei`,
`imei_sv`, `ms_ps_status`,
`rau_tau_timer`, `ue_ambr_ul`,
`ue_ambr_dl`, `access_restriction`,
`mme_cap`, `mmeidentity_idmmeidentity`,
`key`, `RFSP-Index`, `urrp_mme`,
`sqn`, `rand`, `OPc`)
VALUES
(%s, %s, NULL,
NULL, 'PURGED',
120, 50000000,
100000000, 47,
0000000000, %s,
UNHEX(%s), 1, 0,
0, UNHEX(%s), UNHEX(%s));""",
(user_imsi, user_msisdn, mmeid, user_ki,
"%x" % random.randint(0, 2**(8*16)), user_opc))
con.commit()
print("Added new user")
c.close()
con.close()
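# The mmeidentity, pgw and pdn blocks above all follow the same "select the
# id, insert a row if missing" pattern. A hedged refactoring sketch of that
# pattern; it is defined only for illustration (the script above checks
# c.rowcount rather than fetchone() and does not call this helper).
def get_or_insert_id(cursor, connection, select_sql, select_args, insert_sql, insert_args):
    """Return the id matched by select_sql, inserting a row first if none exists."""
    cursor.execute(select_sql, select_args)
    row = cursor.fetchone()
    if row is not None:
        return row[0]
    cursor.execute(insert_sql, insert_args)
    connection.commit()
    return cursor.lastrowid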
|
# -*- coding: utf-8 -*-
"""
Simple Counter
~~~~~~~~~~~~~~
Instrumentation example that gathers method invocation counts
and dumps the numbers when the program exits, in JSON format.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import sys
from equip import Program, \
Instrumentation, \
SimpleRewriter, \
MethodVisitor
import equip.utils.log as logutils
from equip.utils.log import logger
logutils.enableLogger(to_file='./equip.log')
# Declaration of the code to be injected in various places. This
# code is compiled to bytecode which is then added to the various
# code_objects (e.g., method, etc.) based on what the visitor specifies.
BEFORE_CODE = """
GlobalCounterInst.count(file='{file_name}',
class_name='{class_name}',
method='{method_name}',
lineno={lineno})
"""
# We need to inject a new import statement that contains the GlobalCounterInst
IMPORT_CODE = """
from counter import GlobalCounterInst
"""
ON_ENTER_CODE = """
print "Starting instrumented program"
"""
# When the instrumented code exits, we want to serialize the data
ON_EXIT_CODE = """
GlobalCounterInst.to_json('./data.json')
"""
# The visitor is called for each method in the program (function or method)
class CounterInstrumentationVisitor(MethodVisitor):
def __init__(self):
MethodVisitor.__init__(self)
def visit(self, meth_decl):
rewriter = SimpleRewriter(meth_decl)
# Ensure we have imported our `GlobalCounterInst`
rewriter.insert_import(IMPORT_CODE, module_import=True)
# This is the main instrumentation code with a callback to
# `GlobalCounterInst::count`
rewriter.insert_before(BEFORE_CODE)
HELP_MESSAGE = """
1. Run counter_instrument.py on the code you want to instrument:
$ python counter_instrument.py <path/to/code>
2. Run your original program:
$ export PYTHONPATH=$PYTHONPATH:/path/to/counter
$ python start_my_program.pyc
"""
def main(argc, argv):
if argc < 2:
print HELP_MESSAGE
return
visitor = CounterInstrumentationVisitor()
instr = Instrumentation(argv[1])
instr.set_option('force-rebuild')
if not instr.prepare_program():
print "[ERROR] Cannot find program code to instrument"
return
# Add code at the very beginning of each module (only triggered if __main__ routine)
instr.on_enter(ON_ENTER_CODE)
# Add code at the end of each module (only triggered if __main__ routine)
instr.on_exit(ON_EXIT_CODE)
# Apply the instrumentation with the visitor, and when a change has been made
# it will overwrite the pyc file.
instr.apply(visitor, rewrite=True)
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
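# The injected BEFORE_CODE/ON_EXIT_CODE snippets above rely on a separate
# `counter` module exposing `GlobalCounterInst` with count() and to_json()
# methods. That module is not shown in this file; the class below is only a
# sketch of what it might contain (names and fields are assumptions, not
# equip's API) and would normally live in counter.py on the PYTHONPATH.
import json
from collections import defaultdict

class _CounterSketch(object):
    def __init__(self):
        self.counts = defaultdict(int)

    def count(self, file=None, class_name=None, method=None, lineno=None):
        # Key each call site by file, class, method and line number.
        self.counts['%s:%s.%s:%s' % (file, class_name, method, lineno)] += 1

    def to_json(self, path):
        # Serialize the gathered invocation counts as JSON.
        with open(path, 'w') as fobj:
            json.dump(dict(self.counts), fobj, indent=2)

# In counter.py this would be exposed as: GlobalCounterInst = _CounterSketch()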
|
# Copyright 2018 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import json
import os.path
import re
from warnings import warn
import tensorforce_client.utils as util
from tensorforce_client.cluster import Cluster, get_cluster_from_string
class Experiment(object):
def __init__(self, **kwargs):
"""
Keyword Args:
file (str): The Experiment's json spec file (can contain all other args).
name (str): The name of the Experiment. This is also the name of the folder where it is stored.
environment (str): The filename of the json env-spec file to use (see TensorForce documentation).
agent (str): The filename of the json agent-spec file to use (see TensorForce documentation).
network (str): The filename of the json network-spec file to use (see TensorForce documentation).
cluster (str): The filename of the json cluster-spec file to use (see class `Cluster`).
episodes (int): The total number of episodes to run (all parallel agents).
total_timesteps (int): The max. total number of timesteps to run (all parallel agents).
max_timesteps_per_episode (int): The max. number of timesteps to run in each episode.
deterministic (bool): Whether to not(!) use stochastic exploration on top of plain action outputs.
repeat_actions (int): The number of actions to repeat for each action selection (by calling agent.act()).
debug_logging (bool): Whether to switch on debug logging (default: False).
run_mode (str): Which runner mode to use. Valid values are only 'single', 'multi-threaded' and
'distributed'.
num_workers (int): The number of worker processes to use (see `distributed` and `multi-threaded`
run_modes).
num_parameter_servers (int): The number of parameter servers to use (see distributed tensorflow).
saver_frequency (str): The frequency with which to save the model. This is a combination of an int
and a unit (e.g. "600s"), where unit can be "s" (seconds), "e" (episodes), or "t" (timesteps).
summary_frequency (str): The frequency with which to save a tensorboard summary.
This is a combination of an int and a unit (e.g. "600s"), where unit can be "s" (seconds)
or "t" (timesteps). The episode unit (e) is not allowed here.
"""
# see whether we have a json (yaml?) file for the experiment
# TODO: yaml support
self.file = kwargs.get("file")
if self.file:
from_json = util.read_json_spec(self.file, "experiments")
# get all attributes from kwargs
else:
from_json = {}
# From here on, give kwargs priority over spec (from file), so that single settings in the json file can be
# overwritten by command line.
# sanity check name
self.name = kwargs.get("name") or from_json.get("name", "")
if not re.match(r'^\w+$', self.name):
raise util.TFCliError("ERROR: Name of experiment needs to be all alphanumeric characters")
self.name_hyphenated = re.sub(r'_', '-', self.name)
self.path = "experiments/{}/".format(self.name)
self.k8s_config = "{}experiment.yaml".format(self.path)
# read in sub-spec files (to JSON)
self.environment = kwargs.get("environment") or from_json.get("environment")
if isinstance(self.environment, str):
self.environment = util.read_json_spec(self.environment, "environments")
if self.environment.get("remote") and not self.environment.get("image"):
raise util.TFCliError("WARNING: Defining a remote environment without a docker image in experiment spec! "
"Use field `image` to define a docker image for the remote env.")
self.network = kwargs.get("network") or from_json.get("network")
if isinstance(self.network, str):
self.network = util.read_json_spec(self.network, "networks")
self.agent = kwargs.get("agent") or from_json.get("agent")
if isinstance(self.agent, str):
self.agent = util.read_json_spec(self.agent, "agents")
self.cluster = kwargs.get("cluster") or from_json.get("cluster")
if isinstance(self.cluster, str):
cluster = get_cluster_from_string(self.cluster)
self.cluster = cluster.get_spec()
elif not isinstance(self.cluster, dict):
raise util.TFCliError("ERROR: Cluster (-c option) has to be given as json filename.")
self.episodes = kwargs.get("episodes") or from_json.get("episodes", 10000)
self.total_timesteps = kwargs.get("total_timesteps") or from_json.get("total_timesteps", 1000000)
self.max_timesteps_per_episode = kwargs.get("max_timesteps_per_episode") or \
from_json.get("max_timesteps_per_episode", 1000)
self.deterministic = kwargs.get("deterministic")
if self.deterministic is None:
self.deterministic = from_json.get("deterministic", False)
self.repeat_actions = kwargs.get("repeat_actions") or from_json.get("repeat_actions", 1)
self.num_workers = kwargs.get("num_workers") or from_json.get("num_workers", 3)
self.num_parameter_servers = kwargs.get("num_parameter_servers") or from_json.get("num_parameter_servers", 1)
# update our json file pointer and write us into the experiment's dir
self.file = "{}experiment.json".format(self.path)
self.debug_logging = kwargs.get("debug_logging") or from_json.get("debug_logging", False)
# the experiment's run type
self.run_mode = kwargs.get("run_mode") or from_json.get("run_mode", "distributed")
assert self.run_mode in ["distributed", "multi-threaded", "single"],\
"ERROR: run-type needs to be one of distributed|multi-threaded|single!"
if self.run_mode == "distributed" and self.num_parameter_servers <= 0:
raise util.TFCliError("ERROR: Cannot create experiment of run-mode=distributed and zero parameter servers!")
self.saver_frequency = kwargs.get("saver_frequency")\
or from_json.get("saver_frequency", "600s" if self.run_mode == "distributed" else "100e")
self.summary_frequency = kwargs.get("summary_frequency")\
or from_json.get("summary_frequency", "120s" if self.run_mode == "distributed" else "10e")
# whether this experiment runs on a dedicated cluster
self.has_dedicated_cluster = kwargs.get("has_dedicated_cluster") or from_json.get("has_dedicated_cluster", True)
# status (running, paused, stopped, etc..)
self.status = kwargs.get("status") or from_json.get("status", None)
# json file specific to a certain experiment 'run' (e.g. cluster may differ from experiment's base config)
self.running_json_file = "experiment_running.json"
def generate_locally(self):
"""
Writes the local json spec file for this Experiment object into the Experiment's dir.
This file contains all settings (including agent, network, cluster, run-mode, etc..).
"""
# check whether this experiment already exists (as a folder inside project's folder)
# create a new dir for this experiment
if not os.path.exists(self.path+"results/"):
print("+ Creating experiment's directory {}.".format(self.path))
os.makedirs(self.path+"results/")
# write experiment data into experiment file (for future fast constructs of experiment parameters)
print("+ Writing Experiment's settings to local disk.")
self.write_json_file(self.path + "experiment.json")
def setup_cluster(self, cluster, project_id, start=False):
"""
Given a cluster name (or None) and a remote project-ID,
sets up the cluster settings for this Experiment locally.
Also starts the cluster if start is set to True.
Args:
cluster (str): The name of the cluster. If None, will get cluster-spec from the Experiment, or create a
default Cluster object.
project_id (str): The remote gcloud project ID.
start (bool): Whether to already create (start) the cluster in the cloud.
Returns: The Cluster object.
"""
clusters = util.get_cluster_specs()
# cluster is given (separate from experiment's own cluster)
if cluster:
cluster = get_cluster_from_string(cluster, running_clusters=clusters)
self.has_dedicated_cluster = False
# use experiment's own cluster
elif self.cluster:
cluster = Cluster(running_clusters=clusters, **self.cluster)
self.has_dedicated_cluster = True
# use a default cluster
else:
cluster = Cluster(name=self.name_hyphenated)
self.has_dedicated_cluster = True
# start cluster if not up yet
if start and not cluster.started:
cluster.create()
# cluster up but not in good state
elif clusters[cluster.name_hyphenated]["status"] != "RUNNING":
raise util.TFCliError("ERROR: Given cluster {} is not in status RUNNING (but in status {})!".
format(cluster.name_hyphenated, clusters[cluster.name_hyphenated]["status"]))
# check cluster vs experiment setup and warn or abort if something doesn't match
if self.run_mode != "distributed" and cluster.num_nodes > 1:
warn("WARNING: Running non-distributed experiment on cluster with more than 1 node. Make sure you are "
"not wasting costly resources!")
num_gpus = cluster.num_nodes * cluster.gpus_per_node
if self.run_mode == "distributed" and self.num_workers + self.num_parameter_servers > num_gpus:
warn("WARNING: Running distributed experiment with {} processes total on cluster with only {} GPUs! "
"This could lead to K8s scheduling problems.".
format(self.num_workers + self.num_parameter_servers, num_gpus))
print("+ Setting up credentials to connect to cluster {}.".format(cluster.name_hyphenated))
util.syscall("gcloud container clusters get-credentials {} --zone {} --project {}".
format(cluster.name_hyphenated, cluster.location, project_id))
print("+ Setting kubectl to point to cluster {}.".format(cluster.name_hyphenated))
util.syscall("kubectl config set-cluster {} --server={}".
format(cluster.name_hyphenated, clusters[cluster.name_hyphenated]["master_ip"]))
self.cluster = cluster.get_spec()
return cluster
def start(self, project_id, resume=False, cluster=None):
"""
Starts the Experiment in the cloud (using kubectl).
The respective cluster is started (if it's not already running).
Args:
project_id (str): The remote gcloud project-ID.
resume (bool): Whether we are resuming an already started (and paused) experiment.
cluster (str): The name of the cluster to use (will be started if not already running). None for
using the Experiment's own cluster or - if not given either - a default cluster.
"""
# Update our cluster spec
cluster = self.setup_cluster(cluster, project_id, start=False if resume else True)
# Rewrite our json file.
self.status = "running"
self.write_json_file(file=self.path+self.running_json_file)
# Render the k8s yaml config file for the experiment.
print("+ Generating experiment's k8s config file.")
#gpus_per_container = 0
if self.run_mode == "distributed":
gpus_per_container = int(cluster.num_gpus /
(self.num_workers + self.num_parameter_servers))
else:
gpus_per_container = cluster.gpus_per_node
util.write_kubernetes_yaml_file(self, self.k8s_config, gpus_per_container)
print("+ Deleting old Kubernetes Workloads.")
_ = util.syscall("kubectl delete -f {}".format(self.k8s_config), return_outputs="as_str")
# TODO: wipe out previous experiments' results
# Copy all required files to all nodes' disks.
print("+ Copying all necessary config files to all nodes ...")
# - create /experiment directory on primary disk
# - change permissions on the experiment's folder
# - copy experiment-running config file into /experiment directory
cluster.ssh_parallel("sudo mount --make-shared /mnt/stateful_partition/", # make partition shared
"sudo mkdir /mnt/stateful_partition/experiment/ ; " # create experiment dir
"sudo chmod -R 0777 /mnt/stateful_partition/experiment/", # make writable
# copy experiment's json file into new dir
[self.path+self.running_json_file, "_NODE_:/mnt/stateful_partition/experiment/."],
silent=False)
# Create kubernetes services (which will start the experiment).
print("+ Creating new Kubernetes Services and ReplicaSets.")
util.syscall("kubectl create -f {}".format(self.k8s_config))
def pause(self, project_id):
"""
Pauses the already running Experiment.
Args:
project_id (str): The remote gcloud project-ID.
"""
_ = self.setup_cluster(cluster=None, project_id=project_id)
# delete the kubernetes workloads
print("+ Deleting Kubernetes Workloads.")
util.syscall("kubectl delete -f {}".format(self.k8s_config))
self.status = "paused"
self.write_json_file(file=self.path+self.running_json_file)
print("+ Experiment is paused. Resume with `experiment start --resume -e {}`.".format(self.name_hyphenated))
def stop(self, no_download=False):
"""
Stops an already running Experiment by deleting the Kubernetes workload. If no_download is set to False
(default), will download all results before stopping. If the cluster that the experiment runs on
is dedicated to this experiment, will also delete the cluster.
Args:
no_download (bool): Whether to not(!) download the experiment's results so far (default: False).
"""
# download data before stopping
if not no_download:
self.download()
if self.status == "stopped":
warn("WARNING: Experiment seems to be stopped already. Trying anyway. ...")
# figure out whether cluster was created along with experiment
# if yes: shut down cluster
if self.has_dedicated_cluster:
cluster = get_cluster_from_string(self.cluster.get("name"))
print("+ Shutting down experiment's cluster {}.".format(cluster.name_hyphenated))
cluster.delete()
# if not: simply stop k8s jobs
else:
print("+ Deleting Kubernetes Workloads.")
_ = util.syscall("kubectl delete -f {}".format(self.k8s_config), return_outputs=True)
self.status = "stopped"
self.write_json_file(file=self.path+self.running_json_file)
def download(self):
"""
Downloads the experiment's results (model checkpoints and tensorboard summary files) so far.
"""
cluster = get_cluster_from_string(self.cluster.get("name"))
cluster.ssh_parallel(["_NODE_:/mnt/stateful_partition/experiment/{}*".
format("results/" if self.run_mode != "distributed" else ""),
self.path+"results/."])
def write_json_file(self, file=None):
"""
Writes all the Experiment's settings to disk as a json file.
Args:
file (str): The filename to use. If None, will use the Experiment's filename.
"""
with open(self.file if not file else file, "w") as f:
json.dump(self.__dict__, f, indent=4)
def get_experiment_from_string(experiment, running=False):
"""
Returns an Experiment object given a string of either a json file or a name of an already existing experiment.
Args:
experiment (str): The string to look for (either local json file or local experiment's name)
running (bool): Whether this experiment is already running.
Returns:
The found Experiment object.
"""
file = "experiments/{}/{}.json". \
format(experiment, "experiment" if not running else "experiment_running")
if not os.path.exists(file):
if running:
raise util.TFCliError("ERROR: Experiment {} does not seem to be running right now! You have to create, then"
"start it with 'experiment new/start'.".format(experiment))
else:
raise util.TFCliError("ERROR: Experiment {} not found! You have to create it first with 'experiment new'.".
format(experiment))
# get the experiment object from its json file
with open(file) as f:
spec = json.load(f)
exp_obj = Experiment(**spec)
return exp_obj
def get_local_experiments(as_objects=False):
"""
Args:
as_objects (bool): Whether to return a list of strings (names) or actual Experiment objects.
Returns: A list of all Experiment names/objects that already exist in this project.
"""
content = os.listdir("experiments/")
experiments = [get_experiment_from_string(c) if as_objects else c
for c in content if os.path.isdir("experiments/"+c)]
return experiments
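# The saver_frequency / summary_frequency values documented in Experiment's
# __init__ combine an integer with a one-letter unit ("600s", "100e", "120s").
# A hedged helper showing how such a spec could be split; tensorforce_client
# may parse these strings differently internally.
def split_frequency(frequency):
    """Return (value, unit) for frequency specs like '600s', '100e' or '120t'."""
    match = re.match(r'^(\d+)([set])$', frequency)
    if not match:
        raise ValueError("Invalid frequency spec: {}".format(frequency))
    return int(match.group(1)), match.group(2)

# split_frequency("600s") -> (600, 's')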
|