content stringlengths 5 1.05M |
|---|
def test_eth_blockNumber(rpc_client):
result = rpc_client('eth_blockNumber')
assert result == 0
|
import unittest
from appium import webdriver
from config.context import Context
from libs.finder import By
from libs.pageutil import AndroidPageUtil, IOSPageUtil
class AndroidBaseTest(unittest.TestCase):
    """Base TestCase that starts an Android Appium session before each test
    and tears it down afterwards.

    Capabilities come from the project config object, falling back to the
    hard-coded defaults when an attribute is absent.
    """

    def setUp(self):
        """Build desired capabilities and open the remote Appium session."""
        desired_caps = {}
        android_config = Context.get_instance().get_android_desired_caps()
        desired_caps['platformVersion'] = getattr(android_config, 'platformVersion', '7.1')
        desired_caps['deviceName'] = getattr(android_config, 'deviceName', 'Genymotion')
        desired_caps['platformName'] = getattr(android_config, 'platformName', 'Android')
        desired_caps['noReset'] = getattr(android_config, 'noReset', False)
        desired_caps['app'] = getattr(android_config, 'app', '/Users/qingge/Downloads/example-debug.apk')
        # BUG FIX: this previously read the 'noReset' attribute, so any config
        # that defined noReset silently overrode the keyboard capability.
        desired_caps['unicodeKeyboard'] = getattr(android_config, 'unicodeKeyboard', True)
        desired_caps['browser'] = getattr(android_config, 'browser', 'Chrome')
        self.wd = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)
        self.wd.implicitly_wait(60)
        # Publish the driver so page objects and locators can reach it.
        Context.get_instance().set_android_wd(self.wd)
        By.set_wd(self.wd)
        self.page_util = AndroidPageUtil(self.wd)

    def tearDown(self):
        """Close the Appium session."""
        self.wd.quit()
class IOSBaseTest(unittest.TestCase):
    """Base TestCase that starts an iOS Appium session before each test
    and tears it down afterwards.
    """

    def setUp(self):
        """Build desired capabilities and open the remote Appium session."""
        # try:
        #     self.wd.quit()
        # except Exception:
        #     pass
        desired_caps = {}
        # NOTE(review): this fetches the *Android* desired caps for an iOS
        # run — presumably Context offers an iOS-specific accessor; confirm
        # against the Context class before relying on these values.
        ios_config = Context.get_instance().get_android_desired_caps()
        desired_caps['platformName'] = getattr(ios_config, 'platformName', 'IOS')
        desired_caps['platformVersion'] = getattr(ios_config, 'platformVersion', '11.4')
        desired_caps['deviceName'] = getattr(ios_config, 'deviceName', 'iPhone Simulator')
        desired_caps['app'] = getattr(ios_config, 'app', '/Users/qingge/Downloads/PlanckDemoExample.app')
        desired_caps['noReset'] = getattr(ios_config, 'noReset', False)
        desired_caps["unicodeKeyboard"] = getattr(ios_config, 'unicodeKeyboard', True)
        desired_caps['autoAcceptAlerts'] = getattr(ios_config, 'autoAcceptAlerts', True)
        desired_caps['connectHardwareKeyboard'] = getattr(ios_config, 'connectHardwareKeyboard', False)
        desired_caps['showXcodeLog'] = getattr(ios_config, 'showXcodeLog', True)
        # Note: iOS Appium server runs on port 3274 here (Android uses 4723).
        self.wd = webdriver.Remote('http://127.0.0.1:3274/wd/hub', desired_caps)
        self.wd.implicitly_wait(60)
        # Publish the driver so page objects and locators can reach it.
        Context.get_instance().set_ios_wd(self.wd)
        By.set_wd(self.wd)
        self.page_util = IOSPageUtil(self.wd)

    def tearDown(self):
        """Close the Appium session."""
        self.wd.quit()
|
#!/usr/bin/env python
"""
__ __ __
.----.-----.---.-.-----| |_.-----.----.--------.-----| | |_
| __| _ | _ |__ --| _| -__| _| | -__| | _|
|____|_____|___._|_____|____|_____|__| |__|__|__|_____|__|____|
--IPython Shell for Interactive Exploration--------------------
Read, write, or fill ARM memory. Numbers are hex. Trailing _ is
short for 0000, leading _ adds 'pad' scratchpad RAM offset.
Internal _ are ignored so you can use them as separators.
rd 1ff_ 100
wr _ 1febb
ALSO: rdw, wrb, fill, watch, find
bitset, bitfuzz, peek, poke, read_block
Disassemble, assemble, and invoke ARM assembly:
dis 3100
asm _4 mov r3, #0x14
dis _4 10
ea mrs r0, cpsr; ldr r1, =0xaa000000; orr r0, r1
ALSO: tea, blx, assemble, disassemble, evalasm
Or compile and invoke C++ code with console output:
ec 0x42
ec ((uint16_t*)pad)[40]++
ecc println("Hello World!")
ALSO: console, compile, evalc
Live code patching and tracing:
hook -Rrcm "Eject button" 18eb4
ALSO: ovl, wrf, asmf, ivt
You can use integer globals in C++ and ASM snippets,
or define/replace a named C++ function:
fc uint32_t* words = (uint32_t*) buffer
buffer = pad + 0x100
ec words[0] += 0x50
asm _ ldr r0, =buffer; bx lr
You can script the device's SCSI interface too:
sc c ac # Backdoor signature
sc 8 ff 00 ff # Undocumented firmware version
ALSO: reset, eject, sc_sense, sc_read, scsi_in, scsi_out
With a hardware serial port, you can backdoor the 8051:
bitbang -8 /dev/tty.usb<tab>
wx8 4b50 a5
rx8 4d00
Happy hacking! -- Type 'thing?' for help on 'thing' or
~MeS`14 '?' for IPython, '%h' for this again.
"""
from IPython.terminal.embed import InteractiveShellEmbed
from shell_magics import ShellMagics
from remote import Device
import shell_namespace
messages = ''

# Make a global device, but only give it to the user namespace.
# Make it default by assigning it to 'd', our current device.
try:
    shell_namespace.d = shell_namespace.d_remote = Device()
# BUG FIX: `except IOError, e:` is Python-2-only syntax; `as` is valid on
# Python 2.6+ and Python 3, keeping the file parseable everywhere.
except IOError as e:
    messages += "\n-------- There is NO DEVICE available! --------\n"
    messages += "\n%s\n\n" % e
    messages += "--> Try again to attach via USB: %reset\n"
    messages += "--> Reattach over bitbang serial: %bitbang -a /dev/tty.usb<tab>\n"
    shell_namespace.d = shell_namespace.d_remote = None

# Make a shell that feels like a debugger
ipy = InteractiveShellEmbed(user_ns=shell_namespace.__dict__)
shell_namespace.ipy = ipy
ipy.register_magics(ShellMagics)
# '%h' re-prints the module docstring banner.
ipy.register_magic_function(lambda _: ipy.write(__doc__), magic_name='h')
ipy.alias_manager.define_alias('git', 'git')
ipy.alias_manager.define_alias('make', 'make')

# Hello, tiny world
ipy.mainloop(display_banner=__doc__ + messages)
|
from .auth import auth_api as Authentication
from .notes import notes_api as Notes
from .export import export_api as Export |
# SPDX-License-Identifier: Apache-2.0
import os
import subprocess
import time
import unittest
import uuid
from hfc.fabric.client import Client
from hfc.fabric.user import create_user
from test.integration.config import E2E_CONFIG
class BaseTestCase(unittest.TestCase):
    """
    Base class for test cases.
    All test cases can feel free to implement this.
    """

    def setUp(self):
        """Configure GOPATH/config and boot a clean docker test network."""
        self.gopath_bak = os.environ.get('GOPATH', '')
        chaincode_dir = os.path.join(os.path.dirname(__file__),
                                     "../fixtures/chaincode")
        os.environ['GOPATH'] = os.path.abspath(os.path.normpath(chaincode_dir))
        artifacts = E2E_CONFIG['test-network']['channel-artifacts']
        self.channel_tx = artifacts['channel.tx']
        self.config_yaml = artifacts['config_yaml']
        self.channel_profile = artifacts['channel_profile']
        self.compose_file_path = \
            E2E_CONFIG['test-network']['docker']['compose_file_tls']
        self.client = Client('test/fixtures/network.json')
        self.channel_name = "businesschannel"  # default application channel
        self.user = self.client.get_user('org1.example.com', 'Admin')
        self.assertIsNotNone(self.user, 'org1 admin should not be None')
        # Boot up the testing network from a clean slate
        self.shutdown_test_env()
        self.start_test_env()
        time.sleep(1)

    def tearDown(self):
        """Give the network a moment to settle, then shut it down."""
        time.sleep(1)
        self.shutdown_test_env()

    def _compose(self, *actions):
        # Every docker-compose invocation uses the same compose file.
        cli_call(["docker-compose", "-f", self.compose_file_path] + list(actions))

    def check_logs(self):
        """Dump the tail of the docker-compose logs."""
        self._compose("logs", "--tail=200")

    def start_test_env(self):
        """Bring the docker test network up in detached mode."""
        self._compose("up", "-d")

    def shutdown_test_env(self):
        """Tear the docker test network down."""
        self._compose("down")
class ChannelEventHubTestCase(BaseTestCase):
    """BaseTestCase variant that records transaction and chaincode events
    delivered through channel event hubs, keyed by transaction id.
    """

    # NOTE(review): class-level mutable dict — shared across every instance
    # and subclass; presumably intentional for these tests, but confirm
    # before reusing this class elsewhere.
    evts = {}

    def onTxEvent(self, tx_id, tx_status, block_number):
        """Append a transaction event record for an already-registered tx_id."""
        if tx_id in self.evts:
            if 'txEvents' not in self.evts[tx_id]:
                self.evts[tx_id]['txEvents'] = []
            self.evts[tx_id]['txEvents'] += [{
                'tx_status': tx_status,
                'block_number': block_number,
            }]

    def create_onCcEvent(self, _uuid):
        """Build a chaincode-event callback bound to one hub registration.

        The returned callable records each event under its tx_id and, when
        the event belongs to the registration's own tx (self.evt_tx_id),
        unregisters that registration and disconnects its hub.
        Note: reads self.evt_tx_id, which is set by registerChaincodeEvent.
        """
        class CCEvent(object):
            def __init__(self, _uuid, evts, evt_tx_id):
                self.uuid = _uuid
                self.evts = evts  # keep reference, no copy
                self.evt_tx_id = evt_tx_id

            def cc_event(self, cc_event, block_number, tx_id, tx_status):
                if tx_id in self.evts:
                    if 'txEvents' not in self.evts[tx_id]:
                        self.evts[tx_id]['txEvents'] = []
                    self.evts[tx_id]['txEvents'] += [{
                        'cc_event': cc_event,
                        'tx_status': tx_status,
                        'block_number': block_number,
                    }]
                # unregister chaincode event if same tx_id
                # and disconnect as chaincode evt are unregister False
                if tx_id == self.evt_tx_id:
                    for x in self.evts[tx_id]['peer']:
                        if x['uuid'] == self.uuid:
                            x['channel_event_hub'].\
                                unregisterChaincodeEvent(x['cr'])
                            x['channel_event_hub'].disconnect()
        o = CCEvent(_uuid, self.evts, self.evt_tx_id)
        return o.cc_event

    def registerChaincodeEvent(self, tx_id, cc_name, cc_pattern,
                               channel_event_hub):
        """Register a chaincode-event listener on a hub and track the
        registration (uuid, hub, registration handle) under tx_id so the
        callback can later unregister itself.
        """
        _uuid = uuid.uuid4().hex
        self.evt_tx_id = tx_id
        cr = channel_event_hub.registerChaincodeEvent(
            cc_name, cc_pattern, onEvent=self.create_onCcEvent(_uuid))
        if self.evt_tx_id not in self.evts:
            self.evts[self.evt_tx_id] = {'peer': []}
        self.evts[self.evt_tx_id]['peer'] += [
            {
                'uuid': _uuid,
                'channel_event_hub': channel_event_hub,
                'cr': cr
            }
        ]
# This should be deprecated, and use client.get_user() API instead
def get_peer_org_user(org, user, state_store):
    """Loads the requested user for a given peer org
    and returns a user object.
    """
    user_cfg = E2E_CONFIG['test-network'][org]['users'][user]
    msp_base = os.path.join(
        os.getcwd(),
        'test/fixtures/e2e_cli/crypto-config/peerOrganizations/{0}'
        '/users/{1}@{0}/msp/'.format(org, user)
    )
    # Private key and signing certificate live under the user's MSP tree.
    key_path = os.path.join(msp_base, 'keystore/', user_cfg['private_key'])
    cert_path = os.path.join(msp_base, 'signcerts/', user_cfg['cert'])
    msp_id = E2E_CONFIG['test-network'][org]['mspid']
    return create_user(user, org, state_store, msp_id, key_path, cert_path)
def get_orderer_org_user(org='example.com', user='Admin', state_store=None):
    """Loads the admin user for a given orderer org and
    returns an user object.
    Currently, orderer org only has Admin
    """
    orderer_cfg = E2E_CONFIG['test-network']['orderer']
    msp_path = os.path.join(
        os.getcwd(),
        'test/fixtures/e2e_cli/crypto-config/ordererOrganizations/'
        'example.com/users/Admin@example.com/msp/')
    # Private key and signing certificate live under the admin's MSP tree.
    key_path = os.path.join(
        msp_path, 'keystore/', orderer_cfg['users'][user]['private_key'])
    cert_path = os.path.join(
        msp_path, 'signcerts', orderer_cfg['users'][user]['cert'])
    return create_user(user, org, state_store, orderer_cfg['mspid'],
                       key_path, cert_path)
def cli_call(arg_list, expect_success=True, env=None):
    """Executes a CLI command in a subprocess and return the results.

    Args:
        arg_list: a list command arguments
        expect_success: use False to return even if an error occurred
            when executing the command
        env: mapping of environment variables for the child process;
            defaults to a fresh copy of the current environment
    Returns: (bytes, bytes, int) output message, error message, return code
    Raises:
        subprocess.CalledProcessError: non-zero exit while expect_success
    """
    # BUG FIX: the old default `env=os.environ.copy()` was evaluated once at
    # import time, so later environment changes (e.g. the GOPATH set in
    # BaseTestCase.setUp) were never visible to subprocesses. Copy per call.
    if env is None:
        env = os.environ.copy()
    p = subprocess.Popen(arg_list, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, env=env)
    output, error = p.communicate()
    if p.returncode != 0:
        if output:
            print("Output:\n" + str(output))
        if error:
            print("Error Message:\n" + str(error))
        if expect_success:
            raise subprocess.CalledProcessError(
                p.returncode, arg_list, output)
    return output, error, p.returncode
|
# -*- coding: utf-8 -*-
"""Class for the ogs KINETRIC REACTION file."""
from ogs5py.fileclasses.base import BlockFile
class KRC(BlockFile):
    """
    Class for the ogs KINETRIC REACTION file.

    Parameters
    ----------
    task_root : str, optional
        Path to the destiny model folder.
        Default: cwd+"ogs5model"
    task_id : str, optional
        Name for the ogs task.
        Default: "model"

    Notes
    -----
    Main-Keywords (#):
        - MICROBE_PROPERTIES
        - REACTION
        - BLOB_PROPERTIES
        - KINREACTIONDATA

    Sub-Keywords ($) per Main-Keyword:
        - MICROBE_PROPERTIES

            - MICROBENAME
            - _drmc__PARAMETERS
            - MONOD_REACTION_NAME

        - REACTION

            - NAME
            - TYPE
            - BACTERIANAME
            - EQUATION
            - RATECONSTANT
            - GROWTH
            - MONODTERMS
            - THRESHHOLDTERMS
            - INHIBITIONTERMS
            - PRODUCTIONTERMS
            - PRODUCTIONSTOCH
            - BACTERIAL_YIELD
            - ISOTOPE_FRACTIONATION
            - BACTERIA_SPECIFIC_CAPACITY
            - TEMPERATURE_DEPENDENCE
            - _drmc_
            - STANDARD_GIBBS_ENERGY
            - EXCHANGE_PARAMETERS
            - SORPTION_TYPE
            - NAPL_PROPERTIES
            - REACTION_ORDER
            - MINERALNAME
            - CHEMAPPNAME
            - EQUILIBRIUM_CONSTANT
            - RATE_EXPONENTS
            - REACTIVE_SURFACE_AREA
            - PRECIPITATION_BY_BASETERM_ONLY
            - PRECIPITATION_FACTOR
            - PRECIPITATION_EXPONENT
            - BASETERM
            - MECHANISMTERM
            - SWITCH_OFF_GEOMETRY

        - BLOB_PROPERTIES

            - NAME
            - D50
            - DM
            - DS
            - UI
            - NAPL_CONTENT_INI
            - NAPL_CONTENT_RES
            - GRAIN_SPHERE_RATIO
            - TORTUOSITY
            - LENGTH
            - CALC_SHERWOOD
            - CALC_SHERWOOD_MODIFIED
            - SHERWOOD_MODEL
            - GEOMETRY
            - GAS_DISSOLUTION
            - INTERFACIAL_AREA

        - KINREACTIONDATA

            - SOLVER_TYPE
            - RELATIVE_ERROR
            - MIN_TIMESTEP
            - INITIAL_TIMESTEP
            - BACTERIACAPACITY
            - MIN_BACTERIACONC
            - MIN_CONCENTRATION_REPLACE
            - SURFACES
            - ALLOW_REACTIONS
            - NO_REACTIONS
            - COPY_CONCENTRATIONS
            - LAGNEAU_BENCHMARK
            - SCALE_DCDT
            - SORT_NODES
            - OMEGA_THRESHOLD
            - REACTION_DEACTIVATION
            - DEBUG_OUTPUT
            - ACTIVITY_MODEL

    Standard block:
        None

    Keyword documentation:
        https://ogs5-keywords.netlify.com/ogs/wiki/public/doc-auto/by_ext/krc

    Reading routines:
        https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp

        MICROBE_PROPERTIES :
            https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp#L232
        REACTION :
            https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp#L1549
        BLOB_PROPERTIES :
            https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp#L2622
        KINREACTIONDATA :
            https://github.com/ufz/ogs5/blob/master/FEM/rf_kinreact.cpp#L3185

    See Also
    --------
    add_block
    """

    # Main keywords recognized in a .krc file.
    MKEYS = [
        "MICROBE_PROPERTIES",
        "REACTION",
        "BLOB_PROPERTIES",
        "KINREACTIONDATA",
    ]
    # these are not sorted at the moment
    # NOTE: each inner list is index-aligned with the MKEYS entry above it.
    SKEYS = [
        [  # MICROBE_PROPERTIES
            "MICROBENAME",
            "_drmc__PARAMETERS",
            "MONOD_REACTION_NAME",
        ],
        [  # REACTION
            "NAME",
            "TYPE",
            "BACTERIANAME",
            "EQUATION",
            "RATECONSTANT",
            "GROWTH",
            "MONODTERMS",
            "THRESHHOLDTERMS",
            "INHIBITIONTERMS",
            "PRODUCTIONTERMS",
            "PRODUCTIONSTOCH",
            "BACTERIAL_YIELD",
            "ISOTOPE_FRACTIONATION",
            "BACTERIA_SPECIFIC_CAPACITY",
            "TEMPERATURE_DEPENDENCE",
            "_drmc_",
            "STANDARD_GIBBS_ENERGY",
            "EXCHANGE_PARAMETERS",
            "SORPTION_TYPE",
            "NAPL_PROPERTIES",
            "REACTION_ORDER",
            "MINERALNAME",
            "CHEMAPPNAME",
            "EQUILIBRIUM_CONSTANT",
            "RATE_EXPONENTS",
            "REACTIVE_SURFACE_AREA",
            "PRECIPITATION_BY_BASETERM_ONLY",
            "PRECIPITATION_FACTOR",
            "PRECIPITATION_EXPONENT",
            "BASETERM",
            "MECHANISMTERM",
            "SWITCH_OFF_GEOMETRY",
        ],
        [  # BLOB_PROPERTIES
            "NAME",
            "D50",
            # "CALC_SHERWOOD",
            "DM",
            "DS",
            "UI",
            "NAPL_CONTENT_INI",
            "NAPL_CONTENT_RES",
            "GRAIN_SPHERE_RATIO",
            "TORTUOSITY",
            "LENGTH",
            "CALC_SHERWOOD",
            "CALC_SHERWOOD_MODIFIED",
            "SHERWOOD_MODEL",
            "GEOMETRY",
            "GAS_DISSOLUTION",
            "INTERFACIAL_AREA",
        ],
        [  # KINREACTIONDATA
            "SOLVER_TYPE",
            "RELATIVE_ERROR",
            "MIN_TIMESTEP",
            "INITIAL_TIMESTEP",
            "BACTERIACAPACITY",
            "MIN_BACTERIACONC",
            "MIN_CONCENTRATION_REPLACE",
            "SURFACES",
            "ALLOW_REACTIONS",
            "NO_REACTIONS",
            "COPY_CONCENTRATIONS",
            "LAGNEAU_BENCHMARK",
            "SCALE_DCDT",
            "SORT_NODES",
            "OMEGA_THRESHOLD",
            "REACTION_DEACTIVATION",
            "DEBUG_OUTPUT",
            "ACTIVITY_MODEL",
            "REALATIVE_ERROR",  # really?
            "MAX_TIMESTEP",  # really?
        ],
    ]

    # no standard block content for this file type
    STD = {}

    def __init__(self, **OGS_Config):
        """Initialize the KRC file; sets the '.krc' file extension."""
        super(KRC, self).__init__(**OGS_Config)
        self.file_ext = ".krc"
|
"""
LeetCode Problem 122. Best Time to Buy and Sell Stock II
Link: https://leetcode.com/problems/best-time-to-buy-and-sell-stock-ii/
Written by: Mostofa Adib Shakib
Language: Python
"""
class Solution:
    """Solution for LeetCode 122: Best Time to Buy and Sell Stock II."""

    # BUG FIX: the annotation previously used `List[int]` without importing
    # typing.List, raising NameError when the class body was evaluated.
    def maxProfit(self, prices: list[int]) -> int:
        """Return the maximum profit from unlimited buy/sell transactions.

        Greedy: the optimal total equals the sum of every positive
        day-over-day price difference.

        prices: daily stock prices; returns 0 for fewer than two days.
        """
        if len(prices) < 2:  # special case: no transaction possible
            return 0
        return sum(
            prices[i] - prices[i - 1]
            for i in range(1, len(prices))
            if prices[i] > prices[i - 1]
        )
# THIS FILE IS SAFE TO EDIT. It will not be overwritten when rerunning go-raml.
from gridcapacity.models import NodeRegistration
from flask import request, jsonify
from io import StringIO
def ListCapacityHandler():
    """List registered capacity nodes, filtered by the request parameters.

    Reads optional filters (country, mru, cru, hru, sru, farmer) from the
    request values, searches NodeRegistration, and returns the matching
    nodes as JSON with 'id'/'farmer' renamed to 'node_id'/'farmer_id'.
    """
    country = request.values.get('country')
    mru = request.values.get('mru')
    cru = request.values.get('cru')
    hru = request.values.get('hru')
    sru = request.values.get('sru')
    farmer = request.values.get('farmer')
    # (removed a dead `nodes = []` that was immediately overwritten)
    nodes = NodeRegistration.search(country, mru, cru, hru, sru, farmer)
    output = []
    for node in nodes:
        # Use the farmer's coordinates as the node location when available.
        if node.farmer.location and node.farmer.location.latitude and node.farmer.location.longitude:
            node.location = node.farmer.location
        d = node.ddict_hr
        d['node_id'] = d.pop('id')
        d['farmer_id'] = d.pop('farmer')
        output.append(d)
    return jsonify(output)
|
import numpy as np
import simtk.unit as unit
def get_n_lipids_from_size(total_area=10000 * unit.angstroms**2, p_POPC=0.5, p_DPPC=0.3, p_DOPC=0.2):
    """Estimate how many POPC/DPPC/DOPC lipids fill a membrane of total_area.

    The mole fractions are normalised by the smallest one to form a small
    repeating unit, whose area then determines the number of units fitting
    in total_area.  The per-lipid areas carry a (4 / pi) factor — presumably
    a circle-to-square packing correction; TODO confirm.

    Prints a summary (as before) and now also returns the integer counts
    (n_POPC, n_DPPC, n_DOPC); previously the function returned None.
    """
    area_POPC = (4 / np.pi) * 68.3 * unit.angstroms**2
    area_DPPC = (4 / np.pi) * 63 * unit.angstroms**2
    area_DOPC = (4 / np.pi) * 69.7 * unit.angstroms**2
    # Hoisted: the minimum fraction was previously recomputed three times.
    p_min = np.min([p_POPC, p_DPPC, p_DOPC])
    r_POPC = p_POPC / p_min
    r_DPPC = p_DPPC / p_min
    r_DOPC = p_DOPC / p_min
    # Area of one repeating "unit" of lipids in the requested ratio.
    area_unidad = r_POPC * area_POPC + r_DPPC * area_DPPC + r_DOPC * area_DOPC
    no_unidades_totales = total_area / area_unidad
    print(no_unidades_totales)
    n_POPC = int(np.round(no_unidades_totales * r_POPC))
    n_DPPC = int(np.round(no_unidades_totales * r_DPPC))
    n_DOPC = int(np.round(no_unidades_totales * r_DOPC))
    n_total_lipids = n_POPC + n_DPPC + n_DOPC
    print("The n_POPC in the membrane is", n_POPC, "(", n_POPC / n_total_lipids, ")")
    print("The n_DPPC in the membrane is", n_DPPC, "(", n_DPPC / n_total_lipids, ")")
    print("The n_DOPC in the membrane is", n_DOPC, "(", n_DOPC / n_total_lipids, ")")
    print("The result area is", n_POPC * area_POPC + n_DPPC * area_DPPC + n_DOPC * area_DOPC)
    return n_POPC, n_DPPC, n_DOPC
|
# xpyBuild - eXtensible Python-based Build System
#
# Copyright (c) 2013 - 2018 Software AG, Darmstadt, Germany and/or its licensors
# Copyright (c) 2013 - 2018 Ben Spiller and Matthew Johnson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Id: root.xpybuild.py 301527 2017-02-06 15:31:43Z matj $
# Requires: Python 2.7
# xpybuild release build file. Creates pydoc API docs and versioned zip file for releases.
from propertysupport import *
from buildcommon import *
from pathsets import *
from targets.zip import Zip
from targets.copy import Copy
from targets.writefile import WriteFile
from targets.custom import CustomCommand
from utils.process import call
# Build properties: epydoc location, output dir, and the release version.
requireXpyBuildVersion('1.12')
# Need the caller to provide the path to epydoc
definePathProperty('EPYDOC_ROOT', None, mustExist=True) # parent of the /lib directory; used for local builds but not Travis
defineOutputDirProperty('OUTPUT_DIR', 'release-output')
# Read the release version string once at build-file parse time.
with open('XPYBUILD_VERSION') as f: defineStringProperty('VERSION', f.read().strip())
def markdownToTxt(f):
    """Return the filename *f* with every '.md' occurrence replaced by '.txt'."""
    return f.replace('.md', '.txt')
# Generate the pydoc API documentation with epydoc over all library sources
# (excluding the build files, tests and internals).
CustomCommand('${OUTPUT_DIR}/doc/api/',
    command=[
        sys.executable,
        '-m', 'epydoc.cli',
        '-o', CustomCommand.TARGET,
        '--no-private',
        '-v',
        '--name', 'xpybuild v${VERSION}',
        '--fail-on-docstring-warning',
        CustomCommand.DEPENDENCIES
    ],
    dependencies=FindPaths('./', includes='**/*.py', excludes=['**/root.xpybuild.py', 'tests/**', 'internal/**', 'xpybuild.py']),
    env={'PYTHONPATH' : PathSet('${EPYDOC_ROOT}/lib')}
)

# Zip all the distributables into a release zip file.
Zip('${OUTPUT_DIR}/xpybuild_${VERSION}.zip', [
    # Generated API docs land under doc/api/ inside the zip.
    AddDestPrefix('doc/api/', FindPaths(DirGeneratedByTarget('${OUTPUT_DIR}/doc/api/'))),
    # Markdown docs are shipped renamed to .txt.
    AddDestPrefix('doc/', MapDest(markdownToTxt, FindPaths('doc/', includes=['*.md']))),
    FindPaths('./', includes='**/*.py', excludes=['tests/**', 'root.xpybuild.py']),
    'XPYBUILD_VERSION',
    MapDest(markdownToTxt, 'README.md'),
    'LICENSE.txt',
])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-02 01:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.2): redefine Experiment.status choices
    and default.  Generated migrations normally should not be hand-edited.
    """

    dependencies = [
        ('core', '0012_remove_null_urls'),
    ]

    operations = [
        migrations.AlterField(
            model_name='experiment',
            name='status',
            # b'' choice values store bytestrings (Python 2 era codebase).
            field=models.CharField(choices=[(b'INACTIVE', 'Not active'), (b'ACTIVE', 'Active, no round in progress'), (b'ROUND_IN_PROGRESS', 'Round in progress'), (b'COMPLETED', 'Completed'), (b'PUBLISHED', 'Published')], default=b'INACTIVE', max_length=32),
        ),
    ]
|
def is_iree_enabled():
    """Return True if the IREE runtime and compiler packages are importable.

    Imports are performed lazily so the check costs nothing when unused.
    """
    try:
        import iree.runtime  # noqa: F401
        import iree.compiler  # noqa: F401
    # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit
    # and masked real errors inside the packages; only ImportError means
    # "IREE is not installed".
    except ImportError:
        return False
    return True
|
#!/usr/bin/env python3
"""
Geoscience Australia - Python Geodesy Package
Constants Module
"""
from math import sqrt
from datetime import date
# Speed of light in a vacuum (km/s)
c_vac = 299792.458
# Central meridian scale factor (matches the UTM projection defined below)
k_0 = 0.9996
# Ellipsoid Constants
class Ellipsoid(object):
    """Reference ellipsoid defined by its semi-major axis and inverse
    flattening; all other shape parameters are derived on construction.
    """

    def __init__(self, semimaj, inversef):
        flattening = 1 / inversef
        self.semimaj = semimaj
        self.inversef = inversef
        self.f = flattening
        # Semi-minor axis
        self.semimin = float(semimaj * (1 - flattening))
        # First and second eccentricity squared, and first eccentricity
        self.ecc1sq = float(flattening * (2 - flattening))
        self.ecc2sq = float(self.ecc1sq / (1 - self.ecc1sq))
        self.ecc1 = sqrt(self.ecc1sq)
        # Third flattening and its square
        self.n = float(flattening / (2 - flattening))
        self.n2 = self.n ** 2
# Standard ellipsoids: Ellipsoid(semi-major axis, inverse flattening)
# Geodetic Reference System 1980 (http://www.epsg-registry.org/export.htm?gml=urn:ogc:def:ellipsoid:EPSG::7019)
grs80 = Ellipsoid(6378137, 298.257222101)
# World Geodetic System 1984 (http://www.epsg-registry.org/export.htm?gml=urn:ogc:def:ellipsoid:EPSG::7030)
wgs84 = Ellipsoid(6378137, 298.257223563)
# Australian National Spheroid (http://www.epsg-registry.org/export.htm?gml=urn:ogc:def:ellipsoid:EPSG::7003)
ans = Ellipsoid(6378160, 298.25)
# International (Hayford) 1924 (http://www.epsg-registry.org/export.htm?gml=urn:ogc:def:ellipsoid:EPSG::7022)
intl24 = Ellipsoid(6378388, 297)
# Projections
class Projection(object):
    """Map projection/zone parameters (see the UTM instance below)."""

    def __init__(self, falseeast, falsenorth, cmscale, zonewidth, initialcm):
        # falseeast/falsenorth: offsets added to coordinates (presumably
        # metres — confirm); cmscale: central meridian scale factor;
        # zonewidth/initialcm: zone width and first central meridian
        # (presumably degrees — confirm).
        self.falseeast = falseeast
        self.falsenorth = falsenorth
        self.cmscale = cmscale
        self.zonewidth = zonewidth
        self.initialcm = initialcm


# Universal Transverse Mercator parameters
utm = Projection(500000, 10000000, 0.9996, 6, -177)
# Helmert 14 Parameter Transformation Parameters
class Transformation(object):
    """Helmert 7/14-parameter similarity transformation between two datums.

    Translations are in metres, scale change in parts per million and
    rotations in arcseconds; the d_* terms are their per-year rates of
    change relative to ref_epoch.
    """

    def __init__(self, from_datum, to_datum, ref_epoch,
                 tx, ty, tz, sc, rx, ry, rz,
                 d_tx=0.0, d_ty=0.0, d_tz=0.0, d_sc=0.0, d_rx=0.0, d_ry=0.0, d_rz=0.0):
        self.from_datum = from_datum   # Datum Transforming From
        self.to_datum = to_datum       # Datum Transforming To
        self.ref_epoch = ref_epoch     # Reference Epoch (datetime.date Object)
        self.tx = tx                   # Translation in x (m)
        self.ty = ty                   # Translation in y (m)
        self.tz = tz                   # Translation in z (m)
        self.sc = sc                   # Scale Change (parts per million)
        self.rx = rx                   # Rotation about x (arcseconds)
        self.ry = ry                   # Rotation about y (arcseconds)
        self.rz = rz                   # Rotation about z (arcseconds)
        self.d_tx = d_tx               # Rate of change in Translation in x (m per year)
        self.d_ty = d_ty               # Rate of change in Translation in y (m per year)
        self.d_tz = d_tz               # Rate of change in Translation in z (m per year)
        self.d_sc = d_sc               # Rate of change in Scale Change (parts per million per year)
        self.d_rx = d_rx               # Rate of change in Rotation about x (arcseconds per year)
        self.d_ry = d_ry               # Rate of change in Rotation about y (arcseconds per year)
        self.d_rz = d_rz               # Rate of change in Rotation about z (arcseconds per year)

    def __repr__(self):
        return ('Transformation: '
                + 'From ' + repr(self.from_datum) + ' to ' + repr(self.to_datum) + '\n'
                + 'Reference Epoch: ' + repr(self.ref_epoch) + '\n'
                + ' tx: ' + repr(self.tx) + 'm + ' + repr(self.d_tx) + 'm/yr' + '\n'
                + ' ty: ' + repr(self.ty) + 'm + ' + repr(self.d_ty) + 'm/yr' + '\n'
                + ' tz: ' + repr(self.tz) + 'm + ' + repr(self.d_tz) + 'm/yr' + '\n'
                + ' sc: ' + repr(self.sc) + 'ppm + ' + repr(self.d_sc) + 'ppm/yr' + '\n'
                + ' rx: ' + repr(self.rx) + '\" + ' + repr(self.d_rx) + '\"/yr' + '\n'
                + ' ry: ' + repr(self.ry) + '\" + ' + repr(self.d_ry) + '\"/yr' + '\n'
                + ' rz: ' + repr(self.rz) + '\" + ' + repr(self.d_rz) + '\"/yr' + '\n')

    def __neg__(self):
        """
        Reverses Direction of Transformation Object
        i.e. ITRF2014 to ITRF2000 transformation becomes ITRF2000 to ITRF2014 transformation
        :return: Reversed Direction Transformation Object
        """
        return Transformation(self.to_datum,
                              self.from_datum,
                              self.ref_epoch,
                              -self.tx, -self.ty, -self.tz,
                              -self.sc,
                              -self.rx, -self.ry, -self.rz,
                              -self.d_tx, -self.d_ty, -self.d_tz,
                              -self.d_sc,
                              -self.d_rx, -self.d_ry, -self.d_rz)

    def __add__(self, other):
        """
        Change Transformation Epoch to a specified date.
        Advances all transformation parameters by their respective rates of change.
        :param other: datetime.date Object
        :return: Transformation object with parameters and ref epoch moved to specified date
        """
        if type(other) == date:
            # Fractional years between the reference epoch and the new date.
            timediff = (other - self.ref_epoch).days / 365.25
            # NOTE(review): from/to datums are swapped here just like __neg__,
            # which looks unintentional for a pure epoch shift — confirm the
            # intended direction before relying on it. (Preserved as-is.)
            return Transformation(self.to_datum,
                                  self.from_datum,
                                  other,
                                  round(self.tx + (self.d_tx * timediff), 8),
                                  round(self.ty + (self.d_ty * timediff), 8),
                                  round(self.tz + (self.d_tz * timediff), 8),
                                  round(self.sc + (self.d_sc * timediff), 8),
                                  round(self.rx + (self.d_rx * timediff), 8),
                                  round(self.ry + (self.d_ry * timediff), 8),
                                  round(self.rz + (self.d_rz * timediff), 8),
                                  self.d_tx,
                                  self.d_ty,
                                  self.d_tz,
                                  self.d_sc,
                                  self.d_rx,
                                  self.d_ry,
                                  self.d_rz)
        else:
            # BUG FIX: the ValueError was previously constructed but never
            # raised, so adding a non-date silently returned None.
            raise ValueError('supports adding datetime.date objects only')
def iers2trans(itrf_from, itrf_to, ref_epoch, tx, ty, tz, sc, rx, ry, rz, d_tx, d_ty, d_tz, d_sc, d_rx, d_ry, d_rz):
    """Convert IERS transformation parameters into a Transformation object.

    IERS tables give translations in mm, scale in parts per billion and
    rotations in milliarcseconds (with the same units per year for the
    rates); the Transformation class wants m / ppm / arcseconds.  All
    rotation and delta-rotation terms have a sign change applied, per the
    Australian convention.

    :param itrf_from: ITRF Realization Transforming From
    :param itrf_to: ITRF Realization Transforming To
    :param ref_epoch: Reference Epoch (YYYY.DOY)
    :return: Transformation Object following Australian Convention
    """
    def scaled(value):
        # mm -> m (equally ppb -> ppm, mas -> as), rounded as before.
        return round(value / 1000, 8)

    return Transformation(itrf_from, itrf_to, ref_epoch,
                          scaled(tx), scaled(ty), scaled(tz),
                          scaled(sc),
                          scaled(-rx), scaled(-ry), scaled(-rz),
                          scaled(d_tx), scaled(d_ty), scaled(d_tz),
                          scaled(d_sc),
                          scaled(-d_rx), scaled(-d_ry), scaled(-d_rz))
# GDA1994 to GDA2020 Transformation Parameters from GDA2020 Tech Manual v1.2
# NOTE(review): several legacy entries below pass ref_epoch=0 (not a
# datetime.date) and mix 'GDA94'/'GDA1994' datum labels — Transformation
# epoch arithmetic cannot be applied to those; confirm this is intentional.
gda94to20 = Transformation('GDA1994', 'GDA2020', 0,
                           0.06155, -0.01087, -0.04019, -0.009994, -0.0394924, -0.0327221, -0.0328979)
# ITRF2014 to GDA2020 (Australian Plate Motion Model) Transformation Parameters from GDA2020 Tech Manual v1.2. The
# model was derived using 109 ARGN and AuScope GNSS CORS which were used to define the RVS.
itrf14togda20 = Transformation('ITRF2014', 'GDA2020', date(2020, 1, 1),
                               0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0.00150379, 0.00118346, 0.00120716)
# ATRF to GDA2020 (Australian Plate Motion Model) Transformation Parameters (as advised by N. Brown, Geoscience
# Australia). The model was derived using 109 ARGN and AuScope GNSS CORS which were used to define the RVS.
atrf_gda2020 = Transformation('ATRF', 'GDA2020', date(2020, 1, 1),
                              0, 0, 0, 0, 0, 0, 0,
                              0, 0, 0, 0, 0.00150379, 0.00118346, 0.00120716)
# GDA1994 to ITRF Transformation Parameters from Dawson and Woods (2010)
# AGD66 and AGD84 to GDA94 Transformation Parameters from GDA94 Tech Manual v2.4
# link: http://www.icsm.gov.au/datum/gda2020-and-gda94-technical-manuals
itrf08togda94 = Transformation('ITRF2008', 'GDA1994', date(1994, 1, 1),
                               -0.08468, -0.01942, 0.03201, 0.00971, -0.0004254, 0.0022578, 0.0024015,
                               0.00142, 0.00134, 0.00090, 0.000109, 0.0015461, 0.001820, 0.0011551)
itrf05togda94 = Transformation('ITRF2005', 'GDA1994', date(1994, 1, 1),
                               -0.07973, -0.00686, 0.03803, 0.006636, -0.0000351, 0.0021211, 0.0021411,
                               0.00225, -0.00062, -0.00056, 0.000294, 0.0014707, 0.0011443, 0.0011701)
itrf00togda94 = Transformation('ITRF2000', 'GDA1994', date(1994, 1, 1),
                               -0.04591, -0.02985, -0.02037, 0.00707, -0.0016705, 0.0004594, 0.0019356,
                               -0.00466, 0.00355, 0.01124, 0.000249, 0.0017454, 0.0014868, 0.001224)
itrf97togda94 = Transformation('ITRF1997', 'GDA1994', date(1994, 1, 1),
                               -0.01463, -0.02762, -0.02532, 0.006695, -0.0017893, -0.0006047, 0.0009962,
                               -0.00860, 0.00036, 0.01125, 0.000007, 0.0016394, 0.0015198, 0.0013801)
itrf96togda94 = Transformation('ITRF1996', 'GDA1994', date(1994, 1, 1),
                               0.02454, -0.03643, -0.06812, 0.006901, -0.0027359, -0.0020431, 0.0003731,
                               -0.02180, 0.00471, 0.02627, 0.000388, 0.0020203, 0.0021735, 0.0016290)
agd84togda94 = Transformation('AGD84', 'GDA94', 0,
                              -117.763, -51.510, 139.061, -0.191, -0.292, -0.443, -0.277)
agd66togda94 = Transformation('AGD1966', 'GDA1994', 0,
                              -117.808, -51.536, 137.784, -0.290, -0.303, -0.446, -0.234)
# Regional AGD66 -> GDA94 variants (ACT, Tasmania, Vic/NSW, NT)
agd66togda94_act = Transformation('AGD66', 'GDA94', 0,
                                  -129.193, -41.212, 130.730, -2.955, -0.246, -0.374, -0.329)
agd66togda94_tas = Transformation('AGD66', 'GDA94', 0,
                                  -120.271, -64.543, 161.632, 2.499, -0.217, 0.067, 0.129)
agd66togda94_vicnsw = Transformation('AGD66', 'GDA94', 0,
                                     -119.353, -48.301, 139.484, -0.613, -0.415, -0.260, -0.437)
agd66togda94_nt = Transformation('AGD66', 'GDA94', 0,
                                 -124.133, -42.003, 137.400, -1.854, 0.008, -0.557, -0.178)
# ITRF2014 Parameters
# link: http://itrf.ign.fr/doc_ITRF/Transfo-ITRF2014_ITRFs.txt
itrf14to08 = iers2trans('ITRF2014', 'ITRF2008', date(2010, 1, 1),
                        1.6, 1.9, 2.4, -0.02, 0, 0, 0,
                        0.0, 0.0, -0.1, 0.03, 0, 0, 0)
itrf14to05 = iers2trans('ITRF2014', 'ITRF2005', date(2010, 1, 1),
                        2.6, 1.0, -2.3, 0.92, 0, 0, 0,
                        0.3, 0.0, -0.1, 0.03, 0, 0, 0)
itrf14to00 = iers2trans('ITRF2014', 'ITRF2000', date(2010, 1, 1),
                        0.7, 1.2, -26.1, 2.12, 0, 0, 0,
                        0.1, 0.1, -1.9, 0.11, 0, 0, 0)
itrf14to97 = iers2trans('ITRF2014', 'ITRF1997', date(2010, 1, 1),
                        7.4, -0.5, -62.8, 3.80, 0, 0, 0.26,
                        0.1, -0.5, -3.3, 0.12, 0, 0, 0.02)
itrf14to96 = iers2trans('ITRF2014', 'ITRF1996', date(2010, 1, 1),
                        7.4, -0.5, -62.8, 3.80, 0, 0, 0.26,
                        0.1, -0.5, -3.3, 0.12, 0, 0, 0.02)
itrf14to94 = iers2trans('ITRF2014', 'ITRF1994', date(2010, 1, 1),
                        7.4, -0.5, -62.8, 3.80, 0, 0, 0.26,
                        0.1, -0.5, -3.3, 0.12, 0, 0, 0.02)
itrf14to93 = iers2trans('ITRF2014', 'ITRF1993', date(2010, 1, 1),
                        -50.4, 3.3, -60.2, 4.29, -2.81, -3.38, 0.40,
                        -2.8, -0.1, -2.5, 0.12, -0.11, -0.19, 0.07)
itrf14to92 = iers2trans('ITRF2014', 'ITRF1992', date(2010, 1, 1),
                        15.4, 1.5, -70.8, 3.09, 0, 0, 0.26,
                        0.1, -0.5, -3.3, 0.12, 0, 0, 0.02)
itrf14to91 = iers2trans('ITRF2014', 'ITRF1991', date(2010, 1, 1),
                        27.4, 15.5, -76.8, 4.49, 0, 0, 0.26,
                        0.1, -0.5, -3.3, 0.12, 0, 0, 0.02)
itrf14to90 = iers2trans('ITRF2014', 'ITRF1990', date(2010, 1, 1),
                        25.4, 11.5, -92.8, 4.79, 0, 0, 0.26,
                        0.1, -0.5, -3.3, 0.12, 0, 0, 0.02)
itrf14to89 = iers2trans('ITRF2014', 'ITRF1989', date(2010, 1, 1),
                        30.4, 35.5, -130.8, 8.19, 0, 0, 0.26,
                        0.1, -0.5, -3.3, 0.12, 0, 0, 0.02)
itrf14to88 = iers2trans('ITRF2014', 'ITRF1988', date(2010, 1, 1),
                        25.4, -0.5, -154.8, 11.29, 0.1, 0, 0.26,
                        0.1, -0.5, -3.3, 0.12, 0, 0, 0.02)
# ITRF2008 Parameters
# link: http://itrf.ign.fr/doc_ITRF/Transfo-ITRF2008_ITRFs.txt
itrf08to05 = iers2trans('ITRF2008', 'ITRF2005', date(2000, 1, 1),
                        -2.0, -0.9, -4.7, 0.94, 0, 0, 0,
                        0.3, 0.0, 0.0, 0.0, 0, 0, 0)
itrf08to00 = iers2trans('ITRF2008', 'ITRF2000', date(2000, 1, 1),
                        -1.9, -1.7, -10.5, 1.34, 0, 0, 0,
                        0.1, 0.1, -1.8, 0.08, 0, 0, 0)
itrf08to97 = iers2trans('ITRF2008', 'ITRF1997', date(2000, 1, 1),
                        4.8, 2.6, -33.2, 2.92, 0, 0, 0.06,
                        0.1, -0.5, -3.2, 0.09, 0, 0, 0.02)
itrf08to96 = iers2trans('ITRF2008', 'ITRF1996', date(2000, 1, 1),
                        4.8, 2.6, -33.2, 2.92, 0, 0, 0.06,
                        0.1, -0.5, -3.2, 0.09, 0, 0, 0.02)
itrf08to94 = iers2trans('ITRF2008', 'ITRF1994', date(2000, 1, 1),
                        4.8, 2.6, -33.2, 2.92, 0, 0, 0.06,
                        0.1, -0.5, -3.2, 0.09, 0, 0, 0.02)
itrf08to93 = iers2trans('ITRF2008', 'ITRF1993', date(2000, 1, 1),
                        -24.0, 2.4, -38.6, 3.41, -1.71, -1.48, -0.30,
                        -2.8, -0.1, -2.4, 0.09, -0.11, -0.19, 0.07)
itrf08to92 = iers2trans('ITRF2008', 'ITRF1992', date(2000, 1, 1),
                        12.8, 4.6, -41.2, 2.21, 0, 0, 0.06,
                        0.1, -0.5, -3.2, 0.09, 0, 0, 0.02)
itrf08to91 = iers2trans('ITRF2008', 'ITRF1991', date(2000, 1, 1),
                        24.8, 18.6, -47.2, 3.61, 0, 0, 0.06,
                        0.1, -0.5, -3.2, 0.09, 0, 0, 0.02)
itrf08to90 = iers2trans('ITRF2008', 'ITRF1990', date(2000, 1, 1),
                        22.8, 14.6, -63.2, 3.91, 0, 0, 0.06,
                        0.1, -0.5, -3.2, 0.09, 0, 0, 0.02)
itrf08to89 = iers2trans('ITRF2008', 'ITRF1989', date(2000, 1, 1),
                        27.8, 38.6, -101.2, 7.31, 0, 0, 0.06,
                        0.1, -0.5, -3.2, 0.09, 0, 0, 0.02)
itrf08to88 = iers2trans('ITRF2008', 'ITRF1988', date(2000, 1, 1),
                        22.8, 2.6, -125.2, 10.41, 0.10, 0, 0.06,
                        0.1, -0.5, -3.2, 0.09, 0, 0, 0.02)
# ITRF2005 Parameters
# link: http://itrf.ensg.ign.fr/ITRF_solutions/2005/tp_05-00.php
itrf05to00 = iers2trans('ITRF2005', 'ITRF2000', date(2000, 1, 1),
                        0.1, -0.8, -5.8, 0.40, 0, 0, 0,
                        -0.2, 0.1, -1.8, 0.08, 0, 0, 0)
# ITRF2000 Parameters
# link: ftp://itrf.ensg.ign.fr/pub/itrf/ITRF.TP
# NOTE: This ref lists translations in centimetres. All other ITRF transformations are shown in millimetres.
# NOTE: All translations and rates of translation shown below have been converted to millimetres.
itrf00to97 = iers2trans('ITRF2000', 'ITRF1997', date(1997, 1, 1),
                        6.7, 6.1, -18.5, 1.55, 0, 0, 0,
                        0.0, -0.6, -1.4, 0.01, 0, 0, 0.02)
itrf00to96 = iers2trans('ITRF2000', 'ITRF1996', date(1997, 1, 1),
                        6.7, 6.1, -18.5, 1.55, 0, 0, 0,
                        0.0, -0.6, -1.4, 0.01, 0, 0, 0.02)
itrf00to94 = iers2trans('ITRF2000', 'ITRF1994', date(1997, 1, 1),
                        6.7, 6.1, -18.5, 1.55, 0, 0, 0,
                        0.0, -0.6, -1.4, 0.01, 0, 0, 0.02)
itrf00to93 = iers2trans('ITRF2000', 'ITRF1993', date(1988, 1, 1),
                        12.7, 6.5, -20.9, 1.95, -0.39, 0.80, -1.14,
                        -2.9, -0.2, -0.6, 0.01, -0.11, -0.19, 0.07)
itrf00to92 = iers2trans('ITRF2000', 'ITRF1992', date(1988, 1, 1),
                        14.7, 13.5, -13.9, 0.75, 0, 0, -0.18,
                        0.0, -0.6, -1.4, 0.01, 0, 0, 0.02)
itrf00to91 = iers2trans('ITRF2000', 'ITRF1991', date(1988, 1, 1),
                        26.7, 27.5, -19.9, 2.15, 0, 0, -0.18,
                        0.0, -0.6, -1.4, 0.01, 0, 0, 0.02)
itrf00to90 = iers2trans('ITRF2000', 'ITRF1990', date(1988, 1, 1),
                        14.7, 13.5, -13.9, 0.75, 0, 0, -0.18,
                        0.0, -0.6, -1.4, 0.01, 0, 0, 0.02)
itrf00to89 = iers2trans('ITRF2000', 'ITRF1989', date(1988, 1, 1),
                        29.7, 47.5, -73.9, 5.85, 0, 0, -0.18,
                        0.0, -0.6, -1.4, 0.01, 0, 0, 0.02)
itrf00to88 = iers2trans('ITRF2000', 'ITRF1988', date(1988, 1, 1),
                        24.7, 11.5, -97.9, 8.95, 0, 0, -0.18,
                        0.0, -0.6, -1.4, 0.01, 0, 0, 0.02)
|
#!/usr/bin/env python3
import os
import sys
def main():
    """Greet the names given on the command line.

    Prints a usage message and exits with status 1 when no names are given.
    """
    names = sys.argv[1:]
    if not names:
        script = os.path.basename(sys.argv[0])
        print('Usage: {} NAME [NAME2 ...]'.format(script))
        sys.exit(1)
    # The three branches are mutually exclusive; use elif/else instead of
    # three independent ifs.
    if len(names) == 1:
        print('Hello to the 1 of you: ' + names[0] + '!')
    elif len(names) == 2:
        print('Hello to the 2 of you: {}!'.format(' and '.join(names)))
    else:
        # Oxford-comma list: "A, B, and C".
        print('Hello to the {} of you: {}, and {}!'.format(len(names), ', '.join(names[:-1]), names[-1]))


# Guard the entry point so importing this module has no side effects.
if __name__ == '__main__':
    main()
|
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
class EntropyCoder(object):
    """rANS entropy coder operating on quantized CDF tables.

    Wraps the compiled ``RansEncoder``/``RansDecoder`` backends and holds the
    CDF state (offset, quantized CDF, CDF lengths) used to code 4-D tensors.
    """
    def __init__(self, entropy_coder_precision=16):
        super().__init__()
        # Imported lazily so this module can be imported without the compiled
        # extension when entropy coding is not actually used.
        from .MLCodec_rans import RansEncoder, RansDecoder
        self.encoder = RansEncoder()
        self.decoder = RansDecoder()
        # Number of bits used for the quantized CDF representation.
        self.entropy_coder_precision = int(entropy_coder_precision)
        # CDF state; must be installed via set_cdf_states() before coding.
        self._offset = None
        self._quantized_cdf = None
        self._cdf_length = None
    def encode_with_indexes(self, *args, **kwargs):
        """Delegate to the backend encoder."""
        return self.encoder.encode_with_indexes(*args, **kwargs)
    def decode_with_indexes(self, *args, **kwargs):
        """Delegate to the backend decoder."""
        return self.decoder.decode_with_indexes(*args, **kwargs)
    def set_cdf_states(self, offset, quantized_cdf, cdf_length):
        """Install the CDF tables used by compress()/decompress()."""
        self._offset = offset
        self._quantized_cdf = quantized_cdf
        self._cdf_length = cdf_length
    @staticmethod
    def pmf_to_quantized_cdf(pmf, precision=16):
        """Quantize a single pmf into an integer CDF via the C++ helper."""
        from .MLCodec_CXX import pmf_to_quantized_cdf as _pmf_to_quantized_cdf
        cdf = _pmf_to_quantized_cdf(pmf.tolist(), precision)
        cdf = torch.IntTensor(cdf)
        return cdf
    def pmf_to_cdf(self, pmf, tail_mass, pmf_length, max_length):
        """Build a zero-padded ``(len(pmf_length), max_length + 2)`` int32 CDF table.

        Row ``i`` quantizes ``pmf[i][:pmf_length[i]]`` plus its tail mass;
        shorter rows stay zero-padded at the end.
        """
        cdf = torch.zeros((len(pmf_length), max_length + 2), dtype=torch.int32)
        for i, p in enumerate(pmf):
            prob = torch.cat((p[: pmf_length[i]], tail_mass[i]), dim=0)
            _cdf = self.pmf_to_quantized_cdf(prob, self.entropy_coder_precision)
            cdf[i, : _cdf.size(0)] = _cdf
        return cdf
    def _check_cdf_size(self):
        # Guard: CDF table must be populated and 2-D.
        if self._quantized_cdf.numel() == 0:
            raise ValueError("Uninitialized CDFs. Run update() first")
        if len(self._quantized_cdf.size()) != 2:
            raise ValueError(f"Invalid CDF size {self._quantized_cdf.size()}")
    def _check_offsets_size(self):
        # Guard: offsets must be populated and 1-D.
        if self._offset.numel() == 0:
            raise ValueError("Uninitialized offsets. Run update() first")
        if len(self._offset.size()) != 1:
            raise ValueError(f"Invalid offsets size {self._offset.size()}")
    def _check_cdf_length(self):
        # Guard: CDF lengths must be populated and 1-D.
        if self._cdf_length.numel() == 0:
            raise ValueError("Uninitialized CDF lengths. Run update() first")
        if len(self._cdf_length.size()) != 1:
            # BUGFIX: this message previously said "Invalid offsets size"
            # (copy-paste from _check_offsets_size).
            raise ValueError(f"Invalid CDF lengths size {self._cdf_length.size()}")
    def compress(self, inputs, indexes):
        """Compress a 4-D integer tensor to a byte string.

        Args:
            inputs (torch.Tensor): values to encode, shape ``(1, C, H, W)``;
                truncated to int. The batch dimension must be 1.
            indexes (torch.Tensor): per-element CDF row indexes, same shape.
        """
        if len(inputs.size()) != 4:
            raise ValueError("Invalid `inputs` size. Expected a 4-D tensor.")
        if inputs.size() != indexes.size():
            raise ValueError("`inputs` and `indexes` should have the same size.")
        symbols = inputs.int()
        self._check_cdf_size()
        self._check_cdf_length()
        self._check_offsets_size()
        assert symbols.size(0) == 1
        rv = self.encode_with_indexes(
            symbols[0].reshape(-1).int().tolist(),
            indexes[0].reshape(-1).int().tolist(),
            self._quantized_cdf.tolist(),
            self._cdf_length.reshape(-1).int().tolist(),
            self._offset.reshape(-1).int().tolist(),
        )
        return rv
    def decompress(self, strings, indexes):
        """
        Decompress char strings to tensors.
        Args:
            strings (str): compressed tensors
            indexes (torch.IntTensor): tensors CDF indexes
        """
        assert indexes.size(0) == 1
        if len(indexes.size()) != 4:
            raise ValueError("Invalid `indexes` size. Expected a 4-D tensor.")
        self._check_cdf_size()
        self._check_cdf_length()
        self._check_offsets_size()
        cdf = self._quantized_cdf
        # Allocate the output with the same dtype/device as the CDF table.
        outputs = cdf.new(indexes.size())
        values = self.decode_with_indexes(
            strings,
            indexes[0].reshape(-1).int().tolist(),
            self._quantized_cdf.tolist(),
            self._cdf_length.reshape(-1).int().tolist(),
            self._offset.reshape(-1).int().tolist(),
        )
        outputs[0] = torch.Tensor(values).reshape(outputs[0].size())
        return outputs.float()
    def set_stream(self, stream):
        """Attach an encoded byte stream for incremental decoding."""
        self.decoder.set_stream(stream)
    def decode_stream(self, indexes):
        """Decode the next symbols from the attached stream as a (1, N, 1, 1) tensor."""
        rv = self.decoder.decode_stream(
            indexes.squeeze().int().tolist(),
            self._quantized_cdf.tolist(),
            self._cdf_length.reshape(-1).int().tolist(),
            self._offset.reshape(-1).int().tolist(),
        )
        rv = torch.Tensor(rv).reshape(1, -1, 1, 1)
        return rv
class Bitparm(nn.Module):
def __init__(self, channel, final=False):
super(Bitparm, self).__init__()
self.final = final
self.h = nn.Parameter(torch.nn.init.normal_(
torch.empty(channel).view(1, -1, 1, 1), 0, 0.01))
self.b = nn.Parameter(torch.nn.init.normal_(
torch.empty(channel).view(1, -1, 1, 1), 0, 0.01))
if not final:
self.a = nn.Parameter(torch.nn.init.normal_(
torch.empty(channel).view(1, -1, 1, 1), 0, 0.01))
else:
self.a = None
def forward(self, x):
if self.final:
return torch.sigmoid(x * F.softplus(self.h) + self.b)
else:
x = x * F.softplus(self.h) + self.b
return x + torch.tanh(x) * torch.tanh(self.a)
class BitEstimator(nn.Module):
    """Factorized entropy model: estimates a per-channel CDF value at x.

    Four stacked Bitparm layers produce a monotone-in-parameters CDF
    estimate; `update` derives quantized CDF tables from it for coding.
    """
    def __init__(self, channel):
        super(BitEstimator, self).__init__()
        self.f1 = Bitparm(channel)
        self.f2 = Bitparm(channel)
        self.f3 = Bitparm(channel)
        # Final layer applies a sigmoid, so outputs lie in (0, 1).
        self.f4 = Bitparm(channel, True)
        self.channel = channel
        # Created lazily by update(); None until then.
        self.entropy_coder = None
    def forward(self, x):
        # Chain the four Bitparm layers; returns the CDF estimate at x.
        x = self.f1(x)
        x = self.f2(x)
        x = self.f3(x)
        return self.f4(x)
    def update(self, force=False):
        # Check if we need to update the bottleneck parameters, the offsets are
        # only computed and stored when the conditional model is update()'d.
        if self.entropy_coder is not None and not force: # pylint: disable=E0203
            return
        self.entropy_coder = EntropyCoder()
        with torch.no_grad():
            device = next(self.parameters()).device
            medians = torch.zeros((self.channel), device=device)
            # Per channel, search i in [2, 50] for the smallest magnitude
            # where the CDF at -i is effectively 0 (< 1e-4).
            minima = medians + 50
            for i in range(50, 1, -1):
                samples = torch.zeros_like(medians) - i
                samples = samples[None, :, None, None]
                probs = self.forward(samples)
                probs = torch.squeeze(probs)
                minima = torch.where(probs < torch.zeros_like(medians) + 0.0001,
                                     torch.zeros_like(medians) + i, minima)
            # Symmetric search on the positive side for CDF effectively 1.
            maxima = medians + 50
            for i in range(50, 1, -1):
                samples = torch.zeros_like(medians) + i
                samples = samples[None, :, None, None]
                probs = self.forward(samples)
                probs = torch.squeeze(probs)
                maxima = torch.where(probs > torch.zeros_like(medians) + 0.9999,
                                     torch.zeros_like(medians) + i, maxima)
            minima = minima.int()
            maxima = maxima.int()
            # Offsets map symbol values onto non-negative pmf bin indexes.
            offset = -minima
            pmf_start = medians - minima
            pmf_length = maxima + minima + 1
            max_length = pmf_length.max()
            device = pmf_start.device
            samples = torch.arange(max_length, device=device)
            # NOTE(review): broadcasting yields a (channel, 1, max_length)
            # grid which forward() then broadcasts against its (1, C, 1, 1)
            # parameters — confirm shapes if Bitparm's layout changes.
            samples = samples[None, :] + pmf_start[:, None, None]
            half = float(0.5)
            # pmf of an integer bin x is CDF(x + 0.5) - CDF(x - 0.5).
            lower = self.forward(samples - half).squeeze(0)
            upper = self.forward(samples + half).squeeze(0)
            pmf = upper - lower
            pmf = pmf[:, 0, :]
            # Probability mass outside [-minima, maxima], folded into one bin.
            tail_mass = lower[:, 0, :1] + (1.0 - upper[:, 0, -1:])
            quantized_cdf = self.entropy_coder.pmf_to_cdf(pmf, tail_mass, pmf_length, max_length)
            cdf_length = pmf_length + 2
            self.entropy_coder.set_cdf_states(offset, quantized_cdf, cdf_length)
    @staticmethod
    def build_indexes(size):
        """Every spatial position in channel c uses CDF row c."""
        N, C, H, W = size
        indexes = torch.arange(C).view(1, -1, 1, 1)
        indexes = indexes.int()
        return indexes.repeat(N, 1, H, W)
    def compress(self, x):
        """Entropy-encode x using the tables built by update()."""
        indexes = self.build_indexes(x.size())
        return self.entropy_coder.compress(x, indexes)
    def decompress(self, strings, size):
        """Decode strings back to a (1, C, H, W) float tensor; size is (H, W)."""
        output_size = (1, self.entropy_coder._quantized_cdf.size(0), size[0], size[1])
        indexes = self.build_indexes(output_size)
        return self.entropy_coder.decompress(strings, indexes)
class GaussianEncoder(object):
    """Entropy coder for values modeled with a zero-mean, scale-indexed prior.

    NOTE(review): despite the class name, the distribution used to build the
    CDF tables is Laplace, not Gaussian (see `update`) — presumably
    intentional, but worth confirming against the training-side prior.
    """
    def __init__(self):
        # Discrete set of scales; each value to code is matched to a table
        # entry by build_indexes().
        self.scale_table = self.get_scale_table()
        # Created lazily by update(); None until then.
        self.entropy_coder = None
    @staticmethod
    def get_scale_table(min=0.01, max=16, levels=64):  # pylint: disable=W0622
        """Return `levels` scales spaced log-uniformly in [min, max]."""
        return torch.exp(torch.linspace(math.log(min), math.log(max), levels))
    def update(self, force=False):
        """Build the quantized CDF tables; no-op if already built unless `force`."""
        if self.entropy_coder is not None and not force:
            return
        self.entropy_coder = EntropyCoder()
        pmf_center = torch.zeros_like(self.scale_table) + 50
        scales = torch.zeros_like(pmf_center) + self.scale_table
        mu = torch.zeros_like(scales)
        gaussian = torch.distributions.laplace.Laplace(mu, scales)
        # Per scale, shrink the pmf support: find the smallest i in [2, 50]
        # where the CDF at i already exceeds 0.9999.
        for i in range(50, 1, -1):
            samples = torch.zeros_like(pmf_center) + i
            probs = gaussian.cdf(samples)
            probs = torch.squeeze(probs)
            pmf_center = torch.where(probs > torch.zeros_like(pmf_center) + 0.9999,
                                     torch.zeros_like(pmf_center) + i, pmf_center)
        pmf_center = pmf_center.int()
        # Symmetric support: [-pmf_center, pmf_center].
        pmf_length = 2 * pmf_center + 1
        max_length = torch.max(pmf_length).item()
        device = pmf_center.device
        samples = torch.arange(max_length, device=device) - pmf_center[:, None]
        samples = samples.float()
        scales = torch.zeros_like(samples) + self.scale_table[:, None]
        mu = torch.zeros_like(scales)
        gaussian = torch.distributions.laplace.Laplace(mu, scales)
        # pmf of integer bin x is CDF(x + 0.5) - CDF(x - 0.5).
        upper = gaussian.cdf(samples + 0.5)
        lower = gaussian.cdf(samples - 0.5)
        pmf = upper - lower
        # Zero-mean symmetric distribution: total tail mass is twice the
        # lower tail at the left edge of the support.
        tail_mass = 2 * lower[:, :1]
        # (Removed a dead `torch.Tensor(len(pmf_length), max_length + 2)`
        # pre-allocation that was immediately overwritten by the call below.)
        quantized_cdf = self.entropy_coder.pmf_to_cdf(pmf, tail_mass, pmf_length, max_length)
        self.entropy_coder.set_cdf_states(-pmf_center, quantized_cdf, pmf_length + 2)
    def build_indexes(self, scales):
        """Map each scale to the index of the smallest table entry >= scale."""
        # Clamp tiny scales so every value maps to a valid table row.
        scales = torch.maximum(scales, torch.zeros_like(scales) + 1e-5)
        indexes = scales.new_full(scales.size(), len(self.scale_table) - 1).int()
        for s in self.scale_table[:-1]:
            indexes -= (scales <= s).int()
        return indexes
    def compress(self, x, scales):
        """Entropy-encode x with per-element scales."""
        indexes = self.build_indexes(scales)
        return self.entropy_coder.compress(x, indexes)
    def decompress(self, strings, scales):
        """Decode strings using the same per-element scales as compression."""
        indexes = self.build_indexes(scales)
        return self.entropy_coder.decompress(strings, indexes)
    def set_stream(self, stream):
        """Attach an encoded byte stream for incremental decoding."""
        self.entropy_coder.set_stream(stream)
    def decode_stream(self, scales):
        """Decode the next symbols for the given scales from the stream."""
        indexes = self.build_indexes(scales)
        return self.entropy_coder.decode_stream(indexes)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from pathlib import Path
# Collect every file under cardanopy/data (recursively) so it ships as
# package data; paths are stored relative to the package.
datadir = Path(__file__).parent / 'cardanopy' / 'data'
files = ['data/' + str(p.relative_to(datadir)) for p in datadir.rglob('*')]
setup(name='cardanopy',
      version='0.1.10-dev1',
      description='Cardano CLI tools for python3',
      author='Bourke Floyd',
      author_email='chbfiv@floydcraft.com',
      url='https://github.com/floydcraft/cardano-py',
      # package_dir={'cardanopy': '.'},
      package_data={'cardanopy': files},
      packages=find_packages(exclude=['tests']),
      keywords='cardano,ada,cli',
      python_requires='>=3.5.3,<4',
      install_requires=[
          'click~=7.1.2',
          'pyyaml~=5.4.1',
          'jsonschema~=3.2.0'
      ],
      # Installs a `cardanopy` console command bound to cardanopy.main.
      entry_points={
          'console_scripts': [
              'cardanopy=cardanopy:main',
          ],
      },
      project_urls={
          'Bug Reports': 'https://github.com/floydcraft/cardano-py/issues',
          'Chat': 'https://discord.gg/FyDz4Xrt4x',
          'Source': 'https://github.com/floydcraft/cardano-py',
      }
      )
|
import codecs
import time
import geopy.geocoders
import unicodecsv
from django.core.management import BaseCommand, CommandError
from django.db import transaction
from django.template import Context, Template, TemplateSyntaxError
from django.utils.encoding import force_text
from geopy import get_geocoder_for_service
from geopy.exc import GeopyError
from fluentcms_googlemaps.models import Marker, MarkerGroup
# Identify this app to geocoding services via a custom User-Agent
# (services such as Nominatim expect a distinct, descriptive agent).
geopy.geocoders.options.default_user_agent = "fluentcms_googlemaps"
class Command(BaseCommand):
    """
    Import markers from a CSV file
    """
    args = "csv-file, ..."
    # NOTE(review): the example below uses --name, but the parser defines
    # --title — the help text looks outdated; confirm before relying on it.
    help = r"""
    Import CSV data as markers.
    The data can be converted into the database by using the options.
    For example:
    manage.py import_markers \
    --name="{{ Name }}" \
    --geocode='{{ Address }} {{ Zipcode }} {{ City }} {{ County }}' \
    --description="<p>{{ Address }}<br>{{ Zipcode }} {{ City }}<br>{{ Country }}</p>"
    Tip: export NL=$'\n' so you can use $NL in the strings for a newline.
    """
    def add_arguments(self, parser):
        """Register CLI options; the template options are rendered per CSV row."""
        parser.add_argument('--title', action='store', default='{{ name }}', help="A template that fills the name field")
        parser.add_argument('--group', default='{{ group }}', help='A template that fills the group field')
        parser.add_argument('--geocode', default='{{ address }}', help='A template that fills the address for Geocoding')
        parser.add_argument('--description', default='{{ description }}', help='A template that fills the description field')
        parser.add_argument('--image', default='', help='A template that fills the image field')
        # CSV parsing options, passed straight to unicodecsv.DictReader.
        parser.add_argument('--dialect', default='excel')
        parser.add_argument('--delimiter', default=',')
        parser.add_argument('--quotechar', default='"')
        parser.add_argument('--geocoder', default='nominatim')
        # 1-based row number to resume from (rows before it are skipped).
        parser.add_argument('--start-at', default=0, action='store', type=int)
        parser.add_argument('--dry-run', action='store_true', default=False)
        # Positional args keep the hyphen in their dest, hence
        # options['csv-files'] below.
        parser.add_argument('csv-files', nargs='+')
    def handle(self, *args, **options):
        """Read each CSV file, render the field templates per row, geocode, and bulk-create markers."""
        if not options.get('csv-files'):
            raise CommandError("Expected CSV filename to import")
        try:
            geocoder = get_geocoder_for_service(options['geocoder'])()
        except GeopyError as e:
            raise CommandError(str(e))
        dry_run = options['dry_run']
        start_at = options['start_at'] or 0
        for filename in options['csv-files']:
            # Not passing the utf-8 codec to codecs.open()
            # the file is opened in ascii, and unicodecsv performs the conversion.
            with codecs.open(filename, 'rb') as f:
                csv_data = unicodecsv.DictReader(f, dialect=options['dialect'], delimiter=options['delimiter'], quotechar=options['quotechar'])
                first = True
                marker_data = []
                row_num = 0
                for row in csv_data:
                    row_num += 1
                    if row_num < start_at:
                        continue
                    # Parse the row data
                    # Print first results immediately, for easy debugging
                    title = _format_field(options, 'title', row, allow_empty=not first)
                    if not first:
                        self.stdout.write('----')
                    self.stdout.write(u"Row: {0}".format(row_num))
                    self.stdout.write(u"Name: {0}".format(title))
                    # Parse the rest
                    geocode = _format_field(options, 'geocode', row, allow_empty=not first)
                    description = _format_field(options, 'description', row, allow_html=True, allow_empty=not first)
                    group_id = _format_field(options, 'group', row, allow_html=False, allow_empty=not first)
                    image = _format_field(options, 'image', row, allow_empty=True)
                    group = _get_group(group_id)
                    if not dry_run:
                        # Avoid exceeding rate limit on dry-run tests
                        if not first:
                            time.sleep(0.3) # 300ms
                        try:
                            location = geocoder.geocode(geocode)
                        except GeopyError as e:
                            raise CommandError(str(e))
                        if not location:
                            raise CommandError("Unable to geocode: {0}".format(geocode))
                    self.stdout.write(u"Group: {0}".format(group))
                    self.stdout.write(u"Geocode: {0}".format(geocode))
                    if dry_run:
                        self.stdout.write(u"Location: (not determined for dry-run)")
                    else:
                        self.stdout.write(u"Location: ({0}, {1}) {2}".format(location.latitude, location.longitude, location))
                    self.stdout.write(u"Image: {0}".format(image))
                    self.stdout.write(u"Description:\n{0}".format(description))
                    first = False
                    if not dry_run:
                        marker_data.append(Marker(
                            title=title,
                            image=image or '',
                            description=description,
                            group=group,
                            location=[location.latitude, location.longitude],
                        ))
                # Dry runs only report; nothing is written to the database.
                if dry_run:
                    continue
                self.stdout.write('----')
                self.stdout.write(u"Writing objects..")
                with transaction.atomic():
                    Marker.objects.bulk_create(marker_data)
                self.stdout.write(u"Done")
def _format_field(options, name, data, allow_html=False, allow_empty=False):
    """Render the template given for option `name` against a CSV row dict.

    Raises CommandError on template syntax errors, or when the rendered
    result is blank and `allow_empty` is False.
    """
    tpl_source = options[name]
    context = Context(data, autoescape=allow_html)
    try:
        rendered = Template(tpl_source).render(context)
    except TemplateSyntaxError as e:
        raise CommandError("Invalid syntax for --{0}='{1}'\n{2}".format(name, tpl_source, e))
    if not (allow_empty or rendered.strip()):
        raise CommandError("No results for the '{0}' fields, please update --{0}. It currently uses '{1}'.".format(name, tpl_source))
    return rendered if allow_html else force_text(rendered)
def _get_group(group_id):
    """Look up a MarkerGroup by primary key (numeric input) or by title."""
    lookup = {'pk': int(group_id)} if group_id.isdigit() else {'title': group_id}
    try:
        return MarkerGroup.objects.get(**lookup)
    except MarkerGroup.DoesNotExist:
        raise CommandError("Unable to find group '{0}'".format(group_id))
|
# -*- coding: utf-8 -*-
"""Test that models can be executed."""
import importlib
import os
import unittest
from typing import Any, MutableMapping, Optional
import numpy
import torch
import unittest_templates
import pykeen.experiments
import pykeen.models
from pykeen.models import (
ERModel, EntityEmbeddingModel, EntityRelationEmbeddingModel, Model,
_NewAbstractModel, _OldAbstractModel, model_resolver,
)
from pykeen.models.multimodal.base import LiteralModel
from pykeen.models.predict import get_novelty_mask, predict
from pykeen.models.unimodal.trans_d import _project_entity
from pykeen.nn import EmbeddingSpecification
from pykeen.nn.emb import Embedding
from pykeen.utils import all_in_bounds, clamp_norm, extend_batch
from tests import cases
from tests.constants import EPSILON
from tests.mocks import MockModel
from tests.test_model_mode import SimpleInteractionModel
#: Base/abstract classes and test mocks that the generic model test suite
#: must not instantiate directly; LiteralModel subclasses are added below.
SKIP_MODULES = {
    Model,
    _OldAbstractModel,
    _NewAbstractModel,
    # DummyModel,
    LiteralModel,
    EntityEmbeddingModel,
    EntityRelationEmbeddingModel,
    ERModel,
    MockModel,
    SimpleInteractionModel,
}
SKIP_MODULES.update(LiteralModel.__subclasses__())
class TestCompGCN(cases.ModelTestCase):
    """Test the CompGCN model."""
    cls = pykeen.models.CompGCN
    create_inverse_triples = True
    num_constant_init = 3 # BN(2) + Bias
    cli_extras = ['--create-inverse-triples']
    def _pre_instantiation_hook(self, kwargs: MutableMapping[str, Any]) -> MutableMapping[str, Any]: # noqa: D102
        kwargs = super()._pre_instantiation_hook(kwargs=kwargs)
        # Move the generic `embedding_dim` kwarg into the encoder's
        # embedding specification, where CompGCN expects it.
        kwargs["encoder_kwargs"] = dict(
            embedding_specification=EmbeddingSpecification(
                embedding_dim=(kwargs.pop("embedding_dim")),
            ),
        )
        return kwargs
class TestComplex(cases.ModelTestCase):
    """Test the ComplEx model."""
    cls = pykeen.models.ComplEx
class TestConvE(cases.ModelTestCase):
    """Test the ConvE model."""
    cls = pykeen.models.ConvE
    # embedding_height * embedding_width (3 * 4) matches embedding_dim.
    embedding_dim = 12
    create_inverse_triples = True
    kwargs = {
        'output_channels': 2,
        'embedding_height': 3,
        'embedding_width': 4,
    }
    # 3x batch norm: bias + scale --> 6
    # entity specific bias --> 1
    # ==================================
    # 7
    num_constant_init = 7
class TestConvKB(cases.ModelTestCase):
    """Test the ConvKB model."""
    cls = pykeen.models.ConvKB
    kwargs = {
        'num_filters': 2,
    }
    # two bias terms, one conv-filter
    num_constant_init = 3
class TestDistMult(cases.ModelTestCase):
    """Test the DistMult model."""
    cls = pykeen.models.DistMult
    def _check_constraints(self):
        """Check model constraints.
        Entity embeddings have to have unit L2 norm.
        """
        entity_norms = self.instance.entity_embeddings(indices=None).norm(p=2, dim=-1)
        assert torch.allclose(entity_norms, torch.ones_like(entity_norms))
    def _test_score_all_triples(self, k: Optional[int], batch_size: int = 16):
        """Test score_all_triples.
        :param k: The number of triples to return. Set to None, to keep all.
        :param batch_size: The batch size to use for calculating scores.
        """
        top_triples, top_scores = predict(model=self.instance, batch_size=batch_size, k=k)
        # check type
        assert torch.is_tensor(top_triples)
        assert torch.is_tensor(top_scores)
        assert top_triples.dtype == torch.long
        assert top_scores.dtype == torch.float32
        # check shape
        actual_k, n_cols = top_triples.shape
        assert n_cols == 3
        if k is None:
            assert actual_k == self.factory.num_entities ** 2 * self.factory.num_relations
        else:
            assert actual_k == min(k, self.factory.num_triples)
        assert top_scores.shape == (actual_k,)
        # check ID ranges: columns 0 and 2 hold entity IDs, column 1 the
        # relation ID.
        assert (top_triples >= 0).all()
        assert top_triples[:, [0, 2]].max() < self.instance.num_entities
        assert top_triples[:, 1].max() < self.instance.num_relations
    def test_score_all_triples(self):
        """Test score_all_triples with a large batch size."""
        # this is only done in one of the models
        self._test_score_all_triples(k=15, batch_size=16)
    def test_score_all_triples_singleton_batch(self):
        """Test score_all_triples with a batch size of 1."""
        self._test_score_all_triples(k=15, batch_size=1)
    def test_score_all_triples_large_batch(self):
        """Test score_all_triples with a batch size larger than k."""
        self._test_score_all_triples(k=10, batch_size=16)
    def test_score_all_triples_keep_all(self):
        """Test score_all_triples with k=None."""
        # this is only done in one of the models
        self._test_score_all_triples(k=None)
class TestERMLP(cases.ModelTestCase):
    """Test the ERMLP model."""
    cls = pykeen.models.ERMLP
    kwargs = {
        'hidden_dim': 4,
    }
    # Two linear layer biases
    num_constant_init = 2
class TestERMLPE(cases.ModelTestCase):
    """Test the extended ERMLP model."""
    cls = pykeen.models.ERMLPE
    kwargs = {
        'hidden_dim': 4,
    }
    # Two BN layers, bias & scale
    num_constant_init = 4
class TestHolE(cases.ModelTestCase):
    """Test the HolE model."""
    cls = pykeen.models.HolE
    def _check_constraints(self):
        """Check model constraints.
        Entity embeddings have to have at most unit L2 norm.
        """
        assert all_in_bounds(self.instance.entity_embeddings(indices=None).norm(p=2, dim=-1), high=1., a_tol=EPSILON)
class TestKG2EWithKL(cases.BaseKG2ETest):
    """Test the KG2E model with KL similarity."""
    kwargs = {
        'dist_similarity': 'KL',  # Kullback-Leibler divergence
    }
class TestMuRE(cases.ModelTestCase):
    """Test the MuRE model."""
    cls = pykeen.models.MuRE
    num_constant_init = 2 # biases
class TestKG2EWithEL(cases.BaseKG2ETest):
    """Test the KG2E model with EL similarity."""
    kwargs = {
        'dist_similarity': 'EL',  # expected likelihood
    }
class TestNTNLowMemory(cases.BaseNTNTest):
    """Test the NTN model with automatic memory optimization."""
    kwargs = {
        'num_slices': 2,
    }
    training_loop_kwargs = {
        'automatic_memory_optimization': True,
    }
class TestNTNHighMemory(cases.BaseNTNTest):
    """Test the NTN model without automatic memory optimization."""
    kwargs = {
        'num_slices': 2,
    }
    training_loop_kwargs = {
        'automatic_memory_optimization': False,
    }
class TestPairRE(cases.ModelTestCase):
    """Test the PairRE model."""
    cls = pykeen.models.PairRE
class TestProjE(cases.ModelTestCase):
    """Test the ProjE model."""
    cls = pykeen.models.ProjE
class TestQuatE(cases.ModelTestCase):
    """Test the QuatE model."""
    cls = pykeen.models.QuatE
    # quaternions have four components, so scale the default dimension by 4
    embedding_dim = 4 * cases.ModelTestCase.embedding_dim
class TestRESCAL(cases.ModelTestCase):
    """Test the RESCAL model."""
    cls = pykeen.models.RESCAL
class TestRGCNBasis(cases.BaseRGCNTest):
    """Test the R-GCN model."""
    kwargs = {
        'interaction': "transe",
        'interaction_kwargs': dict(p=1),  # p=1: L1 norm for the TransE interaction
        'decomposition': "bases",
        "decomposition_kwargs": dict(
            num_bases=3,
        ),
    }
    #: one bias per layer
    num_constant_init = 2
class TestRGCNBlock(cases.BaseRGCNTest):
    """Test the R-GCN model with block decomposition."""
    # NOTE: 6 is divisible by num_blocks=3 below.
    embedding_dim = 6
    kwargs = {
        'interaction': "distmult",
        'decomposition': "block",
        "decomposition_kwargs": dict(
            num_blocks=3,
        ),
        'edge_weighting': "symmetric",
        'use_batch_norm': True,
    }
    #: (scale & bias for BN) * layers
    num_constant_init = 4
class TestRotatE(cases.ModelTestCase):
    """Test the RotatE model."""
    cls = pykeen.models.RotatE
    def _check_constraints(self):
        """Check model constraints.
        Relation embeddings' entries have to have absolute value 1 (i.e. represent a rotation in complex plane)
        """
        # View as (num_relations, dim, 2): the last axis holds the real and
        # imaginary parts, so the L2 norm over it is the complex modulus.
        relation_abs = (
            self.instance
            .relation_embeddings(indices=None)
            .view(self.factory.num_relations, -1, 2)
            .norm(p=2, dim=-1)
        )
        assert torch.allclose(relation_abs, torch.ones_like(relation_abs))
class TestSimplE(cases.ModelTestCase):
    """Test the SimplE model."""
    cls = pykeen.models.SimplE
class _BaseTestSE(cases.ModelTestCase):
    """Shared base for the Structured Embedding tests below."""
    cls = pykeen.models.StructuredEmbedding
    def _check_constraints(self):
        """Check model constraints.
        Entity embeddings have to have unit L2 norm.
        """
        norms = self.instance.entity_embeddings(indices=None).norm(p=2, dim=-1)
        assert torch.allclose(norms, torch.ones_like(norms))
class TestSELowMemory(_BaseTestSE):
    """Tests SE with low memory."""
    training_loop_kwargs = {
        'automatic_memory_optimization': True,
    }
class TestSEHighMemory(_BaseTestSE):
    """Tests SE with high memory (no automatic memory optimization)."""
    training_loop_kwargs = {
        'automatic_memory_optimization': False,
    }
class TestTransD(cases.DistanceModelTestCase):
    """Test the TransD model."""
    cls = pykeen.models.TransD
    kwargs = {
        'relation_dim': 4,
    }
    def _check_constraints(self):
        """Check model constraints.
        Entity and relation embeddings have to have at most unit L2 norm.
        """
        for emb in (self.instance.entity_embeddings, self.instance.relation_embeddings):
            assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1., a_tol=EPSILON)
    def test_score_hrt_manual(self):
        """Manually test interaction function of TransD."""
        # Case 1: relation_dim (1) < entity_dim (2).
        # entity embeddings
        weights = torch.as_tensor(data=[[2., 2.], [4., 4.]], dtype=torch.float)
        entity_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=2,
        )
        entity_embeddings._embeddings.weight.data.copy_(weights)
        self.instance.entity_embeddings = entity_embeddings
        projection_weights = torch.as_tensor(data=[[3., 3.], [2., 2.]], dtype=torch.float)
        entity_projection_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=2,
        )
        entity_projection_embeddings._embeddings.weight.data.copy_(projection_weights)
        self.instance.entity_projections = entity_projection_embeddings
        # relation embeddings
        relation_weights = torch.as_tensor(data=[[4.], [4.]], dtype=torch.float)
        relation_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=1,
        )
        relation_embeddings._embeddings.weight.data.copy_(relation_weights)
        self.instance.relation_embeddings = relation_embeddings
        relation_projection_weights = torch.as_tensor(data=[[5.], [3.]], dtype=torch.float)
        relation_projection_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=1,
        )
        relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
        self.instance.relation_projections = relation_projection_embeddings
        # Compute Scores
        batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 1]], dtype=torch.long)
        scores = self.instance.score_hrt(hrt_batch=batch)
        self.assertEqual(scores.shape[0], 2)
        self.assertEqual(scores.shape[1], 1)
        first_score = scores[0].item()
        self.assertAlmostEqual(first_score, -16, delta=0.01)
        # Use different dimension for relation embedding: relation_dim > entity_dim
        # relation embeddings
        relation_weights = torch.as_tensor(data=[[3., 3., 3.], [3., 3., 3.]], dtype=torch.float)
        relation_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=3,
        )
        relation_embeddings._embeddings.weight.data.copy_(relation_weights)
        self.instance.relation_embeddings = relation_embeddings
        relation_projection_weights = torch.as_tensor(data=[[4., 4., 4.], [4., 4., 4.]], dtype=torch.float)
        relation_projection_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=3,
        )
        relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
        self.instance.relation_projections = relation_projection_embeddings
        # Compute Scores
        batch = torch.as_tensor(data=[[0, 0, 0]], dtype=torch.long)
        scores = self.instance.score_hrt(hrt_batch=batch)
        self.assertAlmostEqual(scores.item(), -27, delta=0.01)
        # Identical rows must yield identical scores.
        batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 0]], dtype=torch.long)
        scores = self.instance.score_hrt(hrt_batch=batch)
        self.assertEqual(scores.shape[0], 2)
        self.assertEqual(scores.shape[1], 1)
        first_score = scores[0].item()
        second_score = scores[1].item()
        self.assertAlmostEqual(first_score, -27, delta=0.01)
        self.assertAlmostEqual(second_score, -27, delta=0.01)
        # Use different dimension for relation embedding: relation_dim < entity_dim
        # entity embeddings
        weights = torch.as_tensor(data=[[1., 1., 1.], [1., 1., 1.]], dtype=torch.float)
        entity_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=3,
        )
        entity_embeddings._embeddings.weight.data.copy_(weights)
        self.instance.entity_embeddings = entity_embeddings
        projection_weights = torch.as_tensor(data=[[2., 2., 2.], [2., 2., 2.]], dtype=torch.float)
        entity_projection_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=3,
        )
        entity_projection_embeddings._embeddings.weight.data.copy_(projection_weights)
        self.instance.entity_projections = entity_projection_embeddings
        # relation embeddings
        relation_weights = torch.as_tensor(data=[[3., 3.], [3., 3.]], dtype=torch.float)
        relation_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=2,
        )
        relation_embeddings._embeddings.weight.data.copy_(relation_weights)
        self.instance.relation_embeddings = relation_embeddings
        relation_projection_weights = torch.as_tensor(data=[[4., 4.], [4., 4.]], dtype=torch.float)
        relation_projection_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=2,
        )
        relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
        self.instance.relation_projections = relation_projection_embeddings
        # Compute Scores
        batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 0]], dtype=torch.long)
        scores = self.instance.score_hrt(hrt_batch=batch)
        self.assertEqual(scores.shape[0], 2)
        self.assertEqual(scores.shape[1], 1)
        first_score = scores[0].item()
        second_score = scores[1].item()
        self.assertAlmostEqual(first_score, -18, delta=0.01)
        self.assertAlmostEqual(second_score, -18, delta=0.01)
    def test_project_entity(self):
        """Test _project_entity."""
        # random entity embeddings & projections
        e = torch.rand(1, self.instance.num_entities, self.embedding_dim, generator=self.generator)
        e = clamp_norm(e, maxnorm=1, p=2, dim=-1)
        e_p = torch.rand(1, self.instance.num_entities, self.embedding_dim, generator=self.generator)
        # random relation embeddings & projections
        r = torch.rand(self.batch_size, 1, self.instance.relation_dim, generator=self.generator)
        r = clamp_norm(r, maxnorm=1, p=2, dim=-1)
        r_p = torch.rand(self.batch_size, 1, self.instance.relation_dim, generator=self.generator)
        # project
        e_bot = _project_entity(e=e, e_p=e_p, r=r, r_p=r_p)
        # check shape:
        e_bot = e_bot if True else e_bot  # noqa: keep trace identical
        assert e_bot.shape == (self.batch_size, self.instance.num_entities, self.instance.relation_dim)
        # check normalization
        assert (torch.norm(e_bot, dim=-1, p=2) <= 1.0 + 1.0e-06).all()
class TestTransE(cases.DistanceModelTestCase):
    """Test the TransE model."""
    cls = pykeen.models.TransE
    def _check_constraints(self):
        """Check model constraints.
        Entity embeddings have to have unit L2 norm.
        """
        entity_norms = self.instance.entity_embeddings(indices=None).norm(p=2, dim=-1)
        assert torch.allclose(entity_norms, torch.ones_like(entity_norms))
class TestTransH(cases.DistanceModelTestCase):
    """Test the TransH model."""
    cls = pykeen.models.TransH
    def _check_constraints(self):
        """Check model constraints.
        The relations' normal vectors have to have unit L2 norm.
        """
        entity_norms = self.instance.normal_vector_embeddings(indices=None).norm(p=2, dim=-1)
        assert torch.allclose(entity_norms, torch.ones_like(entity_norms))
class TestTransR(cases.DistanceModelTestCase):
    """Test the TransR model."""
    cls = pykeen.models.TransR
    kwargs = {
        'relation_dim': 4,
    }
    def test_score_hrt_manual(self):
        """Manually test interaction function of TransR."""
        # entity embeddings (dim 2)
        weights = torch.as_tensor(data=[[2., 2.], [3., 3.]], dtype=torch.float)
        entity_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=2,
        )
        entity_embeddings._embeddings.weight.data.copy_(weights)
        self.instance.entity_embeddings = entity_embeddings
        # relation embeddings (dim 2)
        relation_weights = torch.as_tensor(data=[[4., 4], [5., 5.]], dtype=torch.float)
        relation_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=2,
        )
        relation_embeddings._embeddings.weight.data.copy_(relation_weights)
        self.instance.relation_embeddings = relation_embeddings
        # relation projection weights (dim 4; presumably a flattened 2x2 matrix)
        relation_projection_weights = torch.as_tensor(data=[[5., 5., 6., 6.], [7., 7., 8., 8.]], dtype=torch.float)
        relation_projection_embeddings = Embedding(
            num_embeddings=2,
            embedding_dim=4,
        )
        relation_projection_embeddings._embeddings.weight.data.copy_(relation_projection_weights)
        self.instance.relation_projections = relation_projection_embeddings
        # Compute Scores
        batch = torch.as_tensor(data=[[0, 0, 0], [0, 0, 1]], dtype=torch.long)
        scores = self.instance.score_hrt(hrt_batch=batch)
        self.assertEqual(scores.shape[0], 2)
        self.assertEqual(scores.shape[1], 1)
        first_score = scores[0].item()
        # second_score = scores[1].item()
        self.assertAlmostEqual(first_score, -32, delta=0.01)
    def _check_constraints(self):
        """Check model constraints.
        Entity and relation embeddings have to have at most unit L2 norm.
        """
        for emb in (self.instance.entity_embeddings, self.instance.relation_embeddings):
            assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1., a_tol=1.0e-06)
class TestTuckEr(cases.ModelTestCase):
    """Test the TuckER model."""
    cls = pykeen.models.TuckER
    kwargs = {
        'relation_dim': 4,
    }
    #: 2xBN (bias & scale)
    num_constant_init = 4
class TestUM(cases.DistanceModelTestCase):
    """Test the Unstructured Model."""
    cls = pykeen.models.UnstructuredModel
class TestTesting(unittest_templates.MetaTestCase[Model]):
    """Yo dawg, I heard you like testing, so I wrote a test to test the tests so you can test while you're testing."""
    base_test = cases.ModelTestCase
    base_cls = Model
    skip_cls = SKIP_MODULES
    def test_documentation(self):
        """Test all models have appropriate structured documentation."""
        for name, cls in sorted(model_resolver.lookup_dict.items()):
            with self.subTest(name=name):
                try:
                    docdata = cls.__docdata__
                except AttributeError:
                    self.fail('missing __docdata__')
                # Every model must carry a structured citation record.
                self.assertIn('citation', docdata)
                self.assertIn('author', docdata['citation'])
                self.assertIn('link', docdata['citation'])
                self.assertIn('year', docdata['citation'])
    def test_importing(self):
        """Test that all models are available from :mod:`pykeen.models`."""
        models_path = os.path.abspath(os.path.dirname(pykeen.models.__file__))
        model_names = set()
        # Walk the package source tree and collect every Model subclass
        # visible in any submodule.
        for directory, _, filenames in os.walk(models_path):
            for filename in filenames:
                if not filename.endswith('.py'):
                    continue
                path = os.path.join(directory, filename)
                relpath = os.path.relpath(path, models_path)
                if relpath.endswith('__init__.py'):
                    continue
                # Convert the relative file path into a dotted module path.
                import_path = 'pykeen.models.' + relpath[:-len('.py')].replace(os.sep, '.')
                module = importlib.import_module(import_path)
                for name in dir(module):
                    value = getattr(module, name)
                    if (
                        isinstance(value, type)
                        and issubclass(value, Model)
                    ):
                        model_names.add(value.__name__)
        # Everything discovered on disk must also be exported via __all__.
        star_model_names = _remove_non_models(set(pykeen.models.__all__) - SKIP_MODULES)
        model_names = _remove_non_models(model_names - SKIP_MODULES)
        self.assertEqual(model_names, star_model_names, msg='Forgot to add some imports')
    @unittest.skip('no longer necessary?')
    def test_models_have_experiments(self):
        """Test that each model has an experiment folder in :mod:`pykeen.experiments`."""
        experiments_path = os.path.abspath(os.path.dirname(pykeen.experiments.__file__))
        # Models intentionally lacking a reference experiment configuration.
        experiment_blacklist = {
            'DistMultLiteral',  # FIXME
            'ComplExLiteral',  # FIXME
            'UnstructuredModel',
            'StructuredEmbedding',
            'RESCAL',
            'NTN',
            'ERMLP',
            'ProjE',  # FIXME
            'ERMLPE',  # FIXME
            'PairRE',
            'QuatE',
        }
        model_names = _remove_non_models(set(pykeen.models.__all__) - SKIP_MODULES - experiment_blacklist)
        for model in _remove_non_models(model_names):
            with self.subTest(model=model):
                self.assertTrue(
                    os.path.exists(os.path.join(experiments_path, model.lower())),
                    msg=f'Missing experimental configuration for {model}',
                )
def _remove_non_models(elements):
    """Return the subset of *elements* that resolve to actual model names."""
    def _is_model(name):
        # model_resolver.lookup raises ValueError for non-model names.
        try:
            model_resolver.lookup(name)
        except ValueError:
            return False
        return True
    return {element for element in elements if _is_model(element)}
class TestModelUtilities(unittest.TestCase):
    """Extra tests for utility functions."""
    def test_get_novelty_mask(self):
        """Test `get_novelty_mask()`."""
        num_triples = 7
        base = torch.arange(num_triples)
        # Known triples of the form (i, i, 3*i); only row 0 has column 2 == 0.
        mapped_triples = torch.stack([base, base, 3 * base], dim=-1)
        query_ids = torch.randperm(num_triples).numpy()[:num_triples // 2]
        # Only id 0 collides with an existing triple, so all other ids are novel.
        exp_novel = query_ids != 0
        col = 2
        other_col_ids = numpy.asarray([0, 0])
        mask = get_novelty_mask(
            mapped_triples=mapped_triples,
            query_ids=query_ids,
            col=col,
            other_col_ids=other_col_ids,
        )
        assert mask.shape == query_ids.shape
        assert (mask == exp_novel).all()
    def test_extend_batch(self):
        """Test `_extend_batch()`."""
        # 12 two-column rows; extending inserts one id column at each dim.
        batch = torch.tensor([[a, b] for a in range(3) for b in range(4)]).view(-1, 2)
        all_ids = [2 * i for i in range(5)]
        batch_size = batch.shape[0]
        num_choices = len(all_ids)
        for dim in range(3):
            h_ext_batch = extend_batch(batch=batch, all_ids=all_ids, dim=dim)
            # check shape
            assert h_ext_batch.shape == (batch_size * num_choices, 3)
            # check content
            actual_content = set(tuple(map(int, hrt)) for hrt in h_ext_batch)
            exp_content = set()
            for i in all_ids:
                for b in batch:
                    c = list(map(int, b))
                    c.insert(dim, i)
                    exp_content.add(tuple(c))
            assert actual_content == exp_content
class ERModelTests(cases.ModelTestCase):
    """Tests for the general ER-Model."""
    cls = pykeen.models.ERModel
    kwargs = dict(
        interaction="distmult",  # use name to test interaction resolution
    )
    def _pre_instantiation_hook(self, kwargs: MutableMapping[str, Any]) -> MutableMapping[str, Any]:  # noqa: D102
        kwargs = super()._pre_instantiation_hook(kwargs=kwargs)
        # ERModel expects explicit representation specs instead of embedding_dim.
        embedding_dim = kwargs.pop("embedding_dim")
        kwargs["entity_representations"] = EmbeddingSpecification(embedding_dim=embedding_dim)
        kwargs["relation_representations"] = EmbeddingSpecification(embedding_dim=embedding_dim)
        return kwargs
    def test_has_hpo_defaults(self):  # noqa: D102
        raise unittest.SkipTest(f"Base class {self.cls} does not provide HPO defaults.")
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from conda.exports import text_type
from contextlib import contextmanager
from logging import getLogger, Handler
from os.path import exists, join
from shlex import split
from shutil import rmtree
from tempfile import mkdtemp
from unittest import TestCase
from uuid import uuid4
import mock
from conda.cli import conda_argparse
from conda.install import on_win
from conda_env.cli.main_create import configure_parser as create_configure_parser
from conda_env.cli.main_update import configure_parser as update_configure_parser
from conda.core.linked_data import linked
from . import utils
# Platform-dependent location of the python executable inside an env prefix.
PYTHON_BINARY = 'python.exe' if on_win else 'bin/python'
def escape_for_winpath(p):
    """Double every backslash so Windows paths survive shlex splitting."""
    return '\\\\'.join(p.split('\\'))
def disable_dotlog():
    """Silence the 'dotupdate' logger and return its previous handlers."""
    class _DevNull(Handler):
        def emit(self, record):
            pass
    logger = getLogger('dotupdate')
    previous_handlers = logger.handlers
    logger.handlers = []
    logger.addHandler(_DevNull())
    return previous_handlers
def reenable_dotlog(handlers):
    """Restore previously saved handlers on the 'dotupdate' logger."""
    getLogger('dotupdate').handlers = handlers
class Commands:
    """String constants naming the supported conda-env subcommands."""
    CREATE = "create"
    UPDATE = "update"
# Maps each subcommand to the function that wires up its argparse subparser.
parser_config = {
    Commands.CREATE: create_configure_parser,
    Commands.UPDATE: update_configure_parser,
}
def run_command(command, envs_dir, env_name, *arguments):
    """Build and execute a conda-env CLI command against *envs_dir*.

    :param command: one of the Commands constants ('create' or 'update')
    :param envs_dir: directory patched in as the only known envs dir
    :param env_name: name of the environment to operate on
    :param arguments: extra CLI arguments, e.g. an environment file path
    """
    p = conda_argparse.ArgumentParser()
    sub_parsers = p.add_subparsers(metavar='command', dest='cmd')
    parser_config[command](sub_parsers)
    # Escape backslashes so Windows paths survive the shlex split() below.
    arguments = list(map(escape_for_winpath, arguments))
    command_line = "{0} -n {1} -f {2}".format(command, env_name, " ".join(arguments))
    args = p.parse_args(split(command_line))
    # Restrict conda-env to the temporary envs dir for the duration of the call.
    with mock.patch('conda_env.cli.common.envs_dirs', new=(envs_dir,)):
        args.func(args, p)
@contextmanager
def make_temp_envs_dir():
    """Yield a freshly created temporary envs directory, removing it afterwards."""
    path = mkdtemp()
    try:
        yield path
    finally:
        # Best-effort cleanup; ignore files the OS refuses to delete.
        rmtree(path, ignore_errors=True)
def package_is_installed(prefix, dist, exact=False):
    """Return True when *dist* is linked into the environment at *prefix*.

    With exact=False, *dist* may be a prefix such as 'flask-0.9'.  When the
    spec contains '::' (channel-qualified), matching is done against the full
    linked dist records instead of the bare dist names.
    """
    packages = list(linked(prefix))
    if '::' not in text_type(dist):
        packages = [p.dist_name for p in packages]
    if exact:
        return dist in packages
    return any(p.startswith(dist) for p in packages)
def assert_package_is_installed(prefix, package, exact=False):
    """Raise AssertionError when *package* is not linked into *prefix*."""
    if package_is_installed(prefix, package, exact):
        return
    # Dump what IS installed to help debug the failure.
    print(list(linked(prefix)))
    raise AssertionError("package {0} is not in prefix".format(package))
class IntegrationTests(TestCase):
    """End-to-end tests driving the conda-env CLI against a temp envs dir."""
    def setUp(self):
        # Silence the 'dotupdate' progress logger for the duration of the test.
        self.saved_dotlog_handlers = disable_dotlog()
    def tearDown(self):
        reenable_dotlog(self.saved_dotlog_handlers)
    def test_create_update(self):
        # Create an env from a pinned file, then update it to newer pins.
        with make_temp_envs_dir() as envs_dir:
            env_name = str(uuid4())[:8]
            prefix = join(envs_dir, env_name)
            python_path = join(prefix, PYTHON_BINARY)
            run_command(Commands.CREATE, envs_dir, env_name, utils.support_file('example/environment_pinned.yml'))
            assert exists(python_path)
            assert_package_is_installed(prefix, 'flask-0.9')
            # The updated pin file should replace flask 0.9 with 0.10.1.
            run_command(Commands.UPDATE, envs_dir, env_name, utils.support_file('example/environment_pinned_updated.yml'))
            assert_package_is_installed(prefix, 'flask-0.10.1')
            assert not package_is_installed(prefix, 'flask-0.9')
|
# String Format
# As we learned in the Python Variables chapter, we cannot combine strings and numbers like this:
'''
age = 23
txt = "My name is Nayan, I am " + age + "Years old"
print(txt) #Error
'''
# But we can combine strings and numbers by using the format() method!
# The format() method takes the passed arguments, formats them, and places
# them in the string where the placeholders {} are:
age = 23
txt = "My name is Nayan, and I'm {} Years old"
print(txt.format(age))
# format() accepts an unlimited number of arguments; they are placed into the
# respective placeholders in order:
quantity = 5
itemno = 1001
price = 101.05
myorder = "I want {} pieces of item {} for {} Rupees"
print(myorder.format(quantity, itemno, price))
# You can use index numbers {0} to be sure the arguments are placed in the
# correct placeholders, independent of argument order:
quantity = 5
itemno = 1001
price = 101.05
myorder = "I want to pay {2} Rupees for {0} pieces of item {1}. "
print(myorder.format(quantity, itemno, price))
|
# Copyright 2019-present Kensho Technologies, LLC.
from itertools import chain
from graphql.type import GraphQLSchema
from ...schema import (
FilterDirective,
FoldDirective,
MacroEdgeDefinitionDirective,
MacroEdgeDirective,
MacroEdgeTargetDirective,
OptionalDirective,
RecurseDirective,
TagDirective,
check_for_nondefault_directive_names,
)
# Directives reserved for macro edges
MACRO_EDGE_DIRECTIVES = (
    MacroEdgeDirective,
    MacroEdgeDefinitionDirective,
    MacroEdgeTargetDirective,
)
# Names of directives required present in a macro edge definition
DIRECTIVES_REQUIRED_IN_MACRO_EDGE_DEFINITION = frozenset(
    {MacroEdgeDefinitionDirective.name, MacroEdgeTargetDirective.name}
)
# Names of directives allowed within a macro edge definition: the standard
# query directives plus the required macro-definition directives above.
# MacroEdgeDirective itself is not included.
DIRECTIVES_ALLOWED_IN_MACRO_EDGE_DEFINITION = frozenset(
    {
        FoldDirective.name,
        FilterDirective.name,
        OptionalDirective.name,
        TagDirective.name,
        RecurseDirective.name,
    }.union(DIRECTIVES_REQUIRED_IN_MACRO_EDGE_DEFINITION)
)
def get_schema_for_macro_edge_definitions(querying_schema):
    """Given a schema object used for querying, create a schema used for macro edge definitions."""
    existing_directives = querying_schema.directives
    check_for_nondefault_directive_names(existing_directives)
    # Directives that must be available when defining a macro edge.
    macro_definition_directives = [
        MacroEdgeDefinitionDirective,
        MacroEdgeTargetDirective,
    ]
    # Keep only the directives permitted inside a macro edge definition.
    filtered_directives = []
    for directive in chain(existing_directives, macro_definition_directives):
        if directive.name in DIRECTIVES_ALLOWED_IN_MACRO_EDGE_DEFINITION:
            filtered_directives.append(directive)
    schema_kwargs = querying_schema.to_kwargs()
    schema_kwargs["directives"] = filtered_directives
    return GraphQLSchema(**schema_kwargs)
|
"""
Blueprint module for accessing request states and results
"""
import logging
from flask import abort, Blueprint, jsonify, url_for
# Module-level logger for this blueprint.
log = logging.getLogger(__name__)
# Generate absolute URLs (scheme + host) in responses rather than bare paths.
USE_EXTERNAL_URLS = True
def get_state_response(ticket_id: str):
    """
    Build a JSON response containing a valid url for the state of the
    request associated with the supplied ticket id.

    :param ticket_id: the ticket id
    :return: JSON representation of the state url response
    """
    state_url = url_for('requests.get_state',
                        request_id=ticket_id,
                        _external=USE_EXTERNAL_URLS)
    return jsonify({'stateUrl': state_url})
def create_blueprint(request_manager):
    """
    Creates an instance of the blueprint.

    :param request_manager: object queried for request state and results
    :return: a Flask Blueprint mounted under /requests
    """
    blueprint = Blueprint('requests', __name__, url_prefix='/requests')
    # The nested view functions are registered via decorators, not referenced
    # directly, hence the pylint suppression.
    # pylint: disable=unused-variable
    @blueprint.route('<request_id>/state')
    def get_state(request_id):
        """
        Retrieves the state of the specified request.
        ---
        parameters:
          - name: request_id
            description: id of the request
            in: path
            type: string
            required: true
        definitions:
          RequestResponse:
            description: Object containing the URL of a requests state
            type: object
            properties:
              stateUrl:
                description: URL the requests state can be retrieved from
                type: string
          StateResponse:
            description: Object describing request state and result url
            type: object
            properties:
              done:
                description: whether the processing of the request is done
                type: boolean
              resultUrl:
                description: URL the requests result can be retrieved from
                type: string
        responses:
          200:
            application/json:
              schema:
                $ref: '#/definitions/StateResponse'
        """
        # TODO 404 on invalid request_id or no futures
        return jsonify({
            'done': request_manager.request_processed(request_id),
            'resultUrl':
                url_for('requests.get_result',
                        request_id=request_id,
                        _external=USE_EXTERNAL_URLS)
        })
    @blueprint.route('<request_id>/result')
    def get_result(request_id):
        """
        Retrieves the result of the specified request.
        ---
        parameters:
          - name: request_id
            description: id of the request
            in: path
            type: string
            required: true
        responses:
          200:
            application/json:
              schema:
                description: object defined by the type of request
                type: object
        """
        # 404 until the request has finished processing.
        if not request_manager.request_processed(request_id):
            log.info('request "%s" not done or result already retrieved',
                     request_id)
            abort(404)
        result = request_manager.get_result(request_id)
        log.debug(result)
        # A finished request with no payload still returns valid JSON.
        if not result:
            return jsonify({})
        return jsonify(result)
    return blueprint
|
import sys
# dest mnemonic -> 3-bit destination field (write enables for A, D, M).
DESTINATIONS = {
    '': '000',
    'M': '001',
    'D': '010',
    'MD': '011',
    'A': '100',
    'AM': '101',
    'AD': '110',
    'AMD': '111'
}
# jump mnemonic -> 3-bit jump field of a C-instruction.
JUMPTARGETS = {
    '': '000',
    'JGT': '001',
    'JEQ': '010',
    'JGE': '011',
    'JLT': '100',
    'JNE': '101',
    'JLE': '110',
    'JMP': '111'
}
# comp mnemonic -> 7-bit a+cccccc field; leading bit selects A (0) or M (1).
COMPUTATIONS = {
    '0': '0101010',
    '1': '0111111',
    '-1': '0111010',
    'D': '0001100',
    'A': '0110000',
    '!D': '0001101',
    '!A': '0110001',
    '-D': '0001111',
    '-A': '0110011',
    'D+1': '0011111',
    'A+1': '0110111',
    'D-1': '0001110',
    'A-1': '0110010',
    'D+A': '0000010',
    'D-A': '0010011',
    'A-D': '0000111',
    'D&A': '0000000',
    'D|A': '0010101',
    'M': '1110000',
    '!M': '1110001',
    '-M': '1110011',
    'M+1': '1110111',
    'M-1': '1110010',
    'D+M': '1000010',
    'D-M': '1010011',
    'M-D': '1000111',
    'D&M': '1000000',
    'D|M': '1010101'
}
def makeSymbolTable():
    """(Re)create the global SYMBOLS table with Hack's predefined symbols."""
    global SYMBOLS
    table = {
        'SP': '0',
        'LCL': '1',
        'ARG': '2',
        'THIS': '3',
        'THAT': '4',
        'SCREEN': '16384',
        'KBD': '24576',
    }
    # Virtual registers R0..R15 map to RAM addresses 0..15.
    for reg in range(16):
        table['R%d' % reg] = str(reg)
    SYMBOLS = table
def isSymbol(s):
    """Return True when s is a known symbol or a syntactically valid new one."""
    extra_chars = "_.$:"
    if not s:
        return False
    # A symbol may not begin with a digit.
    if s[0].isdigit():
        return False
    # Known symbols pass unconditionally; new ones must use valid characters.
    if s not in SYMBOLS:
        for ch in s:
            if not (ch.isalnum() or ch in extra_chars):
                return False
    return True
def isConstant(s):
    """Return True when s is a decimal constant in the A-instruction range 0..32767."""
    if not s.isdigit():
        return False
    # At most five digits (so e.g. '000001' is rejected, matching the
    # original behavior) and within the 15-bit address range.
    if not (1 <= len(s) <= 5):
        return False
    value = int(s)
    return 0 <= value <= 32767
def fail(msg, srcline, srclinenumber):
    """Report an assembler error on stderr and abort with exit status 1.

    :param msg: description of the problem
    :param srcline: the offending source line
    :param srclinenumber: its 1-based line number

    BUG FIX: the original used Python 2 ``print >> sys.stderr`` syntax,
    which is a SyntaxError under Python 3 (the rest of this file uses
    the print() function).
    """
    print("%s from %s at %s" % (msg, srcline, srclinenumber), file=sys.stderr)
    sys.exit(1)
def parseLine(s, lineNum):
    """Parse one assembly source line into a tagged tuple.

    Returns None for blank/comment-only lines, otherwise one of:
      ('A_INSTRUCTION', symbol_or_constant, srcLine, lineNum)
      ('L_INSTRUCTION', label, srcLine, lineNum)
      ('C_INSTRUCTION', dest, comp, jump, srcLine, lineNum)
    Malformed input goes to fail(), which exits the process.
    """
    srcLine = s
    #if its a comment, strip everything from '//' onwards
    comment = s.find("//")
    if comment != -1:
        s = s[0:comment]
    s = s.strip()
    if s == '':
        return None
    #if its an a instruction: '@symbol' or '@constant'
    if s[0] == '@':
        s = s[1:]
        s = s.strip()
        if isSymbol(s) or isConstant(s):
            return ('A_INSTRUCTION', s, srcLine, lineNum)
        fail("not a valid A instruction", srcLine, lineNum)
    #if its a label: '(SYMBOL)'
    if s[0] == '(':
        label = s.find(')')
        s=s[1:label].strip()
        if isSymbol(s):
            return ('L_INSTRUCTION', s, srcLine, lineNum)
        fail("not a valid L instruction", srcLine, lineNum)
    #if its none of the above it must be a C: dest=comp;jump
    destination = s.find('=')
    dest = ''
    if destination != -1:
        dest = s[0:destination].strip()
        s = s[destination+1:]
        # An '=' with an empty or unknown left-hand side is invalid.
        if dest not in DESTINATIONS or dest == "":
            fail("not a valid destination", srcLine, lineNum)
    targets = s.find(';')
    jump = ''
    if targets != -1:
        jump = s[targets+1:].strip()
        s = s[0:targets]
        # A ';' with an empty or unknown jump mnemonic is invalid.
        if jump not in JUMPTARGETS or jump == "":
            fail("not a jump target", srcLine, lineNum)
    s = s.strip()
    if s in COMPUTATIONS:
        return ('C_INSTRUCTION', dest, s, jump, srcLine, lineNum)
    else:
        fail("not a valid C instruction", srcLine, lineNum)
def main():
    """Assemble the .asm file named on the command line into a .hack file."""
    #initializing everything
    if len(sys.argv) != 2 or sys.argv[1][-4:] != ".asm":
        print ("command line arguement should be name of input file ending in a .asm")
        return
    total = []
    makeSymbolTable()
    fin = open(sys.argv[1], 'r')
    fout = open(sys.argv[1][0:-4] + ".hack", 'w')
    lineCount = 1
    #parse everything into tagged tuples
    for line in fin:
        parsed = parseLine(line, lineCount)
        lineCount += 1
        if parsed != None:
            total.append(parsed)
    #translate to binary
    #first pass: record label addresses (labels themselves emit no instruction,
    #hence the lInstCount correction).
    lineCount = -1
    lInstCount = 0
    for line in total:
        lineCount+=1
        if line[0][0] == 'L':
            SYMBOLS[line[1]] = str(lineCount-lInstCount)
            lInstCount += 1
    #second pass: emit machine code; new variables get RAM slots from 16 up.
    #print SYMBOLS
    symbolCount = 16
    for line in total:
        if line[0][0] == 'A':
            if isConstant(line[1]):
                num = int(line[1])
                num = bin(num)[2:].zfill(15)
                fout.write('0' + str(num)+"\n")
            else:
                if line[1] not in SYMBOLS:
                    # NOTE(review): stored as an int while the rest of SYMBOLS
                    # holds strings; int() below accepts both, but it is
                    # inconsistent — confirm before relying on value types.
                    SYMBOLS[line[1]] = symbolCount
                    symbolCount += 1
                #print SYMBOLS
                if line[1] in SYMBOLS:
                    num = int(SYMBOLS[line[1]])
                    num = bin(num)[2:].zfill(15)
                    fout.write('0' + str(num)+"\n")
        elif line[0][0] == 'C':
            fout.write("111"+COMPUTATIONS[line[2]]+DESTINATIONS[line[1]]+JUMPTARGETS[line[3]] + "\n")
    fin.close()
    fout.close()
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
#! /usr/bin/env python
# title : SisoDecoder.py
# description : This class implements soft input soft output decoder for specified trellis.
# Its input is a trellis instance. The max-log-BCJR algorithm is employed.
# author : Felix Arnold
# python_version : 3.5.2
class SisoDecoder(object):
    """Soft-input soft-output decoder implementing max-log-BCJR on a trellis."""
    def __init__(self, trellis):
        # trellis supplies the branch/state tables (get_*_pc) and the sizes:
        # Ns states, wu data bits and wc coded bits per trellis stage.
        self.state = 0
        self.trellis = trellis
        # When True, the corresponding recursion is pinned to state 0 at its
        # starting end; otherwise all states start with equal metric.
        self.forward_init = True
        self.backward_init = True
        # Finite stand-in for -infinity in the max-log metric domain.
        self.minus_inf = -10
    def decode(self, input_u, input_c, n_data):
        """Run one max-log-BCJR pass over n_data data bits.

        :param input_u: soft values for the data (systematic) bits
        :param input_c: soft values for the encoded bits
        :param n_data: number of data bits (n_data / wu trellis stages)
        :return: (output_u, output_c) — soft outputs for data and encoded bits
        """
        minus_inf = self.minus_inf
        trellis = self.trellis
        n_stages = int(n_data / self.trellis.wu)
        sm_vec_init = [0] + [minus_inf * self.forward_init] * (trellis.Ns - 1)  # init state metric vector
        # forward (alpha): accumulate best path metric into each state
        sm_vec = sm_vec_init
        sm_forward = []
        for i in range(0, n_stages):  # for each stage
            sm_vec_new = []
            cin = input_c[trellis.wc * i:trellis.wc * (i + 1)]
            uin = input_u[trellis.wu * i: trellis.wu * (i + 1)]
            for j in range(trellis.Ns):  # for each state
                branches = trellis.get_prev_branches_pc[j]
                branch_sums = []
                for k in range(len(branches)):  # for each branch
                    # Branch metric (gamma): sum the soft inputs wherever the
                    # branch's encoded/data bits are 1.
                    branch_metric = 0
                    for l in range(trellis.wc):  # for each encoded bit
                        if trellis.get_enc_bits_pc[branches[k]][l] == 1:
                            branch_metric += cin[l]
                    for l in range(trellis.wu):  # for each data bit
                        if trellis.get_dat_pc[branches[k]][l]:
                            branch_metric += uin[l]
                    branch_sums.append(sm_vec[trellis.get_prev_state_pc[branches[k]]] + branch_metric)  # add (gamma)
                sm_vec_new.append(max(branch_sums))  # compare and select
            sm_vec = list(sm_vec_new)
            sm_forward.append(sm_vec)
        # backward (beta): same recursion from the far end, combining with the
        # stored forward metrics to produce soft outputs along the way
        output_u = []
        output_c = []
        sm_vec = [0] + [minus_inf * self.backward_init] * (trellis.Ns - 1)  # init state metric vector
        for i in reversed(range(0, n_stages)):  # for each stage
            sm_vec_new = []
            cin = input_c[trellis.wc * i:trellis.wc * (i + 1)]
            uin = input_u[trellis.wu * i: trellis.wu * (i + 1)]
            # Per-bit maxima of the total metric, split by bit value (0/1).
            # NOTE: the comprehension variable i shadows the stage index i —
            # harmless in Python 3, where comprehensions have their own scope.
            max_branch_dat = [[minus_inf, minus_inf] for i in range(trellis.wu)]
            max_branch_enc = [[minus_inf, minus_inf] for i in range(trellis.wc)]
            for j in range(trellis.Ns):  # for each state
                branches = trellis.get_next_branches_pc[j]
                branch_sums = []
                for k in range(len(branches)):  # for each branch
                    branch_metric = 0
                    for l in range(trellis.wc):  # for each encoded bit
                        if trellis.get_enc_bits_pc[branches[k]][l] == 1:
                            branch_metric += cin[l]
                    for l in range(trellis.wu):  # for each data bit
                        if trellis.get_dat_pc[branches[k]][l]:
                            branch_metric += uin[l]
                    branch_sum = sm_vec[trellis.get_next_state_pc[branches[k]]] + branch_metric  # add (gamma)
                    branch_sums.append(branch_sum)
                    # add the state metric from the forward propagation -> total = alpha + gamma + beta
                    if i == 0:
                        total_metric = branch_sum + sm_vec_init[j]
                    else:
                        total_metric = branch_sum + sm_forward[i - 1][j]
                    # soft encoded output calculation
                    enc = trellis.get_enc_bits_pc[branches[k]]
                    for n in range(trellis.wc):
                        if total_metric > max_branch_enc[n][enc[n]]:
                            max_branch_enc[n][enc[n]] = total_metric
                    # soft data output calculation
                    dat = trellis.get_dat_pc[branches[k]]
                    for n in range(trellis.wu):
                        if total_metric > max_branch_dat[n][dat[n]]:
                            max_branch_dat[n][dat[n]] = total_metric
                sm_vec_new.append(max(branch_sums))  # compare and select
            sm_vec = list(sm_vec_new)
            # Soft output per bit: metric difference between best bit=1 and
            # best bit=0 path; inserted at the front since stages run backwards.
            for n in reversed(range(trellis.wu)):  # soft output
                output_u.insert(0, max_branch_dat[n][1] - max_branch_dat[n][0])
            for n in reversed(range(trellis.wc)):  # soft encoded output
                output_c.insert(0, max_branch_enc[n][1] - max_branch_enc[n][0])
        return output_u, output_c
|
# TODO
#
# @author Oktay Acikalin <oktay.acikalin@gmail.com>
# @copyright Oktay Acikalin
# @license MIT (LICENSE.txt)
# TODO make use of http://docs.python.org/library/itertools.html
from inspect import getargspec
from diamond.helper.weak_ref import Wrapper
# from diamond.decorators import time, dump_args
# Global registry: event name -> set of Listener objects.
listeners = {}
# Each rule returns True when the filter does NOT match (it feeds the
# 'matching_failed' flag in emit()), hence the inverted comparisons below.
basic_rules = dict(
    instance__is=lambda context, value: context is not value,
    instance__is_not=lambda context, value: context is value,
    class__is=lambda context, value: not isinstance(context, value),
)
# Operators usable as the suffix of a 'context__...' filter key.
context_rules = {
    'eq': lambda context_value, value: context_value != value,
    'neq': lambda context_value, value: context_value == value,
    'contains': lambda context_value, value: value not in context_value,
    'gt': lambda context_value, value: context_value <= value,
    'gte': lambda context_value, value: context_value < value,
    'lt': lambda context_value, value: context_value >= value,
    'lte': lambda context_value, value: context_value > value,
    'is': lambda context_value, value: context_value is not value,
    'is_not': lambda context_value, value: context_value is value,
    'in': lambda context_value, value: context_value not in value,
    'returns': lambda context_value, value: context_value() != value,
    'amp': lambda context_value, value: not (context_value & value),
}
# Filter keys whose values add_listener() wraps in weak references.
proxy_rules = set([
    'instance__is',
    'instance__is_not',
    'class__is',
    'is',
    'is_not',
])
# Filter-key suffixes whose values are likewise weakly wrapped.
proxy_context_rules = set([
    '__contains',
    '__is',
    '__is_not',
])
class Listener(object):
    """Couples a callback with the event name and filters it was registered for."""
    def __init__(self, func, event_name, filters):
        self.func, self.event_name, self.filters = func, event_name, filters
    def __repr__(self):
        return 'Listener({0}, {1}, {2})'.format(self.func, self.event_name, self.filters)
# @time
def clear_listeners():
    """Empty the global listener registry."""
    listeners.clear()
def get_listeners(hide_empty_lists=True):
    """Return a copy of the registry, optionally omitting empty entries."""
    if not hide_empty_lists:
        return listeners.copy()
    # Keep only event names that still have registered listeners.
    return {key: val for key, val in listeners.iteritems() if val}
# @time
def add_listener(func, event_name, **filters):
    """Register *func* for *event_name* and return a weak-ref wrapper handle.

    Filter values used in identity-style rules are wrapped weakly as well,
    so a registration does not keep its filter targets alive.
    """
    func = Wrapper(func)
    if filters:
        # print 'event.add_listener(func=%s, event_name=%s, filters=%s)' % (func, event_name, filters)
        for key, val in filters.iteritems():
            if key in proxy_rules:
                try:
                    filters[key] = Wrapper(val)
                except TypeError:
                    # Value cannot be weakly referenced; keep it as-is.
                    pass
                continue
            # Keys matching a proxy suffix (e.g. '...__is') are wrapped too.
            for rule in proxy_context_rules:
                if key.endswith(rule):
                    try:
                        filters[key] = Wrapper(val)
                    except TypeError:
                        pass
                    break
    handler = Listener(func, event_name, filters)
    # Add to the event's listener set, creating the set on first use.
    try:
        listeners[event_name] |= set([handler])
    except KeyError:
        listeners[event_name] = set([handler])
    return Wrapper(handler)
# @dump_args
def remove_listener(candidate):
    """Remove a weak-ref wrapped listener (possibly already dead) from the registry.

    *candidate* is the Wrapper returned by add_listener().  If the wrapped
    Listener has been garbage collected, the stale wrapper is scrubbed from
    whichever event set still holds it.
    """
    try:
        listener = candidate.resolve()
    except ReferenceError:
        # Try to find dead candidate and remove it.
        for key in listeners:
            try:
                listeners[key].remove(candidate)
                break
            # BUG FIX: the registry values are sets, and set.remove raises
            # KeyError (not ValueError) when the element is absent; the old
            # 'except ValueError' could never fire, so a miss propagated.
            except KeyError:
                pass
    else:
        if listener is not None:
            listeners[listener.event_name].remove(listener)
# @dump_args
def remove_listeners(candidates):
    """Remove every listener handle in *candidates* from the registry.

    Uses a plain for loop: the previous list comprehension built a throwaway
    list purely for its side effects.
    """
    for candidate in candidates:
        remove_listener(candidate)
def _parse(context, operator):
try:
var, operator = operator.rsplit('__', 1)
except ValueError:
var, operator = '', operator
# print 'context parts =', var, operator, context
# The following loop supports deep paths into context.
if '__' in var:
while 1:
try:
var, next = var.split('__', 1)
except ValueError:
next = None
if isinstance(context, dict):
context = context.get(var)
elif isinstance(context, list):
context = context[int(var)]
else:
context = getattr(context, var)
if next is not None:
var = next
else:
break
elif len(var):
if isinstance(context, dict):
context = context.get(var)
else:
context = getattr(context, var)
# else:
# print 'context parts =', key, var, operator, context
return operator, context
# @time
def emit(event_name, context=None):
    """Fire *event_name*, invoking every listener whose filters match *context*.

    Listeners whose weak-ref'd callback has died are pruned on the fly.
    Callbacks are invoked with context=, event=, or no argument depending on
    their signature.

    :return: list of (func, result) tuples for each listener that ran
    """
    parse = _parse
    results = []
    # Iterate over a copy: pruning below mutates the registered set.
    for listener in listeners.get(event_name, set()).copy():
        func = listener.func
        event_name = listener.event_name
        filters = listener.filters
        # Resolve weak ref.
        try:
            func = func.resolve()
        except ReferenceError:
            # It might happen that a race conditions brings us here.
            try:
                listeners[event_name].remove(listener)
            except KeyError:
                pass
            continue
        matching_failed = False
        for key, value in filters.iteritems():
            # Resolve weak ref.
            if type(value) is Wrapper:
                value = value.resolve()
            if key in basic_rules:
                matching_failed = basic_rules[key](context, value)
            elif key.startswith('context'):
                key, operator = key.split('__', 1)
                try:
                    operator, context_value = parse(context, operator)
                except AttributeError:
                    matching_failed = True
                    break
                try:
                    rule = context_rules[operator]
                except KeyError:
                    raise Exception('Unknown operator "%s" in filter for func "%s". Possible operators are: %s' % (operator, func, ', '.join(context_rules.keys())))
                matching_failed = rule(context_value, value)
            else:
                # BUG FIX: this message previously interpolated ``operator``,
                # which is undefined on this branch (NameError at runtime);
                # the offending filter key is what we want to report.
                raise Exception('Unknown key "%s" in filter for func "%s". Possible keys are: %s' % (key, func, ', '.join(basic_rules.keys())))
            if matching_failed:
                break
        if not matching_failed:
            # Decide how to call the listener based on its argument names.
            if hasattr(func, '__wrapped__'):
                spec = getargspec(func.__wrapped__)
            else:
                spec = getargspec(func)
            args = spec.args
            if 'context' in args:
                results.append((func, func(context=context)))
            elif 'event' in args:
                results.append((func, func(event=context)))
            else:
                results.append((func, func()))
    return results
|
def annual_query(df):
    """Boolean mask of rows whose subscription plan bills yearly."""
    interval = df["subscription.plan.interval"]
    return interval == 'year'
def time_range_query(df, start_time, end_time):
    """Mask: created within [start_time, end_time) with a positive amount due."""
    in_window = (df.created >= start_time) & (df.created < end_time)
    return in_window & (df.amount_due > 0)  # TODO - move amount due to separate filter
def billing_status_query(df):
    """Mask excluding invoices whose status is draft or void."""
    status = df.status
    return (status != 'draft') & (status != 'void')
# query filters
def by_day_query(df, start_time, end_time, manual=False):
    """Mask of billable invoices in the window, filtered by billing reason."""
    if manual:
        reason_mask = df.billing_reason == "manual"
    else:
        reason_mask = df.billing_reason != "manual"
    window_mask = time_range_query(df, start_time, end_time)
    return window_mask & reason_mask & billing_status_query(df)
|
import copy
import torch
from pathlib import Path
import numpy as np
from metrics import *
from utils import get_predictions
def test(model_best, model_last, device, test_set_generator, wandb, multiclass_labels):
    """Evaluate the best and last model checkpoints on the test set.

    :param model_best: best-validation checkpoint (evaluated in eval mode)
    :param model_last: final-epoch checkpoint
    :param device: torch device passed through to get_metrics
    :param test_set_generator: data loader for the test split
    :param wandb: wandb run object or None; when given, metrics (and, for the
        single-label case, confusion matrices) are also logged
    :param multiclass_labels: False -> single-label metrics (wacc/uacc);
        True -> multilabel metrics (accuracy, f1)
    """
    model_best.eval()
    model_last.eval()
    print("")
    print("########################")
    print("")
    print("Results on the TEST set:")
    metrics_single_label = ["wacc", "uacc"]
    # Confusion matrices are only computed when they can be logged to wandb.
    if wandb is not None:
        metrics_single_label.append("conf_matrix")
    if not multiclass_labels:
        metrics_best = get_metrics(model_best, test_set_generator, device, metrics_single_label)
        metrics_last = get_metrics(model_last, test_set_generator, device, metrics_single_label)
        print("weighted accuracy (best): %2.2f%%" % metrics_best["wacc"])
        print("unweighted accuracy (best): %2.2f%%" % metrics_best["uacc"])
        print("")
        print("weighted accuracy (last): %2.2f%%" % metrics_last["wacc"])
        print("unweighted accuracy (last): %2.2f%%" % metrics_last["uacc"])
        if wandb is not None:
            wandb.log({"weighted test accuracy best": metrics_best["wacc"],
                       "weighted test accuracy last": metrics_last["wacc"],
                       "unweighted test accuracy best": metrics_best["uacc"],
                       "unweighted test accuracy last": metrics_last["uacc"],
                       "test confusion matrix best": metrics_best["conf_matrix"],
                       "test confusion matrix last": metrics_last["conf_matrix"]})
    else:
        metrics_best = get_metrics(model_best, test_set_generator, device, ["acc_multilabel", "f1"])
        metrics_last = get_metrics(model_last, test_set_generator, device, ["acc_multilabel", "f1"])
        print("multilabel accuracy (best): %2.2f%%" % metrics_best["acc_multilabel"])
        print("f1 score (best): %2.2f" % metrics_best["f1"])
        print("")
        print("multilabel accuracy (last): %2.2f%%" % metrics_last["acc_multilabel"])
        print("f1 score (last): %2.2f" % metrics_last["f1"])
        if wandb is not None:
            wandb.log({"multilabel accuracy best": metrics_best["acc_multilabel"],
                       "multilabel accuracy last": metrics_last["acc_multilabel"],
                       "f1 score best": metrics_best["f1"],
                       "f1 score last": metrics_last["f1"]})
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
"""
import torch
from ptranking.metric.adhoc_metric import torch_ideal_dcg
from ptranking.ltr_global import global_gpu as gpu, tensor
#######
# For Delta Metrics
#######
def get_delta_ndcg(batch_stds, batch_stds_sorted_via_preds, multi_level_rele=True):
    '''
    Delta-nDCG w.r.t. pairwise swapping of the currently predicted ltr_adhoc
    :param batch_stds: the standard labels sorted in a descending order
    :param batch_stds_sorted_via_preds: the standard labels sorted based on the corresponding predictions
    :param multi_level_rele: if True use exponential gains (2^label - 1), otherwise use the labels directly
    :return: matrix of |delta-nDCG| values for every pair of positions, shape [batch, ranking_size, ranking_size]
    '''
    # ideal discount cumulative gains
    batch_idcgs = torch_ideal_dcg(batch_sorted_labels=batch_stds, gpu=gpu, multi_level_rele=multi_level_rele)
    if multi_level_rele:
        batch_gains = torch.pow(2.0, batch_stds_sorted_via_preds) - 1.0
    else:
        batch_gains = batch_stds_sorted_via_preds
    batch_n_gains = batch_gains / batch_idcgs  # normalised gains
    # Pairwise differences of normalised gains: [batch, size, size].
    batch_ng_diffs = torch.unsqueeze(batch_n_gains, dim=2) - torch.unsqueeze(batch_n_gains, dim=1)
    batch_std_ranks = torch.arange(batch_stds_sorted_via_preds.size(1)).type(tensor)
    batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0)  # discount co-efficients
    batch_dists = torch.unsqueeze(batch_dists, dim=0)
    # Pairwise differences of discount coefficients.
    batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)
    batch_delta_ndcg = torch.abs(batch_ng_diffs) * torch.abs(batch_dists_diffs)  # absolute changes w.r.t. pairwise swapping
    return batch_delta_ndcg
def get_sharp_swap_deltas(batch_stds, batch_stds_sorted_via_preds, pos_swap_const=1., neg_swap_const=1.):
    '''
    pure changes w.r.t. pairwise swapping of the currently predicted ltr_adhoc
    pure changes w.r.t. pairwise swapping is given that: (1) (1/D_i - 1/D_j)(G_j - G_i) (2)(G_i - G_j)(1/D_j - 1/D_i)
    :param batch_stds: the standard labels sorted in a descending order
    :param batch_stds_sorted_via_preds: the standard labels sorted based on the corresponding predictions
    :param pos_swap_const: weight for positive swaps (currently unused, see note below)
    :param neg_swap_const: weight for negative swaps (currently unused, see note below)
    :return: swap-strength matrix; NOTE: currently all-ones (variant 3 below),
             the weighted variants 1 and 2 are kept commented out for reference
    '''
    batch_idcgs = torch_ideal_dcg(batch_sorted_labels=batch_stds, gpu=gpu)  # ideal discount cumulative gains
    batch_gains = torch.pow(2.0, batch_stds_sorted_via_preds) - 1.0
    batch_n_gains = batch_gains / batch_idcgs  # normalised gains
    batch_ng_diffs = torch.unsqueeze(batch_n_gains, dim=2) - torch.unsqueeze(batch_n_gains, dim=1)
    batch_std_ranks = torch.arange(batch_stds_sorted_via_preds.size(1)).type(tensor)
    batch_dists = 1.0 / torch.log2(batch_std_ranks + 2.0)  # discount co-efficients
    batch_dists = torch.unsqueeze(batch_dists, dim=0)
    batch_dists_diffs = torch.unsqueeze(batch_dists, dim=2) - torch.unsqueeze(batch_dists, dim=1)
    t_batch_dists_diffs = torch.transpose(batch_dists_diffs, dim0=1, dim1=2)
    batch_swap_ndcg = batch_ng_diffs * t_batch_dists_diffs  # pure changes
    batch_pos_swap_ones = (batch_swap_ndcg > 0).type(tensor)  # s_ij is one for positive swap, otherwise 0
    batch_pos_swap_cofs = batch_pos_swap_ones * pos_swap_const
    batch_neg_swap_ones = (batch_swap_ndcg < 0).type(tensor)  # negative swap means that the current pairwise order is consistent with the standard order
    batch_neg_swap_cofs = batch_neg_swap_ones * neg_swap_const
    batch_all_cofs = batch_pos_swap_cofs + batch_neg_swap_cofs
    #1 what is the meaning?
    #batch_swap_ndcg = torch.clamp(batch_swap_ndcg, min=0.0, max=100000.) # keeping positive swapping
    #batch_swap_streths = batch_swap_ndcg + batch_neg_swap_cofs
    #2
    #batch_delta_ndcg = torch.abs(batch_swap_ndcg)
    #batch_swap_streths = batch_all_cofs * batch_delta_ndcg
    #3 all constant: every pair gets equal swap strength
    batch_swap_streths = torch.ones_like(batch_swap_ndcg)
    return batch_swap_streths
def metric_results_to_string(list_scores=None, list_cutoffs=None, split_str=', '):
    """
    Convert per-cutoff metric results to a single human-readable string.

    :param list_scores: nDCG scores, aligned with list_cutoffs
    :param list_cutoffs: cutoff positions (the k in nDCG@k)
    :param split_str: separator between the formatted entries
    :return: e.g. 'nDCG@1:0.2500, nDCG@5:0.5000'
    """
    # zip pairs each cutoff with its score, replacing manual index arithmetic.
    return split_str.join(
        'nDCG@{}:{:.4f}'.format(cutoff, score)
        for cutoff, score in zip(list_cutoffs, list_scores)
    )
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 28 13:13:37 2014
@author: eladn
"""
import logging
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import savemat
from component_contribution.thermodynamic_constants import R, default_c_range, default_c_mid
from component_contribution.component_contribution_trainer import ComponentContribution
from component_contribution.kegg_model import KeggModel
from scripts.mdf_dual import KeggPathway
from scripts.html_writer import HtmlWriter, NullHtmlWriter
from scripts.kegg_parser import ParsedKeggFile
class ConcentrationConstraints(object):
    """Empty placeholder for concentration-constraint data; defines no behavior."""
    pass
class MaxMinDrivingForce(object):
    """Max-min driving force (MDF) analysis of a KEGG pathway model.

    Builds a KeggPathway optimization from a KeggModel plus flux and
    concentration-bound data, solves for the MDF, and writes HTML reports.
    NOTE(review): this module uses Python 2 constructs (iteritems, xrange,
    print statements) — it is not Python 3 compatible as written.
    """

    def __init__(self, model, fluxes, bounds, pH, I, T, html_writer=None,
                 cid2name=None):
        """
        model - a KeggModel object
        fluxes - a list of fluxes, should match the model in length
        bounds - a dictionary mapping KEGG compound IDs to tuples of
                 (low,up) bound
        pH, I, T - the aqueous conditions for the thermodynamics
        html_writer - (optional) write a report to HTML file
        """
        self.model = model
        self.fluxes = fluxes
        self.cid2name = cid2name
        self.pH, self.I, self.T = pH, I, T
        self.c_range = default_c_range  # package-wide default concentration range
        self.c_mid = default_c_mid      # package-wide default (mid) concentration
        self.bounds = bounds
        if html_writer is None:
            self.html_writer = NullHtmlWriter()  # silently discards report output
        else:
            self.html_writer = html_writer
        # Echo the thermodynamic conditions into the report header.
        self.html_writer.write('Parameters:</br>\n')
        condition_list = ['pH = %g' % self.pH,
                          'Ionic strength = %g M' % self.I,
                          'Temperature = %g K' % self.T,
                          'Concentration range = %g - %g M' % self.c_range,
                          'Default concentration = %g M' % self.c_mid]
        self.html_writer.write_ul(condition_list)

    def MapCIDs(self, cid_dict):
        # Rename compound IDs according to cid_dict.
        # NOTE(review): self.cids is never assigned in __init__ (the model's
        # IDs live in self.model.cids) — confirm callers set it first. Also,
        # dict.get returns None for missing keys, so the KeyError branch
        # appears unreachable; and Python 3's lazy map() would not raise here.
        try:
            self.cids = map(cid_dict.get, self.cids)
        except KeyError as e:
            raise KeyError('Error: not all CIDs are mapped to their new names '
                           + str(e))

    def GetBounds(self):
        """Return a cid -> (lb, ub) dict: defaults, then H2O, then user bounds."""
        cid2bounds = {cid:self.c_range for cid in self.model.cids}
        cid2bounds['C00001'] = (1, 1) # the default for H2O is 1
        # User-supplied bounds override the defaults (Python 2 iteritems).
        for cid, b in self.bounds.iteritems():
            cid2bounds[cid] = b
        return cid2bounds

    def _Solve(self, uncertainty_factor=3.0, diagonal_covariance=False):
        """Build the KeggPathway problem and solve for the MDF once.

        Returns (mdf_value, solver_params, keggpath).
        """
        S = self.model.S
        f = self.fluxes
        cids = self.model.cids
        # Fall back to synthetic reaction IDs when the model defines none.
        rids = self.model.rids or ['R%05d' % i for i in xrange(S.shape[1])]
        dG0_prime, dG0_std, sqrt_Sigma = self.model.get_transformed_dG0(pH=self.pH, I=self.I, T=self.T)
        if diagonal_covariance:
            # Ignore cross-reaction covariance; scale per-reaction std devs.
            sigma = uncertainty_factor*np.matrix(np.diag(dG0_std))
        else:
            sigma = uncertainty_factor*sqrt_Sigma
        cid2bounds = self.GetBounds()
        keggpath = KeggPathway(S, rids, f, cids, dG0_prime, sigma,
                               cid2bounds=cid2bounds,
                               c_range=self.c_range,
                               cid2name=self.cid2name)
        _mdf, params = keggpath.FindMDF(calculate_totals=True)
        return _mdf, params, keggpath

    def Solve(self, uncertainty_factor=3.0, diagonal_covariance=False, verbose=True):
        """Solve for the MDF and (optionally) write a full HTML report.

        Returns (mdf_value, raw_gibbs_energies).
        """
        _mdf, params, keggpath = self._Solve(uncertainty_factor=uncertainty_factor,
                                             diagonal_covariance=diagonal_covariance)
        total_dG_prime = params.get('maximum total dG', np.nan)
        # Percent-style scores derived from the driving forces via tanh scaling.
        odfe = 100 * np.tanh(_mdf / (2*R*self.T))
        average_dG_prime = total_dG_prime/np.sum(self.fluxes)
        average_dfe = 100 * np.tanh(-average_dG_prime / (2*R*self.T))
        if verbose:
            res = ["MDF = %.1f (avg. = %.1f) kJ/mol" % (_mdf, -average_dG_prime),
                   "ODFE = %.1f%% (avg. = %.1f%%)" % (odfe, average_dfe),
                   "Total Δ<sub>r</sub>G' = %.1f kJ/mol" % total_dG_prime,
                   "no. steps = %g" % np.sum(self.fluxes)]
            self.html_writer.write_ul(res)
            # Embed the per-reaction energy profile plus result tables.
            profile_fig = keggpath.PlotProfile(params)
            plt.title('ODFE = %.1f%%' % odfe, figure=profile_fig)
            self.html_writer.embed_matplotlib_figure(profile_fig, width=320, height=320)
            keggpath.WriteProfileToHtmlTable(self.html_writer, params)
            keggpath.WriteConcentrationsToHtmlTable(self.html_writer, params)
        return _mdf, params['gibbs energies raw']

    def SolveIterative(self, uncertainty_factor=3.0, diagonal_covariance=False):
        """Solve the MDF repeatedly, pinning bottleneck reactions each round.

        After each solve, reactions with a positive shadow price are fixed
        to the current MDF value and removed from the next optimization,
        until every active reaction has been pinned.
        """
        S = self.model.S
        f = self.fluxes
        rids = self.model.rids or ['R%05d' % i for i in xrange(S.shape[1])]
        cids = self.model.cids
        dG0_prime, dG0_std, sqrt_Sigma = self.model.get_transformed_dG0(pH=self.pH, I=self.I, T=self.T)
        if diagonal_covariance:
            #sigma = uncertainty_factor * np.matrix(np.diag(dG0_std))
            # Here the uncertainty shifts the estimates down instead of
            # entering through a covariance term.
            sigma = 0.0 * sqrt_Sigma
            dG0_prime -= uncertainty_factor * dG0_std
        else:
            sigma = uncertainty_factor * sqrt_Sigma
        cid2bounds = self.GetBounds()
        rid2bounds = {}
        # Number of reactions carrying nonzero flux (Python 2 filter -> list).
        total_active_reactions = len(filter(None, f.flat))
        iter_counters = [-1] * len(rids)  # iteration at which each rid was pinned
        params_list = []
        for i in xrange(len(rids)):
            keggpath = KeggPathway(S, rids, f, cids, dG0_prime, sigma,
                                   rid2bounds=rid2bounds,
                                   cid2bounds=cid2bounds,
                                   c_range=self.c_range,
                                   cid2name=self.cid2name)
            _mdf, params = keggpath.FindMDF(calculate_totals=False)
            params_list.append(params)
            # is not the same and maybe there is a mixup
            tmp = zip(rids,
                      map(lambda x:'%.1f' % x, params['gibbs energies'].flat),
                      map(lambda x:'%.1f' % x, params['reaction prices'].flat))
            logging.debug('\n'.join(map(', '.join, tmp)))
            # fix the driving force of the reactions that have shadow prices
            # to the MDF value, and remove them from the optimization in the
            # next round
            shadow_prices = params['reaction prices']
            # Python 2 print statement; trailing comma suppresses the newline.
            print '\rIterative MDF: %3d%%' % \
                (len(rid2bounds) * 100 / total_active_reactions),
            for rid, s_p in zip(rids, shadow_prices):
                if rid not in rid2bounds and s_p > 1e-5:
                    rid2bounds[rid] = -_mdf + 1e-4 # add 'epsilon' for numerical reasons
                    iter_counters[rids.index(rid)] = i
            if len(rid2bounds) == total_active_reactions:
                break
        print '\rIterative MDF: [DONE]'
        self.html_writer.write("<p>MDF = %.1f kJ/mol</p>\n" % params_list[0]['MDF'])
        params_list[-1]['profile figure'] = keggpath.PlotProfile(params_list[-1])
        self.html_writer.embed_matplotlib_figure(params_list[-1]['profile figure'],
                                                 width=320, height=320)
        self.WriteIterativeReport(keggpath, params_list, iter_counters)
        return params_list[-1]

    def WriteIterativeReport(self, keggpath, params_list, iter_counters):
        """Write HTML tables tracing each reaction's dG across the iterations."""
        headers = ["reaction", "formula", "flux",
                   "Δ<sub>r</sub>G' [kJ/mol]"] + ['I%02d' % i for i in xrange(len(params_list))]
        dict_list = []
        for r, rid in enumerate(keggpath.rids):
            if keggpath.fluxes[0, r] == 0:
                continue  # skip reactions with zero flux
            d = {'reaction' : rid,
                 'flux' : keggpath.fluxes[0, r],
                 'formula' : keggpath.GetReactionString(r),
                 headers[3] : params_list[-1]['gibbs energies'][r, 0],
                 'iteration' : iter_counters[r]}
            for i, p in enumerate(params_list):
                # Values from the pinning iteration onward are emphasized.
                if i < iter_counters[r]:
                    d['I%02d' % i] = '%.3f' % p['gibbs energies'][r, 0]
                else:
                    d['I%02d' % i] = '<b>%.3f</b>' % p['gibbs energies'][r, 0]
            dict_list.append(d)
        dict_list.sort(key=lambda d:d['iteration'], reverse=False)
        # Summary row: the overall pathway reaction and its total dG.
        d = {'reaction' : 'Total',
             'flux' : '1',
             'formula' : keggpath.GetTotalReactionString(),
             headers[3] : float(keggpath.fluxes * params_list[-1]['gibbs energies'])}
        dict_list.append(d)
        self.html_writer.write_table(dict_list, headers=headers, decimal=1)
        # Second table: solved metabolite concentrations with their bounds.
        concentrations = params_list[-1]['concentrations']
        headers = ['compound', 'Concentration LB [M]',
                   'Concentration [M]', 'Concentration UB [M]']
        dict_list = []
        for c, cid in enumerate(keggpath.cids):
            d = {}
            d['compound'] = keggpath.c_names[c]
            lb, ub = keggpath.GetConcentrationBounds(cid)
            d['Concentration LB [M]'] = '%.2e' % lb
            d['Concentration [M]'] = '%.2e' % concentrations[c, 0]
            d['Concentration UB [M]'] = '%.2e' % ub
            dict_list.append(d)
        self.html_writer.write_table(dict_list, headers=headers)
###############################################################################
def KeggFile2ModelList(pathway_file):
    """Parse a KEGG pathway definition file into a list of pathway dicts.

    Each dict carries the entry name, its KeggModel (with reaction IDs
    attached), the flux vector, concentration bounds and the aqueous
    conditions (pH, I, T) with their defaults.
    """
    parsed_file = ParsedKeggFile.FromKeggFile(pathway_file)
    pathway_dicts = []
    for entry in parsed_file.entries():
        fields = parsed_file[entry]
        rids, fluxes, reactions = ParsedKeggFile.ParseReactionModule(fields)
        bounds = ParsedKeggFile.ParseBoundModule(fields)
        kegg_model = KeggModel.from_formulas(reactions)
        kegg_model.rids = rids
        pathway_dicts.append({
            'entry': entry,
            'model': kegg_model,
            'fluxes': fluxes,
            'bounds': bounds,
            'pH': fields.GetFloatField('PH', 7.5),
            'I': fields.GetFloatField('I', 0.2),
            'T': fields.GetFloatField('T', 298.15),
        })
    return pathway_dicts
if __name__ == '__main__':
    #fname = sys.argv[1]
    fname = 'mdf_pathways'  # hard-coded input name (CLI argument disabled above)
    REACTION_FNAME = 'scripts/%s.txt' % fname
    pathways = KeggFile2ModelList(REACTION_FNAME)
    html_writer = HtmlWriter('res/%s.html' % fname)
    # Initialize the component-contribution estimator used for dG values.
    cc = ComponentContribution.init()
    matdict = {}
    for p in pathways:
        html_writer.write('<h2>%s</h2>' % p['entry'])
        # Attach thermodynamic estimates to the model, then solve for the MDF.
        p['model'].add_thermo(cc)
        mdf = MaxMinDrivingForce(p['model'], p['fluxes'], p['bounds'],
                                 pH=p['pH'], I=p['I'], T=p['T'],
                                 html_writer=html_writer)
        mdf_solution, dGm_prime = mdf.Solve()
        logging.info('Pathway %s: MDF = %.1f' % (p['entry'], mdf_solution))
        matdict[p['entry'] + '.dGm_prime'] = dGm_prime
    # Dump all per-pathway Gibbs energy vectors into one MATLAB .mat file.
    savemat('res/%s.mat' % fname, matdict)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 23:58:42 2018
Snake Mini project Starter Code
Name: Tomer Ferdman
Date: 08/07/2018
"""
import turtle
import random # We'll need this later in the lab
import time
turtle.tracer(1,0) # This helps the turtle move more smoothly
# Window size and (approximate) playfield size in pixels.
SIZE_A = 1000
SIZE_B = 1000
SIZE_X=550
SIZE_Y=550
score = 0  # running score; redrawn by move_snake()
turtle.setup(SIZE_A, SIZE_B) # Curious? It's the turtle window size.
# Draw the square arena border at +/-300 on both axes.
border = turtle.clone()
border.color('dark orange')
border.penup()
border.width(20)
border.goto(-300,-300)
border.pendown()
border.goto(-300,300)
border.goto(300,300)
border.goto(300,-300)
border.goto(-300,-300)
border.penup()
turtle.penup()
border.hideturtle()
# --- Title and score labels --------------------------------------------------
label_game = turtle.Turtle()
label_game.ht()  # hide the drawing cursor itself
label_game.penup()
label_game.color('red')
# BUG FIX: width() expects a number; the original passed the string '10'.
label_game.width(10)
label_game.goto(-100, 350)
label_game.pendown()
label_game.write('SNAKE GAME!!!', font = ('Arial', 30, 'normal'))
num_label = turtle.Turtle()
num_label.ht()
num_label.color('light blue')
num_label.width(10)
num_label.penup()
num_label.goto(0, -400)
# Consistency: use the same font as every later score update in move_snake(),
# so the initial "0" is not drawn in the tiny default font.
num_label.write(str(score), font = ('Arial', 40, 'normal'))
SQUARE_SIZE = 20   # side of one grid cell / snake segment, in pixels
START_LENGTH = 5   # number of segments the snake starts with
# Initialize lists
pos_list = []      # (x, y) of every live snake segment, tail first
stamp_list = []    # stamp ids matching pos_list, used to erase segments later
food_pos = []
food_stamps = []
# Set up positions (x,y) of boxes that make up the snake
snake = turtle.clone()
snake.shape('square')
snake.color('dark blue')
# Hide the turtle object (it's an arrow - we don't need to see it)
turtle.hideturtle()
# Draw a snake at the start of the game with a for loop
# for loop should use range() and count up to the number of pieces
# in the snake (i.e. START_LENGTH)
for snake1 in range(START_LENGTH):
    x_pos=snake.pos()[0]# Get x-position with snake.pos()[0]
    y_pos=snake.pos()[1]
    # Add SQUARE_SIZE to x_pos. Where does x_pos point to now?
    # You're RIGHT!
    x_pos+=SQUARE_SIZE
    my_pos=(x_pos,y_pos) # Store position variables in a tuple
    snake.goto(x_pos,y_pos) # Move snake to new (x,y)
    # Append the new position tuple to pos_list
    pos_list.append(my_pos)
    # Save the stamp ID! You'll need to erase it later. Then append
    # it to stamp_list.
    stamp_t = snake.stamp()
    stamp_list.append(stamp_t)
###############################################################
# PART 2 -- READ INSTRUCTIONS!!
###############################################################
UP_ARROW = "Up" # Make sure you pay attention to upper and lower
                # case
LEFT_ARROW = "Left" # Pay attention to upper and lower case
DOWN_ARROW = "Down" # Pay attention to upper and lower case
RIGHT_ARROW = "Right" # Pay attention to upper and lower case
TIME_STEP = 150 # Update snake position after this many
currentTIME_STEP = TIME_STEP # milliseconds
SPACEBAR = "space" # Careful, it's not supposed to be capitalized!
# Direction encoding shared by the key handlers and move_snake().
UP = 0
LEFT = 1
DOWN = 2
RIGHT = 3
# 1. Make variables LEFT, DOWN, and RIGHT with values 1, 2, and 3
#### WRITE YOUR CODE HERE!!
direction = UP  # current heading; updated by the arrow-key callbacks
# Playfield limits; reaching any of them ends the game.
UP_EDGE = 300
DOWN_EDGE = -300
RIGHT_EDGE = 300
LEFT_EDGE = -300
def up():
    """Arrow-key callback: head upward unless currently moving down."""
    global direction  # snake direction is global (same everywhere)
    if direction != DOWN:  # disallow an instant 180-degree turn
        direction = UP
    print("You pressed the up key!")
def left():
    """Arrow-key callback: head left unless currently moving right."""
    global direction
    if direction != RIGHT:  # disallow an instant 180-degree turn
        direction = LEFT
    print('You pressed the left key!')
def down():
    """Arrow-key callback: head downward unless currently moving up."""
    global direction
    if direction != UP:  # disallow an instant 180-degree turn
        direction = DOWN
    print('You pressed the down key!')
def right():
    """Arrow-key callback: head right unless currently moving left."""
    global direction
    if direction != LEFT:  # disallow an instant 180-degree turn
        direction = RIGHT
    print('You pressed the right key!')
# 2. Make functions down(), left(), and right() that change direction
#### WRITE YOUR CODE HERE!!
isPause = False          # toggled by the spacebar handler below
tpause = turtle.clone()  # turtle used only to draw/clear the PAUSE overlay
def pause():
    """Toggle the pause flag and the on-screen PAUSE overlay (spacebar).

    move_snake() keeps rescheduling itself while paused; it simply skips
    moving the snake when isPause is set.
    """
    # Cleanup: the original also declared TIME_STEP global here, but the
    # active body never assigns it (only the commented-out legacy code did).
    global isPause
    tpause.penup()
    tpause.goto(-100,0)
    isPause = not isPause
    if isPause:
        tpause.write('PAUSE', font = ('Arial', 60, 'normal'))
    else:
        tpause.clear()
# NOTE(review): the triple-quoted string below is dead code — a previous
# pause implementation kept alive as a bare expression statement. Consider
# deleting it.
'''
tpause = turtle.clone()
tpause.penup()
tpause.goto(-100,0)
tpause.write('PAUSE', font = ('Arial', 60, 'normal'))
if isPause:
    TIME_STEP = 100000
else:
    TIME_STEP = currentTIME_STEP
move_snake()
while isPause:
    tpause.clear()
    time.sleep(.25)
    tpause.write('PAUSE', font = ('Arial', 60, 'normal'))
    time.sleep(.25)
    tpause.clear()
'''
# Wire the keyboard callbacks and start listening for key events.
turtle.onkeypress(up, UP_ARROW) # Create listener for up key
turtle.onkeypress(left, LEFT_ARROW)
turtle.onkeypress(down, DOWN_ARROW)
turtle.onkeypress(right, RIGHT_ARROW)
turtle.onkeypress(pause, "space")  # NOTE: the SPACEBAR constant above holds this literal
turtle.listen()
# Food turtle: stamps a trash icon at each food position.
food = turtle.clone()
turtle.register_shape('trash.gif')
food.shape("trash.gif")
food_pos = [(100,100)]   # first food item; more are appended by make_food()
food_stamps = []
for this_food_pos in food_pos:
    food.goto(this_food_pos)
    food_stamp=food.stamp()
    food_stamps.append(food_stamp)
def make_food():
    """Stamp a new food item at a random grid position inside the playfield.

    Appends the new position and stamp id to the module-level food_pos /
    food_stamps lists so move_snake() can detect and erase it.
    """
    # BUG FIX: the y bounds used -1/+1 (expanding the spawn range toward the
    # walls) while the x bounds used +1/-1 (insetting it). Mirror the x-axis
    # computation so food spawns symmetrically inside the play area.
    min_x = -int(SIZE_X/2/SQUARE_SIZE)+1
    max_x = int(SIZE_X/2/SQUARE_SIZE)-1
    min_y = -int(SIZE_Y/2/SQUARE_SIZE)+1
    max_y = int(SIZE_Y/2/SQUARE_SIZE)-1
    # Snap to the movement grid by scaling a random cell index by SQUARE_SIZE.
    food_x = random.randint(min_x,max_x)*SQUARE_SIZE
    food_y = random.randint(min_y,max_y)*SQUARE_SIZE
    food.goto(food_x, food_y)
    food_turtle_pos = (food_x, food_y)
    food_pos.append(food_turtle_pos)
    food_stamp = food.stamp()
    food_stamps.append(food_stamp)
    print(food_pos)
def move_snake():
    """Advance the snake one square; handle walls, self-collision, food, score.

    Reschedules itself every TIME_STEP ms via turtle.ontimer. While the game
    is paused only the rescheduling happens, so unpausing resumes play.
    """
    if not isPause:
        my_pos = snake.pos()
        x_pos = my_pos[0]
        y_pos = my_pos[1]
        # Step one grid square in the current direction.
        if direction==RIGHT:
            snake.goto(x_pos + SQUARE_SIZE, y_pos)
            print("You moved right!")
            my_pos=(x_pos + SQUARE_SIZE, y_pos)
        elif direction==LEFT:
            snake.goto(x_pos - SQUARE_SIZE, y_pos)
            print("You moved left!")
            my_pos=(x_pos - SQUARE_SIZE, y_pos)
        elif direction==UP:
            snake.goto(x_pos, y_pos + SQUARE_SIZE)
            print('You moved up!')
            my_pos=(x_pos, y_pos + SQUARE_SIZE)
        elif direction==DOWN:
            snake.goto(x_pos, y_pos - SQUARE_SIZE)
            print('You moved down!')
            my_pos=(x_pos, y_pos - SQUARE_SIZE)
        new_pos = snake.pos()
        new_x_pos = new_pos[0]
        new_y_pos = new_pos[1]
        # 4. Write the conditions for UP and DOWN on your own
        ##### YOUR CODE HERE
        # Wall collisions end the game immediately.
        if new_x_pos >= RIGHT_EDGE:
            print('You hit the right edge! Game over!')
            quit()
        elif new_x_pos <= LEFT_EDGE:
            print('You hit the left edge! Game over!')
            quit()
        elif new_y_pos >= UP_EDGE:
            # BUG FIX: message typo ('Game oover!').
            print('You hit the up edge! Game over!')
            quit()
        elif new_y_pos <= DOWN_EDGE:
            # Capitalization made consistent with the other game-over messages.
            print('You hit the down edge! Game over!')
            quit()
        # Stamp new element and append new stamp in list
        # Remember: The snake position changed - update my_pos()
        my_pos=snake.pos()
        pos_list.append(my_pos)
        new_stamp = snake.stamp()
        stamp_list.append(new_stamp)
        ######## SPECIAL PLACE - Remember it for Part 5
        # Self-collision: the new head position already exists in the body.
        if snake.pos() in pos_list[:-1]:
            print('You hit yourself! Game over!')
            quit()
        global food_stamps, food_pos, score
        if snake.pos() in food_pos:
            # Eating: remove this food item; the tail is NOT popped, so the
            # snake grows by one segment.
            food_ind=food_pos.index(snake.pos())
            food.clearstamp(food_stamps[food_ind])
            food_pos.pop(food_ind)
            food_stamps.pop(food_ind)
            print('You have eaten the food!')
            score = score + 100
            global TIME_STEP
            if TIME_STEP > 10:
                TIME_STEP = int(TIME_STEP*0.9)  # speed the game up after a meal
        else:
            # Normal move: erase the tail stamp so the snake keeps its length.
            old_stamp = stamp_list.pop(0)
            snake.clearstamp(old_stamp)
            pos_list.pop(0)
            # pop zeroth element in pos_list to get rid of last the last
            # piece of the tail
        score = score + 1
        num_label.clear()
        num_label.write(str(score), font = ('Arial', 40, 'normal'))
        # Keep at least one food item on the board at all times.
        if len(food_stamps) <= 1:
            make_food()
    turtle.ontimer(move_snake,TIME_STEP)
# Kick off the self-rescheduling game loop, then hand control to turtle.
move_snake()
turtle.mainloop()
|
#!flask/bin/python
#==============================================================================#
# --- globo-fan-remote --- #
# author: Max Stark #
# date: March 2018 #
#==============================================================================#
from enum import Enum
import logging
import os
import ctypes
import time
#===============================================================================
GPIO_PIN = 18  # BCM pin number driving the IR emitter — TODO confirm wiring
MAX_PULSES_PER_WAVE = 12000 # from pigpio.h
FREQUENCY = 38000  # IR carrier frequency in Hz
PERIOD_TIME = 1000000.0 / FREQUENCY  # one carrier period, in microseconds
DUTY_CYCLE = 0.5
DURATION_ON = int(round(PERIOD_TIME * DUTY_CYCLE))          # carrier high time (us)
DURATION_OFF = int(round(PERIOD_TIME * (1.0 - DUTY_CYCLE))) # carrier low time (us)
PI_WAVE_MODE_REPEAT_SYNC = 3
class Pulses_struct(ctypes.Structure):
    """ctypes struct passed to pigpio's gpioWaveAddGeneric.

    Presumably mirrors pigpio's gpioPulse_t layout: bitmasks of GPIOs to
    set/clear plus a delay in microseconds — confirm against pigpio.h.
    """
    _fields_ = [("gpioOn", ctypes.c_uint32),
                ("gpioOff", ctypes.c_uint32),
                ("usDelay", ctypes.c_uint32)]
class Wave():
    """A fixed-capacity buffer of GPIO pulses forming one pigpio waveform."""

    def __init__(self):
        # Pre-allocate the maximum number of pulse structs pigpio accepts.
        self.pulses = (Pulses_struct * MAX_PULSES_PER_WAVE)()
        self.pulse_count = 0
        self.wave_duration = 0

    def addPulse(self, gpioOn, gpioOff, usDelay):
        """Append one raw pulse and extend the wave's total duration."""
        # ctypes array elements are views, so assigning fields mutates the array.
        slot = self.pulses[self.pulse_count]
        slot.gpioOn = gpioOn
        slot.gpioOff = gpioOff
        slot.usDelay = usDelay
        self.pulse_count += 1
        self.wave_duration += usDelay

    # Pull the specified output pin low
    def zero(self, duration):
        """Hold the output pin low for *duration* microseconds."""
        self.addPulse(0, 1 << GPIO_PIN, duration)

    # Protocol-agnostic square wave generator
    def one(self, duration):
        """Emit the carrier square wave for roughly *duration* microseconds."""
        total_periods = int(round(duration / PERIOD_TIME))
        # One on/off pulse pair per carrier period — identical sequence to
        # emitting 2*total_periods alternating pulses.
        for _ in range(total_periods):
            self.addPulse(1 << GPIO_PIN, 0, DURATION_ON)
            self.addPulse(0, 1 << GPIO_PIN, DURATION_OFF)
class WaveGenerator():
    """Builds pigpio waveforms from IR code strings and transmits them."""

    def __init__(self):
        print("Loading libpigpio.so")
        self.pigpio = ctypes.CDLL('libpigpio.so')
        self.pigpio.gpioInitialise()
        self.pigpio.gpioSetMode(GPIO_PIN, 1)  # mode 1 — presumably PI_OUTPUT; confirm
        self.waves = []

    def add(self, durationPulse, durationPause):
        """Append a carrier burst then a pause to the current wave.

        Rolls over into a fresh Wave when the current one approaches
        pigpio's per-wave pulse limit.
        """
        wave = self.waves[-1]
        if wave.pulse_count > (MAX_PULSES_PER_WAVE - 1000):
            wave = Wave()
            self.waves.append(wave)
        wave.one(durationPulse)
        wave.zero(durationPause)

    def generateWave(self, ircode):
        """Translate an IR code string of '0'/'1'/'*'/'#' symbols into waves."""
        self.waves = []
        self.waves.append(Wave())
        for i in ircode:
            if i == "0":
                self.add(400, 1200)
            elif i == "1":
                self.add(1200, 400)
            elif i == "*":
                self.add(400, 8000)
            elif i == "#":
                self.add(1200, 7000)
        pulseCount = 0
        for wave in self.waves:
            pulseCount += wave.pulse_count
        # CONSISTENCY FIX: was a Python 2-only print statement; every other
        # print in this module uses the call form (same output text).
        print("Generated " + str(len(self.waves)) + " waves " +
              "with " + str(pulseCount) + " pulses.")

    def sendWave(self):
        """Transmit each generated wave, then delete it from pigpio."""
        for wave in self.waves:
            self.pigpio.gpioWaveAddGeneric(wave.pulse_count, wave.pulses)
            waveId = self.pigpio.gpioWaveCreate()
            if waveId >= 0:
                print("Sending wave...")
                result = self.pigpio.gpioWaveTxSend(waveId, PI_WAVE_MODE_REPEAT_SYNC)
                if result >= 0:
                    print("... success! (result: %d)" % result)
                    # Since we send the wave in repeat mode, we have to stop it after
                    # the calculated time
                    # NOTE(review): wave_duration is in microseconds, so /1e5
                    # waits ~10x one wave's duration — presumably intentional
                    # for repeat mode; confirm.
                    wait = round(wave.wave_duration / 100000.0, 3)
                    print("Waiting for %f seconds ..." % wait)
                    time.sleep(wait)
                    self.pigpio.gpioWaveTxStop()
                    print("... now stop and delete wave.")
                else:
                    print("... error! (result: %d)" % result)
            else:
                print("Error creating wave: %d" % waveId)
            self.pigpio.gpioWaveDelete(waveId)
            self.pigpio.gpioWaveClear()
        self.waves = []

    def __del__(self):
        # BUG FIX: was misspelled '__def__', so this destructor never ran and
        # pigpio was never terminated on garbage collection.
        print("Terminating pigpio")
        self.pigpio.gpioTerminate()
#===============================================================================
# IR code strings: '0'/'1' are data bits, '*' and '#' are frame separators
# with different pause lengths (see WaveGenerator.generateWave).
IRCODE_PREAMBLE = "11000000000*11000111111#"
# Command payloads are repeated 4x after the preamble for reliability.
IRCODE_LIGHT_POWER = IRCODE_PREAMBLE + 4 * "11000000100*"
IRCODE_LIGHT_DIMM = "11000010000*"
IRCODE_FAN_OFF = IRCODE_PREAMBLE + 4 * "11000001000*"
IRCODE_FAN_LOW = IRCODE_PREAMBLE + 4 * "11000100001#"
IRCODE_FAN_MED = IRCODE_PREAMBLE + 4 * "11000000010*"
IRCODE_FAN_HIGH = IRCODE_PREAMBLE + 4 * "11000000000#"
IRCODE_DISCO = IRCODE_LIGHT_POWER + IRCODE_PREAMBLE
# Shared generator instance used by both IR controllers below.
WAVE_GENERATOR = WaveGenerator()
#===============================================================================
class GloboLightCommand(Enum):
    """Commands understood by the lamp half of the Globo fan remote."""
    POWER = 0
    DIMM = 1
    DISCO = 2
class GloboLightIRController():
    """Translates light commands into IR waveforms and transmits them."""

    def sendCommand(self, command, value):
        """Send a GloboLightCommand; *value* is the 0-100 level for DIMM."""
        if command == GloboLightCommand.POWER:
            print("Generate wave: LIGHT POWER")
            WAVE_GENERATOR.generateWave(IRCODE_LIGHT_POWER)
        elif command == GloboLightCommand.DIMM:
            # Floor division keeps repeats an int: string repetition below
            # requires an integer (true division only worked under Python 2).
            repeats = ((100 - value) // 10) + 6
            print("Generate wave: LIGHT DIMM (" + str(value) + "%) with " + str(repeats) + " repeats.")
            WAVE_GENERATOR.generateWave(IRCODE_PREAMBLE + repeats * IRCODE_LIGHT_DIMM)
        elif command == GloboLightCommand.DISCO:
            # BUG FIX: was compared against the string "DISCO" instead of the
            # enum member, which made this branch unreachable.
            print("Generate wave: LIGHT DISCO")
            WAVE_GENERATOR.generateWave(10 * IRCODE_DISCO)
        else:
            # str() avoids a TypeError when concatenating a non-string command.
            print("Unknown light command '" + str(command) + "'.")
            return
        print("Sending wave ...")
        WAVE_GENERATOR.sendWave()
        print("Sending wave finished.")
#===============================================================================
class GloboFanCommand(Enum):
    """Fan-speed commands understood by the Globo fan remote."""
    LOW = 0
    MED = 1
    HIGH = 2
    OFF = 3
class GloboFanIRController():
    """Translates fan-speed commands into IR waveforms and transmits them."""

    def sendCommand(self, command):
        """Send a GloboFanCommand via the shared wave generator."""
        # Dispatch table: command -> (log label, IR code string).
        dispatch = {
            GloboFanCommand.LOW: ("LOW", IRCODE_FAN_LOW),
            GloboFanCommand.MED: ("MED", IRCODE_FAN_MED),
            GloboFanCommand.HIGH: ("HIGH", IRCODE_FAN_HIGH),
            GloboFanCommand.OFF: ("OFF", IRCODE_FAN_OFF),
        }
        if command not in dispatch:
            print("Unknown fan command '" + command + "'.")
            return
        label, ircode = dispatch[command]
        print("Send FAN " + label + " command.")
        WAVE_GENERATOR.generateWave(ircode)
        print("Sending wave ...")
        WAVE_GENERATOR.sendWave()
        print("Sending wave finished.")
import torch
use_cuda = torch.cuda.is_available()
def check_nan(model):
    """Return True when no parameter of *model* contains NaN, else False."""
    return all(not torch.isnan(param).any()
               for _, param in model.named_parameters())
def evaluate(model, criterion, dataloader):
    """Return the mean reconstruction loss of an auto-encoder over a loader.

    Puts the model in eval mode for the pass (affects e.g. BatchNorm) and
    restores train mode before returning. Targets from the loader are moved
    to GPU alongside the inputs but the loss is computed against the inputs
    themselves (auto-encoder reconstruction).
    """
    model.eval()
    total_loss = 0.
    batch_count = 0
    for batch, target in dataloader:
        if use_cuda:
            batch, target = batch.cuda(), target.cuda()
        reconstruction = model(batch).view(batch.size())
        total_loss = total_loss + criterion(reconstruction, batch)
        batch_count += 1
    model.train()
    return total_loss / batch_count
import torch.nn as nn
import torch.nn.functional as F
"""
MLP Layer used after graph vector representation
"""
class MLPReadout(nn.Module):
    """MLP readout head applied after the graph vector representation.

    Halves the width at every hidden layer:
    input_dim -> input_dim//2 -> ... -> input_dim//2**L -> output_dim,
    with ReLU between all but the final linear layer.
    """

    def __init__(self, input_dim, output_dim, L=2):  # L = number of hidden layers
        super().__init__()
        # Widths of successive layers; the last pair maps onto output_dim.
        dims = [input_dim // 2 ** k for k in range(L + 1)] + [output_dim]
        self.FC_layers = nn.ModuleList(
            nn.Linear(d_in, d_out, bias=True)
            for d_in, d_out in zip(dims[:-1], dims[1:])
        )
        self.L = L

    def forward(self, x):
        h = x
        for hidden in self.FC_layers[:-1]:
            h = F.relu(hidden(h))
        return self.FC_layers[-1](h)
|
from django.db import models
from rest_framework import serializers
from rest_framework_bulk import BulkListSerializer, BulkSerializerMixin, ListBulkCreateUpdateAPIView
class Foo(models.Model):
    """Minimal model used to exercise the bulk serializer/view stack."""
    # Single text column; only field besides the implicit primary key.
    name = models.CharField(max_length=20)

    class Meta:
        # Explicit app label so the model registers without an installed app.
        app_label = "foobar"
class FooSerializer(BulkSerializerMixin, serializers.ModelSerializer):
    """ModelSerializer for Foo wired for bulk (list-level) operations."""
    class Meta(object):
        model = Foo
        # BulkListSerializer provides the list-level create/update behavior.
        list_serializer_class = BulkListSerializer
class FooView(ListBulkCreateUpdateAPIView):
    """Endpoint exposing list, bulk-create and bulk-update for Foo."""
    queryset = Foo.objects.all()
    serializer_class = FooSerializer
|
from matplotlib.font_manager import FontProperties
# Attributes on FontProperties object to check for consistency
# Attributes on FontProperties object to check for consistency
# (each entry is a getter name invoked via getattr in the tests below).
keys = [
    "get_family",
    "get_style",
    "get_variant",
    "get_weight",
    "get_size",
]
def test_fontconfig_pattern():
    """Test converting a FontProperties to string then back."""
    # (label, original) pairs: defaults, a basic spec, and a full spec.
    cases = [
        ("defaults ", FontProperties()),
        ("basic ", FontProperties(family="serif", size=20, style="italic")),
        ("full ", FontProperties(family="sans-serif", size=24, weight="bold",
                                 style="oblique", variant="small-caps",
                                 stretch="expanded")),
    ]
    for label, original in cases:
        # Round-trip through the fontconfig pattern string.
        rebuilt = FontProperties(str(original))
        for k in keys:
            assert getattr(original, k)() == getattr(rebuilt, k)(), label + k
def test_fontconfig_str():
    """Test FontProperties string conversions for correctness."""
    # Known good strings taken from actual font config specs on a linux box
    # and modified for MPL defaults.
    cases = [
        # Default values found by inspection.
        ("defaults ",
         "sans\\-serif:style=normal:variant=normal:weight=normal"
         ":stretch=normal:size=12.0",
         FontProperties()),
        ("full ",
         "serif:size=24:style=oblique:variant=small-caps:weight=bold"
         ":stretch=expanded",
         FontProperties(family="serif", size=24, weight="bold",
                        style="oblique", variant="small-caps",
                        stretch="expanded")),
    ]
    for label, pattern, expected in cases:
        parsed = FontProperties(pattern)
        for k in keys:
            assert getattr(parsed, k)() == getattr(expected, k)(), label + k
|
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import time
from tempfile import gettempdir
# pylint: disable=import-error
from hyperopt import trials_from_docs
from openvino.tools.pot.benchmark.benchmark import benchmark_embedded
from openvino.tools.pot.graph import save_model
from openvino.tools.pot.utils.logger import get_logger
from openvino.tools.pot.utils.object_dump import object_dumps, object_loads
from openvino.tools.pot.utils.utils import create_tmp_dir
try:
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError
from bson.objectid import ObjectId
import gridfs
except ImportError:
raise ImportError('Pymongo is not installed. Please install it before using multinode configuration.')
# How often (seconds) to log progress while polling for remote data.
REMOTE_DATA_INFO_FREQ_S = 10
# Upper limits (seconds) on the various polling/retry loops below.
RESTORATION_TIME_LIMIT_S = 60 * 60
TRIALS_RESTORATION_TIME_LIMIT_S = 10
UNLOCK_TIME_LIMIT_S = 60
logger = get_logger(__name__)
class Multinode:
    def __init__(self, system_config, config_name):
        """Read the multinode section of *system_config* and connect to MongoDB.

        config_name becomes the default tag (prefix for all remote
        collection names); _set_config() may override it.
        """
        self.type = None            # 'server' or 'client' (set in _set_config)
        self.server_addr = None     # MongoDB connection address
        self.tag = config_name      # namespace prefix for remote collections
        self.name = 'no_name'
        self.time_limit = RESTORATION_TIME_LIMIT_S      # polling limit (s)
        self.unlock_time_limit_s = UNLOCK_TIME_LIMIT_S  # lock-wait limit (s)
        self.config = system_config.multinode
        self.client = None          # MongoClient, bound in _set_config
        self.trials = None          # remote collections, bound in _set_config
        self.model = None
        self.search_space = None
        self.evaluated_params = None
        self.params = None
        self.remote_fs = None       # GridFS bucket for large serialized blobs
        self.mode = 'peer'          # 'peer' or 'master'
        self.id = None
        self.wait_for_client = True
        self._set_config()
    def _set_config(self):
        """Validate the multinode config, then bind all remote collections.

        Raises Exception when 'type'/'server_addr' are missing or invalid.
        """
        if all(key in self.config for key in ['type', 'server_addr']):
            self.type = self.config['type']
            if self.type not in ['server', 'client']:
                raise Exception('Illegal value for type in multinode config!')
            self.server_addr = self.config["server_addr"]
        else:
            raise Exception('Missing "server_addr" or "type" in multinode config')
        # Optional overrides of the defaults set in __init__.
        if 'mode' in self.config:
            if self.config['mode'] in ['master', 'peer']:
                self.mode = self.config['mode']
        if 'tag' in self.config:
            self.tag = self.config['tag']
        if 'name' in self.config:
            self.name = self.config['name']
        if 'time_limit' in self.config:
            self.time_limit = self.config['time_limit']
        # One MongoDB collection per data kind, namespaced by the tag.
        self.client = MongoClient(self.server_addr)
        database = self.client['pot']
        self.trials = database[self.tag + '.trials']
        self.model = database[self.tag + '.model']
        self.search_space = database[self.tag + '.search_space']
        self.evaluated_params = database[self.tag + '.evaluated_params']
        self.params = database[self.tag + '.params']
        self.fp32 = database[self.tag + '.fp32']
        self.clients = database[self.tag + '.clients']
        self._init_gridfs(database)
        self._clear_remote_data(database)
    def _init_gridfs(self, database):
        """ Initialize gridfs for mongodb."""
        try:
            # Cheap round-trip to verify the MongoDB server is reachable.
            self.client.server_info()
        except ServerSelectionTimeoutError:
            raise Exception('WARNING: Could not connect to MongoDB!!!')
        # GridFS
        self.remote_fs = gridfs.GridFSBucket(database, bucket_name=self.tag)
def _clear_remote_data(self, database):
""" Removes remote data from remote database """
chunks = database[self.tag + '.chunks']
files = database[self.tag + '.files']
if self.type == 'server':
if self.trials.count_documents({}):
self.trials.drop()
logger.info('Remote trials found and removed.')
if self.search_space.count_documents({}):
self.search_space.drop()
logger.info('Remote search space found and removed.')
if self.evaluated_params.count_documents({}):
self.evaluated_params.drop()
logger.info('Remote evaluated parameters set found and removed.')
if self.params.count_documents({}):
self.params.drop()
logger.info('Remote config found and removed.')
# GridFS data cleanup
if chunks.count_documents({}) or files.count_documents({}):
chunks.drop()
files.drop()
logger.info('Remote trials file found and removed.')
if self.model.count_documents({}):
self.model.drop()
logger.info('Remote model params found and removed.')
if self.clients.count_documents({}):
self.clients.drop()
logger.info('Remote clients found and removed.')
if self.fp32.count_documents({}):
self.fp32.drop()
logger.info('Remote fp32 data found and removed.')
def update_or_restore_config(self, _config, valid=True):
""" Update or restore remote loss function config."""
if self.type == 'client':
_config = self._restore_remote_config()
logger.info('Remote config restored')
else:
self._update_remote_config(_config, valid)
logger.info('Remote config updated')
return _config
def update_or_restore_fp32(self, _fp32_acc, _fp32_lat):
""" Update or restore fp32 data (lat only)."""
if self.mode == 'master':
if self.type == 'server':
self._update_remote_fp32(_fp32_acc, _fp32_lat)
if self.type == 'client':
_, lat = self._restore_fp32()
return lat
return _fp32_lat
    def _update_remote_fp32(self, _fp32_acc, _fp32_lat):
        """ Update remote fp32 function config."""
        # Server-only; values are serialized blobs restored via object_loads.
        if self.type == 'server':
            self.fp32.insert_one({
                'fp32_lat': object_dumps(_fp32_lat),
                'fp32_acc': object_dumps(_fp32_acc)})
            logger.info('Remote fp32 data updated under name: {}'.format(self.tag))
    def _restore_fp32(self):
        """ Restore fp32 function config from remote database.

        Polls once per second (up to self.time_limit) until the server
        publishes fp32 data; raises on timeout. Returns (acc, lat), or
        (None, None) when this node is not a client.
        """
        if self.type == 'client':
            time_left = self.time_limit
            while time_left:
                if self.fp32.count_documents({}):
                    fp32_object = self.fp32.find({})
                    _fp32_acc = object_loads(fp32_object[0]['fp32_acc'])
                    _fp32_lat = object_loads(fp32_object[0]['fp32_lat'])
                    logger.info('Remote fp32 data restored')
                    return _fp32_acc, _fp32_lat
                # Periodic progress message while waiting.
                if not time_left % REMOTE_DATA_INFO_FREQ_S:
                    logger.info('Waiting for remote data (fp32): {}s'.format(time_left))
                time.sleep(1)
                time_left -= 1
            raise Exception('WARNING: Time limit for Remote reached!!! config name: {}'.format(
                self.tag))
        # Non-client nodes have nothing to restore.
        return None, None
    def restore_remote_trials(self):
        """ Restore trials from remote database.

        Polls once per second (up to TRIALS_RESTORATION_TIME_LIMIT_S) for a
        trials document, downloads the referenced GridFS file and
        deserializes it into a hyperopt Trials object. Raises on timeout.
        """
        time_left = TRIALS_RESTORATION_TIME_LIMIT_S
        while time_left:
            if self.trials.count_documents({}):
                trials_object = self.trials.find({})
                # The document stores only a GridFS file id; the payload
                # itself lives in the bucket.
                trials_object_gfs = self.remote_fs.open_download_stream(trials_object[0]['file_id'])
                hpopt_trials = object_loads(trials_object_gfs.read())
                logger.info('Remote trials restored: {}'.format(len(hpopt_trials.trials)))
                return hpopt_trials
            if not time_left % REMOTE_DATA_INFO_FREQ_S:
                logger.info('Waiting for remote data (trials): {}s'.format(time_left))
            time.sleep(1)
            time_left -= 1
        raise Exception('WARNING: Time limit for Remote reached! config name: {}'.format(
            self.tag))
def _restore_remote_config(self):
""" Restore loss function config from remote database."""
time_left = self.time_limit
while time_left:
if self.params.count_documents({}):
params_object = self.params.find({})
config_valid = params_object[0]['valid']
if config_valid:
config = params_object[0]['config']
logger.info('\nRemote params to be restored:\n\
max_trials: {}\n\
max_minutes: {}\n\
accuracy_loss: {}\n\
latency_reduce: {}\n\
expected_quantization_ratio: {}\n\
accuracy_weight: {}\n\
latency_weight: {}\n\
quantization_ratio_weight: {}\n\
eval_subset_size: {}'.format(config.get('max_trials', None),
config.get('max_minutes', None),
config.get('accuracy_loss', 1),
config.get('latency_reduce', 1),
config.get('expected_quantization_ratio', 0.5),
config.get('accuracy_weight', 1.0),
config.get('latency_weight', 1.0),
config.get('quantization_ratio_weight', 1.0),
config.get('eval_subset_size', None)))
return config
if not time_left % REMOTE_DATA_INFO_FREQ_S:
logger.info('Found old config. Waiting for config to be updated by server.')
if not time_left % REMOTE_DATA_INFO_FREQ_S:
logger.info('Waiting for remote config: {}s'.format(time_left))
time.sleep(1)
time_left -= 1
raise Exception('WARNING: Time limit for Remote reached!!! config name: {}'.format(
self.tag))
    def restore_remote_search_space(self):
        """ Restore search_space from remote database.

        Polls once per second (up to self.time_limit) for the serialized
        search space and deserializes it; raises on timeout.
        """
        time_left = self.time_limit
        while time_left:
            if self.search_space.count_documents({}):
                search_space_object = self.search_space.find({})
                search_space = object_loads(search_space_object[0]['data'])
                logger.info('Remote search_space restored')
                return search_space
            if not time_left % REMOTE_DATA_INFO_FREQ_S:
                logger.info('Waiting for remote data (search_space): {}s'.format(time_left))
            time.sleep(1)
            time_left -= 1
        raise Exception('WARNING: Time limit for Remote reached!!! config name: {}'.format(
            self.tag))
def _update_remote_config(self, _config=None, valid=True):
""" Update remote loss function config."""
if self.params.count_documents({}):
self.params.update_one({}, {'$set': {'valid': valid}})
else:
self.params.insert_one({'config': _config, 'valid': valid})
logger.info('Remote config updated under name: {}'.format(self.tag))
def update_remote_search_space(self, _search_space):
""" Update remote search space."""
self.search_space.insert_one({'data': object_dumps(_search_space)})
logger.info('Remote search space updated under name: {}'.format(self.tag))
    def update_remote_trials(self, _hpopt_trials):
        """ Upload local trials to remote database.
        - if some trials already in database, update only last trial and merge it,
        - if not, upload all local trials to remote database (for warm start mode),

        Uses a 'Lock' field on the trials document as a cooperative mutex so
        multiple nodes do not rewrite the GridFS payload concurrently.
        Returns the (possibly merged) trials object.
        """
        remote_data_updated = False
        retry_time_left = self.unlock_time_limit_s
        while not remote_data_updated and retry_time_left:
            retry_time_left -= 1
            # Update remote data if exist, if not create new remote config
            if self.trials.count_documents({'app': 'tpe'}):
                trials_object_remote_s = self.trials.find({'app': 'tpe'})
                if trials_object_remote_s[0]['Lock']:
                    # Another node is writing; retry once per second until the
                    # lock clears or the retry budget runs out.
                    logger.info('Remote collection locked. Waiting for unlock')
                    if not retry_time_left:
                        raise Exception('WARNING: Retry limit for Trial remote write reached!!!')
                    time.sleep(1)
                else:
                    # Take the lock before touching the GridFS payload.
                    self.trials.update_one({'app': 'tpe'}, {'$set': {'Lock': 1}})
                    # GridFS get remote trials file
                    current_file = trials_object_remote_s[0]['file_id']
                    if not ObjectId.is_valid(current_file):
                        raise Exception('Remote file corrupted!')
                    trials_object_remote_s_gfs = self.remote_fs.open_download_stream(current_file)
                    trials_object_remote_gfs = object_loads(trials_object_remote_s_gfs.read())
                    # Merge last local trial with remote data (only the newest
                    # local trial is appended; the remote set is authoritative).
                    _hpopt_trials = trials_from_docs(list(trials_object_remote_gfs) + [list(_hpopt_trials)[-1]])
                    # GridFS upload new trials file
                    new_file = self.remote_fs.upload_from_stream(
                        self.tag,
                        object_dumps(_hpopt_trials), metadata={'trials_count': len(_hpopt_trials.trials)})
                    # Update trials info with new trials file_id and release the lock
                    self.trials.update_one({'app': 'tpe'}, {'$set': {'Lock': 0, 'file_id': new_file}})
                    # Remove old remote trials file
                    self.remote_fs.delete(current_file)
                    logger.info('Remote trials updated. Total: {} (tag: {})'.format(len(_hpopt_trials.trials),
                                                                                    self.tag))
                    remote_data_updated = True
                    return _hpopt_trials
            else:
                # First write (warm start): upload the full local trials set.
                new_file = self.remote_fs.upload_from_stream(self.tag, object_dumps(_hpopt_trials),
                                                             metadata={'trials_count': len(_hpopt_trials.trials)})
                # unlock trials to be available for other nodes with current trials id
                self.trials.insert_one({'app': 'tpe', 'Lock': 0, 'file_id': new_file})
                logger.info('No remote trials. First write for config {}'.format(self.tag))
                remote_data_updated = True
                return _hpopt_trials
    def update_remote_evaluated_params(self, _evaluated_params):
        """ Upload local evaluated params to remote database.
        - if some params already in database, update only last params and merge it,
        - if not, upload all local params to remote database (for warm start mode),

        Mirrors update_remote_trials: a 'Lock' field acts as a cooperative
        mutex between nodes.  Returns the (possibly merged) parameter list.
        """
        remote_data_updated = False
        retry_time_left = self.unlock_time_limit_s
        while not remote_data_updated and retry_time_left:
            retry_time_left -= 1
            # Update remote data if exist, if not create new remote config
            if self.evaluated_params.count_documents({'app': 'tpe'}):
                params_object_remote_s = self.evaluated_params.find({'app': 'tpe'})
                if params_object_remote_s[0]['Lock']:
                    # Another node holds the write lock; poll once per second
                    # until it is freed or the retry budget is exhausted.
                    logger.info('Remote collection locked. Waiting for unlock')
                    if not retry_time_left:
                        raise Exception('WARNING: Retry limit for evaluated parameters remote write reached!!!')
                    time.sleep(1)
                else:
                    self.evaluated_params.update_one({'app': 'tpe'}, {'$set': {'Lock': 1}})
                    remote_params = object_loads(params_object_remote_s[0]['data'])
                    if not isinstance(remote_params, list):
                        raise Exception('Received remote parameters object is not a list!!!')
                    # Merge: keep everything remote plus only the newest local entry.
                    _evaluated_params = list(remote_params) + [list(_evaluated_params)[-1]]
                    self.evaluated_params.update_one(
                        {'app': 'tpe'}, {'$set': {'Lock': 0, 'data': object_dumps(_evaluated_params)}})
                    remote_data_updated = True
                    logger.info('Remote evaluated parameters set updated under name: {}'.format(self.tag))
                    return _evaluated_params
            else:
                # First write (warm start): store the full local set, unlocked.
                self.evaluated_params.insert_one({'app': 'tpe', 'Lock': 0, 'data': object_dumps(_evaluated_params)})
                remote_data_updated = True
                logger.info('Remote evaluated parameters set updated under name: {}'.format(self.tag))
                return _evaluated_params
    def restore_remote_evaluated_params(self):
        """ Restore evaluated_params from remote database.

        Polls once per second until the collection holds a document, then
        deserializes and returns its ``data`` field.  Raises after
        ``self.time_limit`` seconds without data.
        """
        time_left = self.time_limit
        while time_left:
            if self.evaluated_params.count_documents({}):
                evaluated_params_object = self.evaluated_params.find({})
                evaluated_params = object_loads(evaluated_params_object[0]['data'])
                logger.info('Remote evaluated_params restored')
                return evaluated_params
            # Throttle progress messages to every REMOTE_DATA_INFO_FREQ_S seconds.
            if not time_left % REMOTE_DATA_INFO_FREQ_S:
                logger.info('Waiting for remote data (evaluated_params): {}s'.format(time_left))
            time.sleep(1)
            time_left -= 1
        raise Exception('WARNING: Time limit for Remote reached!!! config name: {}'.format(
            self.tag))
    def request_remote_benchmark(self, model, iteration):
        """ For Clients only.
        Upload request with params for server to create model and run benchmark.
        Wait for response.

        Returns the measured latency, or None when no result arrived within
        ``self.time_limit`` seconds (or when this node is not a client in
        master mode).
        """
        if self.type == 'client' and self.mode == 'master':
            self._set_client_status(active=True)
            model_file_id, weights_file_id = self.upload_model(model, iteration)
            # 'lat' == 0 marks the request as not yet benchmarked; the server
            # overwrites it with the measured latency (see
            # calculate_remote_requests).
            remote_id = self.model.insert_one({
                'name': self.name,
                'iter': iteration,
                'lat': 0,
                'file_id': model_file_id,
                'bin_file_id': weights_file_id})
            logger.info('Model queued for remote evaluation id: {}'.format(remote_id.inserted_id))
            time_left = 0
            remote_lat = 0
            # Poll once per second until the server fills in a latency.
            while not remote_lat and time_left < self.time_limit:
                remote_params_obj = self.model.find({'_id':remote_id.inserted_id})[0]
                remote_lat = remote_params_obj['lat']
                time_left += 1
                if not time_left % REMOTE_DATA_INFO_FREQ_S:
                    logger.info('Waiting for remote benchmark: {}s'.format(time_left))
                time.sleep(1)
            if remote_lat:
                logger.info('remote_lat_client: {}'.format(remote_lat))
                # Clean up the request document and both GridFS payloads.
                self.model.delete_one({'_id' : remote_id.inserted_id})
                self.remote_fs.delete(model_file_id)
                self.remote_fs.delete(weights_file_id)
                return remote_lat
            return None
    def upload_model(self, model, trial_no):
        """ Serialize ``model`` into a scratch directory and push both
        artifacts (topology .xml and weights .bin) to GridFS.

        Returns:
            tuple: (model_file_id, weights_file_id) GridFS object ids.

        NOTE(review): the temp directory is not removed here — presumably its
        finalizer (or the OS) cleans it up; confirm create_tmp_dir semantics.
        """
        tmp_dir, path_to_model, path_to_weights = self._create_temp_dir()
        save_model(model, tmp_dir, 'tmp_model')
        with open(path_to_model, 'rb') as file:
            model_file_id = self.remote_fs.upload_from_stream(
                self.tag,
                file, metadata={'iter': trial_no, 'type': 'model'})
        with open(path_to_weights, 'rb') as file:
            weights_file_id = self.remote_fs.upload_from_stream(
                self.tag,
                file, metadata={'iter': trial_no, 'type': 'weights'})
        return model_file_id, weights_file_id
    def calculate_remote_requests(self):
        """ For Server only.
        Check in loop for requests from clients to run benchmark.
        Finish when all clients clear activation flag.
        """
        if self.type == 'server' and self.mode == 'master':
            waiting_time = 1
            # _check_clients_status logs only every REMOTE_DATA_INFO_FREQ_S ticks.
            while self._check_clients_status(not waiting_time % REMOTE_DATA_INFO_FREQ_S):
                waiting_time += 1
                # 'lat' == 0 marks requests that still need benchmarking.
                model_count = self.model.count_documents({'lat' : {'$eq': 0}})
                if not waiting_time % REMOTE_DATA_INFO_FREQ_S:
                    logger.info('Waiting for requests: {}'.format(waiting_time))
                    if model_count:
                        logger.info('Models in queue: {}'.format(model_count))
                if model_count:
                    # Serve the oldest pending request.
                    remote_params_obj = self.model.find({'lat' : {'$eq': 0}})[0]
                    remote_lat = remote_params_obj['lat']
                    remote_iter = remote_params_obj['iter']
                    remote_name = remote_params_obj['name']
                    remote_file_id = remote_params_obj['file_id']
                    remote_file_id_bin = remote_params_obj['bin_file_id']
                    logger.info('Starting for: {} iter: {}'.format(remote_name, remote_iter))
                    if remote_lat == 0:
                        lat = self._run_remote_benchmark(remote_file_id, remote_file_id_bin)
                        logger.info('name: {} remote_lat_res: {} for iter: {}'.format(remote_name, lat, remote_iter))
                        # Publishing the latency also signals the waiting client.
                        self.model.update_one({'_id': remote_params_obj['_id']}, {'$set': {'lat': lat}})
                time.sleep(1)
def _create_temp_dir(self):
__MODEL_PATH__ = create_tmp_dir(gettempdir())
model_name = 'tmp_model'
path_to_model = __MODEL_PATH__.name + '/' + model_name + '.xml'
path_to_weights = __MODEL_PATH__.name + '/' + model_name + '.bin'
return __MODEL_PATH__.name, path_to_model, path_to_weights
    def _run_remote_benchmark(self, file_id, remote_file_id_bin):
        """ Download a model (.xml) and its weights (.bin) from GridFS into a
        scratch directory, benchmark the model and return the latency.
        """
        __MODEL_PATH__ = create_tmp_dir(gettempdir())
        model_name = 'tmp_model'
        path_to_model = __MODEL_PATH__.name + '/' + model_name + '.xml'
        path_to_weights = __MODEL_PATH__.name + '/' + model_name + '.bin'
        new_file = self.remote_fs.open_download_stream(file_id)
        new_file_bin = self.remote_fs.open_download_stream(remote_file_id_bin)
        with open(path_to_model, 'wb') as file:
            file.write(new_file.read())
        with open(path_to_weights, 'wb') as file:
            file.write(new_file_bin.read())
        # NOTE(review): only the .xml path is passed; presumably
        # benchmark_embedded finds the .bin alongside it — confirm.
        lat = benchmark_embedded(mf=path_to_model)
        return lat
    def _check_clients_status(self, log=False):
        """ Check if are any clients in 'active' state.

        Returns a truthy value while the server should keep serving: the
        number of active clients, or 1 while still waiting for the first
        client to ever connect (``self.wait_for_client``).
        """
        clients_number = self.clients.count_documents({})
        clients_active = self.clients.count_documents({'active' : {'$eq': 1}})
        if log:
            logger.info('Total/Active clients:{}/{}'.format(clients_number, clients_active))
        if clients_active:
            # First activity seen: stop treating "no clients yet" as busy.
            self.wait_for_client = False
        return 1 if self.wait_for_client else clients_active
    def _set_client_status(self, active=False):
        """ Set or clear 'active' flag for clients.

        Registers this client on first use (insert, remembering the result in
        ``self.id``) and updates the existing document afterwards.  No-op for
        non-client nodes.
        """
        if self.type == 'client':
            if self.id is None:
                self.id = self.clients.insert_one({'active': 1 if active else 0})
                logger.info('Setting client status to True')
            else:
                self.clients.update_one({'_id': self.id.inserted_id}, {'$set': {'active': 1 if active else 0}})
                if not active:
                    logger.info('Setting client status to False')
def cleanup(self):
""" Ending cleanup """
if self.type == 'client':
self._set_client_status(active=False)
if self.type == 'server':
self._update_remote_config(valid=False)
|
###############################################################################
# Copyright (c) 2017-2020 Koren Lev (Cisco Systems), #
# Yaron Yogev (Cisco Systems), Ilia Abashin (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from genson import SchemaBuilder
from base.utils.mongo_access import MongoAccess
from base.utils.string_utils import stringify_doc
from scan.processors.processor import Processor
class ProcessInventorySchema(Processor):
    """Build a JSON schema for every inventory object type and upsert it
    into the 'schemas' collection."""
    # Prerequisites should include all processors that modify the inventory fields
    PREREQUISITES = ["ProcessVedgeType", "ProcessVnicCount"]
    COLLECTION = "schemas"
    def run(self):
        super().run()
        # Group all inventory documents by their 'type' field; each group
        # carries its full documents under 'objects'.
        objects_by_type_groups = self.inv.collections["inventory"].aggregate([
            {
                "$group": {
                    "_id": "$type",
                    "objects": {"$push": "$$ROOT"}
                }
            }
        ])
        for objects_by_type in objects_by_type_groups:
            schema_builder = SchemaBuilder()
            schema_builder.add_schema({"type": "object", "properties": {}})
            for obj in objects_by_type["objects"]:
                # stringify_doc mutates obj in place — presumably converting
                # non-JSON-serializable values to strings; confirm.
                stringify_doc(obj)
                schema_builder.add_object(obj)
            # One schema document per object type, created on first sight.
            self.inv.collections["schemas"].update_one({
                "type": objects_by_type["_id"],
            }, {
                "$set": {
                    "type": objects_by_type["_id"],
                    "schema": MongoAccess.encode_mongo_keys(schema_builder.to_schema())
                }
            },
                upsert=True
            )
|
from gym.envs.registration import register

# Expose the WareHouse environment to gym.make() under the id below.
# NOTE(review): 'gym_wahrehouse' looks like a typo for 'gym_warehouse', but it
# must match the actual package name — confirm before changing.
register(
    entry_point='gym_wahrehouse.envs:WareHouse',
    id='warehouse-v0',
)
|
import setuptools

# Read the long description from the README so PyPI renders the real docs.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="Medeina",
    version="1.6",
    author="Daniel Davies",
    author_email="dd16785@bristol.ac.uk",
    description="A cumulative food web",
    # BUG FIX: long_description and long_description_content_type were
    # swapped — the literal "hello" was published as the description and the
    # README body was used as the content-type. PyPI expects the document
    # here and a MIME type below.
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Daniel-Davies/Medeina",
    packages=setuptools.find_packages(),
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    keywords=["Food Webs", "Species networks", "Cumulative Networks"],
    install_requires=[
        "pycountry",
        "taxon_parser",
        "networkx",
        "numpy",
        "msgpack",
        "EcoNameTranslator",
        "requests",
        "pandas",
    ],
)
|
#!/usr/bin/env python
import pdb
import argparse
from brick_server import create_app
def get_args():
    """Build and evaluate the command-line parser.

    Returns:
        argparse.Namespace with 'configfile', 'port' and 'host' attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--configfile',
                        help='Location of the config file',
                        type=str,
                        default='configs/configs.json')
    parser.add_argument('--port', type=int, default=7889)
    parser.add_argument('--host', type=str, default='0.0.0.0')
    return parser.parse_args()
def main():
    """Entry point: parse CLI options, build the app, and serve it."""
    cli = get_args()
    server = create_app(configfile=cli.configfile)
    server.run(cli.host, port=cli.port)
if __name__ == '__main__':
    # Run the HTTP server when executed as a script.
    main()
|
import os
import uuid
import wandb
import logging
from tensorboardX import SummaryWriter
class Base:
    """
    Base configure file, which contains the basic training parameters and should be inherited by other attribute configure file.
    """
    def __init__(self, config_name, work_dir):
        # config_name doubles as the experiment "type"; id is a fresh UUID so
        # every run gets its own model and log directories.
        self.type = config_name
        self.id = str(uuid.uuid4())
        self.note = ""
        self.work_dir = work_dir
        self.data_dir = self.work_dir + "/data/%s/" % self.type
        self.model_dir = self.work_dir + "/model/%s/%s/" % (self.type, self.id)
        self.log_dir = self.work_dir + "/log/%s/%s/" % (self.type, self.id)
        # data: train/val/test TSVs all share the same images/ directory
        self.train_tsv_file = os.path.join(self.data_dir, "train.tsv")
        self.train_pic_dir = os.path.join(self.data_dir, "images/")
        self.train_num_workers = 0
        self.val_tsv_file = os.path.join(self.data_dir, "val.tsv")
        self.val_pic_dir = os.path.join(self.data_dir, "images/")
        self.val_num_workers = 0
        self.test_tsv_file = os.path.join(self.data_dir, "test.tsv")
        self.test_pic_dir = os.path.join(self.data_dir, "images/")
        self.test_num_workers = 0
        self.debug = False
        self.loader_type = "alignment"
        # train
        self.batch_size = 32
        self.val_batch_size = 1
        self.test_batch_size = 1
        self.channels = 3
        self.width = 128
        self.height = 128
        # mean values in r, g, b channel.
        self.means = (127, 127, 127)
        self.scale = 0.0078125
        self.fix_backbone = False
        self.finetune_lastlayer = False
        self.display_iteration = 100
        self.val_epoch = 1
        self.milestones = [50, 80]
        self.max_epoch = 100
        self.nstack = 1
        self.classes_num = [1000]
        self.label_num = len(self.classes_num)
        # optimizer: one of ["adam", "sgd"]
        self.optimizer = "adam"
        self.learn_rate = 0.1
        self.momentum = 0.01  # caffe: 0.99
        self.weight_decay = 0.0
        self.nesterov = False
        self.scheduler = "MultiStepLR"
        self.gamma = 0.1
        self.net = "resnet18"
        self.loss_weights = [1.0]
        self.criterions = ["SoftmaxWithLoss"]
        self.metrics = ["Accuracy"]
        self.key_metric_index = 0
        self.use_tags = False
        # model
        self.ema = False
        self.save_initial_model = True
        self.model_save_epoch = 1
        # visualization (created lazily in init_instance)
        self.writer = None
        # wandb handle (set in init_instance)
        self.wandb = None
        # log file handle (set in init_instance)
        self.logger = None
    def init_instance(self):
        """Create the side-effecting resources: tensorboard writer, wandb run
        and a root logger writing to both console and log_dir/log.txt."""
        # visualization
        self.writer = SummaryWriter(log_dir=self.log_dir, comment=self.type)
        # wandb
        # NOTE(review): hardcoded API key committed to source — this is a
        # leaked secret; it should come from an environment variable.
        wandb_key = "3462de1f0c2817d194002922c8ffd438ff4c5b6c"# to be changed to yours.
        if wandb_key is not None:
            wandb.login(key=wandb_key)
            wandb.init(project=self.type, dir=self.log_dir,
                       name=self.id, tensorboard=True, sync_tensorboard=True)
            self.wandb = wandb
        # log file: attach file + console handlers to the root logger
        log_formatter = logging.Formatter("%(asctime)s %(levelname)-8s: %(message)s")
        root_logger = logging.getLogger()
        file_handler = logging.FileHandler(self.log_dir + "log.txt")
        file_handler.setFormatter(log_formatter)
        file_handler.setLevel(logging.NOTSET)
        root_logger.addHandler(file_handler)
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(log_formatter)
        console_handler.setLevel(logging.NOTSET)
        root_logger.addHandler(console_handler)
        root_logger.setLevel(logging.NOTSET)
        self.logger = root_logger
    def __del__(self):
        # tensorboard --logdir self.log_dir
        if self.writer is not None:
            #self.writer.export_scalars_to_json(self.log_dir + "visual.json")
            self.writer.close()
|
import os
from config import paths
from utils import generic
def screen(fasta_path=None, output_path=None):
    """Filter a FASTA file, keeping only records whose sequence characters
    are all canonical one-letter amino-acid codes.

    Args:
        fasta_path: Input FASTA; defaults to the bundled mg_50.fasta.
        output_path: Destination for the kept records; defaults to
            overwriting fasta_path in place.
    """
    if not fasta_path:
        fasta_fname = "mg_50.fasta"
        fasta_path = os.path.join(paths.ROOT, 'data', 'user', 'input', fasta_fname)
    output = []
    # Valid residues are the values of the 3-letter -> 1-letter AA map.
    alphabets = set(generic.AA3_to_AA1.values())
    to_keep = True
    with open(fasta_path, 'r') as file:
        # BUG FIX: next(file) raised StopIteration on an empty input file;
        # default to None and skip the scan instead.
        title = next(file, None)
        current_seq = ""
        if title is not None:
            for line in file:
                if line.startswith(">"):
                    # Header line: flush the previous record if it survived
                    # screening, then start a new one.  (Both original
                    # branches ended with to_keep = True, so they collapse.)
                    if to_keep:
                        output.append(title)
                        output.append(current_seq)
                    title = line
                    current_seq = ""
                    to_keep = True
                    continue
                if not to_keep:
                    continue  # rest of an already-rejected record
                for char in line.strip():
                    if char not in alphabets:
                        to_keep = False
                        break
                current_seq += line
            # Flush the final record.
            if to_keep:
                output.append(title)
                output.append(current_seq)
    if output_path is None:
        output_path = fasta_path
    with open(output_path, 'w') as file:
        file.writelines(output)
if __name__ == "__main__":
    # Screen the 100-sequence FASTA at the project root into a new file.
    screen(os.path.join(paths.ROOT, "mg_100.fasta"), os.path.join(paths.ROOT,
                                                                  "mg_100_screened.fasta"))
|
import os
from setuptools import setup

# Pull the long description from the README that sits next to this file.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()

setup(
    name='csvTovcf',
    packages=['csv2vcard'],
    version='0.3.0',
    description='A library for converting csv to .vcf file',
    long_description=README,
    long_description_content_type='text/markdown',
    author='Shivam Mani Tripathi',
    author_email='abc@privatemail.com',
    url='https://github.com/stripathi669/csv2vcard',
    download_url='https://github.com/stripathi669/csv2vcard/archive/0.3.0.tar.gz',
    keywords=['csv', 'vcard', 'export', 'vcf'],
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
)
|
#-------------------------------------------------------------------------------
# Name:        modify_annotation_path
# Purpose:     strips the old path from the annotation
# Environment: Windows 10 OS, Python 3.6
# Author: No This Is Patrick
#
# Created: 17/02/2018
# Modify XML File Library: https://docs.python.org/2/library/xml.etree.elementtree.html#module-xml.etree.ElementTree
# Get current directory: https://stackoverflow.com/questions/5137497/find-current-directory-and-files-directory
# XML manipulation: https://stackoverflow.com/questions/37868881/how-to-search-and-replace-text-in-an-xml-file-using-python
#
# Commands: 1. You can provide no arguments, where you will be prompted to
# select the .xml file's directory and the new path string for the files
# 2. You can provide a single command line argument for the new path string
# and you will be prompted to select the .xml file's directory
# 3. You may provide two arguments where you provide the .xml file's directory
# and the newly desired path string
#-------------------------------------------------------------------------------
import os # directory library
import sys # sys.argv library
import xml.etree.ElementTree as ET
from tkinter.filedialog import askdirectory
"""
#
"""
def replace_file_path(f_name, new_path, label):
    """Rewrite the text of every <label> element in the XML file f_name.

    For label == "name" the element text becomes new_path verbatim; for any
    other label (e.g. "path") it becomes new_path + "/" + <xml file name>.
    The file is rewritten in place; on any parse/IO failure the error is
    reported and the process exits with status 1.
    """
    # Derive the annotation's file name from a Windows-style path.
    name = f_name.split("\\")
    name = name[len(name)-1]
    try:
        with open(f_name, encoding='utf-8') as f:
            tree = ET.parse(f)
        root = tree.getroot()
        for path in root.iter(label):
            if(label == "name"):
                path.text = str(new_path)
            else:
                path.text = str(new_path + "/" + name)
        # Write after the read handle is closed (safer on Windows).
        tree.write(f_name)
    except Exception:
        # Narrowed from a bare 'except:' so Ctrl-C / SystemExit still work;
        # also removed the unused 'array' local the original declared.
        print("Error: Unable to extract .xml file contents!!!", f_name)
        exit(1)
def main():
    """CLI driver: pick the label mode, resolve the annotation directory and
    the new path from argv (prompting with tkinter dialogs for whatever is
    missing), then rewrite every .xml file in the directory.

    The original duplicated the whole argument cascade for the '-name' and
    default modes; slicing argv once removes that duplication.
    """
    # '-name' switches from rewriting <path> elements to rewriting <name>.
    if len(sys.argv) > 1 and sys.argv[1] == "-name":
        label = "name"
        args = sys.argv[2:]
    else:
        label = "path"
        args = sys.argv[1:]
    # Resolve <directory> and <new path>; prompt for anything not supplied.
    if len(args) == 2:
        directory, new_path = args
    elif len(args) == 1:
        print("Select the directory containing the annotated files")
        directory = askdirectory()
        new_path = args[0]
    elif len(args) == 0:
        print("Select the directory containing the annotated files")
        directory = askdirectory()
        print("Select the directory containing the new path for the files")
        new_path = askdirectory()
    else:
        print("Usage: <program name> <directory of .xml files> <new path name>")
        print("OR")
        print("Usage: <program name>")
        exit(1)
    try:
        for file in os.listdir(directory):
            if(file.endswith(".xml")):
                file = os.path.join(directory, file)
                replace_file_path(file, new_path, label)
    except Exception:
        # Narrowed from a bare 'except:'; directory-listing errors land here.
        print("Error: Wut?!")
        exit(1)
if __name__ == '__main__':
    # Script entry point.
    main()
|
import json
import os
from appdirs import user_config_dir
def merge_config_dicts(base_config, merge_src):
    """Recursively merge merge_src onto base_config and return a new dict.

    Scalar conflicts resolve to merge_src; dict-vs-dict conflicts merge
    recursively; mixed-type conflicts keep the base_config value.  Neither
    input is mutated.
    """
    merged = {}
    for key, src_value in merge_src.items():
        if key not in base_config:
            merged[key] = src_value
            continue
        base_value = base_config[key]
        base_is_dict = isinstance(base_value, dict)
        src_is_dict = isinstance(src_value, dict)
        if base_is_dict and src_is_dict:
            merged[key] = merge_config_dicts(base_value, src_value)
        elif not base_is_dict and not src_is_dict:
            merged[key] = src_value
        else:
            # Type mismatch (exactly one side is a dict): keep the base value.
            merged[key] = base_value
    # Carry over base-only keys untouched.
    for key, base_value in base_config.items():
        merged.setdefault(key, base_value)
    return merged
class Config:
    """Application configuration persisted as JSON in the per-user config
    directory.  Built-in defaults are merged with whatever is already on
    disk at construction time (disk values win for matching leaf keys)."""

    def __init__(self):
        self._config_path = os.path.join(user_config_dir("OVRT Soundpad", "jangxx"), "config.json")
        # Built-in defaults.
        self._config = {
            "board": {
                "rows": 3,
                "columns": 4,
                "pages": 1,
            },
            "sounds": {},
            "server": {
                "http_port": 64152,
                "ws_port": 64153
            },
            "overlay": {},
            "soundpad": {
                "autostart_path": None,
                "launch_soundpad_steam": False,
            }
        }
        if os.path.exists(self._config_path):
            with open(self._config_path, "r") as configfile:
                stored = json.load(configfile)
            self._config = merge_config_dicts(self._config, stored)

    def getExternalSerialized(self):
        """Return a dict of all settings that are relevant for the frontend"""
        return {key: self.get(key) for key in ("board", "sounds", "overlay")}

    def get(self, path):
        """Look up a value; path is a single key or a list of nested keys."""
        if not isinstance(path, list):
            path = [path]
        node = self._config
        for key in path:
            node = node[key]
        return node

    def _write_to_disk(self):
        # Persist the full config after every mutation.
        with open(self._config_path, "w+") as configfile:
            json.dump(self._config, configfile)

    def set(self, path, value):
        """Set the value at the nested key list `path` and persist."""
        parent = self._config
        for key in path[:-1]:
            parent = parent[key]
        parent[path[-1]] = value
        self._write_to_disk()

    def delete(self, path):
        """Remove the entry at the nested key list `path` and persist."""
        parent = self._config
        for key in path[:-1]:
            parent = parent[key]
        del parent[path[-1]]
        self._write_to_disk()

    def exists(self, path):
        """Return True when the (possibly nested) key path is present."""
        if not isinstance(path, list):
            path = [path]
        node = self._config
        for key in path:
            if key not in node:
                return False
            node = node[key]
        return True
|
from enum import Enum, unique
from math import fabs
import random
from tetris_engine import TetrisEngine, Playfield, TetrominoShape
"""
TODO if we record how the agent performs we can use emojis
🡲 Wide-Headed Rightwards Medium Barb Arrow
🡰 Wide-Headed Leftwards Medium Barb Arrow
🡳 Wide-Headed Downwards Medium Barb Arrow
↶ anticlockwise top semicircle arrow symbol
↷ clockwise top semicircle arrow symbol
⟱ downwards quadruple arrow
☰ Trigram for heaven
So for example if we record:
L🡰🡰↶⟱2☰
It means that we had an L-shape tetromino, we moved it twice to the left, rotate 90deg counterclockwise,
drop it and we made 2 lines
"""
from time import time
from functools import wraps
def timing(f):
    """Decorator: print the wall-clock runtime of each call to *f* and pass
    its return value through unchanged."""
    @wraps(f)
    def timed(*args, **kwargs):
        started = time()
        outcome = f(*args, **kwargs)
        elapsed = time() - started
        print(f'Function {f.__name__} took {elapsed:2.4f} s')
        return outcome
    return timed
class TetrisAgent(TetrisEngine):
    """Heuristic Tetris-playing agent.

    For every falling piece it simulates every rotate/shift/drop sequence,
    scores the resulting board with a weighted heuristic (aggregated height,
    holes, bumpiness, lines cleared) and then plays the best sequence for
    real.  Simulation works by saving/restoring the engine state around each
    trial sequence.
    """
    @unique
    class GameAction(Enum):
        # Values are arrow glyphs so a recorded game reads as move notation.
        MOVE_LEFT = "🡰"
        MOVE_RIGHT = "🡲"
        ROTATE_LEFT = "↶"
        ROTATE_RIGHT = "↷"
        DROP = "⟱"
        def __str__(self) -> str:
            return self.value
    def __init__(self) -> None:
        random.seed(7) # Important here so we will have always the same first falling piece for our tests. This starts with an L
        # This has to be done before super_init because there we choose already the first piece.
        super().__init__()
        # ON_PLAYFIELD_UPDATED is not bound (we don't have UI in the agent)
        self.bind_event(TetrisEngine.Events.ON_LINES_CLEARED, self.update_lines_cleared_counter)
        self.bind_event(TetrisEngine.Events.ON_GAME_OVER, self.game_over)
        self.state = {}  # snapshot used by save_state/restore_state
        self.is_game_over = False
        self.lines_cleared = 0  # lines cleared by the most recent sequence
    def calculate_heuristics(self, playfield_statistics :dict, lines_cleared :int, weights :dict) -> float:
        """Weighted board score; lower is better (see get_best_sequence)."""
        fitting_algorithm = (
            weights["weight_aggregated_height"] * playfield_statistics["aggregated_height"] +
            weights["weight_total_holes"] * playfield_statistics["total_holes"] +
            weights["weight_bumpiness"] * playfield_statistics["bumpiness"] +
            weights["weight_lines_cleared"] * lines_cleared
        )
        return fitting_algorithm
    def get_best_sequence(self, sequences :list[list[GameAction]], weights :dict) -> list[GameAction]:
        """Simulate every candidate sequence from the saved state and return
        the one with the lowest heuristic score.

        NOTE(review): min() raises ValueError if no sequence is playable —
        presumably at least one always is; confirm.
        """
        results = []
        for sequence in sequences:
            self.restore_state() # All the sequences start from the same beginning
            self.lines_cleared = 0
            can_play_sequence = self.play_sequence(sequence)
            if can_play_sequence:
                statistics = self.get_playfield_statistics(self.playfield)
                fitting_algorithm = self.calculate_heuristics(statistics, self.lines_cleared, weights)
                results.append((sequence, fitting_algorithm))
        best_result = min(results, key=lambda item: item[1])
        return best_result[0] # the sequence
    def get_playfield_column_statistics(self, column :list[str], first_row :int) -> tuple[int,int]:
        """Return (highest non-empty row index, number of holes) for one
        column.  A hole is an empty cell below the column's top block."""
        highest_non_empty_row_found = False
        highest_non_empty_row = 0
        holes_count = 0
        # Scan the column top-down (reversed enumeration order).
        for row, value in reversed(list(enumerate(column, start=first_row))):
            if not highest_non_empty_row_found:
                if value != str(TetrominoShape.NONE):
                    highest_non_empty_row_found = True
                    highest_non_empty_row = row
            else:
                if value == str(TetrominoShape.NONE):
                    holes_count += 1
        return (highest_non_empty_row, holes_count)
    def get_playfield_statistics(self, playfield: Playfield) -> dict:
        """
        Analyses a playfield after a piece has fallen and the lines are cleared
        and returns a list of useful information about the playfield
        """
        # Transpose rows into columns.
        columns = [list(column) for column in zip(*playfield.get_all_rows())]
        first_row = playfield.min_y
        column_statistics = [ self.get_playfield_column_statistics(column, first_row) for column in columns ]
        heights, holes = zip(*column_statistics)
        # It is the sum of the absolute difference in height between adjacent columns
        bumpiness = sum([abs(current-next) for current, next in zip(heights, heights[1:])])
        return {
            "aggregated_height": sum(heights),
            "total_holes": sum(holes),
            "bumpiness": bumpiness
        }
    def get_possible_sequences_with_drop(self) -> list[list[GameAction]]:
        """Enumerate every rotation (0-3 CCW turns) combined with every
        horizontal shift up to half the playfield width, each ending in a
        drop."""
        # TODO very careful not to change this. Surprisingly, changing the order
        # of the sequences produced far fewer cleared lines in one example.
        # Investigate.
        ga = self.GameAction
        rotation_sequence = [
            [ ],
            [ ga.ROTATE_LEFT ],
            [ ga.ROTATE_LEFT, ga.ROTATE_LEFT ],
            [ ga.ROTATE_LEFT, ga.ROTATE_LEFT, ga.ROTATE_LEFT ]
        ]
        sequences = [x + [ga.DROP] for x in rotation_sequence ] # The sequence without moving left or right just in the 'spawn' position
        moving_left_sequence = []
        moving_right_sequence = []
        movements_each_side = self.playfield.columns // 2
        for _ in range(movements_each_side):
            moving_left_sequence += [ ga.MOVE_LEFT]
            sequences_left = [ seq + moving_left_sequence + [ ga.DROP ] for seq in rotation_sequence]
            sequences += sequences_left
            moving_right_sequence += [ ga.MOVE_RIGHT]
            sequences_right = [ seq + moving_right_sequence + [ ga.DROP ] for seq in rotation_sequence]
            sequences += sequences_right
        return sequences
    def play_sequence(self, sequence :list) -> bool:
        """
        Try all the movements in a sequence of movements. If we run all it will return true.
        If we can't reach the end of the sequence it means that the sequence is not valid and it will return false
        """
        ga = self.GameAction
        sequence_length = len(sequence)
        can_move = True
        i = 0
        while i<sequence_length and can_move:
            if sequence[i] == ga.DROP:
                self.drop()
                can_move = True  # a drop always succeeds
            elif sequence[i] == ga.MOVE_LEFT:
                can_move = self.move_left()
            elif sequence[i] == ga.MOVE_RIGHT:
                can_move = self.move_right()
            elif sequence[i] == ga.ROTATE_LEFT:
                can_move = self.rotate_left()
            elif sequence[i] == ga.ROTATE_RIGHT:
                can_move = self.rotate_right()
            i += 1
        return can_move
    @timing
    def start_new_game(self, weights :dict, max_number_of_movements = -1) -> None:
        """
        :param max_number_of_movements: The number of movements that the agent will run.
            Default value -1 means to play until game over
            Because we play sequences of 1+ movements
            we will stop when total movements + final sequence <= max_number_of_movements
        """
        self.new_game()
        possible_sequences = self.get_possible_sequences_with_drop()
        total_lines_cleared = 0
        total_movements = 0
        while not self.is_game_over:
            self.save_state()
            # Mute events while simulating candidate sequences.
            self.enable_on_game_over_event = False
            self.enable_on_playfield_updated_event = False
            best_sequence = self.get_best_sequence(possible_sequences, weights)
            self.restore_state() # So we can really play the sequence, not only try-outs
            self.is_game_over = False # Important trying a sequence can cause game over by mistake
            self.lines_cleared = 0
            self.enable_on_game_over_event = True
            self.enable_on_playfield_updated_event = True
            if max_number_of_movements > 0:
                if total_movements + len(best_sequence) > max_number_of_movements:
                    break
            self.play_sequence(best_sequence)
            total_movements += len(best_sequence)
            total_lines_cleared += self.lines_cleared
        print(f'Game over with {total_lines_cleared} lines cleared and {total_movements} total movements done')
    def game_over(self):
        # Event callback bound to ON_GAME_OVER.
        self.is_game_over = True
        #print("I reached game over") # TODO remove
    def update_lines_cleared_counter(self, lines_cleared :int):
        # Event callback bound to ON_LINES_CLEARED.
        self.lines_cleared = lines_cleared
        #print("I have lines cleared")
    def restore_state(self):
        """Restore the engine to the last snapshot taken by save_state."""
        self.playfield.set_all_rows(self.state["rows"])
        self.falling_piece.center_x = self.state["center_x"]
        self.falling_piece.center_y = self.state["center_y"]
        self.falling_piece.set_shape(self.state["shape"])
        self.falling_piece.set_angle(self.state["angle"])
    def save_state(self):
        """Snapshot the playfield rows and the falling piece's pose."""
        self.state["rows"] = self.playfield.get_all_rows()
        self.state["center_x"] = self.falling_piece.center_x
        self.state["center_y"] = self.falling_piece.center_y
        self.state["shape"] = self.falling_piece.shape
        self.state["angle"] = self.falling_piece.angle
if __name__ == "__main__":
    agent = TetrisAgent()
    # Heuristic weights: heights/holes/bumpiness are penalties (positive),
    # cleared lines are a reward (negative, since the score is minimized).
    weights = {
        "weight_aggregated_height": 5,
        "weight_total_holes": 1.1,
        "weight_bumpiness": 0.8,
        "weight_lines_cleared": -10
    }
    agent.start_new_game(weights, 1000) # our example is 76 lines cleared and 998 movements in 1.9s
from typing import *
from faker.providers import BaseProvider
from .models import Player, ScoreStats, Score, Characteristic, Difficulty, Badge
class ScoreSaberProvider(BaseProvider):
    """Faker provider fabricating ScoreSaber players, score stats and scores
    for tests."""
    def role(self) -> Optional[str]:
        # NOTE(review): random_choices() returns a *list* of picks, which
        # contradicts the Optional[str] annotation (random_element would give
        # a single value) — confirm intended behavior before changing.
        roles = [
            "Owner", "Nomination Assessment Team", "Ranking Team", "Quality Assurance Team",
            "Ranking Team Recruit", "Criteria Assurance Team", "Supporter", None
        ]
        return self.random_choices(roles, 1)
    def rank(self) -> int:
        # Leaderboard rank, 1-based.
        return self.random_int(1, 1000000000)
    def history(self) -> str:
        # 48 comma-separated historical ranks.
        ranks = []
        for index in range(48):
            ranks.append(str(self.rank()))
        return ",".join(ranks)
    def badge(self) -> Badge:
        badge = Badge()
        badge.image = self.generator.image_url(80, 30)
        badge.description = self.generator.sentence()
        return badge
    def badges(self) -> List[Badge]:
        # random_digit() == 0 (1 in 10) yields an empty badge list;
        # otherwise 1-9 badges.
        badges = []
        rnd = self.random_digit()
        if rnd == 0:
            return badges
        for index in range(rnd):
            badges.append(self.badge())
        return badges
    def player_basic(self, player_id: int) -> Player:
        """Player without score statistics (see player_full)."""
        player = Player()
        player.player_id = player_id
        player.player_name = self.generator.user_name()
        player.avatar = self.generator.image_url(184, 184)
        player.rank = self.rank()
        player.country_rank = self.rank()
        player.pp = float(self.numerify("%####.##"))
        player.country = self.generator.country_code()
        player.role = self.role()
        player.badges = self.badges()
        player.history = self.history()
        player.permissions = self.random_int(0, 100)
        player.inactive = self.random_int(0, 1)
        player.banned = self.random_int(0, 1)
        return player
    def score_stats(self) -> ScoreStats:
        score_stats = ScoreStats()
        score_stats.total_score = self.random_int(0, 999999999999)
        score_stats.total_ranked_score = self.random_int(0, 999999999999)
        score_stats.average_ranked_accuracy = float(self.numerify("%#.##############"))
        score_stats.total_play_count = self.random_int(0, 99999)
        score_stats.ranked_play_count = self.random_int(0, 99999)
        return score_stats
    def player_full(self, player_id: int) -> Player:
        """player_basic plus generated score statistics."""
        player = self.player_basic(player_id)
        player.score_stats = self.score_stats()
        return player
    def score(self) -> Score:
        # NOTE(review): as in role(), random_choices() makes characteristic
        # and difficulty lists rather than single enum members — confirm.
        score = Score()
        score.rank = self.rank()
        score.score_id = self.random_int(0, 999999999999)
        score.score = self.random_int(0, 999999999999)
        score.unmodified_score = score.score
        score.mods = self.random_elements(["NF", "FS", "SS", "GN"], unique=True)
        score.pp = float(self.numerify("%##.###"))
        score.weight = float(self.numerify("%#.##############"))
        score.time_set = self.generator.past_datetime()
        score.leaderboard_id = self.random_int(0, 999999999999)
        score.song_hash = self.generator.sha1(raw_output=False)
        score.song_name = self.generator.text(35)
        score.song_sub_name = self.generator.text(35)
        score.song_author_name = self.generator.user_name()
        score.level_author_name = self.generator.user_name()
        score.characteristic = self.random_choices(list(Characteristic), 1)
        score.difficulty = self.random_choices(list(Difficulty), 1)
        score.max_score = score.score * float(self.numerify("%.###"))
        return score
    def scores(self, count: int = 1) -> List[Score]:
        scores = []
        for _ in range(count):
            scores.append(self.score())
        return scores
|
import socket
from threading import Thread
class TcpSock(object):
    """Tiny threaded TCP request/response server plus a blocking client.

    With no ``responder`` the server echoes each received payload back;
    otherwise it replies with ``responder.getInfo(<stripped text>)`` plus a
    trailing newline. ``get()`` is the matching client: it reads until a
    newline terminator, so echo-mode callers should send newline-terminated
    messages.
    """

    def __init__(self, host="127.0.0.1", port=9999, responder=None):
        self.host = host
        self.port = port
        self.bufsize = 128          # per-recv read size on the server side
        self.sock = None            # listening socket, created by start()
        self.socketThread = None    # accept-loop thread, created by start()
        self.responder = responder

    def _connect(self):
        """Accept loop run on the background thread; serves one client at a
        time until the listening socket is closed by close()."""
        while True:
            try:
                conn, addr = self.sock.accept()
            except OSError:
                # Listening socket was closed by close(); exit the thread
                # cleanly instead of dying with an unhandled exception.
                return
            while True:
                data = conn.recv(self.bufsize)
                if not data:
                    break
                data = data.decode()
                if self.responder is None:
                    # BUG FIX: the original sent the decoded str, which
                    # raises TypeError — sockets require bytes.
                    conn.sendall(data.encode())
                else:
                    msg = "{}\n".format(self.responder.getInfo(data.strip()))
                    conn.sendall(msg.encode())
            conn.close()

    def get(self, msg="nothing"):
        """Send ``msg`` and block until a newline-terminated reply arrives.

        Returns the full reply text (newline included).
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.host, self.port))
        sock.send(msg.encode())
        chunks = []
        done = False
        while not done:
            chunk = sock.recv(2048).decode()
            if not chunk:
                # BUG FIX: peer closed before a newline arrived; the original
                # raised IndexError on chunk[-1] here.
                break
            chunks.append(chunk)
            done = chunk[-1] == "\n"
        reply = "".join(chunks)
        sock.close()
        return reply

    def close(self):
        """Shut the server down and wait for the accept thread to finish."""
        self.sock.close()
        self.socketThread.join()

    def start(self):
        """Bind, listen and serve on a background daemon thread.

        If ``port`` is 0 the OS picks a free port and ``self.port`` is
        updated to the actual value so ``get()`` still works.
        """
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts without "address already in use".
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))
        self.port = self.sock.getsockname()[1]
        self.sock.listen(1)
        self.socketThread = Thread(target=self._connect, daemon=True)
        self.socketThread.start()
|
import os
import numpy as np
import json
from PIL import Image
from model import *
import matplotlib.pyplot as plt
import matplotlib.patches as patches
data_path = '../data/RedLights2011_Medium'
preds_path = '../data/hw01_preds'
kernels = buildKernels()
# Hand-picked images where detection works well / poorly.
good_examples = ['RL-011.jpg', 'RL-019.jpg', 'RL-021.jpg', 'RL-031.jpg']
bad_examples = ['RL-046.jpg', 'RL-040.jpg', 'RL-023.jpg', 'RL-017.jpg']


def _save_visualization(file_name, threshold=.95):
    """Draw detected red-light boxes on one image and save it to preds_path.

    Extracted helper: the original duplicated this body verbatim for the
    good- and bad-example loops.
    """
    I = Image.open(os.path.join(data_path, file_name))
    fig, ax = plt.subplots()
    ax.imshow(I)
    ax.set_axis_off()
    boxes = detect_red_lights(np.asarray(I), kernels, threshold)
    for box in boxes:
        # Box layout: (top-left row, top-left col, bottom-right row, bottom-right col).
        tl_row, tl_col, br_row, br_col = box[0], box[1], box[2], box[3]
        rectangle = patches.Rectangle((tl_col, tl_row), br_col - tl_col, br_row - tl_row,
                                      linewidth=1, edgecolor='w', facecolor='none')
        ax.add_patch(rectangle)
    plt.savefig(os.path.join(preds_path, file_name), bbox_inches='tight', pad_inches=0)
    plt.close(fig)  # BUG FIX: the original leaked one open figure per image


os.makedirs(preds_path, exist_ok=True)  # ensure the output directory exists
for file_name in good_examples + bad_examples:
    _save_visualization(file_name)
|
from itertools import product
from functools import reduce
from operator import getitem
import random
from uuid import uuid4
from joblib import Parallel, delayed
import numpy as np
class GridParallel(object):
    """Wrapper around joblib.Parallel that evaluates one function over the
    cartesian product of several coordinate axes.

    Each task runs with fresh python/numpy random seeds (via
    WrapFunctionRandomSeed) and is tagged with its grid indices (via
    WrapFunctionWithReturn) so results can be placed back in order.
    """

    def __init__(self, *args, **kwargs):
        """Forward all arguments straight to joblib.Parallel.

        :param args: args for joblib.Parallel
        :param kwargs: kwargs for joblib.Parallel
        """
        self.parallel = Parallel(*args, **kwargs)

    def __call__(self, fun, *axes):
        """Evaluate ``fun`` at every point of the grid spanned by ``axes``.

        Example: ``__call__(lambda x, y: (x, y), range(10), ['a', 'b'])``
        yields a 10 x 2 nested list whose elements are tuples like (7, 'b').

        :param fun: function called with one value per axis
        :param axes: one iterable of values per grid dimension
        :return: nested lists of shape S1 x S2 x ... (axis lengths)
        """
        shape = tuple(len(axis) for axis in axes)
        total = 1
        for extent in shape:
            total *= extent
        print('Number of tasks: %d.' % total)
        result = self.build_grid(shape)
        tasks = (
            delayed(WrapFunctionRandomSeed(WrapFunctionWithReturn(fun, idx)))(*coords)
            for idx, coords in self.indices_coordinates_generator(*axes)
        )
        for value, idx in self.parallel(tasks):
            self.assign_to_grid(result, idx, value)
        return result

    @staticmethod
    def build_grid(shape):
        """Return nested lists of ``None`` with the given shape (int or tuple)."""
        if isinstance(shape, int):
            shape = (shape,)
        if len(shape) == 1:
            return [None] * shape[0]
        return [GridParallel.build_grid(shape[1:]) for _ in range(shape[0])]

    @staticmethod
    def assign_to_grid(grid, indices, val):
        """Store ``val`` at the (possibly multi-dimensional) ``indices``."""
        if isinstance(indices, int):
            indices = (indices,)
        target = grid
        for step in indices[:-1]:
            target = target[step]
        target[indices[-1]] = val

    @staticmethod
    def indices_coordinates_generator(*axes):
        """Yield ``(indices, coordinates)`` pairs over the grid's cartesian
        product, where both elements are tuples with one entry per axis."""
        enumerated = [list(enumerate(axis)) for axis in axes]
        for combo in product(*enumerated):
            yield tuple(pair[0] for pair in combo), tuple(pair[1] for pair in combo)
class FakeParallel(object):
    """Serial, in-process stand-in for joblib.Parallel (handy for debugging).

    Accepts and ignores Parallel's constructor arguments, then executes the
    ``(function, args, kwargs)`` tuples produced by ``joblib.delayed`` one
    after another.
    """

    def __init__(self, *args, **kwargs):
        # Signature-compatible with joblib.Parallel; configuration is ignored.
        pass

    def __call__(self, iterable):
        """Run each delayed task sequentially; return the list of results.

        BUG FIX: the original left ``args``/``kwargs`` unbound when a task
        tuple had fewer than three elements (NameError on the first such
        task) or silently reused stale values from a previous iteration.
        """
        results = []
        for fun, *rest in iterable:
            call_args = rest[0] if len(rest) >= 1 else ()
            call_kwargs = rest[1] if len(rest) >= 2 else {}
            results.append(fun(*call_args, **call_kwargs))
        return results
class WrapFunctionRandomSeed(object):
    """Callable wrapper that reseeds ``random`` and ``numpy.random`` with a
    fixed per-instance seed before every invocation, so repeated calls of
    the same instance are reproducible."""

    def __init__(self, fun):
        self.fun = fun
        # Derive a random 32-bit unsigned seed from a fresh UUID.
        self.seed = uuid4().int & 0xFFFFFFFF

    def __call__(self, *args, **kwargs):
        seed = self.seed
        random.seed(seed)
        np.random.seed(seed)
        return self.fun(*args, **kwargs)
class WrapFunctionWithReturn(object):
    """Callable wrapper tagging results: returns ``(fun(*a, **kw), val)``."""

    def __init__(self, fun, val):
        self.fun = fun
        self.val = val

    def __call__(self, *args, **kwargs):
        outcome = self.fun(*args, **kwargs)
        return outcome, self.val
|
#!/usr/bin/python
# Classification (U)
"""Program: main.py
Description: Integration testing of main in rmq_2_sysmon.py.
Usage:
test/integration/rmq_2_sysmon/main.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import rmq_2_sysmon
import rmq_cleanup
import lib.gen_libs as gen_libs
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
    """Integration test of main() in rmq_2_sysmon.py.

    Methods:
        setUp -> load the rabbitmq config and resolve test paths.
        test_main_program_args -> run main() with CLI-style arguments.
        tearDown -> remove the log file and clean up the RabbitMQ queue.
    """

    def setUp(self):
        """Load the rabbitmq config module and rebase its relative paths
        (log, message dir, queue directory) onto the integration-test tree."""
        self.base_dir = "test/integration/rmq_2_sysmon"
        self.test_path = os.path.join(os.getcwd(), self.base_dir)
        self.config_path = os.path.join(self.test_path, "config")
        self.cfg = gen_libs.load_module("rabbitmq", self.config_path)
        log_path = os.path.join(self.test_path, self.cfg.log_dir)
        self.cfg.log_file = os.path.join(log_path, self.cfg.log_file)
        self.cfg.message_dir = os.path.join(self.test_path,
                                            self.cfg.message_dir)
        self.cfg.queue_list[0]["directory"] = os.path.join(
            self.test_path, self.cfg.queue_list[0]["directory"])
        # Marker line expected in the log once a broker connection succeeds.
        self.connect_true = "Connected to RabbitMQ node"
        self.argv_list = [os.path.join(self.base_dir, "main.py"), "-M", "-c",
                          "rabbitmq", "-d", "config"]

    @mock.patch("rmq_2_sysmon.gen_libs.get_base_dir")
    @mock.patch("rmq_2_sysmon.gen_libs.load_module")
    @mock.patch("rmq_2_sysmon.rabbitmq_class.RabbitMQCon.consume")
    def test_main_program_args(self, mock_consume, mock_cfg, mock_base):
        """Run main() with program-call arguments; the log must record a
        successful RabbitMQ connection.

        Mock args map to decorators innermost-first: consume, load_module,
        get_base_dir.
        """
        mock_consume.return_value = "RabbitMQ_Tag"
        mock_cfg.return_value = self.cfg
        mock_base.return_value = self.test_path
        rmq_2_sysmon.main(argv_list=self.argv_list)
        self.assertTrue(self.connect_true in open(self.cfg.log_file).read())

    def tearDown(self):
        """Remove the generated log file and drop the test queue/exchange."""
        os.remove(self.cfg.log_file)
        rmq_cleanup.rmq_cleanup(self.cfg, self.cfg.queue_list[0]["queue"],
                                True)
if __name__ == "__main__":
    # Allow running this integration test module directly.
    unittest.main()
|
from __future__ import print_function
import heapq
import itertools
import copy
import codecs, sys
# UTF-8 writer around sys.stdout so debug prints of non-ASCII text are safe
# (needed on Python 2, harmless on Python 3).
stdout = codecs.getwriter('utf-8')(sys.stdout)
def forward_dp(dictionary, graph):
    """Viterbi-style forward pass over the lattice.

    For every node, stores the best cumulative score in ``node.f`` (node
    weight + bigram transition + predecessor's f, maximised over all
    predecessors) and the argmax predecessor in ``node.best_prev``.
    Positions run from 1 to x_length + 1 (the EOS column).
    """
    # BOS node: zero cost by definition.
    graph.nodes_list[0][0].f = 0
    for position in range(1, graph.x_length + 2):
        for node in graph.nodes_list[position]:
            best_score, best_prev = float('-inf'), None
            for candidate in graph.nodes_list[node.start_pos]:
                transition = dictionary.get_bigram_weight(candidate.deep, node.deep)
                total = candidate.f + node.weight + transition
                if total > best_score:
                    best_score, best_prev = total, candidate
            node.best_prev = best_prev
            node.f = best_score
def backward_a_star(dictionary, graph, n):
    """Backward A* over the lattice, returning up to ``n`` best paths.

    Expands paths backwards from the EOS node; ``g`` accumulates the cost
    from EOS back to the current node, while the forward score ``f``
    (computed by forward_dp) serves as the heuristic, so the heap priority
    is -(f + g). Each popped path reaching BOS (start_pos == -1) is one
    complete result.
    """
    result = []
    pq = []
    # Monotonic tie-breaker so the heap never compares node objects.
    counter = itertools.count()
    eos = graph.nodes_list[graph.x_length + 1][0]
    eos.g = 0
    heapq.heappush(pq, (0, next(counter), eos))
    while pq != [] and len(result) < n:
        cost, count, front = heapq.heappop(pq)
        if front.start_pos == -1:
            # BOS reached: one complete path recovered.
            result.append(front)
        else:
            for prev_node in graph.nodes_list[front.start_pos]:
                bigram_weight = dictionary.get_bigram_weight(prev_node.deep, front.deep)
                # NOTE(review): g is written on the *shared* lattice node just
                # before copying it; a later expansion of the same node
                # overwrites g again, so correctness relies on the copy being
                # taken immediately — confirm this is intended.
                prev_node.g = front.g + front.weight + bigram_weight
                new_front = copy.copy(prev_node)
                # print(new_front.surface, new_front.g, new_front.f, file=stdout)
                new_front.next = front
                heapq.heappush(pq, (- prev_node.f - prev_node.g, next(counter), new_front))
    n_best = []
    for node in result:
        nodes = []
        # Follow the `next` chain from BOS back towards EOS.
        while node != eos:
            nodes.append(node)
            node = node.next
        # Drop the leading BOS copy; keep only real tokens.
        n_best.append(nodes[1:])
    return n_best
|
import imsearch
import unittest
from unittest.mock import patch
class TestInit(unittest.TestCase):
    """Unit tests for the top-level imsearch package entry points."""

    @patch('imsearch.config_init')
    @patch('imsearch.Index')
    def test_init(self, mock_index, config_init_mock):
        """init() should build an Index and forward kwargs to config_init.

        Decorators apply bottom-up: the innermost @patch ('imsearch.Index')
        supplies the first mock argument.
        """
        instance = mock_index()
        instance.name = 'test'
        index = imsearch.init('test', MONGO_URI='mongodb://localhost:27017/')
        self.assertEqual(index, instance)
        self.assertEqual(index.name, 'test')
        config_init_mock.assert_called_with(
            {'MONGO_URI': 'mongodb://localhost:27017/'})

    @patch('imsearch.run')
    def test_detector(self, mock_backend):
        """run_detector() should pass the redis URL through unmodified."""
        imsearch.run_detector('redis://dummy:Password@111.111.11.111:6379/0')
        mock_backend.assert_called_with(
            'redis://dummy:Password@111.111.11.111:6379/0')
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices of the two distinct elements that sum to target.

        Single pass with a value -> index hash map: O(n) time / O(n) space,
        replacing the previous sort + two-pointer O(n log n) approach while
        keeping the same interface. Assumes (per the problem statement)
        that exactly one solution exists.
        """
        seen = {}  # value -> index of its first occurrence
        for i, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], i]
            seen[value] = i
        return []  # unreachable under the one-solution guarantee
import Foundation
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSURLProtectionSpace(TestCase):
    """Verifies the PyObjC bridging of NSURLProtectionSpace: constants are
    exposed as str (NSString) and BOOL results are bridged correctly."""

    def testConstants(self):
        # Proxy-type constants.
        self.assertIsInstance(Foundation.NSURLProtectionSpaceHTTPProxy, str)
        self.assertIsInstance(Foundation.NSURLProtectionSpaceHTTPSProxy, str)
        self.assertIsInstance(Foundation.NSURLProtectionSpaceFTPProxy, str)
        self.assertIsInstance(Foundation.NSURLProtectionSpaceSOCKSProxy, str)
        # Authentication-method constants.
        self.assertIsInstance(Foundation.NSURLAuthenticationMethodDefault, str)
        self.assertIsInstance(Foundation.NSURLAuthenticationMethodHTTPBasic, str)
        self.assertIsInstance(Foundation.NSURLAuthenticationMethodHTTPDigest, str)
        self.assertIsInstance(Foundation.NSURLAuthenticationMethodHTMLForm, str)

    @min_os_level("10.5")
    def testConstants10_5(self):
        # Constants introduced in macOS 10.5.
        self.assertIsInstance(Foundation.NSURLProtectionSpaceHTTP, str)
        self.assertIsInstance(Foundation.NSURLProtectionSpaceHTTPS, str)
        self.assertIsInstance(Foundation.NSURLProtectionSpaceFTP, str)
        self.assertIsInstance(Foundation.NSURLAuthenticationMethodNTLM, str)
        self.assertIsInstance(Foundation.NSURLAuthenticationMethodNegotiate, str)

    @min_os_level("10.6")
    def testConstants10_6(self):
        # Constants introduced in macOS 10.6.
        self.assertIsInstance(
            Foundation.NSURLAuthenticationMethodClientCertificate, str
        )
        self.assertIsInstance(Foundation.NSURLAuthenticationMethodServerTrust, str)

    def testMethods(self):
        # Methods returning ObjC BOOL must be bridged to Python bool.
        self.assertResultIsBOOL(
            Foundation.NSURLProtectionSpace.receivesCredentialSecurely
        )
        self.assertResultIsBOOL(Foundation.NSURLProtectionSpace.isProxy)
|
import numpy as np
from gym_game.envs.active_vision_env import GridUtil
# Demo/sanity-check script for GridUtil's action <-> coordinate mapping.
screen_height = 400
screen_width = 800
# 3 columns x 4 rows laid over an 800x400 screen.
grid_util = GridUtil(grid_length_x=3, grid_length_y=4, screen_height=screen_height, screen_width=screen_width)
"""
Range x:
0:[0-266.67] 1:[266.67-533.33] 2:[533.33-800]
Range y:
0:[0-100] 1:[100-200] 2:[200-300] 3:[300-400]
0:(133,50) 1:(400,50) 2:666, 50
3:(133,150) 4:(400,150) 5:666, 150
6:(133,250) 7:(400,250) 8:666, 250
9:(133,350) 10:(400,350) 11:666, 350
"""
num_cells = grid_util.num_cells()
print("num_cells = ", num_cells)
print("size of cells (x, y) = {}".format(grid_util.grid_cell_size))
# Map every discrete action index to an xy coordinate.
print("\naction --> xy coord")
for action in range(0, num_cells):
    xy = grid_util.action_2_xy(action)
    print("{} --> {}".format(action, xy))
# walk top left to bottom right
# Sample points on the screen diagonal and map each back to an action index.
print("\nxy --> xy_array")
for xy in range(0, min(screen_height, screen_width), min(screen_height, screen_width)//10):
    xy_array = np.array([xy, xy])
    action = grid_util.xy_2_action(xy_array)
    print("({}, {}) --> {}".format(xy, xy, action))
|
#
# PySNMP MIB module NETFINITYMANAGER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NETFINITYMANAGER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:08:55 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
dmiMibs, = mibBuilder.importSymbols("NETFINITYSERVICES-MIB", "dmiMibs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, IpAddress, TimeTicks, Gauge32, Integer32, iso, Counter32, ObjectIdentity, Counter64, Bits, enterprises, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "IpAddress", "TimeTicks", "Gauge32", "Integer32", "iso", "Counter32", "ObjectIdentity", "Counter64", "Bits", "enterprises", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Thin aliases mapping DMI attribute types onto SMI base types
# (machine-generated by pysmi from the MIB's ASN.1 source).
class DmiInteger(Integer32):
    pass


class DmiOctetstring(OctetString):
    pass


class DmiDisplaystring(DisplayString):
    pass


class DmiDate(OctetString):
    # DMI dates are a fixed-length 28-octet string.
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(28, 28)
    fixedLength = 28


class DmiComponentIndex(Integer32):
    pass
# --- MIB object tree (machine-generated by pysmi; do not hand-edit logic).
# Root: enterprises.ibm(2).6.71.200.3
netFinityManagerMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3))
dmtfGroups2 = MibIdentifier((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1))
# Component identification table: manufacturer / product / version / serial.
tComponentid2 = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1), )
if mibBuilder.loadTexts: tComponentid2.setStatus('mandatory')
eComponentid2 = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"))
if mibBuilder.loadTexts: eComponentid2.setStatus('mandatory')
a1Manufacturer = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 1), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1Manufacturer.setStatus('mandatory')
a1Product = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 2), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1Product.setStatus('mandatory')
a1Version = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 3), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1Version.setStatus('mandatory')
a1SerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 1, 1, 4), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a1SerialNumber.setStatus('mandatory')
# Remote systems table: one row per system known to the Netfinity manager.
tRemoteSystems = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11), )
if mibBuilder.loadTexts: tRemoteSystems.setStatus('mandatory')
eRemoteSystems = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"), (0, "NETFINITYMANAGER-MIB", "a11SystemTag"))
if mibBuilder.loadTexts: eRemoteSystems.setStatus('mandatory')
a11SystemTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 1), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemTag.setStatus('mandatory')
a11SystemName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 2), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemName.setStatus('mandatory')
a11ProtocolName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 3), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11ProtocolName.setStatus('mandatory')
a11NetworkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 4), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11NetworkAddress.setStatus('mandatory')
a11SystemState = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("vOff-line", 0), ("vOn-line", 1), ("vOff-lineWithErrorCondition", 2), ("vOn-lineWithErrorCondition", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemState.setStatus('mandatory')
a11Server = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("vFalse", 0), ("vTrue", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11Server.setStatus('mandatory')
a11Manager = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("vFalse", 0), ("vTrue", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11Manager.setStatus('mandatory')
a11OperatingSystemType = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11))).clone(namedValues=NamedValues(("vUnknown", 0), ("vIbmOs2", 1), ("vMicrosoftWindows", 2), ("vNovellNetware", 3), ("vMicrosoftWindowsNt", 4), ("vIbmAix", 5), ("vBanyanVines", 6), ("vIbmPc-dos", 7), ("vScoXenix", 8), ("vUnixSystemV", 9), ("vMicrosoftWindows95", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11OperatingSystemType.setStatus('mandatory')
a11OsMajorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 9), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11OsMajorVersion.setStatus('mandatory')
a11OsMinorVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 10), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11OsMinorVersion.setStatus('mandatory')
a11SystemModelId = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 11), DmiOctetstring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemModelId.setStatus('mandatory')
a11SystemModelName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 12), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemModelName.setStatus('mandatory')
# Labels use '-' (invalid in Python identifiers), hence setLabel() below.
a11SystemOn_lineNotify = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a11SystemOn-lineNotify").setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemOn_lineNotify.setStatus('mandatory')
a11SystemOff_lineNotify = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a11SystemOff-lineNotify").setMaxAccess("readonly")
if mibBuilder.loadTexts: a11SystemOff_lineNotify.setStatus('mandatory')
a11PresenceCheckInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 15), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11PresenceCheckInterval.setStatus('mandatory')
a11MacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 11, 1, 16), DmiOctetstring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a11MacAddress.setStatus('mandatory')
# Remote system groups table: keyword-based grouping of remote systems.
tRemoteSystemGroups = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12), )
if mibBuilder.loadTexts: tRemoteSystemGroups.setStatus('mandatory')
eRemoteSystemGroups = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"), (0, "NETFINITYMANAGER-MIB", "a12GroupTag"))
if mibBuilder.loadTexts: eRemoteSystemGroups.setStatus('mandatory')
a12GroupTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 1), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12GroupTag.setStatus('mandatory')
a12GroupName = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 2), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12GroupName.setStatus('mandatory')
a12RequiredKeywordsCombination = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("vAllKeywordsMustMatch", 0), ("vAnyOfTheKeywordsMayMatch", 1), ("vExactlyOneOfTheKeywordsMustMatch", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12RequiredKeywordsCombination.setStatus('mandatory')
a12Keywords = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 4), DmiDisplaystring()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12Keywords.setStatus('mandatory')
a12SystemOn_lineNotifyDefault = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a12SystemOn-lineNotifyDefault").setMaxAccess("readonly")
if mibBuilder.loadTexts: a12SystemOn_lineNotifyDefault.setStatus('mandatory')
a12SystemOff_lineNotifyDefault = MibScalar((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 254, 255))).clone(namedValues=NamedValues(("vSev0", 0), ("vSev1", 1), ("vSev2", 2), ("vSev3", 3), ("vSev4", 4), ("vSev5", 5), ("vSev6", 6), ("vSev7", 7), ("vNoDefault", 254), ("vDisabled", 255)))).setLabel("a12SystemOff-lineNotifyDefault").setMaxAccess("readonly")
if mibBuilder.loadTexts: a12SystemOff_lineNotifyDefault.setStatus('mandatory')
a12DefaultPresenceCheckInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 7), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a12DefaultPresenceCheckInterval.setStatus('mandatory')
a12DiscoveryStartFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 12, 1, 8), DmiInteger()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: a12DiscoveryStartFlag.setStatus('mandatory')
# Mapping table between remote systems and the groups they belong to.
tRemoteSystemGroupMap = MibTable((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13), )
if mibBuilder.loadTexts: tRemoteSystemGroupMap.setStatus('mandatory')
eRemoteSystemGroupMap = MibTableRow((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13, 1), ).setIndexNames((0, "NETFINITYMANAGER-MIB", "DmiComponentIndex"), (0, "NETFINITYMANAGER-MIB", "a13SystemTag"), (0, "NETFINITYMANAGER-MIB", "a13GroupTag"))
if mibBuilder.loadTexts: eRemoteSystemGroupMap.setStatus('mandatory')
a13SystemTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13, 1, 1), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a13SystemTag.setStatus('mandatory')
a13GroupTag = MibTableColumn((1, 3, 6, 1, 4, 1, 2, 6, 71, 200, 3, 1, 13, 1, 2), DmiInteger()).setMaxAccess("readonly")
if mibBuilder.loadTexts: a13GroupTag.setStatus('mandatory')
# Export all symbols so other MIB modules can import them by name.
mibBuilder.exportSymbols("NETFINITYMANAGER-MIB", a1Manufacturer=a1Manufacturer, a11SystemTag=a11SystemTag, DmiComponentIndex=DmiComponentIndex, tRemoteSystems=tRemoteSystems, DmiDisplaystring=DmiDisplaystring, eComponentid2=eComponentid2, a12GroupName=a12GroupName, a11MacAddress=a11MacAddress, a11Server=a11Server, a11OperatingSystemType=a11OperatingSystemType, a11SystemModelId=a11SystemModelId, a11PresenceCheckInterval=a11PresenceCheckInterval, a12Keywords=a12Keywords, a11Manager=a11Manager, eRemoteSystemGroupMap=eRemoteSystemGroupMap, a13SystemTag=a13SystemTag, tComponentid2=tComponentid2, a11SystemName=a11SystemName, a11SystemOn_lineNotify=a11SystemOn_lineNotify, a11OsMajorVersion=a11OsMajorVersion, tRemoteSystemGroupMap=tRemoteSystemGroupMap, a11SystemState=a11SystemState, dmtfGroups2=dmtfGroups2, a1Version=a1Version, eRemoteSystems=eRemoteSystems, a12DiscoveryStartFlag=a12DiscoveryStartFlag, netFinityManagerMIB=netFinityManagerMIB, DmiOctetstring=DmiOctetstring, a11OsMinorVersion=a11OsMinorVersion, a11SystemOff_lineNotify=a11SystemOff_lineNotify, a12SystemOff_lineNotifyDefault=a12SystemOff_lineNotifyDefault, a11ProtocolName=a11ProtocolName, a13GroupTag=a13GroupTag, DmiInteger=DmiInteger, a12RequiredKeywordsCombination=a12RequiredKeywordsCombination, a12GroupTag=a12GroupTag, DmiDate=DmiDate, a12SystemOn_lineNotifyDefault=a12SystemOn_lineNotifyDefault, tRemoteSystemGroups=tRemoteSystemGroups, a1Product=a1Product, a1SerialNumber=a1SerialNumber, a12DefaultPresenceCheckInterval=a12DefaultPresenceCheckInterval, a11NetworkAddress=a11NetworkAddress, eRemoteSystemGroups=eRemoteSystemGroups, a11SystemModelName=a11SystemModelName)
|
"""An example script that moves to the control can root directory, then creates
a DATA directory, then creates a subdirectory titled by YYYYMMDD, then creates
a log, power cycles the sensors for 30 seconds, then closes the log.
"""
from datetime import datetime,timezone
from martech.sbs.thetis import THETIS
import time
# NOTE(review): SOURCE indentation was flattened; the nesting below is
# reconstructed from the guard conditions and the module docstring — confirm
# against the original script.
port = 'COM3'
thetis = THETIS(port)
if thetis.open_connection(115200) is True:
    info = thetis.get_version()
    print('Connected to {}.'.format(info['profiler_id']))
    # Work from the profiler's root directory on the PC interface.
    if thetis.change_to_root_directory('PC') is True:
        thetis.make_directory("DATA","PC")
        if "DATA" in thetis.list_subdirectories("PC"):
            thetis.change_directory("DATA")
            # Daily subdirectory named YYYYMMDD (UTC).
            today = datetime.now(timezone.utc).strftime('%Y%m%d')
            thetis.make_directory(today,"PC")
            thetis.change_directory(today)
            wd = thetis.get_working_directory('PC')
            print(wd)
            # Open a log, power-cycle the sensors for ~30 s, close the log.
            thetis.logging("ON","PC")
            thetis.set_sensors_power("ON")
            time.sleep(1)
            thetis.set_pump_power("OFF")
            time.sleep(30)
            thetis.set_sensors_power("OFF")
            time.sleep(10)
            thetis.logging("OFF","PC")
            # list_files entries appear to be tuples; element 0 is the name.
            files = thetis.list_files("PC")
            filenames = []
            for filename in files:
                filenames.append(filename[0])
            print(filenames)
            yn = input("Do you want to delete the files you just created?")
            if 'Y' in yn.upper():
                ppds = [f for f in filenames if 'PPD' in f]
                acds = [f for f in filenames if 'ACD' in f]
                snds = [f for f in filenames if 'SND' in f]
                #Delete the decimated files.
                thetis.delete_file(ppds,"PC")
                thetis.delete_file(acds,"PC")
                thetis.delete_file(snds,"PC")
                thetis.delete_file(filenames,"PC") #Delete the rest.
            yn2 = input("Do you want to delete the directory you just created?")
            if 'Y' in yn2.upper():
                files = thetis.list_files("PC")
                if files == []: #If the directory is empty.
                    thetis.change_directory('..') #Move back one directory.
                    if thetis.remove_directory(today) is True:
                        print('Directory removed.')
                        subs = thetis.list_subdirectories()
                        print(subs)
                        if today not in subs:
                            print('Success!')
|
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme_github_versions"

# Options for the sphinx rtd theme, use DLS blue
html_theme_options = {"style_nav_header_background": "rgb(7, 43, 93)"}
|
#-----------------------------------------------------------------------------
# Copyright (c) 2014, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
class Surface:
    """
    Container for holding an image array, the result of volume rendering.

    Attributes (populated by the caller after construction; the constructor
    takes no arguments, so the previous "Parameters" docstring was wrong)
    ----------
    device_ptr : uint64 or None
        A pycuda allocation object representing a pointer to device memory.
    bounds : tuple or None
        A tuple representing the shape of the underlying data, often
        numpy.shape.
    """
    def __init__(self):
        # Both attributes start unset; callers assign them later.
        self.device_ptr = None
        self.bounds = None
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
PYRAX_INSTALLED = True
try:
import pyrax
except ImportError:
PYRAX_INSTALLED = False
from heat.engine import resource
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
class RackspaceResource(resource.Resource):
    '''
    Common base class for Rackspace Resource Providers.

    Lazily creates and caches one pyrax client per service the first time
    it is requested, authenticating on demand with the request context's
    auth token.
    '''
    properties_schema = {}

    def __init__(self, name, json_snippet, stack):
        super(RackspaceResource, self).__init__(name, json_snippet, stack)
        if PYRAX_INSTALLED:
            # Keep a handle to the module so the accessors below can use it.
            self.pyrax = pyrax
        # Per-service client caches, populated lazily by the accessors.
        self._cloud_db = None
        self._cloud_dns = None
        self._cloud_lb = None
        self._cloud_server = None
        self._cloud_nw = None
        self._cloud_blockstore = None
        self._authenticated = False

    def cloud_db(self):
        '''Rackspace cloud database client (cached after first use).'''
        if not self._cloud_db:
            self.__authenticate()
            self._cloud_db = self.pyrax.cloud_databases
        return self._cloud_db

    def cloud_lb(self):
        '''Rackspace cloud loadbalancer client (cached after first use).'''
        if not self._cloud_lb:
            self.__authenticate()
            self._cloud_lb = self.pyrax.cloud_loadbalancers
        return self._cloud_lb

    def cloud_dns(self):
        '''Rackspace cloud dns client (cached after first use).'''
        if not self._cloud_dns:
            self.__authenticate()
            self._cloud_dns = self.pyrax.cloud_dns
        return self._cloud_dns

    def nova(self):
        '''Rackspace cloudservers client (cached after first use).'''
        if not self._cloud_server:
            self.__authenticate()
            self._cloud_server = self.pyrax.cloudservers
        return self._cloud_server

    def cinder(self):
        '''Rackspace cinder (block storage) client (cached after first use).'''
        if not self._cloud_blockstore:
            self.__authenticate()
            self._cloud_blockstore = self.pyrax.cloud_blockstorage
        return self._cloud_blockstore

    def neutron(self):
        '''Rackspace neutron (networks) client (cached after first use).'''
        if not self._cloud_nw:
            self.__authenticate()
            self._cloud_nw = self.pyrax.cloud_networks
        return self._cloud_nw

    def __authenticate(self):
        # Authenticates once per instance using the context's auth token
        # (the old comment claiming username/password auth was stale).
        # NOTE(review): this uses the module-level `pyrax` directly while the
        # accessors use `self.pyrax`, which is only set when PYRAX_INSTALLED —
        # confirm both paths are guarded when pyrax is absent.
        if not self._authenticated:
            pyrax.set_setting("identity_type", "keystone")
            pyrax.set_setting("auth_endpoint", self.context.auth_url)
            pyrax.set_setting("tenant_id", self.context.tenant)
            logger.info("Authenticating with username:%s" %
                        self.context.username)
            pyrax.auth_with_token(self.context.auth_token,
                                  tenant_id=self.context.tenant_id,
                                  tenant_name=self.context.tenant)
            logger.info("User %s authenticated successfully."
                        % self.context.username)
            self._authenticated = True
|
import math
def main() -> None:
    """Count lines that pass through at least k of the given lattice points.

    k == 1: every point lies on infinitely many lines, so print Infinity.
    k >= 2: enumerate the line through every pair of points (O(N^2)),
    canonicalize its (direction, intercept) representation, then count how
    many input points lie on each distinct line.
    """
    n, k = map(int, input().split())
    points = [tuple(map(int, input().split())) for _ in range(n)]
    if k == 1:
        print("Infinity")
        return
    seen = set()
    for i in range(n - 1):
        xi, yi = points[i]
        for j in range(i + 1, n):
            xj, yj = points[j]
            dx, dy = xj - xi, yj - yi
            # Canonical direction: dx > 0; if dx == 0 force dy > 0; if
            # dy == 0 force dx > 0; reduced by gcd.
            if dx < 0:
                dx, dy = -dx, -dy
            assert not dx == dy == 0
            g = math.gcd(dx, dy)
            dx //= g
            dy //= g
            if dx == 0 and dy < 0:
                dy = -dy
            if dy == 0 and dx < 0:
                dx = -dx
            # Integer-scaled intercepts: y0 = (y-intercept) * dx for
            # non-vertical lines, x0 = (x-intercept) * dy for non-horizontal.
            y0 = None if dx == 0 else yi * dx - xi * dy
            x0 = None if dy == 0 else xi * dy - yi * dx
            seen.add((dx, dy, x0, y0))
    tot = 0
    for dx, dy, x0, y0 in seen:
        on_line = 0
        for x, y in points:
            if y0 is None:
                on_line += x == x0
            elif x0 is None:
                on_line += y == y0
            else:
                # (x, y) lies on the line iff y*dx - x*dy == y0.
                on_line += y * dx - y0 == dy * x
        tot += on_line >= k
    print(tot)


if __name__ == "__main__":
    main()
|
from hiargparse import ArgumentParser, Namespace
from hiargparse import ArgsProvider, Arg, ChildProvider
from typing import Optional, List
class Tire:
    @classmethod
    def get_args_provider(cls) -> ArgsProvider:
        """Declare every CLI argument a Tire needs."""
        return ArgsProvider(
            args=[
                # Explicit type; otherwise default.__class__ would be used.
                Arg('radius', 21.0, type=float),
                # Restricted to a fixed set of choices.
                Arg('unit-of-radius', 'cm', choices=['cm', 'm', 'mm']),
                # Boolean flag.
                Arg('for-winter', action='store_true'),
                # Aliased names and a list-valued default.
                Arg(['style', 'type'], ['cool'], nargs='+'),
                # dest defaults to names[0]; overridden here to 'value'.
                Arg(['value-in-dollar', 'how-much-in-dollar'], 1e+3, dest='value'),
            ]
        )

    def __init__(self, params: Namespace) -> None:
        """Read tire parameters from a hiargparse Namespace."""
        # A Namespace supports attribute, getattr and dict-style access.
        self._style = params.style
        self._radius = getattr(params, 'radius')
        self._radius_unit = params['unit_of_radius']
        # The Namespace drops type information, so annotate locally.
        self._for_winter: bool = params.for_winter
        self._value: float = params.value

    def __repr__(self) -> str:
        winter_str = 'for winter' if self._for_winter else 'NOT for winter'
        return ('<A {} tire of rad: {} {} ({}). {} dollars. >'
                .format(self._style, self._radius, self._radius_unit,
                        winter_str, self._value))
class Car:
    """Composite provider example: a Car owns two Tire child providers."""
    @classmethod
    def get_args_provider(cls) -> ArgsProvider:
        """Declare Car's own args, propagated args and child providers."""
        def complicated_type(arg: str) -> complex:
            # Custom parser: fall back to 0 on malformed input.
            try:
                val = complex(arg)
            except ValueError:
                val = 0
            return val
        return ArgsProvider(
            args=[
                # you can write an arbitrary help message;
                # to append your message after the default, use %(default-text)s
                Arg('tire-radius', 21.0, type=float,
                    help='%(default-text)s This arg is for its tires. '),
                # Complicated type, nargs, metavars are OK
                Arg('numbers-you-like', type=complicated_type,
                    nargs=3, metavar=('Hop', 'Step', 'Jump'))
                # if you have some name-conflicted arguments, hiargparse will warn about it.
                # you can specify propagate=True/False, move it from args to propagate_args,
                # or specify no_provides arguments in ChildProvider to suppress these warnings.
                # Arg('type', 'cool') # uncomment to see the warning message
                # also, if you have some dest-conflicted arguments, hiargparse raises an error.
                # Arg('conflict', 42, dest='front_tire') # uncomment to see the error
                # Arg('radius', 21, type=float) # uncomment to see the error
            ],
            # args propagation:
            # users specify only the root argument
            # and its value is propagated to all child providers
            propagate_args=[
                Arg('unit-of-radius', 'cm', choices=['cm', 'm', 'mm']),
                # you can use a different name from the propagation-target name
                Arg('for-winter-car', action='store_true', propagate_targets=['for-winter'])
            ],
            child_providers=[
                # multiple providers for one class
                ChildProvider(cls=Tire, name='front_tire', prefix='ftire',
                              # you can choose not to provide some arg
                              # to serve it at runtime (ex. in __init__())
                              no_provides={'radius'}),
                ChildProvider(cls=Tire, name='back_tire', prefix='btire',
                              no_provides={'radius'})
            ]
        )
    def __init__(self, params: Namespace) -> None:
        """Build the Car and its four tires from parsed parameters."""
        # parameters for Car itself
        self._numbers: Optional[List[complex]] = params.numbers_you_like
        # some additional (not from arguments) parameters for Tire
        front_tire_params = params.front_tire._replaced(radius=params.tire_radius)
        # multiple instances from one provider
        self._front_tires = [Tire(front_tire_params) for i in range(2)]
        # Namespace has some useful attributes: _replaced, _update, _asdict, and more.
        back_tire_params = params.back_tire
        back_tire_params._update({'radius': params.tire_radius + 1.0})
        # of course you can use normal attribute access as well
        back_tire_params.value *= 2
        self._back_tires = [Tire(back_tire_params) for i in range(2)]
    def print_spec(self) -> None:
        """Print a human-readable description of this car."""
        print('Car: ')
        print('front tires: {}'.format(', '.join([str(tire) for tire in self._front_tires])))
        print('back tires: {}'.format(', '.join([str(tire) for tire in self._back_tires])))
        if self._numbers is not None:
            print('by the way, I like {} hahaha'.format(', '.join([str(n) for n in self._numbers])))
if __name__ == '__main__':
    # set up a root argument provider (same API as other argument providers)
    args_provider = ArgsProvider(
        child_providers=[ChildProvider(Car)]
    )
    # quite usual argparse flow except the *new code* line
    parser = ArgumentParser()
    parser.add_argument('-V', '--version', action='version', version='v1.0')
    args_provider.add_arguments_to_parser(parser)  # *new code*
    params = parser.parse_args()
    # now you have ALL parameters, including child and grandchild arguments
    print(params)
    car = Car(params.Car)
    car.print_spec()
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Full license terms provided in LICENSE.md file.
import sys
sys.path.append('third_party/models/')
sys.path.append('third_party/models/research')
sys.path.append('third_party/models/research/slim')
#from PIL import Image
#import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from model_meta import NETS, FROZEN_GRAPHS_DIR, CHECKPOINT_DIR
import time
import cv2
# Input image fed to every network under test.
TEST_IMAGE_PATH='data/images/gordon_setter.jpg'
# Per-network average latencies are written here.
TEST_OUTPUT_PATH='data/test_output_tf.txt'
# Timed iterations per network; the first warm-up run is discarded.
NUM_RUNS=50
if __name__ == '__main__':
    # Benchmark each frozen graph on one image and record average latency.
    with open(TEST_OUTPUT_PATH, 'w') as test_f:
        for net_name, net_meta in NETS.items():
            # Skip nets explicitly marked for exclusion.
            if 'exclude' in net_meta.keys() and net_meta['exclude'] is True:
                continue
            print("Testing %s" % net_name)
            with open(net_meta['frozen_graph_filename'], 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
            with tf.Graph().as_default() as graph:
                tf.import_graph_def(graph_def, name="")
            tf_config = tf.ConfigProto()
            tf_config.gpu_options.allow_growth = True
            tf_config.allow_soft_placement = True
            tf_sess = tf.Session(config=tf_config, graph=graph)
            # BUG FIX: the session was never closed, so graph/GPU resources
            # leaked for every network tested in this loop.
            try:
                tf_input = tf_sess.graph.get_tensor_by_name(net_meta['input_name'] + ':0')
                tf_output = tf_sess.graph.get_tensor_by_name(net_meta['output_names'][0] + ':0')
                # load and preprocess the image for this net's input size
                image = cv2.imread(TEST_IMAGE_PATH)
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = cv2.resize(image, (net_meta['input_width'], net_meta['input_height']))
                image = net_meta['preprocess_fn'](image)
                # run network NUM_RUNS + 1 times (first run is warm-up)
                times = []
                for i in range(NUM_RUNS + 1):
                    t0 = time.time()
                    output = tf_sess.run([tf_output], feed_dict={
                        tf_input: image[None, ...]
                    })[0]
                    t1 = time.time()
                    times.append(1000 * (t1 - t0))
                avg_time = np.mean(times[1:])  # don't include first run
                # parse output into top-5 predictions
                top5 = net_meta['postprocess_fn'](output)
                print(top5)
                test_f.write("%s %s\n" % (net_name, avg_time))
            finally:
                tf_sess.close()
|
#!/bin/python3
#====================================================
# MODULES
#====================================================
from arsenalgear import mathematics as mt
#====================================================
# Mathematics
#====================================================
def mathematics():
    """Demonstrate the arsenalgear.mathematics helpers."""
    print( "Hermite polynomial in x = 3 and n = 2: ", mt.Hermite( 3, 2 ) )
    print( "Chebyshev polynomial in x = 3 and n = 2: ", mt.Chebyshev( 3, 2 ) )
    print( "Legendre polynomial in x = 3 and n = 2: ", mt.Legendre( 3, 2 ) )
    print( "Laguerre polynomial in x = 3 and n = 2: ", mt.Laguerre( 3, 2 ) )
    print()
    print( "Check if 3 is between 1 and 4: ", mt.IsInBounds( 3, 1, 4 ) )
    print()
    # BUG FIX: the label used to claim "( 3*n*x + i * x )" was parsed, but
    # the expression actually passed to e_parser is only "3*n*x".
    print( "Parsing ( 3*n*x ) in x = 1 and n = 2:", mt.e_parser( "3*n*x", "x", 1, 2 ) )
    print()
    print( "kronecker delta for i = 1 and j = 0: ", mt.kronecker( 1, 0 ) )
#====================================================
# Main
#====================================================
def main():
    # Entry point: run every mathematics demo.
    mathematics()
if __name__ == "__main__":
    main()
import pyaudio
from matplotlib import pyplot as plt
from Source.IO.Sync import *
import numpy as np
import wave
from scipy import signal
CHUNKSIZE = int(44100//0.25)  # fixed chunk size: 4 s of mono audio at 44.1 kHz
sync = createSyncSignal()
success = createSuccessSignal()
error = createErrorSignal()
# initialize portaudio input stream
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=CHUNKSIZE)
print("Esperando señal")
# Block until the sync signal is detected in the incoming audio.
while(True):
    data = stream.read(CHUNKSIZE)
    # BUG FIX: np.fromstring is deprecated (removed for binary input);
    # np.frombuffer is the supported equivalent and avoids a copy.
    numpydata = np.frombuffer(data, dtype=np.int16)
    delay = findSyncSignal(numpydata, sync)
    # NOTE(review): delay == 0 is treated as "not found" -- confirm
    # findSyncSignal can never report a match at offset 0.
    if(delay):
        print("Sync encontrado!")
        # Discard the samples preceding the sync point.
        stream.read(delay)
        break
direccion = "../Resources/Audio/Created/testRecord.wav"
frames = []
duracion = 5  # seconds to record
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
print("Grabando")
for i in range(0, int(RATE / CHUNK * duracion)):
    data = stream.read(CHUNK)
    frames.append(data)
print("Terminado")
stream.stop_stream()
stream.close()
p.terminate()
# Write the captured frames out as a mono 16-bit WAV file.
wf = wave.open(direccion, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
|
# -*- coding: utf-8 -*-
from .lib import *
class FundingInstrument(MoipEntity):
    """Payment funding instrument: a method plus its method-specific data."""

    def __init__(self, **kw):
        # BUG FIX (idiom): the instance parameter was named `cls`, which
        # misleadingly suggests a classmethod; renamed to `self`.
        self.__metadata__ = {}
        # FIELDS
        self.method = String(max=65)
        self.creditCard = Obj(context=self, key='creditCard', name='CreditCard')
        self.boleto = Obj(context=self, key='boleto', name='Boleto')
        self.onlineBankDebit = Obj(context=self, key='onlineBankDebit', name='OnlineBankDebit')
        super().__init__(**kw)
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="py-messenger",
version="1.2.0",
author="Vadym Mariiechko",
author_email="vadimich348@gmail.com",
description="Client/server single chat messenger",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/marik348/python-messegner",
packages=find_packages(),
include_package_data=True,
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
],
python_requires='>=3.6, <4',
keywords="messenger, chat",
)
|
# A executer depuis python manage.py shell
from collections import defaultdict
from centre.models import CentreAmbulatoire
import pandas, geopandas
import numpy as np
from shapely.ops import nearest_points
def get_cva():
    """Load the vaccination-centre spreadsheet, keeping only ambulatory centres.

    Keeps rows typed 'CVA' or 'CV MIXTE' and adds a zero-padded 5-digit
    'Code' column built from the INSEE commune code.
    """
    centres = pandas.read_excel("Referentiel_centres_vacci_20210421.xlsx")
    keep = centres.TYPOLOGIE.isin({'CVA', 'CV MIXTE'})
    centres.drop(centres[~keep].index, axis=0, inplace=True)
    centres['Code'] = [str(code).zfill(5) for code in centres.com_insee]
    return centres
def peuple_centres():
    """
    Populate the CentreAmbulatoire table from the CVA spreadsheet.
    :return: None
    """
    df = get_cva()
    for _, el in df.iterrows():
        # NOTE(review): capacite is random placeholder data (0-999).
        centre = CentreAmbulatoire(nom=el.nom, id_ministere=el.gid, code_postal=el.com_cp,
                                   code_commune_insee=el.com_insee,
                                   capacite=np.random.randint(1000))
        centre.save()
    return
def get_enum_depts():
    """
    Return an enumeration of the French departments.
    :return: list of ("NN - Name", index) tuples, 1-indexed
    """
    df = pandas.read_csv("departements-francais.csv", delimiter="\t")
    df['dept'] = [str(e).zfill(2) for e in df['NUMÉRO']]
    # NOTE(review): rows 101 and 102 are dropped by positional index --
    # presumably aggregate/overseas rows; verify against the CSV.
    df.drop([101, 102], axis=0, inplace=True)
    enum_dept = [("{} - {}".format(e.dept, e.NOM), i + 1) for i, e in df.iterrows()]
    return enum_dept
def get_enum_regions():
    """
    Return an enumeration of the French regions (the original docstring
    wrongly said "departements"; this reads region2020.csv).
    :return: list of (index, "NN - Name") tuples, 1-indexed; note the tuple
        order is reversed relative to get_enum_depts.
    """
    df = pandas.read_csv("region2020.csv", delimiter=",")
    df['region'] = [str(e).zfill(2) for e in df['reg']]
    enum_region = [(i + 1, "{} - {}".format(e.region, e.libelle),) for i, e in df.iterrows()]
    return enum_region
# Now: propose a distribution of doses as a function of population.
def get_df_communes(region_filtre=11):
    """Commune-level INSEE data joined with lat/lon coordinates.

    :param region_filtre: region code to keep (None keeps all regions)
    :return: merged DataFrame, one row per commune
    """
    df = pandas.read_excel('Donnes_INSEE_NumEduc.xlsx')
    df['Code'] = [str(e).zfill(5) for e in df.Code]
    latlon = pandas.read_csv('communes-departement-region.csv')
    # Deduplicate coordinate rows before joining on the INSEE code.
    latlon.drop_duplicates(['code_commune_INSEE', 'latitude', 'longitude', 'code_region'], inplace=True, keep='first')
    df = df.merge(latlon[['code_commune_INSEE', 'latitude', 'longitude', 'code_region']], left_on='Code',
                  right_on='code_commune_INSEE', how='inner')
    if region_filtre is not None:
        df.drop(df[df.code_region != region_filtre].index, axis=0, inplace=True)
    df.drop('code_commune_INSEE', axis=1, inplace=True)
    return df
def get_alone_com():
    """Communes of region 11 annotated with their vaccination-centre count.

    The 'gid' column holds the number of centres in each commune
    (0 when the commune has none).
    """
    communes = get_df_communes(region_filtre=11)
    centres = get_cva()
    counts = centres.groupby('Code', as_index=False).gid.count()
    merged = communes.merge(counts, on='Code', how='left')
    merged.gid.fillna(0, inplace=True)
    return merged
def get_closest_centre():
    """For each commune without a centre, find the nearest vaccination centre.

    :return: commune GeoDataFrame with a 'Plus_proche' column holding the
        gid of the nearest centre (None for communes that host one already).
    """
    df = get_alone_com()
    cva = get_cva()
    # Build point geometries from commune and centre coordinates.
    gdf = geopandas.GeoDataFrame(
        df, geometry=geopandas.points_from_xy(df.longitude, df.latitude))
    gcva = geopandas.GeoDataFrame(
        cva, geometry=geopandas.points_from_xy(cva.long_coor1, cva.lat_coor1))
    # Single multipoint of all centres for nearest_points queries.
    centres = gcva.geometry.unary_union
    def near(point, pts=centres):
        # find the nearest centre point and return its gid
        nearest = gcva.geometry == nearest_points(point, pts)[1]
        return gcva[nearest].gid.values[0]
    gdf['Plus_proche'] = gdf.apply(lambda row: near(row.geometry), axis=1)
    # Communes that already host a centre keep no "nearest" reference.
    gdf.loc[gdf.gid > 0, 'Plus_proche'] = None
    return gdf
def get_pop_centre():
    """Estimate the population served by each centre and its department share.

    :return: centre DataFrame with Population_centre, Population_dept and
        ratio (percentage of the department population) columns.
    """
    cva = get_cva()
    gdf = get_closest_centre()
    result = defaultdict(list)
    pop_tot = 0
    for i, el in cva.iterrows():
        gid = el.gid
        # Population of communes whose nearest centre is this one...
        pop_proches = gdf[gdf.Plus_proche == gid].Population2016.sum()
        # ...plus the host commune's population split across its centres.
        # NOTE(review): assumes the host commune's gid (centre count) > 0
        # and that its Code appears in gdf -- confirm.
        pop_commune = gdf[gdf.Code == el.Code].Population2016.values[0] / gdf[gdf.Code == el.Code].gid.values[0]
        result['gid'].append(gid)
        result['Population'].append(pop_commune + pop_proches)
        pop_tot += result['Population'][-1]
    result = pandas.DataFrame.from_dict(result)
    cva = cva.merge(result, on='gid', how='inner')
    # Department totals, then each centre's share as a percentage.
    cva = cva.merge(cva.groupby('DEPT', as_index=False).Population.sum(), on='DEPT', suffixes=('_centre', '_dept'))
    cva['ratio'] = cva.Population_centre/cva.Population_dept*100
    return cva
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (C) eranroz
#
# Distributed under the terms of the MIT license.
import datetime
import MySQLdb
import pywikibot
familyGadgets = dict() # family -> (gadget -> [(lang, users)])
def fillStatsForCluster(host, dbList):
clusterHost = cluster
conn = MySQLdb.connect(host=host,
read_default_file='~/replica.my.cnf')
cursor = conn.cursor()
for db, lang, family in dbList:
print 'Querying ',db
if family not in familyGadgets:
familyGadgets[family] = dict()
gadgetsDict = familyGadgets[family]
cursor.execute('USE `%s_p`'%db)
try:
cursor.execute('''
/* gadgets_popular.py SLOW_OK */
SELECT
up_property,
COUNT(*)
FROM %s_p.user_properties_anon
WHERE up_property LIKE 'gadget-%%'
AND up_value = 1
GROUP BY up_property;
'''%db)
except:
continue
for row in cursor.fetchall():
gadgetName = row[0].split('gadget-', 1)[1]
if gadgetName not in gadgetsDict:
gadgetsDict[gadgetName]=[]
langLink = '[[:%s:%s:MediaWiki:%s|%s]]' % (family,lang,row[0], lang)
count = row[1]
gadgetsDict[gadgetName].append((langLink,count))
cursor.close()
conn.close()
# Wikitext skeleton for the combined report.
report_template = u'''\
Cross-project gadgets preferences statistics.
'''
# Per-family wikitable, filled with (family, date, table rows).
# NOTE: the "Gagets" typo is inside the published wikitext; left untouched.
report_family_template= u'''
Gagets statistics for %s projects as of %s.
----
{| class="wikitable sortable plainlinks" style="width:85%%; margin:auto;"
|- style="white-space:nowrap;"
! Gadget
! wikis (number of users)
! total number of users
|-
%s
|}
'''
# Enumerate every open wiki of the main content families from meta_p.
conn = MySQLdb.connect(host='enwiki.labsdb',
                       db='meta_p',
                       read_default_file='~/replica.my.cnf')
cursor = conn.cursor()
cursor.execute('''
select slice,dbname,lang,family from meta_p.wiki
where is_closed=0
and family in ('wikibooks','wikipedia','wiktionary','wikiquote','wikisource','wikinews','wikiversity','wikivoyage')
and dbname not like 'test%'
''')
servers,dbnames,wikiLangs,wikiFamilies = zip(*cursor.fetchall())
# Group the wikis by the database cluster that hosts them.
nameToCluster=dict()
for clus, db, lang, family in zip(servers,dbnames,wikiLangs,wikiFamilies):
    if clus not in nameToCluster:
        nameToCluster[clus]=[]
    nameToCluster[clus].append((db,lang,family))
# Query each cluster once (Python 2: dict.iteritems).
for cluster, wikisMetaData in nameToCluster.iteritems():
    print 'Filling data from cluster ', cluster
    fillStatsForCluster(cluster, wikisMetaData)
report_text = report_template
# Build and publish one wikitable per project family.
for family, gadgets in familyGadgets.iteritems():
    gadgetsDetails = [(gadgetName,', '.join([u'%s (%s)'%(link,str(count)) for link, count in langData]), sum([count for link,count in langData])) for gadgetName, langData in gadgets.iteritems()]
    # Sort by total user count, most popular first.
    gadgetsDetails.sort(key=lambda x:x[2], reverse=True)
    gadgetsInfo = [u'| %s || %s || %i'%(gadgetName, langData, totalCount) for gadgetName, langData, totalCount in gadgetsDetails]
    family_report = report_family_template % (family, datetime.datetime.now().strftime('%B %Y'), '\n|-\n'.join(gadgetsInfo))
    meta_wiki = pywikibot.getSite('meta', 'meta')
    meta_page = pywikibot.Page(meta_wiki, 'Gadgets/%s'%(family))
    meta_page.put(family_report, 'Update')
    report_text = report_text+'\n'+ family_report
# Best-effort local dump of the full report; failures are ignored.
try:
    resFile = file('gadgetsData.wikitext','w')
    print>>resFile,report_text
    resFile.close()
except:
    pass
print report_text
cursor.close()
conn.close()
|
from .unicornafl import uc_afl_fuzz, uc_afl_fuzz_custom, monkeypatch, UcAflError, UC_AFL_RET_OK, UC_AFL_RET_ERROR, UC_AFL_RET_CHILD, UC_AFL_RET_NO_AFL, UC_AFL_RET_CALLED_TWICE, UC_AFL_RET_FINISHED
# Compatibility
from unicorn import * |
# coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from openapi_server.test import BaseTestCase
class TestGenerationController(BaseTestCase):
    """GenerationController integration test stubs"""

    def test_generation_list(self):
        """Test case for generation_list"""
        params = [('limit', 56),
                  ('offset', 56)]
        response = self.client.open('/api/v2/generation/',
                                    method='GET',
                                    query_string=params)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_generation_read(self):
        """Test case for generation_read"""
        response = self.client.open('/api/v2/generation/{id}'.format(id=56),
                                    method='GET')
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))


if __name__ == '__main__':
    import unittest
    unittest.main()
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from setuptools import setup
import digit
# Package metadata is pulled from the digit module itself.
setup(name=digit.__name__,
      version=digit.__version__,
      author='Pongsakorn Sommalai',
      author_email='bongtrop@gmail.com',
      license='MIT',
      url='https://github.com/bongtrop/digit',
      description='Dig git information from .git',
      long_description=digit.__doc__,
      scripts=['digit.py'],
      py_modules=['digit'],
      install_requires=[
          'requests'
      ],
      # Console entry point: the `digit` command runs digit.main().
      entry_points="""
[console_scripts]
digit=digit:main
""",
      keywords=''
      )
|
# Based on pygments documentation
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
__all__ = ['PrometheusLexer']
class PrometheusLexer(RegexLexer):
    """Pygments lexer for the Prometheus text exposition format."""
    name = 'Prometheus'
    aliases = ['prom', 'prometheus']
    filenames = ['*.prom']
    # State machine: a metric name, optionally followed by a
    # {label="value",...} block, then a numeric sample value.
    tokens = {
        'root': [
            # Comment / HELP / TYPE lines.
            (r'^#.*$', Comment),
            # Metric name; note ('maybe_dimensions') is just a plain string
            # (the parentheses are redundant), pushing that state.
            (r'[a-zA-Z0-9_]+', Name.Tag, ('maybe_dimensions')),
        ],
        'value': [
            # Sample value: digits with optional fraction and exponent.
            (r'[0-9]+(\.[0-9]+(e[-+][0-9]+)?)?$', Number.Float),
        ],
        'maybe_dimensions': [
            # Whitespace ends the name part: switch to the value state.
            (r'\s+', Text, ('#pop', 'value')),
            (r'\{', Punctuation, 'dimensions'),
            (r'\}', Punctuation, '#pop'),
        ],
        'dimensions': [
            (r',', Punctuation),
            (r'\}', Punctuation, '#pop'),
            # label = "quoted value"
            (r'([^=}]+)(=)("[^"]*")',
             bygroups(Name.Attribute, Operator, String.Double)),
        ],
    }
|
#!/usr/bin/env python3
# type: ignore
import os
import argparse
import signal
import sys
import cereal.messaging as messaging
from cereal import log
from selfdrive.monitoring.hands_on_wheel_monitor import HandsOnWheelStatus
from selfdrive.controls.lib.events import Events
HandsOnWheelState = log.DriverMonitoringState.HandsOnWheelState
def sigint_handler(signum, frame):
    """SIGINT handler: announce and terminate cleanly.

    BUG FIX (idiom): the first parameter was named `signal`, shadowing the
    imported signal module inside the handler; renamed to `signum`.
    """
    print("handler!")
    exit(0)


signal.signal(signal.SIGINT, sigint_handler)
def status_monitor():
    """Live console monitor of the hands-on-wheel state and events.

    NOTE(review): reads the module-level `args` parsed in __main__.
    """
    # use the driverState socket to drive timing.
    driverState = messaging.sub_sock('driverState', addr=args.addr, conflate=True)
    sm = messaging.SubMaster(['carState', 'dMonitoringState'], addr=args.addr)
    steering_status = HandsOnWheelStatus()
    v_cruise_last = 0
    while messaging.recv_one(driverState):
        try:
            sm.update()
            v_cruise = sm['carState'].cruiseState.speed
            # Any button press, cruise-speed change or steering torque
            # counts as driver engagement with the wheel.
            steering_wheel_engaged = len(sm['carState'].buttonEvents) > 0 or \
                v_cruise != v_cruise_last or sm['carState'].steeringPressed
            v_cruise_last = v_cruise
            # Get status from our own instance of HandsOnWheelStatus
            steering_status.update(Events(), steering_wheel_engaged, sm['carState'].cruiseState.enabled, sm['carState'].vEgo)
            steering_state = steering_status.hands_on_wheel_state
            # Fixed-width labels keep the \r-overwritten line aligned.
            state_name = "Unknown "
            if steering_state == HandsOnWheelState.none:
                state_name = "Not Active "
            elif steering_state == HandsOnWheelState.ok:
                state_name = "Hands On Wheel "
            elif steering_state == HandsOnWheelState.minor:
                state_name = "Hands Off Wheel - Minor "
            elif steering_state == HandsOnWheelState.warning:
                state_name = "Hands Off Wheel - Warning "
            elif steering_state == HandsOnWheelState.critical:
                state_name = "Hands Off Wheel - Critical"
            elif steering_state == HandsOnWheelState.terminal:
                state_name = "Hands Off Wheel - Terminal"
            # Get events from `dMonitoringState`
            events = sm['dMonitoringState'].events
            event_name = events[0].name if len(events) else "None"
            event_name = "{:<30}".format(event_name[:30])
            # Overwrite the current console line with the latest status.
            sys.stdout.write(f'\rSteering State: {state_name} | event: {event_name}')
            sys.stdout.flush()
        except Exception as e:
            # Best-effort monitor: report and keep looping on update errors.
            print(e)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Sniff a communcation socket')
parser.add_argument('--addr', default='127.0.0.1')
args = parser.parse_args()
if args.addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
status_monitor()
|
from django.urls import path
from daroca.apps.orders import views
# URL namespace, used when reversing, e.g. reverse('orders:add').
app_name = 'orders'
urlpatterns = [
    path('add/', views.add, name='add'),
]
|
from glob import glob
from os.path import join
from collections import defaultdict
import pydicom
import subprocess as sp
import tempfile
import nibabel as nib
import numpy as np
import pandas as pd
def load_forum_data(xml_dir, dcm_dir, out_dir):
    """Convert FORUM DICOM cube scans to NIfTI files.

    Walks paired .xml/.dcm files, decompresses each DICOM with gdcmconv,
    saves 3D 'Cube' volumes as NIfTI in out_dir, and returns a DataFrame
    of per-scan metadata (patient, laterality, filename, date, shape).
    """
    import os  # local import: only needed for temp-file cleanup

    xml_paths = sorted(glob(join(xml_dir, '*.xml')))
    dcm_paths = sorted(glob(join(dcm_dir, '*.dcm')))
    # NOTE(review): pairing assumes the xml and dcm listings sort into the
    # same order -- confirm the naming convention guarantees this.
    data = []
    for xml, dcm in zip(xml_paths, dcm_paths):
        dcm_file = pydicom.dcmread(dcm)
        if 'SeriesDescription' not in dcm_file.dir('series'):
            continue
        # Patient and series description
        pname = dcm_file.PatientName
        desc = dcm_file.SeriesDescription
        if 'PixelData' in dcm_file.dir('pixel'):
            # BUG FIX: use the public tempfile API (mkstemp) instead of the
            # private _get_default_tempdir/_get_candidate_names helpers,
            # and delete the temporary file when done (it used to leak).
            fd, tmp_file = tempfile.mkstemp()
            os.close(fd)
            try:
                # Decompress the DICOM, then read the raw pixel volume.
                convert_dcm(dcm, tmp_file)
                raw_data = pydicom.dcmread(tmp_file).pixel_array
            finally:
                os.remove(tmp_file)
            # Only keep 3D 'Cube' volumes.
            if len(raw_data.shape) < 3 or 'Cube' not in desc: continue
            x, y, z = raw_data.shape
            # Get metadata and build the output file name.
            lat = dcm_file.Laterality
            img_date = dcm_file.StudyDate
            fname = f'{pname}_{desc}_{img_date}_{lat}.nii.gz'.replace(' ', '')
            print(fname, raw_data.shape, f'{raw_data.min()} - {raw_data.max()}')
            row_dict = dict(patient_name=pname, laterality=lat, filename=fname, date=img_date, x=x, y=y, z=z)
            data.append(row_dict)
            # Save the NIfTI file with an identity affine.
            nii_obj = nib.Nifti1Image(raw_data, np.eye(4))
            save_path = join(out_dir, fname)
            nib.save(nii_obj, save_path)
    df = pd.DataFrame(data=data)
    df['date'] = pd.to_datetime(df['date'])
    return df
def convert_dcm(input_dcm, output_dcm):
    """Decompress a DICOM file to raw transfer syntax via gdcmconv."""
    return sp.check_output(['gdcmconv', '--raw', input_dcm, output_dcm])
|
from coyote_framework.webdriver.webdriverwrapper.support import JavascriptExecutor
__author__ = 'justin'
from selenium.common.exceptions import WebDriverException
from coyote_framework.log import Logger
class WebDriverWrapperException(WebDriverException):
    """WebDriverException enriched with page title/URL context.

    On construction the message is also injected into the page (useful for
    screenshots) and logged as a warning before the base class is set up.
    """
    def __init__(self, driver_wrapper, msg='WebDriverWrapper Exception', execute_on_error=None):
        js_executor = JavascriptExecutor.JavascriptExecutor(driver_wrapper)
        error_message = None
        try:
            # error_message sometimes has encoding problems
            error_message = "Message: {} || Page Title: {} || Current URL: {}"\
                .format(msg, driver_wrapper.driver.title, driver_wrapper.driver.current_url)
            # insert the error message into the page
            js_executor.execute_template('messageInjectorTemplate', {'message': error_message})
        except Exception as e:
            # Fall back to a diagnostic message when the driver/JS call fails.
            error_message = 'Unable to build error message: {}'.format(e) if error_message is None else error_message
        finally:
            Logger.get().warn(error_message)
        WebDriverException.__init__(self, error_message)
        # Optional callback executed on instantiation (e.g. screenshots).
        if execute_on_error is not None and hasattr(execute_on_error, '__call__'):
            execute_on_error()
'''
This code is implemented to calculate the similarity between images.
Use cosin distance for image and point clouds
Use Hash distance + Histogram distance between images
'''
import sys
sys.path.append("..")
from PIL import Image
import cv2
from numpy import average, linalg, dot
## cosin distance
def get_thumbnail(image, size=(100, 330), greyscale=True):
    """Normalize an image for comparison.

    NOTE(review): `size` is currently unused because the resize call is
    commented out; only the optional greyscale conversion is applied.
    """
    #image = image.resize(size, Image.ANTIALIAS)
    if greyscale:
        # Convert to 8-bit greyscale ('L' mode).
        image = image.convert('L')
    return image
def image_similarity_vectors_via_numpy(image1, image2):
    """Cosine similarity between the mean-pixel vectors of two images."""
    thumbs = [get_thumbnail(image1), get_thumbnail(image2)]
    vectors = []
    norms = []
    for thumb in thumbs:
        # One scalar per pixel: the average over the pixel's channels.
        vector = [average(pixel_tuple) for pixel_tuple in thumb.getdata()]
        vectors.append(vector)
        norms.append(linalg.norm(vector, 2))
    a, b = vectors
    a_norm, b_norm = norms
    # Dividing a list by a numpy scalar coerces it to an ndarray; the dot
    # of the two unit vectors is the cosine similarity.
    return dot(a / a_norm, b / b_norm)
## hamming distance
'''
import cv2
import numpy as np
from compiler.ast import flatten
import sys
def pHash(imgfile):
"""get image pHash value"""
#加载并调整图片为32x32灰度图片
img=cv2.imread(imgfile)
img=cv2.resize(img,(64,64),interpolation=cv2.INTER_CUBIC)
#创建二维列表
h, w = img.shape[:2]
vis0 = np.zeros((h,w), np.float32)
vis0[:h,:w] = img #填充数据
#二维Dct变换
vis1 = cv2.dct(cv2.dct(vis0))
#cv.SaveImage('a.jpg',cv.fromarray(vis0)) #保存图片
vis1.resize(32,32)
#把二维list变成一维list
img_list=flatten(vis1.tolist())
#计算均值
avg = sum(img_list)*1./len(img_list)
avg_list = ['0' if i<avg else '1' for i in img_list]
#得到哈希值
return ''.join(['%x' % int(''.join(avg_list[x:x+4]),2) for x in range(0,32*32,4)])
'''
'''
# 均值哈希算法
def ahash(image):
# 将图片缩放为8*8的
image = cv2.resize(image, (8,8), interpolation=cv2.INTER_CUBIC)
# 将图片转化为灰度图
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# s为像素和初始灰度值,hash_str为哈希值初始值
s = 0
ahash_str = ''
# 遍历像素累加和
for i in range(8):
for j in range(8):
s = s+gray[i, j]
# 计算像素平均值
avg = s/64
# 灰度大于平均值为1相反为0,得到图片的平均哈希值,此时得到的hash值为64位的01字符串
ahash_str = ''
for i in range(8):
for j in range(8):
if gray[i,j]>avg:
ahash_str = ahash_str + '1'
else:
ahash_str = ahash_str + '0'
result = ''
for i in range(0, 64, 4):
result += ''.join('%x' % int(ahash_str[i: i + 4], 2))
# print("ahash值:",result)
return result
# 差异值哈希算法
def dhash(image):
# 将图片转化为8*8
image = cv2.resize(image,(8,8),interpolation=cv2.INTER_CUBIC )
# 将图片转化为灰度图
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
dhash_str = ''
for i in range(8):
for j in range(8):
if gray[i,j]>gray[i, j+1]:
dhash_str = dhash_str + '1'
else:
dhash_str = dhash_str + '0'
result = ''
for i in range(0, 64, 4):
result += ''.join('%x'%int(dhash_str[i: i+4],2))
# print("dhash值",result)
return result
# 计算两个哈希值之间的差异
def campHash(hash1, hash2):
n = 0
# hash长度不同返回-1,此时不能比较
if len(hash1) != len(hash2):
return -1
# 如果hash长度相同遍历长度
for i in range(len(hash1)):
if hash1[i] != hash2[i]:
n = n+1
return n
def useHASH(img1,img2):
hash1 = ahash(img1)
print('img1的ahash值',hash1)
hash2= dhash(img1)
print('img1的dhash值',hash2)
hash3= ahash(img2)
print('img2的ahash值',hash3)
hash4= dhash(img2)
print('img2的dhash值',hash4)
camphash1 = campHash(hash1, hash3)
camphash2= campHash(hash2, hash4)
print("ahash均值哈希差异度:",camphash1)
print("dhash差异哈希差异度:",camphash2)
'''
if __name__ == "__main__":
img1 = Image.open('../data/img/um_000000.png')
img2 = Image.open('../result/voxel_gray.png')
# resize and get Region of Interest(ROI)
img1 = img1.resize((100, 330),Image.ANTIALIAS)
img2 = img2.resize((100, 330),Image.ANTIALIAS)
img1 = img1.crop((0, 29, 330, 100)) # (left, upper, right, lower)
img2 = img2.crop((0, 29, 330, 100))
# cosin distance
cosin = image_similarity_vectors_via_numpy(img1, img2)
print(cosin)
'''
#ph1 = pHash('./data/img/um_000000.png')
#print(ph1)
img1 = cv2.imread('./data/img/um_000000.png')
img2 = cv2.imread('./result/um_000000_composition.png')
useHASH(img1,img2)
''' |
# Seven-segment encoding: digit -> string of lit segment labels.
# NOTE(review): the lettering does not match the conventional A-G segment
# layout (e.g. 1 is 'CF', not 'BC') -- it is this puzzle's own labelling.
display = {
    0: 'ABCEFG',
    1: 'CF',
    2: 'ACDEG',
    3: 'ACDFG',
    4: 'BCDF',
    5: 'ABDFG',
    6: 'ABDEFG',
    7: 'ACF',
    8: 'ABCDEFG',
    9: 'ABCDFG'
}
# Example wire->segment key (lower-case scrambled wire -> segment label).
# NOTE(review): not referenced by part1/part2 in this file; presumably the
# worked example from decode_input's comments -- confirm before removing.
solve_it = {
    'a': 'C',
    'b': 'F',
    'c': 'G',
    'd': 'A',
    'e': 'B',
    'f': 'D',
    'g': 'E'
}
def part1(input_str: str) -> None:
    """Count output digits identifiable by segment count alone (1, 4, 7, 8)."""
    count = sum(
        1
        for line in input_str.split('\n')
        for word in line.split('|')[1].strip().split(' ')
        if len(word) in (2, 3, 4, 7)
    )
    print(f'Day 8 Part 1: Count: {count}')
def part2(input_str: str) -> None:
    """Decode every line's output value and report the grand total."""
    total = 0
    for line_num, line in enumerate(input_str.split('\n'), start=1):
        io_parts = line.split('|')
        input_list = io_parts[0].strip().split(' ')
        output_list = io_parts[1].strip().split(' ')
        # Derive the wire->segment key from the ten input patterns, then
        # translate the four output patterns into a number.
        key_dict = decode_input(input_list)
        output_num = decode_output(output_list, key_dict)
        total += output_num
        print(f'Line {line_num}: {output_num}')
    print(f'Day 8 Part 2: Total = {total}')
def decode_input(input_list: list[str]) -> dict[str, str]:
    """Deduce which scrambled wire (a-g) drives each real segment (A-G).

    Strategy (segment names refer to the `display` table):
      1. A = wires of "7" (len 3) minus wires of "1" (len 2).
      2. D = ("4" minus "1") ∩ (wires common to all len-5 patterns, minus A);
         the leftovers of those two sets are B and G respectively.
      3. The len-5 pattern with exactly one unsolved wire yields F;
         "1" minus F yields C.
      4. The remaining wire of "8" (len 7) is E.

    Returns a dict mapping segment letter 'A'..'G' to its wire letter.
    """
    sorted_list = [''.join(sorted(list_num)) for list_num in sorted(input_list, key=len)]
    tmp_dict = {}
    solved_dict = {}
    for x in sorted_list:
        if len(x) in (2, 3, 4, 7):
            tmp_dict.update({len(x): x})  # uniquely sized patterns: 1, 7, 4, 8
        elif len(x) == 5:
            my_list = tmp_dict.get(5, [])
            my_list.append(x)
            tmp_dict.update({5: sorted(my_list)})  # patterns for 2, 3, 5
    # 1. segment A
    my_a = ''.join(set(tmp_dict.get(3)) - set(tmp_dict.get(2)))
    solved_dict.update({'A': my_a})
    # 2. segments D, B, G
    four_and_two = set(tmp_dict.get(4)) - set(tmp_dict.get(2))
    five_and_three = set.intersection(*(set(p) for p in tmp_dict.get(5)))
    five_and_two = set(five_and_three)
    five_and_two.discard(solved_dict.get('A'))
    tmp_d = four_and_two.intersection(five_and_two)
    my_d = ''.join(tmp_d)
    tmp_b = set(four_and_two)
    tmp_b.discard(my_d)
    my_b = ''.join(tmp_b)
    tmp_g = set(five_and_two)
    tmp_g.discard(my_d)
    my_g = ''.join(tmp_g)
    solved_dict.update({'D': my_d})
    solved_dict.update({'B': my_b})
    solved_dict.update({'G': my_g})
    # 3. segments F and C
    for tmp_5 in tmp_dict.get(5):
        tmp_5_set = set(tmp_5)
        for v in solved_dict.values():
            tmp_5_set.discard(v)
        if len(tmp_5_set) == 1:
            my_f = ''.join(tmp_5_set)
            solved_dict.update({'F': my_f})
            tmp_2 = set(tmp_dict.get(2))
            tmp_2.discard(my_f)
            my_c = ''.join(tmp_2)
            solved_dict.update({'C': my_c})
            break
    # 4. segment E is whatever remains of the 7-wire pattern
    tmp_7_set = set(tmp_dict.get(7))
    for v in solved_dict.values():
        tmp_7_set.discard(v)
    my_e = ''.join(tmp_7_set)
    solved_dict.update({'E': my_e})
    return solved_dict
def copy_set(my_set: set) -> set:
    """Return a shallow copy of *my_set* (the set constructor does this directly)."""
    return set(my_set)
def find_intersections(master_list: list[list[str]]) -> set[str]:
    """Return every element present in at least two distinct sub-lists.

    An element belongs to some pairwise intersection exactly when at least
    two different sub-lists contain it, so counting per-list membership is
    equivalent to intersecting every ordered pair.
    """
    appearances = {}
    for sub in master_list:
        for element in set(sub):  # duplicates inside one sub-list count once
            appearances[element] = appearances.get(element, 0) + 1
    return {element for element, hits in appearances.items() if hits >= 2}
def decode_output(output_list: list[str],
                  key_dict: dict[str, str]) -> int:
    """Translate scrambled output patterns into the displayed integer value."""
    # Invert segment->wire into wire->segment for direct lookup.
    wire_to_segment = {wire: segment for segment, wire in key_dict.items()}
    digits = ''
    for pattern in output_list:
        segments = ''.join(sorted(wire_to_segment.get(ch).upper() for ch in pattern))
        for digit, layout in display.items():
            if layout == segments:
                digits += str(digit)
                break
    return int(digits)
if __name__ == '__main__':
    # Run both parts on the sample input first, then on the real puzzle input.
    with open('../../resources/2021/inputd8a.txt', 'r') as f:
        test_string = f.read()
    part1(test_string)
    part2(test_string)
    with open('../../resources/2021/inputd8.txt', 'r') as f:
        test_input = f.read()
    part1(test_input)
    part2(test_input)
|
import numpy as np
from MLlib.activations import sigmoid
class MeanSquaredError():
    """Mean squared error loss: (1 / 2M) * sum((X @ W - Y)^2)."""

    @staticmethod
    def loss(X, Y, W):
        """Return the mean squared loss.

        X: ndarray of inputs (one sample per row)
        Y: ndarray of targets
        W: ndarray of weights
        """
        sample_count = X.shape[0]
        residual = np.dot(X, W).T - Y
        return np.sum(residual ** 2) / (2 * sample_count)

    @staticmethod
    def derivative(X, Y, W):
        """Return d(loss)/dW for the mean squared loss.

        X: ndarray of inputs (one sample per row)
        Y: ndarray of targets
        W: ndarray of weights
        """
        sample_count = X.shape[0]
        residual = np.dot(X, W).T - Y
        return np.dot(residual, X).T / sample_count
class LogarithmicError():
    """Cross-entropy (logarithmic) loss for sigmoid-activated predictions."""

    @staticmethod
    def loss(X, Y, W):
        """Return the mean cross-entropy loss.

        X: ndarray of inputs (one sample per row)
        Y: ndarray of 0/1 targets
        W: ndarray of weights
        """
        sample_count = X.shape[0]
        prediction = sigmoid(np.dot(X, W).T)
        per_sample = (-Y) * np.log(prediction) - (1 - Y) * np.log(1 - prediction)
        return np.sum(per_sample) / sample_count

    @staticmethod
    def derivative(X, Y, W):
        """Return d(loss)/dW for the cross-entropy loss.

        X: ndarray of inputs (one sample per row)
        Y: ndarray of 0/1 targets
        W: ndarray of weights
        """
        sample_count = X.shape[0]
        prediction = sigmoid(np.dot(X, W).T)
        return np.dot(X.T, (prediction - Y).T) / sample_count
class AbsoluteError():
    """Mean absolute error loss: (1/M) * sum(|X @ W - Y|)."""

    @staticmethod
    def loss(X, Y, W):
        """Return the mean absolute loss.

        X: ndarray of inputs (one sample per row)
        Y: ndarray of targets
        W: ndarray of weights
        """
        sample_count = X.shape[0]
        residual = np.dot(X, W).T - Y
        return np.sum(np.absolute(residual)) / sample_count

    @staticmethod
    def derivative(X, Y, W):
        """Return d(loss)/dW for the absolute loss.

        The gradient of |r| is sign(r); it is defined as 0 wherever the
        residual is exactly 0 to avoid dividing by zero.
        """
        sample_count = X.shape[0]
        residual = np.dot(X, W).T - Y
        signs = np.divide(
            residual,
            np.absolute(residual),
            out=np.zeros_like(residual),
            where=np.absolute(residual) != 0,
        )
        return np.dot(signs, X).T / sample_count
|
import taco.common.exceptions
class MutexException(taco.common.exceptions.DataDictException):
    """Base class for all mutex-related errors raised by this module."""
    pass
class MutexLockFailedException(MutexException):
    """Raised when acquiring the named mutex lock fails."""

    def __init__(self, lock_name, holder, ttl, exc=None):
        details = {'lock_name': lock_name, 'holder': holder, 'ttl': ttl}
        super().__init__('Mutex lock exception', details, exc=exc)
class MutexReleaseFailedException(MutexException):
    """Raised when releasing the named mutex lock fails."""

    def __init__(self, lock_name, holder, ttl, exc=None):
        details = {'lock_name': lock_name, 'holder': holder, 'ttl': ttl}
        super().__init__('Mutex release exception', details, exc=exc)
class MutexPruneException(MutexException):
    """Raised when pruning stale mutex state fails."""

    def __init__(self, lock_name, holder, ttl, exc=None):
        details = {'lock_name': lock_name, 'holder': holder, 'ttl': ttl}
        super().__init__('Mutex prune exception', details, exc=exc)
|
import os
import sys
import json
import sqlite3
from os import listdir, makedirs
from os.path import isfile, isdir, join, split, exists, splitext
from nltk import word_tokenize, tokenize
import traceback
EXIST = {"atis", "geo", "advising", "yelp", "restaurants", "imdb", "academic"}
def convert_fk_index(data):
    """Convert name-based foreign keys [(tab, col), (ref_tab, ref_col)] into
    pairs of indices into data['column_names_original'].

    Prints diagnostics and exits the process when a referenced table name
    cannot be resolved (schema is unusable at that point).
    """
    fk_holder = []
    for fk in data["foreign_keys"]:
        tn, col, ref_tn, ref_col = fk[0][0], fk[0][1], fk[1][0], fk[1][1]
        ref_cid, cid = None, None
        try:
            tid = data['table_names_original'].index(tn)
            ref_tid = data['table_names_original'].index(ref_tn)
            for i, (tab_id, col_org) in enumerate(data['column_names_original']):
                if tab_id == ref_tid and ref_col == col_org:
                    ref_cid = i
                elif tid == tab_id and col == col_org:
                    cid = i
            # Compare against None explicitly: index 0 is falsy but valid.
            if ref_cid is not None and cid is not None:
                fk_holder.append([cid, ref_cid])
        except Exception:
            traceback.print_exc()
            print("table_names_original: ", data['table_names_original'])
            print("finding tab name: ", tn, ref_tn)
            sys.exit()
    return fk_holder
def dump_db_json_schema(db, f):
    '''Read table and column info from the sqlite file *db* into a schema dict.

    db: path to a .sqlite file; f: database id recorded as 'db_id'.
    Returns the dict with table names, column names/types, primary keys and
    index-based foreign keys.
    '''
    conn = sqlite3.connect(db)
    try:
        conn.execute('pragma foreign_keys=ON')
        cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
        data = {'db_id': f,
                'table_names_original': [],
                'table_names': [],
                'column_names_original': [(-1, '*')],
                'column_names': [(-1, '*')],
                'column_types': ['text'],
                'primary_keys': [],
                'foreign_keys': []}
        fk_holder = []
        for i, item in enumerate(cursor.fetchall()):
            table_name = item[0]
            data['table_names_original'].append(table_name)
            data['table_names'].append(table_name.lower().replace("_", ' '))
            fks = conn.execute("PRAGMA foreign_key_list('{}') ".format(table_name)).fetchall()
            # fk row: (id, seq, ref_table, from_col, to_col, ...)
            fk_holder.extend([[(table_name, fk[3]), (fk[2], fk[4])] for fk in fks])
            cur = conn.execute("PRAGMA table_info('{}') ".format(table_name))
            for j, col in enumerate(cur.fetchall()):
                data['column_names_original'].append((i, col[1]))
                data['column_names'].append((i, col[1].lower().replace("_", " ")))
                # Map SQL declared types onto the coarse buckets used downstream:
                # varchar/'' -> text; int/numeric/... -> number; date/time -> time.
                col_type = col[2].lower()
                if 'char' in col_type or col_type == '' or 'text' in col_type or 'var' in col_type:
                    data['column_types'].append('text')
                elif 'int' in col_type or 'numeric' in col_type or 'decimal' in col_type or 'number' in col_type\
                        or 'id' in col_type or 'real' in col_type or 'double' in col_type or 'float' in col_type:
                    data['column_types'].append('number')
                elif 'date' in col_type or 'time' in col_type or 'year' in col_type:
                    data['column_types'].append('time')
                elif 'boolean' in col_type:
                    data['column_types'].append('boolean')
                else:
                    data['column_types'].append('others')
                if col[5] == 1:  # table_info col 5: 1 when part of the primary key
                    data['primary_keys'].append(len(data['column_names']) - 1)
        data["foreign_keys"] = fk_holder
        data['foreign_keys'] = convert_fk_index(data)
        return data
    finally:
        # The original leaked the connection; always release it.
        conn.close()
if __name__ == '__main__':
    # Original checked < 2 but reads argv[1..3]; require all three arguments.
    if len(sys.argv) < 4:
        print("Usage: python get_tables.py [dir includes many subdirs containing database.sqlite files] [output file name e.g. output.json] [existing tables.json file to be inherited]")
        sys.exit()
    input_dir = sys.argv[1]
    output_file = sys.argv[2]
    ex_tab_file = sys.argv[3]
    all_fs = [df for df in listdir(input_dir) if exists(join(input_dir, df, df+'.sqlite'))]
    with open(ex_tab_file, encoding='utf8') as f:
        ex_tabs = json.load(f)
    # Keep only inherited entries whose database file actually exists.
    ex_tabs = {tab["db_id"]: tab for tab in ex_tabs if tab["db_id"] in all_fs}
    print("precessed file num: ", len(ex_tabs))
    not_fs = [df for df in listdir(input_dir) if not exists(join(input_dir, df, df+'.sqlite'))]
    for d in not_fs:
        print("no sqlite file found in: ", d)
    db_files = [(df+'.sqlite', df) for df in listdir(input_dir) if exists(join(input_dir, df, df+'.sqlite'))]
    tables = []
    for f, df in db_files:
        db = join(input_dir, df, f)
        print('\nreading new db: ', df)
        table = dump_db_json_schema(db, df)
        # Only consult the inherited entry when it exists; the original indexed
        # ex_tabs[df] unconditionally and crashed with KeyError on new dbs.
        if df in ex_tabs:
            prev_tab_num = len(ex_tabs[df]["table_names"])
            prev_col_num = len(ex_tabs[df]["column_names"])
        else:
            prev_tab_num = prev_col_num = 0
        cur_tab_num = len(table["table_names"])
        cur_col_num = len(table["column_names"])
        if df in ex_tabs and prev_tab_num == cur_tab_num and prev_col_num == cur_col_num and prev_tab_num != 0 and prev_col_num > 1:
            # Inherit the curated (human-readable) names when shapes match.
            table["table_names"] = ex_tabs[df]["table_names"]
            table["column_names"] = ex_tabs[df]["column_names"]
        else:
            print("\n----------------------------------problem db: ", df)
        tables.append(table)
    print("final db num: ", len(tables))
    with open(output_file, 'wt', encoding='utf8') as out:
        json.dump(tables, out, sort_keys=True, indent=2, separators=(',', ': '), ensure_ascii=False)
####
# This is the script for storing the schema of your TerminusDB
# database for your project.
# Use 'terminusdb commit' to commit changes to the database and
# use 'terminusdb sync' to change this file according to
# the existing database schema
####
"""
Title: Phonebook for Awesome Startup
Description: Database storing all the contact details of all employees in Awesome Startup
Authors: Destiny Norris, Fabian Dalby
"""
from typing import Optional
from terminusdb_client.woqlschema import DocumentTemplate, EnumTemplate
class Address(DocumentTemplate):
    """Home address of Employee

    Attributes
    ----------
    postcode : str
        Postal Code
    street : str
        Street name.
    street_num : int
        Street number.
    town : str
        Town name.
    """

    # Stored inline as a subdocument of the owning Employee document.
    _subdocument = []
    postcode: str
    street: str
    street_num: int
    town: str
class Employee(DocumentTemplate):
    """Employee of the Company"""
    address: "Address"  # embedded Address subdocument
    contact_number: str
    manager: Optional["Employee"]  # self-reference; absent for top-level managers
    name: str
    team: "Team"  # enum, see Team below
    title: str
class Team(EnumTemplate):
    """Closed set of teams an Employee can belong to."""
    marketing = ()
    it = ()
class EmployeesFromCSV(DocumentTemplate):
    """Raw employee records as imported from CSV, keyed by employee_id."""
    employee_id: str
    manager: Optional["EmployeesFromCSV"]  # self-reference by employee_id
    name: Optional[str]
    team: Optional[str]  # free text here, unlike Employee.team
    title: Optional[str]
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def plotRecursiveFunction(T, X, t1, t2):
    """Scatter-plot series X over T restricted to the index window [t1, t2)."""
    window_t = T[t1:t2]
    window_x = X[t1:t2]
    plt.plot(window_t, window_x, 'ro')
    plt.title("Mackey-Glass time series")
    plt.xlabel("t")
    plt.ylabel("x")
    plt.show()
def plotPredictions(prediction, trueValue):
    """Plot predictions (blue) vs true values (red) and their difference (green)."""
    residual = prediction - trueValue
    plt.plot(trueValue, 'ro')
    plt.plot(prediction, 'bo')
    plt.plot(residual, 'go')
    plt.title("Prediction (blue) vs True value (red) vs the difference (green)")
    plt.xlabel("t")
    plt.ylabel("x")
    plt.show()
|
import io
import gym
import numpy as np
import torch
from .pg_model import generate_pg_default_models
from ..algorithm import Algorithm
from ..util import ensure_tensor
class PGSolver(Algorithm):
    """Policy-gradient solver for discrete action spaces.

    Performs gradient ascent on return-weighted log-probabilities
    (REINFORCE-style updates — presumably r holds per-sample returns;
    confirm against the caller).
    """

    def __init__(
        self,
        observation_space,
        action_space,
        models=None,
        learning_rate=5e-5,
        device=None,
    ):
        """Build the solver; default policy/optimizer models are generated
        when *models* is None. Only gym.spaces.Discrete action spaces are
        supported (asserted below)."""
        super().__init__(device)
        if models is None:
            models = generate_pg_default_models(
                observation_space, action_space, learning_rate
            )
        assert models["policy_model"]
        assert models["optimizer"]
        self.policy = models["policy_model"]
        self.optimizer = models["optimizer"]
        self.policy.to(self.device, non_blocking=True)
        assert isinstance(action_space, gym.spaces.Discrete)
        self.num_actions = action_space.n

    def update(self, batch, weights=None):
        """Run one gradient step on a (states, actions, rewards) batch.

        *weights* is accepted for interface compatibility but unused here.
        Returns the detached per-sample |r * log pi(a|s)| values.
        """
        s0, a, r = batch
        s0 = ensure_tensor(s0, torch.float, self.device)
        a = ensure_tensor(a, torch.long, self.device)
        r = ensure_tensor(r, torch.float, self.device)
        self.optimizer.zero_grad()
        # log pi(a|s), selected at the actions actually taken.
        log_probs = torch.log(self.policy(s0))
        selected_log_probs = r * log_probs[np.arange(len(a)), a]
        loss = -selected_log_probs.mean()
        out_loss = selected_log_probs.detach().cpu().abs()
        loss.backward()
        self.optimizer.step()
        return out_loss

    def act(self, state):
        """Return the greedy action: argmax of the policy's probabilities."""
        state = ensure_tensor(state, dtype=torch.float, device=self.device).unsqueeze(0)
        probs = self.policy(state).detach().cpu().numpy()
        action = np.argmax(probs, axis=1)
        return action.item()

    def load_weights(self, stream):
        """Load policy weights from *stream* and move them to this device."""
        states = torch.load(stream, map_location=self.device)
        self.policy.load_state_dict(states)
        self.policy.to(self.device, non_blocking=True)

    def save_weights(self, stream=None):
        """Serialize policy weights; allocates a BytesIO when *stream* is None."""
        if stream is None:
            stream = io.BytesIO()
        torch.save(self.policy.state_dict(), stream)
        return stream
|
# Party budget calculator: reads guest count, per-person price, and budget,
# applies a group-size discount, and reports the surplus or the shortfall.
number_of_guests = int(input())
price_for_person = float(input())
budget = float(input())
for_person_discount = 0
# 10% of the budget is reserved for the cake (budget minus 90% of budget).
cake_price = budget - budget * 90 / 100
# Discount tiers: 15% for 10-15 guests, 20% for 16-20, 25% above 20.
if 10 <= number_of_guests <= 15:
    for_person_discount = price_for_person - price_for_person * 15 / 100
elif 15 < number_of_guests <= 20:
    for_person_discount = price_for_person - price_for_person * 20 / 100
elif number_of_guests > 20 :
    for_person_discount = price_for_person - price_for_person * 25 / 100
else:
    for_person_discount = price_for_person
total_for_people = for_person_discount * number_of_guests
total = total_for_people + cake_price
diff = budget - total
if total <= budget:
    print(f"It is party time! {abs(diff):.2f} leva left.")
else:
    print(f"No party! {abs(diff):.2f} leva needed.")
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
from enum import Enum
from typing import Optional
class ActionName(Enum):
    """
    Name of the action.

    Can be defined by user in the per-package config file and used to overwrite the default
    implementation.

    New action needs to be added here and to the table in
    `https://github.com/packit/packit.dev/content/docs/actions.md`.

    (Some values are also used in tests:
    - tests/unit/test_config.py
    - tests/unit/test_actions.py
    - tests/unit/test_base_git.py
    - tests/integration/test_base_git.py)
    """

    post_upstream_clone = "post-upstream-clone"
    pre_sync = "pre-sync"
    create_patches = "create-patches"
    prepare_files = "prepare-files"
    create_archive = "create-archive"
    get_current_version = "get-current-version"
    fix_spec = "fix-spec-file"
    changelog_entry = "changelog-entry"

    @classmethod
    def is_valid_action(cls, action: str) -> bool:
        """Return True when *action* matches one of the defined action values."""
        return action in cls.get_possible_values()

    @classmethod
    def get_action_from_name(cls, action: str) -> Optional["ActionName"]:
        """Return the member for *action*, or None for an unknown value."""
        if not cls.is_valid_action(action):
            return None
        return cls(action)

    @classmethod
    def get_possible_values(cls):
        """Return all defined action string values."""
        return [a.value for a in cls]
|
"""
You are given the firstname and lastname of a person on two different lines. Your task is to read them and print the following:
Hello firstname lastname! You just delved into python.
Input Format
The first line contains the first name, and the second line contains the last name.
Constraints
The length of the first and last name is at most 10 characters each.
Output Format
Print the output as mentioned above.
Sample Input 0
Ross
Taylor
Sample Output 0
Hello Ross Taylor! You just delved into python.
"""
def print_full_name(a, b):
    """Print the greeting for first name *a* and last name *b*."""
    print(f"Hello {a} {b}! You just delved into python.")
if __name__ == '__main__':
    # Read first and last name from stdin, one per line, then greet.
    first_name = input()
    last_name = input()
    print_full_name(first_name, last_name)
# 2020.03.13
# Feeling a bit anxious recently, but I'm back :)
# Problem Statement:
# https://leetcode.com/problems/clone-graph/
# Referred to the DFS solution here:
# https://leetcode.com/problems/clone-graph/discuss/42309/Depth-First-Simple-Java-Solution
"""
# Definition for a Node.
class Node:
def __init__(self, val = 0, neighbors = None):
self.val = val
self.neighbors = neighbors if neighbors is not None else []
"""
class Solution:
    def cloneGraph(self, node: 'Node') -> 'Node':
        """Deep-copy an undirected graph via DFS, memoising clones by node value."""
        if not node:
            return None
        self.hash_table = {}
        return self.connect(node)

    def connect(self, current_node_origin):
        """Return the clone of this node with its whole neighborhood cloned."""
        # Memo hit: already cloned — returning it also terminates cycles.
        cached = self.hash_table.get(current_node_origin.val)
        if cached is not None:
            return cached
        # First visit: record the clone before recursing so cycles resolve.
        clone = Node(current_node_origin.val, [])
        self.hash_table[clone.val] = clone
        for origin_neighbor in current_node_origin.neighbors:
            clone.neighbors.append(self.connect(origin_neighbor))
        return clone
|
import lxml.etree as et
import moment
def process_xpath(xpaths, *preprocessors):
    """Build a processor that runs *preprocessors* on every node matching *xpaths*.

    xpaths: a single XPath string or an iterable of XPath strings.
    The returned callable raises when an xpath matches no nodes, treating an
    empty match as a template error.
    """
    def _process_xpath(root):
        nonlocal xpaths
        if isinstance(xpaths, str):
            xpaths = (xpaths,)
        for xpath in xpaths:
            nodes = root.xpath(xpath)
            if not nodes:
                # Original dropped into ipdb and raised BaseException here;
                # fail loudly with a catchable Exception instead.
                raise Exception('no valid nodes for xpath:', xpath)
            for node in nodes:
                for preprocessor in preprocessors:
                    preprocessor(node)
    return _process_xpath
def process_down(*preprocessors, valid_nodes=None):
    """Build a processor applying *preprocessors* in pre-order (parent first)."""
    def _process_down(root):
        for current in iter_down(root, valid_nodes):
            for apply_one in preprocessors:
                apply_one(current)
    return _process_down
def process_up(*preprocessors, valid_nodes=None):
    """Build a processor applying *preprocessors* in post-order (children first)."""
    def _process_up(root):
        for current in iter_up(root, valid_nodes):
            for apply_one in preprocessors:
                apply_one(current)
    return _process_up
def iter_down(node, valid_nodes=None):
    """Yield *node* and its descendants pre-order, pruning tags not in valid_nodes."""
    yield node
    for child in node.iterchildren():
        if valid_nodes is not None and child.tag not in valid_nodes:
            continue
        yield from iter_down(child, valid_nodes)
def iter_up(node, valid_nodes=None):
    """Yield descendants post-order (deepest first), then *node* itself last."""
    for child in node.iterchildren():
        if valid_nodes is not None and child.tag not in valid_nodes:
            continue
        yield from iter_up(child, valid_nodes)
    yield node
# ------------------------------------
# Irregular plural overrides; anything absent gets a plain "s" suffix.
plurals = {}


def pluralize(word):
    """Return the plural of *word*, honouring the overrides in `plurals`."""
    return plurals.get(word, word + 's')
# Global lookup: document id -> {'node': <document element>, 'children': {id: node}}.
id_index = {}


def index_docs(dom):
    """Register every <document id=...> element of *dom* in the global id_index."""
    global id_index
    for doc_node in dom.xpath('//document[@id]'):
        id_index[xstring(doc_node, '@id')] = {'node': doc_node, 'children': {}}
def index_doc(doc_node, xpath='.//*'):
    """Index every id-carrying descendant of *doc_node* under its document entry.

    xpath selects which descendants to consider; '[@id]' is appended so only
    elements with an id attribute are indexed.
    """
    global id_index
    doc_id = doc_node.get('id')
    # BUG FIX: setdefault was originally called without the key argument, and
    # the loop below iterated an undefined name `doc` instead of `doc_node`.
    doc_index = id_index.setdefault(doc_id, {'node': doc_node, 'children': {}})['children']
    xpath += '[@id]'
    for node in doc_node.xpath(xpath):
        doc_index[xstring(node, '@id')] = node
def index_doc_id(node):
    """Register id-carrying *node* under its enclosing document in id_index."""
    global id_index
    doc_node = node.xpath('ancestor::document')[0]
    doc_entry = id_index.setdefault(xstring(doc_node, '@id'),
                                    {'node': doc_node, 'children': {}})
    doc_entry['children'][xstring(node, '@id')] = node
def resolve_ref(node):
    """Resolve a citation node to its target element via id_index.

    The document id comes from @doc or the enclosing document; when @root is
    set, the target is that child of the document, otherwise the document
    node itself. Returns None when the cited document is unknown.
    """
    doc_id = node.get('doc') or xstring(node, 'ancestor::document/@id')
    doc_index = id_index.get(doc_id)
    if doc_index is None:
        return None
    root_id = node.get('root')
    if root_id:
        return doc_index['children'].get(root_id)
    return doc_index['node']
# ------------------------------------
def format_date(date):
    # Render as e.g. "January 02, 2020".
    # NOTE(review): relies on the third-party `moment` package to parse *date*;
    # accepted input forms depend on moment.date — confirm at call sites.
    return moment.date(date).strftime('%B %d, %Y')
# ------------------------------------
def xstring(node, xpath):
    """Evaluate *xpath* wrapped in string() and return the text result."""
    return node.xpath(f'string({xpath})')
def xcache(node, tag):
    """Return the text of element *tag* inside this node's <cache>."""
    return xstring(node, f'cache/{tag}')
def update_cache(node, new_el, xpath=None):
    """Insert or replace *new_el* inside *node*'s <cache> element.

    Creates the <cache> element when missing. When *xpath* is given, the
    element is placed under that existing sub-path of the cache instead
    (raises if the sub-path does not exist). No-op when node is None.
    """
    if node is None:
        return
    cache = node.find('cache')
    if cache is None:
        cache = make_node('cache', parent=node)
    if xpath is not None:
        cache = cache.find(xpath)
        if cache is None:
            raise Exception('path specified by xpath {} must exist'.format(xpath))
    # Replace any existing element with the same tag so the cache holds at
    # most one entry per tag. (Original wrapped this in a bare except that
    # dropped into ipdb — debugging leftover removed.)
    old_el = cache.find(new_el.tag)
    if old_el is None:
        cache.append(new_el)
    else:
        cache.replace(old_el, new_el)
def make_node(tag, text=None, parent=None, **attrs):
    """Make a node in an XML document.

    None-valued attributes are skipped; bool/int values are stringified.
    """
    element = et.Element(tag)
    if parent is not None:
        parent.append(element)
    element.text = text
    for name, value in attrs.items():
        if value is None:
            continue
        if isinstance(value, (bool, int)):
            value = str(value)
        # "___" is stripped from keyword names — presumably to let callers
        # spell attribute names that clash with Python keywords; confirm.
        element.set(name.replace("___", ""), value)
    return element
def add_ancestors(node):
    """Rebuild this node's cached <ancestors> trail from its parent's cache."""
    ancestors = make_node('ancestors')
    # Copy the parent's ancestor entries verbatim.
    for ancestor in node.xpath('../cache/ancestors/ancestor'):
        make_node('ancestor', parent=ancestors, **ancestor.attrib)
    parent = node.getparent()
    # Unless the parent is flagged noPage, it becomes the nearest ancestor.
    if parent is not None and not xcache(parent, 'noPage'):
        make_node(
            'ancestor',
            parent=ancestors,
            url=xstring(node, '../cache/url'),
            title=xstring(node, '../cache/title'),
        )
    update_cache(node, ancestors)
|
import logging
import random
import time
from datetime import datetime, timezone, timedelta
import pytz
import requests
from bs4 import BeautifulSoup as bS
logger = logging.getLogger('root')
def get_current_time():
    """Return 'now' as Jamaica wall-clock fields tagged with a UTC tzinfo.

    NOTE(review): the Jamaica local clock values are copied into a datetime
    labelled tzinfo=UTC, so the result is NOT a true UTC instant — presumably
    intentional so later arithmetic ignores the offset; confirm with callers.
    """
    try:
        jamaica = pytz.timezone('Jamaica')
        t = datetime.now(tz=jamaica)
        current_time = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond, tzinfo=timezone.utc)
        return current_time
    except Exception as e:
        logger.error(e)
        raise
def format_date(current_time):
    """Format a datetime as YYYY-MM-DD; logs and re-raises on failure."""
    try:
        return f"{current_time:%Y-%m-%d}"
    except Exception as e:
        logger.error(e)
        raise
def get_first_date():
    """Return January 1st of the current year formatted as 'YYYY-MM-DD'."""
    try:
        year = datetime.today().year
        first_date = datetime.strptime(f'{year}-01-01', '%Y-%m-%d')
        return format_date(first_date)
    except Exception as e:
        logger.error(e)
        # Re-raise like the sibling helpers do; the original fell through
        # and silently returned None on failure.
        raise
def get_current_formatted_date():
    """Get current date or previous business day"""
    try:
        now = get_current_time()
        # Saturday (5) -> back 1 day; Sunday (6) -> back 2 days (to Friday).
        offset = {5: 1, 6: 2}.get(now.weekday(), 0)
        return format_date(now - timedelta(days=offset))
    except Exception as e:
        logger.error(e)
        raise
def get_response(url):
    """GET *url* using a random User-Agent from ua_file.txt.

    Returns the requests Response, or None when anything fails (errors are
    logged; callers handle the None, see get_parse_tree).
    """
    try:
        ua_file = 'ua_file.txt'
        # Close the UA file deterministically; the original leaked the handle.
        with open(ua_file) as fh:
            lines = fh.read().splitlines()
        headers = {
            'User-Agent': random.choice(lines),
        }
        return requests.get(url, headers=headers)
    except Exception as e:
        logger.error(e)
def get_parse_tree(url, retry_num):
    """Fetch *url* and parse it with html.parser, retrying up to retry_num times.

    Returns a BeautifulSoup tree, or None when every attempt fails.
    """
    logger.info(f"Getting parse tree for {url}")
    parse_tree = None
    # The original wrapped this loop in `except Exception: raise`, which is a
    # no-op; exceptions now propagate naturally.
    for _ in range(retry_num):
        response = get_response(url)
        if response is not None and response.status_code == 200:
            # requests decodes the body to str using the detected charset.
            parse_tree = bS(response.text, "html.parser")
            break
        logger.warning('Could not load page.')
        # Randomized 5-15 s back-off between attempts.
        delay = random.randrange(5, 15)
        time.sleep(delay)
    return parse_tree
def find_tables(parse_tree, is_find_all=False):
    """Return the first <table> (or all tables when is_find_all) from the tree.

    Raises Exception when the tree is None or when no table is found.
    (The original try/except that only re-raised the same exception was
    removed — behavior is unchanged.)
    """
    if parse_tree is None:
        raise Exception('No data provided.')
    if is_find_all:
        table = parse_tree.findAll('table')
    else:
        table = parse_tree.find('table')
    if table is None:
        raise Exception('Table is missing.')
    return table
class Cells:
    """Thin wrapper over a table row's cell tags with extraction helpers."""

    def __init__(self, cells):
        self.cells = cells

    def extract_text(self, cell_number):
        """Return the lower-cased stripped text, preferring the cell's <a> child."""
        cell = self.cells[cell_number]
        anchor = cell.find("a")
        source = anchor if anchor else cell
        return source.text.strip().lower()

    def extract_href(self, cell_number):
        """Return the lower-cased href of the cell's link, or '' when absent."""
        anchor = self.cells[cell_number].find("a", href=True)
        if not anchor:
            return ''
        return anchor.get("href", '').lower()
|
import pickle
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.callbacks import ModelCheckpoint
from pathlib import Path
import numpy as np
import json
class CustomTensorBoard(TensorBoard):
    """
    Extends tensorflow.keras.callbacks TensorBoard
    Custom tensorboard class to make logging of learning rate possible when using
    keras.optimizers.schedules.LearningRateSchedule.
    See https://github.com/tensorflow/tensorflow/pull/37552
    Also logs momemtum for supported optimizers that use momemtum.
    """

    def __init__(self, *args, **kwargs):
        # dump_history: when truthy, epoch logs are also written as JSON files
        # under <log_dir>/history (see on_epoch_end).
        self.dump_history = kwargs.pop("dump_history")
        super().__init__(*args, **kwargs)

    def _collect_learning_rate(self, logs):
        """Add learning_rate (plus momentum/adam_beta_1 when present) to *logs*."""
        logs = logs or {}
        lr_schedule = getattr(self.model.optimizer, "lr", None)
        if isinstance(lr_schedule, tf.keras.optimizers.schedules.LearningRateSchedule):
            # Evaluate the schedule at the optimizer's current iteration count.
            logs["learning_rate"] = np.float64(tf.keras.backend.get_value(lr_schedule(self.model.optimizer.iterations)))
        else:
            logs.update({"learning_rate": np.float64(tf.keras.backend.eval(self.model.optimizer.lr))})
        # Log momentum if the optimizer has it
        try:
            logs.update({"momentum": np.float64(tf.keras.backend.eval(self.model.optimizer.momentum))})
        except AttributeError:
            pass
        # In Adam, the momentum parameter is called beta_1
        if isinstance(self.model.optimizer, tf.keras.optimizers.Adam):
            logs.update({"adam_beta_1": np.float64(tf.keras.backend.eval(self.model.optimizer.beta_1))})
        return logs

    def on_epoch_end(self, epoch, logs):
        """Augment epoch logs with optimizer state; optionally dump them as JSON."""
        logs = logs or {}
        logs.update(self._collect_learning_rate(logs))
        if self.dump_history:
            history_path = Path(self.log_dir) / "history"
            history_path.mkdir(parents=True, exist_ok=True)
            history_path = str(history_path)
            with open("{}/history_{}.json".format(history_path, epoch), "w") as fi:
                # Cast numpy scalars to plain floats so json can serialize them.
                converted_logs = {k: float(v) for k, v in logs.items()}
                json.dump(converted_logs, fi)
        super().on_epoch_end(epoch, logs)

    def on_train_batch_end(self, batch, logs):
        """Also log the learning rate on update-frequency batch boundaries."""
        logs = logs or {}
        if isinstance(self.update_freq, int) and batch % self.update_freq == 0:
            logs.update(self._collect_learning_rate(logs))
        super().on_train_batch_end(batch, logs)
class CustomModelCheckpoint(ModelCheckpoint):
    """Extends tensorflow.keras.callbacks.ModelCheckpoint to also save optimizer"""

    def __init__(self, *args, **kwargs):
        # Added arguments
        # optimizer_to_save: optimizer object pickled alongside each checkpoint.
        # optimizer_save_filepath: format string; may use {epoch} and log keys.
        self.optimizer_to_save = kwargs.pop("optimizer_to_save")
        self.optimizer_filepath = kwargs.pop("optimizer_save_filepath")
        super().__init__(*args, **kwargs)
        Path(self.filepath).parent.mkdir(parents=True, exist_ok=True)

    def on_epoch_end(self, epoch, logs=None):
        """After the normal checkpoint logic, pickle the optimizer when a model was saved."""
        super().on_epoch_end(epoch, logs)
        # If a checkpoint was saved, also save the optimizer
        filepath = str(self.optimizer_filepath).format(epoch=epoch + 1, **logs)
        if self.epochs_since_last_save == 0:
            if self.save_best_only:
                current = logs.get(self.monitor)
                # NOTE(review): exact float equality with self.best stands in for
                # "a new best was just recorded" — presumably reliable because
                # ModelCheckpoint assigns self.best = current on improvement; confirm.
                if current == self.best:
                    with open(filepath, "wb") as f:
                        pickle.dump(self.optimizer_to_save, f)
            else:
                with open(filepath, "wb") as f:
                    pickle.dump(self.optimizer_to_save, f)
|
import requests
from datetime import datetime
import smtplib
"""
1XX: Hold None
2XX: Here You Go
3XX: Go Away
4XX: You Screwed Up
5XX: I Screwed Up
https://httpstatuses.com/
"""
# Observer coordinates (Taipei) used for ISS proximity and sunrise/sunset lookups.
MY_LAT = 25.032969
MY_LONG = 121.565414
def is_iss_overhead():
    """Return True when the ISS is within +/-5 degrees of the observer position.

    Queries the open-notify ISS position API; raises on HTTP errors.
    (Debug print statements from the original were removed.)
    """
    response = requests.get(url="http://api.open-notify.org/iss-now.json")
    response.raise_for_status()
    data = response.json()
    longitude = float(data["iss_position"]["longitude"])
    latitude = float(data["iss_position"]["latitude"])
    # A 5-degree square box around the observer counts as "overhead".
    return (MY_LAT - 5 <= latitude <= MY_LAT + 5
            and MY_LONG - 5 <= longitude <= MY_LONG + 5)
# if response.status_code == 404:
# raise Exception("That resource doesn't exist.")
# elif response.status_code == 401:
# raise Exception("You are not authorized to access this data.")
# Query parameters for api.sunrise-sunset.org; formatted=0 requests
# ISO-8601 timestamps (which get_hour parses by splitting on "T").
parameter = {
    "lat": MY_LAT,
    "lng": MY_LONG,
    "formatted": 0
}
def get_hour(date_time):
    """Extract the hour from an ISO timestamp like '2021-01-01T06:30:00+00:00'."""
    time_part = date_time.split("T")[1]
    return int(time_part.split(":")[0])
def is_night():
    """Return True when the current hour is outside the daylight window.

    NOTE(review): compares API hours (UTC, formatted=0) against the naive
    local datetime.now() hour — verify the timezone mix is intended.
    (Debug print statements from the original were removed.)
    """
    response = requests.get(url=" https://api.sunrise-sunset.org/json", params=parameter)
    response.raise_for_status()
    data = response.json()
    sunrise_hour = get_hour(data["results"]["sunrise"])
    sunset_hour = get_hour(data["results"]["sunset"])
    now_hour = datetime.now().hour
    return sunset_hour <= now_hour or now_hour <= sunrise_hour
# NOTE(security): email credentials are hard-coded in source. Move them to
# environment variables or a secrets store and rotate this password.
# Sends a bare test email whenever the script runs at night.
if is_night():
    connection = smtplib.SMTP("smtp.gmail.com")
    connection.starttls()
    connection.login("cindychen.co@gmail.com", "ichrock0826")
    connection.sendmail(from_addr="cindychen.co@gmail.com",
                        to_addrs="cindychen.co@gmail.com",
                        msg="Subject: Test")
import pdb
import numpy as np
import operator
import copy
import csv
# Binary (+1, -1) prediction. This assumes that the data points are
# the ROWS of the matrix!! The last column of the data matrix are the
# labels.
class DTN:
    """Decision-tree node for binary (+1, -1) classification.

    Data points are the ROWS of the matrix; the last column holds the labels.
    """

    N_THRESHOLD = 4               # don't split if node has fewer examples than this
    H_THRESHOLD = .01             # don't split if node has entropy less than this
    H_REDUCTION_THRESHOLD = .001  # don't split if it doesn't reduce H by this
    index = 0                     # class-wide counter handing out unique node ids

    def __init__(self, data=None, config=None):
        """config, when given, is (N_THRESHOLD, H_THRESHOLD, H_REDUCTION_THRESHOLD)."""
        self.config = config
        if config is not None:
            self.N_THRESHOLD = config[0]
            self.H_THRESHOLD = config[1]
            self.H_REDUCTION_THRESHOLD = config[2]
        DTN.index += 1
        self.index = DTN.index      # unique number
        self.data = data            # store the data
        self.p = None               # prob of positive - set in set_h
        if data is not None:
            self.n = float(data.shape[0])          # number of data points
            self.indices = range(data.shape[1] - 1)  # feature indices
            self.set_h()                            # compute entropy
        self.splits = {}
        # The test is data[:, fi] < th
        self.fi = None              # feature index
        self.th = None              # threshold
        self.lc = None              # left child
        self.rc = None              # right child
        self.parent = None          # parent

    def split(self, i, th):
        """Split this node on feature i at threshold th, creating children."""
        self.fi = i
        self.th = th
        # BUG FIX: the original referenced an undefined name `d` here.
        self.lc = DTN(self.data[self.data[:, i] < th], config=self.config)
        self.rc = DTN(self.data[self.data[:, i] >= th], config=self.config)
        self.splits[i].remove(th)

    def split_eval(self, i, th):
        """Evaluate a candidate split by weighted average child entropy."""
        lc = DTN(self.data[self.data[:, i] < th], config=self.config)
        rc = DTN(self.data[self.data[:, i] >= th], config=self.config)
        pl = lc.n / self.n
        pr = 1.0 - pl
        avgH = pl * lc.h + pr * rc.h
        return avgH, lc, rc

    def set_h(self):
        """Entropy of class labels in this node; assumes labels are 1 / -1.

        A small smoothing constant keeps p strictly inside (0, 1) so the
        logs are always defined.
        """
        b = .001
        npos = np.sum(self.data[:, -1] == 1)  # count labels = 1
        p = (npos + b) / (self.n + b + b)
        self.p = p
        self.h = -(p * np.log(p) + (1 - p) * np.log(1 - p))

    def build_tree(self):
        """Recursively grow the tree until a stopping threshold triggers."""
        if self.h < self.H_THRESHOLD or self.n <= self.N_THRESHOLD:
            return
        # Find the best split (lowest weighted entropy, so argmax of -h).
        (i, th, (h, lc, rc)) = argmax([(i, th, self.split_eval(i, th))
                                       for i in self.indices
                                       for th in self.get_splits(i)],
                                      lambda x: -x[2][0])  # x=(a,b,(h,c,d))
        if (self.h - h) < self.H_REDUCTION_THRESHOLD:
            return
        self.fi = i
        self.th = th
        self.lc = lc
        self.rc = rc
        self.lc.parent = self
        self.rc.parent = self
        self.lc.build_tree()
        self.rc.build_tree()

    def get_splits(self, i):
        """Candidate thresholds: midpoints between consecutive sorted values.

        Stored as a list (BUG FIX: the original cached an ndarray, which has
        no .remove and broke split()).
        """
        if i not in self.splits:
            d = np.sort(self.data[:, i], axis=None)
            self.splits[i] = list((d[:-1] + d[1:]) / 2.0)
        return self.splits[i]

    def classify(self, x):
        """Return the probability of the positive class for data point x."""
        if self.fi is None:              # leaf (original compared with ==)
            return self.p
        elif x[self.fi] < self.th:
            return self.lc.classify(x)   # satisfies test, left branch
        else:
            return self.rc.classify(x)   # fails test, right branch

    def display(self, depth=0, max_depth=3):
        """Pretty-print the tree down to max_depth levels."""
        if depth > max_depth:
            print(depth*' ', 'Depth >', max_depth)
            return
        if self.fi is None:
            print(depth*' ', '=>', "%.2f" % self.p, '[', 'n=', self.n, ']')
            return
        print(depth*' ', 'Feat', self.fi, '<', self.th, '[', 'n=', self.n, ']')
        self.lc.display(depth+1, max_depth)
        self.rc.display(depth+1, max_depth)
def DT(X, Y, config = None):
    """Build a decision tree from feature rows ``X`` and label column ``Y``.

    Labels are appended as the final data column, the root is grown to
    completion, and the root node is returned.
    """
    labeled = np.hstack([X, Y])  # points are rows of X; Y becomes last column
    root = DTN(labeled, config = config)
    root.build_tree()
    return root
def classification_error_DT(dt, X, Y):
    # NOTE(review): despite the name, this returns the classification
    # ACCURACY (fraction of correctly signed predictions), not the error —
    # dt_eval prints its result as 'Test accuracy'.  Renaming would break
    # callers, so only documenting it here.
    # classify yields P(positive) in [0, 1]; subtracting 0.5 gives a signed
    # score, and sign(Y * score) > 0 marks a correct prediction.
    pred = np.array([np.apply_along_axis(dt.classify,1,X)]).T - 0.5 # predicts +,-
    return np.mean(np.sign(Y * pred) > 0.0)
# Evaluate on a train/test split
def dt_eval(trainingX, trainingY, testX, testY, max_depth=5, verbose=True, config = None):
    """Train a tree on the training split and score it on the test split.

    When ``verbose``, prints the tree (down to ``max_depth``) and the
    accuracy.  Returns the fraction of correctly classified test points.
    """
    tree = DT(trainingX, trainingY, config = config)
    accuracy = classification_error_DT(tree, testX, testY)
    if verbose:
        tree.display(max_depth=max_depth)
        print('Test accuracy', accuracy)
    return accuracy
def argmax(l, f):
    """
    @param l: C{List} of items
    @param f: C{Procedure} that maps an item into a numeric score
    @returns: the element of C{l} that has the highest score
    """
    # max with a key returns the first maximal element, matching the
    # original vals.index(max(vals)) tie-breaking, and raises ValueError
    # on an empty sequence just as max(vals) did.
    return max(l, key=f)
######################################################################
# For auto dataset (same as in HW 3, except returns data in rows)
######################################################################
def load_auto_data(path_data='auto-mpg.tsv'):
    """
    Read the tab-separated auto-mpg file at ``path_data``.

    Returns a list of dicts, one per row, keyed by column name; the known
    numeric columns are converted to float when the cell is non-empty.
    """
    numeric_fields = {'mpg', 'cylinders', 'displacement', 'horsepower', 'weight',
                      'acceleration', 'model_year', 'origin'}
    rows = []
    with open(path_data) as f_data:
        for row in csv.DictReader(f_data, delimiter='\t'):
            rows.append({
                field: (float(value)
                        if field in numeric_fields and value else value)
                for field, value in row.items()
            })
    return rows
# Feature transformations
def std_vals(data, f):
    """Return ``(mean, population_std_dev)`` of field ``f`` over ``data``."""
    values = [entry[f] for entry in data]
    mean = sum(values) / len(values)
    variance = sum((v - mean) ** 2 for v in values) / len(values)
    return (mean, variance ** 0.5)
def standard(v, std):
    """Standardize ``v`` given ``std = (mean, std_dev)``; returns a 1-list."""
    mean, std_dev = std
    return [(v - mean) / std_dev]
def raw(x):
    """Identity feature transform: wrap ``x`` in a singleton list."""
    return [x]
def one_hot(v, entries):
    """One-hot encode ``v`` against the ordered ``entries`` list.

    Raises ValueError (via ``list.index``) when ``v`` is not present.
    """
    encoding = [0] * len(entries)
    encoding[entries.index(v)] = 1
    return encoding
# The class (mpg) added to the front of features (points are rows)
# Note that mpg has been made a discrete variable with value 1 or -1 representing good or bad miles per gallon
def auto_data_and_labels(auto_data, features):
    """Encode ``auto_data`` rows into a numeric feature matrix and labels.

    Applies each (field, transform) pair; prints the field statistics and
    the meaning of each feature column, then shuffles the rows with a
    fixed seed.  Returns ``(X, Y)`` with the mpg label as column vector Y.
    """
    features = [('mpg', raw)] + features
    # Pre-compute per-field stats needed by the transforms.
    std = {f:std_vals(auto_data, f) for (f, phi) in features if phi==standard}
    # NOTE(review): set() ordering here depends on hashing, so one_hot
    # column order may vary across interpreter runs — confirm acceptable.
    entries = {f:list(set([entry[f] for entry in auto_data])) \
               for (f, phi) in features if phi==one_hot}
    print('avg and std', std)
    print('entries in one_hot field', entries)
    findex = 0
    # Print the meaning of the features
    for (f, phi) in features[1:]:
        if phi == standard:
            print(findex, f, 'std')
            findex += 1
        elif phi == one_hot:
            for entry in entries[f]:
                print(findex, f, entry, 'one_hot')
                findex += 1
        else:
            print(findex, f, 'raw')
            findex += 1
    # Transform every row; each row becomes a 1 x d array so vstack stacks them.
    vals = []
    for entry in auto_data:
        phis = []
        for (f, phi) in features:
            if phi == standard:
                phis.extend(phi(entry[f], std[f]))
            elif phi == one_hot:
                phis.extend(phi(entry[f], entries[f]))
            else:
                phis.extend(phi(entry[f]))
        vals.append(np.array([phis]))
    data_labels = np.vstack(vals)
    # Deterministic shuffle; NOTE: reseeds numpy's GLOBAL RNG as a side effect.
    np.random.seed(0)
    np.random.shuffle(data_labels)
    # mpg (column 0) is the label; the rest are features.
    return data_labels[:, 1:], data_labels[:, 0:1]
# USE FOR QUESTION 1.2 AND PARAMETER TUNING
# The choice of feature processing for each feature, mpg is always raw and
# does not need to be specified. Other choices are standard and one_hot.
# 'name' is not numeric and would need a different encoding.
features = [('cylinders', one_hot),
('displacement', standard),
('horsepower', standard),
('weight', standard),
('acceleration', standard),
## Drop model_year by default
## ('model_year', raw),
('origin', one_hot)]
# USE FOR QUESTION 1.3
# A small feature set
'''
features = [('weight', standard),
('origin', one_hot)]
'''
'''
# USE FOR QUESTION 1.4
# The choice of feature processing for each feature, mpg is always raw and
# does not need to be specified. Other choices are standard and one_hot.
# 'name' is not numeric and would need a different encoding.
features = [('weight', raw),
('origin', raw)]
'''
# Returns a list of dictionaries. Keys are the column names, including mpg.
auto_data_all = load_auto_data('auto-mpg.tsv')
# Construct the standard data and label arrays
auto_data, auto_labels = auto_data_and_labels(auto_data_all, features)
######################################################################
# Apply the decision tree to the auto data
######################################################################
# Run a single train/test split on the module-level auto data.
def auto_test(pct=0.25): # pct is for test
    """Evaluate one random train/test split; ``pct`` is the test fraction."""
    X, Y = auto_data, auto_labels
    n = X.shape[0]
    order = np.random.permutation(n)  # randomize the data set
    cut = int((1 - pct) * n)  # size of the training split
    train_idx, test_idx = order[:cut], order[cut:]
    return dt_eval(X[train_idx, :], Y[train_idx, :],
                   X[test_idx, :], Y[test_idx, :])
# Cross validate with k folds
def auto_xval(k=10, verbose = True, config = [1, 0.01, 0.001]):
    """Average dt_eval accuracy over ``k`` folds of the module auto data.

    ``config`` is forwarded unchanged to the tree via dt_eval; it is only
    read here, so the mutable default list is safe (kept for interface
    compatibility).  Field meanings are defined by DTN — TODO confirm.
    Returns the mean fold accuracy.
    """
    indices = np.random.permutation(auto_data.shape[0])
    X = auto_data[indices,:]
    Y = auto_labels[indices,:]
    s_data = np.array_split(X, k, axis=0)
    s_labels = np.array_split(Y, k, axis=0)
    score_sum = 0
    for i in range(k):
        # Fold i is the test set; everything else trains.
        data_train = np.concatenate(s_data[:i] + s_data[i+1:], axis=0)
        labels_train = np.concatenate(s_labels[:i] + s_labels[i+1:], axis=0)
        data_test = np.array(s_data[i])
        labels_test = np.array(s_labels[i])
        score_sum += dt_eval(data_train, labels_train, data_test, labels_test, verbose = verbose, config = config)
    # Bug fix: corrected misspelled output string 'accuraracy' -> 'accuracy'.
    print('Xval accuracy', score_sum/k)
    return score_sum/k
# Module-level smoke run: announce the module, then run (and print) the
# default 10-fold cross-validation.
# NOTE(review): running cross-validation at import time is a heavy side
# effect; consider guarding with `if __name__ == '__main__':`.
print('Loaded decision_tree.py')
print(auto_xval())
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
medianprops = dict(color="r")
whiskerprops = dict(linestyle='--')
df = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/blockTime/blockTime.csv')
dfx=df[['InP1', 'InP2','InP3','InP4','InP5']]
data = [dfx['InP1'], dfx['InP2'], dfx['InP3'] ,dfx['InP4'], dfx['InP5']]
"""
#Analytic block time
max_time = 10;
A = zeros(10,10,10,10,10);
for i = 1:max_time
for j = 1:max_time
for k = 1:max_time
for l = 1:max_time
for m = 1:max_time
A(i,j,k,l,m) = min([i,j,k,l,m]);
end
end
end
end
end
B = mean(mean(mean(mean(mean(A)))))
"""
fig, ax = plt.subplots(figsize=(7,5))
ax.boxplot(data, 0, '', medianprops=medianprops,whiskerprops=whiskerprops)
plt.plot([1,2,3,4,5,6], [5.5,3.85,3.025,2.53,2.2083,1.9784], 'ro', label= "analytical")
plt.plot([1,2,3,4,5,6], [-1,-1,-1,-1,-1,-1], 'ro', label= "real-world")
ax.yaxis.grid(linestyle=':',linewidth=1.5)
ax.set_xlim(xmin=0)
ax.set_ylim(ymin=0, ymax=10)
ax.legend()
plt.xlabel('# InPs')
plt.ylabel('average block time ' + 'E[' + r'$t_b]$')
plt.savefig('ev_block_time.png')
plt.show();
|
import itertools
from typing import Any, Callable, Optional, Tuple, Type, Union
import pytest
from antidote import Constants, Factory, Provide, Service, world, service
from antidote._compatibility.typing import Protocol
from antidote.core import Wiring
class Interface:
    """Empty marker type used as a stand-in interface in the wiring tests."""
class FactoryOutput:
    """Empty marker type: the return annotation of Dummy.__call__."""
class FactoryOutput2:
    """Empty marker type: the return annotation of SubDummy.__call__."""
class A:
    """Empty dependency type registered as a test-world singleton."""
class B:
    """Empty dependency type registered as a test-world singleton."""
@pytest.fixture(autouse=True)
def new_world():
    """Give every test a fresh antidote world seeded with the singletons
    the wiring tests resolve (A, B and five opaque string-keyed objects)."""
    with world.test.new():
        singletons = {A: A(), B: B()}
        singletons.update((key, object()) for key in ('a', 'b', 'x', 'y', 'z'))
        world.test.singleton(singletons)
        yield
class DummyProtocol(Protocol):
    """Structural type describing the Dummy classes produced by builder();
    used only for type annotations (bodies are intentionally empty)."""
    a: A
    b: B
    def __init__(self, a: A = None, b: B = None):
        pass
    # Annotated method: eligible for auto_provide injection.
    def method_AB(self, a: A = None, b: B = None) -> Tuple[A, B]:
        pass
    # Un-annotated method: injected only via explicit dependencies.
    def method_ab(self, a=None, b=None) -> Tuple[Any, Any]:
        pass
    # Three-argument method used by the distinct-arguments tests.
    def method_xyz(self, x=None, y=None, z=None) -> Tuple[Any, Any, Any]:
        pass
# Sentinel passed to builder(): request the class's DEFAULT wiring (i.e.
# no explicit Wiring argument), as opposed to None which disables wiring.
DEFAULT_WIRING = object()
def builder(cls_or_decorator: Union[type, Callable[..., Any]],
            wiring_kind: str = 'Wiring',
            subclass: bool = False):
    """Return a ``build(wiring)`` closure producing a wired Dummy class.

    ``cls_or_decorator`` is either an antidote base class (Factory /
    Service / Constants) or a class decorator (e.g. ``service``);
    ``wiring_kind`` selects between ``Conf(wiring=...)`` and
    ``Conf().with_wiring(...)``; ``subclass`` additionally derives a
    SubDummy from an abstract Dummy.
    """
    meta_kwargs = dict(abstract=True) if subclass else dict()
    if isinstance(cls_or_decorator, type):
        cls = cls_or_decorator
        # Base-class mode: no decoration needed, use an identity decorator.
        def decorator(wiring=None):
            return lambda x: x
    else:
        cls = object
        decorator = cls_or_decorator
    def build(wiring: Wiring = None):
        """Create the Dummy (or SubDummy) class for the given wiring."""
        # DEFAULT_WIRING means: call the decorator with no arguments.
        decorate = (decorator(wiring=wiring)
                    if wiring is not DEFAULT_WIRING else
                    decorator())
        @decorate
        class Dummy(cls, **meta_kwargs):
            # Class-body conditional: only antidote base classes get an
            # explicit __antidote__ configuration.
            if wiring is not DEFAULT_WIRING and cls is not object:
                if wiring is not None:
                    if wiring_kind == 'Wiring':
                        __antidote__ = cls.Conf(wiring=wiring)
                    else:
                        # Re-express the Wiring through with_wiring kwargs.
                        __antidote__ = cls.Conf().with_wiring(**{
                            attr: getattr(wiring, attr)
                            for attr in Wiring.__slots__
                        })
                else:
                    __antidote__ = cls.Conf(wiring=None)
            def __init__(self, a: Provide[A] = None, b: Provide[B] = None):
                super().__init__()
                self.a = a
                self.b = b
            def method_AB(self, a: A = None, b: B = None) -> Tuple[A, B]:
                return a, b
            def method_ab(self, a=None, b=None) -> Tuple[Any, Any]:
                return a, b
            def method_xyz(self, x=None, y=None, z=None) -> Tuple[Any, Any, Any]:
                return x, y, z
            def __call__(self) -> FactoryOutput: # for Factory
                pass
            def get(self): # for Constants
                pass
        if subclass:
            class SubDummy(Dummy):
                def __call__(self) -> FactoryOutput2: # for Factory
                    pass
            return SubDummy
        else:
            return Dummy
    return build
# Parametrized over @service plus every (base class, wiring kind) pair.
@pytest.fixture(params=[
    pytest.param((builder, service), id="@service"),
    *[
        pytest.param((builder, c, w), id=f"{c.__name__} - {w}")
        for (c, w) in itertools.product([Factory,
                                         Service,
                                         Constants],
                                        ['with_wiring', 'Wiring'])
    ]
])
def class_builder(request):
    # request.param is (builder, *builder_args); return the build() closure.
    f, *args = request.param
    return f(*args)
# Parametrized over every (base class, wiring kind) pair, subclass mode.
@pytest.fixture(params=[
    # Bug fix: the id interpolated a literal 'w' (e.g. "Factory - w"),
    # producing duplicate param ids; use {w} as in class_builder above.
    pytest.param((c, w), id=f"{c.__name__} - {w}")
    for (c, w) in itertools.product([Factory,
                                     Service,
                                     Constants],
                                    ['with_wiring', 'Wiring'])
])
def subclass_builder(request):
    """Return a build() closure that creates an abstract Dummy + SubDummy."""
    (cls, wiring_kind) = request.param
    return builder(cls, wiring_kind, subclass=True)
# Type alias for the fixtures above: maps an Optional[Wiring] to a Dummy class.
F = Callable[[Optional[Wiring]], Type[DummyProtocol]]
def test_default(class_builder: F):
    """Default wiring injects A/B into __init__ via Provide[...] hints,
    but leaves merely-annotated methods uninjected."""
    instance = class_builder(DEFAULT_WIRING)()
    assert instance.a is world.get(A)
    assert instance.b is world.get(B)
    a, b = instance.method_AB()
    assert a is None and b is None
def test_no_wiring(class_builder: F):
    """wiring=None disables injection entirely, even for __init__."""
    instance = class_builder(None)()
    assert instance.a is None
    assert instance.b is None
def test_methods(class_builder: F):
    """Only the methods listed in Wiring(methods=...) are wired."""
    only_init = class_builder(Wiring(methods=('__init__',)))()
    assert only_init.a is world.get(A)
    assert only_init.b is world.get(B)
    a, b = only_init.method_AB()
    assert a is None and b is None
    only_method = class_builder(Wiring(methods=('method_AB',),
                                       auto_provide=True))()
    assert only_method.a is None
    assert only_method.b is None
    a, b = only_method.method_AB()
    assert a is world.get(A)
    assert b is world.get(B)
def test_auto_provide(class_builder: F):
    """auto_provide (bool / whitelist / predicate) controls whether plain
    type hints are resolved as dependencies."""
    def ab_with(auto_provide):
        dummy = class_builder(Wiring(methods=('method_AB',),
                                     auto_provide=auto_provide))()
        return dummy.method_AB()
    # Default: type hints alone are not injected.
    a, b = class_builder(Wiring(methods=('method_AB',)))().method_AB()
    assert a is None and b is None
    a, b = ab_with(True)
    assert a is world.get(A) and b is world.get(B)
    a, b = ab_with(False)
    assert a is None and b is None
    # Whitelist: only A is injected.
    a, b = ab_with([A])
    assert a is world.get(A) and b is None
    # Predicates over the annotated class.
    a, b = ab_with(lambda cls: True)
    assert a is world.get(A) and b is world.get(B)
    a, b = ab_with(lambda cls: issubclass(cls, A))
    assert a is world.get(A) and b is None
    a, b = ab_with(lambda cls: False)
    assert a is None and b is None
def test_dependencies_dict(class_builder: F):
    """dependencies given as a dict override injection per argument name."""
    def check(deps, expected_AB, expected_ab):
        dummy = class_builder(Wiring(dependencies=deps,
                                     auto_provide=True))()
        # __init__ wiring is unaffected by method dependency overrides.
        assert dummy.a is world.get(A)
        assert dummy.b is world.get(B)
        a, b = dummy.method_AB()
        assert a is expected_AB[0] and b is expected_AB[1]
        a, b = dummy.method_ab()
        assert a is expected_ab[0] and b is expected_ab[1]
    check(dict(), (world.get(A), world.get(B)), (None, None))
    check(dict(a='x', b='y'),
          (world.get('x'), world.get('y')),
          (world.get('x'), world.get('y')))
    check(dict(b='y'),
          (world.get(A), world.get('y')),
          (None, world.get('y')))
def test_dependencies_seq(class_builder: F):
    """dependencies given as a sequence override injection positionally;
    None entries fall back to auto_provide behavior."""
    def check(deps, expected_AB, expected_ab):
        dummy = class_builder(Wiring(dependencies=deps,
                                     auto_provide=True))()
        assert dummy.a is world.get(A)
        assert dummy.b is world.get(B)
        a, b = dummy.method_AB()
        assert a is expected_AB[0] and b is expected_AB[1]
        a, b = dummy.method_ab()
        assert a is expected_ab[0] and b is expected_ab[1]
    check([], (world.get(A), world.get(B)), (None, None))
    check([None, None], (world.get(A), world.get(B)), (None, None))
    check(['x', 'y'],
          (world.get('x'), world.get('y')),
          (world.get('x'), world.get('y')))
    check([None, 'y'],
          (world.get(A), world.get('y')),
          (None, world.get('y')))
    check(['x', None],
          (world.get('x'), world.get(B)),
          (world.get('x'), None))
def test_dependencies_callable(class_builder: F):
    """dependencies given as a callable resolve per argument descriptor;
    returning None falls back to auto_provide behavior."""
    def check(deps, expected_AB, expected_ab):
        dummy = class_builder(Wiring(dependencies=deps,
                                     auto_provide=True))()
        assert dummy.a is world.get(A)
        assert dummy.b is world.get(B)
        a, b = dummy.method_AB()
        assert a is expected_AB[0] and b is expected_AB[1]
        a, b = dummy.method_ab()
        assert a is expected_ab[0] and b is expected_ab[1]
    check(lambda arg: None,
          (world.get(A), world.get(B)),
          (None, None))
    check(lambda arg: 'x' if arg.name == 'a' else 'y',
          (world.get('x'), world.get('y')),
          (world.get('x'), world.get('y')))
    check(lambda arg: 'x' if arg.name == 'a' else None,
          (world.get('x'), world.get(B)),
          (world.get('x'), None))
def test_distinct_arguments(class_builder: F):
    """Wiring tolerates methods with differing argument lists."""
    # Extra entries in a positional dependency list are simply ignored by
    # shorter methods.
    wired = class_builder(Wiring(methods=('method_AB', 'method_xyz'),
                                 dependencies=['x', None, 'z'],
                                 auto_provide=True))()
    a, b = wired.method_AB()
    assert a is world.get('x')
    assert b is world.get(B)
    x, y, z = wired.method_xyz()
    assert x is world.get('x')
    assert y is None
    assert z is world.get('z')
    # Unknown argument names in a dependency dict raise no error.
    wired = class_builder(Wiring(methods=('method_AB', 'method_xyz'),
                                 dependencies=dict(b='b', y='y'),
                                 auto_provide=True))()
    a, b = wired.method_AB()
    assert a is world.get(A)
    assert b is world.get('b')
    x, y, z = wired.method_xyz()
    assert x is None
    assert y is world.get('y')
    assert z is None
    # auto_provide restricted to A: only A-annotated arguments are injected.
    wired = class_builder(Wiring(methods=('method_AB', 'method_xyz'),
                                 auto_provide=[A]))()
    a, b = wired.method_AB()
    assert a is world.get(A)
    assert b is None
    x, y, z = wired.method_xyz()
    assert x is None and y is None and z is None
|
import logging
import pickle
import re
from functools import lru_cache
from pathlib import Path
from typing import NamedTuple, Optional
import requests
from cartopy.crs import PlateCarree
from ...core.mixins import ShapelyMixin, PointMixin
class AirportNamedTuple(NamedTuple):
    """Immutable airport record (FlightRadar24 fields) with long-name
    aliases for lat/lon/alt."""
    alt: int        # altitude in the source data's unit — TODO confirm feet
    country: str
    iata: str
    icao: str
    lat: float
    lon: float
    name: str
    # TODO inspect why it fails on some machines...
    # well then it works on Airport
    def __getattr__(self, name):
        # Alias lookup for the long attribute names.
        # NOTE(review): any OTHER unknown attribute silently resolves to
        # None (implicit return) instead of raising AttributeError — this
        # can mask typos and confuse protocols probing for optional
        # attributes; consider raising AttributeError for unknown names.
        if name == "latitude":
            return self.lat
        if name == "longitude":
            return self.lon
        if name == "altitude":
            return self.alt
    def __getstate__(self):
        # NOTE(review): NamedTuple instances normally have no __dict__, so
        # this likely goes through __getattr__ and returns None here; it
        # works for subclasses (e.g. Airport) that do get a __dict__.
        return self.__dict__
    def __setstate__(self, d):
        self.__dict__.update(d)
class Airport(AirportNamedTuple, ShapelyMixin):
    """An airport record with display helpers and an OpenStreetMap-backed
    shape (via cartotools) for plotting on cartopy axes."""
    def __getattr__(self, name):
        # Same aliasing as AirportNamedTuple (duplicated per the TODO there).
        # NOTE(review): unknown attributes silently resolve to None.
        if name == "latitude":
            return self.lat
        if name == "longitude":
            return self.lon
        if name == "altitude":
            return self.alt
    def __repr__(self):
        # Compact form: strip the noisy "International"/"Airport" words.
        short_name = (
            self.name.replace("International", "")
            .replace("Airport", "")
            .strip()
        )
        return f"{self.icao}/{self.iata}: {short_name}"
    def __str__(self):
        return (
            f"{self.icao}/{self.iata} {self.name.strip()} ({self.country})"
            f"\n\t{self.lat} {self.lon} altitude: {self.alt}"
        )
    def _repr_html_(self):
        # Jupyter rich display: title line plus the inline SVG shape.
        title = f"<b>{self.name.strip()}</b> ({self.country}) "
        title += f"<code>{self.icao}/{self.iata}</code>"
        no_wrap_div = '<div style="white-space: nowrap">{}</div>'
        return title + no_wrap_div.format(self._repr_svg_())
    # NOTE(review): lru_cache on an instance method keys on self and keeps
    # instances alive for the cache's lifetime (ruff B019) — acceptable for
    # long-lived airport records, but worth confirming.
    @lru_cache()
    def osm_request(self):
        from cartotools.osm import request, tags
        # Query a ~0.12-degree box centered on the airport.
        return request(
            (self.lon - .06, self.lat - .06, self.lon + .06, self.lat + .06),
            **tags.airport,
        )
    @property
    def shape(self):
        return self.osm_request().shape
    def plot(self, ax, **kwargs):
        """Draw the airport geometry onto a cartopy GeoAxes."""
        params = {
            "edgecolor": "silver",
            "facecolor": "None",
            "crs": PlateCarree(),
            **kwargs,
        }
        ax.add_geometries(list(self.osm_request()), **params)
    @property
    def point(self):
        # Lightweight lat/lon carrier for APIs expecting a PointMixin.
        p = PointMixin()
        p.latitude = self.latitude
        p.longitude = self.longitude
        return p
class AirportParser(object):
    """Loads the FlightRadar24 airport list, optionally caching it on disk.

    Lookup is by exact IATA/ICAO code; search also matches country and
    airport name as regular expressions.
    """
    # Class-level cache path; subclasses/callers may set it to enable caching.
    cache: Optional[Path] = None
    def __init__(self):
        if self.cache is not None and self.cache.exists():
            # NOTE(review): unpickling a cache file executes arbitrary code
            # if the file is tampered with — ensure the path is trusted.
            with open(self.cache, "rb") as fh:
                self.airports = pickle.load(fh)
        else:
            # Fetch the full airport list; a browser user-agent is required.
            c = requests.get(
                "https://www.flightradar24.com/_json/airports.php",
                headers={"user-agent": "Mozilla/5.0"},
            )
            self.airports = list(Airport(**a) for a in c.json()["rows"])
            logging.info("Caching airport list from FlightRadar")
            if self.cache is not None:
                with open(self.cache, "wb") as fh:
                    pickle.dump(self.airports, fh)
    def __getitem__(self, name: str):
        """Return the first airport whose IATA or ICAO code matches, else None."""
        return next(
            (
                a
                for a in self.airports
                if (a.iata == name.upper()) or (a.icao == name.upper())
            ),
            None,
        )
    def search(self, name: str):
        """Return all airports matching by code, or whose country/name
        matches ``name`` as a case-insensitive regex prefix."""
        return list(
            (
                a
                for a in self.airports
                if (a.iata == name.upper())
                or (a.icao == name.upper())
                or (re.match(name, a.country, flags=re.IGNORECASE))
                or (re.match(name, a.name, flags=re.IGNORECASE))
            )
        )
|
# -*- coding: utf-8 -*-
# Copyright 2019 Julian Betz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from datetime import datetime
import glob
import shutil
from collections.abc import Iterable, Mapping
import math
import numpy as np
from numpy.random import RandomState
import sklearn
import tensorflow as tf
from tensorflow.train import SessionRunHook, get_or_create_global_step
from tensorflow.estimator import Estimator, EstimatorSpec, ModeKeys, RunConfig
from tensorflow.data import Dataset
from tensorflow.contrib.cudnn_rnn import CudnnCompatibleLSTMCell
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.contrib.estimator import stop_if_no_decrease_hook
import pickle
from ..language import Language
from ..features.symbol_stream import enumerate_alternatives, sample_alternatives
from ..util.math import prod
from ..util.progress import print_progress
class SaverHook(SessionRunHook):
    """A helper class that allows to save the model to one directory during training
    and to provide the best model from a different directory for production
    mode.
    """
    def __init__(self, model_dir):
        # Target directory for the extra checkpoint written at session end.
        self._model_dir = model_dir
    def end(self, session):
        # Reuse the graph's first registered Saver to checkpoint into
        # _model_dir, tagged with the current global step.
        session.graph.get_collection('savers')[0].save(
            session,
            self._model_dir + '/model.ckpt',
            get_or_create_global_step(session.graph))
class LanguageModel:
"""A neural language model that estimates the probability of a sentence.
:param str model_dir: Where to store all relevant model data. If
``None``, a generic location based on the current date and time will
be used.
:param dict params: The model parameters.
:param int seed: The seed to use for the underlying Tensorflow graph.
:param str warm_start_from: A directory containing model parameter
values for initialization.
:param bool production_mode: Whether to use the production or the
training model.
:param int save_summary_steps: The periodicity at which to save
summaries.
:param int keep_checkpoint_max: The maximum number of recent checkpoint
files to keep.
:param yokome.language.Language language: The language of the language model.
:param vocabulary: A mapping from input units to integer values, or a
sequence of input units. This is used to encode the incoming data
numerically. If ``None``, a pickled mapping is expected to be found
in the model directory, named ``encoder.pickle``. Every input unit
that is not found in this vocabulary is considered to be an
out-of-vocabulary unit.
"""
def __init__(self, model_dir=None, params=None, seed=None, warm_start_from=None, production_mode=False, *, save_summary_steps=100, keep_checkpoint_max=5, language=None, vocabulary=None):
if not isinstance(language, Language):
raise TypeError(type(language).__name__)
if model_dir is None:
model_dir = os.path.abspath(
os.path.dirname(os.path.abspath(__file__))
+ '/../../models'
+ datetime.now().strftime('/%Y-%m-%d_%H:%M:%S.%f'))
self._ESTIMATOR = Estimator(
model_fn=self.model_fn,
config=RunConfig(model_dir=model_dir + ('/best_model' if production_mode else '/training'),
tf_random_seed=seed,
save_summary_steps=save_summary_steps,
# Force tensorflow to only save checkpoints after
# full epochs
save_checkpoints_steps=(None if production_mode else np.inf),
save_checkpoints_secs=None,
keep_checkpoint_max=keep_checkpoint_max,
keep_checkpoint_every_n_hours=np.inf,
log_step_count_steps=save_summary_steps),
params=params,
warm_start_from=warm_start_from)
self._language = language
if vocabulary is None:
with open(model_dir + '/encoder.pickle', 'rb') as f:
self._ENCODER = pickle.load(f)
else:
if isinstance(vocabulary, Mapping):
self._ENCODER = vocabulary
elif isinstance(vocabulary, Iterable):
self._ENCODER = dict()
for word in vocabulary:
if word not in self._ENCODER:
self._ENCODER[word] = len(self._ENCODER) + 1
else:
raise TypeError('Vocabulary cannot be of type %r'
% (type(vocabulary).__name__,))
os.makedirs(model_dir, exist_ok=True)
with open(model_dir + '/encoder.pickle', 'wb') as f:
pickle.dump(self._ENCODER, f, pickle.HIGHEST_PROTOCOL)
self._INPUT_DTYPES = ({'ids': tf.int64,
'length': tf.int64,
'n': tf.int64,
'contribution': tf.float32},
tf.int64)
self._INPUT_SHAPES = ({'ids': tf.TensorShape((None,)),
'length': tf.TensorShape(()),
'n': tf.TensorShape(()),
'contribution': tf.TensorShape(())},
tf.TensorShape(()))
self._INPUT_PADDING = ({'ids': np.array(len(self._ENCODER) + 1, dtype=np.int64),
'length': np.array(0, dtype=np.int64),
'n': np.array(1, dtype=np.int64),
'contribution': np.array(1.0, dtype=np.float32)},
np.array(0, dtype=np.int64))
# self._INPUT_DUMMY = ({'ids': np.empty((0, 1), dtype=np.int64),
# 'length': np.empty((0,), dtype=np.int64),
# 'n': np.empty((0,), dtype=np.int64),
# 'contribution': np.empty((0,), dtype=np.float32)},
# np.empty((0,), dtype=np.int64))
# XXX Train once on batch of size zero to force tensorflow to initialize
# variables and establish a checkpoint
def _encode(self, words):
return np.array((len(self._ENCODER) + 1,) # Beginning/end of sentence
+ tuple(self._ENCODER[word]
if word in self._ENCODER
else 0 # Unknown word
for word in words),
dtype=np.int64)
def model_fn(self, features, labels, mode, params):
# tf.random.set_random_seed(params['seed'])
ids = features['ids']
# Embedding layer
embeddings = tf.Variable(tf.random.uniform(
(len(self._ENCODER) + 2, params['embedding_size']),
-1.0,
1.0,
tf.float32))
layer = tf.nn.embedding_lookup(embeddings, ids)
# LSTM layer
lengths = features['length']
# XXX Use keras layers instead
layer, _ = tf.nn.dynamic_rnn(
CudnnCompatibleLSTMCell(params['lstm_size']),
layer,
lengths,
dtype=tf.float32)
# MLP layers
for n_units in params['dense_sizes']:
layer = tf.layers.dense(
layer,
n_units,
activation=tf.nn.relu,
kernel_initializer=xavier_initializer(),
bias_initializer=tf.zeros_initializer())
# Softmax layer
layer = tf.layers.dense(
layer,
len(self._ENCODER) + 2,
activation=None,
kernel_initializer=xavier_initializer(),
bias_initializer=tf.zeros_initializer())
labels = tf.concat((ids[:, 1:], ids[:, :1]), 1)
mask = tf.sequence_mask(lengths, dtype=tf.float32)
layer = (tf.nn.sparse_softmax_cross_entropy_with_logits(logits=layer,
labels=labels)
* mask)
if mode == ModeKeys.PREDICT:
return EstimatorSpec(mode, predictions={
'log2_word_probs': -layer,
'log2_sentence_prob': -tf.reduce_sum(layer, 1),
'length': lengths,
'n': features['n'] #,
# 'global_step': tf.identity(get_or_create_global_step())
})
contribution = tf.reshape(features['contribution'], (-1, 1))
loss = tf.reduce_sum(layer * contribution) / tf.reduce_sum(mask)
if mode == ModeKeys.EVAL:
return EstimatorSpec(mode, loss=loss)
elif mode == ModeKeys.TRAIN:
train_op = tf.train.AdamOptimizer().minimize(
loss, global_step=get_or_create_global_step())
train_op = tf.group(
# tf.print(ids),
train_op)
return EstimatorSpec(mode, loss=loss, train_op=train_op)
def _provide_features(self, sentences, sample_size):
for sentence in sentences:
sentence = list(self._language.tokenize(sentence))
if sample_size > 0:
graphic_originals = ''.join(
candidates[0]['surface_form']['graphic']
for candidates in sentence
if 'surface_form' in candidates[0])
phonetic_originals = ''.join(
candidates[0]['surface_form']['phonetic']
for candidates in sentence
if 'surface_form' in candidates[0])
graphic_substitutes = ''.join(
candidates[0]['lemma']['graphic']
for candidates in sentence
if 'surface_form' not in candidates[0])
phonetic_substitutes = ''.join(
candidates[0]['lemma']['phonetic']
for candidates in sentence
if 'surface_form' not in candidates[0])
seed = (hash((graphic_originals,
phonetic_originals,
graphic_substitutes,
phonetic_substitutes))
% 0x100000000)
n = sample_size
sentence = sample_alternatives(sentence, n, seed)
else:
n = prod(len(candidates) for candidates in sentence)
sentence = enumerate_alternatives(sentence)
contribution = 1 / n
for tokens in sentence:
ids = self._encode(self._language.extract(tokens))
yield ({'ids': ids,
'length': np.array(ids.shape[0], dtype=np.int64),
'n': np.array(n, dtype=np.int64),
'contribution': np.array(contribution, dtype=np.float32)},
np.array(0, dtype=np.int64))
def _input_fn(self, sentences, batch_size, sample_size=0):
# if batch_size <= 0:
# raise ValueError('Batch size must be positive')
dataset = Dataset.from_generator(
lambda: self._provide_features(sentences, sample_size),
self._INPUT_DTYPES,
self._INPUT_SHAPES)
dataset = dataset.padded_batch(
batch_size,
self._INPUT_SHAPES,
self._INPUT_PADDING)
return dataset
def train(self, trn_set, evl_set, max_epochs=1, batch_size=1, max_generalization_loss=None, shuffle=False, random_state=None, verbose=False):
"""Train the model.
:param trn_set: A sequence of sentences, a training set. Each
sentence will be tokenized using the language object provided at
language model creation.
:param trn_set: A sequence of sentences, an evaluation set. Each
sentence will be tokenized using the language object provided at
language model creation.
:param int max_epochs: The maximum number of epochs to train for. The
actual number of epochs may be less if the training process stops
early.
:param int batch_size: The number of sentences to estimate the
probability for in parallel.
:param float max_generalization_loss: The maximum generalization loss at
which the training process is still continued.
:param bool shuffle: Whether to shuffle the samples for each epoch.
:param random_state: The random state used for shuffling. May be a
:class:`numpy.RandomState` instance, an ``int`` seed, or ``None``.
If ``None``, an unseeded pseudo-random number generator will be
used.
:param bool verbose: Whether to show progress indicators.
"""
if verbose:
print('Training language model:')
current_model_dir = os.path.abspath(self._ESTIMATOR.model_dir
+ '/../current_model')
best_model_dir = os.path.abspath(self._ESTIMATOR.model_dir
+ '/../best_model')
if os.path.exists(best_model_dir) or os.path.exists(current_model_dir) or glob.glob(self._ESTIMATOR.model_dir + '/*'):
# XXX Prompt user
if verbose:
print(' Overriding model in %r' % (os.path.dirname(self._ESTIMATOR.model_dir),))
for directory in (best_model_dir, current_model_dir, self._ESTIMATOR.model_dir):
try:
shutil.rmtree(directory)
except FileNotFoundError as e:
pass
trn_set = tuple(trn_set)
evl_set = tuple(evl_set)
if random_state is None or isinstance(random_state, int):
random_state = RandomState(random_state)
# XXX Early stopping does currently only take epochs into account that
# happen during this run of ``train``, but not prior, saved checkpoints
min_evl_loss = np.inf
min_evl_loss_epoch = 0
for _ in (print_progress(range(max_epochs),
prefix=lambda i, element: ' |',
suffix=lambda i, element: '| Epoch %d '
% (i if element is None else i + 1,))
if verbose
else range(max_epochs)):
sentences = (sklearn.utils.shuffle(trn_set,
# Ensure that ``random_state`` is run exactly once per epoch
random_state=random_state.randint(0x100000000))
if shuffle
else trn_set)
os.makedirs(current_model_dir, exist_ok=False)
# Train for one epoch
self._ESTIMATOR.train(input_fn=lambda:
self._input_fn(sentences,
batch_size),
hooks=[SaverHook(current_model_dir)],
steps=math.ceil(len(trn_set) / batch_size))
metrics = self._ESTIMATOR.evaluate(input_fn=lambda:
self._input_fn(evl_set,
batch_size),
name='evl')
if min_evl_loss > metrics['loss']:
# XXX Non-atomic replacement
try:
shutil.rmtree(best_model_dir)
except FileNotFoundError as e:
pass
os.replace(current_model_dir, best_model_dir)
min_evl_loss = metrics['loss']
min_evl_loss_epoch = metrics['global_step']
else:
try:
shutil.rmtree(current_model_dir)
except FileNotFoundError as e:
pass
# Early stopping based on generalization loss criterion
if (max_generalization_loss is not None
and ((metrics['loss'] - min_evl_loss) / min_evl_loss
> max_generalization_loss)):
if verbose:
print('\n Stopping early')
break
return min_evl_loss_epoch
def validate(self, vld_set, batch_size=1):
"""Evaluate the model performance on a validation set.
:param vld_set: A sequence of sentences, a validation set. Each
sentence will be tokenized using the language object provided at
language model creation.
:param int batch_size: The number of sentences to estimate the
probability for in parallel.
:return: A dictionary containing the metrics evaluated on the validation
set. Contains an entry ``'loss'`` for the loss and an entry
``'global_step'`` for the global step for which this validation was
performed.
"""
return self._ESTIMATOR.evaluate(input_fn=lambda:
self._input_fn(vld_set,
batch_size),
name='vld')
def estimate_probability(self, sentences, batch_size, sample_size=0):
    """Estimate the probability of the specified sentences.

    :param sentences: A sequence of sentences. Each sentence will be
        tokenized using the language object provided at language model
        creation.
    :param int batch_size: The number of sentences to estimate the
        probability for in parallel.
    :param int sample_size: The number of sentence alternatives to sample.
        If this is greater than zero, for every list of token candidates
        after tokenization, one token candidate is chosen for each sample.
        If it is ``0``, all sentence alternatives (i.e. all combinations of
        token candidates) are enumerated and used for probability
        estimation.
    :return: An iterable over one dictionary per sentence, each of the form

        .. code-block:: python

            {
                'log2_word_probs': <word log-probabilities per alternative>,
                'log2_sentence_probs': <sentence log-probability per alternative>
            }
    """
    def _flush(aggregation):
        # Merge the per-alternative records of one sentence into the
        # single result dictionary documented above.
        return {'log2_word_probs':
                    tuple(a['log2_word_probs'] for a in aggregation),
                'log2_sentence_probs':
                    np.array([a['log2_sentence_prob'] for a in aggregation],
                             dtype=np.float32)}

    aggregation = []
    # Number of alternatives still expected for the current sentence; each
    # prediction carries 'n', the alternative count of the sentence it opens.
    remaining = 0
    for prediction in self._ESTIMATOR.predict(
            input_fn=lambda: self._input_fn(sentences,
                                            batch_size,
                                            sample_size)):
        if remaining <= 0:
            # A new sentence begins here; emit the previous one first.
            if aggregation:
                yield _flush(aggregation)
                aggregation = []
            remaining = prediction['n']
        # Trim padding: only the first 'length' word probabilities are real.
        aggregation.append(
            {'log2_word_probs':
                 prediction['log2_word_probs'][:prediction['length']],
             'log2_sentence_prob': prediction['log2_sentence_prob']})
        remaining -= 1
    # Emit the final sentence, which has no successor to trigger a flush.
    if aggregation:
        yield _flush(aggregation)
def training_dir(self):
    """Return the absolute path of the directory holding the training model.

    The directory is a sibling of the estimator's ``model_dir``.
    """
    return os.path.abspath(
        os.path.join(self._ESTIMATOR.model_dir, '..', 'training'))
def production_dir(self):
    """Return the absolute path of the directory holding the production model.

    The directory is a sibling of the estimator's ``model_dir``.
    """
    return os.path.abspath(
        os.path.join(self._ESTIMATOR.model_dir, '..', 'best_model'))
|
#!/usr/bin/env python3
import asyncio
import os
from datetime import datetime, timedelta
from pngme.api import AsyncClient
async def get_net_cash_flow(
    api_client: "AsyncClient",
    user_uuid: str,
    utc_starttime: datetime,
    utc_endtime: datetime,
) -> float:
    """Compute the net cash flow for a user over a given period.

    No currency conversions are performed. Net cash flow is calculated by
    differencing cash-in (credit) and cash-out (debit) transactions across
    all of a user's depository accounts during the given period.

    Args:
        api_client: Pngme Async API client
        user_uuid: the Pngme user_uuid for the mobile phone user
        utc_starttime: the UTC time to start the time window
        utc_endtime: the UTC time to end the time window

    Returns:
        the net cash flow amount (differencing cash-in (credit) and
        cash-out (debit) transactions)
    """
    # STEP 1: fetch list of institutions belonging to the user.
    institutions = await api_client.institutions.get(user_uuid=user_uuid)

    # Subset to institutions known to contain depository-type accounts for
    # the user; others cannot contribute to depository cash flow.
    institutions_w_depository = [
        inst for inst in institutions if "depository" in inst["account_types"]
    ]

    # STEP 2: fetch depository transactions for all relevant institutions
    # concurrently instead of awaiting each request in turn.
    inst_coroutines = [
        api_client.transactions.get(
            user_uuid=user_uuid,
            institution_id=institution["institution_id"],
            utc_starttime=utc_starttime,
            utc_endtime=utc_endtime,
            account_types=["depository"],
        )
        for institution in institutions_w_depository
    ]
    transactions_by_institution = await asyncio.gather(*inst_coroutines)

    # STEP 3: net cash flow = sum of credits minus sum of debits.
    # Transactions with any other impact value are deliberately ignored.
    cash_in_amount = sum(
        transaction["amount"]
        for transactions in transactions_by_institution
        for transaction in transactions
        if transaction["impact"] == "CREDIT"
    )
    cash_out_amount = sum(
        transaction["amount"]
        for transactions in transactions_by_institution
        for transaction in transactions
        if transaction["impact"] == "DEBIT"
    )
    return cash_in_amount - cash_out_amount
if __name__ == "__main__":
# Mercy Otieno, mercy@pngme.demo.com, 254123456789
user_uuid = "958a5ae8-f3a3-41d5-ae48-177fdc19e3f4"
token = os.environ["PNGME_TOKEN"]
client = AsyncClient(access_token=token)
utc_endtime = datetime(2021, 10, 1)
utc_starttime = utc_endtime - timedelta(days=30)
async def main():
net_cash_flow = await get_net_cash_flow(
api_client=client,
user_uuid=user_uuid,
utc_starttime=utc_starttime,
utc_endtime=utc_endtime,
)
print(net_cash_flow)
asyncio.run(main())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.