content stringlengths 5 1.05M |
|---|
import sys
import requests
import csv
import json
from mmda.types.annotation import SpanGroup, BoxGroup
from mmda.types.document import Document

# PDF pipeline: symbol extraction -> layout detection -> VILA span
# classification, then dump the annotated document as JSON and CSV.
pdf_file = sys.argv[1]
with open(pdf_file, "rb") as f:
    pdf_bytes = f.read()

# Symbol scraper returns the base Document structure for the raw PDF bytes.
doc = Document.from_json(requests.post("http://symbolscraper", data=pdf_bytes).json())

# Layout parser needs page images, hence with_images=True.
resp = requests.post("http://layoutparser", json=doc.to_json(with_images=True))
box_groups = [BoxGroup.from_json(p) for p in resp.json()]
doc.annotate(blocks=box_groups)

# VILA predicts span groups on top of the detected layout blocks.
resp = requests.post("http://vila", json=doc.to_json(with_images=True))
span_groups = [SpanGroup.from_json(p) for p in resp.json()]
doc.annotate(preds=span_groups)

with open("/pipeline/output.json", "w") as f:
    json.dump(doc.to_json(), f)

# Fixes: dropped a pointless f-string (no placeholders) and open the CSV
# with newline="" as the csv module requires to avoid blank rows on Windows.
with open("/pipeline/output.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows((s.type, "\n".join(s.symbols)) for s in doc.preds)
|
"""
CamPy: Python-based multi-camera recording software.
Integrates machine vision camera APIs with ffmpeg real-time compression.
Outputs one MP4 video file and metadata files for each camera
"campy" is the main console.
User inputs are loaded from config yaml file using a command line interface (CLI)
configurator parses the config arguments (and default params) into "params" dictionary.
configurator assigns params to each camera stream in the "cam_params" dictionary.
* Camera index is set by "cameraSelection".
* If param is string, it is applied to all cameras.
* If param is list of strings, it is assigned to each camera, ordered by camera index.
Camera streams are acquired and encoded in parallel using multiprocessing.
Usage:
campy-acquire ./configs/campy_config.yaml
"""
import os, time, sys, logging, threading, queue
from collections import deque
import multiprocessing as mp
from campy import writer, display, configurator
from campy.trigger import trigger
from campy.cameras import unicam
from campy.utils.utils import HandleKeyboardInterrupt
def OpenSystems():
    """Configure params, load camera systems/devices, and start triggers.

    Returns the tuple ``(systems, params)``: the initialized camera
    systems and the parsed configuration dictionary.
    """
    params = configurator.ConfigureParams()
    systems = unicam.LoadSystems(params)
    systems = unicam.GetDeviceList(systems, params)
    # Start camera triggers if configured (delegated to trigger module).
    systems = trigger.StartTriggers(systems, params)
    return systems, params
def CloseSystems(systems, params):
    """Stop any running camera triggers, then release all camera systems."""
    trigger.StopTriggers(systems, params)
    unicam.CloseSystems(systems, params)
def AcquireOneCamera(n_cam):
    """Acquire, display, and encode frames for camera index ``n_cam``.

    Starts a display thread and a frame-grabbing ("producer") thread,
    while this process acts as the video-writer ("consumer").
    NOTE(review): depends on module-level ``systems`` and ``params``;
    with the spawn start method these are re-created when each worker
    re-imports this module (see the OpenSystems() call at file bottom).
    """
    # Initialize param dictionary for this camera stream
    cam_params = configurator.ConfigureCamParams(systems, params, n_cam)
    # Initialize queues for display, video writer, and stop messages.
    # dispQueue is bounded at 2 so display lags drop frames rather than
    # buffering; writeQueue is unbounded between grabber and writer.
    dispQueue = deque([], 2)
    writeQueue = deque()
    stopReadQueue = deque([],1)
    stopWriteQueue = deque([],1)
    # Start image window display thread
    threading.Thread(
        target = display.DisplayFrames,
        daemon = True,
        args = (cam_params, dispQueue,),
    ).start()
    # Start grabbing frames ("producer" thread)
    threading.Thread(
        target = unicam.GrabFrames,
        daemon = True,
        args = (cam_params, writeQueue, dispQueue, stopReadQueue, stopWriteQueue,),
    ).start()
    # Start video file writer (main "consumer" process); blocks until done.
    writer.WriteFrames(cam_params, writeQueue, stopReadQueue, stopWriteQueue)
def Main():
    """Entry point: run one worker process per camera, then shut down."""
    with HandleKeyboardInterrupt():
        # Acquire cameras in parallel with Windows- and Linux-compatible pool
        p = mp.get_context("spawn").Pool(params["numCams"])
        p.map_async(AcquireOneCamera,range(params["numCams"])).get()
    # NOTE(review): placed after the interrupt guard so systems are
    # released even on Ctrl+C — confirm against upstream indentation.
    CloseSystems(systems, params)
# Open systems, creates global 'systems' and 'params' variables.
# Runs at import time on purpose: each spawned worker process re-imports
# this module and therefore re-initializes its own systems/params.
systems, params = OpenSystems()
import unittest
import subprocess
import os
import utils
# Repository root (one level above this test file) and the script under test.
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
cleanup_pycs = os.path.join(TOPDIR, 'cleanup_pycs.py')
class Tests(unittest.TestCase):
    """Tests for the cleanup_pycs.py helper script."""

    def test_cleanup_pycs(self):
        """A .pyc without a matching .py is removed; paired files survive."""
        with utils.TempDir() as tmpdir:
            kept_py = os.path.join(tmpdir, "ok.py")
            kept_pyc = os.path.join(tmpdir, "ok.pyc")
            orphan_pyc = os.path.join(tmpdir, "del.pyc")
            for path in (kept_py, kept_pyc, orphan_pyc):
                utils.write_file(path, "")
            proc = subprocess.Popen([cleanup_pycs, tmpdir])
            proc.communicate()
            self.assertEqual(proc.returncode, 0)
            self.assertTrue(os.path.exists(kept_py))
            self.assertTrue(os.path.exists(kept_pyc))
            self.assertFalse(os.path.exists(orphan_pyc))

    def test_cleanup_pycs_cwd(self):
        """With no argument, the script cleans the current directory."""
        with utils.TempDir() as tmpdir:
            kept_py = os.path.join(tmpdir, "ok.py")
            kept_pyc = os.path.join(tmpdir, "ok.pyc")
            orphan_a = os.path.join(tmpdir, "del.pyc")
            orphan_b = os.path.join(tmpdir, "del2.pyc")
            for path in (kept_py, kept_pyc, orphan_a, orphan_b):
                utils.write_file(path, "")
            proc = subprocess.Popen([cleanup_pycs], cwd=tmpdir)
            proc.communicate()
            self.assertEqual(proc.returncode, 0)
            self.assertTrue(os.path.exists(kept_py))
            self.assertTrue(os.path.exists(kept_pyc))
            self.assertFalse(os.path.exists(orphan_a))
            self.assertFalse(os.path.exists(orphan_b))
if __name__ == '__main__':
unittest.main()
|
import re

# Search for a word in a sentence and report whether it was found.
# Fix: removed two re.search() calls whose results were discarded (dead code).
texto = "En esta cadena se encuentra una palabra mágica"
palabra = "mágica"
# Note: `palabra` is used as a regex pattern; fine here (no metacharacters).
encontrado = re.search(palabra, texto)
if encontrado:
    print("Se ha encontrado la palabra:", palabra)
else:
    print("No se ha encontrado la palabra:", palabra)
# Read three integers from stdin; the comparison logic is unfinished.
v1 = int(input())
v2 = int(input())
v3 = int(input())
# NOTE(review): placeholder branch — v3 is never used and this `if`
# has no effect yet; presumably more logic was intended here.
if v2 > v1:
    pass
import numpy as np
import matplotlib.pyplot as plt
import BotDecidesPos
#import matplotlib.pyplot as plt
class BotPool:
    """Registry that drives a set of bots from start to target positions."""

    def __init__(self):
        # Fix: per-instance list. The original declared `recorder = []` at
        # class level, so every BotPool instance shared one list.
        self.recorder = []

    def register(self, nbot):
        """Add a bot (any object with an ``id`` attribute) to the pool."""
        self.recorder.append(nbot)
        print(nbot.id, "has been registered")

    def getList(self):
        """Return the list of registered bots."""
        return self.recorder

    def stimulate(self):
        """Assign hard-coded start/target points, run each bot to its
        target, then scatter-plot the recorded paths.
        """
        # Hard-coded demo positions for up to two bots (hoisted out of the
        # loop: the original rebuilt identical arrays every iteration).
        x = np.array([0.0, 10.0])
        y = np.array([0.0, 0.0])
        tx = np.array([9.0, 0.0])
        ty = np.array([5.0, 17.0])
        c = 0
        for bot in self.recorder:
            bot.setDefPos(x[c], y[c])
            # Fix: de-garbled "Ïnitial refference"/"ïs" mojibake in output.
            print("Initial reference point of ", bot.id, " is x: ", x[c], " y: ", y[c])
            bot.setTarget(tx[c], ty[c])
            print("final reference point of ", bot.id, " is x: ", tx[c], " y: ", ty[c])
            bot.setSpeed(0.8)
            c = c + 1
        # Step every bot until it reports its target reached.
        for bot in self.recorder:
            while bot.checkTarget():
                bot.updatePos()
        p = 1
        coor = ["red", "blue", "green"]
        plt.figure()
        for bot in self.recorder:
            # NOTE(review): p starts at 1, so coor[p] skips "red" and a
            # third bot would raise IndexError — confirm intended palette.
            plt.scatter(bot.plotx, bot.ploty, color=coor[p])
            print(bot.id, bot.plotx, bot.ploty)
            p = p + 1
# Demo driver: build a pool with two bots and run the simulation.
# NOTE(review): OneBot is not imported by name here — presumably it comes
# from the BotDecidesPos import above; confirm, otherwise this raises
# NameError at runtime.
bp=BotPool()
ob1=OneBot("123bot")
ob2=OneBot("213bot")
bp.register(ob1)
bp.register(ob2)
bp.stimulate()
plt.show()
#mpl.show()
|
from ..input_action import InputAction
class ButtonAdd(InputAction):
    """Input action that adds its configured value to every applicable
    game action (add/subtract/multiply/divide/append)."""

    def run(self, total, game):
        """Apply ``self._value`` to all applicable actions in ``game``.

        Returns ``total`` unchanged; the effect is the mutation of the
        matching actions via their ``add_value`` method.
        """
        addition = self._value
        # Module-path fragments identifying the action types we may modify.
        applicables = ['add', 'subtract', 'multiply', 'divide', 'append']
        # Fix: the original used a list comprehension purely for its side
        # effects; a plain loop states the intent.
        for action_name in game.actions:
            action = game.actions[action_name]
            # NOTE(review): matching on str(type(...)) is fragile —
            # isinstance checks would be safer if the classes are importable.
            type_repr = str(type(action))
            if any('solver.actions.' + frag + '.' in type_repr
                   for frag in applicables):
                action.add_value(addition)
        return total
|
# -*- coding: utf-8 -*-
import functools
from typing import Optional
import cytoolz
import structlog
from ..typedefs import BatchJrpcRequest
from ..typedefs import CachedBatchResponse
from ..typedefs import CachedSingleResponse
from ..typedefs import SingleJrpcRequest
from ..typedefs import SingleJrpcResponse
from .ttl import TTL
logger = structlog.get_logger(__name__)
@functools.lru_cache(8192)
def jsonrpc_cache_key(single_jsonrpc_request: SingleJrpcRequest) -> str:
    """Return the cache key for a request: its URN rendered as a string."""
    urn = single_jsonrpc_request.urn
    return str(urn)
def irreversible_ttl(jsonrpc_response: dict=None,
                     last_irreversible_block_num: int=None) -> TTL:
    """Choose a cache TTL for a JSON-RPC response.

    Returns DEFAULT_TTL only when the response is non-empty, the last
    irreversible block number is a valid int, and a block number can be
    parsed from the response; NO_CACHE otherwise.
    NOTE(review): the parsed block number is never compared against
    last_irreversible_block_num — presumably intended; confirm.
    """
    if not jsonrpc_response:
        return TTL.NO_CACHE
    if not isinstance(last_irreversible_block_num, int):
        # Fix: message previously misspelled "last_irrersible_block_num".
        logger.debug('bad/missing last_irreversible_block_num',
                     lirb=last_irreversible_block_num)
        return TTL.NO_CACHE
    try:
        # Called for validation only (the original bound the result to an
        # unused variable); raises on a malformed block id.
        block_num_from_jsonrpc_response(jsonrpc_response)
        return TTL.DEFAULT_TTL
    except Exception as e:
        logger.warning(
            'Unable to cache using last irreversible block',
            e=e,
            lirb=last_irreversible_block_num)
        return TTL.NO_CACHE
def block_num_from_jsonrpc_response(
        jsonrpc_response: dict=None) -> Optional[int]:
    """Extract the block number from a get_block / get_block_header
    response, supporting both appbase and legacy steemd shapes.

    Block ids encode the block number in their first 8 hex characters;
    a header's "previous" id yields the previous block, hence the +1.
    Returns None when no known field is present.
    Fix: replaced the cytoolz.get_in dependency with a local stdlib-only
    helper with the same missing-path semantics.
    """
    def get_in(keys, coll, default=None):
        # Walk nested containers; return `default` on any missing or
        # unindexable step (mirrors cytoolz.get_in's behavior).
        try:
            for key in keys:
                coll = coll[key]
            return coll
        except (KeyError, IndexError, TypeError):
            return default

    # for appbase get_block
    block_id = get_in(['result', 'block', 'block_id'], jsonrpc_response)
    if block_id:
        return int(str(block_id)[:8], base=16)
    # for appbase get_block_header
    previous = get_in(['result', 'header', 'previous'], jsonrpc_response)
    if previous:
        return int(str(previous)[:8], base=16) + 1
    # for steemd get_block
    block_id = get_in(['result', 'block_id'], jsonrpc_response)
    if block_id:
        return int(str(block_id)[:8], base=16)
    # for steemd get_block_header
    previous = get_in(['result', 'previous'], jsonrpc_response)
    if previous:
        return int(str(previous)[:8], base=16) + 1
    return None
def merge_cached_response(request: SingleJrpcRequest,
                          cached_response: CachedSingleResponse,
                          ) -> Optional[SingleJrpcResponse]:
    """Build a JSON-RPC response for ``request`` from a cached result,
    or return None when no cached response exists."""
    if not cached_response:
        return None
    merged = {
        'id': request.id,
        'jsonrpc': '2.0',
        'result': cached_response['result'],
    }
    return merged
def merge_cached_responses(request: BatchJrpcRequest,
                           cached_responses: CachedBatchResponse) -> CachedBatchResponse:
    """Merge each cached response with its corresponding request, pairwise."""
    merged = []
    for single_request, single_response in zip(request, cached_responses):
        merged.append(merge_cached_response(single_request, single_response))
    return merged
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Exploratory analysis of the OWID COVID-19 dataset.
df=pd.read_csv('https://covid.ourworldindata.org/data/owid-covid-data.csv')
df.columns
# Mean, median and min sales by country
# NOTE(review): the OWID dataset has no 'Country' or 'Sales' columns (it
# uses 'location', 'new_cases', ...), so the next three lines raise
# KeyError — they look pasted from a different notebook; confirm.
df.pivot_table(index=['Country'], values=['Sales'], aggfunc={'median','mean','min'})
df.pivot_table(index=['Country'], values=['Sales'], aggfunc={'median','mean','min'}).plot()
# Groupby has some of similar funcitonality
df.groupby(['Country']).mean()['Sales'].plot() # can specify a specific column to find mean of. Here we do mean(sales)
# NOTE(review): empty-string column in values=[''] also raises KeyError.
df.pivot_table(index=['location'], values=[''])
df.groupby(['location']).count()
# Top 10 countries with Diabetes prevalence
# (sorted ascending, so this is actually the 10 LOWEST — confirm intent)
df[['location','diabetes_prevalence']].drop_duplicates().sort_values(by='diabetes_prevalence').head(10).plot(x='location', y='diabetes_prevalence', kind='bar')
# Unique list of countries
location = df[['location']].drop_duplicates()
# Focus on 1 country - India
temp=df.loc[df.location=='India']
temp.plot(x='date', y='new_cases', rot=45, kind='bar')
plt.title('India case evolution')
plt.xlabel('Time')
plt.ylabel('New cases')
# Weekly resample of new cases (all countries mixed together here).
temp2=df[['date','new_cases']] # select cols of interest
temp2.set_index("date", inplace=True)
temp2.index = pd.to_datetime(temp2.index)
temp3=temp2.resample(rule='W').mean()
temp3.plot( rot=45, kind='bar')
# Lets get total cases by country and diabetes prevalance and other factors to see corelation
# Lets identify 20 countries with most cases
df.date.max() # '2020-10-24'
df2=df.loc[df.date=='2020-10-24']
df[['total_deaths','diabetes_prevalence']].plot(y='total_deaths',x='diabetes_prevalence', kind='scatter')
df[['total_deaths','population']].plot(y='total_deaths',x='population', kind='scatter')
temp=df.groupby(['location']).sum()['new_cases'].sort_values(ascending=False).head(21)
temp=df.groupby(['location']).mean()['diabetes_prevalence']
df.columns
|
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import os
import subprocess
import sys
import time
import traceback

from fabric import *
from patchwork.files import exists
from patchwork import files
from datalab.logger import logging
import datalab.fab
def handle_dpkg_lock(error='', rerun=False):
    """Best-effort recovery from dpkg lock errors on a remote host.

    Kills lock-holding processes, removes stale lock files and, when
    ``rerun`` is True, retries ``dpkg --configure -a`` (up to 10 times)
    until /tmp/dpkg.log reports no lock and no error.  Exits the process
    on unrecoverable failure.
    """
    # Fix: these patterns were referenced as undefined globals (they are
    # locals of manage_pkg), so every rerun aborted via the except clause.
    lock_parser = "frontend is locked|locked|not get lock|unavailable"
    error_parser = "Could not|No matching|Error:|E:|failed|Requires:"
    try:
        count = 0
        # Fix: the original tested `'a' and 'b' in error`, which by
        # precedence ignored the first substring entirely.
        if 'E: Could not get lock ' in error and 'It is held by process ' in error:
            log = datalab.fab.conn.sudo('cat /tmp/dpkg.log | grep "E: Could not get lock"').stdout
            lock_path = log.split('\n')[0][22:log.find('.')]
            pid = log.split('\n')[0][log.find('It is held by process ') + 22:].split(' ')[0]
            datalab.fab.conn.sudo('kill -9 {}'.format(pid))
            datalab.fab.conn.sudo('rm -f {}'.format(lock_path))
        while 'no_lock' not in error and count < 10:
            # Kill/remove holders of both dpkg lock files before retrying.
            pid = datalab.fab.conn.sudo('lsof /var/lib/dpkg/lock-frontend | grep dpkg | awk \'{print $2}\'').stdout.replace( '\n', '')
            if pid != '':
                datalab.fab.conn.sudo('kill -9 {}'.format(pid))
                datalab.fab.conn.sudo('rm -f /var/lib/dpkg/lock-frontend')
            pid = datalab.fab.conn.sudo('lsof /var/lib/dpkg/lock | grep dpkg | awk \'{print $2}\'').stdout.replace('\n', '')
            if pid != '':
                datalab.fab.conn.sudo('kill -9 {}'.format(pid))
                datalab.fab.conn.sudo('rm -f /var/lib/dpkg/lock')
            if rerun:
                datalab.fab.conn.sudo('dpkg --configure -a 2>&1 | tee /tmp/tee.tmp; '
                                      'if ! grep -w -E "({0})" /tmp/tee.tmp; '
                                      'then echo "no_lock" > /tmp/dpkg.log; '
                                      'else cat /tmp/tee.tmp > /tmp/dpkg.log;fi; '
                                      'if ! grep -w -E "({1})" /tmp/tee.tmp; '
                                      'then echo "no_error" >> /tmp/dpkg.log; '
                                      'else cat /tmp/tee.tmp >> /tmp/dpkg.log;fi'.format(lock_parser,
                                                                                         error_parser))
                error = datalab.fab.conn.sudo('cat /tmp/dpkg.log').stdout
            else:
                error = 'no_lock'
            count = count + 1
        if 'no_error' not in error:
            raise Exception
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still propagates.
        sys.exit(1)
def handle_apt_lock(error='', rerun=False):
    """Best-effort recovery from apt lock errors on a remote host.

    Kills lock holders, removes the stale lists lock and, when ``rerun``
    is True, retries ``apt update`` (up to 10 times) until /tmp/apt.log
    reports no lock and no error.  Exits the process on failure.
    """
    # Fix: these patterns were referenced as undefined globals (they are
    # locals of manage_pkg), so every rerun aborted via the except clause.
    lock_parser = "frontend is locked|locked|not get lock|unavailable"
    error_parser = "Could not|No matching|Error:|E:|failed|Requires:"
    try:
        count = 0
        # Fix: precedence bug — `'a' and 'b' in error` only tested 'b'.
        if 'E: Could not get lock ' in error and 'It is held by process ' in error:
            log = datalab.fab.conn.sudo('cat /tmp/apt.log | grep "E: Could not get lock"').stdout
            lock_path = log.split('\n')[0][22:log.find('.')]
            pid = log.split('\n')[0][log.find('It is held by process ') + 22:].split(' ')[0]
            datalab.fab.conn.sudo('kill -9 {}'.format(pid))
            datalab.fab.conn.sudo('rm -f {}'.format(lock_path))
        while 'no_lock' not in error and count < 10:
            pid = datalab.fab.conn.sudo('lsof /var/lib/apt/lists/lock | grep apt | awk \'{print $2}\'').stdout.replace('\n', '')
            if pid != '':
                datalab.fab.conn.sudo('kill -9 {}'.format(pid))
                datalab.fab.conn.sudo('rm -f /var/lib/apt/lists/lock')
            if rerun:
                datalab.fab.conn.sudo('apt update 2>&1 | tee /tmp/tee.tmp; '
                                      'if ! grep -w -E "({0})" /tmp/tee.tmp; '
                                      'then echo "no_lock" > /tmp/apt.log; '
                                      'else cat /tmp/tee.tmp > /tmp/apt.log;fi; '
                                      'if ! grep -w -E "({1})" /tmp/tee.tmp; '
                                      'then echo "no_error" >> /tmp/apt.log; '
                                      'else cat /tmp/tee.tmp >> /tmp/apt.log;fi'.format(lock_parser,
                                                                                       error_parser))
                error = datalab.fab.conn.sudo('cat /tmp/apt.log').stdout
            else:
                error = 'no_lock'
            count = count + 1
        if 'no_error' not in error:
            raise Exception
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still propagates.
        sys.exit(1)
def handle_apt_get_lock(error='', rerun=False):
    """Best-effort recovery from apt-get lock errors on a remote host.

    NOTE(review): ``command``, ``requisites``, ``lock_parser`` and
    ``error_parser`` are not defined in this module (they are locals of
    manage_pkg), so the ``rerun=True`` path raises NameError, which the
    bare except turns into sys.exit(1) — confirm intended behavior.
    """
    try:
        count = 0
        # NOTE(review): due to operator precedence this only tests the
        # second substring: `'...' and ('...' in error)`.
        if 'E: Could not get lock ' and 'It is held by process ' in error:
            log = datalab.fab.conn.sudo('cat /tmp/apt.log | grep "E: Could not get lock"').stdout
            lock_path = log.split('\n')[0][22:log.find('.')]
            pid = log.split('\n')[0][log.find('It is held by process ') + 22:].split(' ')[0]
            datalab.fab.conn.sudo('kill -9 {}'.format(pid))
            datalab.fab.conn.sudo('rm -f {}'.format(lock_path))
        while 'no_lock' not in error and count < 10:
            # Show current lock holders (output unused), then force-remove
            # all three package-manager lock files.
            datalab.fab.conn.sudo('lsof /var/lib/dpkg/lock')
            datalab.fab.conn.sudo('lsof /var/lib/apt/lists/lock')
            datalab.fab.conn.sudo('lsof /var/cache/apt/archives/lock')
            datalab.fab.conn.sudo('rm -f /var/lib/apt/lists/lock')
            datalab.fab.conn.sudo('rm -f /var/cache/apt/archives/lock')
            datalab.fab.conn.sudo('rm -f /var/lib/dpkg/lock')
            if rerun:
                datalab.fab.conn.sudo('apt-get {0} {1} 2>&1 | tee /tmp/tee.tmp; '
                                      'if ! grep -w -E "({2})" /tmp/tee.tmp; '
                                      'then echo "no_lock" > /tmp/apt_get.log; '
                                      'else cat /tmp/tee.tmp > /tmp/apt_get.log;fi; '
                                      'if ! grep -w -E "({3})" /tmp/tee.tmp; '
                                      'then echo "no_error" >> /tmp/apt_get.log; '
                                      'else cat /tmp/tee.tmp >> /tmp/apt_get.log;fi'.format(command,
                                                                                            requisites,
                                                                                            lock_parser,
                                                                                            error_parser))
                error = datalab.fab.conn.sudo('cat /tmp/apt_get.log').stdout
            else:
                error = 'no_lock'
            count = count + 1
        if 'no_error' not in error:
            raise Exception
    except:
        sys.exit(1)
def manage_pkg(command, environment, requisites):
    """Run an apt-get command, waiting for and recovering from
    package-manager locks.

    :param command: apt-get subcommand plus flags (e.g. '-y install')
    :param environment: 'remote' (run via datalab.fab.conn) or 'local'
    :param requisites: package list / arguments appended to the command
    Exits the process when the package manager stays busy for >60 polls
    or on unrecoverable errors.
    """
    try:
        allow = False
        counter = 0
        while not allow:
            if counter > 60:
                logging.error("Instance is broken (app manager does not work properly) please recreate it.")
                traceback.print_exc()
                sys.exit(1)
            else:
                logging.info('Package manager is:')
                if environment == 'remote':
                    # Poll until no apt/dpkg process is running remotely.
                    if 'busy' in datalab.fab.conn.sudo('pgrep "^apt" -a && echo "busy" || echo "ready"').stdout or 'busy' in datalab.fab.conn.sudo('pgrep "^dpkg" -a && echo "busy" || echo "ready"').stdout:
                        counter += 1
                        time.sleep(10)
                    else:
                        try:
                            lock_parser = "frontend is locked|locked|not get lock|unavailable"
                            error_parser = "Could not|No matching|Error:|E:|failed|Requires:"
                            # Three stages — dpkg --configure -a, apt update,
                            # then the requested apt-get command.  Each writes
                            # a log with "no_lock"/"no_error" markers that the
                            # handle_*_lock helpers inspect on failure.
                            datalab.fab.conn.sudo('dpkg --configure -a 2>&1 | tee /tmp/tee.tmp; '
                                                  'if ! grep -w -E "({0})" /tmp/tee.tmp; '
                                                  'then echo "no_lock" > /tmp/dpkg.log; '
                                                  'else cat /tmp/tee.tmp > /tmp/dpkg.log;fi;'
                                                  'if ! grep -w -E "({1})" /tmp/tee.tmp; '
                                                  'then echo "no_error" >> /tmp/dpkg.log; '
                                                  'else cat /tmp/tee.tmp >> /tmp/dpkg.log;fi'.format(lock_parser,
                                                                                                     error_parser))
                            err = datalab.fab.conn.sudo('cat /tmp/dpkg.log').stdout
                            if 'no_lock' not in err:
                                handle_dpkg_lock(err, rerun=True)
                            datalab.fab.conn.sudo('apt update 2>&1 | tee /tmp/tee.tmp; '
                                                  'if ! grep -w -E "({0})" /tmp/tee.tmp; '
                                                  'then echo "no_lock" > /tmp/apt.log; '
                                                  'else cat /tmp/tee.tmp > /tmp/apt.log;fi; '
                                                  'if ! grep -w -E "({1})" /tmp/tee.tmp; '
                                                  'then echo "no_error" >> /tmp/apt.log; '
                                                  'else cat /tmp/tee.tmp >> /tmp/apt.log;fi'.format(lock_parser,
                                                                                                    error_parser))
                            err = datalab.fab.conn.sudo('cat /tmp/apt.log').stdout
                            if 'no_lock' not in err:
                                handle_dpkg_lock()
                                handle_apt_lock(err, rerun=True)
                            datalab.fab.conn.sudo('apt-get {0} {1} 2>&1 | tee /tmp/tee.tmp; '
                                                  'if ! grep -w -E "({2})" /tmp/tee.tmp; '
                                                  'then echo "no_lock" > /tmp/apt_get.log; '
                                                  'else cat /tmp/tee.tmp > /tmp/apt_get.log;fi; '
                                                  'if ! grep -w -E "({3})" /tmp/tee.tmp; '
                                                  'then echo "no_error" >> /tmp/apt_get.log; '
                                                  'else cat /tmp/tee.tmp >> /tmp/apt_get.log;fi'.format(command,
                                                                                                        requisites,
                                                                                                        lock_parser,
                                                                                                        error_parser))
                            err = datalab.fab.conn.sudo('cat /tmp/apt_get.log').stdout
                            if 'no_lock' not in err:
                                handle_dpkg_lock()
                                handle_apt_lock()
                                handle_apt_get_lock(err, rerun=True)
                            allow = True
                        except Exception as err:
                            traceback.print_exc()
                            # NOTE(review): append_result is not defined in
                            # this module — presumably imported elsewhere in
                            # the project; confirm.
                            append_result("Failed to manage_pkgs", str(err))
                elif environment == 'local':
                    # Fix: the original compared the CompletedProcess object
                    # itself to 'busy' (always False); inspect stdout instead.
                    if 'busy' in subprocess.run('sudo pgrep "^apt" -a && echo "busy" || echo "ready"',
                                                capture_output=True, shell=True, check=True).stdout.decode('UTF-8'):
                        counter += 1
                        time.sleep(10)
                    else:
                        allow = True
                        subprocess.run('sudo apt-get {0} {1}'.format(command, requisites),
                                       capture_output=True, shell=True, check=True)
                else:
                    logging.error('Wrong environment')
                    sys.exit(1)
    except Exception as err:
        logging.error('Managing packages function error:', str(err))
        traceback.print_exc()
        sys.exit(1)
def ensure_pkg(os_user, requisites='linux-headers-$(uname -r) python3-pip python3-dev python3-virtualenv '
                                   'groff gcc vim less git wget '
                                   'libssl-dev unattended-upgrades nmap '
                                   'libffi-dev unzip libxml2-dev haveged'):
    """Update apt repositories and install the base tool set, once per host.

    Uses /home/<os_user>/.ensure_dir/pkg_upgraded as an idempotency
    marker; retries up to 60 times (50 s apart) when repositories are
    unavailable, then exits.
    """
    try:
        if not exists(datalab.fab.conn,'/home/{}/.ensure_dir/pkg_upgraded'.format(os_user)):
            count = 0
            check = False
            while not check:
                if count > 60:
                    logging.error("Repositories are not available. Please, try again later.")
                    sys.exit(1)
                else:
                    try:
                        logging.info("Updating repositories "
                                     "and installing requested tools: {}".format(requisites))
                        logging.info("Attempt number " + str(count) + " to install requested tools. Max 60 tries.")
                        manage_pkg('update', 'remote', '')
                        manage_pkg('-y install', 'remote', requisites)
                        datalab.fab.conn.sudo('unattended-upgrades -v')
                        # Disable periodic unattended upgrades so they do not
                        # grab the apt lock during provisioning.
                        datalab.fab.conn.sudo(
                            'sed -i \'s|APT::Periodic::Unattended-Upgrade "1"|APT::Periodic::Unattended-Upgrade "0"|\' '
                            '/etc/apt/apt.conf.d/20auto-upgrades')
                        datalab.fab.conn.run('export LC_ALL=C')
                        datalab.fab.conn.sudo('touch /home/{}/.ensure_dir/pkg_upgraded'.format(os_user))
                        # haveged supplies entropy on headless VMs.
                        datalab.fab.conn.sudo('systemctl enable haveged')
                        datalab.fab.conn.sudo('systemctl start haveged')
                        if os.environ['conf_cloud_provider'] == 'aws':
                            manage_pkg('-y install --install-recommends', 'remote', 'linux-aws-hwe')
                        check = True
                    except:
                        # NOTE(review): bare except retries on ANY failure,
                        # including KeyboardInterrupt — confirm intended.
                        count += 1
                        time.sleep(50)
    except Exception as err:
        logging.error('Installing prerequisites packages error:', str(err))
        traceback.print_exc()
        sys.exit(1)
def find_java_path_remote():
    """Return the JRE directory on the remote host, found via
    ``update-alternatives --query java``; exits on failure."""
    try:
        java_path = datalab.fab.conn.sudo("sh -c \"update-alternatives --query java | grep 'Value: ' | grep "
                                          "-o '/.*/jre'\"").stdout.replace('\n','')
        return java_path
    except Exception as err:
        logging.error('Finding remote java path error:', str(err))
        traceback.print_exc()
        sys.exit(1)
def find_java_path_local():
    """Return the local JRE directory, found via
    ``update-alternatives --query java``; exits on failure."""
    try:
        java_path = subprocess.run("sh -c \"update-alternatives --query java | grep 'Value: ' | grep "
                                   "-o '/.*/jre'\"", capture_output=True, shell=True, check=True).stdout.decode(
                                   'UTF-8').rstrip("\n\r")
        return java_path
    except Exception as err:
        logging.error('Finding local java path error:', str(err))
        traceback.print_exc()
        sys.exit(1)
def disable_edge_scp_binary(os_user):
    """Rename /usr/bin/scp so it cannot be used on the edge node.

    Idempotent via the .ensure_dir/disabled_scp_binary marker file.
    """
    try:
        if not exists(datalab.fab.conn, '/home/{}/.ensure_dir/disabled_scp_binary'.format(os_user)):
            datalab.fab.conn.sudo('mv /usr/bin/scp /usr/bin/scp_disabled')
            datalab.fab.conn.sudo('touch /home/{}/.ensure_dir/disabled_scp_binary'.format(os_user))
    except Exception as err:
        # Fix: the original logged a copy-pasted message about openssh.
        logging.error('Disabling scp binary error:', str(err))
        traceback.print_exc()
        sys.exit(1)
def ensure_openssh_version(os_user):
    """Build and install OpenSSH from source when the running version
    does not match os.environ['conf_openssh_version'].

    Idempotent via the .ensure_dir/openssh_version_ensured marker.
    """
    try:
        if not exists(datalab.fab.conn,'/home/{}/.ensure_dir/openssh_version_ensured'.format(os_user)):
            # 'ssh -V' prints the version banner; only rebuild on mismatch.
            if os.environ['conf_openssh_version'] not in datalab.fab.conn.sudo('ssh -V').stdout:
                # Privilege-separation directory required by sshd.
                datalab.fab.conn.sudo('mkdir /var/lib/sshd')
                datalab.fab.conn.sudo('chmod -R 700 /var/lib/sshd/')
                datalab.fab.conn.sudo('chown -R root:sys /var/lib/sshd/')
                datalab.fab.conn.sudo('wget -c https://cdn.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-{0}.tar.gz '
                                      '-O /tmp/openssh-{0}.tar.gz'.format(os.environ['conf_openssh_version']))
                datalab.fab.conn.sudo('bash -l -c "tar -zhxvf /tmp/openssh-{0}.tar.gz -C /tmp/; cd /tmp/openssh-{0}; ./configure; make; make install"'.format(os.environ['conf_openssh_version']))
                datalab.fab.conn.sudo('touch /home/{}/.ensure_dir/openssh_version_ensured'.format(os_user))
    except Exception as err:
        logging.error('Updating openssh to version:', str(err))
        traceback.print_exc()
        sys.exit(1)
def ensure_ntpd(os_user, edge_private_ip=''):
    """Install and configure ntpd, replacing systemd-timesyncd.

    Non-ssn/edge nodes are pointed at the edge node's private IP as their
    preferred time source.  Idempotent via the ntpd_ensured marker.
    """
    try:
        if not exists(datalab.fab.conn,'/home/{}/.ensure_dir/ntpd_ensured'.format(os_user)):
            # Disable systemd-timesyncd before installing ntp.
            datalab.fab.conn.sudo('timedatectl set-ntp no')
            manage_pkg('-y install', 'remote', 'ntp ntpdate')
            # "tinker panic 0" lets ntpd accept arbitrarily large offsets.
            datalab.fab.conn.sudo('bash -c \"echo "tinker panic 0" >> /etc/ntp.conf\"')
            if os.environ['conf_resource'] != 'ssn' and os.environ['conf_resource'] != 'edge':
                datalab.fab.conn.sudo('bash -c \"echo "server {} prefer iburst" >> /etc/ntp.conf\"'.format(
                    edge_private_ip))
            datalab.fab.conn.sudo('systemctl restart ntp')
            datalab.fab.conn.sudo('systemctl enable ntp')
            datalab.fab.conn.sudo('touch /home/{}/.ensure_dir/ntpd_ensured'.format(os_user))
    except Exception as err:
        logging.error('Installing NTPD error:', str(err))
        traceback.print_exc()
        sys.exit(1)
def ensure_java(os_user):
    """Install the headless OpenJDK 8 JDK if not already ensured.

    Idempotent via the .ensure_dir/java_ensured marker file.
    """
    try:
        if not exists(datalab.fab.conn,'/home/{}/.ensure_dir/java_ensured'.format(os_user)):
            manage_pkg('-y install', 'remote', 'openjdk-8-jdk-headless')
            datalab.fab.conn.sudo('touch /home/{}/.ensure_dir/java_ensured'.format(os_user))
    except Exception as err:
        logging.error('Installing Java error:', str(err))
        traceback.print_exc()
        sys.exit(1)
def ensure_step(os_user):
    """Install the smallstep CLI (step) from its .deb release if not yet
    ensured; idempotent via the .ensure_dir/step_ensured marker."""
    try:
        if not exists(datalab.fab.conn,'/home/{}/.ensure_dir/step_ensured'.format(os_user)):
            manage_pkg('-y install', 'remote', 'wget')
            datalab.fab.conn.sudo('wget https://github.com/smallstep/cli/releases/download/v0.13.3/step-cli_0.13.3_amd64.deb '
                                  '-O /tmp/step-cli_0.13.3_amd64.deb')
            datalab.fab.conn.sudo('dpkg -i /tmp/step-cli_0.13.3_amd64.deb')
            datalab.fab.conn.sudo('touch /home/{}/.ensure_dir/step_ensured'.format(os_user))
    except Exception as err:
        # Fix: the original used a bare `except:` but then referenced the
        # unbound name `err`, raising NameError inside the handler.
        logging.error('Installing step-cli error:', str(err))
        traceback.print_exc()
        sys.exit(1)
|
# -*- coding: utf-8 -*-
"""
Provides a way to introspect database and returns its current state for:
- schemas
- sequences
- tables
- views
- triggers
- indices
- constraints
Usage example:
.. code-block:: ipythonconsole
In [1]: uri='postgresql://localhost/jukoro_test.ju_20150403102042'
In [2]: from jukoro.pg import inspect
In [3]: schema, state = inspect(uri)
In [4]: schema
Out[4]: 'ju_20150403102042'
In [5]: state
Out[5]: <jukoro.pg.introspect.State at 0x7f8391a16e90>
In [6]: state.tables
Out[6]: <jukoro.pg.introspect.StateValues at 0x7f8391a16fd0>
In [7]: list(state.tables)
Out[7]: [u'test_pg', u'entity']
In [8]: list(state.views)
Out[8]: [u'test_pg__live']
In [9]: list(state.triggers)
Out[9]:
[u'ju_before__test_pg__live__update',
u'ju_before__test_pg__live__delete',
u'ju_before__test_pg__live__insert']
In [10]: list(state.indices)
Out[10]:
[u'ju_idx__test_pg__attr7_entity_start_entity_end',
u'ju_idx__test_pg__doc',
u'ju_idx__test_pg__attr1_entity_start_entity_end',
u'ju_idx__test_pg__entity_id',
u'ju_idx__test_pg__attr2_entity_start_entity_end',
u'entity_pkey',
u'test_pg_pkey',
u'ju_idx__test_pg__attr4_entity_start_entity_end']
In [11]: list(state.constraints)
Out[11]:
[u'243905_243922_5_not_null',
u'243905_243908_1_not_null',
u'243905_243922_1_not_null',
u'ju_validate__test_pg__attr4',
u'ju_validate__test_pg__attr5',
u'243905_243908_5_not_null',
u'ju_validate__test_pg__attr7',
u'ju_validate__test_pg__attr1',
u'ju_validate__test_pg__attr2',
u'ju_validate__test_pg__attr3',
u'243905_243908_2_not_null',
u'entity_pkey',
u'243905_243922_2_not_null',
u'test_pg_pkey']
In [12]: 'test_pg' in state.tables
Out[12]: True
"""
from collections import OrderedDict
# Introspection queries: each selects a single 'qname' column from
# information_schema (or pg_indexes), filtered by the %(schema)s
# parameter supplied by PgIntrospect._get.
_TABLES = """
SELECT
table_name as qname
FROM
information_schema.tables
WHERE
table_type = 'BASE TABLE'
AND
table_schema = %(schema)s;
"""
_VIEWS = """
SELECT
table_name as qname
FROM
information_schema.views
WHERE
table_schema = %(schema)s;
"""
_TRIGGERS = """
SELECT
trigger_name as qname
FROM
information_schema.triggers
WHERE
trigger_schema = %(schema)s;
"""
_CONSTRAINTS = """
SELECT
constraint_name as qname
FROM
information_schema.table_constraints
WHERE
constraint_schema = %(schema)s;
"""
_SEQUENCES = """
SELECT
sequence_name as qname
FROM
information_schema.sequences
WHERE
sequence_schema = %(schema)s;
"""
_SCHEMAS = """
SELECT
schema_name as qname
FROM
information_schema.schemata
WHERE
schema_name = %(schema)s;
"""
# pg_indexes is used because information_schema has no index view.
_INDICES = """
SELECT
indexname as qname
FROM
pg_indexes
WHERE
schemaname = %(schema)s;
"""
# mapping for names and queries; ordered so State iterates consistently
INTROMAP = OrderedDict(
    schemas=_SCHEMAS,
    sequences=_SEQUENCES,
    tables=_TABLES,
    views=_VIEWS,
    triggers=_TRIGGERS,
    indices=_INDICES,
    constraints=_CONSTRAINTS,
)
class PgIntrospect(object):
    """Runs the predefined introspection queries against a database.

    Usable as a context manager.  Accessing an attribute named after an
    INTROMAP key (e.g. ``self.tables``) executes the matching query and
    returns the list of names found in ``self.conn.schema``.

    :param conn: instance of :class:`~jukoro.pg.db.PgConnection`
    See :func:`~jukoro.pg.introspect.inspect` for example usage
    """

    def __init__(self, conn):
        self.conn = conn

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Drop the connection reference on exit; the caller owns closing it.
        self.conn = None

    def _get(self, query, params):
        """Execute ``query`` and return the list of 'qname' values."""
        with self.conn.transaction() as cursor:
            rows = cursor.execute(query, params).all()
            return [row['qname'] for row in rows]

    def __getattr__(self, name):
        if name in INTROMAP:
            return self._get(INTROMAP[name], {'schema': self.conn.schema})
        raise AttributeError('Unknown attribute "%s"' % name)
class StateValues(object):
    """Set-like container for the current database state of one type
    (schemas/sequences/tables/views/triggers/indices/constraints).

    :param values: iterable of names retrieved from database using
        :class:`~jukoro.pg.introspect.PgIntrospect`
    """

    def __init__(self, values):
        self._values = set(values or [])

    def __contains__(self, k):
        return k in self._values

    def __iter__(self):
        return iter(self._values)

    def pop(self, k):
        """Remove ``k`` from the current state (no error when absent).

        Primary usage is to clear out values expected to be present in
        the database, leaving only values expected to be deleted.
        """
        self._values.discard(k)

    def clear(self):
        """Forget all values in the current state."""
        self._values.clear()
class State(object):
    """Container exposing the full database state as attributes.

    :param pairs: iterator or list of
        (name, :class:`~jukoro.pg.introspect.StateValues`) pairs
    """

    def __init__(self, pairs):
        self._pairs = OrderedDict(pairs)

    def __getattr__(self, name):
        # Unknown names yield None rather than raising AttributeError.
        return self._pairs.get(name)
def inspect(uri):
    """Inspect current database state.

    :param uri: connection string
    :returns: tuple of (schema, state)
    :rtype: tuple(str, :class:`~jukoro.pg.introspect.State`)
    """
    from jukoro.pg import PgConnection

    conn = PgConnection(uri)
    schema = conn.schema
    with PgIntrospect(conn) as inspector:
        pairs = ((name, StateValues(getattr(inspector, name)))
                 for name in INTROMAP)
        state = State(pairs)
    conn.close()
    return schema, state
|
def outer(wrapped):
    """Decorator: print a fixed message before delegating to ``wrapped``."""
    def announcer(*args, **kwargs):
        print("My message")
        return wrapped(*args, **kwargs)
    return announcer


@outer
def div(a, b):
    """Return a divided by b (announces each call via the decorator)."""
    return a / b


print(div(1, 2))
# Generated by Django 3.1 on 2020-09-29 21:48
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
    # Auto-generated migration: switch both jumbotron description fields
    # to TinyMCE HTML fields (verbose names are in Persian).

    dependencies = [
        ('app', '0026_auto_20200930_0117'),
    ]

    operations = [
        migrations.AlterField(
            model_name='jumbotron',
            name='description',
            field=tinymce.models.HTMLField(blank=True, max_length=2000, null=True, verbose_name='شرح کامل'),
        ),
        migrations.AlterField(
            model_name='jumbotron',
            name='short_description',
            field=tinymce.models.HTMLField(blank=True, max_length=1000, null=True, verbose_name='شرح کوتاه'),
        ),
    ]
|
import torch
from torch.distributions import HalfCauchy, HalfNormal
import torch.nn.functional as F
import matplotlib.pyplot as plt
from tqdm import tqdm
from pyro.distributions import *
import pyro
from pyro.optim import Adam
from pyro.infer import SVI, Trace_ELBO, Predictive
assert pyro.__version__.startswith('1')
pyro.enable_validation(True) # can help with debugging
pyro.set_rng_seed(0)
# Synthetic data: three 2-D Gaussian clusters of 50 points each.
data = torch.cat((MultivariateNormal(-8 * torch.ones(2), torch.eye(2)).sample([50]),
                  MultivariateNormal(8 * torch.ones(2), torch.eye(2)).sample([50]),
                  MultivariateNormal(torch.tensor([-0.5, 1]), torch.eye(2)).sample([50])))
N = data.shape[0]
def mix_weights(beta):
    """Stick-breaking construction: map T-1 Beta draws to T mixture weights."""
    remaining = (1 - beta).cumprod(-1)
    sticks = F.pad(beta, (0, 1), value=1)
    carried = F.pad(remaining, (1, 0), value=1)
    return sticks * carried
def model(data, **kwargs):
    """Truncated DP mixture of 2-D Gaussians with LKJ covariance priors.

    NOTE(review): relies on globals ``T``, ``alpha`` and ``N`` defined
    later in this script; ``data`` is assumed to be (N, 2) — confirm.
    """
    # Stick-breaking weights from T-1 Beta(1, alpha) draws.
    with pyro.plate("beta_plate", T - 1):
        beta = pyro.sample("beta", Beta(1, alpha))
    # Per-dimension variances (2 dims for each of T components).
    with pyro.plate("var_plate", T * 2):
        var = pyro.sample("var", HalfNormal(scale=0.5 * torch.ones(1)))
    # Cholesky factors of correlation matrices; eta=1e6 concentrates
    # them near the identity.
    with pyro.plate("corr_plate", T):
        corr = pyro.sample("corr", LKJCorrCholesky(d=2, eta=1e6 * torch.ones(1)).expand([T]))
    with pyro.plate("mu_plate", T):
        # scale_tril = diag(sqrt(var)) @ corr_cholesky, per component.
        L_sigma = torch.bmm(torch.diag_embed(torch.sqrt(var.view(T, 2))), corr)
        mu = pyro.sample("mu", MultivariateNormal(torch.zeros(2), scale_tril=L_sigma))
    with pyro.plate("data", N):
        # Component assignment, then the observed point from it.
        z = pyro.sample("z", Categorical(mix_weights(beta)))
        pyro.sample("obs", MultivariateNormal(mu[z], scale_tril=L_sigma[z]), obs=data)
def guide(data, **kwargs):
    """Mean-field variational guide matching the sample sites of `model`.

    Variational parameters: gamma (Beta rates), zeta (HalfNormal scales),
    psi (LKJ concentration), tau (component mean locations), pi (per-point
    assignment probabilities). Uses module-level globals T, alpha, N.
    """
    gamma = pyro.param('gamma', alpha * torch.ones(T - 1,), constraint=constraints.positive)
    # Lambdas are lazy initializers: evaluated once when the param store is empty.
    zeta = pyro.param('zeta', lambda: Uniform(0.25, 0.5).sample([T * 2]))
    psi = pyro.param('psi', lambda: Uniform(1e6, 1e7).sample(), constraint=constraints.positive)
    tau = pyro.param('tau', lambda: MultivariateNormal(torch.zeros(2), 5 * torch.eye(2)).sample([T]))
    pi = pyro.param('pi', torch.ones(N, T) / T, constraint=constraints.simplex)
    with pyro.plate("beta_plate", T - 1):
        q_beta = pyro.sample("beta", Beta(torch.ones(T - 1), gamma))
    with pyro.plate("var_plate", T * 2):
        q_var = pyro.sample("var", HalfNormal(scale=zeta))
    with pyro.plate("corr_plate", T):
        q_corr = pyro.sample("corr", LKJCorrCholesky(d=2, eta=psi).expand([T]))
    with pyro.plate("mu_plate", T):
        # Same covariance reparameterization as in the model.
        q_L_sigma = torch.bmm(torch.diag_embed(torch.sqrt(q_var.view(T, 2))), q_corr)
        q_mu = pyro.sample("mu", MultivariateNormal(tau, scale_tril=q_L_sigma))
    with pyro.plate("data", N):
        z = pyro.sample("z", Categorical(pi))
T = 3  # truncation level of the stick-breaking representation
# alpha = 0.1
# model(data)
optim = Adam({"lr": 0.01})
# Stochastic variational inference with the standard ELBO objective.
svi = SVI(model, guide, optim, loss=Trace_ELBO())
def train(num_iterations):
    """Run SVI for the given number of steps and return the per-step losses."""
    pyro.clear_param_store()   # start from fresh variational parameters
    history = []
    for _ in tqdm(range(num_iterations)):
        history.append(svi.step(data, num_particles=10))
    return history
def truncate(alpha, centers, vars, corrs, weights):
    """Discard mixture components whose weight falls below 1/(100*alpha).

    Args:
        alpha:   DP concentration; sets the pruning threshold.
        centers: (T, 2) component means.
        vars:    flat (T*2,) per-dimension variances.
        corrs:   (T, 2, 2) correlation Cholesky factors.
        weights: (T,) mixture weights.

    Returns (kept_centers, kept_covariances, renormalized_weights).

    Fixes vs. original: uses view(-1, 2) instead of view(T, 2) so the
    function no longer depends on the module-level global T, and builds the
    covariances with a batched bmm instead of a Python loop (the loop also
    forced the result to float32 via torch.zeros regardless of input dtype).
    """
    threshold = alpha**-1 / 100.
    keep = weights > threshold
    true_centers = centers[keep]
    true_vars = vars.view(-1, 2)[keep]
    true_corrs = corrs[keep, ...]
    # Cholesky-like factor per component: diag(sqrt(var)) @ corr_cholesky.
    factors = torch.bmm(true_vars.sqrt().diag_embed(), true_corrs)
    # Full covariance = L @ L^T, batched over the surviving components.
    true_sigmas = torch.bmm(factors, factors.transpose(-2, -1))
    true_weights = weights[keep] / weights[keep].sum()
    return true_centers, true_sigmas, true_weights
alpha = 1  # DP concentration (also sets the truncation threshold below)
elbo = train(25000)
plt.figure()
plt.plot(elbo)   # loss curve over training iterations
plt.show()
# We make a point-estimate of our model parameters using the
# posterior means of tau and phi for the centers and weights
posterior_predictive = Predictive(guide, num_samples=100)
posterior_samples = posterior_predictive.forward(data)
# Posterior means of each latent site across the 100 guide samples.
mu_mean = posterior_samples['mu'].detach().mean(dim=0)
var_mean = posterior_samples['var'].detach().mean(dim=0)
corr_mean = posterior_samples['corr'].detach().mean(dim=0)
beta_mean = posterior_samples['beta'].detach().mean(dim=0)
weights_mean = mix_weights(beta_mean)
# Drop negligible components before plotting.
centers, sigmas, weights = truncate(alpha, mu_mean, var_mean, corr_mean, weights_mean)
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.scatter(data[:, 0], data[:, 1], color="blue", marker="+")
plt.scatter(centers[:, 0], centers[:, 1], color="red")
from math import pi
t = torch.arange(0, 2 * pi, 0.01)
# Unit circle, mapped through each covariance's Cholesky factor to draw a
# one-standard-deviation ellipse around each surviving component.
circle = torch.stack([torch.sin(t), torch.cos(t)], dim=0)
for n in range(len(sigmas)):
    ellipse = torch.mm(torch.cholesky(sigmas[n, ...]), circle)
    plt.plot(ellipse[0, :] + centers[n, 0], ellipse[1, :] + centers[n, 1],
             linestyle='-', linewidth=2, color='g', alpha=1.)
plt.show()
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
'''
https://leetcode.com/problems/merge-two-sorted-lists/discuss/9735/Python-solutions-(iteratively-recursively-iteratively-in-place).
1. Iterative
1. Start with first node in list1:
1.1. check if it is smaller than first node in list2,
1.2. if not add node2 in new list till the value of node in list2 is less than or equal to node 1
1.3. add node1 to the result list
2. if there are nodes in list2 add them to result list
2. Recursive: https://leetcode.com/problems/merge-two-sorted-lists/discuss/9715/Java-1-ms-4-lines-codes-using-recursion
if list1 is None: return list2
if list2 is None: return list1
if l1.val < l2.val:
l1.next = merge2Lists(l1.next, l2)
else:
l2.next = merge2Lists(l1, l2.next)
3. Iterative, in-place:
* Given 2 sorted lists, merge them in-place: Inplace-merging means creating merged linked-list without using additional space (i.e., SC: O(1)).
* Maintain 3 head pointers, 1 each for part of list1, list2 remaining to be processed and 1 for result list
* start with list1: check if first item of list1 is smaller than that of list2, if so, make list1's first node as head of result list: resultHead = list1[0]
and advance list1head (thereby shrinking list1 and expanding resultList)
* if node in list2 is smaller than node in list1 then resultHead = list2[0]
* for all subsequent nodes in list2 with value less than that of the node pointed to by head of list1: continue traversing list2 and advancing list2head
(advancing list2's head has the effect of list2 shrinking and the result list expanding)
* At the end of loop, when either list1 or list2 is empty, return head
'''
'''
Input: list1 = [1,2,4], list2 = [1,3,4]
Output: [1,1,2,3,4,4]
Input: list1 = [], list2 = []
Output: []
Input: list1 = [], list2 = [0]
Output: [0]
'''
class Solution:
    """Merge two sorted singly-linked lists into one sorted list.

    NOTE(review): the original class defined ``mergeTwoLists`` twice. The
    second definition silently shadowed the first (working) one and was
    broken: it subscripted ListNode objects (``list1[0]``) and could
    dereference ``node2.val`` after list2 was exhausted. Only one correct,
    in-place implementation is kept. The annotations are quoted because
    ``Optional``/``ListNode`` are never imported in this file; unquoted
    they raise NameError when the class body executes.
    """

    def mergeTwoLists(self, list1: 'Optional[ListNode]', list2: 'Optional[ListNode]') -> 'Optional[ListNode]':
        """Splice the nodes of list1 and list2 together in sorted order.

        In-place merge: no new nodes are allocated (O(1) extra space,
        O(n + m) time). Returns the head of the merged list, or None
        when both inputs are empty.
        """
        resulthead = None   # head of the merged list
        resulttail = None   # last node appended so far
        # Repeatedly move the smaller front node onto the result list.
        while list1 and list2:
            if list1.val < list2.val:
                taken, list1 = list1, list1.next
            else:
                taken, list2 = list2, list2.next
            if resulttail:
                resulttail.next = taken
            else:
                resulthead = taken
            resulttail = taken
        # At most one list still has nodes; append it wholesale.
        remainder = list1 or list2
        if resulttail:
            resulttail.next = remainder
        else:
            resulthead = remainder
        return resulthead
|
# coding=utf-8
from app import create_app, db, cli
from app.models import Thumbnail, Image, Currency, Event, EventCurrency, Post, Expense, Settlement, User, Message, Notification, Task
app = create_app()   # build the Flask application via the factory
cli.register(app)    # attach the project's custom CLI commands
@app.shell_context_processor
def make_shell_context():
    """Expose the db handle and every model in `flask shell` without imports."""
    return dict(
        db=db,
        Thumbnail=Thumbnail,
        Image=Image,
        Currency=Currency,
        Event=Event,
        EventCurrency=EventCurrency,
        Post=Post,
        Expense=Expense,
        Settlement=Settlement,
        User=User,
        Message=Message,
        Notification=Notification,
        Task=Task,
    )
|
#!/usr/bin/python -u
"""
Copyright (C) 2017 Jacksgong(jacksgong.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from os.path import exists
from okcat.confloader import ConfLoader
from okcat.helper import get_conf_path, print_unicode
from okcat.logprocessor import LogProcessor
from okcat.terminalcolor import colorize, allocate_color
# Matches a logcat-style timestamp, e.g. "08-17 10:21:33.123".
TIME_REGEX = r'\d{2}-\d{2} .*\d{2}:\d{2}:\d{2}\.\d+'


class LogFileParser:
    """Chronologically merges several log files and pretty-prints them.

    Keeps one cached line per open stream (plus its parsed timestamp) and
    repeatedly emits the oldest cached line, refilling from that stream.
    """

    def __init__(self, file_paths, hide_same_tags):
        # NOTE(review): these used to be mutable *class* attributes, so two
        # parser instances shared the same stream/line caches and would
        # corrupt each other. They are now per-instance state.
        self.filePaths = file_paths
        self.hideSameTags = hide_same_tags
        self.valid = False
        self.processor = None    # LogProcessor, created in setup()
        self.logStreams = []     # one open file object per input path
        self.cacheLines = []     # newest unconsumed line per stream
        self.lineTimes = []      # timestamp string parsed from each cached line

    def setup(self, yml_file_name):
        """Validate the input paths and configure the LogProcessor from the
        named YAML config file."""
        for path in self.filePaths:
            if not exists(path):
                exit("log path: %s is not exist!" % path)
        self.processor = LogProcessor(self.hideSameTags)
        loader = ConfLoader()
        loader.load(get_conf_path(yml_file_name))
        self.processor.setup_trans(trans_msg_map=loader.get_trans_msg_map(),
                                   trans_tag_map=loader.get_trans_tag_map(),
                                   hide_msg_list=loader.get_hide_msg_list())
        self.processor.setup_separator(separator_rex_list=loader.get_separator_regex_list())
        self.processor.setup_highlight(highlight_list=loader.get_highlight_list())
        self.processor.setup_condition(tag_keywords=loader.get_tag_keyword_list())
        log_line_regex = loader.get_log_line_regex()
        if log_line_regex is None:
            log_line_regex = 'date,time,process,thread,level,tag,message = "(.\S*) *(.\S*) *(\d*) *(\d*) *([A-Z]) *([^:]*): (.*?)$"'
            print("you don't provide 'log_line-regex' for parse each line on file, so we use this one as default:")
            print(log_line_regex + "\n")
        self.processor.setup_regex_parser(regex_exp=log_line_regex)

    def color_line(self, line):
        """Process one raw line and print the colorized result, if it
        matches the configured preconditions."""
        msg_key, line_buf, match_precondition = self.processor.process(line)
        if not match_precondition:
            return
        if msg_key is not None:
            print('')
            print_unicode(u''.join(colorize(msg_key + ": ", fg=allocate_color(msg_key))).encode('utf-8').lstrip())
        # assumes the processed line itself is always printed — original
        # indentation was lost in extraction; confirm against upstream okcat
        print_unicode(u''.join(line_buf).encode('utf-8').lstrip())

    def popup_cache_line(self, popup_index):
        """Refill cache slot *popup_index* with the next timestamped line
        from its stream. Lines without a timestamp (continuations) are
        printed immediately; on EOF the stream is closed and dropped."""
        need_read_stream = self.logStreams[popup_index]
        new_line = need_read_stream.readline()
        if new_line:
            match_result = re.search(TIME_REGEX, new_line)
            if match_result:
                self.lineTimes.insert(popup_index, match_result.group())
                self.cacheLines.insert(popup_index, new_line)
            else:
                self.color_line(new_line)
                self.popup_cache_line(popup_index)
        else:
            need_read_stream.close()
            self.logStreams.pop(popup_index)

    def process(self):
        """Open all input files, then repeatedly print the chronologically
        smallest cached line until every stream is exhausted."""
        origin_index = 0
        for path in self.filePaths:
            stream = open(path, "r")
            self.logStreams.append(stream)
            self.popup_cache_line(origin_index)
            origin_index += 1
        while self.cacheLines:
            min_index = self.lineTimes.index(min(self.lineTimes))
            self.lineTimes.pop(min_index)
            selected_line = self.cacheLines.pop(min_index)
            self.color_line(selected_line)
            self.popup_cache_line(min_index)
|
print(max(i*j for i in range(100,1000) for j in range(100,1000) if str(i*j) == str(i*j)[::-1]))
|
def compute(n):  # function sent to remote nodes for execution
    """Simulated workload: sleep *n* seconds, then return *n*.

    dispy serializes this function and runs it on remote nodes, where this
    module's globals are not available — so ``time`` must be imported
    inside the function body (the original never imported it at all).
    """
    import time
    time.sleep(n)
    return n
if __name__ == '__main__':
    import dispy
    # list remote nodes (here Amazon EC2 instance with external IP 54.204.242.185)
    nodes = ['54.204.242.185']
    # use ssh to forward port 51347 for each node; e.g.,
    # 'ssh -R 51347:localhost:51347 54.204.242.185'
    # start dispynode on each node with 'dispynode.py -i 54.204.242.185' (so dispynode
    # uses external IP address instead of default local IP address)
    # ip_addr='127.0.0.1' makes the client use the ssh-forwarded local port.
    cluster = dispy.JobCluster(compute, nodes=nodes, ip_addr='127.0.0.1')
    jobs = []
    for i in range(1, 10):
        job = cluster.submit(i)   # schedule compute(i) on some node
        jobs.append(job)
    for job in jobs:
        # Calling the job object blocks until its result arrives.
        print('result: %s' % job())
|
from enemy import *
from Dragon import *
import Message
import enemy
# The dragon is a special and unique enemy, hence these values are hardcoded
class Drake(Dragon):
    """A lesser dragon whose stats scale with the map's danger level."""

    def __init__(self, messageLog, currentMap = None):
        super().__init__(messageLog, currentMap)
        self.name = "Drake"
        # Random level wobble, biased toward "no change".
        self.levelMod = random.choice([-2, -1, 0, 0, 0, 0, 0, 1, 1, 2])
        self.level = currentMap.dangerLevel + self.levelMod
        self.character = "d"
        self.speed = 10
        # Common scaling base for all derived stats below.
        scale = max(1, self.level - 4)
        self.hp = self.maxhp = round(12 * scale ** 0.4)
        self.mp = self.maxmp = round(10 * scale ** 0.4)
        self.mpChargeRate = 1
        self.baseDamage = round(6 * scale ** 0.2)
        self.baseToHit = round(8 * scale ** 0.2)
        self.baseToDefend = round(4 * scale ** 0.2)
        self.burnDamage = 1
        self.color = "yellow"
        self.chartype = "Drake"

    def danger(self):
        """Fixed danger rating used for spawn selection."""
        return 6

    def update(self):
        super().update()

    def Attacked(self, damage, attacker, melee = True):
        # Bypass Dragon's reaction and use the base Enemy handler directly.
        enemy.Enemy.Attacked(self, damage, attacker)
|
import os
import asobann
import asobann.app
import pytest
from flask import Flask
pytestmark = [pytest.mark.quick]
# Table-driven configuration scenarios. Each entry provides the FLASK_ENV
# and extra environment variables to inject ('input'), and the resulting
# app config values to assert ('expected'). 'id' labels the pytest case.
PARAMS = [
    {
        'id': 'development',
        'input': {
            'FLASK_ENV': 'development',
            'env': {
            },
        },
        'expected': {
            'config': {
                'REDIS_URI': None,
                'MONGO_URI': 'mongodb://localhost:27017/ex2dev',
                'BASE_URL': 'http://localhost:5000',
                'GOOGLE_ANALYTICS_ID': None,
            },
        },
    },
    {
        'id': 'development with PUBLIC_HOSTNAME (dev on AWS)',
        'input': {
            'FLASK_ENV': 'development',
            'env': {
                'PUBLIC_HOSTNAME': 'asobann.example.com',
            },
        },
        'expected': {
            'config': {
                'BASE_URL': 'https://asobann.example.com',
            },
        },
    },
    {
        'id': 'development (Google Analytics is disabled)',
        'input': {
            'FLASK_ENV': 'development',
            'env': {
                'GOOGLE_ANALYTICS_ID': 'UA-DUMMYID',
            },
        },
        'expected': {
            'config': {
                'GOOGLE_ANALYTICS_ID': None,
            },
        },
    },
    {
        'id': 'test',
        'input': {
            'FLASK_ENV': 'test',
            'env': {
            },
            'testing': True,
        },
        'expected': {
            'config': {
                'REDIS_URI': None,
                'MONGO_URI': 'mongodb://localhost:27017/ex2test',
                'BASE_URL': '*',
                'GOOGLE_ANALYTICS_ID': None,
            },
        },
    },
    {
        'id': 'production',
        'input': {
            'FLASK_ENV': 'production',
            'env': {
                'REDIS_URI': 'redis://example.com',
                'MONGODB_URI': 'mongodb://example.com/',
                'PUBLIC_HOSTNAME': 'asobann.example.com',
                'GOOGLE_ANALYTICS_ID': 'UA-DUMMYID',
            },
        },
        'expected': {
            'config': {
                'REDIS_URI': 'redis://example.com',
                'MONGO_URI': 'mongodb://example.com/?retryWrites=false',
                'BASE_URL': 'https://asobann.example.com',
                'GOOGLE_ANALYTICS_ID': 'UA-DUMMYID',
            },
        },
    },
    {
        'id': 'production (preceding period in PUBLIC_HOSTNAME)',
        'input': {
            'FLASK_ENV': 'production',
            'env': {
                'REDIS_URI': 'redis://example.com',
                'MONGODB_URI': 'mongodb://example.com/',
                'PUBLIC_HOSTNAME': '.asobann.example.com',
                'GOOGLE_ANALYTICS_ID': 'UA-DUMMYID',
            },
        },
        'expected': {
            'config': {
                'BASE_URL': 'https://asobann.example.com',
            },
        },
    },
]
@pytest.mark.parametrize('param', PARAMS, ids=[p['id'] for p in PARAMS])
def test_config(param):
    """Configure the app under the scenario's environment and assert the
    resulting config values.

    Fixes vs. original: the test leaked FLASK_ENV into subsequent tests and
    blindly deleted the injected variables, clobbering any pre-existing
    values. Every touched variable is now restored in the finally block.
    """
    input_ = param['input']
    expected = param['expected']
    # Remember the prior value (or absence) of every variable we touch.
    saved = {key: os.environ.get(key) for key in ['FLASK_ENV', *input_['env']]}
    os.environ['FLASK_ENV'] = input_['FLASK_ENV']
    try:
        for key, value in input_['env'].items():
            os.environ[key] = value
        app = Flask(__name__)
        import importlib
        importlib.reload(asobann.config_common)  # must be reloaded with new environment
        asobann.app.configure_app(app, testing=input_.get('testing', False))
        for key, value in expected['config'].items():
            assert app.config[key] == value
    finally:
        for key, old in saved.items():
            if old is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = old
|
import numpy as np
from multiprocessing import Pool, Process
import time
def f(x):
    """Toy workload: sleep 2 s (simulated work), then return x squared."""
    time.sleep(2)
    return x*x
def rand():
    """Draw one uniform random float in [0, 1) from NumPy's global RNG."""
    sample = np.random.random()
    return sample
def run_map(numW=4, numJobs=20):
    """Time Pool.map of f over numJobs inputs using numW workers.

    Fixes vs. original: the worker pool was never closed or joined; the
    context manager guarantees cleanup even if map() raises.
    """
    t1 = time.time()
    with Pool(numW) as pool:
        print(pool.map(f, range(numJobs)))
    t2 = time.time()
    print('Time: ', t2 - t1)
def run_process(numJobs=20):
    """Time one bare Process per job (no pool), then join them all."""
    started_at = time.time()
    workers = []
    for job_id in range(numJobs):
        worker = Process(target=f, args=(job_id,))
        worker.start()
        workers.append(worker)
    print('All Processes launched')
    for worker in workers:
        worker.join()
    finished_at = time.time()
    print('Time: ', finished_at - started_at)
def run_async(numW=4, numJobs=20):
    """Submit numJobs apply_async tasks to a numW-worker pool and return
    the collected results.

    Fixes vs. original: the pool was closed but never joined, and leaked
    entirely if a task's get() raised; the context manager guarantees
    cleanup on every path.
    """
    t1 = time.time()
    res = []
    with Pool(numW) as p:
        resPool = [p.apply_async(rand) for _ in range(numJobs)]
        print('All Processes launched')
        res = [r.get() for r in resPool]
    t2 = time.time()
    print('Time: ', t2 - t1)
    return res
|
from pytest import fixture
from livedivulgador.bots.livedivulgador import LiveDivulgador
from livedivulgador.plugins.twitter import TwitterPlugin
@fixture
def livedivulgador():
    """Yield a LiveDivulgador bot wired with the Twitter plugin; drop it on teardown."""
    plugins_list = [TwitterPlugin]
    bot = LiveDivulgador()
    bot.add_plugins(plugins_list)
    yield bot
    del bot  # teardown: release the bot after the test
def test_livedivulgador_init(livedivulgador):
    # Smoke test: running the configured bot must not raise.
    # NOTE(review): presumably run() returns promptly under test config — confirm.
    livedivulgador.run()
|
import sys, getopt, signal, threading, serial
from time import sleep
from functools import partial
from lib import xpc
# Module-level state shared by get_params / the reader thread / X-Plane client.
param_port = 'COM3'    # serial port the Arduino is attached to (overridden by --port)
param_baud = 9600      # serial baud rate (overridden by --baud)
stop_threads = False   # set by exit_handler to stop the reader thread
client_xplane = None   # XPlaneConnect UDP client, created by init_XPlane()
def get_params():
    """Parse the --port/--baud command-line options into the module globals."""
    global param_port
    global param_baud
    usage = 'usage: ArdSerialUDP.py --port COM3 --baud 9600'
    long_options = ["port=", "baud="]
    if len(sys.argv) == 1:
        print(usage)
        sys.exit(2)
    try:
        opts, _args = getopt.getopt(sys.argv[1:], "h", long_options)
    except getopt.GetoptError as err:
        print(err)
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        if opt == "--port":
            param_port = arg
        elif opt == "--baud":
            param_baud = arg
def exit_handler(thread, signal, frame):
    """SIGINT handler: stop the serial reader thread, close the X-Plane
    socket, and exit (0 on clean shutdown, 2 on any failure).

    Fixes vs. original: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; it is narrowed to ``except Exception``.
    """
    global stop_threads
    global client_xplane
    try:
        stop_threads = True    # read_from_port polls this flag and exits
        thread.join()
        print('Close socket UDP XPlane')
        client_xplane.close()
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        sys.exit(2)
    sys.exit(0)
def handle_xplane(input_data, serial_arduino):
    """Dispatch one '#'-separated command line from the Arduino to X-Plane.

    Protocol: 'sendDREF#<dref>#<value>' writes a dataref;
    'getDREF#<dref>' reads one and echoes '<dref#value>' back over serial.
    Any failure resets the X-Plane connection via init_XPlane().
    """
    global client_xplane
    if client_xplane is None:
        # Connection lost (or never made): re-establish and drop this command.
        print("WARNING: reset client_xplane")
        init_XPlane()
        return
    try:
        print("handle_xplane:", input_data)
        arrOperationXplane = input_data.split("#")
        if arrOperationXplane[0] == "sendDREF":
            dref = arrOperationXplane[1].strip()
            value = float(arrOperationXplane[2].strip())
            client_xplane.sendDREF(dref, value)
            print("sendDREF:", dref, value)
        elif arrOperationXplane[0] == "getDREF":
            dref = arrOperationXplane[1].strip()
            status_dref = client_xplane.getDREF(dref)
            print("getDREF:", dref, status_dref)
            # Frame the reply as '<name#value>' for the Arduino-side parser.
            toArduinoSerial = '<'+dref+'#'+str(status_dref[0])+'>'
            print (toArduinoSerial.encode())
            serial_arduino.write(toArduinoSerial.encode())
    except:
        # Bad frame, UDP timeout, serial failure, ... — reset the connection.
        print("ERROR: reset client_xplane")
        print("Unexpected error:", sys.exc_info()[0])
        init_XPlane()
        return
def read_from_port(serial_arduino):
    """Reader-thread main loop: forward complete serial lines to handle_xplane.

    Exits (closing the port) when the module-global stop_threads is set by
    exit_handler().
    """
    while True:
        global stop_threads
        if stop_threads:
            print ('Exiting thread...')
            if serial_arduino.is_open == True:
                print ('Close serial port')
                serial_arduino.close()
            break
        # Drain everything currently buffered before re-checking the flag.
        while serial_arduino.inWaiting() > 0:
            input_data = serial_arduino.readline().decode()
            if len(input_data) > 0:
                handle_xplane(input_data, serial_arduino)
def serial_arduino():
    """Open the Arduino serial port and start the background reader thread;
    also installs the SIGINT handler that shuts the thread down.

    Fixes vs. original: the local variable shadowed the function's own name
    (``serial_arduino = serial.Serial(...)`` inside ``def serial_arduino``),
    which made the code confusing and the function impossible to re-enter.
    """
    try:
        ser = serial.Serial(param_port, param_baud, timeout=None)
    except serial.serialutil.SerialException:
        print("Arduino not connected")
        sys.exit(2)
    reader = threading.Thread(target=read_from_port, args=(ser,))
    reader.start()
    # Bind the reader thread into the SIGINT handler for a clean shutdown.
    signal.signal(signal.SIGINT, partial(exit_handler, reader))
def verify_udp_xplane():
    """Probe the X-Plane connection with a harmless dataref read; on failure
    fall back into the init_XPlane() retry loop (mutual recursion)."""
    global client_xplane
    try:
        client_xplane.getDREF("sim/test/test_float")
    except:
        print ("Error connection to X-Plane.")
        init_XPlane() # loop - connection to X-Plane
def init_XPlane():
    """Create the XPlaneConnect UDP client, retrying via verify_udp_xplane()
    until X-Plane answers."""
    global client_xplane
    try:
        client_xplane = xpc.XPlaneConnect(timeout = 1000)
    except:
        # NOTE(review): original indentation was lost in extraction; this
        # assumes the re-verify + back-off happen only on failure — confirm.
        print ("Error establishing connection to X-Plane.")
        verify_udp_xplane()
        sleep(1)
if __name__ == "__main__":
get_params()
init_XPlane()
serial_arduino()
# Keep the main thread running, otherwise signals are ignored.
while True:
sleep(0.5)
|
#!/usr/bin/env python3
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Package metadata; install with `pip install .`.
setup(name='Medinfore',
      version='1.1.0',
      description='Indexing and searching engine in a medical corpus',
      author='Michael Domingues',
      author_email='dominguesjust@gmail.com',
      license='MIT',
      keywords='ehr api indexing',
      # NOTE(review): several of these are not valid PyPI trove classifiers
      # (e.g. 'Development Status :: 1 - Prototype', 'Console & Web') — confirm.
      classifiers=[
          'Development Status :: 1 - Prototype',
          'Environment :: Console & Web',
          'Natural Language :: English',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 3.5',
          'Topic :: Software Development :: Information Retrieval',
      ],
      packages=['api', 'documenthandler', 'languagemodel', 'redisdb', 'utilities'],
      install_requires=['flask', 'nltk', 'numpy', 'gensim', 'pprint', 'redis', 'PyMedTermino']
      )
|
import os
import json
import time
import numpy as np
import pandas as pd
import shappack
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA, KernelPCA
## Parameters ###################################################
# Metrics dump produced by the chaos-engineering workflow run.
FILE_PATH = "./data/2021-08-18-argowf-chaos-b2qdj-user_pod-memory-hog_0.json"
PLOTS_NUM = 120  # number of most-recent samples kept per metric
# Container metric names used as features ("container_" prefix stripped).
TARGET_METRICS = ["cpu_usage_seconds_total",
                  "memory_working_set_bytes",
                  "network_transmit_bytes_total",
                  "network_receive_bytes_total",
                  "fs_writes_total",
                  "fs_reads_total"]
PARAMS = {
    "n_components": 0.8  # PCA: keep enough components to explain 80% variance
}
ANALYSIS_PERIOD = 20  # trailing samples treated as the test window
N_WORKERS = 1         # parallelism for the SHAP kernel explainer
SEED = 123
np.random.seed(SEED)  # reproducibility of the explainer's sampling
class ShapPCA(object):
    """PCA-based anomaly scorer: a sample's score is its mean squared
    reconstruction error under a PCA model fitted on training data."""

    def __init__(self, train_data, model=PCA(n_components=0.80)):
        self.model = model.fit(train_data)

    def predict(self, data):
        """Return the mean squared reconstruction error per sample (1-D)."""
        samples = np.asarray(data)
        reconstructed = self._reconstruct_data(samples)
        scores = np.mean((samples - reconstructed) ** 2, axis=1)
        return np.asarray(scores)

    def reconstruction_error(self, data):
        """Return the element-wise squared reconstruction error."""
        samples = np.asarray(data)
        return (samples - self._reconstruct_data(samples)) ** 2

    def _reconstruct_data(self, data):
        # Project onto the principal subspace and map back.
        return self.model.inverse_transform(self.model.transform(data))
def read_file(file_path):
    """Load a metrics-dump JSON into a DataFrame.

    Produces one column per (container, metric) pair, restricted to
    TARGET_METRICS and the last PLOTS_NUM samples; NaNs are filled with
    the column mean and values rounded to 4 decimals.

    Fixes vs. original: ``np.float`` was a deprecated alias removed in
    NumPy 1.20+; the builtin ``float`` yields the identical float64 dtype.
    """
    with open(file_path) as f:
        raw_data = json.load(f)
    containers_data = raw_data["containers"]
    data_df = pd.DataFrame()
    for con in containers_data:
        # Skip infrastructure containers that are not analysis targets.
        if con in ["queue-master", "rabbitmq", "session-db"]:
            continue
        for metric in containers_data[con]:
            container_name = metric["container_name"]
            metric_name = metric["metric_name"].replace("container_", "")
            if metric_name not in TARGET_METRICS:
                continue
            column_name = "{}_{}".format(container_name, metric_name)
            # values are (timestamp, value) pairs; keep the value column.
            data_df[column_name] = np.array(metric["values"], dtype=float)[:, 1][-PLOTS_NUM:]
    data_df = data_df.round(4).fillna(data_df.mean())
    return data_df
def preprocessing(data_df):
    """Standardize every column to zero mean / unit variance (returns ndarray)."""
    return StandardScaler().fit_transform(data_df)
if __name__ == '__main__':
    data_df = read_file(FILE_PATH)
    data_df = preprocessing(data_df)   # note: now an ndarray, not a DataFrame
    # Train on the head of the series; explain the trailing analysis window.
    train_data, test_data = data_df[:-ANALYSIS_PERIOD], data_df[-ANALYSIS_PERIOD:]
    start = time.time()
    model = ShapPCA(train_data, model=PCA(n_components=PARAMS["n_components"]))
    time_train = round(time.time() - start, 6)
    print(f"Training: {time_train}")
    start = time.time()
    # Kernel SHAP attributes each test sample's anomaly score to the input metrics.
    explainer = shappack.KernelExplainer(model.predict, train_data)
    shap_value = explainer.shap_values(test_data, n_workers=N_WORKERS)
    time_shap = round(time.time() - start, 3)
    print(f"SHAP: {time_shap}")
|
# https://stackoverflow.com/questions/30135091/write-thread-safe-to-file-in-python
from threading import Thread
from thread_writer import ThreadWriter
from pydng.core import RPICAM2DNG
import document_handler
class ThreadRawConverter:
    """Converts a raw camera stream to DNG on a background thread, writing
    the result through a thread-safe file writer.

    Fixes vs. original: ``self.stream != None`` replaced with the idiomatic
    (and override-safe) identity test ``is not None``; documentation added.
    """

    def __init__(self, config, stream, filename):
        self.config = config
        self.json_colour_profile = document_handler.load_colour_profile(config)
        self.filename = filename
        self.stream = stream
        self.finished = False  # set once the single conversion pass completes
        self.thread_writer = ThreadWriter(self.filename, 'wb')
        Thread(name = "ThreadRawConverter", target=self.internal_converter).start()

    def internal_converter(self):
        """Worker body: convert the stream once (if present), then close the writer."""
        while not self.finished:
            if self.stream is not None:
                # TODO: Copy over the EXIF data
                output = RPICAM2DNG().convert(self.stream, json_camera_profile=self.json_colour_profile)
                self.thread_writer.write(output)
            # assumes the loop is meant to run exactly once (flag set
            # unconditionally) — original indentation lost; confirm.
            self.finished = True
        self.thread_writer.close()
|
#!/usr/bin/env python2
import math
import sys
import random
# Emits a random test script for an ordered-set data structure: first the
# initial set contents, then a shuffled stream of find/insert/delete/range
# operations. Python 2 syntax (print statements).
if len(sys.argv) < 7:
    print "Usage: %s num spread n_find n_insert n_delete n_range [seed]" % sys.argv[0]
    exit(1)
n = int(sys.argv[1])       # initial element count
spread = int(sys.argv[2])  # values are drawn from [-spread, spread]
find = int(sys.argv[3])
insert = int(sys.argv[4])
delete = int(sys.argv[5])
rng = int(sys.argv[6])     # number of range queries
if len(sys.argv) > 7:
    random.seed(sys.argv[7])
if spread < n:
    spread = n  # need at least n distinct candidate values
all_numbers = list(range(-spread, spread+1))
random.shuffle(all_numbers)
# Header: initial size and total operation count.
print n, find+insert+delete+rng
print '\n'.join(map(str, sorted(all_numbers[:n])))
ops = ['f' for _ in range(find)] + ['i' for _ in range(insert)] + \
    ['d' for _ in range(delete)] + ['r' for _ in range(rng)]
random.shuffle(ops)
for op in ops:
    if op != 'r':
        print op, random.randint(-spread, spread)
    else:
        # Range query: emit the two endpoints in sorted order.
        a, b = random.randint(-spread, spread), random.randint(-spread, spread)
        if a > b:
            a, b = b, a
        print op, a, b
|
# Text used to demonstrate writing to (and later reading from) a file.
poem = '''\
Programming is fun
When the work is done
if you wanna make your work also fun:
use Python!
'''
# The above phrase is used to input text in the file poem.txt which
# goes to the default directory \exercies in this case

# Open for 'w'riting. The context manager closes the file even if the
# write raises (the original relied on a manual close() call).
with open('poem.txt', 'w') as f:
    f.write(poem)
# If no mode is specified, 'r'ead mode is assumed by default.
f = open('poem.txt')
while True:
    line = f.readline()
    if not line:  # an empty string signals end-of-file
        break
    # `line` keeps its trailing newline (it came from a file),
    # so suppress print's own newline.
    print(line, end='')
# close the file
f.close()
|
from cqc.pythonLib import CQCConnection
import sys
import time
sys.path.append("../..")
from backends.cqc_backend import CQCBackend
from components.host import Host
from components.network import Network
from objects.qubit import Qubit
import components.protocols as protocols
def main():
    """Integration test: classical messages between Alice and Bob receive ACKs.

    Builds a two-host network on the CQC backend, sends two classical
    messages with await_ack=True, then asserts that ACKs with sequence
    numbers 1 and 2 appear in Alice's classical message store.

    Fixes vs. original: corrected the typo in the user-facing success
    message ("succesfull" -> "successful") and removed dead commented-out
    debug prints.
    """
    backend = CQCBackend()
    network = Network.get_instance()
    nodes = ["Alice", "Bob", "Eve", "Dean"]
    network.start(nodes, backend)
    network.delay = 0.7
    hosts = {'alice': Host('Alice', backend),
             'bob': Host('Bob', backend)}
    network.delay = 0
    # A <-> B
    hosts['alice'].add_connection('Bob')
    hosts['bob'].add_connection('Alice')
    hosts['alice'].start()
    hosts['bob'].start()
    for h in hosts.values():
        network.add_host(h)
    # Each send blocks until its ACK arrives.
    hosts['alice'].send_classical(hosts['bob'].host_id, 'hello bob one', await_ack=True)
    hosts['alice'].send_classical(hosts['bob'].host_id, 'hello bob two', await_ack=True)
    saw_ack_1 = False
    saw_ack_2 = False
    messages = hosts['alice'].classical
    for m in messages:
        if m.content == protocols.ACK and m.seq_num == 1:
            saw_ack_1 = True
        if m.content == protocols.ACK and m.seq_num == 2:
            saw_ack_2 = True
        if saw_ack_1 and saw_ack_2:
            break
    assert saw_ack_1
    assert saw_ack_2
    print("All tests successful!")
    network.stop(True)
    exit()


if __name__ == '__main__':
    main()
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from uw_sws_graderoster.dao import SWS_GradeRoster_DAO
class SWSGradeRosterTestDao(TestCase):
    """Unit tests for the SWS graderoster DAO."""

    def test_custom_headers(self):
        """A plain GET adds no custom headers (returns None)."""
        # NOTE(review): assertEquals is a deprecated alias that was removed
        # in Python 3.12; assertEqual is the supported spelling.
        self.assertEqual(
            SWS_GradeRoster_DAO()._custom_headers('GET', '/', {}, None), None)
|
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from webapp.db import db
from webapp.news.models import News
def get_html(url):
    """Fetch *url* and return the response body; False on any network error."""
    try:
        response = requests.get(url)
        response.raise_for_status()
    except(requests.RequestException, ValueError):
        print("Сетевая ошибка")
        return False
    return response.text
def get_python_news():
    """Scrape the python.org blog listing and persist each entry via save_news.

    NOTE(review): always returns False, even on success — looks like a
    leftover; callers should not rely on the return value.
    """
    html = get_html("https://www.python.org/blogs/")
    if html:
        soup = BeautifulSoup(html, 'html.parser')
        # assumes the page keeps its <ul class="list-recent-posts"> layout
        # with one <a> (title/url) and one <time> per <li> — confirm.
        all_news = soup.find('ul', class_='list-recent-posts').findAll('li')
        for news in all_news:
            title = news.find('a').text
            url = news.find('a')['href']
            published = news.find('time').text
            try:
                # Dates look like 'January 01, 2020'; fall back to "now"
                # if the site changes its date format.
                published = datetime.strptime(published, '%B %d, %Y')
            except ValueError:
                published = datetime.now()
            save_news(title=title, url=url, published=published)
    return False
def save_news(title, url, published):
    """Insert a news row unless one with the same URL already exists.

    Fixes vs. original: removed a leftover debug ``print(news_exists)``
    that polluted stdout on every call.
    """
    news_exists = News.query.filter(News.url == url).count()
    if not news_exists:
        news_news = News(title=title, url=url, published=published)
        db.session.add(news_news)
        db.session.commit()
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import os
import optparse
import cPickle as pickle
import numpy as np
def get_options():
    """Command-line options: --root is the prefix of the .flight/.test
    pickle files to compare."""
    parser = optparse.OptionParser()
    parser.add_option("--root",
                      default='regr_vals',
                      help="Input prefix")
    return parser.parse_args()

opt, args = get_options()
# Regression comparison of pickled telemetry statistics: a 'flight'
# (reference) run vs a 'test' run. Python 2 syntax (print statements, cPickle).
flight = pickle.load(open(opt.root + '.flight'))
test = pickle.load(open(opt.root + '.test'))
# Telemetry mnemonics (MSIDs) to compare.
msids = ('1crat', 'fptemp_11', 'orbitephem0_x', 'sim_z', 'tephin', 'cvcductr', 'dp_dpa_power')
# Per-statistic arrays to compare for each MSID.
attrs = ('times', 'vals', 'quals', 'stds', 'mins', 'maxes', 'means',
         'p01s', 'p05s', 'p16s', 'p50s', 'p84s', 'p95s', 'p99s')
for msid in msids:
    # 'dat' = full-resolution, 'dat5' = 5-minute, 'datd' = daily statistics.
    for stat in ('dat', 'dat5', 'datd'):
        for attr in attrs:
            try:
                f = flight[msid][stat]
                t = test[msid][stat]
            except KeyError:
                print 'MSID={} stat={} missing in flight or test data'.format(msid, stat)
                continue
            if attr not in f:
                continue
            print 'Checking', msid, stat, attr,
            if len(f[attr]) != len(t[attr]):
                print 'Length mismatch:', len(f[attr]), len(t[attr])
                continue
            if attr == 'quals':
                ok = f['quals'] == t['quals']
            else:
                # Numeric arrays must agree to within an absolute 1e-6.
                ok = np.max(np.abs(f[attr] - t[attr])) < 1e-6
            print ('OK' if ok else 'NOT OK')
|
import time
import os
def check_line(line: str):
    """Score one navigation line (Advent of Code 2021, day 10).

    Returns ``(corruption_score, completion_score)``; for a corrupted line
    only the first is non-zero, for an incomplete line only the second
    (both are 0 for a balanced line).
    """
    opening = {"(": 1, "[": 2, "{": 3, "<": 4}
    closing = {")": ("(", 3), "]": ("[", 57), "}": ("{", 1197), ">": ("<", 25137)}
    pending = []
    for char in line:
        if char in opening:
            pending.append(char)
        else:
            expected, penalty = closing[char]
            if pending.pop() != expected:
                return penalty, 0
    # Incomplete line: score the unclosed openers from innermost outward.
    score = 0
    for char in reversed(pending):
        score = score * 5 + opening[char]
    return 0, score


def check_all_lines(lines: list[str]):
    """Total corruption score (part 1) and the median completion score
    (part 2) over all lines."""
    corrupt_total = 0
    completions = []
    for line in lines:
        corrupt, completion = check_line(line)
        corrupt_total += corrupt
        if completion:
            completions.append(completion)
    completions.sort()
    return corrupt_total, completions[len(completions) // 2]
def parse_input(filename: str) -> list[str]:
    """Read the puzzle input, returning its lines stripped of surrounding whitespace."""
    with open(filename, "r") as handle:
        return [raw.strip() for raw in handle]
def main(input_filename: str):
    """Run both puzzle parts on the given input file and print the answers
    plus parse/evaluate timings in milliseconds."""
    start_time = time.time()
    lines = parse_input(input_filename)
    part1_start = time.time()
    # A single pass yields both the corruption total (part 1) and the
    # median completion score (part 2).
    corrupt_score, completion_score = check_all_lines(lines)
    end_time = time.time()
    print(f"Part 1: Corruption score: {corrupt_score}")
    print(f"Part 2: Completion score: {completion_score}")
    print("Elapsed Time:")
    print(f" Parsing: {(part1_start - start_time) * 1000:.2f} ms")
    print(f" Part 1 + Part 2: {(end_time - part1_start) * 1000:.2f} ms (evaluation is combined today)")
    print(f" Total: {(end_time - start_time) * 1000:.2f} ms")
if __name__ == "__main__":
os.chdir(os.path.split(__file__)[0])
main("../../inputs/2021/day10.txt")
|
from typing import DefaultDict
import tensorflow as tf
import numpy as np
import pytest
from bard.midi import tokenizer, vocabulary
MAX_SIZE = 1000        # NOTE(review): unused in this test module — confirm
DEFAULT_DECODED = '0'  # token the tokenizer decodes unknown ids to
@pytest.mark.parametrize("inputs, expected", [
    (["a", "a", "b", "c", "a", "d", "f"], 5),
    (["a", "b", "c", "d", "e", "f"], 6),
])
def test_tokenizer_has_correct_vocab_size_with_sequence(inputs, expected):
    # Vocabulary size equals the number of *distinct* input tokens.
    tok = tokenizer.MidiTokenizer(inputs, default_decoded=DEFAULT_DECODED)
    assert bool(tf.reduce_all(tok.vocab_size() == expected))
@pytest.mark.parametrize("inputs, expected", [
    (["a", "a", "b", "c", "a", "d", "f"], tf.constant([3, 3, 4, 5, 3, 6, 7], dtype=tf.int64)),
    (["a", "b", "b", "c", "d", "d", "d", "e"], tf.constant([3, 4, 4, 5, 6, 6, 6, 7], dtype=tf.int64))
])
def test_tokenizer_encodes_correctly_with_sequence(inputs, expected):
    # Ids are assigned in first-seen order starting at 3 (ids 0-2 are
    # presumably reserved, e.g. pad/start/end — confirm in vocabulary).
    tok = tokenizer.MidiTokenizer(inputs, default_decoded=DEFAULT_DECODED)
    encoded = tok.encode(tf.constant(inputs))
    assert bool(tf.reduce_all(encoded == expected))
@pytest.mark.parametrize("inputs", [
    (["a", "a", "b", "c", "a", "d", "f"]), (["a", "b", "b", "c", "d", "d", "d", "e"])
])
def test_tokenizer_decodes_to_same_sequence(inputs):
    # Round-trip property: decode(encode(x)) must reproduce x exactly.
    tok = tokenizer.MidiTokenizer(inputs, default_decoded=DEFAULT_DECODED)
    encoded = tok.encode(tf.constant(inputs))
    assert bool(tf.reduce_all(tf.constant(inputs) == tok.decode(encoded)))
|
# sv2020.04.07
#
# class Cross_Entropy()
# Compute cross entropy across each feature for feature selection
#
# three methods included
# compute:
# Manimaran A, Ramanathan T, You S, et al. Visualization, Discriminability and Applications of Interpretable Saak Features[J]. 2019.
# KMeans_Cross_Entropy
import numpy as np
import math
import sklearn
from sklearn.cluster import KMeans,MiniBatchKMeans
class Cross_Entropy():
    """Cross-entropy based scoring of individual features for selection.

    Each feature is discretized into `num_bin` bins (uniform binning,
    KMeans binning, or MiniBatchKMeans clustering of the raw feature) and a
    bin-by-class probability table is formed; the cross entropy of that
    table measures how well the feature separates the classes
    (lower = more discriminative).

    Reference: Manimaran A., Ramanathan T., You S., et al. "Visualization,
    Discriminability and Applications of Interpretable Saak Features", 2019.
    """

    def __init__(self, num_class, num_bin=10):
        # Number of target classes and number of histogram bins per feature.
        self.num_class = int(num_class)
        self.num_bin = int(num_bin)

    def bin_process(self, x, y):
        """Uniformly bin the 1-D feature `x` and return, for each bin, the
        class label that dominates it (argmax of normalized class counts).

        Returns an all -1 vector when `x` is constant (no information).

        :param x: (n_samples,) feature values.
        :param y: (n_samples,) integer labels in [0, num_class).
        """
        if np.max(x) == np.min(x):
            return -1 * np.ones(self.num_bin)
        # Rescale into [0, num_bin] and truncate to integer bin indices.
        x = ((x - np.min(x)) / (np.max(x) - np.min(x))) * self.num_bin
        mybin = np.zeros((self.num_bin, self.num_class))
        b = x.astype('int64')
        # The maximum value maps exactly onto num_bin; fold it into the last bin.
        b[b == self.num_bin] -= 1
        for i in range(b.shape[0]):
            mybin[b[i], y[i]] += 1.
        for l in range(0, self.num_class):
            p = np.array(y[y == l]).shape[0]
            # Guard: classes absent from `y` previously caused a divide-by-zero.
            if p > 0:
                mybin[:, l] /= float(p)
        return np.argmax(mybin, axis=1)

    def kmeans_process(self, x, y):
        """Like `bin_process`, but assigns bins via 1-D KMeans clustering."""
        # Cluster the n samples as 1-D points: shape (n_samples, 1).
        # Bug fix: was x.reshape(1, -1), i.e. a single n-dimensional sample,
        # which cannot be partitioned into num_bin clusters.
        kmeans = KMeans(n_clusters=self.num_bin, random_state=0).fit(x.reshape(-1, 1))
        mybin = np.zeros((self.num_bin, self.num_class))
        b = kmeans.labels_.astype('int64')
        b[b == self.num_bin] -= 1
        for i in range(b.shape[0]):
            mybin[b[i], y[i]] += 1.
        for l in range(0, self.num_class):
            p = np.array(y[y == l]).shape[0]
            if p > 0:  # same absent-class guard as bin_process
                mybin[:, l] /= float(p)
        return np.argmax(mybin, axis=1)

    def compute_prob(self, x, y):
        """For each class and feature, the fraction of bins won by that class.

        :param x: (n_samples, n_features) float array.
        :param y: (n_samples, 1) integer label array.
        :returns: (num_class, n_features) array of bin-win fractions.
        """
        prob = np.zeros((self.num_class, x.shape[1]))
        for k in range(0, x.shape[1]):
            mybin = self.bin_process(x[:, k], y[:, 0])
            # mybin = self.kmeans_process(x[:, k], y[:, 0])
            for l in range(0, self.num_class):
                winners = np.array(mybin[mybin == l]).shape[0]
                prob[l, k] = winners / float(self.num_bin)
        return prob

    def compute(self, x, y, class_weight=None):
        """Cross entropy of each feature, averaged over classes.

        :param x: (n_samples, n_features) feature matrix.
        :param y: (n_samples,) integer class labels.
        :param class_weight: optional per-class weights, length num_class.
        :returns: (n_features,) score array; lower = more discriminative.
        """
        x = x.astype('float64')
        y = y.astype('int64')
        y = y.reshape(-1, 1)
        prob = self.compute_prob(x, y)
        # Negative log-probability normalized by log(num_class);
        # the epsilon avoids log(0) for never-winning classes.
        prob = -1 * np.log10(prob + 1e-5) / np.log10(self.num_class)
        y = np.moveaxis(y, 0, 1)
        H = np.zeros((self.num_class, x.shape[1]))
        for c in range(0, self.num_class):
            yy = y == c
            p = prob[c].reshape(prob.shape[1], 1)
            p = p.repeat(yy.shape[1], axis=1)
            H[c] += np.mean(yy * p, axis=1)
        # Bug fix: `class_weight != None` does an element-wise comparison when
        # an ndarray is passed; identity check is the correct test.
        if class_weight is not None:
            class_weight = np.array(class_weight)
            H *= class_weight.reshape(class_weight.shape[0], 1) * self.num_class
        return np.mean(H, axis=0)

    # New cross entropy based on MiniBatchKMeans clustering of the raw feature.
    def KMeans_Cross_Entropy(self, X, Y):
        """Cluster X into num_bin cells and score with normalized log-loss.

        Returns 0 when Y is already pure (a single class) and -1 when there
        are fewer samples than bins (score undefined).
        """
        if np.unique(Y).shape[0] == 1:  # already pure
            return 0
        if X.shape[0] < self.num_bin:
            return -1
        kmeans = MiniBatchKMeans(n_clusters=self.num_bin, random_state=0, batch_size=10000).fit(X)
        prob = np.zeros((self.num_bin, self.num_class))
        for i in range(self.num_bin):
            idx = (kmeans.labels_ == i)
            tmp = Y[idx]
            for j in range(self.num_class):
                prob[i, j] = float(tmp[tmp == j].shape[0]) / (float(Y[Y == j].shape[0]) + 1e-5)
        # Row-normalize to p(class | cluster); epsilon guards empty clusters.
        prob = prob / (np.sum(prob, axis=1).reshape(-1, 1) + 1e-5)
        y = np.eye(self.num_class)[Y.reshape(-1)]
        probab = prob[kmeans.labels_]
        return sklearn.metrics.log_loss(y, probab) / math.log(self.num_class)
if __name__ == "__main__":
    from sklearn import datasets
    from sklearn.model_selection import train_test_split

    # Smoke test on the sklearn digits set: score every pixel feature.
    print(" > This is a test example: ")
    digits = datasets.load_digits()
    features = digits.images.reshape((len(digits.images), -1))
    print(" input feature shape: %s" % str(features.shape))
    train_x, test_x, train_y, test_y = train_test_split(
        features, digits.target, test_size=0.2, stratify=digits.target)
    scorer = Cross_Entropy(num_class=10, num_bin=5)
    n_feat = train_x.shape[-1]
    scores = np.zeros(n_feat)
    for idx in range(n_feat):
        scores[idx] = scorer.KMeans_Cross_Entropy(train_x[:, idx].reshape(-1, 1), train_y)
        print(" --> KMeans ce: %s" % str(scores[idx]))
    print("------- DONE -------\n")
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# X.509v3 Certificates for Secure Shell Authentication
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc6187.txt
#
from pyasn1.type import univ
# Root PKIX arc; the `+ (n,)` operations below extend the OID by one node.
id_pkix = univ.ObjectIdentifier("1.3.6.1.5.5.7")
# id-kp: the extended-key-purpose arc under PKIX (1.3.6.1.5.5.7.3).
id_kp = id_pkix + (3,)
# RFC 6187 EKU OIDs for X.509v3 Secure Shell client/server authentication.
id_kp_secureShellClient = id_kp + (21,)
id_kp_secureShellServer = id_kp + (22,)
|
from JumpScale import j
import os
def _setup_stacktrace_hook():
    '''Set up SIGUSR2 signal handler which dumps stack traces of all threads'''
    # NOTE(review): this function uses Python 2 only constructs throughout
    # (generator .next(), func_code, the `thread` module) -- see inline notes.
    try:
        import signal
    except ImportError:
        # No signal support on current platform, ignore
        return
    # The dump is triggered by SIGUSR2.
    sig = signal.SIGUSR2
    def stderr():
        '''Coroutine which writes input to sys.stderr and a dump file,
        /tmp/pm_<PID>.stack'''
        # Each sink is a (write, flush, close) triple; failures while setting
        # up a sink are ignored so we dump to whatever is still available.
        outputs = list()
        try:
            import sys
            # sys.stderr must not be closed, hence the no-op close callable.
            outputs.append((sys.stderr.write, sys.stderr.flush, lambda: None, ))
        except Exception:
            pass
        try:
            import os
            name = '/tmp/pm_%d.stack' % os.getpid()
            fd = open(name, 'w')
            outputs.append((fd.write, fd.flush, fd.close, ))
        except Exception:
            pass
        try:
            # Messages arrive via send(); a None message ends the coroutine.
            while True:
                message = yield
                if message is None:
                    break
                for write, flush, _ in outputs:
                    try:
                        write(message)
                        flush()
                    except Exception:
                        pass
        finally:
            # Always close every sink, even if the coroutine is aborted.
            for _, _, close in outputs:
                try:
                    close()
                except Exception:
                    pass
    def getframes(output, frame):
        '''Get a list of all current frames
        This function tries to use sys._current_frames to get a list of the
        frames of every running thread and their thread ID. If this function is
        not available, the given frame will be returned using the string
        '<unknown>' as thread ID.
        '''
        # NOTE(review): this returns None when skipTraceback is set, but the
        # caller iterates the result unconditionally -- confirm that flag is
        # never set while the hook is armed, or iteration would raise.
        if j.application.skipTraceback:
            return None
        import sys
        # Using sys._current_frames for now
        # We could rewrite this using ctypes as well, see the implementation of
        # _PyThread_CurrentFrames at
        # http://svn.python.org/projects/python/trunk/Python/pystate.c
        current_frames = getattr(sys, '_current_frames', None)
        if not current_frames:
            output('Your system has no support to dump stacks of all threads\n')
            output('Only dumping interrupted frame\n')
            return (('<current>', frame, ), )
        else:
            return tuple(current_frames().items())
    def dump_proc_status(output):
        # Best-effort dump of /proc/<pid>/status (Linux only).
        import os.path
        procfile = '/proc/%d/status' % os.getpid()
        if not os.path.exists(procfile):
            # File doesn't exist, we're not running on Linux or something alike
            return
        try:
            fd = open(procfile, 'r')
        except Exception:
            # No permissions or something alike?
            # Funny if a process would have no permission on its own status proc
            # file, but anyway, better safe than sorry
            return
        try:
            data = fd.read()
        finally:
            fd.close()
        output('Dumping content of %s\n' % procfile)
        output('\n')
        output(data)
        output('\n')
    def handler_impl(output, num, frame):
        '''Implementation of the signal handler
        This will be called inside a try/except clause so the signal handler
        behaves correctly.
        '''
        import traceback
        output('Got signal %s\n' % str(num))
        output('Dumping current stack frame(s)\n')
        frames = getframes(output, frame)
        output('\n')
        try:
            # threading._active maps thread id -> Thread object (private API),
            # used only to attach thread names to the dumped stacks.
            from threading import _active as active_threads
        except ImportError:
            active_threads = dict()
        for threadid, frame in frames:
            title = None
            if threadid in active_threads:
                try:
                    name = active_threads[threadid].getName()
                except Exception:
                    pass
                else:
                    if name:
                        title = 'Thread %s (%s)' % (name, str(threadid))
            if not title:
                title = 'Thread %s' % str(threadid)
            output('%s\n%s\n' % (title, '=' * len(title)))
            try:
                # NOTE(review): `thread` is the Python 2 module name
                # (renamed to `_thread` in Python 3).
                import thread
                get_ident = thread.get_ident
            except (ImportError, AttributeError):
                # Fallback: a fresh object() never equals a thread id, so the
                # strip-own-frames branch below is simply skipped.
                get_ident = lambda: object()
            ident = get_ident()
            if threadid == get_ident():
                # We want to strip of ourself from the stacktrace
                orig_frame = frame
                while frame:
                    # If we found the frame of this 'handler' function
                    # NOTE(review): func_code is Python 2 (__code__ in Python 3).
                    if frame.f_code == handler.func_code:
                        # Go one frame up and return
                        frame = frame.f_back
                        break
                    # Else go up one more frame
                    frame = frame.f_back
                # If we were not able to find the stackframe we were looking
                # for, just use the original one
                if not frame:
                    frame = orig_frame
            # Format and print backtrace
            stack = ''.join(traceback.format_stack(frame))
            output(stack)
            output('\n')
        try:
            dump_proc_status(output)
        except Exception:
            pass
    def handler(num, frame):
        '''Signal handler which dumps Python stacks of all running threads'''
        # Prime the coroutine, then use its send() as the write callable.
        output = stderr()
        # NOTE(review): .next() is Python 2 only (next(output) in Python 3).
        output.next()
        output = output.send
        try:
            handler_impl(output, num, frame)
        except Exception as e:
            output('An exception occurred while handling signal %d\n' % num)
            output('Exception information:\n')
            output('%s\n\n' % str(e))
        # Sending None makes the coroutine close its output sinks.
        try:
            output(None)
        except StopIteration:
            pass
    # Install signal handler, if none set
    # Check whether a handler is set
    orig_handler = signal.getsignal(sig)
    if orig_handler != signal.SIG_DFL:
        # Respect an existing (non-default) handler and install nothing.
        return
    # Set up handler
    old = signal.signal(sig, handler)
# Set up our signal handler
try:
_setup_stacktrace_hook()
except Exception as e:
pass
# Remove the no longer needed function
del _setup_stacktrace_hook
from .Dirs import Dirs
j.dirs=Dirs()
from . import logging
from .Application import Application
from . import system
j.system.installtools=j.base.fs.installtools
from . import enumerators
j.application=Application()
from . import base
from . import baseclasses
# from JumpScale.core.baseclasses.BaseEnumeration import enumerations
# j.enumerators=enumerations
from PlatformTypes import PlatformTypes
j.system.platformtype=PlatformTypes()
from . import pmtypes
pmtypes.register_types()
j.basetype=pmtypes.register_types()
from . import errorhandling
# j.pm_hooked_extension_dirs = dict()
import JumpScale.baselib.codeexecutor
import JumpScale.baselib.jpackages
import JumpScale.baselib.tags
import JumpScale.baselib.platforms
import JumpScale.core.config
import JumpScale.baselib.hrd
import JumpScale.baselib.startupmanager
from . import shellconfig
from . import console
from . import gui
#reinit whoAmI after dirs are loaded
j.application.initWhoAmI()
# from extensions.PMExtensions import PMExtensions
# from JumpScale.core.JumpScale import JumpScale
# class InteractiveExtensions(PMExtensionsGroup):
# def __init__(self):
# self._init=False
# self.__init_properties__()
# self.pm_name="i"
# self.pm_location="i"
# def _initExtensions(self):
# if self._init==False:
# self._pmExtensions = PMExtensions(self, 'i.', suppressAlreadyMountedError=True)
# ##self._pmExtensions.load(j.system.fs.joinPaths(self.dirs.extensionsDir,"interactive"))
# self._pmExtensions.load(j.system.fs.joinPaths(j.dirs.extensionsDir,"core"))
# self._init=True
# def extensionsLoad(self,extensionsDir):
# self._pmExtensions.load(extensionsDir)
|
"""The organization command."""
from json import dumps
from .base import Base
from bonita.api.bonita_client import BonitaClient
class Organization(Base):
    """Manage organization"""

    def run(self):
        """Dispatch `bonita organization [import <filename>|export|delete]`."""
        self.bonita_client = BonitaClient(self.loadConfiguration())
        # First matching option wins; order mirrors the CLI grammar above.
        dispatch = (
            ('import', self.importOrganization),
            ('export', self.exportOrganization),
            ('delete', self.delete),
        )
        for option, action in dispatch:
            if self.hasOption(option):
                action()
                return
        print('Nothing to do.')

    def importOrganization(self):
        """Import an organization from the file named on the command line."""
        code, payload = self.bonita_client.importOrganization(self.options['<filename>'])
        self.processResults(code, payload)

    def exportOrganization(self):
        """Export the current organization."""
        code, payload = self.bonita_client.exportOrganization()
        self.processResults(code, payload)

    def delete(self):
        """Delete the organization; only the status code is reported."""
        code, payload = self.bonita_client.deleteOrganization()
        self.processResultCode(code)
|
# bdateutil
# ---------
# Adds business day logic and improved data type flexibility to
# python-dateutil. 100% backwards compatible with python-dateutil,
# simply replace dateutil imports with bdateutil.
#
# Author: ryanss <ryanssdev@icloud.com>
# Website: https://github.com/ryanss/bdateutil
# License: MIT (see LICENSE file)
import unittest
from datetime import date, datetime
import holidays
from bdateutil import isbday
from bdateutil import relativedelta
from bdateutil import parse
from bdateutil.rrule import *
from testdateutil import *
class TestIsBday(unittest.TestCase):
    def test_isbday(self):
        saturday = date(2014, 1, 4)
        new_years = date(2014, 1, 1)
        # A weekend day is never a business day.
        self.assertFalse(isbday(saturday))
        # New Year's Day 2014 is a weekday, so it counts by default...
        self.assertTrue(isbday(new_years))
        # ...but not once the US holiday calendar is supplied.
        self.assertFalse(isbday(new_years, holidays=holidays.US()))
class TestRelativeDelta(unittest.TestCase):
    """Tests for bdateutil's relativedelta, which adds a business-day
    (`bdays`) component on top of dateutil's relativedelta.

    Fix: `test_eq` and `test_ne` each contained verbatim-duplicated
    assertion lines; the duplicates are removed.
    """

    def test_init(self):
        # Constructing from two dates fills in calendar and business deltas.
        self.assertEqual(relativedelta(date(2014, 1, 7), date(2014, 1, 3)),
                         relativedelta(days=4, bdays=2))
        self.assertEqual(relativedelta(date(2014, 1, 31), date(2014, 1, 1)),
                         relativedelta(days=30, bdays=22))
        self.assertEqual(relativedelta(date(2014, 2, 1), date(2014, 1, 1)),
                         relativedelta(months=1, bdays=23))
        self.assertEqual(relativedelta(date(2014, 2, 2), date(2014, 1, 1)),
                         relativedelta(months=1, days=1, bdays=23))
        # Reversed arguments yield the negated delta.
        self.assertEqual(relativedelta(date(2014, 1, 1), date(2014, 2, 2)),
                         relativedelta(months=-1, days=-1, bdays=-23))

    def test_add(self):
        rd1 = relativedelta(years=+1, months=+2, bdays=+3, days=+4)
        rd2 = relativedelta(years=+2, months=-3, bdays=+4, days=+5)
        rd3 = relativedelta(years=+3, months=-1, bdays=+7, days=+9)
        self.assertEqual(rd1 + rd2, rd3)
        self.assertEqual(relativedelta(bdays=3) + date(2014, 1, 3),
                         date(2014, 1, 8))
        rd4 = relativedelta(years=+1, months=+2, days=+1)
        rd5 = relativedelta(years=+4, months=+1, bdays=+7, days=+10)
        self.assertEqual(rd3 + rd4, rd5)
        # A string operand is parsed before the addition.
        self.assertEqual("2014-01-01" + relativedelta(weekday=FR),
                         datetime(2014, 1, 3))

    def test_radd(self):
        self.assertEqual(date(2014, 1, 3) + relativedelta(bdays=2),
                         date(2014, 1, 7))
        self.assertEqual(date(2014, 1, 7) + relativedelta(bdays=-2),
                         date(2014, 1, 3))
        self.assertEqual(date(2014, 2, 3) + relativedelta(bdays=-19),
                         date(2014, 1, 7))

    def test_sub(self):
        rd1 = relativedelta(years=+1, months=+2, bdays=+3, days=+4)
        rd2 = relativedelta(years=+2, months=-3, bdays=+4, days=+5)
        rd3 = relativedelta(years=-1, months=+5, bdays=-1, days=-1)
        self.assertEqual(rd1 - rd2, rd3)

    def test_rsub(self):
        self.assertEqual(date(2014, 1, 7) - relativedelta(bdays=2),
                         date(2014, 1, 3))
        self.assertEqual(date(2014, 1, 3) - relativedelta(bdays=-2),
                         date(2014, 1, 7))
        self.assertEqual(date(2014, 2, 3) - relativedelta(bdays=19),
                         date(2014, 1, 7))

    def test_neg(self):
        self.assertEqual(-relativedelta(years=+1, bdays=-3),
                         relativedelta(years=-1, bdays=+3))

    def test_bool(self):
        # Any non-zero component makes the delta truthy.
        self.assertTrue(relativedelta(bdays=1))
        self.assertTrue(relativedelta(days=1))
        self.assertFalse(relativedelta())

    def test_mul(self):
        self.assertEqual(relativedelta(years=+1, bdays=-3) * 3,
                         relativedelta(years=+3, bdays=-9))
        self.assertEqual(relativedelta(years=+1, bdays=-3) * -3,
                         relativedelta(years=-3, bdays=+9))
        self.assertEqual(relativedelta(years=+1, bdays=-3) * 0,
                         relativedelta(years=0, bdays=0))

    def test_rmul(self):
        self.assertEqual(3 * relativedelta(years=+1, bdays=-3),
                         relativedelta(years=+3, bdays=-9))
        self.assertEqual(-3 * relativedelta(years=+1, bdays=-3),
                         relativedelta(years=-3, bdays=+9))
        self.assertEqual(0 * relativedelta(years=+1, bdays=-3),
                         relativedelta(years=0, bdays=0))

    def test_eq(self):
        r1 = relativedelta(years=1, months=2, days=3, bdays=1,
                           hours=4, minutes=5, seconds=6, microseconds=7)
        r2 = relativedelta(years=1, months=2, days=3, bdays=1,
                           hours=4, minutes=5, seconds=6, microseconds=7)
        self.assertEqual(r1, r2)
        self.assertTrue(r1 == r2)
        r2.days = 4
        self.assertNotEqual(r1, r2)
        self.assertFalse(r1 == r2)
        r2.days = 3
        r2.bdays = 0
        self.assertNotEqual(r1, r2)
        self.assertFalse(r1 == r2)
        self.assertEqual(relativedelta(), relativedelta())
        self.assertTrue(relativedelta() == relativedelta())
        # `days` and `bdays` are distinct components.
        self.assertNotEqual(relativedelta(days=1), relativedelta(bdays=1))
        self.assertFalse(relativedelta() == relativedelta(months=1))

    def test_ne(self):
        r1 = relativedelta(years=1, months=2, days=3, bdays=1,
                           hours=4, minutes=5, seconds=6, microseconds=7)
        r2 = relativedelta(years=1, months=2, days=3, bdays=1,
                           hours=4, minutes=5, seconds=6, microseconds=7)
        self.assertFalse(r1 != r2)
        r2.days = 4
        self.assertTrue(r1 != r2)
        r2.days = 3
        r2.bdays = 0
        self.assertTrue(r1 != r2)
        self.assertFalse(relativedelta() != relativedelta())
        self.assertTrue(relativedelta() != relativedelta(months=1))

    def test_div(self):
        self.assertEqual(relativedelta(years=+3, bdays=-9) / 3,
                         relativedelta(years=+1, bdays=-3))
        self.assertEqual(relativedelta(years=+3, bdays=-9) / -3,
                         relativedelta(years=-1, bdays=+3))
        self.assertRaises(ZeroDivisionError,
                          lambda: relativedelta(bdays=-3) / 0)

    def test_truediv(self):
        self.assertEqual(relativedelta(years=+4, bdays=-10) / 3.0,
                         relativedelta(years=+1, bdays=-3))

    def test_repr(self):
        rd1 = relativedelta(years=+1, months=+2, days=-3)
        self.assertEqual(str(rd1),
                         "relativedelta(years=+1, months=+2, days=-3)")
        rd2 = relativedelta(years=+1, months=+2, bdays=-7)
        self.assertEqual(str(rd2),
                         "relativedelta(years=+1, months=+2, bdays=-7)")
        rd3 = relativedelta(years=-1, months=-2, bdays=+7)
        self.assertEqual(str(rd3),
                         "relativedelta(years=-1, months=-2, bdays=+7)")
        rd4 = relativedelta(year=2014, month=1, day=2)
        self.assertEqual(str(rd4),
                         "relativedelta(year=2014, month=1, day=2)")
class TestParser(unittest.TestCase):
    def test_timestamp(self):
        # Unix timestamps are accepted directly.
        self.assertEqual(parse(1388577600).date(), date(2014, 1, 1))

    def test_parserinfo(self):
        month_first = datetime(2014, 1, 2)
        day_first = datetime(2014, 2, 1)
        self.assertEqual(parse("1/2/2014"), month_first)
        self.assertEqual(parse(b"1/2/2014"), month_first)
        # dayfirst can be passed as a keyword or through a parserinfo object.
        self.assertEqual(parse("1/2/2014", dayfirst=True), day_first)
        self.assertEqual(parse("1/2/2014", parserinfo(dayfirst=True)), day_first)

    def test_exceptions(self):
        # Unparseable text and unsupported argument types raise distinctly.
        self.assertRaises(ValueError, lambda: parse("abc"))
        self.assertRaises(TypeError, lambda: parse(['a', 'b', 'c']))
class TestRRule(unittest.TestCase):
    def test_bdaily(self):
        # First four business days of 2014 (Jan 4-5 fall on a weekend).
        expected = [datetime(2014, 1, 1, 0, 0),
                    datetime(2014, 1, 2, 0, 0),
                    datetime(2014, 1, 3, 0, 0),
                    datetime(2014, 1, 6, 0, 0)]
        start = parse("2014-01-01")
        self.assertEqual(list(rrule(BDAILY, count=4, dtstart=start)), expected)

    def test_parse(self):
        expected = [datetime(2014, 1, 1, 0, 0),
                    datetime(2014, 1, 2, 0, 0),
                    datetime(2014, 1, 3, 0, 0),
                    datetime(2014, 1, 6, 0, 0)]
        # dtstart may be given as a plain string.
        self.assertEqual(list(rrule(BDAILY, count=4, dtstart="2014-01-01")),
                         expected)
        # An `until` bound (also parsed from a string) truncates the series.
        self.assertEqual(list(rrule(BDAILY, count=4, dtstart="2014-01-01",
                                    until="01/04/2014")),
                         expected[:3])
if __name__ == "__main__":
    # Run the whole suite when this module is executed directly.
    unittest.main()
|
from django.urls import path
from .schema import schema
from .views import AsyncGraphQLView
# Route the single "graphql" endpoint through the project's async view.
graphql_view = AsyncGraphQLView.as_view(schema=schema)

urlpatterns = [path("graphql", graphql_view)]
|
#
# This file is part of the FFEA simulation package
#
# Copyright (c) by the Theory and Development FFEA teams,
# as they appear in the README.md file.
#
# FFEA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FFEA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FFEA. If not, see <http://www.gnu.org/licenses/>.
#
# To help us fund FFEA development, we humbly ask that you cite
# the research papers on the package.
#
import sys, os
import numpy as np
import FFEA_kinetic_map
# Require exactly four arguments: two input maps, two output filenames.
if len(sys.argv) != 5:
    sys.exit("Usage: python " + os.path.basename(os.path.abspath(sys.argv[0])) + " [INPUT MAP A] [INPUT MAP B] [OUTPUT AB] [OUTPUT BA]")

# Command-line arguments (the length check above makes this unpack safe).
mapa_fname, mapb_fname, outmapab_fname, outmapba_fname = sys.argv[1:5]

# Load both kinetic maps.
map_a = FFEA_kinetic_map.FFEA_kinetic_map(mapa_fname)
map_b = FFEA_kinetic_map.FFEA_kinetic_map(mapb_fname)

# Compose each map with the other, in both orders.
map_ab = map_a.apply_to_map(map_b)
map_ba = map_b.apply_to_map(map_a)

# Write the composed maps out.
map_ab.write_to_file(outmapab_fname)
map_ba.write_to_file(outmapba_fname)
|
"""
User interface Controls for the layout.
"""
from __future__ import unicode_literals
from pygments.token import Token
from abc import ABCMeta, abstractmethod
from collections import defaultdict, namedtuple
from six import with_metaclass
from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER
from prompt_toolkit.filters import to_cli_filter
from prompt_toolkit.mouse_events import MouseEventTypes
from prompt_toolkit.search_state import SearchState
from prompt_toolkit.selection import SelectionType
from prompt_toolkit.utils import get_cwidth, SimpleLRUCache
from .highlighters import Highlighter
from .lexers import Lexer, SimpleLexer
from .processors import Processor, Transformation
from .screen import Screen, Char, Point
from .utils import token_list_width, split_lines
import time
__all__ = (
'BufferControl',
'FillControl',
'TokenListControl',
'UIControl',
)
class UIControl(with_metaclass(ABCMeta, object)):
    """
    Abstract base class for user interface controls.
    """
    def reset(self):
        """Reset internal state. Optional; the default is a no-op."""
        pass

    def preferred_width(self, cli, max_available_width):
        """Desired width, or `None` when the control has no preference."""
        return None

    def preferred_height(self, cli, width):
        """Desired height at the given width, or `None` for no preference."""
        return None

    def has_focus(self, cli):
        """
        Return ``True`` when this user control has the focus.

        If so, the cursor is drawn at the position reported in
        :meth:`.UIControl.create_screen`, unless that screen carries
        ``show_cursor=False``, in which case the cursor stays hidden.
        """
        return False

    @abstractmethod
    def create_screen(self, cli, width, height):
        """
        Render this control's content and return a :class:`.Screen` instance.

        Optionally, a ``(screen, highlighting)`` tuple may be returned
        instead, where ``highlighting`` is a dictionary of dictionaries
        mapping y -> x -> Token for positions needing that highlight.
        """

    def mouse_handler(self, cli, mouse_event):
        """
        Handle a mouse event.

        Returning `NotImplemented` signals that the event was not consumed
        here, letting the surrounding `Window` or the key bindings use it
        for scrolling or changing focus.

        :param cli: `CommandLineInterface` instance.
        :param mouse_event: `MouseEvent` instance.
        """
        return NotImplemented

    def move_cursor_down(self, cli):
        """
        Request to move the cursor one line down.

        Called when scrolling down while the cursor is already at the very
        top. No-op by default.
        """

    def move_cursor_up(self, cli):
        """Request to move the cursor one line up. No-op by default."""
class TokenListControl(UIControl):
    """
    Control that displays a list of (Token, text) tuples.
    (It's mostly optimized for rather small widgets, like toolbars, menus, etc...)

    Mouse support:
        The list of tokens can also contain tuples of three items, looking like:
        (Token, text, handler). When mouse support is enabled and the user
        clicks on this token, then the given handler is called. That handler
        should accept two inputs: (CommandLineInterface, MouseEvent) and it
        should either handle the event or return `NotImplemented` in case we
        want the containing Window to handle this event.

    :param get_tokens: Callable that takes a `CommandLineInterface` instance
        and returns the list of (Token, text) tuples to be displayed right now.
    :param default_char: default :class:`.Char` (character and Token) to use
        for the background when there is more space available than `get_tokens`
        returns.
    :param get_default_char: Like `default_char`, but this is a callable that
        takes a :class:`prompt_toolkit.interface.CommandLineInterface` and
        returns a :class:`.Char` instance.
    :param has_focus: `bool` or `CLIFilter`, when this evaluates to `True`,
        this UI control will take the focus. The cursor will be shown in the
        upper left corner of this control, unless `get_token` returns a
        ``Token.SetCursorPosition`` token somewhere in the token list, then the
        cursor will be shown there.
    :param wrap_lines: `bool` or `CLIFilter`: Wrap long lines.
    """
    def __init__(self, get_tokens, default_char=None, get_default_char=None,
                 align_right=False, align_center=False,
                 has_focus=False, wrap_lines=True):
        assert default_char is None or isinstance(default_char, Char)
        assert get_default_char is None or callable(get_default_char)
        # The two background-character options are mutually exclusive.
        assert not (default_char and get_default_char)

        # Normalize bool-or-filter arguments into CLI filters once, here.
        self.align_right = to_cli_filter(align_right)
        self.align_center = to_cli_filter(align_center)
        self._has_focus_filter = to_cli_filter(has_focus)
        self.wrap_lines = to_cli_filter(wrap_lines)
        self.get_tokens = get_tokens

        # Construct `get_default_char` callable.
        if default_char:
            get_default_char = lambda _: default_char
        elif not get_default_char:
            get_default_char = lambda _: Char(' ', Token)
        self.get_default_char = get_default_char

        #: Cache for rendered screens.
        self._screen_lru_cache = SimpleLRUCache(maxsize=18)
        self._token_lru_cache = SimpleLRUCache(maxsize=1)
        # Only cache one token list. We don't need the previous item.

        # Render info for the mouse support.
        self._tokens = None  # The last rendered tokens.
        self._pos_to_indexes = None  # Mapping from mouse positions (x,y) to
                                     # positions in the token list.

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.get_tokens)

    def _get_tokens_cached(self, cli):
        """
        Get tokens, but only retrieve tokens once during one render run.
        (This function is called several times during one rendering, because
        we also need those for calculating the dimensions.)
        """
        # `render_counter` increments once per render run, so this cache entry
        # expires exactly when the next run starts.
        return self._token_lru_cache.get(
            cli.render_counter, lambda: self.get_tokens(cli))

    def has_focus(self, cli):
        # Focus is purely filter-driven for token lists.
        return self._has_focus_filter(cli)

    def preferred_width(self, cli, max_available_width):
        """
        Return the preferred width for this control.
        That is the width of the longest line.
        """
        text = ''.join(t[1] for t in self._get_tokens_cached(cli))
        line_lengths = [get_cwidth(l) for l in text.split('\n')]
        return max(line_lengths)

    def preferred_height(self, cli, width):
        # Render once at this width; the resulting screen knows its height.
        screen = self.create_screen(cli, width, None)
        return screen.height

    def create_screen(self, cli, width, height):
        # Get tokens
        tokens_with_mouse_handlers = self._get_tokens_cached(cli)
        default_char = self.get_default_char(cli)

        # Wrap/align right/center parameters.
        wrap_lines = self.wrap_lines(cli)
        right = self.align_right(cli)
        center = self.align_center(cli)

        def process_line(line):
            " Center or right align a single line. "
            used_width = token_list_width(line)
            padding = width - used_width
            if center:
                padding = int(padding / 2)
            return [(default_char.token, default_char.char * padding)] + line + [(Token, '\n')]

        if right or center:
            tokens2 = []
            for line in split_lines(tokens_with_mouse_handlers):
                tokens2.extend(process_line(line))
            tokens_with_mouse_handlers = tokens2

        # Strip mouse handlers from tokens.
        tokens = [tuple(item[:2]) for item in tokens_with_mouse_handlers]

        # Create screen, or take it from the cache.
        # (The cache key keeps the handler-carrying tokens so a handler change
        # also invalidates the cached screen.)
        key = (default_char, tokens_with_mouse_handlers, width, wrap_lines, right, center)
        params = (default_char, tokens, width, wrap_lines, right, center)
        screen, self._pos_to_indexes = self._screen_lru_cache.get(key, lambda: self._get_screen(*params))

        self._tokens = tokens_with_mouse_handlers
        return screen

    @classmethod
    def _get_screen(cls, default_char, tokens, width, wrap_lines, right, center):
        """Actually render `tokens` to a new Screen; returns
        (screen, pos_to_indexes) for mouse lookups."""
        screen = Screen(default_char, initial_width=width)

        # Only call write_data when we actually have tokens.
        # (Otherwise the screen height will go up from 0 to 1 while we don't
        # want that. -- An empty control should not take up any space.)
        if tokens:
            write_data_result = screen.write_data(tokens, width=(width if wrap_lines else None))
            indexes_to_pos = write_data_result.indexes_to_pos
            # Invert the index -> (x, y) mapping for mouse position lookups.
            pos_to_indexes = dict((v, k) for k, v in indexes_to_pos.items())
        else:
            pos_to_indexes = {}

        return screen, pos_to_indexes

    @classmethod
    def static(cls, tokens):
        """Create a control that always displays the given static tokens."""
        def get_static_tokens(cli):
            return tokens

        return cls(get_static_tokens)

    def mouse_handler(self, cli, mouse_event):
        """
        Handle mouse events.
        (When the token list contained mouse handlers and the user clicked on
        on any of these, the matching handler is called. This handler can still
        return `NotImplemented` in case we want the `Window` to handle this
        particular event.)
        """
        if self._pos_to_indexes:
            # Find position in the token list.
            position = mouse_event.position
            index = self._pos_to_indexes.get((position.x, position.y))

            if index is not None:
                # Find mouse handler for this character.
                # Walk the token fragments, accumulating lengths until the
                # fragment containing `index` is reached.
                count = 0
                for item in self._tokens:
                    count += len(item[1])
                    if count >= index:
                        if len(item) >= 3:
                            # Handler found. Call it.
                            handler = item[2]
                            handler(cli, mouse_event)
                            return
                        else:
                            break

        # Otherwise, don't handle here.
        return NotImplemented
class FillControl(UIControl):
    """
    Control that fills its entire area with a single repeated character/token.
    (Also helpful for debugging.)
    """
    def __init__(self, character=' ', token=Token):
        self.token = token
        self.character = character

    def __repr__(self):
        return '%s(character=%r, token=%r)' % (
            self.__class__.__name__, self.character, self.token)

    def reset(self):
        # Stateless control: nothing to reset.
        pass

    def has_focus(self, cli):
        # A filler never takes the focus.
        return False

    def create_screen(self, cli, width, height):
        # The Screen's default character does the actual filling.
        fill_char = Char(self.character, self.token)
        return Screen(fill_char, initial_width=width)
class BufferControl(UIControl):
"""
Control for visualising the content of a `Buffer`.
:param input_processors: list of :class:`~prompt_toolkit.layout.processors.Processor`.
:param lexer: :class:`~prompt_toolkit.layout.lexers.Lexer` instance for syntax highlighting.
:param preview_search: `bool` or `CLIFilter`: Show search while typing.
:param get_search_state: Callable that takes a CommandLineInterface and
returns the SearchState to be used. (If not CommandLineInterface.search_state.)
:param wrap_lines: `bool` or `CLIFilter`: Wrap long lines.
:param buffer_name: String representing the name of the buffer to display.
:param default_char: :class:`.Char` instance to use to fill the background. This is
transparent by default.
:param focus_on_click: Focus this buffer when it's click, but not yet focussed.
"""
    def __init__(self,
                 buffer_name=DEFAULT_BUFFER,
                 input_processors=None,
                 highlighters=None,
                 lexer=None,
                 preview_search=False,
                 search_buffer_name=SEARCH_BUFFER,
                 get_search_state=None,
                 wrap_lines=True,
                 menu_position=None,
                 default_char=None,
                 focus_on_click=False):
        # Validate the optional collaborators early, with precise assertions.
        assert input_processors is None or all(isinstance(i, Processor) for i in input_processors)
        assert highlighters is None or all(isinstance(i, Highlighter) for i in highlighters)
        assert menu_position is None or callable(menu_position)
        assert lexer is None or isinstance(lexer, Lexer)
        assert get_search_state is None or callable(get_search_state)
        # bool-or-CLIFilter options are normalized into filters once, here.
        self.preview_search = to_cli_filter(preview_search)
        self.get_search_state = get_search_state
        self.wrap_lines = to_cli_filter(wrap_lines)
        self.focus_on_click = to_cli_filter(focus_on_click)
        self.input_processors = input_processors or []
        self.highlighters = highlighters or []
        self.buffer_name = buffer_name
        self.menu_position = menu_position
        self.lexer = lexer or SimpleLexer()
        self.default_char = default_char or Char(token=Token.Transparent)
        self.search_buffer_name = search_buffer_name
        #: LRU cache for the lexer.
        #: Often, due to cursor movement, undo/redo and window resizing
        #: operations, it happens that a short time, the same document has to be
        #: lexed. This is a fairly easy way to cache such an expensive operation.
        self._token_lru_cache = SimpleLRUCache(maxsize=8)
        #: Keep a similar cache for rendered screens. (when we scroll up/down
        #: through the screen, or when we change another buffer, we don't want
        #: to recreate the same screen again.)
        self._screen_lru_cache = SimpleLRUCache(maxsize=8)
        #: Highlight Cache.
        #: When nothing of the buffer content or processors has changed, but
        #: the highlighting of the selection/search changes,
        self._highlight_lru_cache = SimpleLRUCache(maxsize=8)
        # Mouse bookkeeping: screen (x, y) -> cursor position mapping, and the
        # last click timestamp (presumably for double-click detection -- the
        # code that uses it is outside this method).
        self._xy_to_cursor_position = None
        self._last_click_timestamp = None
def _buffer(self, cli):
"""
The buffer object that contains the 'main' content.
"""
return cli.buffers[self.buffer_name]
def has_focus(self, cli):
# This control gets the focussed if the actual `Buffer` instance has the
# focus or when any of the `InputProcessor` classes tells us that it
# wants the focus. (E.g. in case of a reverse-search, where the actual
# search buffer may not be displayed, but the "reverse-i-search" text
# should get the focus.)
return cli.current_buffer_name == self.buffer_name or \
any(i.has_focus(cli) for i in self.input_processors)
def preferred_width(self, cli, max_available_width):
# Return the length of the longest line.
return max(map(len, self._buffer(cli).document.lines))
def preferred_height(self, cli, width):
# Draw content on a screen using this width. Measure the height of the
# result.
screen, highlighters = self.create_screen(cli, width, None)
return screen.height
    def _get_input_tokens(self, cli, document):
        """
        Tokenize input text for highlighting.
        Return (tokens, source_to_display, display_to_source) tuple.

        :param document: The document to be shown. This can be `buffer.document`
            but could as well be a different one, in case we are
            searching through the history. (Buffer.document_for_search)
        """
        def get():
            # Call lexer.
            tokens = list(self.lexer.get_tokens(cli, document.text))
            # 'Explode' tokens in characters.
            # (Some input processors -- like search/selection highlighter --
            # rely on that each item in the tokens array only contains one
            # character.)
            tokens = [(token, c) for token, text in tokens for c in text]
            # Run all processors over the input.
            # (They can transform both the tokens and the cursor position.)
            source_to_display_functions = []
            display_to_source_functions = []
            d_ = document  # Each processor receives the document of the previous one.
            for p in self.input_processors:
                transformation = p.apply_transformation(cli, d_, tokens)
                d_ = transformation.document
                assert isinstance(transformation, Transformation)
                tokens = transformation.tokens
                source_to_display_functions.append(transformation.source_to_display)
                display_to_source_functions.append(transformation.display_to_source)
            # Chain cursor transformation (movement) functions.
            def source_to_display(cursor_position):
                " Chained source_to_display. "
                for f in source_to_display_functions:
                    cursor_position = f(cursor_position)
                return cursor_position
            def display_to_source(cursor_position):
                " Chained display_to_source. "
                # The inverse mapping applies the processors in reverse order.
                for f in reversed(display_to_source_functions):
                    cursor_position = f(cursor_position)
                return cursor_position
            return tokens, source_to_display, display_to_source
        # Cache on the text plus every processor's invalidation hash, so that
        # cursor-only movements reuse the previous (expensive) lexing result.
        key = (
            document.text,
            # Include invalidation_hashes from all processors.
            tuple(p.invalidation_hash(cli, document) for p in self.input_processors),
        )
        return self._token_lru_cache.get(key, get)
def create_screen(self, cli, width, height):
    """
    Render this buffer into a `Screen` and return (screen, highlighting).

    Heavily cached: the rendered screen (plus the cursor<->x/y mapping
    functions) is keyed on the text, the wrap width and the processors'
    invalidation hashes; the highlighting has its own cache keyed on the
    same plus the highlighters' invalidation hashes.

    :param height: Unused here -- only `width` influences wrapping.
    """
    buffer = self._buffer(cli)

    # Get the document to be shown. If we are currently searching (the
    # search buffer has focus, and the preview_search filter is enabled),
    # then use the search document, which has possibly a different
    # text/cursor position.)
    def preview_now():
        """ True when we should preview a search. """
        return bool(self.preview_search(cli) and
                    cli.buffers[self.search_buffer_name].text)

    if preview_now():
        # Prefer a caller-supplied search-state getter, else the global one.
        if self.get_search_state:
            ss = self.get_search_state(cli)
        else:
            ss = cli.search_state

        document = buffer.document_for_search(SearchState(
            text=cli.current_buffer.text,
            direction=ss.direction,
            ignore_case=ss.ignore_case))
    else:
        document = buffer.document

    # Wrap. (None disables wrapping in `screen.write_data` below.)
    wrap_width = width if self.wrap_lines(cli) else None

    def _create_screen():
        # Cache-miss path: actually render the document into a new screen.
        screen = Screen(self.default_char, initial_width=width)

        # Get tokens
        # Note: we add the space character at the end, because that's where
        #       the cursor can also be.
        input_tokens, source_to_display, display_to_source = self._get_input_tokens(cli, document)
        input_tokens += [(self.default_char.token, ' ')]

        write_data_result = screen.write_data(input_tokens, width=wrap_width)
        indexes_to_pos = write_data_result.indexes_to_pos
        line_lengths = write_data_result.line_lengths

        # Reverse lookup table: (x, y) screen position -> token index.
        pos_to_indexes = dict((v, k) for k, v in indexes_to_pos.items())

        def cursor_position_to_xy(cursor_position):
            """ Turn a cursor position in the buffer into x/y coordinates
            on the screen. """
            # Clamp: the cursor may sit one past the end of the text.
            cursor_position = min(len(document.text), cursor_position)

            # First get the real token position by applying all transformations.
            cursor_position = source_to_display(cursor_position)

            # Then look up into the table.
            try:
                return indexes_to_pos[cursor_position]
            except KeyError:
                # This can fail with KeyError, but only if one of the
                # processors is returning invalid key locations.
                raise
                # return 0, 0

        def xy_to_cursor_position(x, y):
            """ Turn x/y screen coordinates back to the original cursor
            position in the buffer. """
            # Look up reverse in table. Walk left, then up, until we hit a
            # cell that actually contains text.
            while x > 0 or y > 0:
                try:
                    index = pos_to_indexes[x, y]
                    break
                except KeyError:
                    # No match found -> mouse click outside of region
                    # containing text. Look to the left or up.
                    if x: x -= 1
                    elif y: y -=1
            else:
                # Nobreak: reached (0, 0) without a match.
                index = 0

            # Transform back from display to source coordinates.
            return display_to_source(index)

        return screen, cursor_position_to_xy, xy_to_cursor_position, line_lengths

    # Build a key for the caching. If any of these parameters changes, we
    # have to recreate a new screen.
    key = (
        # When the text changes, we obviously have to recreate a new screen.
        document.text,

        # When the width changes, line wrapping will be different.
        # (None when disabled.)
        wrap_width,

        # Include invalidation_hashes from all processors.
        tuple(p.invalidation_hash(cli, document) for p in self.input_processors),
    )

    # Get from cache, or create if this doesn't exist yet.
    # NOTE: the xy->cursor mapping is stashed on the instance so that
    # `mouse_handler` can translate click coordinates later.
    screen, cursor_position_to_xy, self._xy_to_cursor_position, line_lengths = \
        self._screen_lru_cache.get(key, _create_screen)

    x, y = cursor_position_to_xy(document.cursor_position)
    screen.cursor_position = Point(y=y, x=x)

    # If there is an auto completion going on, use that start point for a
    # pop-up menu position. (But only when this buffer has the focus --
    # there is only one place for a menu, determined by the focussed buffer.)
    if cli.current_buffer_name == self.buffer_name:
        menu_position = self.menu_position(cli) if self.menu_position else None
        if menu_position is not None:
            assert isinstance(menu_position, int)
            x, y = cursor_position_to_xy(menu_position)
            screen.menu_position = Point(y=y, x=x)
        elif buffer.complete_state:
            # Position for completion menu.
            # Note: We use 'min', because the original cursor position could be
            #       behind the input string when the actual completion is for
            #       some reason shorter than the text we had before. (A completion
            #       can change and shorten the input.)
            x, y = cursor_position_to_xy(
                min(buffer.cursor_position,
                    buffer.complete_state.original_document.cursor_position))
            screen.menu_position = Point(y=y, x=x)
        else:
            screen.menu_position = None

    # Add highlighting.
    highlight_key = (
        key,  # Includes everything from the 'key' above. (E.g. when the
              # document changes, we have to recalculate highlighting.)

        # Include invalidation_hashes from all highlighters.
        tuple(h.invalidation_hash(cli, document) for h in self.highlighters)
    )

    highlighting = self._highlight_lru_cache.get(highlight_key, lambda:
        self._get_highlighting(cli, document, cursor_position_to_xy, line_lengths))

    return screen, highlighting
def _get_highlighting(self, cli, document, cursor_position_to_xy, line_lengths):
    """
    Return a _HighlightDict for the highlighting. (This is a lazy dict of dicts.)

    The Window class will apply this for the visible regions. - That way,
    we don't have to recalculate the screen again for each selection/search
    change.

    :param cursor_position_to_xy: Callable mapping a source cursor position
        to (x, y) screen coordinates (produced inside `create_screen`).
    :param line_lengths: Maps line numbers to the length of these lines.
    """
    def get_row_size(y):
        " Return the max 'x' value for a given row in the screen. "
        return max(1, line_lengths.get(y, 0))

    # Get list of fragments, grouped per screen row.
    row_to_fragments = defaultdict(list)

    for h in self.highlighters:
        for fragment in h.get_fragments(cli, document):
            # Expand fragments: translate buffer offsets to screen rows/columns.
            start_column, start_row = cursor_position_to_xy(fragment.start)
            end_column, end_row = cursor_position_to_xy(fragment.end)
            token = fragment.token

            if start_row == end_row:
                # Single line highlighting.
                row_to_fragments[start_row].append(
                    _HighlightFragment(start_column, end_column, token))
            else:
                # Multi line highlighting.
                # (First line: from the start column to the end of the row.)
                row_to_fragments[start_row].append(
                    _HighlightFragment(start_column, get_row_size(start_row), token))

                # (Middle lines: whole rows.)
                for y in range(start_row + 1, end_row):
                    row_to_fragments[y].append(_HighlightFragment(0, get_row_size(y), token))

                # (Last line: from the row start up to the end column.)
                row_to_fragments[end_row].append(_HighlightFragment(0, end_column, token))

    # Create dict to return.
    return _HighlightDict(row_to_fragments)
def mouse_handler(self, cli, mouse_event):
    """
    Mouse handler for this control.

    Returns NotImplemented for events this control does not handle (scroll
    events, or clicks while unfocussed when focus_on_click is off), so the
    caller can propagate them.
    """
    buffer = self._buffer(cli)
    position = mouse_event.position

    # Focus buffer when clicked.
    if self.has_focus(cli):
        if self._xy_to_cursor_position:
            # Translate coordinates back to the cursor position of the
            # original input. (Mapping was stored by `create_screen`.)
            pos = self._xy_to_cursor_position(position.x, position.y)

            # Set the cursor position.
            if pos <= len(buffer.text):
                if mouse_event.event_type == MouseEventTypes.MOUSE_DOWN:
                    buffer.exit_selection()
                    buffer.cursor_position = pos
                elif mouse_event.event_type == MouseEventTypes.MOUSE_UP:
                    # When the cursor was moved to another place, select the text.
                    # (The >1 is actually a small but acceptable workaround for
                    # selecting text in Vi navigation mode. In navigation mode,
                    # the cursor can never be after the text, so the cursor
                    # will be repositioned automatically.)
                    if abs(buffer.cursor_position - pos) > 1:
                        buffer.start_selection(selection_type=SelectionType.CHARACTERS)
                        buffer.cursor_position = pos

                    # Select word around cursor on double click.
                    # Two MOUSE_UP events in a short timespan are considered a double click.
                    double_click = self._last_click_timestamp and time.time() - self._last_click_timestamp < .3
                    self._last_click_timestamp = time.time()

                    if double_click:
                        start, end = buffer.document.find_boundaries_of_current_word()
                        buffer.cursor_position += start
                        buffer.start_selection(selection_type=SelectionType.CHARACTERS)
                        buffer.cursor_position += end - start
                else:
                    # Don't handle scroll events here.
                    return NotImplemented

    # Not focussed, but focussing on click events.
    else:
        if self.focus_on_click(cli) and mouse_event.event_type == MouseEventTypes.MOUSE_UP:
            # Focus happens on mouseup. (If we did this on mousedown, the
            # up event will be received at the point where this widget is
            # focussed and be handled anyway.)
            cli.focus(self.buffer_name)
        else:
            return NotImplemented
def move_cursor_down(self, cli):
    """Move the cursor one line down in the attached buffer."""
    target_buffer = self._buffer(cli)
    target_buffer.cursor_position += target_buffer.document.get_cursor_down_position()
def move_cursor_up(self, cli):
    """Move the cursor one line up in the attached buffer."""
    target_buffer = self._buffer(cli)
    target_buffer.cursor_position += target_buffer.document.get_cursor_up_position()
#: One highlighted span within a single screen row:
#: `start_column` is inclusive, `end_column` exclusive.
_HighlightFragment = namedtuple('_HighlightFragment', 'start_column end_column token')  # End is excluded.
class _HighlightDict(dict):
    """
    Helper class to contain the highlighting.

    Maps a 'y' (row) coordinate to an `_HighlightDictRow`, which in turn
    maps 'x' coordinates to tokens.  Rows are built lazily on first access
    and memoized in the dict itself.

    :param row_to_fragments: Dictionary that maps row numbers to lists of
        `_HighlightFragment` instances.
    """
    def __init__(self, row_to_fragments):
        self.row_to_fragments = row_to_fragments

    def __missing__(self, row):
        # Materialize the row lazily and cache it for subsequent lookups.
        row_cache = _HighlightDictRow(self.row_to_fragments[row])
        self[row] = row_cache
        return row_cache

    def __repr__(self):
        return '_HighlightDict(%r)' % (dict.__repr__(self), )
class _HighlightDictRow(dict):
    """
    Lazy mapping from an 'x' coordinate to the highlight token at that
    column (or None).  The first fragment covering the column wins; the
    answer is memoized in the dict itself.
    """
    def __init__(self, list_of_fragments):
        self.list_of_fragments = list_of_fragments

    def __missing__(self, key):
        # First fragment whose [start_column, end_column) span covers `key`.
        token = next(
            (fragment.token for fragment in self.list_of_fragments
             if fragment.start_column <= key < fragment.end_column),
            None)
        self[key] = token
        return token
|
# ------------------------------------------------------------------------
# BoxeR
# Copyright (c) 2022. All Rights Reserved.
# Licensed under the MIT License [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from mmf (https://github.com/facebookresearch/mmf)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import os
import copy
import warnings
import omegaconf
import torch
from torch.utils.data.dataset import Dataset
from e2edet.dataset.processor import build_processor
from e2edet.utils.general import get_cache_dir, get_root
class BaseDataset(Dataset):
    """
    Base class for e2edet datasets.

    Handles the shared bookkeeping: dataset name/type, target device,
    processor construction, and transferring batches onto the device.
    Subclasses implement `_load`, `get_collate_fn` and the evaluation
    hooks.
    """

    def __init__(self, config, dataset_name, dataset_type="train", **kwargs):
        """
        :param config: Dataset config node; None falls back to an empty dict.
        :param dataset_name: Identifier of this dataset.
        :param dataset_type: Split tag, e.g. "train"/"val"/"test".
        :param kwargs: Must provide `current_device` and `global_config`
            (the latter supplies `training.iter_per_update`).
        """
        super().__init__()
        self.config = {} if config is None else config
        self._dataset_name = dataset_name
        self._dataset_type = dataset_type
        self._device = kwargs["current_device"]
        self._global_config = kwargs["global_config"]
        # Gradient-accumulation factor, taken from the global training config.
        self._iter_per_update = self._global_config.training.iter_per_update

    def _get_absolute_path(self, paths):
        """Resolve `paths` (str or list of str) against $E2E_DATASETS."""
        if os.environ.get("E2E_DATASETS") is None:
            warnings.warn("E2E_DATASETS environment not found! Setting to '.data' ...")
            os.environ["E2E_DATASETS"] = get_cache_dir(".data")

        if isinstance(paths, list):
            return [self._get_absolute_path(p) for p in paths]
        if isinstance(paths, str):
            if os.path.isabs(paths):
                return paths
            return os.path.join(os.environ.get("E2E_DATASETS"), paths)
        raise TypeError(
            "Paths passed to dataset should either be " "string or list"
        )

    def init_processors(self):
        """Build each processor declared under ``config.processors`` and
        attach it to this instance under its config key."""
        if not hasattr(self.config, "processors"):
            return
        for processor_key, processor_params in self.config.processors.items():
            if not processor_params:
                continue
            if "answer" in processor_key:
                # Answer processors carry a class file whose path must be
                # resolved; deep-copy first so the shared config is untouched.
                processor_params = copy.deepcopy(processor_params)
                with omegaconf.open_dict(processor_params):
                    file_path = processor_params.params["class_file"]
                    processor_params.params["class_file"] = self._get_absolute_path(
                        file_path
                    )
            setattr(self, processor_key, build_processor(processor_params))

    def _prepare_batch(self, batch, non_blocking=False):
        """Move every tensor in one (sample, target) pair to the device;
        non-tensor values pass through unchanged."""
        sample, target = batch

        def to_device(mapping):
            return {
                key: value.to(self._device, non_blocking=non_blocking)
                if isinstance(value, torch.Tensor) else value
                for key, value in mapping.items()
            }

        return (to_device(sample), [to_device(t) for t in target])

    def prepare_batch(self, batch, non_blocking=False):
        """
        Transfer cpu tensors in batch to gpu.

        :batch: (sample, target)
            sample: dict of tensors
            target: list of dict

        With gradient accumulation (`iter_per_update` > 1) the batch is a
        list of such pairs and each split is transferred separately.
        """
        if self._iter_per_update <= 1:
            return self._prepare_batch(batch, non_blocking=non_blocking)
        return [
            self._prepare_batch(split, non_blocking=non_blocking) for split in batch
        ]

    def _load(self, index):
        """Subclasses return one (sample, target) pair for `index`."""
        raise NotImplementedError

    def __getitem__(self, index):
        sample, target = self._load(index)
        # Expose the accumulation factor to downstream consumers.
        sample["iter_per_update"] = self.iter_per_update
        return sample, target

    @property
    def dataset_type(self):
        return self._dataset_type

    @property
    def dataset_name(self):
        return self._dataset_name

    @property
    def iter_per_update(self):
        return self._iter_per_update

    @dataset_name.setter
    def dataset_name(self, name):
        self._dataset_name = name

    def get_collate_fn(self):
        raise NotImplementedError

    @torch.no_grad()
    def prepare_for_evaluation(self, predictions, *args, **kwargs):
        raise NotImplementedError

    @torch.no_grad()
    def format_for_evalai(self, output, *args, **kwargs):
        raise NotImplementedError
|
import copy
import numpy as np
from State import State
from Player import Player
class Environment:
    """
    Environment for a Gobblet-style board game.

    The board is a NUM_ROWS x NUM_COLS grid whose cells hold signed piece
    sizes: positive values belong to player 1 ('X'), negative values to
    player -1 ('O'), 0 marks an empty cell.  Covered (gobbled) pieces are
    kept in ``state.lower_layers``.
    """

    def __init__(self, NUM_ROWS, NUM_COLS, DEPTH, init_board=None):
        self.state = None
        self.moves_made = set()
        self.duplicate_moves = set()
        self.draw_flag = False
        self.turn = None
        self.NUM_ROWS = NUM_ROWS
        self.NUM_COLS = NUM_COLS
        self.DEPTH = DEPTH
        self.reset()

    def reset(self):
        # Resets the board to be empty and the turn to be 'X' (player 1).
        # BUGFIX: the board used to be created NUM_COLS x NUM_COLS; the row
        # count must be NUM_ROWS so non-square boards have the right shape.
        self.state = State(np.array(
            [[0 for _ in range(self.NUM_COLS)] for _ in range(self.NUM_ROWS)]))
        self.moves_made = set()
        self.duplicate_moves = set()
        self.draw_flag = False
        self.turn = 1

    def update(self, action, player, turn=0, check_legal=False):
        """
        Apply `action` (a (piece, location) pair) for `player`.

        Returns (next_state, result) where result is None while the game is
        undecided, +/-1 for a win, 0 for a draw, and 10*turn for an illegal
        move when check_legal is False.

        :raises ValueError: when the move is illegal and check_legal is True.
        """
        piece, location = action
        if not self.is_legal(action, player):
            if check_legal:
                print(self.state)
                raise ValueError("The action {} is not legal".format(action))
            else:
                return (self.state, 10*self.turn)
        if turn == 0:
            turn = self.turn
        if piece.is_on_board:
            # The piece moves within the board: vacate its origin square.
            self.state.board[location] = 0

        # Snapshot the position *before* mutating it.
        # BUGFIX: copy.copy() shared the underlying board array, so the
        # "prev_state" silently tracked the post-move board and the repeated-
        # position bookkeeping below compared identical states.  A deep copy
        # keeps a real snapshot.
        prev_state = copy.deepcopy(self.state)
        action_made = {"prev_state": prev_state}
        prev_occupant = int(self.state.board[location])
        self.state.board[location] = turn * piece.size
        final_state = self.state
        action_made.update({"final_state": final_state})

        # Repetition bookkeeping: a move seen twice becomes a "duplicate";
        # seeing a duplicate again flags the game as drawn.
        if str(action_made) in self.duplicate_moves:
            self.draw_flag = True
        elif str(action_made) in self.moves_made:
            self.duplicate_moves.add(str(action_made))
        else:
            self.moves_made.add(str(action_made))

        for idx, i in enumerate(player.pieces):
            condition = None
            try:
                condition = i.size == piece.size and not i.is_on_board and not piece.is_on_board
            except IndexError:
                continue
            if condition:
                # Update values for the locations of pieces.
                # NOTE(review): `idx` is rebound from 0 over a *slice* of
                # player.pieces but then indexes the full list -- looks
                # suspicious, but the intent is unclear from here, so the
                # behavior is kept unchanged. TODO confirm.
                for idx, i in enumerate(player.pieces[piece.stack_number*4+piece.location:]):
                    if i.is_top_of_stack:
                        break
                    player.pieces[idx].location -= 1
                break

        if self.state.lower_layers[0][location] != 0:
            # Uncover the piece that was gobbled underneath this square.
            self.state.board[location] = self.state.lower_layers[0][location]
        if prev_occupant != 0:
            self.update_lower_layers(action, player, prev_occupant)

        # Switch the turn tracker to the other player.
        self.turn *= -1
        return (self.state, self.get_result(self.state))

    def update_lower_layers(self, action, player, prev_occupant, i=0):
        """
        Push `prev_occupant` (the piece covered at the action's location)
        one layer down, recursing while the target layer is occupied.
        """
        piece, location = action
        layer = self.state.lower_layers[i]
        dest = layer[location]
        if dest != 0:
            # BUGFIX: the recursive call used to pass `self` explicitly
            # (self.update_lower_layers(self, action, ...)), shifting every
            # argument by one and raising a TypeError at runtime.
            self.update_lower_layers(action, player, dest, i+1)
        # NOTE: the old `dest = self.turn * piece.size` only rebound a local
        # scalar copy and had no effect on the board, so it was removed.
        self.state.lower_layers[i+1, location[0], location[1]] = prev_occupant
        for p in player.pieces:
            if p.location == location:
                p.stack_number += 1
                break

    def get_result(self, state):
        """
        Return None while the game is undecided, +1/-1 when a full line of
        one sign exists (white/black win), or 0 for a draw by repetition.
        """
        # Check rows.
        for row in state.board:
            ones = np.sign(row)
            if abs(sum(ones)) == self.NUM_ROWS:
                return sum(ones) / self.NUM_ROWS
        # Check columns.
        # BUGFIX: `cols.transpose()` returned a transposed view that was
        # discarded, and the loop then re-used `row` from above instead of
        # `col` -- column wins were never detected.
        cols = state.board.transpose()
        for col in cols:
            ones = np.sign(col)
            if abs(sum(ones)) == self.NUM_COLS:
                return sum(ones) / self.NUM_COLS
        # Check diagonals.
        diags = [state.board.diagonal(), np.fliplr(state.board).diagonal()]
        for diag in diags:
            ones = np.sign(diag)
            if abs(sum(ones)) == self.NUM_ROWS:
                return sum(ones) / self.NUM_ROWS
        # Check for draws:
        # that is, if three identical moves have been made, it's a draw.
        if self.draw_flag:
            return 0
        return None

    def is_legal(self, action, player):
        """Whether placing/moving `piece` onto `location` is allowed."""
        piece, location = action
        curr_piece = self.state.board[location]
        # The piece has to be bigger than the one currently there.
        # NOTE(review): `curr_piece` is signed, so any size "beats" an
        # opponent (negative) cell regardless of magnitude -- possibly this
        # should compare against abs(curr_piece); verify against the rules.
        if piece.size <= curr_piece:
            return False
        # A new gobblet entering the board must land on an empty square...
        if not piece.is_on_board and curr_piece != 0:
            # ...unless there is a three-in-a-row through the target square.
            row = self.state.board[location[0]]
            col = self.state.board[:, location[1]]
            diag = [0 for i in range(self.NUM_ROWS)]
            if location[0]==location[1]:
                diag = self.state.board.diagonal()
            elif location[0]+location[1] == self.NUM_ROWS-1:
                diag = np.fliplr(self.state.board).diagonal()
            flag = False
            for i in [row, col, diag]:
                if flag:
                    break
                # NOTE(review): `counter` counts all non-empty cells in the
                # line, not consecutive ones -- confirm this matches the rule.
                counter = 0
                for j in np.squeeze(i):
                    if j != 0:
                        counter += 1
                    if counter==3:
                        flag = True
                        break
            if not flag:
                return False
        return True

    def get_legal_moves(self, player):
        # Returns the legal moves that can be taken.
        # NOTE(review): relies on `self.is_valid_move`, which is not defined
        # in this class as shown -- presumably provided elsewhere; confirm.
        moves = []
        add_move = moves.append
        is_valid_move = self.is_valid_move
        for idx, i in enumerate(self.state.board):
            for jIdx, j in enumerate(i):
                # Moves that bring a new piece from the player's stacks.
                for stack in player.pieces:
                    if len(stack) == 0:
                        continue
                    if is_valid_move((idx, jIdx), stack[0].size):
                        add_move([(idx, jIdx), int(stack[0].size), [0]])
                # Moves that relocate a piece already on the board.
                for piece in player.pieces_on_board:
                    if is_valid_move((idx, jIdx), piece.size):
                        add_move([(idx, jIdx), int(piece.size), copy.deepcopy(piece[0])])
        return moves

    def display(self):
        # Print the board, one row per line, followed by a blank line.
        for i in self.state.board:
            print(i)
        print()
#!/usr/bin/env python3
# encoding: UTF-8
"""
This file is part of IPGeoLocation tool.
Copyright (C) 2015-2016 @maldevel
https://github.com/maldevel/IPGeoLocation
IPGeoLocation - Retrieve IP Geolocation information
Powered by http://ip-api.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For more see the file 'LICENSE' for copying permission.
"""
__author__ = 'maldevel'
import csv
from xml.etree import ElementTree as etree
from collections import OrderedDict
class FileExporter:
    """
    Export IP geolocation results to TXT, XML and CSV files.

    Every export method returns True on success and False on failure; I/O
    and serialization errors are deliberately reduced to that boolean so
    callers can report "export failed" without handling exceptions.
    """

    def __init__(self):
        pass

    def ExportListToCSV(self, ipGeoLocObjs, filename):
        """Export a list of geolocation objects as CSV."""
        return self.__ExportToCSV(ipGeoLocObjs, filename)

    def ExportToCSV(self, ipGeoLocObj, filename):
        """Export a single geolocation object as CSV."""
        return self.__ExportToCSV([ipGeoLocObj], filename)

    def ExportListToXML(self, ipGeoLocObjs, filename):
        """Export a list of geolocation objects as XML."""
        return self.__ExportToXML(ipGeoLocObjs, filename)

    def ExportToXML(self, ipGeoLocObj, filename):
        """Export a single geolocation object as XML."""
        return self.__ExportToXML([ipGeoLocObj], filename)

    def ExportListToTXT(self, ipGeoLocObjs, filename):
        """Export a list of geolocation objects as plain text."""
        return self.__ExportToTXT(ipGeoLocObjs, filename)

    def ExportToTXT(self, ipGeoLocObj, filename):
        """Export a single geolocation object as plain text."""
        return self.__ExportToTXT([ipGeoLocObj], filename)

    def __ExportToTXT(self, ipGeoLocObjs, filename):
        """Write one text stanza per non-None object; True on success."""
        try:
            with open(filename, 'w') as txtfile:
                txtfile.write('Results IPGeolocation\n')
                for ipGeoLocObj in ipGeoLocObjs:
                    if ipGeoLocObj:
                        txtfile.write('Target: {}\n'.format(ipGeoLocObj.Query))
                        txtfile.write('IP: {}\n'.format(ipGeoLocObj.IP))
                        txtfile.write('ASN: {}\n'.format(ipGeoLocObj.ASN))
                        txtfile.write('City: {}\n'.format(ipGeoLocObj.City))
                        txtfile.write('Country: {}\n'.format(ipGeoLocObj.Country))
                        txtfile.write('Country Code: {}\n'.format(ipGeoLocObj.CountryCode))
                        txtfile.write('ISP: {}\n'.format(ipGeoLocObj.ISP))
                        txtfile.write('Latitude: {}\n'.format(ipGeoLocObj.Latitude))
                        # NOTE: 'Longtitude' mirrors the attribute name
                        # defined upstream; not corrected here.
                        txtfile.write('Longtitude: {}\n'.format(ipGeoLocObj.Longtitude))
                        txtfile.write('Organization: {}\n'.format(ipGeoLocObj.Organization))
                        txtfile.write('Region: {}\n'.format(ipGeoLocObj.Region))
                        txtfile.write('Region Name: {}\n'.format(ipGeoLocObj.RegionName))
                        txtfile.write('Timezone: {}\n'.format(ipGeoLocObj.Timezone))
                        txtfile.write('Zip: {}\n'.format(ipGeoLocObj.Zip))
                        txtfile.write('Google Maps: {}\n'.format(ipGeoLocObj.GoogleMapsLink))
                        txtfile.write('\n')
            return True
        # BUGFIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        except Exception:
            return False

    def __ExportToXML(self, ipGeoLocObjs, filename):
        """Write an <IPGeolocation> element per non-None object; True on success."""
        try:
            root = etree.Element('Results')

            for ipGeoLocObj in ipGeoLocObjs:
                if ipGeoLocObj:
                    # Sort keys for a stable element order; strip spaces so
                    # the keys are valid XML tag names.
                    orderedData = OrderedDict(sorted(ipGeoLocObj.ToDict().items()))
                    self.__add_items(etree.SubElement(root, 'IPGeolocation'),
                                     ((key.replace(' ', ''), value) for key, value in orderedData.items()))

            tree = etree.ElementTree(root)
            tree.write(filename, xml_declaration=True, encoding='utf-8')
            return True
        # BUGFIX: bare `except:` narrowed to Exception (see __ExportToTXT).
        except Exception:
            return False

    def __ExportToCSV(self, ipGeoLocObjs, filename):
        """Write one ';'-delimited record block per non-None object; True on success."""
        try:
            with open(filename, 'w', newline='') as csvfile:
                writer = csv.writer(csvfile, delimiter=';', quoting=csv.QUOTE_MINIMAL)
                writer.writerow(['Results', 'IPGeolocation'])
                for ipGeoLocObj in ipGeoLocObjs:
                    if ipGeoLocObj:
                        writer.writerow(['Target', ipGeoLocObj.Query])
                        writer.writerow(['IP', ipGeoLocObj.IP])
                        writer.writerow(['ASN', ipGeoLocObj.ASN])
                        writer.writerow(['City', ipGeoLocObj.City])
                        writer.writerow(['Country', ipGeoLocObj.Country])
                        writer.writerow(['Country Code', ipGeoLocObj.CountryCode])
                        writer.writerow(['ISP', ipGeoLocObj.ISP])
                        writer.writerow(['Latitude', ipGeoLocObj.Latitude])
                        writer.writerow(['Longtitude', ipGeoLocObj.Longtitude])
                        writer.writerow(['Organization', ipGeoLocObj.Organization])
                        writer.writerow(['Region', ipGeoLocObj.Region])
                        writer.writerow(['Region Name', ipGeoLocObj.RegionName])
                        writer.writerow(['Timezone', ipGeoLocObj.Timezone])
                        writer.writerow(['Zip', ipGeoLocObj.Zip])
                        writer.writerow(['Google Maps', ipGeoLocObj.GoogleMapsLink])
                        writer.writerow([])
            return True
        # BUGFIX: bare `except:` narrowed to Exception (see __ExportToTXT).
        except Exception:
            return False

    def __add_items(self, root, items):
        """Append one child element per (name, text) pair under `root`."""
        for name, text in items:
            elem = etree.SubElement(root, name)
            elem.text = text
|
import time
from multiworld.core.image_env import ImageEnv
from rlkit.core import logger
from rlkit.envs.vae_wrapper import temporary_mode
import cv2
import numpy as np
import os.path as osp
import os
from rlkit.samplers.data_collector.vae_env import (
VAEWrappedEnvPathCollector,
)
from rlkit.torch.her.her import HERTrainer
from rlkit.torch.sac.policies import MakeDeterministic
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.skewfit.online_vae_algorithm import OnlineVaeAlgorithm
from rlkit.util.io import load_local_or_remote_file
from rlkit.util.video import dump_video
from rlkit.launchers.launcher_util import reset_execution_environment, set_gpu_mode, set_seed, setup_logger, save_experiment_data
import torch, copy
import __main__ as main
from torch.multiprocessing import Pool, set_start_method
def run_task(variant, log_dir, exp_prefix):
    """
    Fan out one skewfit experiment per seed listed in variant['seed'],
    running them sequentially.  Each seed gets its own sub-directory under
    `log_dir` and a deep copy of the variant with that single seed set.
    """
    print("log_dir: ", log_dir)
    exp_prefix = time.strftime("%m-%d") + "-" + exp_prefix

    per_seed_args = []
    for seed in variant['seed']:
        seed_variant = copy.deepcopy(variant)
        seed_variant['seed'] = seed
        seed_log_dir = log_dir + '/' + str(seed)
        print("seed_log_dir: ", seed_log_dir)
        per_seed_args.append([seed_variant, seed_log_dir, exp_prefix])

    for args in per_seed_args:
        skewfit_full_experiment_chester(args)
def skewfit_full_experiment_chester(args):
    """
    Run one seeded skewfit experiment: set up logging, seed every RNG,
    then run VAE pretraining followed by the skewfit RL phase.

    :param args: (variant, log_dir, exp_prefix) triple, where `variant`
        carries the real config under its 'variant' key plus a 'seed'.
    """
    variant, log_dir, exp_prefix = args
    base_log_dir = log_dir
    logger.reset()
    print("log dir is: ", base_log_dir)

    script_name = main.__file__
    seed = variant['seed']
    actual_log_dir = setup_logger(
        exp_prefix=exp_prefix,
        variant=variant,
        exp_id=0,
        seed=seed,
        snapshot_mode='gap_and_last',
        snapshot_gap=25,
        log_dir=base_log_dir,
        script_name=script_name,
    )

    # Unwrap the actual experiment config.
    variant = variant['variant']

    # Seed every source of randomness and force deterministic cudnn.
    set_gpu_mode(torch.cuda.is_available())
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)

    variant['skewfit_variant']['save_vae_data'] = True
    full_experiment_variant_preprocess(variant)
    train_vae_and_update_variant(variant)
    skewfit_experiment(variant['skewfit_variant'])
def skewfit_full_experiment(variant):
    """
    Full pipeline without the chester/logging setup: preprocess the
    variant, pretrain the VAE, then run the skewfit RL phase.
    """
    skewfit_variant = variant['skewfit_variant']
    skewfit_variant['save_vae_data'] = True
    full_experiment_variant_preprocess(variant)
    train_vae_and_update_variant(variant)
    skewfit_experiment(skewfit_variant)
def full_experiment_variant_preprocess(variant):
    """
    Propagate shared settings (env, camera, image size, path length) from
    the top-level `variant` into its 'train_vae_variant' and
    'skewfit_variant' sub-dicts, in place.
    """
    train_vae_variant = variant['train_vae_variant']
    skewfit_variant = variant['skewfit_variant']
    vae_dataset_kwargs = train_vae_variant['generate_vae_dataset_kwargs']

    if 'env_id' in variant:
        # Registered gym env: id excludes an explicit class.
        assert 'env_class' not in variant
        env_id = variant['env_id']
        skewfit_variant['env_id'] = env_id
        vae_dataset_kwargs['env_id'] = env_id
    else:
        env_class = variant['env_class']
        env_kwargs = variant['env_kwargs']
        vae_dataset_kwargs['env_class'] = env_class
        vae_dataset_kwargs['env_kwargs'] = env_kwargs
        skewfit_variant['env_class'] = env_class
        skewfit_variant['env_kwargs'] = env_kwargs

    init_camera = variant.get('init_camera', None)
    imsize = variant.get('imsize', 84)
    vae_dataset_kwargs['init_camera'] = init_camera
    vae_dataset_kwargs['imsize'] = imsize
    train_vae_variant['imsize'] = imsize
    skewfit_variant['imsize'] = imsize
    skewfit_variant['init_camera'] = init_camera

    # for online training LSTM: the replay buffer needs the rollout length.
    skewfit_variant['replay_buffer_kwargs']['max_path_length'] = skewfit_variant['max_path_length']

    # for loading door and pick up presampled goals to train vae
    if skewfit_variant.get('presampled_goals_path') is not None:
        if vae_dataset_kwargs.get('dataset_path') is None:
            vae_dataset_kwargs['dataset_path'] = skewfit_variant['presampled_goals_path']
def train_vae_and_update_variant(variant):
    """
    Train the VAE (unless a pre-trained one is supplied) and stash it --
    plus its train/test data when requested -- into
    variant['skewfit_variant'], in place.
    """
    from rlkit.core import logger

    skewfit_variant = variant['skewfit_variant']
    train_vae_variant = variant['train_vae_variant']

    if skewfit_variant.get('vae_path', None) is not None:
        # A pre-trained VAE is given: optionally regenerate just the data.
        if skewfit_variant.get('save_vae_data', False):
            vae_train_data, vae_test_data, info = generate_vae_dataset(
                train_vae_variant['generate_vae_dataset_kwargs']
            )
            skewfit_variant['vae_train_data'] = vae_train_data
            skewfit_variant['vae_test_data'] = vae_test_data
        return

    # Route VAE pretraining metrics to their own csv file.
    logger.remove_tabular_output(
        'progress.csv', relative_to_snapshot_dir=True
    )
    logger.add_tabular_output(
        'vae_progress.csv', relative_to_snapshot_dir=True
    )

    vae, vae_train_data, vae_test_data = train_vae(train_vae_variant,
                                                   return_data=True)
    if skewfit_variant.get('save_vae_data', False):
        skewfit_variant['vae_train_data'] = vae_train_data
        skewfit_variant['vae_test_data'] = vae_test_data
    logger.save_extra_data(vae, 'vae.pkl', mode='pickle')

    # Restore the main progress csv for the RL phase.
    logger.remove_tabular_output(
        'vae_progress.csv',
        relative_to_snapshot_dir=True,
    )
    logger.add_tabular_output(
        'progress.csv',
        relative_to_snapshot_dir=True,
    )
    skewfit_variant['vae_path'] = vae  # just pass the VAE directly
def train_vae(variant, return_data=False):
    """
    Train a ConvVAE on data from the configured dataset generator.

    Returns the trained model, or (model, train_data, test_data) when
    `return_data` is True.  The model is also pickled to 'vae.pkl' via the
    logger.
    """
    from rlkit.util.ml_util import PiecewiseLinearSchedule
    from rlkit.torch.vae.conv_vae import (
        ConvVAE,
    )
    import rlkit.torch.vae.conv_vae as conv_vae
    from rlkit.torch.vae.vae_trainer import ConvVAETrainer
    from rlkit.core import logger
    import rlkit.torch.pytorch_util as ptu
    from rlkit.pythonplusplus import identity
    import torch

    beta = variant["beta"]
    representation_size = variant["representation_size"]
    # Allow the caller to plug in a custom dataset generator.
    generate_vae_dataset_fctn = variant.get('generate_vae_data_fctn',
                                            generate_vae_dataset)
    train_data, test_data, info = generate_vae_dataset_fctn(
        variant['generate_vae_dataset_kwargs']
    )
    logger.save_extra_data(info)
    logger.get_snapshot_dir()
    # Optional beta annealing schedule; constant beta otherwise.
    if 'beta_schedule_kwargs' in variant:
        beta_schedule = PiecewiseLinearSchedule(
            **variant['beta_schedule_kwargs'])
    else:
        beta_schedule = None
    if variant.get('decoder_activation', None) == 'sigmoid':
        decoder_activation = torch.nn.Sigmoid()
    else:
        decoder_activation = identity
    # Fall back to the default architecture matching the image size.
    architecture = variant['vae_kwargs'].get('architecture', None)
    if not architecture and variant.get('imsize') == 84:
        architecture = conv_vae.imsize84_default_architecture
    elif not architecture and variant.get('imsize') == 48:
        architecture = conv_vae.imsize48_default_architecture
    variant['vae_kwargs']['architecture'] = architecture
    variant['vae_kwargs']['imsize'] = variant.get('imsize')
    m = ConvVAE(
        representation_size,
        decoder_output_activation=decoder_activation,
        **variant['vae_kwargs']
    )
    m.to(ptu.device)
    t = ConvVAETrainer(train_data, test_data, m, beta=beta,
                       beta_schedule=beta_schedule, **variant['algo_kwargs'])
    save_period = variant['save_period']
    # NOTE(review): `dump_skew_debug_plots` is read but never used in this
    # function -- presumably consumed elsewhere or vestigial; confirm.
    dump_skew_debug_plots = variant.get('dump_skew_debug_plots', False)
    for epoch in range(variant['num_epochs']):
        # Reconstructions/samples are saved every `save_period` epochs.
        should_save_imgs = (epoch % save_period == 0)
        t.train_epoch(epoch)
        t.test_epoch(
            epoch,
            save_reconstruction=should_save_imgs,
            # save_vae=False,
        )
        if should_save_imgs:
            t.dump_samples(epoch)
        t.update_train_weights()
    logger.save_extra_data(m, 'vae.pkl', mode='pickle')
    if return_data:
        return m, train_data, test_data
    return m
def generate_vae_dataset(variant):
    """Build (train, test) image datasets for VAE pre-training.

    Data comes from one of three places, in priority order:
      1. ``variant['dataset_path']`` -- a pre-generated .npy dict on disk
         (rooted at the PJHOME env var), using its 'image_desired_goal'.
      2. A cache file under /tmp (when ``use_cached`` and it exists).
      3. Fresh env rollouts with one of several collection policies
         (random/oracle mix, set-to-goal oracle, OU random rollouts, or
         plain random actions), saved back to the /tmp cache.

    Returns:
        (train_dataset, test_dataset, info): uint8 arrays of shape
        (n, imsize * imsize * num_channels); ``info`` may carry the env
        used for generation under 'env'.
    """
    env_class = variant.get('env_class', None)
    env_kwargs = variant.get('env_kwargs', None)
    env_id = variant.get('env_id', None)
    N = variant.get('N', 10000)
    test_p = variant.get('test_p', 0.9)
    use_cached = variant.get('use_cached', True)
    imsize = variant.get('imsize', 84)
    num_channels = variant.get('num_channels', 3)
    show = variant.get('show', False)
    init_camera = variant.get('init_camera', None)
    dataset_path = variant.get('dataset_path', None)
    oracle_dataset_using_set_to_goal = variant.get(
        'oracle_dataset_using_set_to_goal', False)
    random_rollout_data = variant.get('random_rollout_data', False)
    random_and_oracle_policy_data = variant.get('random_and_oracle_policy_data',
                                                False)
    random_and_oracle_policy_data_split = variant.get(
        'random_and_oracle_policy_data_split', 0)
    policy_file = variant.get('policy_file', None)
    n_random_steps = variant.get('n_random_steps', 100)
    vae_dataset_specific_env_kwargs = variant.get(
        'vae_dataset_specific_env_kwargs', None)
    save_file_prefix = variant.get('save_file_prefix', None)
    non_presampled_goal_img_is_garbage = variant.get(
        'non_presampled_goal_img_is_garbage', None)
    tag = variant.get('tag', '')
    from multiworld.core.image_env import ImageEnv, unormalize_image
    import rlkit.torch.pytorch_util as ptu
    info = {}
    if dataset_path is not None:
        # Pre-generated dataset on disk: use its goal images directly.
        print('load vae training dataset from: ', dataset_path)
        pjhome = os.environ['PJHOME']
        dataset = np.load(osp.join(pjhome, dataset_path), allow_pickle=True).item()
        dataset = dataset['image_desired_goal']
        dataset = unormalize_image(dataset)
        N = dataset.shape[0]
    else:
        if env_kwargs is None:
            env_kwargs = {}
        if save_file_prefix is None:
            save_file_prefix = env_id
        if save_file_prefix is None:
            # No env_id either: fall back to the class name.
            save_file_prefix = env_class.__name__
        filename = "/tmp/{}_N{}_{}_imsize{}_random_oracle_split_{}{}.npy".format(
            save_file_prefix,
            str(N),
            init_camera.__name__ if init_camera else '',
            imsize,
            random_and_oracle_policy_data_split,
            tag,
        )
        print("filename is: ", filename)
        if use_cached and osp.isfile(filename):
            dataset = np.load(filename)
        else:
            now = time.time()
            if env_id is not None:
                import gym
                import multiworld
                multiworld.register_all_envs()
                env = gym.make(env_id)
            else:
                if vae_dataset_specific_env_kwargs is None:
                    vae_dataset_specific_env_kwargs = {}
                # Dataset-specific kwargs win; fill gaps from env_kwargs.
                for key, val in env_kwargs.items():
                    if key not in vae_dataset_specific_env_kwargs:
                        vae_dataset_specific_env_kwargs[key] = val
                env = env_class(**vae_dataset_specific_env_kwargs)
            if not isinstance(env, ImageEnv):
                env = ImageEnv(
                    env,
                    imsize,
                    init_camera=init_camera,
                    transpose=True,
                    normalize=True,
                    non_presampled_goal_img_is_garbage=non_presampled_goal_img_is_garbage,
                )
            else:
                # Respect the existing wrapper's image size.
                imsize = env.imsize
                env.non_presampled_goal_img_is_garbage = non_presampled_goal_img_is_garbage
            env.reset()
            info['env'] = env
            if random_and_oracle_policy_data:
                policy_file = load_local_or_remote_file(policy_file)
                policy = policy_file['policy']
                policy.to(ptu.device)
            if random_rollout_data:
                from rlkit.exploration_strategies.ou_strategy import OUStrategy
                policy = OUStrategy(env.action_space)
            dataset = np.zeros((N, imsize * imsize * num_channels),
                               dtype=np.uint8)
            for i in range(N):
                if random_and_oracle_policy_data:
                    # First split-fraction of samples: random actions; the
                    # remainder come from the loaded policy.
                    num_random_steps = int(
                        N * random_and_oracle_policy_data_split)
                    if i < num_random_steps:
                        env.reset()
                        for _ in range(n_random_steps):
                            obs = env.step(env.action_space.sample())[0]
                    else:
                        obs = env.reset()
                        policy.reset()
                        for _ in range(n_random_steps):
                            policy_obs = np.hstack((
                                obs['state_observation'],
                                obs['state_desired_goal'],
                            ))
                            action, _ = policy.get_action(policy_obs)
                            obs, _, _, _ = env.step(action)
                elif oracle_dataset_using_set_to_goal:
                    print(i)
                    goal = env.sample_goal()
                    env.set_to_goal(goal)
                    obs = env._get_obs()
                elif random_rollout_data:
                    # Teleport to a fresh goal every n_random_steps samples,
                    # then wander with OU-perturbed random actions.
                    if i % n_random_steps == 0:
                        g = dict(
                            state_desired_goal=env.sample_goal_for_rollout())
                        env.set_to_goal(g)
                        policy.reset()
                        # env.reset()
                    u = policy.get_action_from_raw_action(
                        env.action_space.sample())
                    obs = env.step(u)[0]
                else:
                    env.reset()
                    for _ in range(n_random_steps):
                        obs = env.step(env.action_space.sample())[0]
                img = obs['image_observation']
                dataset[i, :] = unormalize_image(img)
                if show:
                    # Debug view: back to HWC, flipped for OpenCV's BGR.
                    img = img.reshape(3, imsize, imsize).transpose()
                    img = img[::-1, :, ::-1]
                    cv2.imshow('img', img)
                    cv2.waitKey(1)
                    # radius = input('waiting...')
            print("done making training data", filename, time.time() - now)
            np.save(filename, dataset)
    # NOTE(review): despite its name, test_p is the *train* fraction --
    # the first N*test_p rows go to train, the rest to test.
    n = int(N * test_p)
    train_dataset = dataset[:n, :]
    test_dataset = dataset[n:, :]
    return train_dataset, test_dataset, info
def get_envs(variant):
    """Construct the environment described by ``variant``.

    For state-space experiments (``do_state_exp``) the raw env is
    returned. Otherwise the env is wrapped in an ImageEnv and then a
    VAEWrappedEnv. ``vae_path`` may be a path/URL (loaded) or an
    already-built VAE object.

    Goal presampling modes:
      * ``presample_goals`` without ``presampled_goals_path``: generate
        goals, save them to disk, and terminate the process (one-shot
        tool mode).
      * ``presample_goals`` with a path: load goals and bake them into
        both wrappers.
      * ``presample_image_goals_only``: generate goals for the image env
        only.
    """
    from multiworld.core.image_env import ImageEnv
    from rlkit.envs.vae_wrapper import VAEWrappedEnv
    from rlkit.util.io import load_local_or_remote_file
    render = variant.get('render', False)
    vae_path = variant.get("vae_path", None)
    reward_params = variant.get("reward_params", dict())
    init_camera = variant.get("init_camera", None)
    do_state_exp = variant.get("do_state_exp", False)
    presample_goals = variant.get('presample_goals', False)
    presample_image_goals_only = variant.get('presample_image_goals_only',
                                             False)
    presampled_goals_path = variant.get('presampled_goals_path', None)
    # Accept either a serialized VAE (str path) or a live object.
    vae = load_local_or_remote_file(vae_path) if type(
        vae_path) is str else vae_path
    if 'env_id' in variant:
        import gym
        import multiworld
        multiworld.register_all_envs()
        env = gym.make(variant['env_id'])
    else:
        env = variant["env_class"](**variant['env_kwargs'])
    if not do_state_exp:
        if isinstance(env, ImageEnv):
            image_env = env
        else:
            image_env = ImageEnv(
                env,
                variant.get('imsize'),
                init_camera=init_camera,
                transpose=True,
                normalize=True,
            )
        if presample_goals:
            """
            This will fail for online-parallel as presampled_goals will not be
            serialized. Also don't use this for online-vae.
            """
            if presampled_goals_path is None:
                image_env.non_presampled_goal_img_is_garbage = True
                # Temporary wrapper used only to sample goals.
                vae_env = VAEWrappedEnv(
                    image_env,
                    vae,
                    imsize=image_env.imsize,
                    decode_goals=render,
                    render_goals=render,
                    render_rollouts=render,
                    reward_params=reward_params,
                    **variant.get('vae_wrapped_env_kwargs', {})
                )
                print("generating pre-sampled-goals")
                presampled_goals = variant['generate_goal_dataset_fctn'](
                    env=vae_env,
                    env_id=variant.get('env_id', None),
                    **variant['goal_generation_kwargs']
                )
                # print("presampled goals are: ", presampled_goals)
                save_path = './data/local/goals/{}-goal-{}.npy'.format(
                    variant.get('env_id'),
                    variant['goal_generation_kwargs']['num_presampled_goals']
                )
                np.save(save_path, presampled_goals)
                print("save pre-sampled goals to: ", save_path)
                # Deliberate process exit: this branch is a one-shot goal
                # generation run, not a training run. The `del` below is
                # therefore unreachable.
                exit()
                del vae_env
            else:
                presampled_goals = load_local_or_remote_file(
                    presampled_goals_path
                ).item()
            # Rebuild both wrappers with the presampled goals baked in.
            del image_env
            image_env = ImageEnv(
                env,
                variant.get('imsize'),
                init_camera=init_camera,
                transpose=True,
                normalize=True,
                presampled_goals=presampled_goals,
                **variant.get('image_env_kwargs', {})
            )
            vae_env = VAEWrappedEnv(
                image_env,
                vae,
                imsize=image_env.imsize,
                decode_goals=render,
                render_goals=render,
                render_rollouts=render,
                reward_params=reward_params,
                presampled_goals=presampled_goals,
                **variant.get('vae_wrapped_env_kwargs', {})
            )
            print("Presampling all goals only")
        else:
            vae_env = VAEWrappedEnv(
                image_env,
                vae,
                imsize=image_env.imsize,
                decode_goals=render,
                render_goals=render,
                render_rollouts=render,
                reward_params=reward_params,
                **variant.get('vae_wrapped_env_kwargs', {})
            )
            if presample_image_goals_only:
                presampled_goals = variant['generate_goal_dataset_fctn'](
                    image_env=vae_env.wrapped_env,
                    **variant['goal_generation_kwargs']
                )
                image_env.set_presampled_goals(presampled_goals)
                print("Presampling image goals only")
            else:
                print("Not using presampled goals")
        env = vae_env
    return env
def get_exploration_strategy(variant, env):
    """Build the exploration strategy named by ``variant['exploration_type']``.

    Supported types: 'ou' (Ornstein-Uhlenbeck), 'gaussian', and
    'epsilon' (epsilon-greedy). ``variant['exploration_noise']``
    (default 0.1) sets the constant sigma / random-action probability.
    Raises Exception for any other type string.
    """
    from rlkit.exploration_strategies.epsilon_greedy import EpsilonGreedy
    from rlkit.exploration_strategies.gaussian_strategy import GaussianStrategy
    from rlkit.exploration_strategies.ou_strategy import OUStrategy
    kind = variant['exploration_type']
    noise = variant.get('exploration_noise', 0.1)
    if kind == 'ou':
        # Constant sigma: max and min set to the same value.
        return OUStrategy(
            action_space=env.action_space,
            max_sigma=noise,
            min_sigma=noise,
        )
    if kind == 'gaussian':
        return GaussianStrategy(
            action_space=env.action_space,
            max_sigma=noise,
            min_sigma=noise,
        )
    if kind == 'epsilon':
        return EpsilonGreedy(
            action_space=env.action_space,
            prob_random_action=noise,
        )
    raise Exception("Invalid type: " + kind)
def skewfit_preprocess_variant(variant):
    """Mutate ``variant`` in place for pure state-space experiments,
    pointing the observation/goal keys at the state (not latent) dict
    entries. No-op unless ``variant['do_state_exp']`` is truthy."""
    if variant.get("do_state_exp", False):
        variant['observation_key'] = 'state_observation'
        variant['desired_goal_key'] = 'state_desired_goal'
        # NOTE(review): 'state_acheived_goal' misspells "achieved". The same
        # typo exists upstream in rlkit, and skewfit_experiment() recomputes
        # achieved_goal_key from desired_goal_key anyway -- confirm against
        # consumers before changing this literal.
        variant['achieved_goal_key'] = 'state_acheived_goal'
def skewfit_experiment(variant):
    """Run a Skew-Fit experiment: SAC + HER with an online-trained VAE.

    Builds the VAE-wrapped env, twin Q-networks and tanh-Gaussian policy,
    an online-VAE relabeling replay buffer, and the VAE trainer, then
    trains everything with OnlineVaeAlgorithm. ``variant`` supplies all
    hyperparameters (see the keys read below); SACTrainer, HERTrainer,
    VAEWrappedEnvPathCollector, MakeDeterministic and OnlineVaeAlgorithm
    come from module-level imports.
    """
    import rlkit.torch.pytorch_util as ptu
    from rlkit.data_management.online_vae_replay_buffer import \
        OnlineVaeRelabelingBuffer
    from rlkit.torch.networks import FlattenMlp
    from rlkit.torch.sac.policies import TanhGaussianPolicy
    from rlkit.torch.vae.vae_trainer import ConvVAETrainer
    skewfit_preprocess_variant(variant)
    env = get_envs(variant)
    uniform_dataset_fn = variant.get('generate_uniform_dataset_fn', None)
    if uniform_dataset_fn:
        uniform_dataset = uniform_dataset_fn(
            **variant['generate_uniform_dataset_kwargs']
        )
    else:
        uniform_dataset = None
    observation_key = variant.get('observation_key', 'latent_observation')
    desired_goal_key = variant.get('desired_goal_key', 'latent_desired_goal')
    # Derive the achieved-goal key from the desired-goal key by name.
    achieved_goal_key = desired_goal_key.replace("desired", "achieved")
    # Networks consume the observation concatenated with the goal.
    obs_dim = (
        env.observation_space.spaces[observation_key].low.size
        + env.observation_space.spaces[desired_goal_key].low.size
    )
    action_dim = env.action_space.low.size
    hidden_sizes = variant.get('hidden_sizes', [400, 300])
    # Twin Q-functions plus their target copies (clipped double-Q SAC).
    qf1 = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        hidden_sizes=hidden_sizes,
    )
    qf2 = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        hidden_sizes=hidden_sizes,
    )
    target_qf1 = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        hidden_sizes=hidden_sizes,
    )
    target_qf2 = FlattenMlp(
        input_size=obs_dim + action_dim,
        output_size=1,
        hidden_sizes=hidden_sizes,
    )
    policy = TanhGaussianPolicy(
        obs_dim=obs_dim,
        action_dim=action_dim,
        hidden_sizes=hidden_sizes,
    )
    vae = env.vae
    replay_buffer = OnlineVaeRelabelingBuffer(
        vae=env.vae,
        env=env,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
        achieved_goal_key=achieved_goal_key,
        **variant['replay_buffer_kwargs']
    )
    vae_trainer = ConvVAETrainer(
        variant['vae_train_data'],
        variant['vae_test_data'],
        env.vae,
        **variant['online_vae_trainer_kwargs']
    )
    assert 'vae_training_schedule' not in variant, "Just put it in algo_kwargs"
    max_path_length = variant['max_path_length']
    trainer = SACTrainer(
        env=env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        **variant['twin_sac_trainer_kwargs']
    )
    # Wrap SAC with hindsight experience relabeling.
    trainer = HERTrainer(trainer)
    # Evaluation uses the deterministic (mean) policy.
    eval_path_collector = VAEWrappedEnvPathCollector(
        variant['evaluation_goal_sampling_mode'],
        env,
        MakeDeterministic(policy),
        max_path_length,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
    )
    expl_path_collector = VAEWrappedEnvPathCollector(
        variant['exploration_goal_sampling_mode'],
        env,
        policy,
        max_path_length,
        observation_key=observation_key,
        desired_goal_key=desired_goal_key,
    )
    algorithm = OnlineVaeAlgorithm(
        trainer=trainer,
        exploration_env=env,
        evaluation_env=env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        vae=vae,
        vae_trainer=vae_trainer,
        uniform_dataset=uniform_dataset,
        max_path_length=max_path_length,
        **variant['algo_kwargs']
    )
    if variant['custom_goal_sampler'] == 'replay_buffer':
        # Skew-Fit: sample goals from the (skewed) replay buffer.
        env.custom_goal_sampler = replay_buffer.sample_buffer_goals
    algorithm.to(ptu.device)
    vae.to(ptu.device)
    algorithm.train()
def get_video_save_func(rollout_function, env, policy, variant):
    """Return a ``save_video(algo, epoch)`` callback for periodic rollouts.

    Videos are written to the snapshot dir every ``save_video_period``
    epochs (default 50) and at the final epoch. For state experiments the
    env is wrapped in an ImageEnv and a single "_env" video is dumped;
    otherwise the env is assumed image-based and both "_env" and "_vae"
    videos are dumped via temporary_mode.
    """
    logdir = logger.get_snapshot_dir()
    period = variant.get('save_video_period', 50)
    state_exp = variant.get("do_state_exp", False)
    video_kwargs = variant.get("dump_video_kwargs", dict())
    if state_exp:
        size = variant.get('imsize')
        video_kwargs['imsize'] = size
        recording_env = ImageEnv(
            env,
            size,
            init_camera=variant.get('init_camera', None),
            transpose=True,
            normalize=True,
        )

        def save_video(algo, epoch):
            if epoch % period != 0 and epoch != algo.num_epochs:
                return
            filename = osp.join(logdir,
                                'video_{epoch}_env.mp4'.format(epoch=epoch))
            dump_video(recording_env, policy, filename, rollout_function,
                       **video_kwargs)
    else:
        recording_env = env
        video_kwargs['imsize'] = env.imsize

        def save_video(algo, epoch):
            if epoch % period != 0 and epoch != algo.num_epochs:
                return
            # Dump one video in env mode and one in VAE mode.
            for mode, template in (('video_env', 'video_{epoch}_env.mp4'),
                                   ('video_vae', 'video_{epoch}_vae.mp4')):
                filename = osp.join(logdir, template.format(epoch=epoch))
                temporary_mode(
                    recording_env,
                    mode=mode,
                    func=dump_video,
                    args=(recording_env, policy, filename, rollout_function),
                    kwargs=video_kwargs
                )
    return save_video
from struct import pack
class Packet:
    """Robot packet: device, command and sequence bytes, a fixed 16-byte
    payload, and a trailing CRC-8 checksum (polynomial 0x07, init 0x00)."""

    PACKET_LEN = 20
    PAYLOAD_LEN = 16

    def __init__(self, dev, cmd, inc, payload=bytes(PAYLOAD_LEN), crc=None):
        self.dev = dev
        self.cmd = cmd
        self.inc = inc
        assert len(payload) <= self.PAYLOAD_LEN, "invalid payload length"
        # Zero-pad the payload to its fixed length.
        self.payload = payload + bytes(self.PAYLOAD_LEN - len(payload))
        self._crc = crc

    @classmethod
    def from_bytes(cls, raw):
        """create a new packet instance from raw bytes"""
        assert len(raw) == cls.PACKET_LEN, "invalid packet length"
        dev, cmd, inc = raw[0], raw[1], raw[2]
        return Packet(dev, cmd, inc, payload=raw[3:19], crc=raw[19])

    def to_bytes(self):
        """20 byte packet with crc"""
        return self.packet() + bytes([self.calc_crc()])

    def to_bytearray(self):
        """mutable 20 byte array with crc"""
        return bytearray(self.to_bytes())

    def packet(self):
        """19 byte packet without crc"""
        return pack("3B", self.dev, self.cmd, self.inc) + self.payload

    def check_crc(self):
        """check if computed crc matches stored crc"""
        if self._crc is None:
            return False
        return self._crc == self.calc_crc()

    def calc_crc(self):
        """CRC-8 (poly 0x07, MSB-first, init 0x00) over the 19 data bytes."""
        crc = 0x00
        for value in self.packet():
            crc ^= value
            for _ in range(8):
                crc <<= 1
                # Bit shifted out of the top byte triggers the reduction.
                if crc & 0x100:
                    crc ^= 0x07
                crc &= 0xFF
        return crc

    @property
    def crc(self):
        """return stored crc or calculate new crc"""
        if self._crc is not None:
            return self._crc
        return self.calc_crc()

    def __str__(self):
        return f"Packet(dev={self.dev}, cmd={self.cmd}, inc={self.inc}, payload={self.payload}, crc={self.crc})"
|
import unittest
from .. import keystore, networks
class TestBip44Derivations(unittest.TestCase):
    """Check the BIP44 derivation-path helpers on main and test networks."""

    def setUp(self):
        # Remember which network was active before the test ran.
        self.initial_net = networks.net

    def tearDown(self):
        # make sure we restore the network settings, in case it affects other tests
        if self.initial_net == networks.MainNet:
            networks.set_mainnet()
        else:
            networks.set_testnet()

    def test_mainnet(self):
        networks.set_mainnet()
        cases = (
            (keystore.bip44_derivation_xec, "m/44'/899'/1337'"),
            (keystore.bip44_derivation_bch, "m/44'/145'/1337'"),
            (keystore.bip44_derivation_btc, "m/44'/0'/1337'"),
            (keystore.bip44_derivation_xec_tokens, "m/44'/1899'/1337'"),
            (keystore.bip44_derivation_bch_tokens, "m/44'/245'/1337'"),
        )
        for derive, expected_path in cases:
            self.assertEqual(derive(1337), expected_path)

    def test_testnet(self):
        networks.set_testnet()
        # All coin types collapse to 1' on testnet, except the token paths.
        cases = (
            (keystore.bip44_derivation_xec, "m/44'/1'/1337'"),
            (keystore.bip44_derivation_bch, "m/44'/1'/1337'"),
            (keystore.bip44_derivation_btc, "m/44'/1'/1337'"),
            (keystore.bip44_derivation_xec_tokens, "m/44'/1899'/1337'"),
            (keystore.bip44_derivation_bch_tokens, "m/44'/245'/1337'"),
        )
        for derive, expected_path in cases:
            self.assertEqual(derive(1337), expected_path)


if __name__ == "__main__":
    unittest.main()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SentimentBatchResultItem(Model):
    """SentimentBatchResultItem.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar score: A decimal number between 0 and 1 denoting the sentiment of
     the document. A score above 0.7 usually refers to a positive document
     while a score below 0.3 normally has a negative connotation. Mid values
     refer to neutral text.
    :vartype score: float
    :ivar id: Unique document identifier.
    :vartype id: str
    """
    # Read-only fields: msrest omits them when serializing a request body.
    _validation = {
        'score': {'readonly': True},
        'id': {'readonly': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'score': {'key': 'score', 'type': 'float'},
        'id': {'key': 'id', 'type': 'str'},
    }
    def __init__(self):
        super(SentimentBatchResultItem, self).__init__()
        # Populated by msrest deserialization of the service response.
        self.score = None
        self.id = None
|
"""
`gef` command test module
"""
import pytest
import pathlib
from tests.utils import (
gdb_run_cmd,
GefUnitTestGeneric,
gdb_start_silent_cmd_last_line,
removeuntil,
)
class GefCommand(GefUnitTestGeneric):
    """`gef` command test module"""
    def test_cmd_gef(self):
        # Bare `gef` prints the banner.
        res = gdb_run_cmd("gef")
        self.assertNoException(res)
        self.assertIn("GEF - GDB Enhanced Features", res)
    def test_cmd_gef_config(self):
        # `gef config` with no argument lists every known setting.
        res = gdb_run_cmd("gef config")
        self.assertNoException(res)
        self.assertIn("GEF configuration settings", res)
        known_patterns = (
            "gef.autosave_breakpoints_file (str)",
            "gef.debug (bool)",
            "gef.disable_color (bool)",
            "gef.extra_plugins_dir (str)",
            "gef.follow_child (bool)",
            "gef.readline_compat (bool)",
            "gef.show_deprecation_warnings (bool)",
            "gef.tempdir (str)",
            "got.function_not_resolved (str)",
            "got.function_resolved (str)",
        )
        for pattern in known_patterns:
            self.assertIn(pattern, res)
    def test_cmd_gef_config_get(self):
        # Reading one setting prints its value and description.
        res = gdb_run_cmd("gef config gef.debug")
        self.assertNoException(res)
        self.assertIn("GEF configuration setting: gef.debug", res)
        # the `True` is automatically set by `gdb_run_cmd` so we know it's there
        self.assertIn("""gef.debug (bool) = True\n\nDescription:\n\tEnable debug mode for gef""",
                      res)
    def test_cmd_gef_config_set(self):
        # Setting gef.debug to 0 must be observable from the Python side.
        res = gdb_start_silent_cmd_last_line("gef config gef.debug 0",
                                             after=("pi print(is_debug())", ))
        self.assertNoException(res)
        self.assertEqual("False", res)
    def test_cmd_gef_help(self):
        # `help gef` lists all gef subcommands.
        res = gdb_run_cmd("help gef")
        self.assertNoException(res)
        known_patterns = (
            "gef config",
            "gef help",
            "gef install",
            "gef missing",
            "gef restore",
            "gef run",
            "gef save",
            "gef set",
        )
        for pattern in known_patterns:
            self.assertIn(pattern, res)
    def test_cmd_gef_run_and_run(self):
        # `gef set args` expands the $_gef<N> convenience variables created
        # by `pattern create`; an out-of-range index must raise.
        res = gdb_run_cmd("gef set args $_gef0",
                          before=("pattern create -n 4", ),
                          after=("show args"))
        self.assertNoException(res)
        self.assertIn("aaaabaaacaaadaaaeaaafaaagaaahaaaiaaajaaakaaalaaamaaan", res)
        res = gdb_run_cmd("gef set args $_gef42",
                          before=("pattern create -n 4", ),
                          after=("show args"))
        self.assertException(res)
    def test_cmd_gef_save(self):
        # check
        res = gdb_run_cmd("gef save")
        self.assertNoException(res)
        self.assertIn("Configuration saved to '", res)
        # Extract the gefrc path from the status message.
        gefrc_file = removeuntil("Configuration saved to '", res.rstrip("'"))
        # set & check
        for name in ("AAAABBBBCCCCDDDD", "gef"):
            res = gdb_run_cmd("gef save", before=(f"gef config gef.tempdir /tmp/{name}", ))
            self.assertNoException(res)
            with pathlib.Path(gefrc_file).open() as f:
                config = f.read()
            self.assertIn(f'tempdir = /tmp/{name}\n', config)
    @pytest.mark.online
    def test_cmd_gef_install(self):
        res = gdb_run_cmd("gef install skel windbg stack")
        self.assertNoException(res)
        # we install 3 plugins, the pattern must be found 3 times
        pattern = "Installed file"
        for _ in range(3):
            idx = res.find(pattern)
            self.assertNotEqual(-1, idx)
            self.assertIn("new command(s) available", res)
            # NOTE(review): slicing from idx (not idx + len(pattern)) keeps
            # the current match at position 0, so later iterations re-find
            # the same occurrence -- confirm whether this should advance
            # past the pattern.
            res = res[idx:]
|
from netapp.netapp_object import NetAppObject
class TestViewType1(NetAppObject):
"""
Test input typedef 1
"""
_node = None
@property
def node(self):
"""
Node
Attributes: key, required-for-create, non-modifiable
"""
return self._node
@node.setter
def node(self, val):
if val != None:
self.validate('node', val)
self._node = val
_field_1 = None
@property
def field_1(self):
"""
This is zapi description for field1.
Attributes: key, required-for-create, non-modifiable
"""
return self._field_1
@field_1.setter
def field_1(self, val):
if val != None:
self.validate('field_1', val)
self._field_1 = val
_field_2 = None
@property
def field_2(self):
"""
Generic/Dummy Field 2
Attributes: key, required-for-create, non-modifiable
"""
return self._field_2
@field_2.setter
def field_2(self, val):
if val != None:
self.validate('field_2', val)
self._field_2 = val
@staticmethod
def get_api_name():
return "test-view-type-1"
@staticmethod
def get_desired_attrs():
return [
'node',
'field-1',
'field-2',
]
def describe_properties(self):
return {
'node': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'field_1': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'field_2': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
|
"""Drop the response column. We're gonna push straight to Bas.
Revision ID: 04d56b17edad
Revises: 60a4bfee242f
Create Date: 2020-12-11 08:26:57.780808
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '04d56b17edad'
down_revision = '60a4bfee242f'
branch_labels = None
depends_on = None
# Note that Sqlite is silly and has limitations on dropping columns. :-(
# See https://blog.miguelgrinberg.com/post/fixing-alter-table-errors-with-flask-migrate-and-sqlite
def upgrade():
    """Drop the ``response`` column from metasysCrawl."""
    # batch_alter_table recreates the table under the hood, which is the
    # only way SQLite can drop a column.
    with op.batch_alter_table('metasysCrawl', schema=None) as batch_op:
        batch_op.drop_column('response')
        # op.drop_column('metasysCrawl', 'response')
def downgrade():
    """Re-create the ``response`` column (previous content is not restored)."""
    # op.add_column('metasysCrawl', sa.Column('response', sa.Text(), nullable=True))
    with op.batch_alter_table('metasysCrawl', schema=None) as batch_op:
        batch_op.add_column(sa.Column('response', sa.Text(), nullable=True))
"""Plot figures for tutorial."""
import chaospy as cp
import numpy
import matplotlib.pyplot as plt
import seaborn
def plot_figures():
    """Generate the tutorial figures and save them as PNG files.

    Writes demonstration.png, results_montecarlo.png,
    results_collocation.png and results_spectral.png to the current
    working directory. The seed is fixed so the figures are reproducible.
    """
    numpy.random.seed(1000)

    def foo(coord, param):
        # Exponential decay model: amplitude param[0], decay rate param[1].
        return param[0] * numpy.e ** (-param[1] * coord)

    coord = numpy.linspace(0, 10, 200)
    distribution = cp.J(cp.Uniform(1, 2), cp.Uniform(0.1, 0.2))

    # Figure 1: raw model evaluations for 50 random parameter samples.
    samples = distribution.sample(50)
    evals = numpy.array([foo(coord, sample) for sample in samples.T])
    plt.plot(coord, evals.T, "k-", lw=3, alpha=0.2)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.savefig("demonstration.png")
    plt.clf()

    # Figure 2: mean +/- std estimated with Halton-sequence Monte Carlo.
    samples = distribution.sample(1000, "H")
    evals = [foo(coord, sample) for sample in samples.T]
    expected = numpy.mean(evals, 0)
    deviation = numpy.std(evals, 0)
    plt.fill_between(
        coord, expected-deviation, expected+deviation,
        color="k", alpha=0.3
    )
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using Monte Carlo simulation")
    plt.savefig("results_montecarlo.png")
    plt.clf()

    # Figure 3: point collocation (regression on an orthogonal expansion,
    # reusing the Monte Carlo samples/evaluations).
    polynomial_expansion = cp.orth_ttr(8, distribution)
    foo_approx = cp.fit_regression(polynomial_expansion, samples, evals)
    expected = cp.E(foo_approx, distribution)
    deviation = cp.Std(foo_approx, distribution)
    plt.fill_between(
        coord, expected-deviation, expected+deviation,
        color="k", alpha=0.3
    )
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    plt.title("Results using point collocation method")
    plt.savefig("results_collocation.png")
    plt.clf()

    # Figure 4: pseudo-spectral projection using Clenshaw-Curtis quadrature.
    abscissas, weights = cp.generate_quadrature(8, distribution, "C")
    evals = [foo(coord, val) for val in abscissas.T]
    foo_approx = cp.fit_quadrature(polynomial_expansion, abscissas, weights, evals)
    expected = cp.E(foo_approx, distribution)
    deviation = cp.Std(foo_approx, distribution)
    plt.fill_between(
        coord, expected-deviation, expected+deviation,
        color="k", alpha=0.3
    )
    plt.plot(coord, expected, "k--", lw=3)
    plt.xlabel(r"\verb;coord;")
    plt.ylabel(r"function evaluations \verb;foo;")
    # Typo fix: was "psuedo-spectral".
    plt.title("Results using pseudo-spectral projection method")
    plt.savefig("results_spectral.png")
    plt.clf()
if __name__ == "__main__":
    plot_figures()
|
import numpy as np
import torch
class EvaluationMetrics:
    """IoU-based evaluation metrics for single-box detections.

    All box tensors are expected with shape (N, 1, 4) holding
    (x1, y1, x2, y2) corners, e.g. produced via ``.view(-1, 1, 4)``.
    """

    def __init__(self):
        # Elementwise clamp-at-zero, used for both the intersection
        # extents and the final IoU values.
        self.relu = torch.nn.ReLU()

    def average_precision_score(self, bbox, bbox_pred, threshold):
        """Fraction of box pairs whose IoU is strictly above ``threshold``.

        Returns a numpy scalar in [0, 1].
        """
        y_loss = self.iou_loss(bbox, bbox_pred).detach().numpy()
        y_score = [1 if v > threshold else 0 for v in y_loss]
        score = np.mean(y_score)
        return score

    def iou_loss(self, bbox, bbox_pred):
        """Per-pair intersection-over-union, shape (N,).

        The intersection width and height are clamped at zero before
        being multiplied: previously two negative extents (fully disjoint
        boxes) multiplied into a spurious *positive* intersection and an
        IoU near 1. A small epsilon keeps the ratio finite when both
        boxes are degenerate (zero area).
        """
        area1 = (bbox[:, 0, 2] - bbox[:, 0, 0]) * (bbox[:, 0, 3] - bbox[:, 0, 1])
        area2 = (bbox_pred[:, 0, 2] - bbox_pred[:, 0, 0]) * \
            (bbox_pred[:, 0, 3] - bbox_pred[:, 0, 1])
        inter_w = self.relu(torch.min(bbox[:, 0, 2], bbox_pred[:, 0, 2])
                            - torch.max(bbox[:, 0, 0], bbox_pred[:, 0, 0]))
        inter_h = self.relu(torch.min(bbox[:, 0, 3], bbox_pred[:, 0, 3])
                            - torch.max(bbox[:, 0, 1], bbox_pred[:, 0, 1]))
        area_intersection = inter_w * inter_h
        loss = (area_intersection + 1e-4) / \
            (area1 + area2 - area_intersection + 1e-4)
        loss = self.relu(loss)
        return loss

    def mean_average_precision(self, bbox, bbox_pred):
        """Mean of average_precision_score over IoU thresholds 0.0..1.0."""
        scores = 0
        count = 0
        for threshold in range(0, 11):
            scores += self.average_precision_score(
                bbox=bbox, bbox_pred=bbox_pred, threshold=threshold * 0.1)
            count += 1
        scores /= count
        return scores


if __name__ == '__main__':
    eval1 = EvaluationMetrics()
    bbox = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=torch.float).view(-1, 1, 4)
    bbox_pred = torch.tensor([[1, 2, 3, 4], [1, 2, 300, 5]], dtype=torch.float).view(-1, 1, 4)
    print(eval1.mean_average_precision(bbox=bbox, bbox_pred=bbox_pred))
from dataclasses import dataclass, field
from typing import List, Optional
@dataclass
class Ids:
    """A list of id references plus two optional id attributes."""

    class Meta:
        name = "ids"

    # At least one idref element is required by the schema.
    idref: List[str] = field(
        default_factory=list,
        metadata={"type": "Element", "namespace": "", "min_occurs": 1},
    )
    id1: Optional[str] = field(default=None, metadata={"type": "Attribute"})
    id2: Optional[str] = field(default=None, metadata={"type": "Attribute"})
@dataclass
class Root(Ids):
    """Document root element; same shape as Ids, serialized as "root"."""

    class Meta:
        name = "root"
|
# Generated by Django 2.2.19 on 2021-04-08 10:13
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Question and UserAnswer models and relabel the
    DataRetentionStatistics verbose names."""

    dependencies = [
        ('user', '0020_dataretentionstatistics'),
    ]
    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                (
                    'created',
                    django_extensions.db.fields.CreationDateTimeField(
                        auto_now_add=True, null=True, verbose_name='created'
                    ),
                ),
                (
                    'modified',
                    django_extensions.db.fields.ModificationDateTimeField(
                        auto_now=True, null=True, verbose_name='modified'
                    ),
                ),
                ('name', models.CharField(max_length=128)),
                ('title', models.CharField(max_length=256)),
                (
                    'question_type',
                    # NOTE(review): max_length=5 is shorter than several of
                    # the choice values ('MULTIPLE_SELECTOR' is 17 chars),
                    # so inserts of those values will fail or truncate on
                    # strict backends. This migration may already be
                    # applied -- correct it with a follow-up AlterField
                    # migration rather than editing this one in place.
                    models.CharField(
                        choices=[
                            ('RADIO', 'radio'),
                            ('SELECTION', 'Selection'),
                            ('MULTIPLE_SELECTOR', 'Multiple selection'),
                            ('TEXT', 'text'),
                            ('COMPANY_LOOKUP', 'Company lookup'),
                        ],
                        max_length=5,
                    ),
                ),
                (
                    'question_choices',
                    django.contrib.postgres.fields.jsonb.JSONField(
                        blank=True, default=dict, help_text='Array of choices'
                    ),
                ),
                ('is_active', models.BooleanField(default=True)),
                ('order', models.PositiveSmallIntegerField(default=0)),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.Service')),
            ],
            options={
                'ordering': ['-order'],
            },
        ),
        migrations.AlterModelOptions(
            name='dataretentionstatistics',
            options={'verbose_name': 'Data Retention Statistics', 'verbose_name_plural': 'Data Retention Statistics'},
        ),
        migrations.CreateModel(
            name='UserAnswer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                (
                    'created',
                    django_extensions.db.fields.CreationDateTimeField(
                        auto_now_add=True, null=True, verbose_name='created'
                    ),
                ),
                (
                    'modified',
                    django_extensions.db.fields.ModificationDateTimeField(
                        auto_now=True, null=True, verbose_name='modified'
                    ),
                ),
                ('answer', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default=dict)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.Question')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'get_latest_by': 'modified',
                'abstract': False,
            },
        ),
    ]
|
# Generated by Django 2.2.4 on 2020-12-04 05:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the per-user History model for past analysis runs."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Dashboard', '0015_recommend_activated'),
    ]
    operations = [
        migrations.CreateModel(
            name='History',
            fields=[
                ('hist_id', models.AutoField(primary_key=True, serialize=False)),
                ('updated', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): capitalized field name 'Results' breaks the
                # lowercase convention of the other fields; since this
                # migration may already be applied, rename via a follow-up
                # migration if desired.
                ('Results', models.CharField(max_length=100)),
                ('description', models.TextField()),
                ('recommendations', models.TextField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
from django.contrib import admin
from Pins.models import Pin
# Expose Pin in the Django admin with the default ModelAdmin options.
admin.site.register(Pin)
|
#! /usr/bin/env python2
#
# This source file is Copyright (c) 2019, FERMI NATIONAL ACCELERATOR
# LABORATORY. All rights reserved.
# For details of the Fermitools (BSD) license see COPYING.
import cvmfs_user_pub
def application(environ, start_response):
    """WSGI entry point: delegate every request to cvmfs_user_pub."""
    return cvmfs_user_pub.dispatch(environ, start_response)
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from pandora.impl.pytorch import utils
class RNNEncoder(nn.Module):
    """RNN Character level encoder of the focus token"""
    def __init__(self, num_layers, input_size, hidden_size, dropout=0.0,
                 merge_mode='concat'):
        # num_layers: stacked LSTM layers; input_size: per-character
        # embedding dim; hidden_size: size of the encoder output;
        # merge_mode: how the two LSTM directions are combined
        # ('concat' or 'sum').
        self.num_layers = num_layers
        self.input_shape = input_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.merge_mode = merge_mode
        super(RNNEncoder, self).__init__()
        self.rnn_hidden_size = self.hidden_size
        if self.merge_mode == 'concat':
            # Each direction gets half the hidden size so the concatenated
            # bidirectional output equals hidden_size.
            hidden_size, rest = divmod(self.hidden_size, 2)
            if rest > 0:
                raise ValueError("'concat' merge_mode needs even hidden_size")
            self.rnn_hidden_size = hidden_size
        self.rnn = nn.LSTM(input_size=input_size,
                           hidden_size=self.rnn_hidden_size,
                           num_layers=num_layers,
                           bidirectional=True,
                           dropout=self.dropout)
        self.init()
    def init(self):
        # rnn
        utils.init_rnn(self.rnn)
    def init_hidden(self, token_in, batch_dim=1):
        # Zero-valued (h_0, c_0) with shape
        # (2 directions * num_layers, batch, rnn_hidden_size), allocated
        # on the same device/dtype as token_in.
        batch = token_in.size(batch_dim)
        size = (2 * self.num_layers, batch, self.rnn_hidden_size)
        h_0 = Variable(token_in.data.new(*size).zero_(), requires_grad=False)
        c_0 = Variable(token_in.data.new(*size).zero_(), requires_grad=False)
        return h_0, c_0
    def forward(self, token_embed):
        """
        Parameters
        ===========
        token_embed: (seq_len x batch x emb_dim)
        Returns:
        =========
        token_out : (batch x hidden_size), summary vector for each batch,
            consisting of the last hidden activation of the rnn
        token_context: (seq_len x batch x hidden_size), output of the rnn
            over the entire sequence
        """
        output, _ = self.rnn(
            token_embed, self.init_hidden(token_embed))
        if self.merge_mode == 'sum':
            # (seq_len x batch x hidden_size * 2)
            seq_len, batch, _ = output.size()
            # expose bidirectional hidden (seq_len x batch x 2 x hidden)
            output = output.view(seq_len, -1, 2, self.rnn_hidden_size)
            # (seq_len x batch x 1 x hidden_size)
            left, right = torch.chunk(output, chunks=2, dim=2)
            # (seq_len x batch x hidden_size)
            output = (left + right).squeeze(2)
        # 'concat' mode needs no merge: nn.LSTM already concatenates the
        # two directions along the feature axis.
        token_out, token_context = output[-1], output
        return token_out, token_context
class BottleEncoder(nn.Module):
    """
    Implements a linear projection from (seq_len x batch x inp_size) to
    (batch x output_size) applying one of three different pooling operations.
    If `flatten`, it applies the linear transformation on the flattend input
    (seq_len * inp_size) to output_size. Flatten requires a known fixed input
    length. If `max` it does max pooling over the seq_len dimension prior
    to the linear transformation. If `rnn`, it runs a bidirection GRU over
    the seq_len and applies the linear transformation on the concatenated
    summary vectors of both directions.
    """
    def __init__(self, inp_size, output_size, seq_len=None, pooling='flatten',
                 dropout=0.0):
        self.pooling = pooling
        self.dropout = dropout
        self.inp_size = inp_size
        super(BottleEncoder, self).__init__()
        if self.pooling == 'flatten':
            assert seq_len, "Flatten requires knowing the seq_len"
            # no pooling, do flattening instead
            self.dense = nn.Linear(seq_len * inp_size, output_size)
        elif self.pooling == 'rnn':
            # Each GRU direction gets half the input size so the
            # concatenated output keeps inp_size features.
            assert divmod(inp_size, 2)[1] == 0, \
                "rnn pooling requires even input size"
            self.pooling_layer = nn.GRU(
                inp_size, inp_size // 2, bidirectional=True)
            self.dense = nn.Linear(inp_size, output_size)
        elif self.pooling == 'max':
            self.dense = nn.Linear(inp_size, output_size)
        # NOTE(review): unlike RNNEncoder/ConvEncoder, __init__ does not
        # call self.init(), so the custom weight initialization below only
        # runs if a caller invokes init() explicitly -- confirm whether
        # this is intentional.
    def init(self):
        if self.pooling == 'rnn':
            # rnn
            utils.init_rnn(self.pooling_layer)
        # linear
        utils.init_linear(self.dense)
    def forward(self, inp):
        """
        Parameters
        ===========
        inp : (batch x inp_size x seq_len)
        Returns
        ========
        output : (batch x output_size)
        """
        if self.pooling == 'flatten':
            # (batch x inp_size * seq_len)
            inp = inp.view(inp.size(0), -1)
        elif self.pooling == 'rnn':
            # initial hidden
            batch, num_dirs, hid_size = inp.size(0), 2, self.inp_size // 2
            hidden = Variable(
                inp.data.new(num_dirs, batch, hid_size).zero_(),
                requires_grad=False)
            # (batch x inp_size x seq_len) -> (seq_len x batch x inp_size)
            inp = inp.transpose(1, 2).transpose(0, 1).contiguous()
            # (seq_len x batch x inp_size)
            inp, _ = self.pooling_layer(inp, hidden)
            # (batch x inp_size): last time step (summary of both directions)
            inp = inp[-1]
        elif self.pooling == 'max':
            # (batch x inp_size x seq_len) -> (batch x inp_size)
            inp, _ = inp.max(2)
        inp = F.dropout(inp, p=self.dropout, training=self.training)
        return self.dense(inp)
def get_conv_output_length(inp_len, kernel_size,
                           padding=0, dilation=1, stride=1):
    """
    Compute the length of a 1d convolutional output sequence (l_out):

        l_out = floor(
            (l_in + 2 * padding - dilation * (kernel_size - 1) - 1)
            / stride
            + 1)

    Uses integer floor division instead of float division + math.floor, so
    the result is exact (and stays an int) even for very large lengths,
    where float division could round incorrectly. Identical results for the
    integer inputs this helper is called with.
    """
    return (inp_len + 2 * padding - dilation * (kernel_size - 1) - 1) \
        // stride + 1
class ConvEncoder(nn.Module):
    """Character-level CNN encoder of the focus token.

    A 1d convolution over the embedded characters, followed by a
    BottleEncoder projection of the convolved feature maps.
    """
    def __init__(self, in_channels, out_channels, kernel_size, output_size,
                 token_len, dropout=0.0, pooling='flatten'):
        self.dropout = dropout
        self.pooling = pooling
        self.out_channels = out_channels
        super(ConvEncoder, self).__init__()
        self.focus_conv = nn.Conv1d(
            in_channels=in_channels,    # emb_dim
            out_channels=out_channels,  # nb_filters
            kernel_size=kernel_size)
        # flatten-pooling needs the (fixed) post-convolution sequence length
        flat_len = (get_conv_output_length(token_len, kernel_size)
                    if pooling == 'flatten' else None)
        self.focus_dense = BottleEncoder(
            out_channels, output_size, seq_len=flat_len, pooling=pooling,
            dropout=dropout)
        self.init()

    def init(self):
        # NOTE(review): only the conv layer is initialized here; the inner
        # BottleEncoder's init() is never invoked -- confirm this is intended.
        utils.init_conv(self.focus_conv)

    def forward(self, token_embed):
        """
        Parameters
        ===========
        token_embed : (seq_len x batch x emb_dim)

        Returns
        ========
        token_out : (batch x output_size)
        token_context : (conv_seq_len x batch x channel_out)
        """
        # (seq_len x batch x emb_dim) -> (batch x emb_dim x seq_len)
        conv_inp = token_embed.permute(1, 2, 0)
        # (batch x channel_out x conv_seq_len)
        token_context = F.relu(self.focus_conv(conv_inp))
        # (batch x output_size), dropout then relu as in the rest of the file
        token_out = F.relu(
            F.dropout(self.focus_dense(token_context),
                      p=self.dropout, training=self.training))
        return token_out, token_context.permute(2, 0, 1)
|
#!/usr/bin/env python
# coding: utf-8
# # **Hello, World!**
# Welcome to the world of python. In this section, we simply print a string using python code. You can say it's kind of a ritual/tradition for anyone who sets on a journey in coding with any language - our way of saying "Hey world, here I come!" 😉
# In[1]:
print('Hello World!!')
print('Get ready to rock-N-roll ')
# In[2]:
# we saw that the lines automatically shifted to new lines.
# although we can do this manually too, in one print statement
print('Hello World!! \nGet ready to rock-N-roll')
# So what was that `\n` ?
#
# Read this - https://qr.ae/pGSFo0
# # **Interact with python - get an input/data**
# To do some work, we need something to begin with - right?
# For example, you need a bat and a ball to play cricket.
# Similarly, while coding we need some data or input from the user on which we will do some work.
# In[3]:
#asking user for a text (name)
username = input('Please enter your name - ')
#lets greet the user now
print ('Hello ' +username)
# In[4]:
#OK so lets go one step ahead ;) -- just to give you a feel of what we can do with programming
#now its time to help the user know his/her age
#ask the user's birth year
year = input('Please enter your Birth Year - ')
# In[5]:
#calculating the age, here we have to change the type for year from string to integer
age = 2021-int(year)
#now we will print the results
print(f'Hey {username}, You are {age} year(s) old :) ')
#
# * using `f` before writing the print statement saves us a lot of time and effort
# * just need to keep in mind that we are keeping the variables in `{}`
# * read more here - https://docs.python.org/3/tutorial/inputoutput.html
# # **Dealing with Numbers**
# So, in programming you can do lots of mathematical operations. But for that you need to have an idea of what are the various datatypes that are to be considered - or even not considered! This section will introduce with some of the possibilities and we'll cover the other complex stuff as we progress.
#
# You've got it. Keep going! 🙂
# In[6]:
# first let us understand the datatypes.
# integer datatype
print(f"The datatype for variable 20 is {type(20)}") # <class 'int'>
# In[7]:
# float datatype
print(f"The datatype for variable 20.02 is {type(20.02)}") # <class 'float'>
# In[8]:
# string datatype
print(f"The datatype for variable 'abcd efg hijk lmnop' is {type('abcd efg hijk lmnop')}") #<class 'str'>
# In[9]:
#get the binary for the number
print(bin(2020)) #prints the binary form of 2020 which is 0b11111100100
#get number(integer) from binary form
print(int('0b11111100100', 2)) # 2 for printing the integer form by base as 2
# ---------------- that would be enough for now to understand about the datatypes.
# In[10]:
# Let us now perform some arithmetic operations
#arithmetic operations without variables
print(f"Sum of 3 and 5 is {3+5}")
print(f"difference of 3 and 5 is {3-5}")
print(f"Product of 3 and 5 is {3*5}")
print(f"Fraction or Division of 3 and 5 is {3/5}")
print(f"exponent of 3 with 5 is {3**5}") #exponent
print(f"Modulus of 3 and 5 is {3%5}")#mod
# In[12]:
#arithmetic operations with variables
a = input("Enter a number. It will be stored as 'a' = ")
b = input("Enter another number. It will be stored as 'b' = ")
#python accepts inputs as str. So whenever we need to perform any mathematical operations, we need to change the datatypes
# In[13]:
print(f"You see, I am writing here a+b but the output will not be the sum. \nInstead, you will see the two numbers will be concatenated! \nHere is the output = {a+b}")
# In[14]:
a = float(a) #keeping in float is safer as user might feed data with decimals
b = float(b) #keeping in float is safer as user might feed data with decimals
print(f"Sum of {a} and {b} is {a+b}")
print(f"difference of {a} and {b} is {a-b}")
print(f"Product of {a} and {b} is {a*b}")
print(f"Fraction or Division of {a} and {b} is {a/b}")
print(f"exponent of {a} with {b} is {a**b}") #exponent
print(f"Modulus of {a} and {b} is {a%b}")#mod
# # **Math Functions in Python**
# To make our lives easier, there are many in-built special functions that are very useful to do specific tasks.
# Here we will see few of the in-built functions that can be used to perform mathematical operations.
#
# - first we need to import the math module. Read more here https://docs.python.org/3/library/math.html
# - This module provides access to the mathematical functions defined by the C standard.
#
# - These functions cannot be used with complex numbers; use the functions of the same name from the cmath module if you require support for complex numbers. The distinction between functions which support complex numbers and those which don’t is made since most users do not want to learn quite as much mathematics as required to understand complex numbers. Receiving an exception instead of a complex result allows earlier detection of the unexpected complex number used as a parameter, so that the programmer can determine how and why it was generated in the first place.
#
# - The following functions are provided by this module. Except when explicitly noted otherwise, all return values are floats.
# In[15]:
#importing the module
import math
# In[16]:
# --------------Number-theoretic and representation functions--------------------------------------
long_string = '''
math.ceil(x)
Return the ceiling of x, the smallest integer greater than or equal to x.
If x is not a float, delegates to x.__ceil__(), which should return an Integral value.
'''
print(long_string)
# In[17]:
print("\n--------------------math.ceil(x)-------------------------------")
print(f"math.ceil(x) --- for number = 404 --- {math.ceil(404)}")
print(f"math.ceil(x) --- for number = 404.01 --- {math.ceil(404.01)}")
print(f"math.ceil(x) --- for number = 404.36 --- {math.ceil(404.36)}")
print(f"math.ceil(x) --- for number = 404.50 --- {math.ceil(404.50)}")
print(f"math.ceil(x) --- for number = 404.86 --- {math.ceil(404.86)}")
print("---------------------------------------------------------------\n")
# In[18]:
long_string = '''
math.comb(n, k)
Return the number of ways to choose k items from n items without repetition and without order.
Evaluates to n! / (k! * (n - k)!) when k <= n and evaluates to zero when k > n.
Also called the binomial coefficient because it is equivalent to the coefficient of k-th term in polynomial expansion of the expression (1 + x) ** n.
Raises TypeError if either of the arguments are not integers. Raises ValueError if either of the arguments are negative.
'''
print(long_string)
# Explore more here - https://www.programiz.com/python-programming/modules/math
#
# # **Strings in Python**
# Here we will see how to handle strings in python.
# When we deal with data, we mostly deal with strings - which we then reformat according to our choices.
# So, it is important that we deal properly with the strings such that we don't lose data
# In[19]:
# write a long string (multiple lines without using \n)
long_string = '''
Hello there!
We are currently creating a long string.
Write multiple lines here,
without any worries. B-)
'''
print(long_string)
# In[20]:
#using escape sequences
#it's difficult to insert a special character in a string or print statement.
#so, we use \ as our saviour!
print("See, we are writing \" in a print statement without any worries!")
print('Isn\'t it awesome??')
# In[21]:
#newline
print("This is the first line \nThis is the second line")
# In[22]:
#backspace
print("This is an incomplete li\bne")
# In[23]:
#horizontal tab
print("Here comes the tab\tGot that??")
# In[24]:
#print a backslash itself
print("So, here is the \\ you wanted to see!")
# In[25]:
#formating a string (we have already seen this before, now it is time to realize it !!)
a = 2020
print("This code was written in the year "+str(a)) #here the number is printed in form of a string otherwise it throws an error
#TypeError: can only concatenate str (not "int") to str
print("After 10 years it will be the year "+str(a+10)) #same explanation as above
#now let us use a shortcut
print(f"The code is written in the year {a}") #see, how simple it is to format a string!!
print(f"After 10 years it will be the year {a+10}")
# In[26]:
#how to get a string index
text = "Climate change is real!"
print(text)
print(text[1:10]) #counting starts from 0
print(text[0:10]) #now see the difference
print(text[:10]) #prints first 10 elements
print(text[::]) #prints Everything
print(text[-1]) #first element starting from the end of the string
print(text[-3]) #third element starting from the end of the string
print(text[::-1]) #prints in reverse order
# * There are many more things to know about strings.
# * You are welcome to add anything relevant you wish to in this notebook!
# * Please collaborate and contribute :)
#
# # **String functions in Python**
# Just like we used the math-functions above, this is also quite similar. But here you wouldn't have to import a module.
# Follow the code below (let the code do the talking!)
# Note: This section discusses one of the functions to get you started. There are many more available. Just Google them!
#
# Reference - https://www.w3schools.com/python/python_ref_string.asp
# In[27]:
mystring = 'lights WILL guide YOU home\n'
# capitalize() Converts the first character to upper case
print(f"\ncapitalize() Converts the first character to upper case \n\nOriginal string = {mystring} \nResult string = {mystring.capitalize()}")
# In[28]:
# casefold() Converts string into lower case
print(f"\ncasefold() Converts string into lower case\n\nOriginal string = {mystring} \nResult string = {mystring.casefold()}")
# In[29]:
# center() Returns a centered string
temp = "banana"
print(f"\ncenter() Returns a centered string\n\nOriginal string = {temp} ")
# In[30]:
temp = temp.center(20, "0")
print(f"\nResult string = {temp}")
# # **Lists in Python**
# Python has several features which are used in all sorts of programming endeavors. One of them is a "list".
# Like always, Follow the code below (let the code do the talking!)
#
# This set of codes has been generously contributed by **Mr. Bittesh Barman.**
#
# Mr. Bittesh is a **PhD student at the Department of Chemistry, Pondicherry University, India**. <br>
# Visit this URL to view his works - https://www.researchgate.net/profile/Bittesh-Barman-2 <br>
# Thank you!
# In[31]:
# Working with Lists!
cars = ["honda","hundai","tata"] # this is a list type data structure. each elements in list is called item.
print(cars)
print(cars[0])# we can call any item in this list by its index no.
print(cars[2])
# In[32]:
# Changing items in a list
shoping_cart = ["Pencil", "notebook","book"]
print(shoping_cart)
shoping_cart[0] = "pen" # we can change item by using the index no.
print(shoping_cart)
# In[33]:
#Appending to a list
fruits = ['banana','orange','watermelon']
fruits.append('grapes') # we can add items in list using append method.
print(fruits)
# In[34]:
# The 'insert()' method!
weapons = ['pan', 'assult rifle', 'shotgun', 'pistol']
weapons.insert(3, 'sniper') # we can add item in any position of the list by using insert method.
print(weapons)
# # **Tuples in Python**
# A tuple is a sequence of immutable (meaning unchanging over time or unable to be changed) Python objects.
# Follow the code below (let the code do the talking!)
# In[35]:
#defining a tuple
tuple_1 = ('India', 'Japan', 100, 90, 85674);
tuple_1
# Please note that in defining a tuple, a semicolon is used! (not mandatory though). <br>
# So those python memes do not hold TRUE here 😉
# In[36]:
#size of the tuple
len(tuple_1)
# The size is 5 but if we see the index, it starts with 0. <br>
# Let's have a look here
#
#
# In[37]:
#Accessing elements inside the tuple
print(f"The first element - {tuple_1[0]}\nThe second element - {tuple_1[1]}\nThe last element - {tuple_1[len(tuple_1)-1]} ")
# The last element was obtained by using the last index via the code `tuple_1[len(tuple_1)-1]` <br>
# Just for fun!
#
# **CAUTION - Tuples are immutable**
#
# So, if we write `tuple_1[0] = some value` we will get an error!
# # **Dictionaries in Python**
# Dictionaries store elements in a key-value pair format. Dictionary elements are accessed via keys while List elements are accessed by their index.
# Follow the code below (let the code do the talking!)
# In[38]:
#defining a dictionary
dy = { "Country": "India",
"Currency": "INR",
"Continent": "Asia",
"Language": "Multiple"}
dy
# In[39]:
#Access a dictionary (using the key and not the value)
dy["Country"]
# Try this - `dy["India"]` <br>
# You will get an error. We need to use the key to access a specific value!
# In[40]:
#adding data to dictionary
dy["Capital"] = "Delhi"
dy
# In[41]:
# We can Overwrite dictionary values too
dy["Currency"] = "Indian Rupee"
dy
# In[42]:
# Deleting data in dictionary
del dy["Language"]
dy
# So the Language key was deleted. <br>
# Now let us delete the whole dictionary
# In[43]:
del dy
#done
# # **Comparison Operators**
# Used to compare 2 or more values and decide if the condition is True or False <br>
# Follow the code below (let the code do the talking!)
#
# Let us consider a random variable 'x' with a random numerical value stored in it. <br>
# Following is how we can compare the value stored in 'x' with other numerical entities.
#
# - Equals: x == 5
# - Not equal: x != 5
# - Greater than: x > 5
# - Greater than or equal: x >= 5
# - Less than: x < 5
# - Less than or equal: to x <= 5
#
# The outcome is always in the form of "True" or "False" - Boolean
# In[44]:
# let us declare a variable with a numerical value
x = 1001
print(x==5)
# In[45]:
print(x!=5)
# In[46]:
print(x > 5)
# In[47]:
print(x >= 5)
# In[48]:
print( x < 5)
# In[49]:
print( x <= 5)
# Now that we know how these work, we can proceed to use them for decision making.
# ie, **If-else statements**
#
# # **Conditional Statements (IF-ELSE)**
# Think of this scenario - if I score at least 40% in the exam, I will pass, else I will fail.<br>
# So, here the condition for me passing the exam is to reach the 40% mark which can be expressed as ">=" (didn't understand? Study the previous section!) . <br>Now, it just has to be conveyed to the computer and here's how it is done!<br>
# Follow the code below (let the code do the talking!)
#
# - It is basically the If - else statement
# - If statement is generally followed by an optional else statement
# - The results are always in Boolean
# - else statement works if the if statement returns a FALSE expression.
# In[50]:
pass_marks = float(input("Enter your marks"))
if pass_marks>=40.0:
print("You passed the exam.")
else:
print("Well, it didn't work this time. But you can do it. Please don't give up.")
# In[51]:
pass_marks = float(input("Enter your marks"))
if pass_marks>=40.0:
print("You passed the exam.")
else:
print("Well, it didn't work this time. But you can do it. Please don't give up.")
# So, that's how we deal with the if-else statements in python.<br>
# Note: **Always remember to take care of the indentation!**
#
# # **Nested or Multiple IF-ELSE (also called ELIF)??**
# Sometimes, we need to put up multiple conditions for an event to happen. For that, we use IF-ELSE statements multiple times.<br>
# So, this is how we do it in python!
#
#
# **Multiple if-else**
# <br>Let us consider that the criteria to get a job interview is atleast 8.0 <br>CGPA and at least 2 years of experience.<br>
# So following would be the way to deal with the situation
#
# *Note - I'm sad that individuals get judged like this. Skills matter. Not numbers.*
# In[52]:
cgpa = float(input("what is your CGPA out of 10.0?"))
if cgpa >=8.0:
experience = float(input("how many years of experience do you have?"))
if experience>=2.0:
print("You are eligible for an interview")
else:
print("Sorry, although you have at least 8.0 GPA, you lack a minimum experience of 2 years.")
else:
print("Sorry, you need minimum 8.0 CGPA to be eligible")
# In[53]:
cgpa = float(input("what is your CGPA out of 10.0?"))
if cgpa >=8.0:
experience = float(input("how many years of experience do you have?"))
if experience>=2.0:
print("You are eligible for an interview")
else:
print("Sorry, although you have at least 8.0 GPA, you lack a minimum experience of 2 years.")
else:
print("Sorry, you need minimum 8.0 CGPA to be eligible")
# In[54]:
cgpa = float(input("what is your CGPA out of 10.0?"))
if cgpa >=8.0:
experience = float(input("how many years of experience do you have?"))
if experience>=2.0:
print("You are eligible for an interview")
else:
print("Sorry, although you have at least 8.0 GPA, you lack a minimum experience of 2 years.")
else:
print("Sorry, you need minimum 8.0 CGPA to be eligible")
# **The elif statement**
#
# Let us just write a simple code where the user enters a number from 1 to 5 and the code prints the number in words.
#
#
# In[55]:
num = int(input("Enter a number between 1 to 5 - "))
if num == 1:
print('One')
elif num == 2:
print('Two')
elif num==3:
print('Three')
elif num==4:
print("Four")
else:
print("Five")
# In[56]:
num = int(input("Enter a number between 1 to 5 - "))
if num == 1:
print('One')
elif num == 2:
print('Two')
elif num==3:
print('Three')
elif num==4:
print("Four")
else:
print("Five")
# ...and we can keep going like this.<br>
# **So, basically the elif statement is nothing but an if statement after an else statement.**
# # **Loops in python**
#
# Generally, in a program, statements are executed line by line, it means sequentially, but when a block of code needs to be executed multiple times, then what to do? Programming languages comes with that provision also, using loops.
#
# Python supports two types of loop
#
# * while loop
# * for loop
#
# This set of codes has been generously contributed by **Mr. Tapas Saha**. <br>
# Mr. Tapas is a **PhD student at the Department of Computer Science & Engineering, Tezpur University, India**.<br>
# Visit this URL to view his works - https://www.researchgate.net/profile/Tapas-Saha-3
# <br>Thank you!
#
#
# **While Loop** <br>
# While loop allows the user, to execute a group of statements repeatedly, but it checks the condition before entering the loop body.<br>The repetitions will continue until the condition false.
#
# Syntax of while loop:
#
#
#
# ```
# while expression:
# statement(s)
# ```
#
#
#
# Examples:
#
# - Print the number 1 to 5:
# In[57]:
# initialization
i=1
while i<=5:
print("Number ::",i) # print the sum
i=i+1
# Another Example
#
# * Sum of n natural number
# In[58]:
# sum = 1+2+3+...+n
#Take input from the user
n = int(input("Enter n: "))
# initialization
sum = 0
i = 1
while i <= n:
sum = sum + i
i = i+1
# print the sum
print("The sum is", sum)
# **For Loops:** <br>
# A for loop is used in python, to execute iterating over the item any sequence.<br>It can be a list, or a tuple, or a dictionary, or a set, or a string.
#
# Syntax of for loop:
# ```
# for x in sequence :
# body
# ```
# Example:
#
# * Print each characters of the given string
# In[59]:
string=" Python"
for i in string :
print(i)
# Another Example:
#
# * Print user input string’s each character and index of the characters.
# In[60]:
#Take input from the user,
string=input("Enter some String: ")
# initialization
i=0
for x in string :
print("index of",x,"is:",i) # print
i=i+1
# One more example!
#
# * Program to calculate the sum and of all numbers stored in a list
# In[61]:
# List of numbers
n = [4, 9, 5, 10]
# initialization
sum = 0
mul=1
for i in n:
sum = sum+i
mul=mul*i
print("The sum is", sum) #pint
print("The multiplication is", mul)
# # **Functions in python**
# Nothing to write here. Let the code do the talking! 🔥🔥 <br>
# Functions are like recipes. Suppose you and I want to bake a cake. You went online and googled a recipe. Now we both will follow the same recipe but you want to use chocolate flavour and I want to use pineapple! So we follow the same recipe but produce new results.
#
# A function is thus a block of code that can be re-used or run whenever it is needed. Information passed to a function is called an argument (the ingredients for the recipie-analogy)<br>
#
# **Defining the function** <br>
# Let us create a dedicated function that can add 2 numbers
#
#
#
#
# In[62]:
#defining the function
def add(a, b):
    """Return the sum of the two arguments."""
    total = a + b
    return total
# In[63]:
# Calling a function
# We will now feed some data to the function to get the sum value
x = float(input("Hey user, enter the first number - "))
y = float(input("Nice! now enter the second number - "))
print(f"The sum of {x} and {y} is {add(x,y)}")
# Saw that? Now just imagine how cool it would be to have a dedicated function for doing a more complex task!!
#
# **Lambda Expressions**
#
# Lambda function is used to create small elegant anonymous functions, generally used with `filter()` and `map()`
# In[64]:
m = lambda n:n**2
m(3)
# **The `map()` function**
#
# - This takes in a function and a list.
# - The function perform an operation on the entire list and return the results in a new list.
# - Let us see it work with a simple cube of a number
# In[65]:
my_list = [5, 10, 55 , 568, 468, 77]
output_list = list(map( lambda x: x**3, my_list))
print(output_list)
# **The `filter()` function**
#
# * This performs an operation on a list based on a specific condition after filtering
# * Let us see it work with a simple condition statement - numbers less than or equal to 201
# In[66]:
my_list = [5, 10, 55 , 568, 468, 77]
condition = list(filter(lambda x: (x <= 201), my_list))
print(condition)
# # **Error handling in python**
#
# This is one of the most important concepts to make a coder's life easier.
# Just walk through the code and you'll get what I mean. Let the code do the talking! 🔥🔥 <br>
#
# While running an automated code that works on unknown or new data, it might happen that the code encounters an error at some point. You, as a coder might not like the execution process to stop. Rather you would like to have a notification that the error was found and that particular execution was bypassed.
#
# This is where the **`try-except`** feature of pythons comes to the rescue!
# In[67]:
# understanding an error - let us try to print an undefined variable
print(name)
a=111
b=222
print(a+b)
# In[68]:
# Notice that the consequent codes were not executed after the error.
# Now let us attempt to bypass the error part of the code and move on to the next executions
try:
print(name)
except NameError:
print("Error - The variable has no value defined to be printed in the first place.")
except:
print("Error - Not sure what the error is, but there is something wrong!")
a=111
b=222
print(a+b)
# The above example only shows an application of the Built-in exceptions in python.<br>
# There are many Built in Exceptions available to be used.<br>
# You can learn about them here - https://docs.python.org/3/library/exceptions.html#bltin-exceptions <br>
# Till then, have fun!
# # **File handling in python**
# Let the code do the talking! 🔥🔥 <br>
#
# **File Operations using python**
# * Modes for file handling
# * Creating a file - "x"
# * Reading a file - "r"
# * Writing a file - "w"
# * Appending a file - "a"
#
# **Creating a file** <br>
# Here we create a .txt (text) file, which we will use in the next steps!
# In[69]:
f = open("file.txt", "x") # open() is used to open the file we want to create/read/write/append
# "f" above can be considered as a file handler. One can use other names too! <br>
#
# Now it's time to write some data in the file
# In[70]:
f.write("This is a text file!")
# The output above is the number of characters we wrote into the file.
#
# **Reading a file** <br>
# This only works when the file name mentioned actually exists, just like one can only read a book if the book actually exists!
# In[71]:
f = open("file.txt", "r")
print(f.read())
# **Writing to a file** <br>
# This creates a new file if the file name used does not exist, just like one can write a book if the book does not already exist (I know the analogy is a bit lame, but please bear with me! 😅)
# In[72]:
f = open("file.txt", "w")
f.write("This sentence replaces the sentence already present in the file named 'file.txt'")
# In[73]:
#lets check the result
f = open("file.txt", "r")
print(f.read())
# In[74]:
# Now, let's try writing to a file that does not exist. We will see that the file is created for us before writing into it.
f = open("another-file.txt", "w")
f.write("This sentence is present in the file named 'another-file.txt'")
# In[75]:
f = open("another-file.txt", "r")
print(f.read())
# Nice!!
#
# **Appending to a file** <br>
# Works same as writing to a file. Only difference is that it does not replace the pre-existing text.
# In[76]:
f = open("file.txt", "r")
print(f.read())
# In[77]:
f = open("file.txt", "a")
f.write("This sentence appends to the sentence already present in the file named 'file.txt'")
# In[78]:
f = open("file.txt", "r")
print(f.read())
# In[79]:
# Now, let's try appending to a file that does not exist. We will see that the file is created for us before appending into it.
f = open("another-file2.txt", "a")
f.write("This sentence is present in the file named 'another-file2.txt'")
# In[80]:
f = open("another-file2.txt", "r")
print(f.read())
# **Now, that's how we deal with files using python. We can also make files of other extensions like .csv, .tsv, etc in the same procedure!**
|
#!/usr/local/bin/python2.7
from sys import exit
from os import environ, system
environ['KERAS_BACKEND'] = 'tensorflow'
environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
environ["CUDA_VISIBLE_DEVICES"] = ""
import numpy as np
from subtlenet import config, utils
from subtlenet.backend import obj
from subtlenet.generators.gen import make_coll
n_batches = 500
partition = 'test'
p = utils.Plotter()
r = utils.Roccer()
OUTPUT = environ['FIGSDIR'] + '/'
system('mkdir -p %s'%OUTPUT)
components = [
'singletons',
'shallow_best',
# 'trunc4_limit50_best',
'trunc7_limit100_best',
]
components_gen = [
'singletons',
'shallow_best',
# 'baseline_trunc4_limit50_best',
'baseline_Adam_7_100',
]
basedir = '/local/snarayan/genarrays/v_deepgen_4_finegrid_small'
basedir_gen = '/fastscratch/snarayan/genarrays/v_deepgen_4_small/'
colls = {
't' : make_coll(basedir + '/PARTITION/Top_*_CATEGORY.npy',categories=components),
'q' : make_coll(basedir + '/PARTITION/QCD_*_CATEGORY.npy',categories=components),
}
colls_gen = {
't' : make_coll(basedir_gen + '/PARTITION/Top_*_CATEGORY.npy',categories=components_gen),
'q' : make_coll(basedir_gen + '/PARTITION/QCD_*_CATEGORY.npy',categories=components_gen),
}
# run DNN
def predict(data, model):
    """Return the precomputed prediction column stored under `model`."""
    return data[model]
def access(data, v):
    # Fetch singleton variable `v` (e.g. 'tau3') as a column of the
    # 'singletons' array, via the column-index map in subtlenet.config.
    return data['singletons'][:,config.gen_singletons[v]]
def div(data, num, den):
    # Element-wise ratio of two singleton variables; the denominator is
    # clipped to [1e-4, 999] to guard against division by zero.
    return access(data, num) / np.clip(access(data, den), 0.0001, 999)
f_vars = {
'tau32' : (lambda x : div(x, 'tau3', 'tau2'), np.arange(0,1.2,0.01), r'$\tau_{32}$'),
'tau32sd' : (lambda x : div(x, 'tau3sd', 'tau2sd'), np.arange(0,1.2,0.01), r'$\tau_{32}^\mathrm{sd}$'),
'shallow_best_roc' : (lambda x : x['shallow_best'], np.arange(0,1.2,0.0001), r'Shallow (no $p_{T}$) classifier'),
# 'lstm4_50_roc' : (lambda x : x['trunc4_limit50_best'], np.arange(0,1.2,0.00001), 'LSTM (4,50)'),
'lstm7_100_roc' : (lambda x : x['trunc7_limit100_best'], np.arange(0,1.2,0.00001), 'LSTM (7,100)'),
}
f_vars_gen = {
'gen_shallow_best' : (lambda x : x['shallow_best'], np.arange(0,1.2,0.01), r'Shallow (no $p_{T}$) classifier'),
'gen_shallow_best_roc' : (lambda x : x['shallow_best'], np.arange(0,1.2,0.0001), r'Shallow (no $p_{T}$) classifier'),
# 'gen_lstm4_50_roc' : (lambda x : x['baseline_trunc4_limit50_best'], np.arange(0,1.2,0.00001), 'LSTM (4,50)'),
'gen_lstm7_100_roc' : (lambda x : x['baseline_Adam_7_100'], np.arange(0,1.2,0.00001), 'LSTM (7,100)'),
}
roc_vars = {
'tau32':(r'$\tau_{32}$',0,':'),
'tau32sd':(r'$\tau_{32}^\mathrm{SD}$',2,':'),
# 'lstm4_50_roc':(r'(4,50) $\delta R=0.02$',5,'--'),
'lstm7_100_roc':(r'(7,100) $\delta R=0.02$',3,'--'),
# 'gen_lstm4_50_roc':('(4,50)',5),
'gen_lstm7_100_roc':('(7,100)',3),
'gen_shallow_best_roc':('Shallow',9),
'shallow_best_roc':('Shallow $\delta R=0.02$',9,'--'),
}
order = [
'tau32',
'tau32sd',
'shallow_best_roc',
'gen_shallow_best_roc',
# 'lstm4_50_roc',
'lstm7_100_roc',
# 'gen_lstm4_50_roc',
'gen_lstm7_100_roc',
]
# unmasked first
hists = {}
for k,v in colls.iteritems():
hists[k] = v.draw(components=components,
f_vars=f_vars,
n_batches=n_batches, partition=partition)
for k,v in colls_gen.iteritems():
hists[k].update(v.draw(components=components_gen,
f_vars=f_vars_gen,
n_batches=n_batches, partition=partition))
for k in hists['t']:
if 'roc' in k:
continue
ht = hists['t'][k]
hq = hists['q'][k]
for h in [ht, hq]:
h.scale()
r.clear()
r.add_vars(hists['t'],
hists['q'],
roc_vars,
order
)
r.plot(**{'output':OUTPUT+'roc'})
def f_mask(data):
    # Select entries in the window 150 < msd < 200 (soft-drop mass,
    # presumably in GeV -- confirm against the array producer).
    mass = data['singletons'][:,config.gen_singletons['msd']]
    return (mass > 150) & (mass < 200)
hists = {}
for k,v in colls.iteritems():
hists[k] = v.draw(components=components,
f_vars=f_vars,
n_batches=n_batches, partition=partition,
f_mask=f_mask)
for k,v in colls_gen.iteritems():
hists[k].update(v.draw(components=components_gen,
f_vars=f_vars_gen,
n_batches=n_batches, partition=partition))
for k in hists['t']:
if 'roc' in k:
continue
ht = hists['t'][k]
hq = hists['q'][k]
for h in [ht, hq]:
h.scale()
r.clear()
r.add_vars(hists['t'],
hists['q'],
roc_vars,
order
)
r.plot(**{'output':OUTPUT+'mass_roc'})
|
import hashlib
from front_end.grpc import assignService_pb2
'''
This class represents a Region, which tracks the size of the region and
the fingerprints contained in it.
'''
class Region:
    """A fixed-capacity region accumulating chunk fingerprints.

    Tracks the total chunk size held so far and maintains a running SHA-1
    digest over the raw fingerprint bytes added to the region.
    """
    def __init__(self, max_size: int):
        self.max_size = max_size          # capacity in bytes
        self.current_size = 0             # bytes accounted for so far
        self.fingerprints = []            # accumulated protobuf Fingerprints
        self.hash = hashlib.sha1()        # rolling digest of raw fingerprints

    def add_fingerprint(self, fingerprint: bytes, chunk_size: int):
        """Append a fingerprint, raising BufferError on capacity overflow."""
        if chunk_size + self.current_size > self.max_size:
            raise BufferError(
                f"Region is too full to accept fingerprint. "
                f"{self.current_size}/{self.max_size}")
        self.fingerprints.append(
            assignService_pb2.Fingerprint(
                fingerPrint=fingerprint, fingerPrintSize=chunk_size))
        self.hash.update(fingerprint)
        self.current_size += chunk_size
|
"""The scripts will produce a pandas `DataFrame` which contains information
about which combinations of environments have which data types.
"""
import itertools
import logging
import os
from collections import defaultdict
import pandas
from evaluator.properties import (
Density,
EnthalpyOfMixing,
EnthalpyOfVaporization,
ExcessMolarVolume,
)
from nistdataselection.curation.filtering import filter_by_checkmol
from nistdataselection.utils import SubstanceType
from nistdataselection.utils.utils import (
chemical_environment_codes,
substance_type_to_int,
)
logger = logging.getLogger(__name__)
def main():
    """Count, per environment combination, how many substances have data for
    each property of interest (and each pair of binary properties) in the
    curated test set, then write the table to summary.csv / summary.md."""
    # Set up logging
    logging.basicConfig(level=logging.INFO)
    data_set_path = os.path.join("test_sets", "full_set.csv")
    data_set = pandas.read_csv(data_set_path)
    # Define the environments we are interested in.
    # NOTE(review): "caboxylic_acid" looks misspelled but must match the key
    # actually present in chemical_environment_codes -- confirm before fixing.
    environments = {
        "alcohol": [
            chemical_environment_codes["hydroxy"],
            chemical_environment_codes["alcohol"],
        ],
        "ester": [
            chemical_environment_codes["caboxylic_acid"],
            chemical_environment_codes["ester"],
        ],
    }
    # Define the properties and environments we are interested in.
    properties_of_interest = [
        (Density, SubstanceType.Pure),
        (EnthalpyOfVaporization, SubstanceType.Pure),
        (EnthalpyOfMixing, SubstanceType.Binary),
        (Density, SubstanceType.Binary),
        (ExcessMolarVolume, SubstanceType.Binary),
    ]
    # Short column labels used in the output tables.
    friendly_names = {
        (Density, SubstanceType.Pure): "rho",
        (EnthalpyOfVaporization, SubstanceType.Pure): "Hvap",
        (EnthalpyOfMixing, SubstanceType.Binary): "Hmix(x)",
        (Density, SubstanceType.Binary): "rho(x)",
        (ExcessMolarVolume, SubstanceType.Binary): "Vexcess(x)",
    }
    # Extend the combination of properties: each single property, plus every
    # pair of binary properties.
    property_combinations = [(x,) for x in properties_of_interest]
    binary_properties = [
        x for x in properties_of_interest if x[1] == SubstanceType.Binary
    ]
    property_combinations.extend(itertools.combinations(binary_properties, 2))
    # Define the combinations of environments: singles for pure substances,
    # like-like pairs plus cross pairs for binary substances.
    environment_combinations = {
        SubstanceType.Pure: [(x,) for x in environments],
        SubstanceType.Binary: [
            *[(x, x) for x in environments],
            *itertools.combinations(environments, 2),
        ],
    }
    # data_counts[environment_types][property_types] -> number of substances
    # that have data for *all* properties in property_types.
    data_counts = defaultdict(dict)
    for property_types in property_combinations:
        all_substances = defaultdict(list)
        for property_type, substance_type in property_types:
            # Column header as exported by the evaluator (unit via pint's
            # compact '~' format spec).
            header = (
                f"{property_type.__name__} Value ({property_type.default_unit():~})"
            )
            n_components = substance_type_to_int[substance_type]
            # Keep rows that have a value for this property and the matching
            # number of components.
            property_data = data_set[data_set[header].notnull()]
            property_data = property_data[property_data["N Components"] == n_components]
            for environment_types in environment_combinations[substance_type]:
                environment_types = tuple(sorted(environment_types))
                chemical_environments = [environments[x] for x in environment_types]
                environment_data = filter_by_checkmol(
                    property_data, *chemical_environments
                )
                # Collect the distinct substances as sorted component tuples.
                components = []
                for index in range(n_components):
                    components.append([*environment_data[f"Component {index + 1}"]])
                all_substances[environment_types].append(
                    set(tuple(sorted(x)) for x in zip(*components))
                )
        # Substances that appear for every property in the combination.
        common_substances = {x: set.intersection(*y) for x, y in all_substances.items()}
        for environment_type in common_substances:
            data_counts[environment_type][property_types] = len(
                common_substances[environment_type]
            )
    # Flatten the nested counts into one row per environment combination.
    data_rows = []
    for environment_types in data_counts:
        data_row = {}
        for index, environment_type in enumerate(environment_types):
            data_row[f"Environment {index + 1}"] = environment_type
        for property_types in data_counts[environment_types]:
            header = " + ".join(friendly_names[x] for x in property_types)
            data_row[header] = int(data_counts[environment_types][property_types])
        data_rows.append(data_row)
    n_environments = max(len(x) for x in data_counts)
    count_frame = pandas.DataFrame(data_rows)
    reordered_frame = pandas.DataFrame()
    # Re-order the headers.
    for index in range(n_environments):
        reordered_frame[f"Environment {index + 1}"] = count_frame[
            f"Environment {index + 1}"
        ]
    for column_name in count_frame:
        if "Environment" in column_name:
            continue
        reordered_frame[column_name] = count_frame[column_name]
    reordered_frame.fillna("-", inplace=True)
    reordered_frame.to_csv("summary.csv", index=False)
    with open("summary.md", "w") as file:
        # NOTE(review): `showindex` is a tabulate kwarg forwarded by older
        # pandas; newer pandas expects `index=False` -- confirm the pinned
        # pandas version before changing.
        reordered_frame.to_markdown(file, showindex=False)


if __name__ == "__main__":
    main()
|
import board
import terminalio
from adafruit_display_text import label

display = board.DISPLAY

# Set text, font, and color
text = "HELLO WORLD"
font = terminalio.FONT
color = 0x0000FF

# Create the text label from the settings above. Previously the literals
# were repeated inline and the `text`/`color` variables were silently unused,
# so editing them had no effect on the display.
text_area = label.Label(font, text=text, color=color)

# Set the location
text_area.x = 100
text_area.y = 80

# Show it
display.show(text_area)
|
# IATA airport code -> "City, Region/Country" description.
# Fix: the original literal listed "YXX" twice; duplicate dict keys are
# silently collapsed at runtime, so the redundant entry is removed.
airport_codes = {"AAL": "Aalborg, Denmark",
                 "AES": "Aalesund, Norway",
                 "AAR": "Aarhus, Denmark",
                 "YXX": "Abbotsford, BC, Canada",
                 "ABZ": "Aberdeen, Scotland",
                 "ABR": "Aberdeen, SD, USA",
                 "ABJ": "Abidjan, Ivory Coast",
                 "ABI": "Abilene, TX, USA",
                 "AUH": "Abu Dhabi, United Arab Emirates",
                 "ABV": "Abuja, Nigeria",
                 "ACA": "Acapulco, Mexico",
                 "ACC": "Accra, Ghana",
                 "ADA": "Adana, Turkey",
                 "ADD": "Addis Ababa, Ethiopia",
                 "ADL": "Adelaide, S.A., Australia",
                 "ADE": "Aden, Yemen",
                 "ADF": "Adiyaman, Turkey",
                 "AGA": "Agadir, Morocco",
                 "GUM": "Agana, Guam",
                 "BQN": "Aguadilla, Puerto Rico",
                 "AGU": "Aguascalientes, Mexico",
                 "AHE": "Ahe, French Polynesia",
                 "AMD": "Ahmedabad, India",
                 "AJA": "Ajaccio, Corsica, France",
                 "AXT": "Akita, Japan",
                 "CAK": "Akron, OH, USA",
                 "ALS": "Alamosa, CO, USA",
                 "ABY": "Albany, GA, USA",
                 "ALB": "Albany, NY, USA",
                 "ALL": "Albenga, Italy",
                 "ABQ": "Albuquerque, NM, USA",
                 "ABX": "Albury, N.S.W., Australia",
                 "ALY": "Alexandria, Egypt",
                 "HBE": "Alexandria, Egypt",
                 "AEX": "Alexandria, LA, USA",
                 "AXD": "Alexandroupolis, Greece",
                 "AHO": "Alghero, Sardinia, Italy",
                 "ALG": "Algiers, Algeria",
                 "ALC": "Alicante, Spain",
                 "ASP": "Alice Springs, N.T., Australia",
                 "NSB": "Alice Town, North Bimini Island, Bahamas",
                 "ABE": "Allentown, PA, USA",
                 "AIA": "Alliance, NE, USA",
                 "ALA": "Almaty, Kazakhstan",
                 "LEI": "Almeria, Spain",
                 "AOR": "Alor Setar, Malaysia",
                 "APN": "Alpena, MI, USA",
                 "ALF": "Alta, Norway",
                 "ACH": "Altenrhein, Switzerland"}
from django import template

register = template.Library()


@register.simple_tag
def get_lessons_names(course, module_name):
    """Return the names of all lessons in the course module named *module_name*.

    Bug fix: ``filter()`` returns a QuerySet, which has no ``lessons``
    attribute, so the original raised AttributeError at render time.
    ``get()`` returns the single matching module instance (and raises
    DoesNotExist / MultipleObjectsReturned on 0 or >1 matches, surfacing
    bad data explicitly).
    """
    module = course.modules.get(name__exact=module_name)
    return [lesson.name for lesson in module.lessons.all()]
|
import pickle
import zlib
from pathlib import Path
from typing import Any
from app.osm.pbf.blob_meta import BlobMeta
from app.osm.pbf.osm_pb2 import Blob
def read(filepath: Path) -> Any:
    """Unpickle and return the object stored at *filepath*."""
    with filepath.open(mode='rb') as handle:
        return pickle.load(handle)
def write(content: Any, filepath: Path) -> Any:
    """Pickle *content* to *filepath*, overwriting any existing file."""
    with filepath.open(mode='wb') as handle:
        pickle.dump(content, handle)
def read_blob(blob_meta: BlobMeta) -> bytes:
    """Return the payload of one PBF blob described by *blob_meta*.

    Reads the serialized Blob message at the recorded file offset, then
    returns its raw bytes directly or zlib-decompresses the compressed form.
    """
    with blob_meta.filepath.open(mode='rb') as handle:
        handle.seek(blob_meta.position)
        serialized = handle.read(blob_meta.size)
    blob = Blob()
    blob.ParseFromString(serialized)
    if blob.raw:
        return blob.raw
    return zlib.decompress(blob.zlib_data)
|
class GithubRepo:
    """Placeholder for a GitHub-backed repository source.

    The scanning logic is not implemented yet: scan() is a chainable no-op
    and exists() always reports that nothing was found.
    """

    def scan(self, repo):
        """No-op scan; returns self so calls can be chained."""
        return self

    def exists(self):
        """Report whether the repository was found; always False in the stub."""
        return False
|
#! /usr/bin/env python
import signal
# Flask
from flask import Flask, render_template, flash, request
from flask_socketio import SocketIO, emit
from collections import namedtuple
from ml_thread import MLThread
from udp_thread import UDPThread
# Global fade flag (client fade is now driven by the ">>fd" command instead).
__FADE__=False


# Generates a flask app that lets you type to interact
def main():
    """Run the Flask-SocketIO control app for the text-generation setup.

    Wires a UDP bridge thread and an ML text-generation thread to a web UI;
    chat messages beginning with ">>xx" act as control commands, anything
    else is fed to the ML thread as prompt text.
    """
    # Create a flask app
    app = Flask(__name__)
    app.config.from_object(__name__)
    app.config['SECRET_KEY'] = '1-9840-813413491340-813-04'
    # Turn the flask app into a socketio app
    socketio = SocketIO(app)
    # Make threads
    udp_thread = UDPThread("192.168.1.2", "192.168.1.1")
    ml_thread = MLThread('Salem', update_delay=5)
    threads = [udp_thread, ml_thread]

    @app.route("/")
    def speed_reader():
        return render_template('speed_reader.html')

    @app.route("/text_input")
    def text_input():
        return render_template('text_input.html')

    @socketio.on('connect')
    def test_connect():
        print('Client connected')

    @socketio.on('disconnect')
    def test_disconnect():
        print('Client disconnected')

    @socketio.on('input_text')
    def input_text_cb(msg):
        # Dispatch ">>xx N" control commands; fall through to plain text.
        global __FADE__
        if ">>fd" in msg['text']:
            # ">>fd 0|1": toggle the client-side fade effect.
            # __FADE__ = not __FADE__
            idx = msg['text'].find(">>fd")
            socketio.emit("fade", {"value":bool(int(msg['text'][idx+5:]))})
            return
        if ">>re" in msg['text'] or ">>rs" in msg['text']:
            # ">>rs <text>": restart generation from the given seed text.
            # NOTE(review): a ">>re" message is then searched as ">>rs", so
            # find() returns -1 and the slice becomes msg['text'][4:] --
            # confirm whether that is intended.
            idx = msg['text'].find(">>rs")
            msg['text']= msg['text'][idx+5:]
            ml_thread.clear_history(msg['text'])
            ml_thread.set_paused(False)
            socketio.emit("generated_text", {"text": msg['text'], "color":True, "delay":1, "instant":True})
            return
        if ">>cl" in msg['text']:
            # ">>cl": clear the display and pause generation.
            socketio.emit("clear", {})
            ml_thread.set_paused(True)
            return
        if ">>cr" in msg['text']:
            # ">>cr 0|1": enable/disable call-and-response mode.
            idx = msg['text'].find(">>cr")
            # ml_thread.set_call_response(not ml_thread.get_call_response())
            ml_thread.set_call_response(bool(int(msg['text'][idx+5:])))
            return
        if ">>sp" in msg['text']:
            # ">>sp <seconds>": set the generation delay; negative pauses.
            idx = msg['text'].find(">>sp")
            speed = float(msg['text'][idx+5:])
            print(speed)
            if speed >= 0:
                ml_thread.set_update_delay(speed)
                ml_thread.set_paused(False)
            else:
                ml_thread.set_paused(True)
            return
        # Plain text: queue it for the model and echo it to the display.
        ml_thread.add_text(msg['text'])
        if ml_thread.get_call_response():
            ml_thread.set_paused(False)
        socketio.emit("generated_text", {"text": msg['text'], "color":True, "delay":1, "instant":True})

    def service_shutdown(signum, frame):
        # Signal handler: raising unwinds socketio.run() so the cleanup in
        # the except block below runs.
        print('Caught signal %d' % signum)
        raise Exception

    # Set ML thread callback
    def text_generated_cb(output_text):
        # Push model output to the web clients and mirror it over UDP.
        socketio.emit("generated_text", {"text": output_text, "color": False, "delay":ml_thread.update_delay, "instant":True})
        udp_thread.send_text(output_text)
    ml_thread.text_generated_cb = text_generated_cb

    def udp_receive_cb(input_text):
        # Treat text arriving over UDP exactly like text typed in the web UI.
        msg = {"text": str(input_text)}
        input_text_cb(msg)
        # print(msg['text'])
    udp_thread.receive_cb = udp_receive_cb

    signal.signal(signal.SIGTERM, service_shutdown)
    signal.signal(signal.SIGINT, service_shutdown)
    # Start the job threads
    try:
        for thread in threads:
            thread.start()
        socketio.run(app)
    except Exception:
        # NOTE(review): request.environ is only valid inside a request
        # context, so this shutdown path may itself raise -- confirm.
        func = request.environ.get('werkzeug.server.shutdown')
        func()
        for thread in threads:
            thread.shutdown_flag.set()
            thread.join()


if __name__=='__main__':
    main()
# -- Read command line args --
# argparse guidance: https://docs.python.org/3/library/argparse.html
import argparse
parser = argparse.ArgumentParser(description='Update external dependencies.')
parser.add_argument('--verbose', help='chatty output', action='store_true')
args = parser.parse_args()
from devops_spt import Directory, GradleVersion, KotlinVersion
# Run the version updaters from one directory up (presumably the repository
# root; Directory.cd looks like a chdir context manager -- TODO confirm
# against devops_spt).
with Directory.cd(".."):
    GradleVersion.update(args.verbose)
    KotlinVersion.update(args.verbose)
|
from utils import *
class FeatureMem:
    # Feature-level memory: n_k profile vectors (p_memory) plus, per slot, a
    # list of bias tensors shaped like the base model's user parameters
    # (u_memory).
    def __init__(self, n_k, u_emb_dim, base_model, device):
        self.n_k = n_k
        self.base_model = base_model
        self.p_memory = torch.randn(n_k, u_emb_dim, device=device).normal_() # on device
        u_param, _, _ = base_model.get_weights()
        self.u_memory = []
        for i in range(n_k):
            bias_list = []
            for param in u_param:
                # NOTE(review): normal_() mutates `param` IN PLACE, so every
                # slot's bias_list aliases the same tensors (re-randomized on
                # each outer iteration) and base_model's weights are
                # clobbered -- presumably param.clone().normal_(std=0.05)
                # was intended; confirm before changing.
                bias_list.append(param.normal_(std=0.05))
            self.u_memory.append(bias_list)
        self.att_values = torch.zeros(n_k).to(device)
        self.device = device

    def read_head(self, p_u, alpha, train=True):
        # get personalized mu
        # NOTE(review): a fresh (untrained) Attention module is created on
        # every call -- confirm this is intended rather than a shared module.
        att_model = Attention(self.n_k).to(self.device)
        """
        p_u is the raw feature.
        """
        attention_values = att_model(p_u, self.p_memory).to(self.device) # pu on device
        personalized_mu = get_mu(attention_values, self.u_memory, self.base_model, self.device)
        # update mp: blend the attention-weighted outer product of p_u into
        # the profile memory (training mode only).
        transposed_att = attention_values.reshape(self.n_k, 1)
        product = torch.mm(transposed_att, p_u)
        if train:
            self.p_memory = alpha * product + (1-alpha) * self.p_memory
        self.att_values = attention_values
        return personalized_mu, attention_values

    def write_head(self, u_grads, lr):
        # Write the gradients back into u_memory using the cached attention.
        update_mu(self.att_values, self.u_memory, u_grads, lr)
class TaskMem:
    """Task-level memory: n_k slots of (2*emb_dim x 2*emb_dim) matrices."""

    def __init__(self, n_k, emb_dim, device):
        self.n_k = n_k
        # One (emb_dim*2 x emb_dim*2) memory matrix per slot.
        self.memory_UI = torch.rand(n_k, emb_dim * 2, emb_dim * 2, device=device).normal_()
        self.att_values = torch.zeros(n_k)

    def read_head(self, att_values):
        """Cache the attention weights and return their weighted memory mix."""
        self.att_values = att_values
        return get_mui(att_values, self.memory_UI, self.n_k)

    def write_head(self, u_mui, lr):
        """Blend the attention-spread update into the memory at rate lr."""
        blended = update_mui(self.att_values, self.n_k, u_mui)
        self.memory_UI = lr * blended + (1 - lr) * self.memory_UI
def cosine_similarity(input1, input2):
    """Row-wise cosine similarity between two equally-shaped 2-D tensors.

    A small epsilon (1e-5) is added to every squared element before the norm
    sums, keeping the denominator away from zero for all-zero rows.
    """
    eps = 0.00001
    norm1 = torch.sqrt(torch.sum(input1 ** 2 + eps, 1))
    norm2 = torch.sqrt(torch.sum(input2 ** 2 + eps, 1))
    dot = (input1 * input2).sum(1)
    return dot / (norm1 * norm2)
class Attention(torch.nn.Module):
    """Soft attention over the n_k profile-memory slots.

    Scores each memory slot by cosine similarity against the (single) user
    feature vector, passes the n_k scores through a linear layer, and
    normalizes them with a softmax.
    """

    def __init__(self, n_k, activation='relu'):
        # `activation` is kept for interface compatibility; see the bug-fix
        # note below -- the original never actually applied it.
        super(Attention, self).__init__()
        self.n_k = n_k
        # Bug fix: nn.Linear's third positional argument is `bias`; the
        # original passed activation_func(activation) there, which was merely
        # interpreted as a truthy bias flag (Linear has no activation
        # parameter). Keep the effective behavior with an explicit bias=True.
        self.fc_layer = torch.nn.Linear(self.n_k, self.n_k, bias=True)
        # dim=0: the input is the 1-D vector of n_k similarity scores
        # (implicit-dim Softmax is deprecated and resolves to 0 for 1-D).
        self.soft_max_layer = torch.nn.Softmax(dim=0)

    def forward(self, pu, mp):
        # mp: [n_k, u_embed_dim]; pu: [1, u_embed_dim] (batch_size == 1).
        # Tile pu so it can be compared row-wise against every memory slot.
        expanded_pu = pu.repeat(1, len(mp)).view(len(mp), -1)  # (n_k, pu_dim)
        inputs = cosine_similarity(expanded_pu, mp)  # (n_k,) similarity scores
        fc_layers = self.fc_layer(inputs)
        attention_values = self.soft_max_layer(fc_layers)
        return attention_values
def get_mu(att_values, mu, model, device):
    """Attention-weighted combination of the memory bias lists.

    Starts from the model's zero-initialized user weights and accumulates
    each memory slot's biases scaled by that slot's attention value.
    """
    mu0, _, _ = model.get_zero_weights()
    weights = att_values.reshape(len(mu), 1)  # one scalar weight per slot
    for slot_idx, bias_list in enumerate(mu):
        for param_idx, bias in enumerate(bias_list):
            mu0[param_idx] += weights[slot_idx] * bias.to(device)
    return mu0
def update_mu(att_values, mu, grads, lr):
    """In-place gradient write-back into the memory bias lists.

    Every slot's parameters move toward att * grad at learning rate lr:
    mu <- lr * att * grad + (1 - lr) * mu.
    """
    weights = att_values.reshape(len(mu), 1)
    for slot_idx in range(len(mu)):
        for param_idx in range(len(mu[slot_idx])):
            mu[slot_idx][param_idx] = (
                lr * weights[slot_idx] * grads[param_idx]
                + (1 - lr) * mu[slot_idx][param_idx]
            )
def get_mui(att_values, mui, n_k):
    """Return the attention-weighted sum of the n_k task-memory matrices."""
    weights = att_values.reshape(n_k, 1, 1)  # broadcast over matrix dims
    return (weights * mui).sum(dim=0)
def update_mui(att_values, n_k, u_mui):
    """Spread a single memory update across the n_k slots by attention weight.

    Returns an (n_k, d, d) tensor: each slot receives the update scaled by
    its attention value.
    """
    tiled = u_mui.unsqueeze(0).repeat(n_k, 1, 1)  # n_k copies of the update
    weights = att_values.reshape(n_k, 1, 1)
    return weights * tiled
|
"""
Handles reading the session cookie.
"""
import datetime
import json
from bottle import local, request, response
from codalab.server.authenticated_plugin import user_is_authenticated
class LoginCookie(object):
    """
    Represents the user's session cookie after logging in.
    """

    # Cookie name and the path it is scoped to.
    KEY = "codalab_session"
    PATH = "/"

    def __init__(self, user_id, max_age, expires=None):
        # user_id: id of the authenticated user this session belongs to.
        self.user_id = user_id
        # max_age: cookie lifetime in seconds.
        self.max_age = max_age
        # expires: absolute (naive UTC) expiry; derived from max_age when not
        # supplied (i.e. for freshly created sessions).
        self.expires = expires or (datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age))

    def save(self):
        """
        Save cookie on the Bottle response object.
        """
        # Drop any previous session cookie first so only one is ever set.
        self.clear()
        response.set_cookie(
            self.KEY,
            self.serialize(),
            secret=local.config['server']['secret_key'],  # signed by bottle
            max_age=self.max_age,
            path=self.PATH,
        )

    def serialize(self):
        # JSON payload; expires is stored as a POSIX timestamp and restored
        # via fromtimestamp() in get(), which round-trips on a single host.
        return json.dumps(
            {"user_id": self.user_id, "max_age": self.max_age, "expires": self.expires.timestamp()}
        )

    @classmethod
    def get(cls):
        """
        Get cookie on the Bottle request object.
        Will only return cookie if it exists and has not expired yet.
        :return: LoginCookie or None
        """
        try:
            cookie = request.get_cookie(
                cls.KEY, secret=local.config['server']['secret_key'], default=None
            )
        except UnicodeDecodeError:
            # Sometimes, the cookie may already be stored in the old pickle format (which is unreadable), so don't error when that happens.
            return None
        if not cookie:
            return None
        try:
            cookie = json.loads(cookie)
            cookie = LoginCookie(
                user_id=cookie["user_id"],
                max_age=cookie["max_age"],
                expires=datetime.datetime.fromtimestamp(cookie["expires"]),
            )
            # Treat an expired session the same as a missing cookie.
            if cookie.expires > datetime.datetime.utcnow():
                return cookie
            else:
                return None
        except json.JSONDecodeError:
            # Malformed payload (e.g. tampered or legacy format): no session.
            return None

    @classmethod
    def clear(cls):
        """
        Delete cookie on the Bottle response object.
        """
        response.delete_cookie(cls.KEY, path=cls.PATH)
class CookieAuthenticationPlugin(object):
    """
    Bottle plugin that checks the cookie and populates request.user if it is
    found and valid.
    """

    api = 2

    def apply(self, callback, route):
        def wrapper(*args, **kwargs):
            # Only consult the cookie when no earlier auth mechanism has
            # already populated the user.
            if not user_is_authenticated():
                cookie = LoginCookie.get()
                request.user = local.model.get_user(user_id=cookie.user_id) if cookie else None
            return callback(*args, **kwargs)

        return wrapper
|
from JDI.core.settings.jdi_settings import JDISettings
from JDI.web.selenium.elements.api_interact.find_element_by import By
from JDI.web.selenium.elements.common.text import Text
from JDI.web.selenium.elements.complex.table.cell import Cell
from JDI.web.selenium.elements.complex.table.columns import Columns
from JDI.web.selenium.elements.complex.table.rows import Rows
class Table(Text):
cache = True
footer = list()
by_cell_locator_template = None
all_cells = list()
columns = Columns()
rows = Rows()
by_footer_locator = By.xpath(".//tfoot/tr/th")
def __init__(self, by_table_locator=None,
by_column_header=None, by_row_header=None,
by_row=None, by_column=None,
row_start_index=None, column_start_index=None,
by_cell_locator_template=None,
by_footer=None, root=None):
super(Table, self).__init__(by_locator=by_table_locator)
self.columns.table = self
self.rows.table = self
if by_column is not None: self.columns.by_line_template = by_column
if by_column_header is not None: self.columns.by_headers_locator = by_column_header
if by_row is not None: self.rows.by_line_template = by_row
if by_row_header is not None: self.rows.by_headers_locator = by_row_header
if column_start_index is not None and column_start_index > -1:
self.columns.start_index = column_start_index
if row_start_index is not None and row_start_index > -1:
self.rows.start_index = row_start_index
self.by_cell_locator_template = by_cell_locator_template
self.by_footer_locator = by_footer
def get_text_action(self):
return "||X||" + "|".join(self.columns.get_headers()) + "||" + "".join(
list(map(lambda rn: "\n||" + rn + "||" + "|".join(self.row_value(rn)) + "||", self.rows.get_headers())))
def row_value(self, row_name):
return self.rows.get_row_value(row_name)
def get_headers(self):
return self.columns.get_headers()
def is_empty(self):
try:
self.get_driver().implicitly_wait(0)
row_count = self.rows.get_count(True)
return row_count == 0
finally:
self.get_driver().implicitly_wait(JDISettings.get_current_timeout_sec())
def column(self, val, row=None):
if row is not None:
column_cell = self.cell(value=val, row=row)
col = None
if column_cell is not None:
num = column_cell.column_num
col = self.columns.get_column(num)
return col
return self.columns.get_column(val)
def cell(self, web_element=None, column=None, row=None, value=None):
if None not in [web_element, column, row] and value is None:
return self.add_cell(web_element,
column.get(lambda name: self.columns.get_headers().index(name), lambda num: num),
row.get(lambda name: self.rows.get_headers().index(name), lambda num: num),
column.get(lambda name: name, lambda num: ""),
row.get(lambda name: name, lambda num: ""))
elif None not in [value, row] and web_element == column is None:
row_num = self.rows.get_headers().index(row.name) + 1 if row.has_name() else row.num
return list(filter(lambda x: x[1].get_value() == value, self.rows.get_row(row_num)))[0][1]
elif None not in [value, column] and web_element == row is None:
col_index = self.columns.get_headers().index(column.name)+1 if column.has_name() else column.num
return list(filter(lambda x: x[1].get_value() == value, self.columns.get_column(col_index)))[0][1]
else:
return None
def add_cell(self, web_element, col_num, row_num, col_name, row_name):
if web_element is not None:
cells = list(filter(lambda c: c.column_num == col_num and c.row_num == row_num, self.all_cells))
cell = cells[0] if len(cells) > 0 else None
if cell is not None:
cell.set_web_element(web_element)
return cell.update_data(col_name, row_name)
cell = Cell(web_element=web_element, column_num=col_num, row_num=row_num,
col_name=col_name, row_name=row_name, cell_locator_template=self.by_cell_locator_template,
table=self)
if self.cache:
self.all_cells.append(cell)
return cell
def row(self, val, column=None):
if column is not None:
row_cell = self.cell(value=val, column=column)
return self.rows.get_row(row_cell.row_num) if row_cell is not None else None
return self.rows.get_row(val)
def get_rows(self, *col_name_values):
if len(col_name_values) == 0:
return ro
|
# -*- coding: utf-8 -*-
import os
from django.core.urlresolvers import reverse
from seaserv import seafile_api
from seahub.test_utils import BaseTestCase
class ListPrivSharedFoldersTest(BaseTestCase):
    """Regression test for the 'list privately shared folders' page."""

    def tearDown(self):
        self.remove_repo()

    def test_can_list_priv_shared_folders(self):
        # Create a folder inside the user's repo, expose it as a virtual
        # sub-repo, then privately share that sub-repo with the admin user.
        repo_id = self.repo.id
        username = self.user.username
        parent_dir = '/'
        dirname = 'test-folder'
        full_dir_path = os.path.join(parent_dir, dirname)
        # create folder
        self.create_folder(repo_id=repo_id,
                           parent_dir=parent_dir,
                           dirname=dirname,
                           username=username)
        sub_repo_id = seafile_api.create_virtual_repo(repo_id, full_dir_path, dirname, dirname, username)
        seafile_api.share_repo(sub_repo_id, username, self.admin.username, 'rw')
        self.login_as(self.user)
        resp = self.client.get(reverse('list_priv_shared_folders'))
        self.assertEqual(200, resp.status_code)
        # The page should link to the shared folder's library view.
        # NOTE(review): assertRegexpMatches treats `href` as a regex pattern;
        # this works only while repo ids/paths contain no regex
        # metacharacters -- confirm.
        href = reverse("view_common_lib_dir", args=[repo_id, full_dir_path.strip('/')])
        self.assertRegexpMatches(resp.content, href)
|
"""Forwards call to setuptools.setup() after parsing:
* name from package folder
* long description from README.rst
* packages from setuptools.find_packages()
* dependencies from requirements.txt
* remaining properties from package-level package.json
"""
import os
import json
from setuptools import setup, find_packages
def main():
    """Build and invoke setuptools.setup() for the parent package.

    Gathers:
      * name/packages from the package folder name
      * long description from README.rst
      * dependencies from requirements.txt
      * remaining keyword arguments from package.json

    (We should totally grab the version from the most recent git tag.)
    """
    buildPath, _ = os.path.split(os.path.abspath(__file__))
    packPath, _ = os.path.split(buildPath)
    _, packName = os.path.split(packPath)
    currDir = os.getcwd()
    os.chdir(buildPath)
    try:
        settingsPath = os.path.join(packPath, "package.json")
        readmePath = os.path.join(packPath, "README.rst")
        reqsPath = os.path.join(packPath, "requirements.txt")
        with open(settingsPath, 'r') as f:
            settings = json.load(f)
        with open(readmePath, 'r') as f:
            settings["long_description"] = f.read()
        with open(reqsPath, 'r') as f:
            # Fix: readlines() kept trailing newlines and comment lines;
            # strip whitespace and drop blanks/comments so install_requires
            # receives clean requirement specifiers.
            settings["install_requires"] = [
                line.strip() for line in f
                if line.strip() and not line.lstrip().startswith("#")
            ]
        settings["name"] = packName
        settings["packages"] = [packName]
        setup(**settings)
    except Exception as e:
        # Best-effort: keep the original behavior of reporting the failure
        # instead of propagating it.
        print("Error while building package:", e)
    finally:
        # Fix: restore the caller's working directory unconditionally --
        # including on KeyboardInterrupt/SystemExit, which the except clause
        # above does not catch.
        os.chdir(currDir)


if __name__ == "__main__":
    main()
|
from pathlib import Path
from torch.utils.data import Dataset, ConcatDataset, DataLoader, distributed
from torchvision import transforms as trans
from torchvision.datasets import ImageFolder
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
import cv2
import bcolz
import pickle
import os
import torch
import mxnet as mx
from tqdm import tqdm
from data.sampler import DistRandomIdentitySampler
from log import logger
def de_preprocess(tensor):
    """Invert Normalize([0.5]*3, [0.5]*3): map values from [-1, 1] back to [0, 1]."""
    return 0.5 * tensor + 0.5
# def get_train_dataset(imgs_folder):
# train_transform = trans.Compose([
# trans.RandomHorizontalFlip(),
# trans.ToTensor(),
# trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
# ])
# ds = ImageFolder(str(imgs_folder), train_transform)
# class_num = ds[-1][1] + 1
# return ds, class_num
#
# def get_train_loader(conf):
# if conf.data_mode in ['ms1m', 'concat']:
# ms1m_ds, ms1m_class_num = get_train_dataset(conf.ms1m_folder/'imgs')
# print('ms1m loader generated')
# if conf.data_mode in ['vgg', 'concat']:
# vgg_ds, vgg_class_num = get_train_dataset(conf.vgg_folder/'imgs')
# print('vgg loader generated')
# if conf.data_mode == 'vgg':
# ds = vgg_ds
# class_num = vgg_class_num
# elif conf.data_mode == 'ms1m':
# ds = ms1m_ds
# class_num = ms1m_class_num
# elif conf.data_mode == 'concat':
# for i,(url,label) in enumerate(vgg_ds.imgs):
# vgg_ds.imgs[i] = (url, label + ms1m_class_num)
# ds = ConcatDataset([ms1m_ds,vgg_ds])
# class_num = vgg_class_num + ms1m_class_num
# elif conf.data_mode == 'emore':
# ds, class_num = get_train_dataset(conf.emore_folder/'imgs')
# elif conf.data_mode == 'glint':
# ds, class_num = get_train_dataset(conf.glint_folder/'imgs')
# train_sampler = distributed.DistributedSampler(ds) #add line
# loader = DataLoader(ds, batch_size=conf.batch_size, shuffle=False, pin_memory=conf.pin_memory, num_workers=conf.num_workers, sampler = train_sampler)
# return loader, class_num
def get_test_dataset(imgs_folder):
    """Build an evaluation ImageFolder dataset (no augmentation) plus its class count."""
    eval_transform = trans.Compose([
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])
    dataset = ImageFolder(imgs_folder, eval_transform)
    # ImageFolder assigns labels in sorted class order, so the last sample's
    # label is the highest class index.
    class_num = dataset[-1][1] + 1
    return dataset, class_num
def get_test_loader(conf):
    """Build query/gallery evaluation loaders.

    Query: dynamic camera shots of kc employees.
    Gallery: kc employee id photos plus a large ("10w" = 100k) distractor
    set, with distractor labels shifted past the employee label range.
    Returns (loader_dict, query_ds, gallery_ds) where loader_dict[split]
    holds 'dl' (DataLoader), 'cn' (class count) and 'len' (sample count).
    """
    # sh_dynamic=conf.data_path/'test'/'shanghai_cam_dynamic_112_test_1k'
    sh_dynamic=conf.data_path/'test'/'kc_employee_dynamic_112'
    sh_dynamic_ds, sh_dynamic_class_num = get_test_dataset(sh_dynamic)
    query_ds = sh_dynamic_ds
    query_class_num = sh_dynamic_class_num
    # sh_id=conf.data_path/'test'/'shanghai_cam_id_112_test_1k'
    sh_id=conf.data_path/'test'/'kc_employee_id_112'
    sh_id_ds, sh_id_class_num = get_test_dataset(sh_id)
    db_id=conf.data_path/'test'/'10w_112'
    db_id_ds, db_id_class_num = get_test_dataset(db_id)
    # Shift distractor labels so they don't collide with employee labels
    # before concatenating the two gallery datasets.
    for i,(url,label) in enumerate(db_id_ds.imgs):
        db_id_ds.imgs[i] = (url, label + sh_id_class_num)
    gallery_ds = ConcatDataset([sh_id_ds, db_id_ds])
    gallery_class_num = sh_id_class_num + db_id_class_num
    # (alternate query/gallery folders kept for reference)
    # sh_dynamic=conf.data_path/'test'/'q_shanghai_cam_dynamic_112_test_1k'
    # sh_dynamic_ds, sh_dynamic_class_num = get_test_dataset(sh_dynamic)
    # query_ds = sh_dynamic_ds
    # query_class_num = sh_dynamic_class_num
    #
    # sh_id=conf.data_path/'test'/'g_shanghai_cam_dynamic_112_test_1k'
    # sh_id_ds, sh_id_class_num = get_test_dataset(sh_id)
    # gallery_ds = sh_id_ds
    # gallery_class_num = sh_id_class_num
    loader = {}
    loader['query'] = {}
    # shuffle=False keeps extracted features aligned with dataset order.
    loader['query']['dl'] = DataLoader(query_ds, batch_size=conf.batch_size, shuffle=False, pin_memory=conf.pin_memory, num_workers=conf.num_workers)
    loader['query']['cn'] = query_class_num
    loader['query']['len'] = len(query_ds)
    loader['gallery'] = {}
    loader['gallery']['dl'] = DataLoader(gallery_ds, batch_size=conf.batch_size, shuffle=False, pin_memory=conf.pin_memory, num_workers=conf.num_workers)
    loader['gallery']['cn'] = gallery_class_num
    loader['gallery']['len'] = len(gallery_ds)
    return loader, query_ds, gallery_ds
def load_bin(path, rootdir, transform, image_size=[112, 112]):
    """Decode a pickled .bin of (encoded images, issame flags) into a bcolz carray.

    Writes the transformed images under `rootdir` and saves the issame flags
    next to it as `<rootdir>_list.npy`. Returns (carray, issame_list).

    NOTE(review): the mutable default `image_size=[112, 112]` is shared
    across calls; it is never mutated here, so it is left unchanged for
    interface compatibility.
    """
    if not rootdir.exists():
        rootdir.mkdir()
    bins, issame_list = pickle.load(open(path, 'rb'), encoding='bytes')
    data = bcolz.fill([len(bins), 3, image_size[0], image_size[1]], dtype=np.float32, rootdir=rootdir, mode='w')
    for i in range(len(bins)):
        _bin = bins[i]
        img = mx.image.imdecode(_bin).asnumpy()
        # img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) # remarked by fengchen
        img = Image.fromarray(img.astype(np.uint8))
        data[i, ...] = transform(img)
        # Fix: removed a dead `i += 1` -- the for statement rebinds `i` each
        # iteration, so the manual increment only shifted this progress
        # message. The adjusted condition keeps the original print cadence
        # (first message at the 1000th image).
        if (i + 1) % 1000 == 0:
            print('loading bin', i + 1)
    print(data.shape)
    np.save(str(rootdir) + '_list', np.array(issame_list))
    return data, issame_list
def get_val_pair(path, name):
    """Load one validation set: the bcolz image carray plus its issame flags."""
    images = bcolz.carray(rootdir=str(path / name), mode='r')
    issame = np.load(path / '{}_list.npy'.format(name))
    return images, issame
def get_val_data(data_path):
    """Load the agedb_30, cfp_fp and lfw validation pairs from *data_path*."""
    pairs = {name: get_val_pair(data_path, name) for name in ('agedb_30', 'cfp_fp', 'lfw')}
    return (
        pairs['agedb_30'][0], pairs['cfp_fp'][0], pairs['lfw'][0],
        pairs['agedb_30'][1], pairs['cfp_fp'][1], pairs['lfw'][1],
    )
def load_mx_rec(rec_path):
    """Unpack an MXNet train.rec/train.idx face dataset into per-label jpg folders.

    Record 0 is a header whose first label field gives the max record index;
    records 1..max_idx-1 each hold one image and its identity label. Images
    are saved under rec_path/imgs/<label>/<record_idx>.jpg.
    """
    save_path = rec_path/'imgs'
    if not save_path.exists():
        save_path.mkdir()
    imgrec = mx.recordio.MXIndexedRecordIO(str(rec_path/'train.idx'), str(rec_path/'train.rec'), 'r')
    img_info = imgrec.read_idx(0)
    header,_ = mx.recordio.unpack(img_info)
    max_idx = int(header.label[0])
    for idx in tqdm(range(1,max_idx)):
        img_info = imgrec.read_idx(idx)
        header, img = mx.recordio.unpack_img(img_info)
        # label = int(header.label)
        label = int(header.label[0])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # added by fengchen
        img = Image.fromarray(img)
        label_path = save_path/str(label)
        if not label_path.exists():
            label_path.mkdir()
        # File named by record index, which is unique across the dataset.
        img.save(label_path/'{}.jpg'.format(idx), quality=95)
        # img.save(label_path/'{}.png'.format(idx))
        # img_path = str(label_path/'{}.png'.format(idx))
        # cv2.imwrite(img_path, img, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
# class train_dataset(Dataset):
# def __init__(self, imgs_bcolz, label_bcolz, h_flip=True):
# self.imgs = bcolz.carray(rootdir = imgs_bcolz)
# self.labels = bcolz.carray(rootdir = label_bcolz)
# self.h_flip = h_flip
# self.length = len(self.imgs) - 1
# if h_flip:
# self.transform = trans.Compose([
# trans.ToPILImage(),
# trans.RandomHorizontalFlip(),
# trans.ToTensor(),
# trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
# ])
# self.class_num = self.labels[-1] + 1
# def __len__(self):
# return self.length
# def __getitem__(self, index):
# img = torch.tensor(self.imgs[index+1], dtype=torch.float)
# label = torch.tensor(self.labels[index+1], dtype=torch.long)
# if self.h_flip:
# img = de_preprocess(img)
# img = self.transform(img)
# return img, label
def has_file_allowed_extension(filename, extensions):
    """Return True when *filename* (lower-cased) ends with one of *extensions*.

    Args:
        filename (string): path to a file
        extensions: iterable of allowed suffixes, e.g. ['.jpg', '.png']

    Returns:
        bool: True if the filename ends with a known image extension
    """
    lowered = filename.lower()
    # str.endswith accepts a tuple of suffixes, testing all in one C call.
    return lowered.endswith(tuple(extensions))
def find_classes(dir):
    """Scan *dir* for class subdirectories.

    Returns (number of classes, {class_name: index}) with indices assigned
    in sorted name order, matching torchvision's ImageFolder convention.
    """
    names = sorted(
        entry for entry in os.listdir(dir)
        if os.path.isdir(os.path.join(dir, entry))
    )
    class_to_idx = {name: idx for idx, name in enumerate(names)}
    return len(names), class_to_idx
def make_dataset(dir, class_to_idx, extensions):
    """Collect (path, label) samples for every allowed file under each class dir.

    Mirrors torchvision's ImageFolder scan: classes are the first-level
    subdirectories of *dir*, files are matched case-insensitively against
    *extensions*, and directories/files are visited in sorted order.
    """
    samples = []  # (path, label)
    dir = os.path.expanduser(dir)
    for class_name in sorted(os.listdir(dir)):
        class_dir = os.path.join(dir, class_name)
        if not os.path.isdir(class_dir):
            continue
        label = class_to_idx[class_name]
        for walk_root, _, filenames in sorted(os.walk(class_dir)):
            for filename in sorted(filenames):
                if has_file_allowed_extension(filename, extensions):
                    samples.append((os.path.join(walk_root, filename), label))
    return samples
def read_image(img_path):
    """Open *img_path* as an RGB PIL image, retrying on transient IOError.

    Raises IOError immediately when the path does not exist; otherwise keeps
    retrying until PIL manages to read the file (guards against errors from
    heavy concurrent IO).
    """
    if not os.path.exists(img_path):
        raise IOError("{} does not exist".format(img_path))
    while True:
        try:
            return Image.open(img_path).convert('RGB')
        except IOError:
            print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
class ImageDataset(Dataset):
    """Dataset over a list of (image_path, person_id) samples."""

    def __init__(self, dataset, transform=None):
        self.dataset = dataset      # list of (img_path, pid) tuples
        self.transform = transform  # optional callable applied to the PIL image

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        path, pid = self.dataset[index]
        image = read_image(path)
        if self.transform is not None:
            image = self.transform(image)
        return image, pid
class ImageLandmarkDataset(Dataset):
    """Dataset of (image, person_id, landmarks) triples loaded from a txt index.

    Each line of the index file is:
        <relative_img_path> <person_id> <x1> <y1> ... (landmark floats)
    Image paths are resolved relative to the index file's directory.
    """

    def __init__(self, txt_path, transform=None):
        # txt_path: pathlib.Path to the index file; its parent is the data root.
        self.root = txt_path.parent
        self.class_num = 0  # set by getDataFromTxt() during parsing
        if not os.path.exists(txt_path):
            # NOTE(review): logger.error is called with two positional args;
            # the stdlib logging API would treat the second as a %-format
            # argument -- this logger comes from the project's log module,
            # so confirm its signature.
            logger.error('Error:', txt_path)
            exit(1)
        self.dataset = self.process_txt(txt_path,relabel=True)
        self.transform = transform

    def getDataFromTxt(self, txt_path):
        """Parse the index file into {person_id: [(img_name, landmarks), ...]}.

        Side effect: sets self.class_num to the number of distinct persons.
        """
        data_dict = {}
        with open(txt_path, 'r') as f:
            data_raw = f.readlines()
            for line_i in data_raw:
                line_i = line_i.strip('\n')
                dataitem = line_i.split()
                img_name = dataitem[0]
                pid = dataitem[1]
                landmarks = [float(i) for i in dataitem[2:]]
                if pid in data_dict:
                    data_dict[pid].append((img_name, landmarks))
                else:
                    data_dict[pid] = [(img_name, landmarks)]
        logger.debug('image num {}'.format(len(data_raw)))
        self.class_num = len(data_dict)
        logger.debug('person num {}'.format(self.class_num))
        return data_dict

    def process_txt(self, txt_path, relabel=False):
        """Flatten the parsed dict into [(abs_img_path, pid, landmarks), ...].

        With relabel=True, person ids are remapped to 0..class_num-1 in
        first-seen order.
        """
        logger.debug('txt_path {}'.format(txt_path))
        res_data_list = []
        data_dir, _ = os.path.split(txt_path)
        data_dic = self.getDataFromTxt(txt_path)
        all_inds = data_dic.keys()
        if relabel:
            pid2label = {pid: label for label, pid in enumerate(all_inds)}
        for pid in all_inds:
            data_per_person = data_dic[pid]
            if relabel:
                pid = pid2label[pid]
            for i, (path, landmarks) in enumerate(data_per_person):
                # print(path, landmarks)
                img_path = os.path.join(data_dir, path)
                res_data_list.append((img_path, pid, landmarks))
        return res_data_list

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        img_path, pid, landmarks = self.dataset[index]
        img = read_image(img_path)
        if self.transform is not None:
            img = self.transform(img)
        return img, pid, landmarks
def get_train_loader(conf, data_mode, sample_identity=False):
    """Build the distributed training DataLoader for one folder dataset.

    @param conf config (emore_folder/glint_folder, batch_size, num_instances,
                pin_memory, num_workers)
    @param data_mode 'emore' or 'glint'; anything else aborts the process
    @param sample_identity use identity-balanced sampling instead of the plain
           DistributedSampler
    @return (loader, class_num)
    """
    if data_mode == 'emore':
        root = conf.emore_folder/'imgs'
    elif data_mode == 'glint':
        root = conf.glint_folder/'imgs'
    else:
        # Fix: message typo "invalide" -> "invalid"
        logger.fatal('invalid data_mode {}'.format(data_mode))
        exit(1)
    class_num, class_to_idx = find_classes(root)
    train_transform = trans.Compose([
        trans.RandomHorizontalFlip(),
        trans.ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0),
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    extensions = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
    path_ds = make_dataset(root, class_to_idx, extensions)
    dataset = ImageDataset(path_ds, train_transform)
    if sample_identity:
        train_sampler = DistRandomIdentitySampler(dataset.dataset, conf.batch_size, conf.num_instances)
    else:
        train_sampler = distributed.DistributedSampler(dataset)
    # shuffle=False because the sampler owns the ordering
    loader = DataLoader(dataset, batch_size=conf.batch_size, shuffle=False, pin_memory=conf.pin_memory,
                        num_workers=conf.num_workers, sampler=train_sampler)
    return loader, class_num
def get_train_loader_concat(conf, data_roots, sample_identity=False):
    """Build one training DataLoader over the concatenation of several
    class-per-folder datasets.

    Labels of each successive root are offset by the number of classes seen so
    far, so the label spaces of the individual datasets do not collide.

    @param conf config (batch_size, num_instances, pin_memory, num_workers)
    @param data_roots iterable of dataset root directories
    @param sample_identity use identity-balanced sampling instead of the plain
           DistributedSampler
    @return (loader, total_class_num)
    """
    extensions = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
    # Bug fix: the transform was rebuilt on every loop iteration and was left
    # undefined (NameError below) when data_roots was empty; it is loop
    # invariant, so build it once up front.
    train_transform = trans.Compose([
        trans.RandomHorizontalFlip(),
        trans.ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0),
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    total_class_num = 0
    datasets = []
    for root in data_roots:
        class_num, class_to_idx = find_classes(root)
        path_ds = make_dataset(root, class_to_idx, extensions)
        # Shift this root's labels into its own contiguous range
        for i, (url, label) in enumerate(path_ds):
            path_ds[i] = (url, label + total_class_num)
        datasets.extend(path_ds)
        total_class_num += class_num
    image_ds = ImageDataset(datasets, train_transform)
    if sample_identity:
        train_sampler = DistRandomIdentitySampler(image_ds.dataset, conf.batch_size, conf.num_instances)
    else:
        train_sampler = distributed.DistributedSampler(image_ds)
    loader = DataLoader(image_ds, batch_size=conf.batch_size, shuffle=False, pin_memory=conf.pin_memory,
                        num_workers=conf.num_workers, sampler=train_sampler)
    return loader, total_class_num
def get_train_loader_from_txt(conf, data_mode, sample_identity=False):
    """Build the distributed training DataLoader from a train_list.txt file
    (images + landmarks), see ImageLandmarkDataset.

    @param conf config (emore_folder/glint_folder, batch_size, num_instances,
                pin_memory, num_workers)
    @param data_mode 'emore' or 'glint'; anything else aborts the process
    @param sample_identity use identity-balanced sampling instead of the plain
           DistributedSampler
    @return (loader, class_num) where class_num comes from the parsed txt
    """
    if data_mode == 'emore':
        txt_path = conf.emore_folder/'imgs'/'train_list.txt'
    elif data_mode == 'glint':
        txt_path = conf.glint_folder/'imgs'/'train_list.txt'
    else:
        # Fix: message typo "invalide" -> "invalid"
        logger.fatal('invalid data_mode {}'.format(data_mode))
        exit(1)
    train_transform = trans.Compose([
        trans.RandomHorizontalFlip(),
        trans.ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0),
        trans.ToTensor(),
        trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    dataset = ImageLandmarkDataset(txt_path, train_transform)
    if sample_identity:
        train_sampler = DistRandomIdentitySampler(dataset.dataset, conf.batch_size, conf.num_instances)
    else:
        train_sampler = distributed.DistributedSampler(dataset)
    loader = DataLoader(dataset, batch_size=conf.batch_size, shuffle=False, pin_memory=conf.pin_memory,
                        num_workers=conf.num_workers, sampler=train_sampler)
    return loader, dataset.class_num
from protocols.migration import MigrateReports500To600, BaseMigration
from protocols.migration.base_migration import BaseMigrateReports500And600
from protocols.protocol_6_1 import reports as new_model
from protocols.protocol_7_0 import reports as old_model
from protocols.protocol_7_0.reports import diseaseType, TissueSource
from protocols.tests.test_migration.base_test_migration import TestCaseMigration
from protocols.migration.migration_reports_600_to_reports_500 import MigrateReports600To500
class TestMigrateReports600To500(TestCaseMigration):
    """Tests for MigrateReports600To500 (GEL report models v6 -> v5).

    Each test builds a valid version-6 object, migrates it down, and checks
    the result is of the expected type and validates against its schema.
    Most tests run twice: once with all nullable fields populated
    (fill_nullables=True) and once via the *_nulls / *_no_nullables
    companion with nullables left empty.

    NOTE(review): `new_model` aliases protocol_6_1 and `old_model` aliases
    protocol_7_0 (see imports at the top of this file) -- the "new"/"old"
    aliasing does not obviously line up with the 600->500 direction in the
    class name; confirm against the protocols package version mapping.
    """

    def test_migrate_interpretation_request_rd(self, fill_nullables=True):
        # v6 InterpretationRequestRD -> v5; result must validate
        ir_rd_6 = self.get_valid_object(object_type=old_model.InterpretationRequestRD, version=self.version_7_0,
                                        fill_nullables=fill_nullables)
        ir_rd_5 = MigrateReports600To500().migrate_interpretation_request_rd(old_instance=ir_rd_6)
        self.assertIsInstance(ir_rd_5, new_model.InterpretationRequestRD)
        self.assertTrue(ir_rd_5.validate(ir_rd_5.toJsonDict()))

    def test_migrate_interpretation_request_rd_nulls(self):
        # Same migration with nullable fields left unset
        self.test_migrate_interpretation_request_rd(fill_nullables=False)

    def variant_with_type_valid_in_both_models(self):
        # Helper (not a test): a SmallVariant whose genomic entity type exists
        # in both model versions.  Appears unused in this chunk -- TODO confirm
        # callers elsewhere in the suite.
        small_variant = self.get_valid_object(object_type=old_model.SmallVariant, version=self.version_7_0)
        for re in small_variant.reportEvents:
            for ge in re.genomicEntities:
                ge.type = old_model.GenomicEntityType.intergenic
        return small_variant

    def test_migration_of_new_enum_values_get_set_to_none(self):
        # Enum members added in v6 have no v5 equivalent: the migration is
        # expected to null them out rather than fail.
        ir_6 = self.get_valid_object(object_type=old_model.CancerInterpretationRequest, version=self.version_7_0)
        samples = ir_6.cancerParticipant.tumourSamples
        for sample in samples:
            sample.diseaseType = diseaseType.ENDOCRINE
            sample.tissueSource = TissueSource.NOT_SPECIFIED
        ir_5 = MigrateReports600To500().migrate_interpretation_request_cancer(old_instance=ir_6)
        self.assertIsInstance(ir_5, new_model.CancerInterpretationRequest)
        self.assertTrue(ir_5.validate(ir_5.toJsonDict()))
        samples = ir_5.cancerParticipant.tumourSamples
        for sample in samples:
            self.assertIsNone(sample.diseaseType)
            self.assertIsNone(sample.tissueSource)

    def test_migrate_interpreted_genome_to_interpreted_genome_rd(self):
        # Small Variants are required to migrate from InterpretedGenome version 6 to InterpretedGenomeRD version 5 as
        # Reported Variants are required in v5 so nullables must be filled
        ig_6 = self.get_valid_object(
            object_type=old_model.InterpretedGenome, version=self.version_7_0, fill_nullables=True,
        )
        ig_rd_5 = MigrateReports600To500().migrate_interpreted_genome_to_interpreted_genome_rd(old_instance=ig_6)
        self.assertIsInstance(ig_rd_5, new_model.InterpretedGenomeRD)
        self.assertTrue(ig_rd_5.validate(ig_rd_5.toJsonDict()))

    def test_migrate_clinical_report_rd(self, fill_nullables=True):
        cr_rd_6 = self.get_valid_object(object_type=old_model.ClinicalReport, version=self.version_7_0, fill_nullables=fill_nullables)
        cr_rd_5 = MigrateReports600To500().migrate_clinical_report_rd(old_instance=cr_rd_6)
        self.assertIsInstance(cr_rd_5, new_model.ClinicalReportRD)
        self.assertTrue(cr_rd_5.validate(cr_rd_5.toJsonDict()))

    def test_migrate_clinical_report_rd_no_nullables(self):
        self.test_migrate_clinical_report_rd(fill_nullables=False)

    def test_migrate_clinical_report_cancer(self, fill_nullables=True):
        cr_6 = self.get_valid_object(object_type=old_model.ClinicalReport, version=self.version_7_0, fill_nullables=fill_nullables)
        cr_c_5 = MigrateReports600To500().migrate_clinical_report_cancer(old_instance=cr_6)
        self.assertIsInstance(cr_c_5, new_model.ClinicalReportCancer)
        self.assertTrue(cr_c_5.validate(cr_c_5.toJsonDict()))

    def test_migrate_clinical_report_cancer_no_nullables(self):
        self.test_migrate_clinical_report_cancer(fill_nullables=False)

    def test_migrate_exit_questionnaire_rd(self, fill_nullables=True):
        eq_rd_6 = self.get_valid_object(object_type=old_model.RareDiseaseExitQuestionnaire, version=self.version_7_0,
                                        fill_nullables=fill_nullables)
        eq_rd_5 = MigrateReports600To500().migrate_exit_questionnaire_rd(old_instance=eq_rd_6)
        self.assertIsInstance(eq_rd_5, new_model.RareDiseaseExitQuestionnaire)
        self.assertTrue(eq_rd_5.validate(eq_rd_5.toJsonDict()))
        # Flatten group-level questions and check coordinates survived as details
        self._check_variant_details_conversion(
            [vq for gq in eq_rd_6.variantGroupLevelQuestions for vq in gq.variantLevelQuestions],
            [vq for gq in eq_rd_5.variantGroupLevelQuestions for vq in gq.variantLevelQuestions])

    def test_migrate_exit_questionnaire_rd_no_nullables(self):
        self.test_migrate_exit_questionnaire_rd(fill_nullables=False)

    def test_migrate_cancer_exit_questionnaire(self, fill_nullables=True):
        ceq_6 = self.get_valid_object(object_type=old_model.CancerExitQuestionnaire, version=self.version_7_0, fill_nullables=fill_nullables)
        ceq_5 = MigrateReports600To500().migrate_cancer_exit_questionnaire(old_instance=ceq_6)
        self.assertIsInstance(ceq_5, new_model.CancerExitQuestionnaire)
        self.assertTrue(ceq_5.validate(ceq_5.toJsonDict()))
        self._check_variant_details_conversion(ceq_6.somaticVariantLevelQuestions, ceq_5.somaticVariantLevelQuestions)
        self._check_variant_details_conversion(ceq_6.germlineVariantLevelQuestions, ceq_5.germlineVariantLevelQuestions)
        self._check_variant_details_conversion(ceq_6.otherActionableVariants, ceq_5.otherActionableVariants)

    def _check_variant_details_conversion(self, things_with_coordinates, things_with_details):
        # v6 carries structured variantCoordinates; v5 packs them into a
        # "chrom:pos:ref:alt" variantDetails string.  Verify field by field.
        if things_with_details and things_with_coordinates:
            coordinates = [sq.variantCoordinates for sq in things_with_coordinates]
            details = [sq.variantDetails for sq in things_with_details]
            for c, d in zip(coordinates, details):
                d_fields = d.split(":")
                self.assertEqual(d_fields[0], c.chromosome)
                self.assertEqual(d_fields[1], str(c.position))
                self.assertEqual(d_fields[2], c.reference)
                self.assertEqual(d_fields[3], c.alternate)

    def test_migrate_cancer_exit_questionnaire_no_nullables(self):
        self.test_migrate_cancer_exit_questionnaire(fill_nullables=False)

    def test_migrate_report_event(self, fill_nullables=True):
        # Case 1: no eventJustification in v6 -- the segregationPattern is
        # expected to be folded into the v5 eventJustification text.
        re_rd_6 = self.get_valid_object(object_type=old_model.ReportEvent, version=self.version_7_0,
                                        fill_nullables=fill_nullables)
        re_rd_6.eventJustification = None
        re_rd_6.segregationPattern = old_model.SegregationPattern.CompoundHeterozygous
        re_rd_5 = BaseMigration.convert_class(target_klass=new_model.ReportEvent, instance=re_rd_6)
        re_rd_5 = MigrateReports600To500()._migrate_report_event((re_rd_6, re_rd_5))
        self.assertIsInstance(re_rd_5, new_model.ReportEvent)
        self.assertTrue(re_rd_5.validate(re_rd_5.toJsonDict()))
        self.assertTrue(old_model.SegregationPattern.CompoundHeterozygous in re_rd_5.eventJustification)
        # Case 2: an existing eventJustification must be preserved untouched.
        re_rd_6 = self.get_valid_object(object_type=old_model.ReportEvent, version=self.version_7_0,
                                        fill_nullables=fill_nullables)
        re_rd_6.eventJustification = "I have an event justification"
        re_rd_6.segregationPattern = old_model.SegregationPattern.CompoundHeterozygous
        re_rd_5 = BaseMigration.convert_class(target_klass=new_model.ReportEvent, instance=re_rd_6)
        re_rd_5 = MigrateReports600To500()._migrate_report_event((re_rd_6, re_rd_5))
        self.assertIsInstance(re_rd_5, new_model.ReportEvent)
        self.assertTrue(re_rd_5.validate(re_rd_5.toJsonDict()))
        self.assertTrue(re_rd_5.eventJustification is not None)
        self.assertTrue(old_model.SegregationPattern.CompoundHeterozygous not in re_rd_5.eventJustification)
        # Case 3: neither field set -- eventJustification stays None.
        re_rd_6 = self.get_valid_object(object_type=old_model.ReportEvent, version=self.version_7_0,
                                        fill_nullables=fill_nullables)
        re_rd_6.eventJustification = None
        re_rd_6.segregationPattern = None
        re_rd_5 = BaseMigration.convert_class(target_klass=new_model.ReportEvent, instance=re_rd_6)
        re_rd_5 = MigrateReports600To500()._migrate_report_event((re_rd_6, re_rd_5))
        self.assertIsInstance(re_rd_5, new_model.ReportEvent)
        self.assertTrue(re_rd_5.validate(re_rd_5.toJsonDict()))
        self.assertTrue(re_rd_5.eventJustification is None)
|
# -*- coding: utf-8 -*-
# code for console Encoding difference. Don't mind on it
import sys
import imp

imp.reload(sys)
try:
    # Python 2 only; the attribute does not exist on Python 3.
    sys.setdefaultencoding('UTF8')
except Exception:
    # Bug fix: the original handler read "except Except as E" -- `Except` is
    # not a defined name, so on Python 3 the AttributeError above escalated
    # into a NameError instead of being swallowed.
    pass

import testValue
from closedown import CloseDown, CloseDownException

closedownChecker = CloseDown(testValue.LinkID, testValue.SecretKey)

try:
    print("휴폐업조회 - 대량")

    # List of business registration numbers, up to 1000 entries
    corpNumList = ["4108600477", "1234567890", "8888888888", "4352343543"]

    corpStateList = closedownChecker.checkCorpNums(corpNumList)

    # Legend for the state/type codes printed below (Korean, kept verbatim)
    tmp = "* state(사업자상태) : null - 알수없음, 0 - 등록되지 않은 사업자번호, 1 - 사업중, 2 - 폐업, 3 - 휴업 " + "\n"
    tmp += "* type(사업자유형) : null - 알수없음, 1 - 부가가치세 일반과세자, 2 - 부가가치세 면세과세자, 3 - 부가가치세 간이과세자, 4 - 비영리법인 또는 국가기관, 고유번호가 부여된 단체 " + "\n"
    print(tmp)

    i = 0
    for CorpState in corpStateList:
        i = i + 1
        print("corpNum : %s " % CorpState.corpNum)
        print("type : %s " % CorpState.type)
        print("state : %s " % CorpState.state)
        print("stateDate : %s " % CorpState.stateDate)
        print("checkDate : %s " % CorpState.checkDate)
        print("\n")

except CloseDownException as CE:
    print("Exception Occur : [%d] %s" % (CE.code, CE.message))
import logging
import os
from collections import namedtuple
import threading
from du.gerrit.Utils import Utils as GerritUtils
from du.utils.ShellCommand import ShellCommand, CommandFailedException
from du.gerrit.rest.change.ChangeEndpoint import ChangeEndpoint
from du.gerrit.ssh.Connection import Connection
from du.gerrit.ssh.Change import Change
from du.drepo.Utils import Utils
from du.gerrit.rest.change.QueryOption import QueryOption
from du.gerrit.ChangeStatus import ChangeStatus
from du.drepo.report.Types import *
import concurrent.futures
logger = logging.getLogger(__name__.split(".")[-1])
class Analyzer:
    """
    DRepo repositories cls. Uses the manifest and goes through all local
    repositories building metadata along the way
    """

    @classmethod
    def analyze(
        cls, manifest, httpCredentials, numMergedCommits, tagPattern=None, numThreads=1
    ):
        """
        Analyze projects

        @param manifest Input manifest
        @param httpCredentials Credentials used for REST calls to Gerrit
        @param numMergedCommits Number of merged commits to include in the report
        @param tagPattern Tag pattern to match
        @param numThreads Number of worker threads analyzing projects in parallel
        @return ReportInfo aggregating per-project info, host name and user name
        """
        projectInfoResults = {}

        # Shared work queue; workers take items with list.pop(), which is
        # atomic under the GIL, so no explicit lock is needed
        projectsToAnalyze = [proj for proj in manifest.projects]

        with concurrent.futures.ThreadPoolExecutor(max_workers=numThreads) as executor:
            # Launch threads
            futures = []
            for i in range(numThreads):
                futures.append(
                    executor.submit(
                        cls.__analyzer,
                        projectsToAnalyze,
                        manifest,
                        httpCredentials,
                        tagPattern,
                        numMergedCommits,
                    )
                )

            # Wait for results
            for future in futures:
                for projectInfo in future.result():
                    projectInfoResults[projectInfo.manifestProject] = projectInfo
            # NOTE: the explicit executor.shutdown() that used to live here was
            # redundant -- leaving the "with" block performs the shutdown.

        # Sort the results in the same order as defined in the manifest
        projectsInfo = []
        for project in manifest.projects:
            if project in projectInfoResults:
                projectsInfo.append(projectInfoResults[project])

        # Host name
        hostName = ShellCommand.execute(["hostname"]).stdoutStr.strip()

        # User name
        userName = ShellCommand.execute(["whoami"]).stdoutStr.strip()

        return ReportInfo(manifest, projectsInfo, hostName, userName)

    @classmethod
    def __analyzer(
        cls, projectsToAnalyze, manifest, httpCredentials, tagPattern, numMergedCommits
    ):
        """
        Project analyzer thread

        @param projectsToAnalyze List of projects that are not yet analyzed (shared between threads)
        @param manifest see cls.analyze#manifest
        @param httpCredentials see cls.analyze#httpCredentials
        @param tagPattern see cls.analyze#tagPattern
        @param numMergedCommits Number of merged commits to include in the report
        @return list of ProjectInfo for the projects this worker processed
        """
        threadName = threading.current_thread().name

        logger.debug("[%s] start analyzer" % threadName)

        result = []

        while projectsToAnalyze:
            # Take next available project; pop() may still raise if another
            # worker drained the list between the check and the pop
            try:
                project = projectsToAnalyze.pop()
            except IndexError:
                break

            logger.debug("[%s] analyzing %r" % (threadName, project.name))

            # Process
            projectInfo = cls.__analyzeProject(
                manifest, project, httpCredentials, tagPattern, numMergedCommits
            )
            if projectInfo:
                result.append(projectInfo)

            logger.debug("[%s] done analyzing %r" % (threadName, project.name))

        return result

    @classmethod
    def __analyzeProject(
        cls, manifest, proj, httpCredentials, tagPattern, numMergedCommits
    ):
        """
        Analyze a single project: collect tag info and its recent commits,
        enriched with Gerrit change information where available.

        @return ProjectInfo, or None if the local checkout is missing
        """
        logger.debug("processing %r .." % proj.name)

        # Local project directory
        localDir = os.path.join(manifest.selectedBuild.root, proj.path)
        logger.debug("directory %r" % localDir)

        # Don't crash in case one of the projects was deleted from the disk, just report a warning
        if not os.path.isdir(localDir) or not os.path.isdir(
            os.path.join(localDir, ".git")
        ):
            # Fix: message typo "skpping" -> "skipping"
            logger.warning("not valid git directory, skipping ..")
            return None

        # Create a connection for Gerrit communication
        conn = Utils.createQueryConnection(proj.remote, httpCredentials)

        # Get tag name
        tagInfo = cls.__getTagInfo(localDir, tagPattern)

        # Get commit log of the project
        log = cls.__getGitLog(localDir)

        # Countdown of merged commits still allowed in the report
        historyLength = numMergedCommits

        commits = []

        # Go through the log
        for logItem in log:
            # Extract full message from the local .git
            message = ShellCommand.execute(
                ["git", "show", "-s", "--format=%B", logItem.hash],
                workingDirectory=localDir,
            ).stdoutStr

            # Extract author (TODO Can we get message & author in a single command?)
            author = ShellCommand.execute(
                ["git", "log", "--format=%an", logItem.hash + "^!"],
                workingDirectory=localDir,
            ).stdoutStr

            # Extract gerrit change from local .git message
            changeId = GerritUtils.extractChangeId(message)

            # Gerrit change info
            gerritChangeInfo = None

            if not changeId:
                # No change ID (not a gerrit commit ?)
                logger.warning(
                    "could not extract Gerrit change ID, for commit %r from message %r"
                    % (logItem.hash, message)
                )
            else:
                # Fetch information about this change
                gerritChangeInfo = cls.__fetchGerritChangeInfo(
                    conn, changeId, proj.name, logItem.hash
                )

            commitInfo = CommitInfo(
                logItem.title,
                logItem.hash,
                logItem.shortHash,
                author,
                gerritChangeInfo,
            )

            commits.append(commitInfo)

            if (
                commitInfo.gerritChangeInfo
                and commitInfo.gerritChangeInfo.status == ChangeStatus.MERGED
            ):
                historyLength -= 1
                if historyLength == 0:
                    # Reached allowed number of merged commits depth
                    logger.debug("Maximum history length reached %d" % numMergedCommits)
                    break

        return ProjectInfo(proj, tagInfo, commits)

    @classmethod
    def __fetchGerritChangeInfo(cls, conn, changeId, projectName, commitHash):
        """
        Fetch a change from specific project

        @param conn Gerrit connection (REST ChangeEndpoint or SSH Connection)
        @param changeId Change ID
        @param projectName Project name
        @param commitHash Local commit hash
        @return Change information, or None if the change is not found
        """
        # Fetch data from server; the query API differs between REST and SSH
        if isinstance(conn, ChangeEndpoint):
            changes = conn.query(
                changeId,
                options=[QueryOption.ALL_REVISIONS, QueryOption.CURRENT_COMMIT],
            )
        else:
            changes = conn.query(
                Connection.QUERY_ARG_PATCHSETS,
                Connection.QUERY_ARG_CURRENT_PATCHSET,
                change=changeId,
            )

        # Find a change in the project we're processing now
        patchsetNumber = None
        changeInfo = None
        for change in changes:
            # Ignore changes which do not belong to our project
            if change.project != projectName:
                continue

            changeInfo = change

            # Try to figure out the patchset number, by comparing hash values
            if isinstance(conn, ChangeEndpoint):
                for revisionHash, revision in change.revisions.items():
                    if revisionHash == commitHash:
                        patchsetNumber = revision.number
                        break
            else:
                for i in change.patchSets:
                    if i.revision == commitHash:
                        patchsetNumber = i.number
                        break

            if patchsetNumber:
                # Found exactly this hash on Gerrit
                break

        if not changeInfo:
            # Fix: message used to read "could find change" (missing "not")
            logger.warning(
                "could not find change %r in project %r" % (changeId, projectName)
            )
            return None

        return GerritChangeInfo(
            changeInfo.number,
            patchsetNumber,
            changeInfo.status,
            changeInfo.currentRevision.number,
        )

    @classmethod
    def __getGitLog(cls, directory):
        """
        Get a list of git commits for given directory

        @param directory Directory path
        @return a list of log items (hash, shortHash, title)
        """
        LogItem = namedtuple("LogItem", "hash, shortHash, title")

        # List the logs long/short hashes and their subjects
        cmd = ShellCommand.execute(
            ["git", "log", "--pretty=%H %h %s"], workingDirectory=directory
        )

        items = []
        for line in cmd.stdoutStr.splitlines():
            # Find the first whitespace, indicating the start of the short hash
            longHashEndPos = line.find(" ")

            # Find the first whitespace after that, indicating the start of subject
            shortHashEndPos = line.find(" ", longHashEndPos + 1)

            commitLongHash = line[:longHashEndPos].strip()
            commitShortHash = line[longHashEndPos + 1 : shortHashEndPos]
            commitMessage = line[shortHashEndPos + 1 :].rstrip()

            items.append(LogItem(commitLongHash, commitShortHash, commitMessage))

        return items

    @classmethod
    def __getTagInfo(cls, directory, tagPattern=None):
        """
        Collect tag information for the HEAD of a git directory.

        @param directory Git directory
        @param tagPattern Tag pattern to look for, in order to provide matchedTagName/cleanMatchedTagName fields
        @return TagInfo(headHash, tagRefHash, headTagName, matchedTagName, cleanMatchedTagName)
        """
        tagName = None
        tagCleanName = None

        # Get HEAD hash ID
        headHash = ShellCommand.execute(
            ["git", "rev-parse", "--short", "HEAD"], workingDirectory=directory
        ).stdoutStr.rstrip()

        # Get head name (if it's tagged); "describe --exact-match" fails when
        # HEAD is not tagged, which we treat as "no tag"
        headTagName = None
        try:
            headTagName = ShellCommand.execute(
                ["git", "describe", "--exact-match", "--tags", headHash],
                workingDirectory=directory,
            ).stdoutStr.rstrip()
        except CommandFailedException:
            pass

        tagRefHash = None
        if headTagName:
            tagRefHash = ShellCommand.execute(
                ["git", "show-ref", headTagName, "--hash"], workingDirectory=directory
            ).stdoutStr.rstrip()

        # Find a tag which matches given pattern (may be dirty if commits are present before this tag)
        # For example if we're looking for "master*" tags we could get "master-0.32.0-1-g9298258bf" as a result
        # because there are commits after the "master-0.32" tag
        matchedTagName = None
        if tagPattern:
            try:
                matchedTagName = ShellCommand.execute(
                    ["git", "describe", "--match", tagPattern, "--tags"],
                    workingDirectory=directory,
                ).stdoutStr.rstrip()
            except CommandFailedException:
                logger.warning("Could not find any tags which match %r" % tagPattern)

        # We're looking for the last clean tag name which matches the pattern above
        # (e.g. instead of "master-0.32.0-1-g9298258bf", we'll get "master-0.32")
        cleanMatchedTagName = None
        if tagPattern:
            try:
                cleanMatchedTagName = ShellCommand.execute(
                    ["git", "describe", "--match", tagPattern, "--tags", "--abbrev=0"],
                    workingDirectory=directory,
                ).stdoutStr.rstrip()
            except CommandFailedException:
                logger.warning("Could not find any tags which match %r" % tagPattern)

        return TagInfo(
            headHash, tagRefHash, headTagName, matchedTagName, cleanMatchedTagName
        )
|
# -*- coding: utf-8 -*-
"""
@date: 2020/3/4 下午4:00
@file: custom_classifier_dataset.py
@author: zj
@description: 分类器数据集类,可进行正负样本集替换,适用于hard negative mining操作
"""
import numpy as np
import os
import cv2
from PIL import Image
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from utils.util import parse_car_csv
class CustomClassifierDataset(Dataset):
    """Classifier dataset whose negative sample set can be replaced,
    intended for hard negative mining.

    Positive samples occupy indices [0, num_positives); negatives follow.
    """

    def __init__(self, root_dir, transform=None):
        samples = parse_car_csv(root_dir)

        jpeg_images = list()
        positive_list = list()
        negative_list = list()
        for idx in range(len(samples)):
            sample_name = samples[idx]
            jpeg_images.append(cv2.imread(os.path.join(root_dir, 'JPEGImages', sample_name + ".jpg")))

            # <sample>_1.csv holds positive boxes, <sample>_0.csv negatives
            positive_list.extend(self._parse_annotations(
                os.path.join(root_dir, 'Annotations', sample_name + '_1.csv'), idx))
            negative_list.extend(self._parse_annotations(
                os.path.join(root_dir, 'Annotations', sample_name + '_0.csv'), idx))

        self.transform = transform
        self.jpeg_images = jpeg_images
        self.positive_list = positive_list
        self.negative_list = negative_list

    @staticmethod
    def _parse_annotations(annotation_path, image_id):
        """Read one annotation csv and return [{'rect': box, 'image_id': id}, ...].

        Handles the csv being empty (1-D array of length 0) or holding a single
        box (1-D array of length 4), both of which np.loadtxt returns as 1-D.
        Bug fix: dtype=np.int -> dtype=int; the np.int alias was deprecated in
        NumPy 1.20 and removed in 1.24, so the old code crashes on modern NumPy.
        """
        annotations = np.loadtxt(annotation_path, dtype=int, delimiter=' ')
        boxes = list()
        if len(annotations.shape) == 1:
            # Single bounding-box coordinates (or an empty file)
            if annotations.shape[0] == 4:
                boxes.append({'rect': annotations, 'image_id': image_id})
        else:
            for annotation in annotations:
                boxes.append({'rect': annotation, 'image_id': image_id})
        return boxes

    def __getitem__(self, index: int):
        """Return (image_crop, target, annotation_dict); target is 1 for
        positive samples, 0 for negatives."""
        if index < len(self.positive_list):
            # Positive sample
            target = 1
            positive_dict = self.positive_list[index]

            xmin, ymin, xmax, ymax = positive_dict['rect']
            image_id = positive_dict['image_id']

            image = self.jpeg_images[image_id][ymin:ymax, xmin:xmax]
            cache_dict = positive_dict
        else:
            # Negative sample
            target = 0
            idx = index - len(self.positive_list)
            negative_dict = self.negative_list[idx]

            xmin, ymin, xmax, ymax = negative_dict['rect']
            image_id = negative_dict['image_id']

            image = self.jpeg_images[image_id][ymin:ymax, xmin:xmax]
            cache_dict = negative_dict

        if self.transform:
            image = self.transform(image)

        return image, target, cache_dict

    def __len__(self) -> int:
        return len(self.positive_list) + len(self.negative_list)

    def get_transform(self):
        return self.transform

    def get_jpeg_images(self) -> list:
        return self.jpeg_images

    def get_positive_num(self) -> int:
        return len(self.positive_list)

    def get_negative_num(self) -> int:
        return len(self.negative_list)

    def get_positives(self) -> list:
        return self.positive_list

    def get_negatives(self) -> list:
        return self.negative_list

    # Used for hard negative mining: replace the negative sample set
    def set_negative_list(self, negative_list):
        self.negative_list = negative_list
def test(idx):
    """Load the validation classifier dataset and inspect one sample."""
    root_dir = '../../data/classifier_car/val'
    dataset = CustomClassifierDataset(root_dir)
    print('positive num: %d' % dataset.get_positive_num())
    print('negative num: %d' % dataset.get_negative_num())
    print('total num: %d' % len(dataset))

    # Sample ids worth trying: 3 / 66516 / 66517 / 530856
    image, target, cache_dict = dataset[idx]
    print('target: %d' % target)
    print('dict: ' + str(cache_dict))

    pil_image = Image.fromarray(image)
    print(pil_image)
    print(type(pil_image))
def test2():
    """Fetch a single transformed training sample."""
    root_dir = '../../data/classifier_car/train'
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((227, 227)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    dataset = CustomClassifierDataset(root_dir, transform=transform)

    image, target, cache_dict = dataset[230856]
    print('target: %d' % target)
    print('dict: ' + str(cache_dict))
    print('image.shape: ' + str(image.shape))
def test3():
    """Pull one batch of training samples through a DataLoader."""
    root_dir = '../../data/classifier_car/train'
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((227, 227)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    dataset = CustomClassifierDataset(root_dir, transform=transform)
    loader = DataLoader(dataset, batch_size=128, num_workers=8, drop_last=True)

    inputs, targets, cache_dicts = next(iter(loader))
    print(targets)
    print(inputs.shape)
if __name__ == '__main__':
    # Manual smoke test; alternate sample indices / tests kept for reference.
    # test(159622)
    # test(4051)
    test(24768)
    # test2()
    # test3()
|
from direct.directnotify import DirectNotifyGlobal
from toontown.cogdominium.DistCogdoGameAI import DistCogdoGameAI
import CogdoMazeGameGlobals
from direct.distributed.ClockDelta import *
from direct.task import Timer
from toontown.battle import BattleBase
from toontown.building.ElevatorConstants import *
# Seconds between broadcasting 'gameDone' and actually finishing the game
# (see __handleAllAboard below)
GAME_DONE_DELAY = CogdoMazeGameGlobals.FinishDurationSeconds
# NOTE(review): BASE_TOON_UP / JOKE_TOON_UP are not referenced in the visible
# portion of this file -- presumably toon-up rewards consumed elsewhere; confirm.
BASE_TOON_UP = 10
JOKE_TOON_UP = 5
class DistCogdoMazeGameAI(DistCogdoGameAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistCogdoMazeGameAI')
delayIntro = BattleBase.ELEVATOR_T + ElevatorData[ELEVATOR_NORMAL]['openTime']
def __init__(self, air):
DistCogdoGameAI.__init__(self, air)
self.numSuits = (0, 0, 0)
self.timer = Timer.Timer()
self.doorRevealed = False
self.toonsInDoor = []
self.bosses = {}
self.fastMinions = {}
self.slowMinions = {}
self.suitTypes = [self.bosses, self.fastMinions, self.slowMinions]
self.numJokes = {}
def announceGenerate(self):
DistCogdoGameAI.announceGenerate(self)
self.setupSuitsAI()
def setupSuitsAI(self):
bossHp = CogdoMazeGameGlobals.SuitData[0]['hp']
fastMiniHp = CogdoMazeGameGlobals.SuitData[1]['hp']
slowMiniHp = CogdoMazeGameGlobals.SuitData[2]['hp']
serialNum = 0
for i in range(self.numSuits[0]):
self.bosses[serialNum] = bossHp
serialNum += 1
for i in range(self.numSuits[1]):
self.fastMinions[serialNum] = fastMiniHp
serialNum += 1
for i in range(self.numSuits[2]):
self.slowMinions[serialNum] = slowMiniHp
serialNum += 1
def setNumSuits(self, num):
self.numSuits = num
def getNumSuits(self):
return self.numSuits
def requestUseGag(self, x, y, h, timestamp):
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('toonUsedGag', [avId, x, y, h, globalClockDelta.getRealNetworkTime()])
def requestSuitHitByGag(self, suitType, suitNum):
hitAI = self.hitSuitAI(suitType, suitNum)
if not hitAI:
self.notify.warning('Cannot hit suit!')
return
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('suitHitByGag', [avId, suitType, suitNum])
def requestHitBySuit(self, suitType, suitNum, nettime):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
lostHp = CogdoMazeGameGlobals.SuitData[suitType]['toonDamage'] * self.getDifficulty() * 10
av.takeDamage(lostHp)
networkTime = globalClockDelta.getRealNetworkTime()
self.sendUpdate('toonHitBySuit', [avId, suitType, suitNum, networkTime])
if av.getHp() < 1:
self.toonWentSad(avId)
def requestHitByDrop(self):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
lostHp = CogdoMazeGameGlobals.DropDamage
av.takeDamage(lostHp)
self.sendUpdate('toonHitByDrop', [avId])
def requestPickUp(self, pickupNum):
avId = self.air.getAvatarIdFromSender()
av = self.air.doId2do.get(avId)
if av:
now = globalClockDelta.getRealNetworkTime()
if avId in self.numJokes:
self.numJokes[avId] += 1
else:
self.numJokes[avId] = 1
self.sendUpdate('pickUp', [avId, pickupNum, now])
def requestGag(self, coolerIndex):
avId = self.air.getAvatarIdFromSender()
self.sendUpdate('hasGag', [avId, globalClockDelta.getRealNetworkTime()])
def hitSuitAI(self, suitType, suitNum):
cogKey = None
for cogNum in self.suitTypes[suitType].keys():
if cogNum == suitNum:
cogKey = cogNum
break
if cogKey == None:
return 0
cogHp = self.suitTypes[suitType][cogKey]
cogHp -= 1
self.suitTypes[suitType][cogKey] = cogHp
if cogHp <= 0:
del self.suitTypes[suitType][cogKey]
return 1
def handleStart(self):
taskMgr.add(self.__checkGameDone, self.taskName('check-game-done'))
taskMgr.add(self.__checkPlayersTask, self.taskName('check-players-task'))
serverDelay = 1.0
self.timer.startCallback(CogdoMazeGameGlobals.SecondsUntilTimeout + serverDelay, self.__handleGameOver)
taskMgr.doMethodLater(serverDelay, self.clientCountdown, self.taskName('client_countdown'))
taskMgr.add(self.__timeWarningTask, self.taskName('time-warning-task'))
def clientCountdown(self, task):
self.doAction(CogdoMazeGameGlobals.GameActions.Countdown, 0)
return task.done
def __handleGameOver(self):
self.removeAll()
self.gameDone(failed=True)
def __checkGameDone(self, task):
bossesLeft = self.bosses
if len(bossesLeft) == 0:
self.timer.stop()
self.doAction(CogdoMazeGameGlobals.GameActions.OpenDoor, 0)
self.__startTimeout()
return task.done
return task.again
def __startTimeout(self):
    """Once the door opens, give stragglers a fixed window to reach it."""
    self.timer.startCallback(CogdoMazeGameGlobals.SecondsUntilGameEnds,
                             self.__handleTimeout)
def __handleTimeout(self):
    """Door window expired: sad every toon not in the door, then finish.

    Fix: iterate over a snapshot of ``self.toons`` -- ``killToon`` ends up in
    ``__playerDisconnected``, which removes entries from ``self.toons``, so
    the original mutated the list while iterating it (skipping toons).
    """
    stragglers = [toonId for toonId in self.toons
                  if toonId not in self.toonsInDoor]
    for toonId in stragglers:
        self.killToon(toonId)
    self.removeAll()
    self.gameDone()
def __timeWarningTask(self, task):
    """Poll the clock; fire the time alert once, when the threshold is hit."""
    if self.timer.getT() > CogdoMazeGameGlobals.SecondsForTimeAlert:
        return task.again
    self.doAction(CogdoMazeGameGlobals.GameActions.TimeAlert, 0)
    return task.done
def killToon(self, avId):
    """Force the toon sad (drain all HP) and treat it as having left."""
    toon = self.air.doId2do.get(avId)
    if not toon:
        return
    hp = toon.getHp()
    if hp > 0:
        toon.takeDamage(hp)
        self.toonWentSad(avId)
    self.__playerDisconnected(avId)
def __checkPlayersTask(self, task):
    """Recurring task: drop any toon whose avatar object has vanished.

    Fix: iterate over a copy of ``self.toons`` -- ``__playerDisconnected``
    removes entries from the list, so the original mutated it mid-iteration.
    """
    for toonId in list(self.toons):
        if not self.air.doId2do.get(toonId):
            self.__playerDisconnected(toonId)
    return task.again
def __playerDisconnected(self, avId):
    """Remove a toon from the game; if nobody is left, the game is lost."""
    self.sendUpdate('setToonDisconnect', [avId])
    # remove(x) is equivalent to the original pop(index(x)) -- drops the
    # first occurrence, raises ValueError when absent.
    self.toons.remove(avId)
    if not self.toons:
        self.removeAll()
        self.gameDone(failed=True)
def doAction(self, action, data):
    """Broadcast a timestamped game action to every client."""
    timestamp = globalClockDelta.getRealNetworkTime()
    self.sendUpdate('doAction', [action, data, timestamp])
def requestAction(self, action, data):
    """Validate a client-requested action and rebroadcast it when legal."""
    Globals = CogdoMazeGameGlobals
    avId = self.air.getAvatarIdFromSender()
    if action == Globals.GameActions.RevealDoor:
        # The door may only be revealed once per game.
        if self.doorRevealed:
            self.notify.warning("Toon tried to reveal door but it's already revealed! Ignoring.")
        else:
            self.doAction(action, avId)
            self.doorRevealed = True
    elif action == Globals.GameActions.EnterDoor:
        if avId in self.toonsInDoor:
            self.notify.warning('Toon tried to enter into door but already entered! Ignoring.')
            return
        self.doAction(action, avId)
        self.toonsInDoor.append(avId)
        # Last toon in? Wrap the game up.
        if len(self.toonsInDoor) >= len(self.toons):
            self.__handleAllAboard()
    else:
        self.notify.warning("Client requested unknown action '%s'" % action)
def __handleAllAboard(self):
    """Every toon has entered the door: end the game after a short delay."""
    if len(self.toonsInDoor) != len(self.toons):
        self.notify.warning('__handleAllAboard expect all toons aboard!')
        return
    self.removeAll()
    self.sendUpdate('gameDone')
    taskMgr.doMethodLater(GAME_DONE_DELAY, lambda t: self.gameDone(),
                          self.taskName('game-done-delay'))
def toonUpToon(self, toonId):
    """Heal a toon that reached the door; collected jokes add a bonus."""
    if toonId not in self.toonsInDoor:
        return
    toon = self.air.doId2do.get(toonId)
    if not toon:
        return
    bonus = JOKE_TOON_UP * self.numJokes.get(toonId, 0)
    toon.toonUp(min(BASE_TOON_UP + bonus, toon.getMaxHp()))
def removeAll(self):
    """Stop the timer and tear down every task this game registered."""
    for name in ('check-game-done', 'check-players-task',
                 'time-warning-task', 'game-done-delay'):
        taskMgr.remove(self.taskName(name))
    self.timer.stop()
def disable(self):
    """Distributed-object teardown; ensure all our tasks are cleaned up."""
    DistCogdoGameAI.disable(self)
    self.removeAll()
from otp.ai.MagicWordGlobal import *


@magicWord(category=CATEGORY_OVERRIDE)
def endMaze():
    """Magic word: force the active cogdo maze game's exit door open."""
    if not hasattr(simbase.air, 'cogdoGame'):
        return
    game = simbase.air.cogdoGame
    game.doAction(CogdoMazeGameGlobals.GameActions.OpenDoor, 0)
    return 'Completed Maze Game'
from sanic.exceptions import abort
def test_sanic_abort_401(app):
    """Aborting with 401 inside a handler must yield a 401 response.

    `app` is a fixture providing a (sanic_app, <unused>) pair; a throwaway
    route is registered on it and hit through the test client.
    """
    sanic_app, _ = app
    @sanic_app.route("/abort")
    async def abort_request(request):
        abort(401)
    # test_client.get returns (request, response); only the response matters.
    _, response = sanic_app.test_client.get("/abort")
    assert response.status == 401
# TODO test issue #93
|
"""General-purpose test script for image-to-image translation.
Original code: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/test.py
Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'.
It first creates model and dataset given the option. It will hard-code some parameters.
It then runs inference for '--num_test' images and saves the results to an HTML file.
Example (You need to train models first or download pre-trained models from our website):
Test an AODA model (one side only):
python3 test.py --model_suffix _B --dataroot ./scribble_10class/testA --name scribble_aoda --model test --phase test --no_dropout --n_classes 10
The option '--model test' is used for generating results only for one side.
This option will automatically set '--dataset_mode single', which only loads the images from one set.
In contrast, using '--model aoda_gan' requires loading and generating results in both directions,
plus an '--n_classes' value, which is sometimes unnecessary. The results will be saved at ./results/.
Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
Test an aoda model:
python test.py --dataroot ./datasets/scribble --phase test --no_dropout --n_classes 10 --name scribble_aoda --model aoda_gan --direction BtoA
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
import time
if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 1
    opt.batch_size = 1    # test code only supports batch_size = 1
    # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.serial_batches = True
    # no flip; comment this line if results on flipped images are needed.
    opt.no_flip = True
    # no visdom display; the test code saves the results to a HTML file.
    opt.display_id = -1
    # create a dataset given opt.dataset_mode and other options
    dataset = create_dataset(opt)
    # create a model given opt.model and other options
    model = create_model(opt)
    # regular setup: load and print networks; create schedulers
    model.setup(opt)
    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(
        opt.phase, opt.epoch))  # define the website directory
    if opt.load_iter > 0:  # load_iter is 0 by default
        web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
    print('creating web directory', web_dir)
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (
        opt.name, opt.phase, opt.epoch))
    # test with eval mode. This only affects layers like batchnorm and dropout.
    if opt.eval:
        model.eval()
    consume_time = []
    for i, data in enumerate(dataset):
        if i >= opt.num_test:  # only apply our model to opt.num_test images.
            break
        model.set_input(data)  # unpack data from data loader
        start_t = time.time()
        model.test()  # run inference
        end_t = time.time()
        consume_time.append(end_t - start_t)
        visuals = model.get_current_visuals()  # get image results
        img_path = model.get_image_paths()  # get image paths
        if i % 5 == 0:  # save images to an HTML file
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path,
                    aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
    webpage.save()  # save the HTML
    # Fix: guard the average -- the original divided by len(consume_time)
    # unconditionally and crashed with ZeroDivisionError when no image was
    # processed (opt.num_test == 0 or an empty dataset).
    if consume_time:
        sum_time = sum(consume_time)
        avg_time = sum_time / len(consume_time)
        print('Total runtime {}s for {} images, average runtime {}'.format(
            sum_time, len(consume_time), avg_time))
    else:
        print('No images were processed.')
|
def guess_number():
answer=None
our_number=21
while answer != our_number:
answer=int(raw_input('guess me'))
if answer == our_number:
print "correct"
return
elif answer > our_number:
print "no, higher"
else:
print "no, lower"
return our_number
if __name__ == '__main__':
guess_number()
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module contains class ``PandasOnPythonDataframe``.
``PandasOnPythonDataframe`` is dataframe class with pandas storage format and Python engine.
"""
from modin.core.dataframe.pandas.dataframe.dataframe import PandasDataframe
from ..partitioning.partition_manager import PandasOnPythonDataframePartitionManager
class PandasOnPythonDataframe(PandasDataframe):
    """
    Dataframe with pandas storage format executed on the Python engine.

    No behavior is defined here beyond what ``PandasDataframe`` already
    provides; the class only plugs in the Python-engine partition manager.

    Parameters
    ----------
    partitions : np.ndarray
        A 2D NumPy array of partitions.
    index : sequence
        The index for the dataframe. Converted to a ``pandas.Index``.
    columns : sequence
        The columns object for the dataframe. Converted to a ``pandas.Index``.
    row_lengths : list, optional
        The length of each partition in the rows. The "height" of
        each of the block partitions. Is computed if not provided.
    column_widths : list, optional
        The width of each partition in the columns. The "width" of
        each of the block partitions. Is computed if not provided.
    dtypes : pandas.Series, optional
        The data types for the dataframe columns.
    """

    # Engine-specific hook: route all partition handling through the
    # Python-engine partition manager.
    _partition_mgr_cls = PandasOnPythonDataframePartitionManager
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Job entity classes.
"""
from everest.entities.base import Entity
from thelma.entities.iso import ISO_STATUS
from thelma.entities.iso import IsoJobPreparationPlate
from thelma.tools.semiconstants import get_item_status_managed
from thelma.utils import get_utc_time
__docformat__ = 'reStructuredText en'
# Public names exported by ``from thelma.entities.job import *``.
__all__ = ['JOB_TYPES',
           'Job',
           'ExperimentJob',
           'IsoJob']
class JOB_TYPES(object):
    """
    Namespace enumerating the valid job types.
    """
    #: Base type.
    BASE = 'BASE'
    #: Experiment jobs contain :class:`Experiment`s that are handled
    #: together. All experiments in the job must belong to the same
    #: :class:`ExperimentMetadata`.
    EXPERIMENT = 'EXPERIMENT'
    #: ISO jobs contain :class:`Iso`s that are processed together. They
    #: might share stock racks (e.g. for controls in screenings). All ISOs
    #: in the job must belong to the same :class:`IsoRequest`.
    ISO = 'ISO'
class Job(Entity):
    """
    Abstract base class for jobs.

    Jobs group entities that represent (laboratory) tasks. Tasks belonging
    to the same job are typically conducted together in one run
    (physically). Items belonging to the same job might share properties
    such as a layout or a rack.

    Apart from storing the creation time there is no status tracking at
    the moment.
    """
    #: Defines the entity type grouped by this job (see :class:`JOB_TYPES`).
    job_type = None
    #: The (human-readable) label of this job.
    label = None
    #: The user this job is assigned to (:class:`thelma.entities.user.User`).
    user = None
    #: A timestamp storing the time of creation.
    creation_time = None

    def __init__(self, label, user, creation_time=None, job_type=None, **kw):
        """
        Constructor. May only be called on concrete subclasses.

        :raises NotImplementedError: When instantiated directly.
        """
        if self.__class__ is Job:
            raise NotImplementedError('Abstract class')
        Entity.__init__(self, **kw)
        self.job_type = JOB_TYPES.BASE if job_type is None else job_type
        self.label = label
        self.user = user
        self.creation_time = \
            get_utc_time() if creation_time is None else creation_time

    def __str__(self):
        return self.label

    def __repr__(self):
        return '<%s id: %s, label: %s, user: %s>' \
               % (self.__class__.__name__, self.id, self.label, self.user)
class ExperimentJob(Job):
    """
    A job grouping :class:`Experiment` entities. All experiments must
    belong to the same :class:`ExperimentDesign`.
    """
    #: A list of ExperimentRack objects (:class:`ExperimentRack`)
    #: associated with this job.
    experiments = None

    def __init__(self, label, user, experiments,
                 job_type=JOB_TYPES.EXPERIMENT, **kw):
        """
        Constructor.

        :raises ValueError: If *experiments* is missing or empty.
        """
        if experiments is None or len(experiments) < 1:
            raise ValueError('An experiment job must consist of at least ' \
                             '1 experiment!')
        Job.__init__(self, label=label, user=user, job_type=job_type, **kw)
        self.experiments = experiments

    def __len__(self):
        return len(self.experiments)

    def __iter__(self):
        return iter(self.experiments)
class IsoJob(Job):
    """
    A job class grouping :class:`Iso` entities. All ISOs must belong to the
    same :class:`IsoRequest`. They might share an :class:`IsoJobStockRack`
    and an :class:`IsoJobPreparationPlate`.
    """
    #: The ISOs belonging to this job. Always set per instance in
    #: ``__init__``; kept *None* at class level instead of the original
    #: mutable ``[]`` (a shared class attribute).
    isos = None
    #: The maximum number of ISO stock racks for this ISO job.
    number_stock_racks = None
    #: The rack containing the stock tubes for the controls that are
    #: used in this job (not every ISO job needs some, list of
    #: :class:`thelma.entities.iso.IsoJobStockRack`)
    iso_job_stock_racks = None
    #: The plates used to predilute controls before there are transferred
    #: to the ISO plates. The samples in this plate serve as source for all
    #: ISOs in this job (not every ISO job needs some, list of
    #: :class:`thelma.entities.iso.IsoJobPreparationPlate`).
    iso_job_preparation_plates = None
    #: Contains the worklists specific to the (lab) ISO job processing. Can
    #: be *None*; :class:`thelma.entities.liquidtransfer.WorklistSeries`
    worklist_series = None

    def __init__(self, label, user, isos, number_stock_racks,
                 worklist_series=None, **kw):
        """
        Constructor.

        :raises ValueError: If *isos* is missing or empty.
        """
        if isos is None or len(isos) < 1:
            raise ValueError('An ISO job must consist of at least 1 ISO!')
        Job.__init__(self, label=label, user=user, job_type=JOB_TYPES.ISO,
                     **kw)
        self.isos = isos
        self.number_stock_racks = number_stock_racks
        self.worklist_series = worklist_series
        # None means "not explicitly set"; __get_status then derives the
        # status lazily from the preparation plates.
        self.__status = None

    @property
    def iso_request(self):
        """
        ISO request this job belongs to.

        :raises ValueError: If the ISOs reference different ISO requests.
        """
        iso_request = None
        for iso in self.isos:
            if iso_request is None:
                iso_request = iso.iso_request
            elif not iso_request == iso.iso_request:
                msg = 'Integrity Error: The ISOs of this ISO job belong to ' \
                      'different ISO requests (%s and %s).' \
                      % (iso.iso_request, iso_request)
                raise ValueError(msg)
        return iso_request

    def add_preparation_plate(self, plate, rack_layout):
        """
        Adds an :class:`IsoJobPreparationPlate`.

        :param plate: The plate to be added.
        :type plate: :class:`thelma.entities.rack.Plate`
        :param rack_layout: The rack layout containing the plate data.
        :type rack_layout: :class:`thelma.entities.racklayout.RackLayout`
        """
        # NOTE(review): the created entity is not stored here; presumably
        # its constructor registers itself with this job -- confirm.
        IsoJobPreparationPlate(iso_job=self, rack=plate,
                               rack_layout=rack_layout)

    def __get_status(self):
        # An explicitly set status wins; otherwise derive the status from
        # the preparation plates. Fix: the original only handled a missing
        # attribute, but __init__ initializes __status to None, so the lazy
        # derivation was unreachable and the property always returned None
        # until the setter was used. Treat None as "unset" as well.
        try:
            status = self.__status
        except AttributeError:
            # Instance created without running __init__ (e.g. ORM loading).
            status = None
        if status is None:
            pp = self.preparation_plates
            # Detect if this ISO job is done (it is sufficient to check the
            # status of the first preparation plate).
            item_status_managed = get_item_status_managed()
            if len(pp) > 0 and pp[0].status == item_status_managed:
                status = ISO_STATUS.DONE
            else:
                status = ISO_STATUS.QUEUED
        return status

    def __set_status(self, status):
        self.__status = status

    #: Status flag merely used for job processing
    # FIXME: Reconcile entity ISO status flags with ISO processing status
    # flags used by the client!
    status = property(__get_status, __set_status)

    @property
    def preparation_plates(self):
        """
        Read only access to the racks of the preparation plates in this ISO
        job.
        """
        return [ipp.rack for ipp in self.iso_job_preparation_plates]

    def __len__(self):
        return len(self.isos)

    def __iter__(self):
        return iter(self.isos)
|
import csv

# Collect five student records interactively and persist them as CSV.
# Fix: use context managers so the file handles are closed even when an
# input conversion (int/float) raises; the original left them open.
with open("student.csv", "w", newline="") as file:
    fwriter = csv.writer(file)
    fwriter.writerow(["rollno", "name", "marks"])
    for i in range(5):
        print("Student Record", i + 1)
        rollno = int(input("Enter rollno:"))
        name = input("Enter Name:")
        marks = float(input("Enter Marks:"))
        fwriter.writerow([rollno, name, marks])

# Read the file back and echo every record (header row included).
with open("student.csv", "r", newline="") as file:
    freader = csv.reader(file)
    print("The file contains the following records:")
    for rec in freader:
        print(rec)
|
"""Test the LED device."""
from mpf.core.rgb_color import RGBColor
from mpf.tests.MpfTestCase import MpfTestCase, test_config
class TestDeviceLight(MpfTestCase):
    """Exercises the light device: colors, priority stacks, fades and
    color/brightness correction."""

    def get_config_file(self):
        return 'light.yaml'

    def get_machine_path(self):
        return 'tests/machine_files/light/'

    def _check_color_correction(self, led_name):
        """Shared body for the two color-correction tests.

        Drives the named LED through white / mid-gray / black and asserts
        that the reported color stays uncorrected while the hardware
        drivers receive the corrected channel values. Extracted because
        test_default_color_correction and test_color_correction were
        25-line copy-paste duplicates differing only in the LED name.
        """
        led = self.machine.lights[led_name]
        led.color(RGBColor("white"))
        self.advance_time_and_run()
        # color is uncorrected
        self.assertLightColor(led_name, RGBColor("white"))
        # corrected color
        self.assertEqual(RGBColor([210, 184, 159]), led.color_correct(led.get_color()))
        # check hardware
        self.assertEqual(210 / 255.0, led.hw_drivers["red"][0].current_brightness)
        self.assertEqual(184 / 255.0, led.hw_drivers["green"][0].current_brightness)
        self.assertEqual(159 / 255.0, led.hw_drivers["blue"][0].current_brightness)
        led.color(RGBColor([128, 128, 128]))
        self.advance_time_and_run()
        self.assertLightColor(led_name, [128, 128, 128])
        self.assertEqual(RGBColor([96, 83, 70]), led.color_correct(led.get_color()))
        self.assertEqual(96 / 255.0, led.hw_drivers["red"][0].current_brightness)
        self.assertEqual(83 / 255.0, led.hw_drivers["green"][0].current_brightness)
        self.assertEqual(70 / 255.0, led.hw_drivers["blue"][0].current_brightness)
        led.color(RGBColor("black"))
        self.advance_time_and_run()
        self.assertLightColor(led_name, [0, 0, 0])
        self.assertEqual(RGBColor([0, 0, 0]), led.color_correct(led.get_color()))
        self.assertEqual(0 / 255.0, led.hw_drivers["red"][0].current_brightness)
        self.assertEqual(0 / 255.0, led.hw_drivers["green"][0].current_brightness)
        self.assertEqual(0 / 255.0, led.hw_drivers["blue"][0].current_brightness)

    def test_default_on_color(self):
        """'on' (string or call) maps to red; off maps to black."""
        led = self.machine.lights["led1"]
        # color on should map to red
        led.color("on")
        self.assertLightColor("led1", RGBColor("red"))
        # turn off again
        led.off()
        self.assertLightColor("led1", RGBColor("black"))
        # on should also map to red
        led.on()
        self.assertLightColor("led1", RGBColor("red"))
        # on with half brightness should map to dimmed red
        led.on(127)
        self.assertLightColor("led1", RGBColor("red%50"))

    @test_config("light_default_color_correction.yaml")
    def test_default_color_correction(self):
        """Correction applied through the machine-wide default profile."""
        self._check_color_correction("led1")

    def test_consecutive_fades(self):
        """Fades repeated on one key, then overlapping keyed fade removal."""
        self.assertLightColor("led1", [0, 0, 0])
        led = self.machine.lights["led1"]
        led.color(RGBColor("red"), fade_ms=1000, key="one")
        self.advance_time_and_run(.5)
        self.assertLightColor("led1", [127, 0, 0])
        self.advance_time_and_run(1)
        self.assertLightColor("led1", "red")
        # play another color with the same key
        led.color(RGBColor("red"), fade_ms=1000, key="one")
        self.advance_time_and_run(.1)
        self.assertLightColor("led1", "red")
        self.advance_time_and_run(1)
        # remove key and play again
        led.remove_from_stack_by_key("one", fade_ms=1000)
        led.color(RGBColor("red"), fade_ms=1000, key="one")
        self.advance_time_and_run(.1)
        self.assertLightColor("led1", "red")
        self.advance_time_and_run(1)
        led.remove_from_stack_by_key("one", fade_ms=1000)
        self.assertLightColor("led1", "red")
        self.advance_time_and_run(.5)
        self.assertLightColor("led1", [128, 0, 0])
        self.advance_time_and_run(.6)
        self.assertLightColor("led1", [0, 0, 0])
        self.assertFalse(led.stack)
        led.color(RGBColor("blue"), key="lower", priority=1, fade_ms=10000)
        led.color(RGBColor("red"), key="upper", priority=2, fade_ms=1000)
        self.advance_time_and_run(.5)
        self.assertLightColor("led1", [127, 0, 0])
        self.advance_time_and_run(.51)
        self.assertLightColor("led1", "red")
        self.advance_time_and_run(2)  # lower is at 3/10
        led.remove_from_stack_by_key("upper", fade_ms=4000)
        self.assertLightColor("led1", "red")
        self.advance_time_and_run(2)  # lower is at 5/10 -> [0, 0, 127]. upper at 2/4 (50% alpha)
        self.assertLightColor("led1", [128, 0, 63])
        self.advance_time_and_run(2)  # lower is at 7/10. upper is gone
        self.assertLightColor("led1", [0, 0, 178])
        self.advance_time_and_run(3)  # lower is at 10/10. upper is gone
        self.assertLightColor("led1", [0, 0, 255])
        self.assertEqual(1, len(led.stack))

    def test_color_and_stack(self):
        """Priority/key ordering rules of the color stack."""
        led1 = self.machine.lights["led1"]
        # set led1 to red and check the color and stack
        led1.color('red')
        # need to advance time since LEDs are updated once per frame via a
        # clock.schedule_interval
        self.advance_time_and_run()
        self.assertLightColor("led1", "red")
        color_setting = led1.stack[0]
        self.assertEqual(color_setting.priority, 0)
        self.assertEqual(color_setting.start_color, RGBColor('off'))
        self.assertEqual(color_setting.dest_time, 0)
        self.assertEqual(color_setting.dest_color, RGBColor('red'))
        self.assertEqual(led1.get_color(), RGBColor('red'))
        self.assertFalse(color_setting.key)
        # test get_color()
        self.assertEqual(led1.get_color(), RGBColor('red'))
        # set to blue & test
        led1.color('blue')
        self.advance_time_and_run()
        self.assertLightColor("led1", "blue")
        color_setting = led1.stack[0]
        self.assertEqual(color_setting.priority, 0)
        # self.assertEqual(color_setting.start_color, RGBColor('red'))
        self.assertEqual(color_setting.dest_time, 0)
        self.assertEqual(color_setting.dest_color, RGBColor('blue'))
        self.assertEqual(led1.get_color(), RGBColor('blue'))
        self.assertFalse(color_setting.key)
        self.assertEqual(len(led1.stack), 1)
        # set it to green, at a higher priority, but with no key. Stack should
        # reflect the higher priority, but still be len 1 since the key is the
        # same (None)
        led1.color('green', priority=100)
        self.advance_time_and_run()
        self.assertLightColor("led1", "green")
        self.assertEqual(len(led1.stack), 1)
        color_setting = led1.stack[0]
        self.assertEqual(color_setting.priority, 100)
        # self.assertEqual(color_setting.start_color, RGBColor('blue'))
        self.assertEqual(color_setting.dest_time, 0)
        self.assertEqual(color_setting.dest_color, RGBColor('green'))
        self.assertEqual(led1.get_color(), RGBColor('green'))
        self.assertFalse(color_setting.key)
        # set led1 orange, lower priority, but with a key, so led should stay
        # green, but stack len should be 2
        led1.color('orange', key='test')
        self.advance_time_and_run()
        self.assertLightColor("led1", "green")
        self.assertEqual(len(led1.stack), 2)
        color_setting = led1.stack[0]
        self.assertEqual(color_setting.priority, 100)
        # self.assertEqual(color_setting.start_color, RGBColor('blue'))
        self.assertEqual(color_setting.dest_time, 0)
        self.assertEqual(color_setting.dest_color, RGBColor('green'))
        self.assertEqual(led1.get_color(), RGBColor('green'))
        self.assertFalse(color_setting.key)
        # remove the orange key from the stack
        led1.remove_from_stack_by_key('test')
        self.assertEqual(len(led1.stack), 1)
        # clear the stack
        led1.clear_stack()
        self.assertEqual(len(led1.stack), 0)
        # test the stack ordering with different priorities & keys
        led1.color('red', priority=200, key='red')
        self.advance_time_and_run()
        self.assertLightColor("led1", "red")
        led1.color('blue', priority=300, key='blue')
        self.advance_time_and_run()
        self.assertLightColor("led1", "blue")
        led1.color('green', priority=200, key='green')
        self.advance_time_and_run()
        self.assertLightColor("led1", "blue")
        led1.color('orange', priority=100, key='orange')
        self.advance_time_and_run()
        self.assertLightColor("led1", "blue")
        # verify the stack is right
        # order should be priority, then key, so
        # should be: blue, green, red, orange
        self.assertEqual(RGBColor('blue'), led1.stack[0].dest_color)
        self.assertEqual(RGBColor('red'), led1.stack[1].dest_color)
        self.assertEqual(RGBColor('green'), led1.stack[2].dest_color)
        self.assertEqual(RGBColor('orange'), led1.stack[3].dest_color)
        # test that a replacement key slots in properly
        led1.color('red', priority=300, key='red')
        self.advance_time_and_run()
        self.assertLightColor("led1", "red")
        self.assertEqual(RGBColor('red'), led1.stack[0].dest_color)
        self.assertEqual(RGBColor('blue'), led1.stack[1].dest_color)
        self.assertEqual(RGBColor('green'), led1.stack[2].dest_color)
        self.assertEqual(RGBColor('orange'), led1.stack[3].dest_color)

    def test_named_colors(self):
        """Custom color names from the config resolve to their RGB values."""
        led1 = self.machine.lights["led1"]
        led1.color('jans_red')
        self.machine_run()
        self.assertLightColor(led1.name, "jans_red")
        self.assertLightColor(led1.name, [251, 23, 42])

    def test_fades(self):
        """Single fade lifecycle plus a config-supplied default fade."""
        led1 = self.machine.lights["led1"]
        led1.color('red', fade_ms=2000)
        self.machine_run()
        # check the stack before the fade starts
        color_setting = led1.stack[0]
        self.assertEqual(color_setting.priority, 0)
        self.assertEqual(color_setting.start_color, RGBColor('off'))
        self.assertEqual(color_setting.dest_time,
                         color_setting.start_time + 2)
        self.assertEqual(color_setting.dest_color, RGBColor('red'))
        self.assertEqual(led1.get_color(), RGBColor('off'))
        self.assertFalse(color_setting.key)
        # advance to half way through the fade
        self.advance_time_and_run(1)
        self.assertTrue(led1.fade_in_progress)
        self.assertEqual(color_setting.priority, 0)
        self.assertEqual(color_setting.start_color, RGBColor('off'))
        self.assertEqual(color_setting.dest_time,
                         color_setting.start_time + 2)
        self.assertEqual(color_setting.dest_color, RGBColor('red'))
        self.assertEqual(led1.get_color(), RGBColor((127, 0, 0)))
        self.assertFalse(color_setting.key)
        self.assertLightColor("led1", [127, 0, 0])
        # advance to after the fade is done
        self.advance_time_and_run(2)
        self.assertFalse(led1.fade_in_progress)
        self.assertEqual(color_setting.priority, 0)
        self.assertEqual(color_setting.start_color, RGBColor('off'))
        self.assertEqual(color_setting.dest_color, RGBColor('red'))
        self.assertEqual(led1.get_color(), RGBColor('red'))
        self.assertFalse(color_setting.key)
        self.assertLightColor("led1", "red")
        led = self.machine.lights["led4"]
        self.assertEqual(1000, led.default_fade_ms)
        led.color('white')
        self.advance_time_and_run(.02)
        self.advance_time_and_run(.5)
        self.assertLightColor("led4", [132, 132, 132])
        self.advance_time_and_run(.5)
        self.assertLightColor("led4", [255, 255, 255])

    def test_restore_to_fade_in_progress(self):
        """Removing an overriding entry resumes the underlying fade."""
        led1 = self.machine.lights["led1"]
        led1.color('red', fade_ms=4000, priority=50)
        self.advance_time_and_run(0.02)
        self.advance_time_and_run(1)
        # fade is 25% complete
        self.assertLightColor("led1", [65, 0, 0])
        # higher priority color which goes on top of fade
        led1.color('blue', key='test', priority=100)
        self.advance_time_and_run(1)
        self.assertLightColor("led1", 'blue')
        self.assertFalse(led1.fade_in_progress)
        led1.remove_from_stack_by_key('test')
        # should go back to the fade in progress, which is now 75% complete
        self.advance_time_and_run(1)
        self.assertLightColor("led1", [192, 0, 0])
        self.assertTrue(led1.fade_in_progress)
        # go to 1 sec after fade and make sure it finished
        self.advance_time_and_run(2)
        self.assertLightColor("led1", 'red')
        self.assertFalse(led1.fade_in_progress)

    def test_multiple_concurrent_fades(self):
        """A second fade starts from wherever the first fade currently is."""
        # start one fade, and while that's in progress, start a second fade.
        # the second fade should start from the wherever the current fade was.
        led1 = self.machine.lights["led1"]
        led1.color('red', fade_ms=4000, priority=50)
        self.advance_time_and_run(0.02)
        self.advance_time_and_run(1)
        # fade is 25% complete
        self.assertLightColor("led1", [65, 0, 0])
        # start a blue 2s fade
        led1.color('blue', key='test', fade_ms=2000, priority=100)
        # advance 1s, since we're half way to the blue fade from the 25% red,
        # we should now be at 12.5% red and 50% blue
        # Note: technically the red fade should continue even as it's being
        # faded to blue, but meh, we'll handle that with alpha channels in the
        # future
        self.advance_time_and_run(1)
        self.assertLightColor("led1", [33, 0, 127])
        # advance past the end
        self.advance_time_and_run(2)
        self.assertLightColor("led1", 'blue')
        self.assertFalse(led1.fade_in_progress)

    def test_color_correction(self):
        """Correction configured directly on the light itself."""
        self._check_color_correction("led_corrected")

    def test_non_rgb_leds(self):
        """Channel mapping for bgr, rgbw and white-only lights."""
        # test bgr
        led = self.machine.lights["led2"]
        led.color(RGBColor((11, 23, 42)))
        self.advance_time_and_run(1)
        self.assertEqual(42 / 255, led.hw_drivers["blue"][0].current_brightness)
        self.assertEqual('led-2', led.hw_drivers["blue"][0].number)
        self.assertEqual(23 / 255, led.hw_drivers["green"][0].current_brightness)
        self.assertEqual('led-3', led.hw_drivers["green"][0].number)
        self.assertEqual(11 / 255, led.hw_drivers["red"][0].current_brightness)
        self.assertEqual('led-4', led.hw_drivers["red"][0].number)
        led = self.machine.lights["led_bgr_2"]
        led.color(RGBColor((11, 23, 42)))
        self.advance_time_and_run(1)
        self.assertEqual(42 / 255, led.hw_drivers["blue"][0].current_brightness)
        self.assertEqual('led-42-r', led.hw_drivers["blue"][0].number)
        self.assertEqual(23 / 255, led.hw_drivers["green"][0].current_brightness)
        self.assertEqual('led-42-g', led.hw_drivers["green"][0].number)
        self.assertEqual(11 / 255, led.hw_drivers["red"][0].current_brightness)
        self.assertEqual('led-42-b', led.hw_drivers["red"][0].number)
        # test rgbw
        led = self.machine.lights["led3"]
        led.color(RGBColor((11, 23, 42)))
        self.advance_time_and_run(1)
        self.assertLightColor("led2", [11, 23, 42])
        self.assertEqual(11 / 255, led.hw_drivers["white"][0].current_brightness)
        self.assertEqual('led-10', led.hw_drivers["white"][0].number)
        # test www light
        led = self.machine.lights["led_www"]
        led.on(128)
        self.advance_time_and_run(1)
        self.assertLightColor("led_www", [128, 128, 128])
        self.assertEqual(128 / 255, led.hw_drivers["white"][0].current_brightness)
        self.assertEqual('led-23-r', led.hw_drivers["white"][0].number)
        self.assertEqual(128 / 255, led.hw_drivers["white"][1].current_brightness)
        self.assertEqual('led-23-g', led.hw_drivers["white"][1].number)
        self.assertEqual(128 / 255, led.hw_drivers["white"][2].current_brightness)
        self.assertEqual('led-23-b', led.hw_drivers["white"][2].number)

    def test_brightness_correction(self):
        """Machine-var 'brightness' scales hardware output, not the color."""
        led = self.machine.lights["led1"]
        led.color(RGBColor((100, 100, 100)))
        self.advance_time_and_run(1)
        self.assertLightColor("led1", [100, 100, 100])
        self.assertEqual(100 / 255.0, led.hw_drivers["red"][0].current_brightness)
        self.assertEqual(100 / 255.0, led.hw_drivers["green"][0].current_brightness)
        self.assertEqual(100 / 255.0, led.hw_drivers["blue"][0].current_brightness)
        self.machine.variables.set_machine_var("brightness", 0.8)
        self.advance_time_and_run(.1)
        led.color(RGBColor((100, 100, 100)))
        self.advance_time_and_run(1)
        self.assertLightColor("led1", [100, 100, 100])
        self.assertEqual(80 / 255.0, led.hw_drivers["red"][0].current_brightness)
        self.assertEqual(80 / 255.0, led.hw_drivers["green"][0].current_brightness)
        self.assertEqual(80 / 255.0, led.hw_drivers["blue"][0].current_brightness)
class TestLightOnDriver(MpfTestCase):
    """Light mapped onto a coil: on/off should enable/disable the driver."""

    def get_config_file(self):
        return 'lights_on_drivers.yaml'

    def get_machine_path(self):
        return 'tests/machine_files/light/'

    def get_platform(self):
        # no forced platform -- we are exercising the drivers platform itself
        return False

    def test_driver_platform(self):
        """Toggling the light toggles the underlying driver state."""
        hw_driver = self.machine.coils["coil_01"].hw_driver
        light = self.machine.lights["light_on_driver"]
        self.assertEqual("disabled", hw_driver.state)
        light.on()
        self.assertEqual("enabled", hw_driver.state)
        light.off()
        self.assertEqual("disabled", hw_driver.state)
|
from utils import read_file
def calc_fuel(mass):
    """Fuel needed to launch a module of the given mass (AoC 2019 day 1)."""
    # (mass - 6) // 3 is the same integer as mass // 3 - 2 for every int.
    return (mass - 6) // 3
def calc_fuel_recursive(mass):
    """Total fuel for a module, including the fuel needed to carry the fuel.

    Recursive formulation of the original accumulate-while-positive loop;
    the per-step formula (mass // 3 - 2) is inlined so the function is
    self-contained.
    """
    step = mass // 3 - 2
    if step <= 0:
        return 0
    return step + calc_fuel_recursive(step)
print("#--- part1 ---#")
# Sanity checks from the puzzle statement.
assert calc_fuel(12) == 2
assert calc_fuel(14) == 2
assert calc_fuel(1969) == 654
assert calc_fuel(100756) == 33583
print(sum(calc_fuel(int(line)) for line in read_file('01.txt')))
print("#--- part2 ---#")
assert calc_fuel_recursive(14) == 2
assert calc_fuel_recursive(1969) == 966
assert calc_fuel_recursive(100756) == 50346
print(sum(calc_fuel_recursive(int(line)) for line in read_file('01.txt')))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import sys
# Module-level logger that emits INFO and above to stdout.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
_stdout_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(_stdout_handler)
class TabularNet(nn.Module):
    """Feed-forward network for mixed continuous/categorical tabular data.

    Columns flagged in ``cat_mask`` are passed through per-column embeddings;
    the remaining (continuous) columns are used as-is. Features are
    concatenated in column order, run through fully-connected blocks, and the
    sigmoid output is rescaled to the ``[y_min, y_max]`` range.
    """

    def __init__(self, n_cont, n_cat, emb_sz=100, dropout_p=0.1, layers=None,
                 cat_mask=None, cat_dim=None, y_min=0., y_max=1.):
        """
        :param n_cont: number of continuous input columns
        :param n_cat: number of categorical input columns
        :param emb_sz: embedding width used for every categorical column
        :param dropout_p: dropout probability shared by all dropout layers
        :param layers: hidden layer sizes; defaults to [200, 100]
        :param cat_mask: per-column flags, True marks a categorical column
        :param cat_dim: per-column cardinality (read only where cat_mask is True)
        :param y_min: lower bound of the rescaled output
        :param y_max: upper bound of the rescaled output
        """
        super().__init__()
        # Mutable default arguments ([], [200, 100]) would be shared across
        # every instance; use None sentinels and build fresh lists instead.
        if layers is None:
            layers = [200, 100]
        if cat_mask is None:
            cat_mask = []
        if cat_dim is None:
            cat_dim = []
        self.cat_mask = cat_mask
        self.cat_dim = cat_dim
        self.y_min = y_min
        self.y_max = y_max
        self.n_cat = n_cat
        self.n_cont = n_cont
        # One embedding per categorical column, ordered by column index.
        emb_dim = [cat_dim[ii] for ii in range(len(cat_mask)) if cat_mask[ii]]
        self.embeddings = nn.ModuleList(
            [nn.Embedding(c_dim, emb_sz) for c_dim in emb_dim])
        # MLP head: Linear -> BatchNorm -> Dropout -> ReLU per hidden layer,
        # then a final BN/Dropout/Linear/Sigmoid producing one output.
        modules = []
        prev_size = n_cont + n_cat * emb_sz
        for l in layers:
            modules.append(nn.Linear(prev_size, l))
            modules.append(nn.BatchNorm1d(l))
            modules.append(nn.Dropout(dropout_p))
            modules.append(nn.ReLU(inplace=True))
            prev_size = l
        modules.append(nn.BatchNorm1d(prev_size))
        modules.append(nn.Dropout(dropout_p))
        modules.append(nn.Linear(prev_size, 1))
        modules.append(nn.Sigmoid())
        self.m_seq = nn.Sequential(*modules)
        self.emb_drop = nn.Dropout(dropout_p)
        # NOTE(review): bn_cont is constructed but never applied in forward()
        # (the original had the call commented out); kept for state_dict
        # compatibility — confirm whether it should be applied.
        self.bn_cont = nn.BatchNorm1d(n_cont)

    def forward(self, x_in):
        """Forward pass on a (batch, n_columns) tensor; returns (batch, 1)."""
        logging.getLogger(__name__).debug("Forward pass on %s", x_in.shape)
        x = None
        ee = 0  # index into self.embeddings (advances on categorical columns)
        for ii in range(len(self.cat_mask)):
            if self.cat_mask[ii]:
                # Use x_in.device instead of get_device(): get_device() is
                # only meaningful for CUDA tensors and fails for CPU inputs.
                x_e = self.embeddings[ee](
                    x_in[:, ii].to(device=x_in.device, dtype=torch.long))
                x = x_e if x is None else torch.cat([x, x_e], 1)
                # NOTE(review): dropout is applied to the whole accumulated
                # tensor after every categorical column (original behavior,
                # preserved) — repeated application may be unintended.
                x = self.emb_drop(x)
                ee = ee + 1
            else:
                col = torch.unsqueeze(x_in[:, ii], 1)
                x = col if x is None else torch.cat([x, col], 1)
        # Rescale the sigmoid output from [0, 1] to [y_min, y_max].
        return self.m_seq(x) * (self.y_max - self.y_min) + self.y_min
from __future__ import unicode_literals
from lib2to3.fixer_base import BaseFix
from .utils import find_indentation
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as symbols
from .utils import (get_whitespace_before_definition, has_parent,
tuplize_comments)
class FixBlankLines(BaseFix):
    """Normalize blank lines between definitions (PEP 8).

    Top-level function and class definitions get two blank lines between
    them, definitions nested inside a class or function get one, and extra
    blank lines around simple statements are trimmed down.
    """

    def match(self, node):
        # Match classes, functions, decorated definitions and simple
        # statements. Plain (non-decorated) defs/classes are matched
        # directly; decorated ones are matched via their `decorated` parent
        # so the decorator and definition move as a single unit.
        plain_func = (node.type == symbols.funcdef
                      and node.parent.type != symbols.decorated)
        plain_class = (node.type == symbols.classdef
                       and node.parent.type != symbols.decorated)
        return bool(plain_func or plain_class
                    or node.type == symbols.decorated
                    or node.type == symbols.simple_stmt)

    def transform(self, node, results):
        # The newlines to rewrite live either in this node's own prefix or
        # in the prefix of the preceding whitespace node.
        if node.prefix.count('\n'):
            newline_node = node
        else:
            newline_node = get_whitespace_before_definition(node)
            if not newline_node:
                # Nothing precedes this node; it must be first in the file.
                return
        if newline_node.type in (token.INDENT, token.NEWLINE):
            # Indent/newline tokens keep indentation outside the prefix, so
            # there is none to restore; dedents carry it in the prefix.
            curr_indent = ''
        else:
            curr_indent = find_indentation(node)
        min_lines, max_lines = self.get_newline_limits(node)
        fixed_prefix = self.trim_comments(
            curr_indent, newline_node.prefix, min_lines, max_lines)
        if newline_node.prefix != fixed_prefix:
            newline_node.prefix = fixed_prefix
            newline_node.changed()

    def get_newline_limits(self, node):
        """Return the (min, max) newline counts allowed before *node*."""
        if (node.type == symbols.simple_stmt
                or has_parent(node, symbols.simple_stmt)):
            # Simple statements: at most one newline, none required.
            return (0, 1)
        if (has_parent(node, symbols.classdef)
                or has_parent(node, symbols.funcdef)):
            # Inside a definition only a single blank line is used.
            return (1, 1)
        # Top-level definition: exactly two blank lines.
        return (2, 2)

    def trim_comments(self, curr_node_indentation, previous_whitespace,
                      min_lines_between_defs, max_lines_between_defs):
        """Rebuild *previous_whitespace* so its newline counts honor the
        limits while preserving comments and the node's indentation."""
        before, comments, after = tuplize_comments(previous_whitespace)

        # Cap each side of the comment block independently.
        if before.count("\n") > max_lines_between_defs:
            before = '\n' * max_lines_between_defs
        if after.count("\n") > max_lines_between_defs:
            after = '\n' * max_lines_between_defs

        if before.count("\n") + after.count("\n") > max_lines_between_defs:
            if before and after:
                # Still too many in total: shrink to either 1 before/1 after
                # or 0 before/1 after, depending on the minimum.
                before = ('\n' * (min_lines_between_defs - 1)
                          if min_lines_between_defs else '')
                after = '\n'

        total_lines = before.count("\n") + after.count("\n")
        if total_lines < min_lines_between_defs:
            # Pad in front of the comments up to the required minimum.
            before += '\n' * (min_lines_between_defs - total_lines)

        result = '%s%s%s' % (before, comments, after)
        # The prefix's last line must reproduce the node's indentation.
        if result.split('\n')[-1] != curr_node_indentation:
            result = '%s%s' % (result.rstrip(' '), curr_node_indentation)
        return result
|
from setuptools import setup, find_packages
# Packaging definition for the stoQ `peinfo` plugin.
PLUGIN_METADATA = dict(
    name="peinfo",
    version="2.0.0",
    author="Marcus LaFerrera (@mlaferrera)",
    url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
    license="Apache License 2.0",
    description="Gather relevant information about an executable using pefile",
    packages=find_packages(),
    # Ship the plugin descriptor and the PEiD signature database.
    package_data={'peinfo': ['peinfo.stoq', 'userdb.txt']},
)

setup(**PLUGIN_METADATA)
|
# Copyright (c) 2017, John Skinner
import typing
import bson
import pymodm.fields as fields
from pymodm.context_managers import no_auto_dereference
from arvet.database.autoload_modules import autoload_modules
from arvet.core.image_source import ImageSource
from arvet.batch_analysis.task import Task
from arvet.config.path_manager import PathManager
class ImportDatasetTask(Task):
    """
    A task for importing a dataset. Result will be an image source id.
    The loader module named by `module_name` must expose an
    `import_dataset(path, **kwargs)` function.
    """
    module_name = fields.CharField(required=True)
    path = fields.CharField(required=True)
    # Callable default: the original `default={}` is a single shared dict
    # instance, so mutations would leak between documents.
    additional_args = fields.DictField(default=dict, blank=True)
    result = fields.ReferenceField(ImageSource, on_delete=fields.ReferenceField.CASCADE)

    def get_unique_name(self) -> str:
        """
        Build a human-readable unique name for this task.
        Uses the dataset name from additional_args (spaces -> underscores)
        when present, always suffixed with the primary key.
        :return: 'import_<dataset>_<pk>' or 'import_<pk>'
        """
        if 'dataset_name' in self.additional_args:
            name = self.additional_args['dataset_name'].replace(' ', '_')
            return 'import_{0}_{1}'.format(name, self.pk)
        return "import_{0}".format(self.pk)

    def load_referenced_models(self) -> None:
        """
        Load the result type so we can save the task
        :return:
        """
        with no_auto_dereference(ImportDatasetTask):
            if isinstance(self.result, bson.ObjectId):
                # result is an id and not a model, autoload the model
                autoload_modules(ImageSource, [self.result])

    def run_task(self, path_manager: PathManager):
        """
        Import the dataset: load the loader module, resolve self.path, and
        call the module's import_dataset(path, **additional_args).
        On success the returned collection is stored in self.result and the
        job is marked complete; every failure path marks the job failed.
        :param path_manager: resolves self.path to an on-disk location
        """
        import logging
        import traceback
        import importlib
        # Try and import the desired loader module
        try:
            loader_module = importlib.import_module(self.module_name)
        except ImportError:
            logging.getLogger(__name__).error(
                "Could not load module {0} for importing dataset, check it exists".format(self.module_name))
            self.mark_job_failed()
            # Bare raise preserves the original traceback for the caller.
            raise
        # Check the module has the required function
        if not hasattr(loader_module, 'import_dataset'):
            logging.getLogger(__name__).error(
                "Module {0} does not have method 'import_dataset'".format(self.module_name))
            self.mark_job_failed()
            return
        # Try and find the root directory or file to load the dataset from
        try:
            actual_path = path_manager.find_path(self.path)
        except FileNotFoundError:
            logging.getLogger(__name__).error(
                "Could not find dataset path {0}".format(self.path))
            self.mark_job_failed()
            return
        logging.getLogger(__name__).info(
            "Importing dataset from {0} using module {1}".format(actual_path, self.module_name))
        try:
            image_collection = loader_module.import_dataset(actual_path, **self.additional_args)
        except Exception:
            logging.getLogger(__name__).error(
                "Exception occurred while importing dataset from {0} with module {1}:\n{2}".format(
                    actual_path, self.module_name, traceback.format_exc()
                ))
            self.mark_job_failed()
            raise
        if image_collection is None:
            logging.getLogger(__name__).error("Failed to import dataset from {0} with module {1}".format(
                actual_path, self.module_name))
            self.mark_job_failed()
        else:
            self.result = image_collection
            self.mark_job_complete()
            logging.getLogger(__name__).info("Successfully imported dataset")

    @property
    def result_id(self) -> typing.Union[bson.ObjectId, None]:
        """
        Get the id of the result, without attempting to construct the object.
        Makes it easier for other objects to refer to this result, without loading large result objects.
        :return: the ObjectId of the result, or None if there is no result
        """
        with no_auto_dereference(ImportDatasetTask):
            if self.result is None:
                return None
            if isinstance(self.result, bson.ObjectId):
                return self.result
            return self.result.pk

    def get_result(self) -> typing.Union[ImageSource, None]:
        """
        Actually get the result object.
        This will auto-load the result model, and then attempt to construct it.
        :return: the ImageSource result, or None if there is no result
        """
        with no_auto_dereference(ImportDatasetTask):
            if self.result is None:
                return None
            if isinstance(self.result, bson.ObjectId):
                # result is an id and not a model, autoload the model
                autoload_modules(ImageSource, [self.result])
        # This will now dereference correctly
        return self.result
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the user-selectable UI theme to UserProfile.
    # NOTE: migration files are a historical record generated by Django;
    # the b'...' byte strings are kept exactly as generated (Python 2 era).

    dependencies = [
        ('login', '0014_auto_20160707_1625'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='theme',
            # Two bootswatch-style choices; 'cerulean' is the default theme.
            field=models.CharField(default=b'cerulean', max_length=10, choices=[(b'cerulean', b'cerulean'), (b'flat', b'flat')]),
        ),
    ]
|
import re
import time
import pytest
from freiner.storage import MovingWindow
from freiner.strategies.fixed_window import FixedWindowRateLimiter
from freiner.strategies.moving_window import MovingWindowRateLimiter
def test_pluggable_storage_fixed_window():
    """A duck-typed fixed-window storage is accepted by the fixed-window
    strategy and rejected by the moving-window strategy."""
    class MyStorage:
        # Minimal implementation of the fixed-window storage protocol.
        def incr(self, key: str, expiry: int, elastic_expiry: bool = False) -> int:
            return 1

        def get(self, key: str) -> int:
            return 0

        def get_expiry(self, key: str) -> float:
            return time.time()

        def clear(self, key: str):
            pass

    fixed_storage = MyStorage()
    assert FixedWindowRateLimiter(fixed_storage).storage is fixed_storage

    expected = re.escape(
        "Moving Window rate limiting is not implemented for storage of type MyStorage"
    )
    with pytest.raises(TypeError, match=expected):
        # The type mismatch is deliberate: rejection is what we are testing.
        MovingWindowRateLimiter(fixed_storage)  # type: ignore
def test_pluggable_storage_moving_window():
    """A duck-typed moving-window storage is accepted by the moving-window
    strategy and rejected by the fixed-window strategy."""
    class MyStorage:
        # Minimal implementation of the moving-window storage protocol.
        def acquire_entry(self, key: str, limit: int, expiry: int, no_add: bool = False) -> bool:
            return True

        def get_moving_window(self, key: str, limit: int, expiry: int) -> MovingWindow:
            return MovingWindow(time.time(), 0)

        def clear(self, key: str):
            pass

    moving_storage = MyStorage()
    assert MovingWindowRateLimiter(moving_storage).storage is moving_storage

    expected = re.escape(
        "Fixed Window rate limiting is not implemented for storage of type MyStorage"
    )
    with pytest.raises(TypeError, match=expected):
        # The type mismatch is deliberate: rejection is what we are testing.
        FixedWindowRateLimiter(moving_storage)  # type: ignore
|
# Importing the pickle library
import pickle

# Persist the trained model to disk as model.pkl.
# NOTE(review): `model` must already be defined (e.g. a fitted estimator)
# before this snippet runs — it is not created here.
# Use a context manager so the file handle is closed and the data flushed
# even if pickling fails; the original bare open() leaked the handle.
with open('model.pkl', 'wb') as model_file:
    pickle.dump(model, model_file)
|
import discord
from discord.ext import commands
class Fun(commands.Cog):
    """Cog intended to hold fun/novelty bot commands."""

    def __init__(self, client):
        # Keep a reference to the bot so future commands can use it.
        self.client = client
def setup(client):
    """Extension entry point: discord.py calls this to register the cog."""
    cog = Fun(client)
    client.add_cog(cog)
import tkinter
class Display(object):
    """Thin wrapper around a tkinter root window with widget builders."""

    def __init__(self, W, H, background):
        """Create a W x H window filled with the given background color."""
        self.window = tkinter.Tk()
        self.width = W
        self.height = H
        # geometry() expects a "<width>x<height>" string.
        self.screen = self.window.geometry('{}x{}'.format(W, H))
        self.window.configure(background=background)

    def __repr__(self):
        return 'tKinter Display of Width: {} and Height: {}'.format(self.width, self.height)

    def title(self, title):
        """Set the window title; returns self so calls can be chained."""
        self.window.title(title)
        return self

    def label(self, text, fg, bg, font, size=12, weight='normal'):
        """Create (but do not place) a label with the given text and font."""
        return tkinter.Label(self.window, text=text, fg=fg, bg=bg,
                             font=(font, size, weight))

    def button(self, text, x, y, height, width, command):
        """Create a button wired to *command* and place it at (x, y)."""
        btn = tkinter.Button(self.window, text=text, command=command,
                             height=height, width=width)
        btn.place(x=x, y=y)
        return btn
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.